Dataset schema (one row per column; string/sequence entries give min–max lengths, class entries give the number of distinct values):

| column | dtype | lengths / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 5–283 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–41 |
| license_type | string | 2 classes |
| repo_name | string | length 7–96 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 58 classes |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 12.7k–662M, nullable (⌀) |
| star_events_count | int64 | 0–35.5k |
| fork_events_count | int64 | 0–20.6k |
| gha_license_id | string | 11 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 43 classes |
| src_encoding | string | 9 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7–5.88M |
| extension | string | 30 classes |
| content | string | length 7–5.88M |
| authors | sequence | length 1 |
| author | string | length 0–73 |
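For orientation, a minimal sketch of iterating over rows with this schema using the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, since the preview does not name the dataset:

```python
from datasets import load_dataset

# "org/python-code-dump" is a hypothetical placeholder id; the preview does not name the dataset
ds = load_dataset("org/python-code-dump", split="train", streaming=True)  # stream to avoid a full download
row = next(iter(ds))
print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
```

The records below follow this column order, pipe-separated.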
c68e398d0bbc80c7096a5f5ae75510261395be33 | cab14a6de18660d831eca8da3a713cb5d7c06476 | /decorator/5_deco_arg.py | 9120ed28307e21ff1a70b70025ec25871208384a | [] | no_license | sjl421/python_batteries_examples | 85da0dba187aa55b00f369f316cd948467712f43 | 3461d3561b6f45da15216e909fd91f396308ff74 | refs/heads/master | 2020-12-24T09:53:40.112603 | 2014-10-13T21:57:25 | 2014-10-13T21:57:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py |
'''
After decorator wrapping, I can use functools.wraps to protect my function's internal information (its name and docstring)
'''
from functools import wraps
def tag(tag_name): # <== it takes an argument and returns a decorator
def tags_dec(func):
@wraps(func)
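# @wraps copies func's __name__ and __doc__ onto func_wrapper below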
def func_wrapper(name):
'this is a wrapper'
return "<{0}>{1}</{0}>".format(tag_name, name)
return func_wrapper
return tags_dec
@tag('div')
def get_text(name):
'return hello gretting'
return "hello " + name
print get_text("hiro")
print "however, the get_text's function name has been changed as well"
print get_text.__name__
print get_text.__doc__
| [
"[email protected]"
] | |
48014e8268d7ec2ec7fcb92fc8e09e35141019bf | 3823bb52ce03e1f79c41b61c8c660b49ffdb32cd | /ApiGateway_StepFunc_CF/product_function.py | d5ec6a48234dedbffb3e1c2cc99d4ffc1902d985 | [
"Apache-2.0"
] | permissive | SSopin/aws-playground | 33c0588c350264c943d96eedc56ef97d28bac458 | 78449c0b0df3bed283330a1f84c1ffbbcfed4460 | refs/heads/main | 2023-06-30T04:18:19.574973 | 2021-08-08T00:59:40 | 2021-08-08T00:59:40 | 337,905,098 | 0 | 1 | Apache-2.0 | 2021-08-07T18:34:50 | 2021-02-11T01:52:27 | Python | UTF-8 | Python | false | false | 816 | py | import os
import json
import boto3
from decimal import Decimal
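# DynamoDB returns numeric attributes as Decimal, which json.dumps cannot serialize by default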
class DecimalEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Decimal):
return str(obj)
return json.JSONEncoder.default(self, obj)
def lambda_handler(event, context):
json_region = os.environ['AWS_REGION']
# Get the service resource.
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('product')
try:
response = table.get_item(
Key={
'user': event['user']
}
)
item = response['Item']
    except Exception:  # the item may be missing (KeyError) or the lookup may fail
item = 'Not found'
return {
"statusCode": 200,
"headers": {
"Content-Type": "application/json"
},
"body": json.dumps(item, cls=DecimalEncoder)
} | [
"[email protected]"
] | |
6aef9694bff099a39f6f438b8b39618c6dff377a | e84b00e384f6258634877b53f84d3c934a155104 | /test/test_add_contact.py | d13bb1c1f47caec0dc50fb1a9133b9a0c7e252c7 | [
"Apache-2.0"
] | permissive | yevhen-hornysh/python_courses | bca39e5f871a4777920dfba0c72e29aca603cfad | 729b54af3a120305a812e7db558f74c81c419a83 | refs/heads/master | 2022-12-18T12:06:58.822539 | 2020-09-28T15:00:49 | 2020-09-28T15:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | from model.contact import Contact
def test_add_contact(app, json_contacts):
contact = json_contacts
old_contacts = app.contact.get_contact_list()
app.contact.create(contact)
assert len(old_contacts) + 1 == app.contact.count()
new_contacts = app.contact.get_contact_list()
old_contacts.append(contact)
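# after appending the created contact, old and new lists should match up to ordering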
assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max) | [
"[email protected]"
] | |
94621ca749f8c94686b0a0c342ba70ad63847902 | eedef2d97a4a98c6ef3e6a9775c360b88ffb68b5 | /tumblr_ranking/asgi.py | b1696d23d8210bccb5c8fbab74fd0f6c455ea86f | [
"MIT"
] | permissive | hnkm525/dashboard_ranking | 2a60c4892e49454b3239664c50e5662c8f0ddcea | 3c2087c3d02270c0748d81238d5f6e2967c34c1c | refs/heads/main | 2023-07-04T07:30:25.613550 | 2021-08-12T10:20:42 | 2021-08-12T10:20:42 | 304,541,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
ASGI config for tumblr_ranking project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tumblr_ranking.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
f0f5f9bb71d27cc86c17f2383ef9578dabca489c | 8edcc353815cd13b3e921c0f4e4acebea7498d72 | /packages/py-ab-testing/ABTesting/utils.py | 950df0425f58339137d318df8f312ac15af7f57f | [
"MIT"
] | permissive | ZigZagT/ab-testing | 03e4766513401a434b71ea044b268b89df772684 | a99983eb692f4cdd81cc8c96d732c806fad4e626 | refs/heads/master | 2023-07-17T17:40:26.688925 | 2021-08-20T23:14:46 | 2021-08-20T23:14:46 | 386,172,921 | 0 | 0 | MIT | 2021-07-15T05:19:54 | 2021-07-15T05:19:53 | null | UTF-8 | Python | false | false | 605 | py | import hashlib
from typing import Any, Dict, Union, List
def hash_with_salt(value, salt):
# type: (Any, str) -> str
ret = hashlib.sha256()
ret.update(salt.encode())
ret.update(str(value).encode())
return ret.hexdigest()
def hash_dict(in_dict, salt):
# type: (Dict[str, Union[List[Any], Any]], str) -> Dict
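    # hash both keys and values; list values are hashed element-wise and falsy entries are dropped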
return {
hash_with_salt(k, salt):
[hash_with_salt(v, salt) for v in value_list]
if isinstance(value_list, list)
else hash_with_salt(value_list, salt)
for k, value_list in in_dict.items()
if value_list
}
| [
"[email protected]"
] | |
b2792abda27dd21efd115c3f7a3e03fd913de933 | e3afe6d75545f1910215bad7d648b0d2aca71e84 | /ROS/catkin_ws/build/mavros_msgs/catkin_generated/pkg.develspace.context.pc.py | 07bc0c391e1c31b8fc03a82d50dfb52a57694070 | [] | no_license | TonMise/ROS_robot | 1c1c1c98c26435276e609d6f890833b0cb3ef1be | 796cf81e97fe02faa9fd90b82d173c2b3e7bd0e7 | refs/heads/main | 2023-02-05T02:34:05.291874 | 2020-12-21T19:15:53 | 2020-12-21T19:15:53 | 322,928,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ros/catkin_ws/devel/.private/mavros_msgs/include;/home/ros/catkin_ws/src/mavros/mavros_msgs/include".split(';') if "/home/ros/catkin_ws/devel/.private/mavros_msgs/include;/home/ros/catkin_ws/src/mavros/mavros_msgs/include" != "" else []
PROJECT_CATKIN_DEPENDS = "geographic_msgs;geometry_msgs;message_runtime;sensor_msgs;std_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "mavros_msgs"
PROJECT_SPACE_DIR = "/home/ros/catkin_ws/devel/.private/mavros_msgs"
PROJECT_VERSION = "1.5.0"
| [
"[email protected]"
] | |
80bd30070081c87580a675c4a7064188852123d7 | 03cdc6e6150c8729fc5675c5902a9f5d3e299ab8 | /While_loop.py | a30f52d8e2c96c8f020130c40650d60e5b4badae | [] | no_license | Rahul-D78/Git_learningRepo | cd581b7d352ca45dc875e4db74db53fb02e94668 | 1ae80236b34acc9162f3b1c7445501a351821b89 | refs/heads/master | 2020-12-15T15:11:22.065959 | 2020-01-20T17:47:06 | 2020-01-20T17:47:06 | 235,152,564 | 1 | 0 | null | 2020-01-20T17:23:29 | 2020-01-20T17:01:36 | Python | UTF-8 | Python | false | false | 69 | py | count = 0
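# prints the integers 0 through 8, one per line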
while count < 9:
print(count)
count = count + 1
"[email protected]"
] | |
b4c5bf77782cafdad5cf024489467e9df7a2e79c | c6de409840b15f8dbd414ff204a4b0d464ef2a72 | /dangerzone/cli.py | 2e40e9728bcc10b1336bf8facdb3e0f870640860 | [
"MIT"
] | permissive | pianomanx/dangerzone | 6a102cbb0cab6267a8b68aea43f8272d6c5c91d1 | 7a4788f7a444077d734d119cfe444503bb67e91c | refs/heads/master | 2023-07-04T05:25:50.899746 | 2021-08-06T18:46:19 | 2021-08-06T18:46:19 | 326,886,504 | 0 | 0 | MIT | 2021-08-06T21:40:54 | 2021-01-05T04:32:17 | Python | UTF-8 | Python | false | false | 5,810 | py | import os
import shutil
import click
from colorama import Fore, Back, Style
from .global_common import GlobalCommon
from .common import Common
def print_header(s):
click.echo("")
click.echo(Style.BRIGHT + s)
def exec_container(global_common, args):
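    # run the dangerzone container, streaming its output live and colorizing "> " command lines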
output = ""
with global_common.exec_dangerzone_container(args) as p:
for line in p.stdout:
output += line.decode()
# Hack to add colors to the command executing
if line.startswith(b"> "):
print(
Style.DIM + "> " + Style.NORMAL + Fore.CYAN + line.decode()[2:],
end="",
)
else:
print(" " + line.decode(), end="")
stderr = p.stderr.read().decode()
if len(stderr) > 0:
print("")
for line in stderr.strip().split("\n"):
print(" " + Style.DIM + line)
if p.returncode != 0:
click.echo(f"Return code: {p.returncode}")
if p.returncode == 126 or p.returncode == 127:
click.echo(f"Authorization failed")
return p.returncode, output, stderr
@click.command()
@click.option("--custom-container", help="Use a custom container")
@click.option("--safe-pdf-filename", help="Default is filename ending with -safe.pdf")
@click.option("--ocr-lang", help="Language to OCR, defaults to none")
@click.option(
"--skip-update",
is_flag=True,
help="Don't update flmcode/dangerzone container",
)
@click.argument("filename", required=True)
def cli_main(custom_container, safe_pdf_filename, ocr_lang, skip_update, filename):
global_common = GlobalCommon()
common = Common()
global_common.display_banner()
# Validate filename
valid = True
try:
with open(os.path.abspath(filename), "rb") as f:
pass
except:
valid = False
if not valid:
click.echo("Invalid filename")
return
common.document_filename = os.path.abspath(filename)
# Validate safe PDF output filename
if safe_pdf_filename:
valid = True
if not safe_pdf_filename.endswith(".pdf"):
click.echo("Safe PDF filename must end in '.pdf'")
return
try:
with open(os.path.abspath(safe_pdf_filename), "wb") as f:
pass
except:
valid = False
if not valid:
click.echo("Safe PDF filename is not writable")
return
common.save_filename = os.path.abspath(safe_pdf_filename)
else:
common.save_filename = (
f"{os.path.splitext(common.document_filename)[0]}-safe.pdf"
)
try:
with open(common.save_filename, "wb") as f:
pass
except:
click.echo(
f"Output filename {common.save_filename} is not writable, use --safe-pdf-filename"
)
return
# Validate OCR language
if ocr_lang:
valid = False
for lang in global_common.ocr_languages:
if global_common.ocr_languages[lang] == ocr_lang:
valid = True
break
if not valid:
click.echo("Invalid OCR language code. Valid language codes:")
for lang in global_common.ocr_languages:
click.echo(f"{global_common.ocr_languages[lang]}: {lang}")
return
# Validate custom container
if custom_container:
success, error_message = global_common.container_exists(custom_container)
if not success:
click.echo(error_message)
return
global_common.custom_container = custom_container
else:
if skip_update:
# Make sure flmcode/dangerzone exists
success, error_message = global_common.container_exists(
"flmcode/dangerzone"
)
if not success:
click.echo(
"You don't have the flmcode/dangerzone container so you can't use --skip-update"
)
return
# Pull the latest image
if not skip_update:
print_header("Pulling container image (this might take a few minutes)")
returncode, _, _ = exec_container(global_common, ["pull"])
if returncode != 0:
return
# Convert to pixels
print_header("Converting document to pixels")
returncode, output, _ = exec_container(
global_common,
[
"documenttopixels",
"--document-filename",
common.document_filename,
"--pixel-dir",
common.pixel_dir.name,
"--container-name",
global_common.get_container_name(),
],
)
if returncode != 0:
return
success, error_message = global_common.validate_convert_to_pixel_output(
common, output
)
if not success:
click.echo(error_message)
return
# Convert to PDF
print_header("Converting pixels to safe PDF")
if ocr_lang:
ocr = "1"
else:
ocr = "0"
ocr_lang = ""
returncode, _, _ = exec_container(
global_common,
[
"pixelstopdf",
"--pixel-dir",
common.pixel_dir.name,
"--safe-dir",
common.safe_dir.name,
"--container-name",
global_common.get_container_name(),
"--ocr",
ocr,
"--ocr-lang",
ocr_lang,
],
)
if returncode != 0:
return
# Save the safe PDF
source_filename = f"{common.safe_dir.name}/safe-output-compressed.pdf"
shutil.move(source_filename, common.save_filename)
print_header("Safe PDF created successfully")
click.echo(common.save_filename)
| [
"[email protected]"
] | |
dd9675cb1854f23944c78c9feb2843772a73ecc3 | 1617f6f07eaaa33681ccddb14c3c2cb1c834468d | /cryptoshredding/s3/stream_body_wrapper.py | c7b135db333902a77beba90c1170417b8a93b043 | [
"MIT"
] | permissive | hupe1980/cryptoshredding | 96e576b16913d33371f28628c9794ec4456de6da | 1ab5ee452c4435f486006aa2cc1a7bee440d91fe | refs/heads/main | 2023-02-27T17:46:18.405696 | 2021-02-10T19:36:00 | 2021-02-10T19:36:00 | 334,744,116 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,275 | py | import base64
import json
from cryptography.hazmat.primitives.ciphers.aead import AESGCM
from ..key_store import KeyStore
class StreamBodyWrapper(object):
def __init__(self, key_store: KeyStore, stream_body, metadata) -> None:
self._key_store = key_store
self._stream_body = stream_body
self._metadata = metadata
def read(self):
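        # envelope decryption: unwrap the data key with the main key, then AES-GCM-decrypt the body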
iv = base64.b64decode(self._metadata["x-amz-iv"])
encrypted_data_key = base64.b64decode(self._metadata["x-amz-key-v2"])
encryption_context = json.loads(self._metadata["x-amz-matdesc"])
main_key = self._key_store.get_main_key(encryption_context["key_id"])
data_key = main_key.decrypt(encrypted_data_key)
bytes = base64.b64decode(self._stream_body.read())
return AESGCM(data_key).decrypt(iv, bytes, None)
def __getattr__(self, name: str):
"""Catch any method/attribute lookups that are not defined in this class and try
to find them on the provided bridge object.
:param str name: Attribute name
:returns: Result of asking the provided stream object for that attribute name
:raises AttributeError: if attribute is not found on provided bridge object
"""
return getattr(self._stream_body, name)
| [
"[email protected]"
] | |
242131050ae51d854e5df08fbdd9fef3a8d5c0b4 | 79b10d9f827c489d7964bf024acb834265cbf249 | /snippets/linspace.py | 5f20152c8423ad927e8f4394849a3feb27a4d276 | [
"MIT"
] | permissive | lshappylife/snippet-manager | d9b2428f14f10b4ee7c78893dce1b2dd227eca88 | bebe45a601368947168e3ee6e6ab8c1fc2ee2055 | refs/heads/master | 2021-05-20T13:59:57.336622 | 2019-01-17T06:01:41 | 2019-01-17T06:02:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | >>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
| [
"[email protected]"
] | |
15e0f1f816ce0dfd0f44058032326839b6d3164c | 0012d7e04ad2728f29bb2bfc8393058927095c24 | /sms_server.py | 26a455c09c99e1f6bea799626869c25903aef6c4 | [] | no_license | wjimenez5271/ignite-chat | f11613f08e94223e0069b4ad90108f1474820876 | e16fecc86e2f1b67aebba9354486001ad8f247f4 | refs/heads/master | 2020-12-12T22:47:15.664414 | 2016-04-10T03:24:45 | 2016-04-10T03:24:45 | 23,649,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,911 | py | from flask import Flask, request, redirect
import argparse
import twilio.twiml
import db_json as db
import s3_sync
import logging
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def receive_sms():
from_number = request.values.get('From', None)
message_body = request.values.get('Body', None)
message_sid = request.values.get('MessageSid', None)
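    # handle "unsubscribe" before "subscribe": the former contains the latter as a substring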
if "unsubscribe" in message_body.lower():
try:
db.db_remove_phonenumber(int(from_number))
message = "Your phone number has been removed"
logging.info('Phone number {} removed from db'.format(from_number))
except Exception as e:
logging.error('Exception removing phone number {} from db: {}'.format(from_number, e))
elif "subscribe" in message_body.lower(): # if not check to see if subscribe is in body
        try:
            if db.db_get_phonenumber(int(from_number)):  # check if number is already in db
                message = "This number is currently subscribed to ignite chat"
                logging.info('Phone number {} already in DB'.format(from_number))
            else:
                db.db_set_phonenumber(int(from_number), message_sid)  # not yet subscribed, add to db
                message = "You've been subscribed to Ignite Chat"
                logging.info('Phone number {} added to DB'.format(from_number))
        except Exception as e:
            logging.error('Exception handling subscription for phone number {} in db: {}'.format(from_number, e))
            message = "Sorry, there was a problem processing your request"  # ensure 'message' is always defined
else: # if not reply with help message
message = "To subscribe, reply with 'subscribe'. To unsubscribe, reply 'unsubscribe'"
resp = twilio.twiml.Response()
resp.message(message)
return str(resp)
def setup_logging(loglevel, logfile):
#Setup Logger
numeric_log_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_log_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(filename=logfile, level=numeric_log_level,
format="%(asctime)s - [ignite-chat] - "
"%(levelname)s - %(message)s")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--loglevel', help='logging level', type=str, default='INFO')
parser.add_argument('--logfile', help='path to write logfile to', type=str, default='sms-server.log')
args = parser.parse_args()
setup_logging(args.loglevel, args.logfile)
logging.info('loading latest db for S3')
s3_sync.get_data()
logging.info('Starting server')
if args.loglevel.upper() == 'DEBUG':
floglevel=True
else:
floglevel=False
app.run(debug=floglevel, host='0.0.0.0')
| [
"[email protected]"
] | |
aab5a54931faf087288ffdc28be9628d6cacce0c | c95817a60f6cd2b2e6fe8a7a0bda78cea6fe7f1e | /allesfitter/v2/detection/injection_recovery_output.py | a02592464446673756c0c20a3747a65f50a0a64e | [
"MIT"
] | permissive | MNGuenther/allesfitter | 0f41b860f5274dc78ddb9d84406650edcc7cc3ed | 81e37ab16c28648858e4d9188182949023465f58 | refs/heads/master | 2023-04-30T10:25:08.821361 | 2023-04-14T20:07:16 | 2023-04-14T20:07:16 | 150,273,868 | 56 | 39 | MIT | 2023-04-14T20:07:17 | 2018-09-25T13:56:51 | Jupyter Notebook | UTF-8 | Python | false | false | 8,791 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 10:01:30 2019
@author:
Maximilian N. Günther
MIT Kavli Institute for Astrophysics and Space Research,
Massachusetts Institute of Technology,
77 Massachusetts Avenue,
Cambridge, MA 02109,
USA
Email: [email protected]
Web: www.mnguenther.com
"""
from __future__ import print_function, division, absolute_import
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
#::: modules
import numpy as np
import matplotlib.pyplot as plt
import warnings
#import os, sys
#import scipy.ndimage
#from scipy.interpolate import griddata
#import matplotlib.ticker as mtick
###############################################################################
#::: fct to check if the right signal was found
###############################################################################
def is_multiple_of(a, b, tolerance=0.05):
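    # True when a is within the relative tolerance of an integer multiple of b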
a = float(a)  # builtin float; np.float was deprecated and later removed from NumPy
b = float(b)
result = a % b
return (abs(result/b) <= tolerance) or (abs((b-result)/b) <= tolerance)
def is_detected(inj_period, tls_period):
right_period = is_multiple_of(tls_period, inj_period/2.) #check if it is a multiple of half the period to within 5%
# right_epoch = False
# for tt in results.transit_times:
# for i in range(-5,5):
# right_epoch = right_epoch or (np.abs(tt-epoch+i*period) < (1./24.)) #check if any epochs matches to within 1 hour
# right_depth = (np.abs(np.sqrt(1.-results.depth)*rstar - rplanet)/rplanet < 0.05) #check if the depth matches
if right_period:
return True
else:
return False
def is_detected_list(inj_periods, tls_periods):
detected = []
for i in range(len(inj_periods)):
detected.append(is_detected(inj_periods[i], tls_periods[i]))
return np.array(detected)
###############################################################################
#::: plot
###############################################################################
def irplot(fname, period_bins=None, rplanet_bins=None, outdir=None):
#::: load the files and check which TLS detection matches an injection;
#::: note that one injection will have multiple TLS detections (due not false positives)
results = np.genfromtxt(fname, delimiter=',', dtype=None, names=True)
inj_periods = results['inj_period']
inj_rplanets = results['inj_rplanet']
tls_periods = results['tls_period']
detected = is_detected_list(inj_periods, tls_periods)
# print(detected)
#::: now boil it down to unique injections and see whether any TLS detection matched it
# period = np.unique(inj_periods)
# rplanet = np.unique(inj_rplanets)
period = []
rplanet = []
found = []
for p in np.unique(inj_periods):
for r in np.unique(inj_rplanets):
period.append(p)
rplanet.append(r)
ind = np.where( (inj_periods==p) & (inj_rplanets==r) )[0]
f = any( detected[ind] )
found.append(f)
# print(p,r,ind,f)
period = np.array(period)
rplanet = np.array(rplanet)
found = np.array(found)
###############################################################################
#::: scatter plot
###############################################################################
fig, ax = plt.subplots(figsize=(5,5))
ax.scatter(period, rplanet, c=found, s=100, cmap='Blues_r', edgecolors='b')
ax.set(xlabel='Period (days)', ylabel='Radius '+r'$(R_\oplus)$')
ax.text(0.5,1.05,'filled: not recovered | unfilled: recovered',ha='center',va='center',transform=ax.transAxes)
fig.savefig('injection_recovery_test_scatter.pdf', bbox_inches='tight')
# err
###############################################################################
#::: histogram (normed)
###############################################################################
if ( len(np.unique(inj_periods)) * len(np.unique(inj_rplanets)) ) < 100:
print('\n!-- WARNING: not enough samples to create a 2D histogram plot. --!\n')
else:
if (period_bins is not None) and (rplanet_bins is not None):
bins = [period_bins, rplanet_bins]
else:
bins = [np.histogram_bin_edges(period, bins='auto'), np.histogram_bin_edges(rplanet, bins='auto')]
h1,x,y = np.histogram2d(period[found==1], rplanet[found==1], bins=bins)
h2,x,y = np.histogram2d(period[found==0], rplanet[found==0], bins=bins)
normed_hist = (100.*h1/(h1+h2))
fig, ax = plt.subplots(figsize=(6.5,5))
im = plt.imshow(normed_hist.T, origin='lower', extent=(x[0], x[-1], y[0], y[-1]), interpolation='none', aspect='auto', cmap='Blues_r', vmin=0, vmax=100, rasterized=True)
plt.colorbar(im, label='Recovery rate (%)')
plt.xlabel('Injected period (days)')
plt.ylabel(r'Injected radius (R$_\oplus$)')
# change_font(ax)
fig.savefig('injection_recovery_test_hist2d.pdf', bbox_inches='tight')
###############################################################################
#::: pyplot histograms (total counts)
###############################################################################
#fig, ax = plt.subplots(figsize=(6.5,5))
#h1,x,y,im = plt.hist2d(period[found==1], rplanet[found==1], bins=bins, cmap='Blues_r')
#plt.colorbar(im, label='Recovery rate (%)')
#plt.xlabel('Injected period (days)')
#plt.ylabel(r'Injected radius (R$_\oplus$)')
#change_font(ax)
#fig, ax = plt.subplots(figsize=(6.5,5))
#h2,x,y,im = plt.hist2d(period[found==0], rplanet[found==0], bins=bins, cmap='Blues_r')
#plt.colorbar(im, label='Recovery rate (%)')
#plt.xlabel('Injected period (days)')
#plt.ylabel(r'Injected radius (R$_\oplus$)')
#change_font(ax)
###############################################################################
#::: kdeplots
###############################################################################
# fig, ax = plt.subplots(figsize=(6.5,5))
# ax = sns.kdeplot(period[found==0], rplanet[found==0], shade=True, cmap='Blues', cbar=True, alpha=0.5)
# ax = sns.kdeplot(period[found==1], rplanet[found==1], shade=True, cmap='Blues_r', cbar=True)
# ax.set(xlim=[0,120], ylim=[0.8,4])
# plt.xlabel('Injected period (days)')
# plt.ylabel(r'Injected radius (R$_\oplus$)')
# change_font(ax)
#fig, ax = plt.subplots(figsize=(6.5,5))
#ax = sns.kdeplot(period[found==0], rplanet[found==0], shade=True, cmap='Blues', cbar=True)
#ax.set(xlim=[15,85], ylim=[0.8,2.0])
#plt.xlabel('Injected period (days)')
#plt.ylabel(r'Injected radius (R$_\oplus$)')
#change_font(ax)
###############################################################################
#::: others
###############################################################################
# plt.figure(figsize=(5,5))
# z = found.reshape(len(np.unique(period)), len(np.unique(rplanet)))
# plt.imshow(z.T, origin='lower', extent=(np.amin(period), np.amax(period), np.amin(rplanet), np.amax(rplanet)), aspect='auto', interpolation='gaussian', filterrad=5, cmap='Blues_r')
# plt.xlabel('Period (days)')
# plt.ylabel(r'Radius (R$_\oplus$)')
#fig, ax = plt.subplots(figsize=(6.5,5))
#plt.tricontourf(period, rplanet, found, cmap='Blues_r')
#plt.xlabel('Injected period (days)')
#plt.ylabel(r'Injected radius (R$_\oplus$)')
# grid_x, grid_y = np.mgrid[np.amin(period):np.amax(period):100j, np.amin(rplanet):np.amax(rplanet):100j]
# grid_z = griddata((period, rplanet), found*100, (grid_x, grid_y), method='linear')
# fig, ax = plt.subplots(figsize=(6.5,5))
# im = plt.imshow(grid_z.T, origin='lower', extent=(np.amin(period), np.amax(period), np.amin(rplanet), np.amax(rplanet)), interpolation='none', aspect='auto', cmap='Blues_r', rasterized=True, vmin=0, vmax=100)
# plt.colorbar(im, label='Recovery rate (%)')
# plt.xlabel('Injected period (days)')
# plt.ylabel(r'Injected radius (R$_\oplus$)')
#change_font(ax)
#
#plt.savefig('injected_transit_search.pdf', bbox_inches='tight')
###############################################################################
#::: run
###############################################################################
# plot('TIC_269701147.csv')
| [
"[email protected]"
] | |
7c48a6336ed13d3cafa8be90309f7e193d5a44da | 159d4ae61f4ca91d94e29e769697ff46d11ae4a4 | /venv/lib/python3.9/site-packages/pandas/core/arraylike.py | 7cf34635ce9c1b6aa5d15eb0d47d8400b01cdff4 | [
"MIT"
] | permissive | davidycliao/bisCrawler | 729db002afe10ae405306b9eed45b782e68eace8 | f42281f35b866b52e5860b6a062790ae8147a4a4 | refs/heads/main | 2023-05-24T00:41:50.224279 | 2023-01-22T23:17:51 | 2023-01-22T23:17:51 | 411,470,732 | 8 | 0 | MIT | 2023-02-09T16:28:24 | 2021-09-28T23:48:13 | Python | UTF-8 | Python | false | false | 13,829 | py | """
Methods that can be shared by many array-like classes or subclasses:
Series
Index
ExtensionArray
"""
import operator
from typing import Any
import warnings
import numpy as np
from pandas._libs import lib
from pandas.core.construction import extract_array
from pandas.core.ops import (
maybe_dispatch_ufunc_to_dunder_op,
roperator,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
class OpsMixin:
# -------------------------------------------------------------
# Comparisons
def _cmp_method(self, other, op):
return NotImplemented
@unpack_zerodim_and_defer("__eq__")
def __eq__(self, other):
return self._cmp_method(other, operator.eq)
@unpack_zerodim_and_defer("__ne__")
def __ne__(self, other):
return self._cmp_method(other, operator.ne)
@unpack_zerodim_and_defer("__lt__")
def __lt__(self, other):
return self._cmp_method(other, operator.lt)
@unpack_zerodim_and_defer("__le__")
def __le__(self, other):
return self._cmp_method(other, operator.le)
@unpack_zerodim_and_defer("__gt__")
def __gt__(self, other):
return self._cmp_method(other, operator.gt)
@unpack_zerodim_and_defer("__ge__")
def __ge__(self, other):
return self._cmp_method(other, operator.ge)
# -------------------------------------------------------------
# Logical Methods
def _logical_method(self, other, op):
return NotImplemented
@unpack_zerodim_and_defer("__and__")
def __and__(self, other):
return self._logical_method(other, operator.and_)
@unpack_zerodim_and_defer("__rand__")
def __rand__(self, other):
return self._logical_method(other, roperator.rand_)
@unpack_zerodim_and_defer("__or__")
def __or__(self, other):
return self._logical_method(other, operator.or_)
@unpack_zerodim_and_defer("__ror__")
def __ror__(self, other):
return self._logical_method(other, roperator.ror_)
@unpack_zerodim_and_defer("__xor__")
def __xor__(self, other):
return self._logical_method(other, operator.xor)
@unpack_zerodim_and_defer("__rxor__")
def __rxor__(self, other):
return self._logical_method(other, roperator.rxor)
# -------------------------------------------------------------
# Arithmetic Methods
def _arith_method(self, other, op):
return NotImplemented
@unpack_zerodim_and_defer("__add__")
def __add__(self, other):
return self._arith_method(other, operator.add)
@unpack_zerodim_and_defer("__radd__")
def __radd__(self, other):
return self._arith_method(other, roperator.radd)
@unpack_zerodim_and_defer("__sub__")
def __sub__(self, other):
return self._arith_method(other, operator.sub)
@unpack_zerodim_and_defer("__rsub__")
def __rsub__(self, other):
return self._arith_method(other, roperator.rsub)
@unpack_zerodim_and_defer("__mul__")
def __mul__(self, other):
return self._arith_method(other, operator.mul)
@unpack_zerodim_and_defer("__rmul__")
def __rmul__(self, other):
return self._arith_method(other, roperator.rmul)
@unpack_zerodim_and_defer("__truediv__")
def __truediv__(self, other):
return self._arith_method(other, operator.truediv)
@unpack_zerodim_and_defer("__rtruediv__")
def __rtruediv__(self, other):
return self._arith_method(other, roperator.rtruediv)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
return self._arith_method(other, operator.floordiv)
@unpack_zerodim_and_defer("__rfloordiv")
def __rfloordiv__(self, other):
return self._arith_method(other, roperator.rfloordiv)
@unpack_zerodim_and_defer("__mod__")
def __mod__(self, other):
return self._arith_method(other, operator.mod)
@unpack_zerodim_and_defer("__rmod__")
def __rmod__(self, other):
return self._arith_method(other, roperator.rmod)
@unpack_zerodim_and_defer("__divmod__")
def __divmod__(self, other):
return self._arith_method(other, divmod)
@unpack_zerodim_and_defer("__rdivmod__")
def __rdivmod__(self, other):
return self._arith_method(other, roperator.rdivmod)
@unpack_zerodim_and_defer("__pow__")
def __pow__(self, other):
return self._arith_method(other, operator.pow)
@unpack_zerodim_and_defer("__rpow__")
def __rpow__(self, other):
return self._arith_method(other, roperator.rpow)
# -----------------------------------------------------------------------------
# Helpers to implement __array_ufunc__
def _is_aligned(frame, other):
"""
Helper to check if a DataFrame is aligned with another DataFrame or Series.
"""
from pandas import DataFrame
if isinstance(other, DataFrame):
return frame._indexed_same(other)
else:
# Series -> match index
return frame.columns.equals(other.index)
def _maybe_fallback(ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
"""
In the future DataFrame, inputs to ufuncs will be aligned before applying
the ufunc, but for now we ignore the index but raise a warning if behaviour
would change in the future.
This helper detects the case where a warning is needed and then fallbacks
to applying the ufunc on arrays to avoid alignment.
See https://github.com/pandas-dev/pandas/pull/39239
"""
from pandas import DataFrame
from pandas.core.generic import NDFrame
n_alignable = sum(isinstance(x, NDFrame) for x in inputs)
n_frames = sum(isinstance(x, DataFrame) for x in inputs)
if n_alignable >= 2 and n_frames >= 1:
# if there are 2 alignable inputs (Series or DataFrame), of which at least 1
# is a DataFrame -> we would have had no alignment before -> warn that this
# will align in the future
# the first frame is what determines the output index/columns in pandas < 1.2
first_frame = next(x for x in inputs if isinstance(x, DataFrame))
# check if the objects are aligned or not
non_aligned = sum(
not _is_aligned(first_frame, x) for x in inputs if isinstance(x, NDFrame)
)
# if at least one is not aligned -> warn and fallback to array behaviour
if non_aligned:
warnings.warn(
"Calling a ufunc on non-aligned DataFrames (or DataFrame/Series "
"combination). Currently, the indices are ignored and the result "
"takes the index/columns of the first DataFrame. In the future, "
"the DataFrames/Series will be aligned before applying the ufunc.\n"
"Convert one of the arguments to a NumPy array "
"(eg 'ufunc(df1, np.asarray(df2)') to keep the current behaviour, "
"or align manually (eg 'df1, df2 = df1.align(df2)') before passing to "
"the ufunc to obtain the future behaviour and silence this warning.",
FutureWarning,
stacklevel=4,
)
# keep the first dataframe of the inputs, other DataFrame/Series is
# converted to array for fallback behaviour
new_inputs = []
for x in inputs:
if x is first_frame:
new_inputs.append(x)
elif isinstance(x, NDFrame):
new_inputs.append(np.asarray(x))
else:
new_inputs.append(x)
# call the ufunc on those transformed inputs
return getattr(ufunc, method)(*new_inputs, **kwargs)
# signal that we didn't fallback / execute the ufunc yet
return NotImplemented
def array_ufunc(self, ufunc: np.ufunc, method: str, *inputs: Any, **kwargs: Any):
"""
Compatibility with numpy ufuncs.
See also
--------
numpy.org/doc/stable/reference/arrays.classes.html#numpy.class.__array_ufunc__
"""
from pandas.core.generic import NDFrame
from pandas.core.internals import BlockManager
cls = type(self)
# for backwards compatibility check and potentially fallback for non-aligned frames
result = _maybe_fallback(ufunc, method, *inputs, **kwargs)
if result is not NotImplemented:
return result
# for binary ops, use our custom dunder methods
result = maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)
if result is not NotImplemented:
return result
# Determine if we should defer.
# error: "Type[ndarray]" has no attribute "__array_ufunc__"
no_defer = (
np.ndarray.__array_ufunc__, # type: ignore[attr-defined]
cls.__array_ufunc__,
)
for item in inputs:
higher_priority = (
hasattr(item, "__array_priority__")
and item.__array_priority__ > self.__array_priority__
)
has_array_ufunc = (
hasattr(item, "__array_ufunc__")
and type(item).__array_ufunc__ not in no_defer
and not isinstance(item, self._HANDLED_TYPES)
)
if higher_priority or has_array_ufunc:
return NotImplemented
# align all the inputs.
types = tuple(type(x) for x in inputs)
alignable = [x for x, t in zip(inputs, types) if issubclass(t, NDFrame)]
if len(alignable) > 1:
# This triggers alignment.
# At the moment, there aren't any ufuncs with more than two inputs
# so this ends up just being x1.index | x2.index, but we write
# it to handle *args.
if len(set(types)) > 1:
# We currently don't handle ufunc(DataFrame, Series)
# well. Previously this raised an internal ValueError. We might
# support it someday, so raise a NotImplementedError.
raise NotImplementedError(
"Cannot apply ufunc {} to mixed DataFrame and Series "
"inputs.".format(ufunc)
)
axes = self.axes
for obj in alignable[1:]:
# this relies on the fact that we aren't handling mixed
# series / frame ufuncs.
for i, (ax1, ax2) in enumerate(zip(axes, obj.axes)):
axes[i] = ax1.union(ax2)
reconstruct_axes = dict(zip(self._AXIS_ORDERS, axes))
inputs = tuple(
x.reindex(**reconstruct_axes) if issubclass(t, NDFrame) else x
for x, t in zip(inputs, types)
)
else:
reconstruct_axes = dict(zip(self._AXIS_ORDERS, self.axes))
if self.ndim == 1:
names = [getattr(x, "name") for x in inputs if hasattr(x, "name")]
name = names[0] if len(set(names)) == 1 else None
reconstruct_kwargs = {"name": name}
else:
reconstruct_kwargs = {}
def reconstruct(result):
if lib.is_scalar(result):
return result
if result.ndim != self.ndim:
if method == "outer":
if self.ndim == 2:
# we already deprecated for Series
msg = (
"outer method for ufunc {} is not implemented on "
"pandas objects. Returning an ndarray, but in the "
"future this will raise a 'NotImplementedError'. "
"Consider explicitly converting the DataFrame "
"to an array with '.to_numpy()' first."
)
warnings.warn(msg.format(ufunc), FutureWarning, stacklevel=4)
return result
raise NotImplementedError
return result
if isinstance(result, BlockManager):
# we went through BlockManager.apply
result = self._constructor(result, **reconstruct_kwargs, copy=False)
else:
# we converted an array, lost our axes
result = self._constructor(
result, **reconstruct_axes, **reconstruct_kwargs, copy=False
)
# TODO: When we support multiple values in __finalize__, this
# should pass alignable to `__finalize__` instead of self.
# Then `np.add(a, b)` would consider attrs from both a and b
# when a and b are NDFrames.
if len(alignable) == 1:
result = result.__finalize__(self)
return result
if self.ndim > 1 and (len(inputs) > 1 or ufunc.nout > 1):
# Just give up on preserving types in the complex case.
# In theory we could preserve them for them.
# * nout>1 is doable if BlockManager.apply took nout and
# returned a Tuple[BlockManager].
# * len(inputs) > 1 is doable when we know that we have
# aligned blocks / dtypes.
inputs = tuple(np.asarray(x) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
elif self.ndim == 1:
# ufunc(series, ...)
inputs = tuple(extract_array(x, extract_numpy=True) for x in inputs)
result = getattr(ufunc, method)(*inputs, **kwargs)
else:
# ufunc(dataframe)
if method == "__call__" and not kwargs:
# for np.<ufunc>(..) calls
# kwargs cannot necessarily be handled block-by-block, so only
# take this path if there are no kwargs
mgr = inputs[0]._mgr
result = mgr.apply(getattr(ufunc, method))
else:
# otherwise specific ufunc methods (eg np.<ufunc>.accumulate(..))
# Those can have an axis keyword and thus can't be called block-by-block
result = getattr(ufunc, method)(np.asarray(inputs[0]), **kwargs)
if ufunc.nout > 1:
result = tuple(reconstruct(x) for x in result)
else:
result = reconstruct(result)
return result
| [
"[email protected]"
] | |
4762d94945681bd6e8e5976586adc89a1d38421a | 50e9ce5de71e78384e8c2aa658016dc36b01c356 | /WebIntro.py | b0a1e503891d722cb685cefcaa431fe04859238a | [] | no_license | LRBeaver/WebProgrammingPython35 | bca1e9c3a69a01fd9d1af733d665616774e41ac4 | fc8067dc91ac4dcca6373d8deab8289835884e98 | refs/heads/master | 2021-01-12T13:29:02.510126 | 2016-09-30T12:47:52 | 2016-09-30T12:47:52 | 69,954,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37 | py | import os
os.system('cls')
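# 'cls' clears the console on Windows; Unix-like shells use 'clear'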
print(2+3) | [
"GitPass16!"
] | GitPass16! |
1b9cfcc3315a6b382af914c6a625a67b54624d09 | 9ad76bcc6b959491911bd3f49870cda205cf721a | /cleaner.py | 481e524c5bc23110e7a36434a26a4b08df6aa6ba | [] | no_license | Makeshiftshelter01/preprocessing | 6378ecbb96d731e813896ba36afcda644d1cb5e0 | 1e4d9d83047502629b4402328422e52ec53b7d75 | refs/heads/master | 2020-04-18T07:32:35.339570 | 2019-02-11T11:28:35 | 2019-02-11T11:28:35 | 167,362,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,834 | py | from cleaning import first
from cleaning import cleaner
from cleaning import tokenizer
from cleaning import classifier
from cleaning import update_db
import time
from ckonlpy.tag import Twitter as Okt
import re
import psutil
import os
timehisory = []
memory = []
process = psutil.Process(os.getpid()) # for checking memory usage
mem_before = process.memory_info().rss / 1024 / 1024
for i in range(143,286):
startime = time.time()
total = 1000
##### Before starting, set the collection name to save in the config.1.ini file!!!
##### Put the name of the collection to preprocess below
collectionname = 'ilbe'
start = total*i
newdata = first(collectionname, start, total) # fetch 1000 records at a time from the DB, starting at start
labels =newdata.temp(ilbe='ilbe', inven='inven_sungmok',cook='realcook',ruri='ruri',\
fmkorea='realfmkorea',clien='clien',womad='womad_sungmok',theqoo='realtheqoo',mlbpark='mlbpark',ygosu='ygosu')
# 데이터 가져오기
gd = newdata.bringgd()
# 전처리된 데이터를 담을 리스트 준비(제목, 내용, 댓글말 전처리할 예정)
cno = []
title = []
thumbup = []
cthumbdownl = []
cthumbupl = []
ccontent = []
creplies = []
clinks = []
idate = []
# print(gd.title[0])
# print(gd.creplies[0][1])
# preprocess contents
new_ccontent = cleaner(gd, gd.ccontent, ccontent,collectionname)
ccontent = new_ccontent.cleaning()
# preprocess titles
new_title = cleaner(gd, gd.title, title,collectionname)
title = new_title.cleaning()
# preprocess comments
new_replies = cleaner(gd, gd.creplies, creplies,collectionname)
creplies = new_replies.cleaning()
# preprocess dates
new_date = cleaner(gd, gd.idate, idate, collectionname, labels)
idate = new_date.cleaning()
# preprocess links
new_link = cleaner(gd, gd.clinks, clinks,collectionname)
clinks = new_link.cleaning()
# upper-page upvote count
new_thumbup = cleaner(gd, gd.thumbup, thumbup,collectionname)
thumbup = new_thumbup.cleaning()
# upvote count
new_cthumbupl = cleaner(gd, gd.cthumbupl, cthumbupl,collectionname)
cthumbupl = new_cthumbupl.cleaning()
# downvote count
new_cthumbdownl = cleaner(gd, gd.cthumbdownl, cthumbdownl,collectionname)
cthumbdownl = new_cthumbdownl.cleaning()
# tokenization
print(i, '-th loop: starting tokenization')
tk = tokenizer()
token = tk.token(title, ccontent, creplies) # returns, in order: T_OR_title, T_title, T_OR_ccontent, T_ccontent, T_OR_creplies, T_creplies
# declare no other variables so that only the token variable uses memory
# part-of-speech tagging
print(i, '-th loop: part-of-speech tagging')
clf = classifier()
pos = clf.classify(token) # pos (parts of speech), in order: T_adjective, T_adverb, T_verb, T_nouns
# insert into MongoDB
collectionname = update_db() # replace with an instance for the collection being preprocessed
collectionname.setdbinfo() # collectionname.setdbinfo()
collectionname.insertone(gd, title, ccontent, idate, clinks, creplies, token, pos) # collectionname.insertone()
# create a JSON file
#collectionname.make_json('token_clien.json',gd, title, ccontent, idate, clinks, creplies, token, pos)
lasttime = time.time()
mem_after = process.memory_info().rss / 1024 / 1024
timehisory.append(lasttime-startime)
memory.append(mem_after)
print(i, '-th loop running; elapsed time:', lasttime-startime)
print('Memory usage before start: {} MB'.format(mem_before))
print('Memory usage after finish: {} MB'.format(mem_after))
print('Time taken per loop iteration:', timehisory)
print('Memory used per loop iteration:', memory)
| [
"[email protected]"
] | |
8d2f0ebd90fd687534ecf68cd168d277f3317c92 | 9db9e8b2458e5c65513081047d7d0d460ea4082c | /2020_08_16/excises/请求接口操作.py | bf57b9464f43aee59c5ebc239b4a08424f0f50f3 | [] | no_license | wsdxl/new_review | 8e8a3a755859cc4817f8879af2ee2ea24d79d5b3 | e4f1ca4850430868ce3a2076c50f232432c757cf | refs/heads/master | 2022-12-15T09:03:23.530782 | 2020-09-03T15:37:07 | 2020-09-03T15:37:07 | 288,185,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | """
Author:dxl
Time: 2020/8/16 16:54
File: 请求接口操作.py
"""
import jsonpath
import requests
login_url='http://api.lemonban.com/futureloan/member/login'
data={
'mobile_phone':'13641878150',
'pwd':'12345678',
}
headers = {
"X-Lemonban-Media-Type": "lemonban.v2",
"Content-Type": "application/json"
}
response=requests.post(url=login_url, json=data, headers=headers)
res=response.json()
print(res)
# id=res['data']['id']
member_id=jsonpath.jsonpath(res,'$..id')[0]
# print(id)
token_type=jsonpath.jsonpath(res,'$..token_type')[0]
token=jsonpath.jsonpath(res,'$..token')[0]
token_data=token_type+' '+token
# print(token_data)
#----------------------充值接口--------------
headers_data = {
"X-Lemonban-Media-Type": "lemonban.v2",
"Content-Type": "application/json",
"Authorization":token_data
}
recharge_url='http://api.lemonban.com/futureloan/member/recharge'
recharge_data={
'member_id':member_id,
'amount':20000
}
response=requests.post(url=recharge_url,json=recharge_data,headers=headers_data)
res1=response.json()
print(res1)
| [
"[email protected]"
] | |
74a71cc9b00f4156f22b00b4a5e3d19efbf05ca0 | 808a412cd83a12676aedaf3dcae212c0ba559986 | /05_number_checker_v1.py | 8a4015eb6c9c69c852cf3147c29b3af0ddb66edd | [] | no_license | jesset685/temperature_converter | ff42d04209acd78defbb69d0efcd67222a819706 | af2651e08148dd9f0687dada602d60248c6f6947 | refs/heads/master | 2023-05-03T21:09:06.135117 | 2021-05-18T21:51:40 | 2021-05-18T21:51:40 | 343,579,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 565 | py | # Code to check that number is valid...
def temp_check(low):
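# keep prompting until the user enters a number that is not below low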
valid = False
while not valid:
try:
response = float(input("Enter a number: "))
if response < low:
print("Too Cold!!")
else:
return response
except ValueError:
print("Please enter a number")
# main routine
# run this code twice (for two valid responses in test plan)
number = temp_check(-273)
print("You chose {}".format(number))
number = temp_check(-459)
print("You chose {}".format(number))
| [
"[email protected]"
] | |
d9ae65225c06397fa8783c0e427e4a38cdfa071f | 167b5a809781d6f5d140dfe1e060da22d1580067 | /app/auth/views.py | 0817f3cd37074cfa4c8b5a74d207af5642187dfe | [
"MIT"
] | permissive | guomaoqiu/flask_bootstrap | a42cb786a861a939540dcab70448901bb424a611 | d1c0aa1b055d317bc0d58faf336a8938d6818e24 | refs/heads/master | 2022-12-13T06:36:38.854338 | 2018-07-20T09:46:33 | 2018-07-20T09:46:33 | 129,188,981 | 1 | 0 | MIT | 2022-12-07T23:39:39 | 2018-04-12T03:43:41 | Python | UTF-8 | Python | false | false | 6,271 | py | # -*- coding: utf-8 -*-
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm
import sys
reload(sys) # Python 2.5+ removes sys.setdefaultencoding after initialization; reload sys to restore it
sys.setdefaultencoding('utf-8')
@auth.before_app_request
def before_request():
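    # runs before each request: refresh the user's last-seen time and redirect unconfirmed accounts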
if current_user.is_authenticated:
current_user.ping()
if not current_user.confirmed \
and request.endpoint[:5] != 'auth.' \
and request.endpoint != 'static':
return redirect(url_for('auth.unconfirmed'))
@auth.route('/unconfirmed')
def unconfirmed():
if current_user.is_anonymous or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('auth/unconfirmed.html')
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
flash('登录成功','success')
return redirect(request.args.get('next') or url_for('main.index'))
flash('密码错误','danger')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('退出登录成功.','success')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
send_email(user.email, '账户确认',
'auth/email/confirm', user=user, token=token)
flash('已通过电子邮件向您发送确认电子邮件.','info')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
@login_required
def confirm(token):
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm(token):
flash('您已经确认了您的帐户. 谢谢!','info')
else:
flash('确认链接无效或已过期.','warning')
return redirect(url_for('main.index'))
@auth.route('/confirm')
@login_required
def resend_confirmation():
token = current_user.generate_confirmation_token()
send_email(current_user.email, 'Confirm Your Account',
'auth/email/confirm', user=current_user, token=token)
flash('通过电子邮件发送了一封新的确认电子邮件.','info')
return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('您的密码已更新.','info')
return redirect(url_for('main.index'))
else:
flash('无效的密码.','danger')
return render_template("auth/change_password.html", form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, u'重设密码',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash(u'密码重设邮件已经发送到你的邮箱,请及时查收。', 'info')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
if user.reset_password(token, form.password.data):
flash('您的密码已更新.','success')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
send_email(new_email, 'Confirm your email address',
'auth/email/change_email',
user=current_user, token=token)
flash('已发送一封包含确认您的新电子邮件地址的说明的电子邮件。','info')
return redirect(url_for('main.index'))
else:
flash('无效的邮箱或密码','danger')
return render_template("auth/change_email.html", form=form)
@auth.route('/change-email/<token>')
@login_required
def change_email(token):
if current_user.change_email(token):
flash('您的电子邮件地址已更新.','info')
else:
flash('无效的请求.','danger')
return redirect(url_for('main.index'))
| [
"[email protected]"
] | |
d3487dbe30d9753fca0d826f19057405136dca9d | be84a1d38369ef6649a7e5d53338f8afec969f33 | /src/SaveWordTextNum.py | 2d8710141033d3a270a80ada632ec5eb76bf677d | [] | no_license | Joylim/Classifier | 0229ddfa64ce033b6999b8a269f0189dd2b69a67 | 4c675dc34dbf43d5d56d7e06ae9a309abbef6898 | refs/heads/master | 2021-03-12T20:15:40.424282 | 2013-12-29T14:14:04 | 2013-12-29T14:14:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 863 | py | # -*- coding = utf-8 -*-
#Huangyao THU
#12-14 2013
import util
import featureSelected
import json
import pickle
def saveWordTextDict(label,fileNum):
str1=('../data/trainData/Dict/%d.txt') % label
str2=('../data/trainData/Dict/words%d.txt') %label
f1=open(str1,'rb')
Dict=pickle.load(f1)
f1.close()
wordDict=util.Counter()
for item in Dict.items():
wordDict[item]=0
for item in Dict.items():
print 'item',item
for i in range(fileNum):
tmp=featureSelected.featureDict(label,i)
if item in tmp:
wordDict[item]+=1
print "saveWordTextDict has finished!"
f2=open(str2,'wb')
pickle.dump(wordDict,f2)
f2.close()
if __name__=='__main__':
saveWordTextDict(1,500)
#saveWordTextDict(2,500)
#saveWordTextDict(3,500)
#saveWordTextDict(4,500)
| [
"[email protected]"
] | |
c53dfb794093c790c63e72f2080aae10a8cf91a1 | de606c031df83cc697fe2b3786fe627349d9f814 | /connction_pool/function_redirect_client.py | d62b855a9ad069b4e1dd430048a49554e3146ad4 | [] | no_license | rajaramanram/python | 06c2219f3ca1444779146f7055f493927ffcd3eb | aed865d9ccd5e07460bd3c157a753074dfdfb828 | refs/heads/main | 2023-06-17T04:42:53.251368 | 2021-07-12T15:38:36 | 2021-07-12T15:38:36 | 310,102,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,049 | py | from flask import Flask, request, jsonify, g,url_for
from flask_sqlalchemy import SQLAlchemy
import json
from flask_oidc import OpenIDConnect
from urllib.parse import urlparse
import psycopg2
from psycopg2 import pool
app = Flask(__name__)
app.config['SERVER_NAME'] = "localhost:5000"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
app.config.update({
'SECRET_KEY': 'SomethingNotEntirelySecret',
'DEBUG': True,
'OIDC_ID_TOKEN_COOKIE_SECURE': False,
'OIDC_REQUIRE_VERIFIED_EMAIL': False,
'OIDC_USER_INFO_ENABLED': True,
'OIDC_SCOPES': ['openid'],
'OIDC_OPENID_REALM': 'master',
'TESTING': True,
'OIDC_INTROSPECTION_AUTH_METHOD': 'client_secret_post',
'OIDC_TOKEN_TYPE_HINT': 'access_token'
})
#'OIDC_CLIENT_SECRETS': 'c:/Python/codes/Autointelli/Flask/four_api/postgre_multi_tenant/connction_pool/dummy_sample.json',
oidc = OpenIDConnect()
class AlertModel(db.Model):
__tablename__ = 'alert1'
id = db.Column(db.Integer, primary_key=True)
alert_id = db.Column(db.String())
environment = db.Column(db.String())
count = db.Column(db.Integer())
def __init__(self, alert_id, environment, count):
self.alert_id = alert_id
self.environment = environment
self.count = count
#@app.before_request
def start():
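    # look up this tenant's OIDC endpoints by request subdomain and write them to client_secrets.json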
subdomain = urlparse(request.url).hostname.split('.')[0]
print(subdomain)
postgreSQL_pool = psycopg2.pool.SimpleConnectionPool(1, 20, user="postgres",
password="rajaraman", host="127.0.0.1", port="5432", database="customer_metadata")
if(postgreSQL_pool):
print("Connection pool created successfully")
ps_connection = postgreSQL_pool.getconn()
if(ps_connection):
ps_cursor = ps_connection.cursor()
ps_cursor.execute(
f"select * from cusotmer_secret where sub_domain = '{subdomain}'")
alert_records = ps_cursor.fetchall()
print(alert_records)
'''list_var = []
re_uri = list_var.append(alert_records[0][6].format(
"http", subdomain, "localhost", "5000"))
print(re_uri)'''
web_urls = {
"web":{
"issuer": alert_records[0][2].format("http", "localhost", "8080", subdomain),
"auth_uri": alert_records[0][3].format("http", "localhost", "8080", subdomain),
"client_id": alert_records[0][4].format("http", "localhost", "8080", subdomain),
"client_secret": alert_records[0][5].format("http", "localhost", "8080", subdomain),
"redirect_uris": [alert_records[0][6].format(
"http", "localhost", "5000")],
"userinfo_uri": alert_records[0][7].format("http", "localhost", "8080", subdomain),
"token_uri": alert_records[0][8].format("http", "localhost", "8080", subdomain),
"token_introspection_uri": alert_records[0][9].format("http", "localhost", "8080", subdomain)
}
}
print(web_urls)
with open('c:/Python/codes/Autointelli/Flask/four_api/postgre_multi_tenant/connction_pool/client_secrets.json', 'w') as outfile:
json_object = json.dumps(web_urls, indent=4)
print(json_object)
outfile.write(json_object)
ps_cursor.close()
#json_object = json.dumps(web_urls, indent=4)
#print(json_object)
#'OIDC_CLIENT_SECRETS': jsonify({'web':web_urls})
#Flask/four_api/postgre_multi_tenant/connection_pool/
#Autointelli/Flask/four_api/postgre_multi_tenant/
app.config.update({'OIDC_CLIENT_SECRETS': 'c:/Python/codes/Autointelli/Flask/four_api/postgre_multi_tenant/connction_pool/client_secrets.json'})
oidc.init_app(app)
#oidc = OpenIDConnect(app=app)
#decorator for oidc init
def init_oidc_fun(function):
def start(function):
subdomain = urlparse(request.url).hostname.split('.')[0]
#subdomain = username
print(subdomain)
postgreSQL_pool = psycopg2.pool.SimpleConnectionPool(1, 20, user="postgres",
password="rajaraman", host="127.0.0.1", port="5432", database="customer_metadata")
if(postgreSQL_pool):
print("Connection pool created successfully")
ps_connection = postgreSQL_pool.getconn()
if(ps_connection):
ps_cursor = ps_connection.cursor()
ps_cursor.execute(
f"select * from cusotmer_secret where sub_domain = '{subdomain}'")
alert_records = ps_cursor.fetchall()
print(alert_records)
'''list_var = []
re_uri = list_var.append(alert_records[0][6].format(
"http", subdomain, "localhost", "5000"))
print(re_uri)'''
web_urls = {
"web": {
"issuer": alert_records[0][2].format("http", "localhost", "8080", subdomain),
"auth_uri": alert_records[0][3].format("http", "localhost", "8080", subdomain),
"client_id": alert_records[0][4].format("http", "localhost", "8080", subdomain),
"client_secret": alert_records[0][5].format("http", "localhost", "8080", subdomain),
"redirect_uris": [alert_records[0][6].format(
"http", "localhost", "5000")],
"userinfo_uri": alert_records[0][7].format("http", "localhost", "8080", subdomain),
"token_uri": alert_records[0][8].format("http", "localhost", "8080", subdomain),
"token_introspection_uri": alert_records[0][9].format("http", "localhost", "8080", subdomain)
}
}
print(web_urls)
with open('c:/Python/codes/Autointelli/Flask/four_api/postgre_multi_tenant/connction_pool/client_secrets.json', 'w') as outfile:
json_object = json.dumps(web_urls, indent=4)
print(json_object)
outfile.write(json_object)
ps_cursor.close()
#json_object = json.dumps(web_urls, indent=4)
#print(json_object)
#'OIDC_CLIENT_SECRETS': jsonify({'web':web_urls})
#Flask/four_api/postgre_multi_tenant/connection_pool/
#Autointelli/Flask/four_api/postgre_multi_tenant/
app.config.update(
{'OIDC_CLIENT_SECRETS': 'c:/Python/codes/Autointelli/Flask/four_api/postgre_multi_tenant/connction_pool/client_secrets.json'})
oidc.init_app(app)
return function
return start
#@app.before_request(start)
@ app.route('/private', subdomain="<username>")
@init_oidc_fun
@oidc.require_login
def hello_me():
info = oidc.user_getinfo(['preferred_username', 'email', 'sub'])
user_id = info.get('sub')
print(user_id)
if user_id in oidc.credentials_store:
try:
from oauth2client.client import OAuth2Credentials
access_token = OAuth2Credentials.from_json(
oidc.credentials_store[user_id]).access_token
print('access_token=<%s>' % access_token)
except:
print("Could not service")
return access_token
@app.route('/receive', methods=['GET', 'POST'], subdomain="<username>")
@oidc.accept_token(require_token=True)
def receive_data():
if request.method == 'GET':
#app.config['SQLALCHEMY_DATABASE_URI'] = "postgresql://postgres:rajaraman@localhost:5432/"+username
app.config['SQLALCHEMY_DATABASE_URI'] = f"postgresql://postgres:rajaraman@localhost:5432/{username}"
alerts = AlertModel.query.all()
results = [
{
"alert_id": alert.alert_id,
"environment": alert.environment,
"count": alert.count
} for alert in alerts]
return {"alerts": results}
if __name__ == "__main__":
#oidc.init_app(app)
app.run(debug=True)
| [
"[email protected]"
] | |
91646ef48a02f3186ef12a064074f34674dd5102 | ef30056d39a1767123399fc31de20f2e09f3a1aa | /hello7/settings/prod.py | 7d77d628f8ef7b5f5c02299a18ac1c9f49ec5806 | [] | no_license | honestjjun/exercise | 2cc63bdd9c8d9fb5754e0a1e5c8f7622357892f8 | 7be69bf9d57bb659729e9c5226bae059cde3ee01 | refs/heads/master | 2020-07-15T04:07:21.904876 | 2017-06-14T07:50:42 | 2017-06-14T07:50:42 | 94,304,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | from .base import *
ALLOWED_HOSTS = ['*']
# DEBUG = True
INSTALLED_APPS += [
'storages'
]
DATABASES = {
'default': {
'ENGINE': os.environ.get('DB_ENGINE', 'django.db.backends.postgresql_psycopg2'),
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': os.environ['DB_HOST'],
'PORT': os.environ['DB_PORT'],
}
}
# 기본 static/media 저장소를 django-storages 로 변경
STATICFILES_STORAGE = 'hello6.storages.StaticS3Boto3Storage'
DEFAULT_FILE_STORAGE = 'hello6.storages.MediaS3Boto3Storage'
# S3 파일 관리에 필요한 최소한 설정
# 소스 코드에 설정 정보를 남기지 마세요. 환경 변수를 통한 설정 추천
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
#AWS_S3_CUSTOM_DOMAIN = 'd1k76vw1kjnwej.cloudfront.net'
AWS_S3_REGION_NAME = os.environ.get('AWS_S3_REGION_NAME', 'ap-northeast-2') | [
"[email protected]"
] | |
bfdf7a5f7fc0d22e55cb3938c474e0931f238b11 | d3c2fb9fc557aa371b2954992b2f89e9e8453d03 | /tests/contracts/interop/test_native.py | 12fab39808d68b8ab6860f4eb2c4c169f1f0a99a | [
"MIT"
] | permissive | simplitech/neo-mamba | f2bf14a5027d26d79f0a91834fbb8b247768d044 | 8b8a7bf2e600f89b91caff253f25c1c8afee6c0a | refs/heads/master | 2023-07-04T08:05:01.960301 | 2020-12-24T12:06:55 | 2020-12-24T12:06:55 | 351,549,364 | 0 | 1 | MIT | 2021-05-12T17:06:50 | 2021-03-25T19:15:53 | null | UTF-8 | Python | false | false | 36,916 | py | import unittest
import binascii
from unittest import mock
from collections import namedtuple
from neo3 import vm, contracts, storage, settings, cryptography
from neo3.core import types, to_script_hash, msgrouter
from neo3.network import message
from .utils import syscall_name_to_int, test_engine, test_block, TestIVerifiable
def test_native_contract(contract_hash: types.UInt160, operation: str, args=None):
engine = test_engine(has_snapshot=True)
block = test_block(0)
# or we won't pass the native deploy call
engine.snapshot.persisting_block = block
sb = vm.ScriptBuilder()
sb.emit_syscall(syscall_name_to_int("Neo.Native.Deploy"))
# now call the actual native contract
sb.emit_contract_call(contract_hash, operation)
script = vm.Script(sb.to_array())
engine.load_script(script)
# storing the current script in a contract otherwise "System.Contract.Call" will fail its checks
engine.snapshot.contracts.put(storage.ContractState(sb.to_array(), contracts.ContractManifest()))
return engine
class NativeInteropTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
settings.network.standby_committee = ['02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765']
settings.network.validators_count = 1
@classmethod
def tearDownClass(cls) -> None:
settings.reset_settings_to_default()
def shortDescription(self):
# disable docstring printing in test runner
return None
def test_native_deploy_fail(self):
engine = test_engine(has_snapshot=True)
block = test_block(1)
engine.snapshot.persisting_block = block
with self.assertRaises(ValueError) as context:
engine.invoke_syscall_by_name("Neo.Native.Deploy")
self.assertEqual("Can only deploy native contracts in the genenis block", str(context.exception))
def test_native_deploy_ok(self):
engine = test_engine(has_snapshot=True)
block = test_block(0)
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
self.assertIn("Policy", contracts.NativeContract().registered_contract_names)
self.assertEqual(contracts.PolicyContract(), contracts.NativeContract.get_contract("Policy"))
def test_native_call(self):
engine = test_engine(has_snapshot=True, default_script=True)
block = test_block(0)
engine.snapshot.persisting_block = block
# need to create and store a contract matching the current_context.script
# otherwise system.contract.call checks will fail
engine.snapshot.contracts.put(storage.ContractState(b'\x40', contracts.ContractManifest()))
engine.invoke_syscall_by_name("Neo.Native.Deploy")
engine.push(vm.ArrayStackItem(engine.reference_counter)) # empty array for no arguments
engine.push(vm.ByteStringStackItem(b'getMaxTransactionsPerBlock'))
policy_contract_hash = vm.ByteStringStackItem(contracts.PolicyContract().script_hash.to_array())
engine.push(policy_contract_hash)
engine.invoke_syscall_by_name("System.Contract.Call")
class TestNativeContract(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
settings.network.standby_committee = ['02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765']
settings.network.validators_count = 1
cls.validator_public_key = cryptography.ECPoint.deserialize_from_bytes(
binascii.unhexlify(settings.network.standby_committee[0])
)
cls.validator_account = to_script_hash(
contracts.Contract.create_multisig_redeemscript(1, [cls.validator_public_key]))
@classmethod
def tearDownClass(cls) -> None:
settings.reset_settings_to_default()
def test_requesting_non_existing_contract(self):
with self.assertRaises(ValueError) as context:
contracts.NativeContract.get_contract("bogus_contract")
self.assertEqual("There is no native contract with name: bogus_contract", str(context.exception))
def test_parameter_types_matched_parameter_names(self):
class NativeTestContract(contracts.NativeContract):
def init(self):
self._register_contract_method(None, None, 0, None, parameter_types=[], parameter_names=["error"])
with self.assertRaises(ValueError) as context:
NativeTestContract()
self.assertEqual("Parameter types count must match parameter names count! 0!=1", str(context.exception))
def test_invoke_not_allowed_through_native_syscall(self):
engine = test_engine(has_snapshot=True)
engine.snapshot.persisting_block = test_block(0)
engine.invoke_syscall_by_name("Neo.Native.Deploy")
engine.push(vm.ByteStringStackItem(b'Policy'))
with self.assertRaises(SystemError) as context:
engine.invoke_syscall_by_name("Neo.Native.Call")
self.assertEqual("It is not allowed to use Neo.Native.Call directly, use System.Contract.Call", str(context.exception))
def test_various(self):
native = contracts.NativeContract()
known_contracts = native.registered_contracts
self.assertIn(contracts.GasToken(), known_contracts)
self.assertIn(contracts.NeoToken(), known_contracts)
self.assertIn(contracts.PolicyContract(), known_contracts)
class TestPolicyContract(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
settings.network.standby_committee = ['02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765']
settings.network.validators_count = 1
@classmethod
def tearDownClass(cls) -> None:
settings.reset_settings_to_default()
def test_basics(self):
policy = contracts.PolicyContract()
self.assertEqual(-3, policy.id)
self.assertEqual("Policy", contracts.PolicyContract.service_name())
self.assertEqual([], policy.supported_standards())
def test_policy_get_max_tx_per_block(self):
engine = test_native_contract(contracts.PolicyContract().script_hash, "getMaxTransactionsPerBlock")
engine.execute()
self.assertEqual(vm.VMState.HALT, engine.state)
self.assertEqual(1, len(engine.result_stack))
item = engine.result_stack.pop()
self.assertEqual(vm.IntegerStackItem(512), item)
def test_policy_get_max_block_size(self):
engine = test_native_contract(contracts.PolicyContract().script_hash, "getMaxBlockSize")
engine.execute()
self.assertEqual(vm.VMState.HALT, engine.state)
self.assertEqual(1, len(engine.result_stack))
item = engine.result_stack.pop()
self.assertEqual(vm.IntegerStackItem(262144), item)
def test_policy_get_max_block_system_fee(self):
engine = test_native_contract(contracts.PolicyContract().script_hash, "getMaxBlockSystemFee")
engine.execute()
self.assertEqual(vm.VMState.HALT, engine.state)
self.assertEqual(1, len(engine.result_stack))
item = engine.result_stack.pop()
self.assertEqual(vm.IntegerStackItem(900000000000), item)
def test_policy_get_fee_per_byte(self):
engine = test_native_contract(contracts.PolicyContract().script_hash, "getFeePerByte")
engine.execute()
self.assertEqual(vm.VMState.HALT, engine.state)
self.assertEqual(1, len(engine.result_stack))
item = engine.result_stack.pop()
self.assertEqual(vm.IntegerStackItem(1000), item)
def test_policy_set_and_get_blocked_accounts(self):
engine = test_engine(has_snapshot=True)
block = test_block(0)
# set or we won't pass the native deploy call
engine.snapshot.persisting_block = block
sb = vm.ScriptBuilder()
sb.emit_syscall(syscall_name_to_int("Neo.Native.Deploy"))
# set or we won't pass the check_comittee() in the policy contract function implementations
engine.script_container = TestIVerifiable()
validator = settings.standby_committee[0]
script_hash = to_script_hash(contracts.Contract.create_multisig_redeemscript(1, [validator]))
engine.script_container.script_hashes = [script_hash]
# first we setup the stack for calling `blockAccount`
# push data to create a vm.Array holding 20 bytes for the UInt160 Account parameter of the _block_account function.
sb.emit_push(b'\x11' * 20)
sb.emit(vm.OpCode.PUSH1)
sb.emit(vm.OpCode.PACK)
sb.emit_push("blockAccount")
sb.emit_push(contracts.PolicyContract().script_hash.to_array())
sb.emit_syscall(syscall_name_to_int("System.Contract.Call"))
# next we call `getBlockedAccounts`
sb.emit_contract_call(contracts.PolicyContract().script_hash, "getBlockedAccounts")
script = vm.Script(sb.to_array())
engine.load_script(script)
# storing the current script in a contract otherwise "System.Contract.Call" will fail its checks
engine.snapshot.contracts.put(storage.ContractState(sb.to_array(), contracts.ContractManifest()))
engine.execute()
self.assertEqual(vm.VMState.HALT, engine.state)
self.assertEqual(2, len(engine.result_stack))
get_blocked_accounts_result = engine.result_stack.pop()
set_blocked_accounts_result = engine.result_stack.pop()
self.assertTrue(set_blocked_accounts_result.to_boolean())
self.assertIsInstance(get_blocked_accounts_result, vm.InteropStackItem)
stored_accounts = get_blocked_accounts_result.get_object()
self.assertEqual(1, len(stored_accounts))
expected_account = types.UInt160(data=b'\x11' * 20)
self.assertEqual(expected_account, stored_accounts[0])
def test_policy_unblock_account(self):
# we've tested the full round trip via "System.Contract.Call" in the test
# test_policy_set_and_get_blocked_accounts()
# Here we take the shortcut and test the unblock account function directly
engine = test_engine(has_snapshot=True)
block = test_block(0)
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
# we must add a script_container with valid signature to pass the check_comittee() validation check
# in the function itself
engine.script_container = TestIVerifiable()
validator = settings.standby_committee[0]
script_hash = to_script_hash(contracts.Contract.create_multisig_redeemscript(1, [validator]))
engine.script_container.script_hashes = [script_hash]
policy = contracts.PolicyContract()
account_not_found = types.UInt160(data=b'\x11' * 20)
account = types.UInt160.zero()
self.assertTrue(policy._block_account(engine, account))
self.assertFalse(policy._unblock_account(engine, account_not_found))
self.assertTrue(policy._unblock_account(engine, account))
storage_key = storage.StorageKey(policy.script_hash, policy._PREFIX_BLOCKED_ACCOUNTS)
storage_item = engine.snapshot.storages.try_get(storage_key)
self.assertIsNotNone(storage_item)
self.assertEqual(b'\x00', storage_item.value)
def test_policy_limit_setters(self):
policy = contracts.PolicyContract()
D = namedtuple('D', ['test_func', 'value', 'expected_return', 'storage_prefix'])
testdata = [
D(policy._set_max_block_size, message.Message.PAYLOAD_MAX_SIZE, False, policy._PREFIX_MAX_BLOCK_SIZE),
D(policy._set_max_block_size, 123, True, policy._PREFIX_MAX_BLOCK_SIZE),
D(policy._set_max_transactions_per_block, 123, True, policy._PREFIX_MAX_TRANSACTIONS_PER_BLOCK),
D(policy._set_max_block_system_fee, 123, False, policy._PREFIX_MAX_BLOCK_SYSTEM_FEE),
# value is lower than magic number
D(policy._set_max_block_system_fee, 5_000_000, True, policy._PREFIX_MAX_BLOCK_SYSTEM_FEE),
D(policy._set_fee_per_byte, 123, True, policy._PREFIX_FEE_PER_BYTE)
]
engine = test_engine(has_snapshot=True)
block = test_block(0)
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
# set or we won't pass the check_comittee() in the policy contract function implementations
engine.script_container = TestIVerifiable()
validator = settings.standby_committee[0]
script_hash = to_script_hash(contracts.Contract.create_multisig_redeemscript(1, [validator]))
engine.script_container.script_hashes = [script_hash]
for d in testdata:
self.assertEqual(d.expected_return, d.test_func(engine, d.value))
if d.expected_return is True:
item = engine.snapshot.storages.try_get(storage.StorageKey(policy.script_hash, d.storage_prefix))
self.assertIsNotNone(item)
self.assertEqual(d.value, int.from_bytes(item.value, 'little'))
def test_policy_setters_fail_without_signatures(self):
# cover set functions where check_committee fails
policy = contracts.PolicyContract()
engine = test_engine(has_snapshot=True)
block = test_block(0)
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
engine.script_container = TestIVerifiable()
self.assertFalse(policy._set_max_block_size(engine, None))
self.assertFalse(policy._set_max_transactions_per_block(engine, None))
self.assertFalse(policy._set_max_block_system_fee(engine, None))
self.assertFalse(policy._set_fee_per_byte(engine, None))
self.assertFalse(policy._block_account(engine, None))
self.assertFalse(policy._unblock_account(engine, None))
class Nep5TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
settings.network.standby_committee = ['02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765']
settings.network.validators_count = 1
cls.validator_public_key = cryptography.ECPoint.deserialize_from_bytes(
binascii.unhexlify(settings.network.standby_committee[0])
)
cls.validator_account = to_script_hash(
contracts.Contract.create_multisig_redeemscript(1, [cls.validator_public_key]))
@classmethod
def tearDownClass(cls) -> None:
settings.reset_settings_to_default()
def test_token_standards(self):
gas_standards = contracts.GasToken().supported_standards()
neo_standards = contracts.NeoToken().supported_standards()
self.assertEqual(["NEP-5"], gas_standards)
self.assertEqual(["NEP-5"], neo_standards)
def test_token_symbols(self):
gas_symbol = contracts.GasToken().symbol()
neo_symbol = contracts.NeoToken().symbol()
self.assertEqual("gas", gas_symbol)
self.assertEqual("neo", neo_symbol)
def test_total_supply(self):
engine = test_engine(has_snapshot=True)
block = test_block(0)
# set or we won't pass the native deploy call
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
gas = contracts.GasToken()
neo = contracts.NeoToken()
self.assertEqual(30_000_000 * gas.factor, gas.total_supply(engine.snapshot))
self.assertEqual(100_000_000, neo.total_supply(engine.snapshot))
def test_burn(self):
engine = test_engine(has_snapshot=True)
block = test_block(0)
# set or we won't pass the native deploy call
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
gas = contracts.GasToken()
with self.assertRaises(ValueError) as context:
gas.burn(engine, self.validator_account, vm.BigInteger(-1))
self.assertEqual("Can't burn a negative amount", str(context.exception))
# burning nothing should not change the total supply
default_gas = 30_000_000
self.assertEqual(default_gas, gas.total_supply(engine.snapshot) / gas.factor)
gas.burn(engine, self.validator_account, vm.BigInteger(0))
self.assertEqual(default_gas, gas.total_supply(engine.snapshot) / gas.factor)
# Note: our account holds the total supply
with self.assertRaises(ValueError) as context:
gas.burn(engine, self.validator_account, vm.BigInteger(default_gas + 1) * gas.factor)
self.assertEqual("Insufficient balance. Requesting to burn 3000000100000000, available 3000000000000000",
str(context.exception))
# burn a bit
gas.burn(engine, self.validator_account, vm.BigInteger(10) * gas.factor)
remaining_total_supply = int(gas.total_supply(engine.snapshot) / gas.factor)
self.assertEqual(default_gas - 10, remaining_total_supply)
# now burn it all
gas.burn(engine, self.validator_account, vm.BigInteger(remaining_total_supply) * gas.factor)
self.assertEqual(0, gas.total_supply(engine.snapshot) / gas.factor)
def test_balance_of(self):
engine = test_engine(has_snapshot=True)
block = test_block(0)
# set or we won't pass the native deploy call
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
gas = contracts.GasToken()
neo = contracts.NeoToken()
deploy_expected_gas = 30_000_000
deploy_expected_neo = 100_000_000
self.assertEqual(deploy_expected_gas, gas.balance_of(engine.snapshot, self.validator_account) / gas.factor)
self.assertEqual(deploy_expected_neo, neo.balance_of(engine.snapshot, self.validator_account))
self.assertEqual(vm.BigInteger.zero(), gas.balance_of(engine.snapshot, types.UInt160.zero()))
self.assertEqual(vm.BigInteger.zero(), neo.balance_of(engine.snapshot, types.UInt160.zero()))
def test_on_persist(self):
"""
OnPersist will do the following
* burn the system and network fees for all transactions
* mint the sum of network_fees for all transactions to the address of the consensus node that acted as primary
speaker for the block
"""
engine = test_engine(has_snapshot=True)
block = test_block(0)
# set or we won't pass the native deploy call
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
gas = contracts.GasToken()
# with the default Application trigger type we're not allowed to call on_persist
with self.assertRaises(SystemError) as context:
gas.on_persist(engine)
self.assertEqual("Invalid operation", str(context.exception))
# set correct trigger type or we fail super().on_persist()
engine.trigger = contracts.TriggerType.SYSTEM
# update the TX signer account to point to our validator or the token burn() (part of on persist)
# will fail because it can't find an account with balance
mock_signer = mock.MagicMock()
mock_signer.account = self.validator_account
engine.snapshot.persisting_block.transactions[0].signers = [mock_signer]
# our consensus_data is not setup in a realistic way, so we have to correct for that here
# or we fail to get the account of primary consensus node
engine.snapshot.persisting_block.consensus_data.primary_index = settings.network.validators_count - 1
gas.on_persist(engine)
"""
Drop the below in a test in UT_NativeContract.cs and change ProtocolSettings.cs to
* have a ValidatorsCount of 1
* and the StandbyCommittee should be: 02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765
var snapshot = Blockchain.Singleton.GetSnapshot();
snapshot.PersistingBlock = new Block() { Index = 1000 };
var point = ECPoint.Parse("02158c4a4810fa2a6a12f7d33d835680429e1a68ae61161c5b3fbc98c7f1f17765", ECCurve.Secp256r1);
var account = Contract.CreateMultiSigRedeemScript(1, new ECPoint[] {point}).ToScriptHash();
var tx = TestUtils.GetTransaction(account);
tx.SystemFee = 456;
tx.NetworkFee = 789;
snapshot.PersistingBlock.Transactions = new Transaction[] {tx};
snapshot.PersistingBlock.ConsensusData = new ConsensusData { PrimaryIndex = 0};
ApplicationEngine engine2 = ApplicationEngine.Create(TriggerType.System, tx, snapshot, 0);
NativeContract.GAS.OnPersist(engine2);
var key = new byte[] {0x14};
var sk = key.Concat(account.ToArray());
var item = engine2.Snapshot.Storages.TryGet(new StorageKey {Id = NativeContract.GAS.Id, Key = sk.ToArray()});
var state = item.GetInteroperable<AccountState>();
Console.WriteLine($"account state {state.Balance}");
var item2 = engine2.Snapshot.Storages.TryGet(new StorageKey {Id = NativeContract.GAS.Id, Key = new byte[]{11}});
Console.WriteLine($"total supply {(BigInteger)item2}");
var primary_account = Contract.CreateSignatureRedeemScript(point).ToScriptHash();
var primary_sk = key.Concat(primary_account.ToArray());
var primary_item = engine2.Snapshot.Storages.TryGet(new StorageKey {Id = NativeContract.GAS.Id, Key = primary_sk.ToArray()});
var primary_state = primary_item.GetInteroperable<AccountState>();
Console.WriteLine($"primary account state {primary_state.Balance}");
"""
# * our validator prior to on_persist had a balance of 30_000_000
# * after it should have been reduced by the network + system_fee's paid in the transaction
sk_gas_supply = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + self.validator_account.to_array())
si_supply = engine.snapshot.storages.try_get(sk_gas_supply)
self.assertIsNotNone(si_supply)
token_state = gas._state.deserialize_from_bytes(si_supply.value)
total_fees = engine.snapshot.persisting_block.transactions[0].network_fee + \
engine.snapshot.persisting_block.transactions[0].system_fee
expected = (30_000_000 * gas.factor) - total_fees
self.assertEqual(expected, int(token_state.balance))
# * total GAS supply was 30_000_000, should be reduced by the system_fee
sk_total_supply = storage.StorageKey(gas.script_hash, gas._PREFIX_TOTAL_SUPPLY)
si_total_supply = engine.snapshot.storages.try_get(sk_total_supply)
self.assertIsNotNone(si_total_supply)
expected = (30_000_000 * gas.factor) - engine.snapshot.persisting_block.transactions[0].system_fee
self.assertEqual(expected, vm.BigInteger(si_total_supply.value))
# * the persisting block contains exactly 1 transaction
# * after on_persist the account our primary validator should have been credited with the transaction's
# network_fee
primary_validator = to_script_hash(contracts.Contract.create_signature_redeemscript(self.validator_public_key))
sk_gas_supply = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + primary_validator.to_array())
si_supply = engine.snapshot.storages.try_get(sk_gas_supply)
self.assertIsNotNone(si_supply)
token_state = gas._state.deserialize_from_bytes(si_supply.value)
expected = engine.snapshot.persisting_block.transactions[0].network_fee
self.assertEqual(expected, int(token_state.balance))
def transfer_helper(self, contract: contracts.NativeContract,
from_account: types.UInt160,
to_account: types.UInt160,
amount: vm.BigInteger):
engine = test_engine(has_snapshot=True)
block = test_block(0)
# set or we won't pass the native deploy call
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
engine.invocation_stack.pop() # we no longer need the default script
engine.script_container = TestIVerifiable()
engine.script_container.script_hashes = [from_account]
sb = vm.ScriptBuilder()
sb.emit_push(amount)
sb.emit_push(to_account.to_array())
sb.emit_push(from_account.to_array())
sb.emit_push(3)
sb.emit(vm.OpCode.PACK)
sb.emit_push(b'transfer')
sb.emit_push(contract.script_hash.to_array())
sb.emit_syscall(syscall_name_to_int("System.Contract.Call"))
engine.load_script(vm.Script(sb.to_array()))
engine.snapshot.contracts.put(storage.ContractState(sb.to_array(), contracts.ContractManifest()))
return engine
def test_transfer_negative_amount(self):
engine = test_engine(has_snapshot=True, default_script=False)
engine.load_script(vm.Script(contracts.GasToken().script))
block = test_block(0)
# set or we won't pass the native deploy call
engine.snapshot.persisting_block = block
engine.invoke_syscall_by_name("Neo.Native.Deploy")
gas = contracts.GasToken()
with self.assertRaises(ValueError) as context:
gas.transfer(engine, types.UInt160.zero(), types.UInt160.zero(), vm.BigInteger(-1))
self.assertEqual("Can't transfer a negative amount", str(context.exception))
def test_transfer_fail_no_permission(self):
"""
Test to transfer tokens from a source account that is not owned by the smart contract that is asking for the transfer.
We do not add a witness that approves that we can transfer from the source account, thus it should fail
Returns:
"""
gas = contracts.GasToken()
engine = self.transfer_helper(gas, types.UInt160.zero(), types.UInt160.zero(), vm.BigInteger(1))
engine.script_container.script_hashes = [] # ensure checkwitness returns False
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertFalse(result)
def test_to_account_not_payable(self):
gas = contracts.GasToken()
state = storage.ContractState(b'\x00', contracts.ContractManifest())
engine = self.transfer_helper(gas, types.UInt160.zero(), state.script_hash(), vm.BigInteger(1))
# default manifest is not payable
engine.snapshot.contracts.put(state)
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertFalse(result)
def test_transfer_from_empty_account(self):
gas = contracts.GasToken()
manifest = contracts.ContractManifest()
manifest.features = contracts.ContractFeatures.PAYABLE
state = storage.ContractState(b'\x00', manifest)
engine = self.transfer_helper(gas, types.UInt160.zero(), state.script_hash(), vm.BigInteger(1))
engine.snapshot.contracts.put(state)
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertFalse(result)
def test_transfer_zero_amount(self):
gas = contracts.GasToken()
account_from = types.UInt160(b'\x01' * 20)
storage_key_from = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + account_from.to_array())
account_state = gas._state()
account_state.balance = vm.BigInteger(123)
storage_item_from = storage.StorageItem(account_state.to_array())
manifest = contracts.ContractManifest()
manifest.features = contracts.ContractFeatures.PAYABLE
state_to = storage.ContractState(b'\x00', manifest)
account_to = state_to.script_hash()
amount = vm.BigInteger(0)
engine = self.transfer_helper(gas, account_from, account_to, amount)
# ensure the destination contract exists
engine.snapshot.contracts.put(state_to)
# ensure the source account has balance
engine.snapshot.storages.put(storage_key_from, storage_item_from)
transfer_event = ()
def notify_listener(contract_script_hash, event, state):
nonlocal transfer_event
transfer_event = (contract_script_hash, event, state)
msgrouter.interop_notify += notify_listener
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertTrue(result)
self.assertEqual(gas.script_hash, transfer_event[0])
self.assertEqual("Transfer", transfer_event[1])
state_items = list(transfer_event[2])
self.assertEqual(account_from, types.UInt160(state_items[0].to_array()))
self.assertEqual(account_to, types.UInt160(state_items[1].to_array()))
self.assertEqual(amount, state_items[2].to_biginteger())
def test_transfer_more_than_balance(self):
gas = contracts.GasToken()
account_from = types.UInt160.zero()
storage_key_from = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + account_from.to_array())
account_state = gas._state()
account_state.balance = vm.BigInteger(123)
storage_item_from = storage.StorageItem(account_state.to_array())
manifest = contracts.ContractManifest()
manifest.features = contracts.ContractFeatures.PAYABLE
state_to = storage.ContractState(b'\x00', manifest)
account_to = state_to.script_hash()
amount = account_state.balance + 1
engine = self.transfer_helper(gas, account_from, account_to, amount)
# ensure the destination contract exists
engine.snapshot.contracts.put(state_to)
# ensure the source account has balance
engine.snapshot.storages.put(storage_key_from, storage_item_from)
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertFalse(result)
def test_transfer_to_self(self):
gas = contracts.GasToken()
manifest = contracts.ContractManifest()
manifest.features = contracts.ContractFeatures.PAYABLE
state_to = storage.ContractState(b'\x00' * 20, manifest)
account = state_to.script_hash()
storage_key_from = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + account.to_array())
account_state = gas._state()
account_state.balance = vm.BigInteger(123)
storage_item_from = storage.StorageItem(account_state.to_array())
amount = account_state.balance
engine = self.transfer_helper(gas, account, account, amount)
# ensure the destination contract exists
engine.snapshot.contracts.put(state_to)
# ensure the source account has balance
engine.snapshot.storages.put(storage_key_from, storage_item_from)
transfer_event = ()
def notify_listener(contract_script_hash, event, state):
nonlocal transfer_event
transfer_event = (contract_script_hash, event, state)
msgrouter.interop_notify += notify_listener
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertTrue(result)
self.assertEqual(gas.script_hash, transfer_event[0])
self.assertEqual("Transfer", transfer_event[1])
state_items = list(transfer_event[2])
self.assertEqual(account, types.UInt160(state_items[0].to_array()))
self.assertEqual(account, types.UInt160(state_items[1].to_array()))
self.assertEqual(amount, state_items[2].to_biginteger())
def test_transfer_full_balance(self):
gas = contracts.GasToken()
manifest = contracts.ContractManifest()
manifest.features = contracts.ContractFeatures.PAYABLE
state_to = storage.ContractState(b'\x00' * 20, manifest)
account_to = state_to.script_hash()
account_from = types.UInt160(b'\x01' * 20)
storage_key_from = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + account_from.to_array())
account_from_state = gas._state()
account_from_state.balance = vm.BigInteger(123)
storage_item_from = storage.StorageItem(account_from_state.to_array())
amount = account_from_state.balance
engine = self.transfer_helper(gas, account_from, account_to, amount)
# ensure the destination contract exists
engine.snapshot.contracts.put(state_to)
# ensure the source account has balance
engine.snapshot.storages.put(storage_key_from, storage_item_from)
transfer_event = ()
def notify_listener(contract_script_hash, event, state):
nonlocal transfer_event
transfer_event = (contract_script_hash, event, state)
msgrouter.interop_notify += notify_listener
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertTrue(result)
self.assertEqual(gas.script_hash, transfer_event[0])
self.assertEqual("Transfer", transfer_event[1])
state_items = list(transfer_event[2])
self.assertEqual(account_from, types.UInt160(state_items[0].to_array()))
self.assertEqual(account_to, types.UInt160(state_items[1].to_array()))
self.assertEqual(amount, state_items[2].to_biginteger())
# test that the source account is no longer present in storage as the balance is zero
self.assertIsNone(engine.snapshot.storages.try_get(storage_key_from))
def test_transfer_partial_balance_to_account_with_balance(self):
gas = contracts.GasToken()
manifest = contracts.ContractManifest()
manifest.features = contracts.ContractFeatures.PAYABLE
state_to = storage.ContractState(b'\x00' * 20, manifest)
account_to = state_to.script_hash()
storage_key_to = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + account_to.to_array())
account_to_state = gas._state()
account_to_state.balance = vm.BigInteger(100)
storage_item_to = storage.StorageItem(account_to_state.to_array())
account_from = types.UInt160(b'\x01' * 20)
storage_key_from = storage.StorageKey(gas.script_hash, gas._PREFIX_ACCOUNT + account_from.to_array())
account_from_state = gas._state()
account_from_state.balance = vm.BigInteger(123)
storage_item_from = storage.StorageItem(account_from_state.to_array())
amount = vm.BigInteger(50)
engine = self.transfer_helper(gas, account_from, account_to, amount)
# ensure the destination contract exists
engine.snapshot.contracts.put(state_to)
# ensure the source and destination account have balances
engine.snapshot.storages.put(storage_key_from, storage_item_from)
engine.snapshot.storages.put(storage_key_to, storage_item_to)
transfer_event = ()
def notify_listener(contract_script_hash, event, state):
nonlocal transfer_event
transfer_event = (contract_script_hash, event, state)
msgrouter.interop_notify += notify_listener
engine.execute()
self.assertEqual(1, len(engine.result_stack))
result = engine.result_stack.pop()
self.assertTrue(result)
self.assertEqual(gas.script_hash, transfer_event[0])
self.assertEqual("Transfer", transfer_event[1])
state_items = list(transfer_event[2])
self.assertEqual(account_from, types.UInt160(state_items[0].to_array()))
self.assertEqual(account_to, types.UInt160(state_items[1].to_array()))
self.assertEqual(amount, state_items[2].to_biginteger())
# validate from account is deducted by `amount`
new_storage_account_from = engine.snapshot.storages.get(storage_key_from)
new_account_state_from = gas._state.deserialize_from_bytes(new_storage_account_from.value)
self.assertEqual(account_from_state.balance - amount, new_account_state_from.balance)
# validate to account is credited with `amount`
new_storage_account_to = engine.snapshot.storages.get(storage_key_to)
new_account_state_to = gas._state.deserialize_from_bytes(new_storage_account_to.value)
self.assertEqual(account_to_state.balance + amount, new_account_state_to.balance)
def test_negative_mint(self):
gas = contracts.GasToken()
with self.assertRaises(ValueError) as context:
gas.mint(None, None, vm.BigInteger(-1))
self.assertEqual("Can't mint a negative amount", str(context.exception))
| [
"[email protected]"
] | |
d891cf67239e4bbaad3bcfd8585d19565271145f | 0cc7c0347ea613311ea31b834eed3f46cf1403bd | /baidu/segment.py | 1bdae3f933d4184db70d1ac283a658cb07820a32 | [] | no_license | Goerwa/Craw | b53e280e9adffa54eec4f8ae6c25b99315b269e7 | 8a19b7631f2f8678197bb8afe533e7ee8e942eee | refs/heads/master | 2021-03-28T22:54:28.849789 | 2020-03-17T13:30:19 | 2020-03-17T13:30:19 | 247,903,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | import time
import os
import json
def ltp_seg():
"""
使用LTP 对json文件中的标题和正文进行分词
"""
LTP_DATA_DIR = 'D:\BaiduNetdiskDownload\ltp_data_v3.4.0' # ltp模型目录的路径
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model') # 分词模型路径,模型名称为`cws.model`
from pyltp import Segmentor
segmentor = Segmentor() # 初始化实例
segmentor.load(cws_model_path) # 加载模型
# 读入json文件
with open('data/data.json', encoding='utf-8') as fin:
read_results = [json.loads(line.strip()) for line in fin.readlines()]
# 读取停用词
stopwords = read_stop_word()
# 对标题和正文进行分词
result = []
start = time.time() # 计时
for res in read_results:
tmp = {}
tmp['url'] = res['url']
# 标题分词
title_words = segmentor.segment(res['title'])
tmp['segmented_title'] = ' '.join(title_words)
# 正文分词
words = segmentor.segment(res['paragraphs'])
# 去除正文中的停用词
remove_stop_words = [word for word in words if word not in stopwords]
tmp['segmented_paragraphs'] = ' '.join(remove_stop_words)
tmp['file_name'] = res['file_name']
result.append(tmp)
# print(res['paragraphs'])
end = time.time() # 计时
# 写回json文件
with open('data/preprocessed.json', 'w', encoding='utf-8') as fout:
for sample in result:
fout.write(json.dumps(sample, ensure_ascii=False) + '\n')
segmentor.release() # 释放模型
print("LTP seg done, use time: {}s".format(end - start))
def read_stop_word():
"""
读取停用词
:return:
"""
stopwords = set()
with open('data/stopwords(new).txt', 'r', encoding='utf-8') as f:
for line in f:
stopwords.add(line.strip())
print("stopwords num:{}".format(len(stopwords)))
return stopwords
if __name__ == '__main__':
ltp_seg()
| [
"[email protected]"
] | |
f28b53a8e49e5b661deda7350778a27a7ba6095a | a24269de977124c853023eb5cb5e3971178acebe | /db/init_db.py | 24d9ff23d948791594847c22165434806ccd1a83 | [] | no_license | 05113/fastapi-testplatform | 05de134db4a06d8906fff0b017dfdf08eb85dd10 | 8de0839d502106b2e0169d999942355273caf1fd | refs/heads/master | 2023-06-21T19:41:39.124467 | 2021-07-18T10:03:41 | 2021-07-18T10:03:41 | 329,851,342 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | from sqlalchemy.orm import Session
def init_db(db:Session) -> None :
pass
| [
"[email protected]"
] | |
cd3751c7353dba0e6904e5ee5cb6bbf17323d40d | 808443b95bf49567ee82f1967892156cf1c6a62c | /0x08-user_authentication_service/auth.py | 066fc54a1174d6de4853b260883e3db40de07f21 | [] | no_license | NasserAbuchaibe/holbertonschool-web_back_end | f0d93901b193058fd70451678e1d6174537bd226 | 3a051eab09c1c50ac56c05f530c3a4b616dde5f7 | refs/heads/master | 2023-08-25T13:34:30.278442 | 2021-10-12T04:07:47 | 2021-10-12T04:07:47 | 361,899,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,450 | py | #!/usr/bin/env python3
""" Authentication
"""
from typing import Union
from uuid import uuid4
from bcrypt import checkpw, gensalt, hashpw
from sqlalchemy.orm.exc import NoResultFound
from db import DB
from user import User
def _hash_password(password: str) -> str:
""" that takes in a password string arguments and returns bytes.
"""
return hashpw(password.encode('utf-8'), gensalt())
def _generate_uuid() -> str:
"""[generate uuid]
Returns:
str: [uuid]
"""
return str(uuid4())
class Auth:
"""Auth class to interact with the authentication database.
"""
def __init__(self):
""" Instance """
self._db = DB()
def register_user(self, email: str, password: str) -> User:
""" Registers and returns a new user if email isn't listed """
try:
self._db.find_user_by(email=email)
raise ValueError(f"User {email} already exists")
except NoResultFound:
return self._db.add_user(email, _hash_password(password))
def valid_login(self, email: str, password: str) -> bool:
"""[summary]
Args:
email (str): [description]
password (str): [description]
Returns:
bool: [description]
"""
try:
found_user = self._db.find_user_by(email=email)
return checkpw(
password.encode('utf-8'),
found_user.hashed_password
)
except NoResultFound:
return False
def create_session(self, email: str) -> str:
""" Creates session ID using UUID, finds user by email """
try:
found_user = self._db.find_user_by(email=email)
except NoResultFound:
return None
session_id = _generate_uuid()
self._db.update_user(found_user.id, session_id=session_id)
return session_id
def get_user_from_session_id(self, session_id: str) -> Union[str, None]:
""" Finds user by session_id """
if session_id is None:
return None
try:
found_user = self._db.find_user_by(session_id=session_id)
return found_user
except NoResultFound:
return None
def destroy_session(self, user_id: str) -> None:
""" Updates user's session_id to None """
if user_id is None:
return None
try:
found_user = self._db.find_user_by(id=user_id)
self._db.update_user(found_user.id, session_id=None)
except NoResultFound:
return None
def get_reset_password_token(self, email: str) -> str:
""" Finds user by email, updates user's reset_token with UUID """
try:
found_user = self._db.find_user_by(email=email)
except NoResultFound:
raise ValueError
reset_token = _generate_uuid()
self._db.update_user(found_user.id, reset_token=reset_token)
return reset_token
def update_password(self, reset_token: str, password: str) -> None:
""" Finds user by reset_token, updates user's pswd """
try:
found_user = self._db.find_user_by(reset_token=reset_token)
except NoResultFound:
raise ValueError
new_pswd = _hash_password(password)
self._db.update_user(
found_user.id,
hashed_password=new_pswd,
reset_token=None)
| [
"[email protected]"
] | |
a9275a724adb5f73c2ef4c7c132a2920032b6135 | 1a1647e1e37f6c06f97edeb89dbc4daab2fc3f85 | /train_and_collect_images.py | 4255688b405ef4865b7ac3c76d1df992f7279d00 | [] | no_license | pankajrajput0312/automatic_attendance_macUC | 120e28317b2a6c1dbeb9e4745822e3ff78c7c8ed | 7be02fe39851f67c55bb361cde9e4feec18d35de | refs/heads/main | 2022-12-26T00:46:02.391712 | 2020-10-04T15:59:58 | 2020-10-04T15:59:58 | 301,161,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,179 | py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cv2
from PIL import Image
import PIL
import os
import shutil
import datetime
import time
from threading import Thread
def collect_data():
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cv2
from PIL import Image
import PIL
import os
import shutil
import datetime
import time
from threading import Thread
cap = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier(
'./haarcascade_frontalface_default.xml')
count = 0
Id = input(" enter unique id")
name = input("enter name of person")
Email_id = input("enter Email_id")
while(True):
ret, frame = cap.read()
if(ret == False):
continue
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (51, 255, 51), 3)
count += 1
img_name_path = Id + '.' + str(count) + ".jpg"
offset = 10
if(count % 50 == 0):
print(img_name_path)
saving_image = gray[x:x+w, y:y+h]
plt.imsave(img_name_path, gray[y:y+h, x:x+w], cmap='gray')
# status=cv2.imwrite(img_name_path, saving_image, [cv2.IMWRITE_JPEG_QUALITY, 100])
dest = './data_images'
shutil.move(img_name_path, dest)
cv2.imshow("frame", frame)
if cv2.waitKey(100) & 0xFF == ord('q'):
break
elif(count > 200):
break
cap.release()
cv2.destroyAllWindows()
def capture_data_details():
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cv2
from PIL import Image
import PIL
import os
import shutil
import datetime
import time
from threading import Thread
faces = []
Ids = []
for one in os.listdir('data_images'):
new_path = os.path.join('data_images', one)
img = Image.open(new_path).convert('L')
img = np.array(img, 'uint8')
curr_id = int(one.split('.')[0])
Ids.append(curr_id)
faces.append(img)
return faces, Ids
def train_data():
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import cv2
from PIL import Image
import PIL
import os
import shutil
import datetime
import time
from threading import Thread
recognizer = cv2.face.LBPHFaceRecognizer_create()
cap = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
face, Ids = capture_data_details()
recognizer.train(face, np.array(Ids))
try:
recognizer.save("trained_model.yml")
print("model trained successfully!")
except:
print("unable to train model")
def add_new_person():
collect_data()
print("collect data successfullt")
print("model training start...")
train_data()
add_new_person()
| [
"[email protected]"
] | |
8b2525db6dfc58d5c9dd37f6b89837cb56ae4aa7 | 46ac0965941d06fde419a6f216db2a653a245dbd | /sdks/python/appcenter_sdk/models/ExportConfigurationListResult.py | dc23e10e4c72c670b462af795bcded85e19b94ee | [
"MIT",
"Unlicense"
] | permissive | b3nab/appcenter-sdks | 11f0bab00d020abb30ee951f7656a3d7ed783eac | bcc19c998b5f648a147f0d6a593dd0324e2ab1ea | refs/heads/master | 2022-01-27T15:06:07.202852 | 2019-05-19T00:12:43 | 2019-05-19T00:12:43 | 187,386,747 | 0 | 3 | MIT | 2022-01-22T07:57:59 | 2019-05-18T17:29:21 | Python | UTF-8 | Python | false | false | 4,770 | py | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class ExportConfigurationListResult(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
allowed enum values
"""
crashes = "crashes"
errors = "errors"
attachments = "attachments"
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'values': 'array',
'total': 'integer',
'next_link': 'string'
}
attribute_map = {
'values': 'values',
'total': 'total',
'next_link': 'next_link'
}
def __init__(self, values=None, total=None, next_link=None): # noqa: E501
"""ExportConfigurationListResult - a model defined in Swagger""" # noqa: E501
self._values = None
self._total = None
self._next_link = None
self.discriminator = None
self.values = values
if total is not None:
self.total = total
if next_link is not None:
self.next_link = next_link
@property
def values(self):
"""Gets the values of this ExportConfigurationListResult. # noqa: E501
:return: The values of this ExportConfigurationListResult. # noqa: E501
:rtype: array
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ExportConfigurationListResult.
:param values: The values of this ExportConfigurationListResult. # noqa: E501
:type: array
"""
if values is None:
raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
self._values = values
@property
def total(self):
"""Gets the total of this ExportConfigurationListResult. # noqa: E501
the total count of exports # noqa: E501
:return: The total of this ExportConfigurationListResult. # noqa: E501
:rtype: integer
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ExportConfigurationListResult.
the total count of exports # noqa: E501
:param total: The total of this ExportConfigurationListResult. # noqa: E501
:type: integer
"""
self._total = total
@property
def next_link(self):
"""Gets the next_link of this ExportConfigurationListResult. # noqa: E501
:return: The next_link of this ExportConfigurationListResult. # noqa: E501
:rtype: string
"""
return self._next_link
@next_link.setter
def next_link(self, next_link):
"""Sets the next_link of this ExportConfigurationListResult.
:param next_link: The next_link of this ExportConfigurationListResult. # noqa: E501
:type: string
"""
self._next_link = next_link
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExportConfigurationListResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
ca60621aa6c8dcb7c069ed6f98b0540b50236ff7 | ca4c1ff931e5a2b54a669aec1dc9ae53083544cc | /ntx/components/navigation.py | 413e79aaeac84f7e20b7e2f202ad441a09fe2a09 | [] | no_license | medubin/ntx | c92e6f74c70a1bb628ef2275228c5d3bc133027f | 3367f19c4d525d9e6ab346453cd0b9dccba84241 | refs/heads/master | 2021-03-30T21:36:10.622042 | 2018-05-17T23:58:57 | 2018-05-17T23:58:57 | 124,611,834 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,750 | py | import urwid
import os
import ntx.helpers.directory_helper as directory_helper
from pathlib import Path
from ntx.base.base_component import BaseComponent
class Navigation(BaseComponent):
BASE_DIRECTORY = str(Path.home()) + '/.ntx'
def __init__(self, env):
self.env = env
self.__last_search = ''
self.__file_indices = [0]
self.__files = []
self.__directory = ''
self.__tags = {}
self.__selected_tag = ''
self.set_files_from_directory(self.BASE_DIRECTORY)
self.content = urwid.SimpleFocusListWalker(self.create_files(self.get_files()))
self.widget = self.__render()
def __render(self):
listbox = ListBoxOverride(self.content)
if len(self.content):
listbox.set_focus(0)
return listbox
def create_files(self, files):
contents = []
for file in files:
full_path = self.get_full_directory() + '/' + file
content = urwid.Text(file)
if os.path.isdir(full_path):
contents.append(urwid.AttrMap(content, 'folder', 'reveal focus'))
else:
contents.append(urwid.AttrMap(content, None, 'reveal focus'))
return contents
def set_focus(self, focus):
if len(self.content):
self.widget.set_focus(focus)
def scroll(self, direction):
if 0 <= (self.get_file_index() + direction) <= len(self.get_files()) - 1:
self.change_file_index(direction)
self.set_focus(self.get_file_index())
# Getters and Setters
# directory
def get_directory(self):
return self.__directory
def get_full_directory(self):
return self.BASE_DIRECTORY + self.__directory
def push_directory(self, new_directory):
self.__directory += '/' + new_directory
def pop_directory(self):
self.__directory = '/'.join(self.__directory.split('/')[:-1])
# files
def get_files(self):
return self.__files
def set_files(self, files):
self.__files = files
def set_files_from_directory(self, directory):
folders = []
notes = []
all_files = os.listdir(directory)
all_files = directory_helper.filter_hidden(all_files)
for file in all_files:
full_path = self.get_full_directory() + '/' + file
if os.path.isdir(full_path):
folders.append(file)
else:
notes.append(file)
self.__files = sorted(folders) + sorted(notes)
def get_selected_file_name(self):
if self.get_file_index() < len(self.__files):
return self.__files[self.get_file_index()]
return ''
#file index
def get_file_index(self):
return self.__file_indices[-1]
def pop_file_index(self):
self.__file_indices = self.__file_indices[:-1]
def push_file_index(self, index):
self.__file_indices.append(index)
def change_file_index(self, velocity):
self.__file_indices[-1] += velocity
# tags
def get_tags(self):
return self.__tags
def set_tags(self, tags):
self.__tags = tags
#selected tag
def get_selected_tag(self):
return self.__selected_tag
def set_selected_tag(self, tag):
self.__selected_tag = tag
#last search
def get_last_search(self):
return self.__last_search
def set_last_search(self, search_term):
self.__last_search = search_term
# overrides the keypress which has some weird behavior in urwid.
class ListBoxOverride(urwid.ListBox):
def keypress(self, size, key):
return key
| [
"[email protected]"
] | |
019b9314fefde11f12dde8785c9b56d0dd7a1851 | 0e686997d9d83e2a6dd14cd7ed7fa521ada788f6 | /Bit Manipulation/502 Binary to String.py | 69cd30aee2152cf0eaa4ee4e60ae76d758111ed7 | [] | no_license | vaibhavigaekwad007/Cracking-the-Coding-Interview | 97cec02d87463971108a42f2434896fe126bf5a9 | 1677a2d78d6aa9df8f1da04a1c086b9f1ff4858e | refs/heads/master | 2022-03-21T07:17:56.602596 | 2020-01-02T06:32:26 | 2020-01-02T06:32:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,120 | py | """
Given a real number between 8 and 1 (e.g., 0.72) that is passed in as a double, print the binary representation.
If the number cannot be represented accurately in binary with at most 32 characters, print "ERROR:'
Solution:
1. Compare number to . 5, then. 25, and so on
"""
__author__ = 'abhireddy96'
class Solution:
def binaryToString(self, num):
if num <= 0 or num >= 1:
return "ERROR"
res = '.'
frac = 0.5
while num > 0:
# Setting a limit on length: 32 characters
if len(res) >= 32:
return res
# If num is greater than frac
if num >= frac:
# Append 1 to result and subtract frac from num
res += '1'
num -= frac
else:
# Append 0 to result
res += '0'
# Take half from frac after each iteration
frac /= 2.0
return res
if __name__ == "__main__":
print(Solution().binaryToString(0.72))
| [
"[email protected]"
] | |
e3b21efcb547b054c173e8a767e3fdb62f682a42 | b3b105cff650721c718c0121b2786b2d232daa2c | /SAC_modular/DQ_visual.py | 1ed3993f40cad0bef6540409b01592db4a7d81fa | [] | no_license | sholtodouglas/SAC_TF2 | 8456eb31e596d3f39f8f7bb94e2ea78461f50e85 | 0ed808ead85fd5e48a86cd6de33c5df7bd9cd724 | refs/heads/master | 2020-06-15T23:21:00.190018 | 2019-10-13T05:11:27 | 2019-10-13T05:11:27 | 195,418,783 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,316 | py | #!/usr/bin/env python
# coding: utf-8
# In[5]:
from tensorflow.keras.layers import Dense, Lambda, Conv2D, Flatten, LeakyReLU, Conv2DTranspose
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
import tensorflow as tf
import numpy as np
from huskarl.policy import EpsGreedy, Greedy
from huskarl.core import Agent, HkException
from huskarl import memory
from tensorflow.keras.models import Sequential
from itertools import count
from collections import namedtuple
from queue import Empty
from time import sleep
import multiprocessing as mp
import numpy as np
import cloudpickle # For pickling lambda functions and more
from huskarl.memory import Transition
from huskarl.core import HkException
import matplotlib.pyplot as plt
import gym
import ur5_RL
import huskarl as hk
from scipy import ndimage, misc
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
if tf.test.gpu_device_name() != '/device:GPU:0':
print('WARNING: GPU device not found.')
else:
print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name()))
# In[2]:
class DQN(Agent):
"""Deep Q-Learning Network
Base implementation:
"Playing Atari with Deep Reinforcement Learning" (Mnih et al., 2013)
Extensions:
Multi-step returns: "Reinforcement Learning: An Introduction" 2nd ed. (Sutton & Barto, 2018)
Double Q-Learning: "Deep Reinforcement Learning with Double Q-learning" (van Hasselt et al., 2015)
Dueling Q-Network: "Dueling Network Architectures for Deep Reinforcement Learning" (Wang et al., 2016)
"""
def __init__(self, model, optimizer=None, policy=None, test_policy=None,
memsize=10_000, target_update=3, gamma=0.6, batch_size=32, nsteps=1,
enable_double_dqn=True, enable_dueling_network=False, dueling_type='avg'):
"""
TODO: Describe parameters
"""
self.optimizer = Adam(lr=3e-3) if optimizer is None else optimizer
self.policy = EpsGreedy(0.1) if policy is None else policy
self.test_policy = Greedy() if test_policy is None else test_policy
self.memsize = memsize
self.memory = memory.PrioritizedExperienceReplay(memsize, nsteps)
self.target_update = target_update
self.gamma = gamma
self.batch_size = batch_size
self.nsteps = nsteps
self.training = True
# Extension options
self.enable_double_dqn = enable_double_dqn
self.enable_dueling_network = enable_dueling_network
self.dueling_type = dueling_type
self.model =model
# Define loss function that computes the MSE between target Q-values and cumulative discounted rewards
# If using PrioritizedExperienceReplay, the loss function also computes the TD error and updates the trace priorities
def masked_q_loss(data, y_pred):
"""Computes the MSE between the Q-values of the actions that were taken and the cumulative discounted
rewards obtained after taking those actions. Updates trace priorities if using PrioritizedExperienceReplay.
"""
action_batch, target_qvals = data[:, 0], data[:, 1]
seq = tf.cast(tf.range(0, tf.shape(action_batch)[0]), tf.int32)
action_idxs = tf.transpose(tf.stack([seq, tf.cast(action_batch, tf.int32)]))
qvals = tf.gather_nd(y_pred, action_idxs)
if isinstance(self.memory, memory.PrioritizedExperienceReplay):
def update_priorities(_qvals, _target_qvals, _traces_idxs):
"""Computes the TD error and updates memory priorities."""
td_error = np.abs((_target_qvals - _qvals).numpy())
_traces_idxs = (tf.cast(_traces_idxs, tf.int32)).numpy()
self.memory.update_priorities(_traces_idxs, td_error)
return _qvals
qvals = tf.py_function(func=update_priorities, inp=[qvals, target_qvals, data[:,2]], Tout=tf.float32)
return tf.keras.losses.mse(qvals, target_qvals)
self.model.compile(optimizer=self.optimizer, loss=masked_q_loss)
# Clone model to use for delayed Q targets
self.target_model = tf.keras.models.clone_model(self.model)
self.target_model.set_weights(self.model.get_weights())
def save(self, filename, overwrite=False):
"""Saves the model parameters to the specified file."""
self.model.save_weights(filename, overwrite=overwrite)
def act(self, state, instance=0):
"""Returns the action to be taken given a state."""
qvals = self.model.predict(np.array([state]))[0]
# plt.imshow(state)
# plt.imshow(np.reshape(qvals, [128,128]), alpha = 0.5, cmap = 'plasma')
# plt.savefig('q_overlay')
# # plt.show()
# we know our original shape is 1,128,128,1
world_range = 0.26 * 2
pixel_range = 128
mid = pixel_range / 2
index = self.policy.act(qvals) if self.training else self.test_policy.act(qvals)
pixel_index = np.unravel_index(index, [128,128])
        # Map the flattened Q-map pixel index back to world x/y; z is fixed at 0 for now
        # (eventually it should come from the depth map)
        return np.array(list((np.array(pixel_index) - mid) / (pixel_range / world_range)) + [0]), index
def push(self, transition, instance=0):
"""Stores the transition in memory."""
self.memory.put(transition)
def train(self, step):
"""Trains the agent for one step."""
if len(self.memory) == 0:
return
# Update target network
if self.target_update >= 1 and step % self.target_update == 0:
# Perform a hard update
self.target_model.set_weights(self.model.get_weights())
elif self.target_update < 1:
# Perform a soft update
mw = np.array(self.model.get_weights())
tmw = np.array(self.target_model.get_weights())
self.target_model.set_weights(self.target_update * mw + (1 - self.target_update) * tmw)
# Train even when memory has fewer than the specified batch_size
batch_size = min(len(self.memory), self.batch_size)
# Sample batch_size traces from memory
state_batch, action_batch, reward_batches, end_state_batch, not_done_mask = self.memory.get(batch_size)
# Compute the value of the last next states
target_qvals = np.zeros(batch_size)
non_final_last_next_states = [es for es in end_state_batch if es is not None]
if len(non_final_last_next_states) > 0:
if self.enable_double_dqn:
# "Deep Reinforcement Learning with Double Q-learning" (van Hasselt et al., 2015)
# The online network predicts the actions while the target network is used to estimate the Q-values
q_values = self.model.predict_on_batch(np.array(non_final_last_next_states))
actions = np.argmax(q_values, axis=1)
# Estimate Q-values using the target network but select the values with the
# highest Q-value wrt to the online model (as computed above).
target_q_values = self.target_model.predict_on_batch(np.array(non_final_last_next_states))
selected_target_q_vals = target_q_values[range(len(target_q_values)), actions]
else:
# Use delayed target network to compute target Q-values
selected_target_q_vals = self.target_model.predict_on_batch(np.array(non_final_last_next_states)).max(1)
non_final_mask = list(map(lambda s: s is not None, end_state_batch))
target_qvals[non_final_mask] = selected_target_q_vals
# Compute n-step discounted return
# If episode ended within any sampled nstep trace - zero out remaining rewards
for n in reversed(range(self.nsteps)):
rewards = np.array([b[n] for b in reward_batches])
target_qvals *= np.array([t[n] for t in not_done_mask])
target_qvals = rewards + (self.gamma * target_qvals)
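        # Worked example (illustrative comment, not original code): with nsteps=2 and
        # gamma=0.9 the loop above produces target = r0 + 0.9*(r1 + 0.9*Q_target(s2)),
        # with not_done_mask zeroing every term after a terminal step.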
# Compile information needed by the custom loss function
loss_data = [action_batch, target_qvals]
# If using PrioritizedExperienceReplay then we need to provide the trace indexes
# to the loss function as well so we can update the priorities of the traces
if isinstance(self.memory, memory.PrioritizedExperienceReplay):
loss_data.append(self.memory.last_traces_idxs())
# Train model
self.model.train_on_batch(np.array(state_batch), np.stack(loss_data).transpose())
# In[ ]:
# Packet used to transmit experience from environment subprocesses to main process
# The first packet of every episode will have reward set to None
# The last packet of every episode will have state set to None
RewardState = namedtuple('RewardState', ['reward', 'state'])
class Simulation:
"""Simulates an agent interacting with one of multiple environments."""
def __init__(self, create_env, agent, mapping=None):
self.create_env = create_env
self.agent = agent
self.mapping = mapping
def train(self, max_steps=100_000, instances=1, visualize=False, plot=None, max_subprocesses=0):
"""Trains the agent on the specified number of environment instances."""
self.agent.training = True
if max_subprocesses == 0:
# Use single process implementation
self._sp_train(max_steps, instances, visualize, plot)
        elif max_subprocesses is None or max_subprocesses > 0:
            # Use multiprocess implementation. NOTE: _mp_train is not included in this
            # trimmed copy (see the original huskarl Simulation), so this branch raises
            # AttributeError if taken; the single-process path above is the one used below.
            self._mp_train(max_steps, instances, visualize, plot, max_subprocesses)
else:
raise HkException(f"Invalid max_subprocesses setting: {max_subprocesses}")
def _sp_train(self, max_steps, instances, visualize, plot):
"""Trains using a single process."""
# Keep track of rewards per episode per instance
episode_reward_sequences = [[] for i in range(instances)]
episode_step_sequences = [[] for i in range(instances)]
episode_rewards = [0] * instances
# Create and initialize environment instances
envs = [self.create_env() for i in range(instances)]
envs[0].render(mode='human')
states = [env.reset()['observation'][0] for env in envs] # get the image
for step in range(max_steps):
for i in range(instances):
if visualize: envs[i].render()
action, action_index = self.agent.act(states[i], i)
next_state, reward, done, _ = envs[i].step(action)
(next_image, next_depth) = next_state['observation']
self.agent.push(Transition(states[i], action_index, reward, None if done else next_image), i)
episode_rewards[i] += reward
if done:
episode_reward_sequences[i].append(episode_rewards[i])
episode_step_sequences[i].append(step)
episode_rewards[i] = 0
if plot: plot(episode_reward_sequences, episode_step_sequences)
(image, depth) = envs[i].reset()['observation']
states[i] = image
else:
states[i] = next_image
# Perform one step of the optimization
self.agent.train(step)
if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)
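    def test(self, max_steps=1000, instances=1, visualize=True):
        """Runs the agent greedily, without learning.
        NOTE: added as a minimal sketch -- this trimmed copy defined no test method,
        so the sim.test(...) calls below would otherwise raise AttributeError.
        It mirrors _sp_train minus the agent.push/agent.train calls.
        """
        self.agent.training = False
        envs = [self.create_env() for i in range(instances)]
        states = [env.reset()['observation'][0] for env in envs]  # image part of the observation
        for step in range(max_steps):
            for i in range(instances):
                if visualize: envs[i].render()
                action, _ = self.agent.act(states[i], i)
                next_state, reward, done, _ = envs[i].step(action)
                (next_image, next_depth) = next_state['observation']
                states[i] = envs[i].reset()['observation'][0] if done else next_image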
# In[3]:
# Setup gym environment
create_env = lambda: gym.make('ur5_RL_lego-v0')
dummy_env = create_env()
# Build a simple neural network with 3 fully connected layers as our model
# model = Sequential([
# Dense(16, activation='relu', input_shape=dummy_env.observation_space.shape),
# Dense(16, activation='relu'),
# Dense(16, activation='relu'),
# ])
inputs = tf.keras.Input(shape=(128,128,3), name='img')
x = Conv2D(filters=32, kernel_size=4, strides=2, padding='same')(inputs)
x = LeakyReLU()(x)
x = Conv2D(filters=64, kernel_size=4, strides=2, padding='same')(x)
x = LeakyReLU()(x)
x = Conv2D(filters=128, kernel_size=4, strides=1, padding='same')(x)
x = LeakyReLU()(x)
x = Conv2D(filters=256, kernel_size=4, strides=1, padding='same')(x)
x = LeakyReLU()(x)
x = Conv2DTranspose(filters=32,kernel_size=4,strides=2,padding='same')(x)
x = LeakyReLU()(x)
outputs = Conv2DTranspose(filters=1,kernel_size=4,strides=2,padding='same')(x)
outputs = Flatten()(outputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs, name='model')
model.summary()  # summary() prints itself; wrapping it in print() would also print "None"
# Create Deep Q-Learning Network agent
#agent = DQN(model, actions=dummy_env.action_space.n, nsteps=3)
agent = DQN(model, nsteps=2)
def plot_rewards(episode_rewards, episode_steps, done=False):
plt.clf()
plt.xlabel('Step')
plt.ylabel('Reward')
for ed, steps in zip(episode_rewards, episode_steps):
plt.plot(steps, ed)
plt.show() if done else plt.pause(0.001) # Pause a bit so that the graph is updated
# Create simulation, train and then test
sim = Simulation(create_env, agent)
model.save('convolutional_boi.h5')
sim.train(max_steps=3000, visualize=True, plot=plot_rewards)
model.save('convolutional_boi.h5')
sim.test(max_steps=1000)
# In[4]:
sim.test(max_steps=1000)
# In[ ]:
| [
"[email protected]"
] | |
1bf98af7c914e7b1f6af93dc0673a32b3ee164fb | dd5e7697d6c2fc67ea195d8d3d38c19947433a45 | /chapter3_python-stringprocess/string.py | bad20a3dde5c89bb5b84504f2ab8f537bf7b99c1 | [] | no_license | ksm0207/Python_study | e8350e05f11497db345ef38939308b7e8b3b5e13 | 4b5f1bb1ae95a38e399bd4e789cdba57655caf3b | refs/heads/master | 2023-02-03T02:44:16.199225 | 2020-12-23T08:05:40 | 2020-12-23T08:05:40 | 288,656,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | # 문자열
name = "선풍기"
print(name)
name2 = "에어컨"
print(name2)
name3 = """
Hello Python !
"""
print(name3)
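# A small formatting example (added for illustration; not in the original lesson file):
print(name + " / " + name2)   # concatenation
print(f"{name} and {name2}")  # f-string formatting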
| [
"[email protected]"
] | |
39ab4982e78a4b7bf135c5ad7f9176d3529e949e | 0fdae1776b655c03ce0de85d2b79350a88c3b707 | /I0320005_Exercise 7.29.py | 8db693b74d621dbb2410de8e682d3af2c9f513ee | [] | no_license | rafiadn/Ahmad-Rafi-Adnanta_I0320005_Abyan_Tugas7 | 257256eed11e68060396d0d856c9fe5ef92d4893 | 4b9f925f7f22b702b5bd24fb4fb394708a9480f1 | refs/heads/main | 2023-04-07T02:42:44.225270 | 2021-04-16T06:28:24 | 2021-04-16T06:28:24 | 358,496,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | a = [10, 40, 30, 10, 30, 20]
print('a = ', a)
print('count of 10 = ', a.count(10))
print('count of 30 = ', a.count(30)) | [
"[email protected]"
] | |
ddda108c48b2a50a4dccf4df130003cfd4157fb0 | 0cbf107de8b6ae51308765d321d8c134ea8108f1 | /Algorithms.py | 7c635b1fb6d136953b8f2323cf26aa52d8e5d6be | [] | no_license | dcormar/pyScripts | eee3555ddad83de0a66d4122f3e3809589166a84 | b75fb32f0469b35a5e88ed8ba006854909ec1eec | refs/heads/master | 2020-04-08T19:18:09.082347 | 2019-05-05T18:10:32 | 2019-05-05T18:10:32 | 159,649,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | from math import log, ceil
def bouncingBall(h, bounce, window):
return -1 if (h <= 0 or bounce <= 0 or bounce >= 1 or window >= h) else (ceil(log(window/h,bounce))*2 - 1)
print (bouncingBall(30, 0.66, 1.5))
'''
WITH THE FOLLOWING CODE, IT RAISES "RecursionError: maximum recursion depth exceeded while calling a Python object"
if direction == 'DOWN' and h >= window:
print("Ball " + direction + " and higher than window. Counter = " + str(counter))
return bouncingBall(bounce*h, bounce, window, counter + 1, 'UP')
elif direction == 'UP' and h >= window:
print("Ball " + direction + " and higher than window. Counter = " + str(counter))
return bouncingBall(h, bounce, window, counter + 1, 'DOWN')
else:
print("Ball " + direction + " and lower than window. Counter = " + str(counter))
return counter
''' | [
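# Iterative cross-check of the closed form above (a sketch added for clarity):
# count window passes directly instead of using the logarithm identity.
def bouncingBallIterative(h, bounce, window):
    if h <= 0 or bounce <= 0 or bounce >= 1 or window >= h:
        return -1
    seen = 1          # the initial drop past the window
    h *= bounce
    while h > window:
        seen += 2     # once on the way up, once falling back down
        h *= bounce
    return seen

print (bouncingBallIterative(30, 0.66, 1.5))  # matches bouncingBall(30, 0.66, 1.5) == 15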
"[email protected]"
] | |
2a072def812c6e995bddd52a6145ef5abdb5b46c | 451722014b427806a2988b04797dcdcbcbab3836 | /musterdaten/migrations/0001_initial.py | 898a8f0819944b0098cb0268114422e1b2c1a8e8 | [] | no_license | CivicVision/musterdatenkatalog | 0de148c28ca24c1e47a2b145c575fd0bfbdbc169 | 9875b74ed17b6417298ee7758b7060de50094185 | refs/heads/master | 2023-03-07T04:42:54.935932 | 2021-02-15T12:32:44 | 2021-02-15T12:34:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,347 | py | import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, verbose_name='titel')),
('DCAT_AP', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Kategorie',
'verbose_name_plural': 'Kategorien',
},
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128)),
],
options={
'verbose_name': 'Stadt',
'verbose_name_plural': 'Städte',
},
),
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128, verbose_name='titel')),
('description', models.CharField(max_length=128, verbose_name='beschreibung')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='erstellt_am')),
('original_id', models.CharField(max_length=32, verbose_name='portal_id')),
('url', models.URLField()),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='geändert_am')),
('metadata_created', models.DateTimeField(verbose_name='metadaten_erstellt')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.Category', verbose_name='Kategorie')),
('city', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.City', verbose_name='Stadt')),
],
options={
'verbose_name': 'Datensatz',
'verbose_name_plural': 'Datensätze',
},
),
migrations.CreateModel(
name='Leika',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, verbose_name='titel')),
('code', models.CharField(max_length=64)),
('description', models.CharField(max_length=128)),
],
),
migrations.CreateModel(
name='License',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, verbose_name='titel')),
('url', models.URLField()),
('short_title', models.CharField(max_length=32)),
],
options={
'verbose_name': 'Lizenz',
'verbose_name_plural': 'Lizenzen',
},
),
migrations.CreateModel(
name='Modeldataset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, verbose_name='titel')),
('leika', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.Leika')),
],
options={
'verbose_name': 'Musterdatensatz',
'verbose_name_plural': 'Musterdatensätze',
},
),
migrations.CreateModel(
name='Modelsubject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, verbose_name='titel')),
],
options={
'verbose_name': 'Thema',
'verbose_name_plural': 'Themen',
},
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
],
options={
'verbose_name': 'Bundesland',
'verbose_name_plural': 'Bundesländer',
},
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('session_id', models.CharField(max_length=32)),
('dataset', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.Dataset')),
('modeldataset', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.Modeldataset')),
],
options={
'verbose_name': 'Bewertung',
'verbose_name_plural': 'Bewertungen',
},
),
migrations.AddField(
model_name='modeldataset',
name='modelsubject',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.Modelsubject'),
),
migrations.AddField(
model_name='dataset',
name='license',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.License'),
),
migrations.AddField(
model_name='dataset',
name='modeldataset',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='musterdaten.Modeldataset'),
),
migrations.AddField(
model_name='dataset',
name='top_3',
field=models.ManyToManyField(related_name='dataset_top3', to='musterdaten.Modeldataset'),
),
]
| [
"[email protected]"
] | |
c1274ab381afe7f4c02a38e209d14657c51870cf | 36b86e45774fbb0658bddafb3ea5df3200ff7c43 | /Chapter08/Number_10/decimalToBinary.py | 27bdcfe86b3b7b1cbec0bbab335c82a49859aec2 | [] | no_license | lunyy/ScriptProgramming_Python | 7c871f59a4932348af06e99dd03e6fd8f2e066e0 | bfd616b6d653d9c11ff58d2ed9011c4e814bb7c8 | refs/heads/master | 2020-06-17T12:19:00.820102 | 2016-11-26T17:40:58 | 2016-11-26T17:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | def decimalToBinary(value):
    value = int(value)  # parse the input; int() is safer than the original eval()
str_list = []
while(1) :
if value == 0:
str_list.append(0)
break
elif value == 1:
str_list.append(1)
break
        # If value is 0 or 1, append that final digit and break
        else:
            str_list.append(value % 2)
            value = value // 2  # floor-divide; round() here was a bug (rounding corrupts the digits)
            # Otherwise append value % 2, halve value, and continue
return str_list[::-1] | [
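# Example usage (a sketch; the original file shipped no driver code):
if __name__ == "__main__":
    print(decimalToBinary("6"))   # -> [1, 1, 0]
    print(decimalToBinary("10"))  # -> [1, 0, 1, 0]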
"[email protected]"
] | |
988c68789e7119f20d7f7715581ef86a77fbb952 | 5941c9b7047054d7be3e55f460a3d5b35b987498 | /GeeksForGeeks/Searching/MajorityElement.py | a96ab75d69a3a46708549b96369b6d86dd683628 | [] | no_license | mohtashimkamran/DSA-GeeksForGeeks-Codechef-Codeforces-solutions | 19d3a0d716160750bac70fc979a982c4fde085bd | 364e7e68e32ebdb2429754883465696e06b3a436 | refs/heads/main | 2023-05-08T16:44:42.309261 | 2021-05-31T17:46:40 | 2021-05-31T17:46:40 | 307,804,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | # Majority Lement in an array
# def majority(arr,n):
# res=0
# count=1
# for i in range(1,n):
# if(arr[res]==arr[i]):
# count+=1
# else:
# count-=1
# if(count==0):
# res=i
# count=1
# count=0
# for i in range(n):
# if(arr[res]==arr[i]):
# count+=1
# if(count<=(n//2)):
# res=-1
# return res
# arr=[8,8,1,2,8]
# n=len(arr)
# print(majority(arr,n)) | [
"[email protected]"
] | |
887dd01dedd2f043d77af10aac2940397c5305da | 9611f657bbb92d2cc3edd556ea3ffaa702e997f0 | /graphics/screen.py | ae886934405652017bcfbd3dccb607f3cffc0d56 | [] | no_license | donhilion/JumpAndRun | 10fdfdcc5fdcd5619b757c3f65e68d2bf4085852 | 0308785a51bf61d9a4fec2d8370540df502b8178 | refs/heads/master | 2021-01-23T08:38:53.808186 | 2014-01-28T20:26:53 | 2014-01-28T20:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | __author__ = 'Donhilion'
class Screen(object):
""" The screen class.
This class provides an interface for screen which could be displayed in the Window class.
"""
def __init__(self):
""" Generates a new instance of this class.
Generates a new instance of this class.
"""
pass
def draw(self):
""" Draws the screen.
This method is a stub for drawing on the screen.
"""
pass
def key_down(self, key):
""" Handles key down events.
This method is a stub for handling key down events.
Args:
key: The key event information provided by pygame.
"""
pass
def key_up(self, key):
""" Handles key up events.
This method is a stub for handling key up events.
Args:
key: The key event information provided by pygame.
"""
pass
def mouse_click(self, pos, button):
""" Handles mouse click events.
This method is a stub for handling mouse click events.
Args:
pos: The position of the mouse.
button: The button pressed.
"""
pass
def mouse_move(self, pos):
""" Handles mouse move events.
This method is a stub for handling mouse movement events.
Args:
pos: The position of the mouse.
"""
pass
| [
"[email protected]"
] | |
f496a5cffa7a254d07e540baad61197bc12f653d | 60d5b5b1f1c912d1655de3884efc09dfddd8d132 | /sites/kotourism/places/migrations/0026_auto__add_track.py | e79459953a770ed672440949f28f15b68d8eff12 | [] | no_license | alexgula/django_sites | 15033c739401f24603e957c5a034d63652f0d21f | 038834c0f544d6997613d61d593a7d5abf673c70 | refs/heads/master | 2016-09-05T11:02:43.838095 | 2014-07-07T11:36:07 | 2014-07-07T11:36:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,361 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Track'
db.create_table('places_track', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=150, db_index=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50)),
('track', self.gf('django.db.models.fields.CharField')(max_length=1000)),
('desc', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('places', ['Track'])
def backwards(self, orm):
# Deleting model 'Track'
db.delete_table('places_track')
models = {
'places.imagecontent': {
'Meta': {'ordering': "['ordering']", 'object_name': 'ImageContent', 'db_table': "'places_region_imagecontent'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'imagecontent_set'", 'to': "orm['places.Region']"}),
'position': ('django.db.models.fields.CharField', [], {'default': "'left'", 'max_length': '10'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'places.place': {
'Meta': {'ordering': "['name']", 'unique_together': "[['type', 'slug'], ['type', 'interop_code']]", 'object_name': 'Place'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'address_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'address_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'address_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'desc_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desc_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desc_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '250', 'blank': 'True'}),
'interop_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'db_index': 'True'}),
'name_en': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'name_uk': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '150', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['places.Place']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'phone_en': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'phone_ru': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'phone_uk': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'timetable': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'timetable_en': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'timetable_ru': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'timetable_uk': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['places.PlaceType']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'places.placetype': {
'Meta': {'object_name': 'PlaceType'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'icon': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'name_uk': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['places.PlaceType']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'places.region': {
'Meta': {'ordering': "['name']", 'unique_together': "(['type', 'slug'],)", 'object_name': 'Region'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'desc_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desc_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'desc_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'icon': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '250', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map_color': ('django.db.models.fields.CharField', [], {'max_length': '12', 'blank': 'True'}),
'map_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'name_ru': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'name_uk': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'template_key': ('django.db.models.fields.CharField', [], {'default': "'standart'", 'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'places.restructuredcontent': {
'Meta': {'ordering': "['ordering']", 'object_name': 'RestructuredContent', 'db_table': "'places_region_restructuredcontent'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'restructuredcontent_set'", 'to': "orm['places.Region']"}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'text_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text_ru': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'text_uk': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'places.track': {
'Meta': {'object_name': 'Track'},
'desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'track': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
}
}
complete_apps = ['places']
| [
"[email protected]"
] | |
74b8760b1c181f2762ce273af444d1053d1ac0a9 | 02c1fe563ab440eaee0a6e49d3e118f790dd563e | /tests/conftest.py | 3d84cedcce3a7914f6b4001fe8fde04ca7c30970 | [] | no_license | arijit05saha/arijit05saha | 63cebbfd750a5d41dbca0d0758880b11e9b0044d | 9000bb834ced5a28286989436895298b67af6a56 | refs/heads/master | 2023-06-25T09:15:32.701309 | 2021-07-28T20:41:15 | 2021-07-28T20:41:15 | 390,500,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,016 | py | import pytest
from selenium import webdriver
driver = None
def pytest_addoption(parser):
parser.addoption(
"--browser_name", action="store", default="chrome"
)
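# Usage note (an assumption based on the option registered above):
#   pytest --browser_name firefox --html=report.html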
@pytest.fixture(scope="class")
def setup(request):
browser_name = request.config.getoption("browser_name")
global driver
    if browser_name == "chrome":
        # CHROME (raw strings avoid invalid "\A", "\S" etc. escape sequences)
        driver = webdriver.Chrome(executable_path=r"C:\Arijit\Selenium\Webdrivers\chromedriver.exe")
    elif browser_name == "firefox":
        # FIREFOX
        driver = webdriver.Firefox(executable_path=r"C:\Arijit\Selenium\Webdrivers\geckodriver.exe")
    elif browser_name == "ie":
        # IE -- was webdriver.Firefox by mistake; IEDriverServer drives Internet Explorer
        driver = webdriver.Ie(executable_path=r"C:\Arijit\Selenium\Webdrivers\IEDriverServer.exe")
driver.get("https://www.bestbuy.com/")
driver.maximize_window()
request.cls.driver = driver
yield
driver.close()
@pytest.mark.hookwrapper
def pytest_runtest_makereport(item):
"""
Extends the PyTest Plugin to take and embed screenshot in html report, whenever test fails.
:param item:
"""
pytest_html = item.config.pluginmanager.getplugin('html')
outcome = yield
report = outcome.get_result()
extra = getattr(report, 'extra', [])
screenshotLoc = "C:\\Users\\Arijit\\PycharmProjects\\BestBuy\\screenshots\\"
if report.when == 'call' or report.when == "setup":
xfail = hasattr(report, 'wasxfail')
if (report.skipped and xfail) or (report.failed and not xfail):
file_name = screenshotLoc + report.nodeid.replace("::", "_") + ".png"
_capture_screenshot(file_name)
if file_name:
html = '<div><img src="%s" alt="screenshot" style="width:304px;height:228px;" ' \
'onclick="window.open(this.src)" align="right"/></div>' % file_name
extra.append(pytest_html.extras.html(html))
report.extra = extra
def _capture_screenshot(name):
driver.get_screenshot_as_file(name) | [
"[email protected]"
] | |
6267765ba313ba8bed3090256c418033627aabe9 | 4798f79ff2f430f643863742b49b84243a480e26 | /manage.py | 4d78a65f7e54363f16cfefd8265f5fba4c0937aa | [
"MIT"
] | permissive | MachineLearningProject/flight-delay-prediction | 86c2ab17e40edc50e86c31f8308fcf91aa96aad6 | 915879f26ece8a1e5bf8678668cc2980a2d0e240 | refs/heads/master | 2021-01-10T17:00:46.955059 | 2016-03-16T11:36:42 | 2016-03-16T11:36:42 | 51,708,787 | 0 | 0 | null | 2016-03-16T11:36:43 | 2016-02-14T19:05:50 | Python | UTF-8 | Python | false | false | 249 | py | from flask.ext.script import Manager
from app import app
manager = Manager(app)
@manager.command
def runworker():
from app import cronjobs
job = cronjobs.AirportDelayRetriever()
job.run()
if __name__ == "__main__":
manager.run()
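# Usage sketch: `python manage.py runworker` runs the delay-retrieval job above;
# Flask-Script also provides `python manage.py runserver` by default.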
| [
"[email protected]"
] | |
b685e2e64e61cf761c0ca71b0c7f808d61cc266e | b3ab7562ae1b054f4da45192a0d711c71764e42b | /MachineLearning-StudentActivity/DSML_Task3/SVMRegression/SVMRegressorAllAttribute.py | 8b714fcb0b88a256b0ce72aca04a005302ee0209 | [] | no_license | Sebastiano2906/MachineLearning-StudentActivity | 80d7d41c527224d5dd23dcfabf2081e56829b4d0 | cdb17f01655a255b856d37473415fc66b5fa3d49 | refs/heads/master | 2022-11-30T08:53:25.438953 | 2020-08-12T17:37:28 | 2020-08-12T17:37:28 | 196,618,732 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py | from sklearn.svm import LinearSVR
import pandas as pd
from sklearn.metrics import mean_squared_error
import numpy as np
predictiveAttributeDegree = pd.read_json("../predictiveDegree.txt", orient='records', dtype=True,typ="series") # ADJUST THE PATHS SO THEY POINT AT THIS FILE
predictiveAttributeNotDegree = pd.read_json("../predictiveNotDegree.txt", orient='records', dtype=True,typ="series") # ADJUST THE PATHS SO THEY POINT AT THIS FILE
train_set_tot = []
test_set_tot = []
train_result_tot = []
test_result_tot = []
count = 0
svm_reg_tot = LinearSVR(epsilon=1.0, max_iter=100000000)
train_percent = (len(predictiveAttributeDegree)/100)*80
for i in range(len(predictiveAttributeDegree)):
if count < train_percent:
count = count + 1
train_set_tot.append([predictiveAttributeNotDegree[i][0], predictiveAttributeNotDegree[i][1], predictiveAttributeNotDegree[i][6],
predictiveAttributeNotDegree[i][7], predictiveAttributeNotDegree[i][9], predictiveAttributeNotDegree[i][10],
predictiveAttributeNotDegree[i][11], predictiveAttributeNotDegree[i][12],predictiveAttributeNotDegree[i][17],
predictiveAttributeNotDegree[i][18]])
train_result_tot.append([predictiveAttributeDegree[i][2]])
else:
test_set_tot.append([predictiveAttributeNotDegree[i][0], predictiveAttributeNotDegree[i][1], predictiveAttributeNotDegree[i][6],
predictiveAttributeNotDegree[i][7], predictiveAttributeNotDegree[i][9], predictiveAttributeNotDegree[i][10],
predictiveAttributeNotDegree[i][11], predictiveAttributeNotDegree[i][12],predictiveAttributeNotDegree[i][17],
predictiveAttributeNotDegree[i][18]])
test_result_tot.append([predictiveAttributeDegree[i][2]])
train_percent = (len(predictiveAttributeNotDegree)/100)*80
count = 0
for i in range(len(predictiveAttributeNotDegree)):
if count < train_percent:
count = count + 1
train_set_tot.append([predictiveAttributeNotDegree[i][0], predictiveAttributeNotDegree[i][1], predictiveAttributeNotDegree[i][6],
predictiveAttributeNotDegree[i][7], predictiveAttributeNotDegree[i][9], predictiveAttributeNotDegree[i][10],
predictiveAttributeNotDegree[i][11], predictiveAttributeNotDegree[i][12],predictiveAttributeNotDegree[i][17],
predictiveAttributeNotDegree[i][18]])
train_result_tot.append([predictiveAttributeNotDegree[i][2]])
else:
test_set_tot.append([predictiveAttributeNotDegree[i][0], predictiveAttributeNotDegree[i][1], predictiveAttributeNotDegree[i][6],
predictiveAttributeNotDegree[i][7], predictiveAttributeNotDegree[i][9], predictiveAttributeNotDegree[i][10],
predictiveAttributeNotDegree[i][11], predictiveAttributeNotDegree[i][12],predictiveAttributeNotDegree[i][17],
predictiveAttributeNotDegree[i][18]])
test_result_tot.append([predictiveAttributeNotDegree[i][2]])
train_result_tot = np.array(train_result_tot)
svm_reg_tot.fit(train_set_tot, train_result_tot.ravel())
print("----ALL ATTRIBUTE: score: ", svm_reg_tot.score(test_set_tot, test_result_tot))
# 0. matr 1.cf 6.tipoCds 7.coorte 9.annodiploma 10.votodip 11.codschool 12.tipoMat 17.mot_sta 18.sta
newStudent = [[2933, 2928, 1, 2015, 2015, 100, 200, 9, 3, 10]]
real_value = [30]
predicted = svm_reg_tot.predict(newStudent)
print("----ALL ATTRIBUTE: Predicted: ", predicted)
print("----ALL ATTRIBUTE: MSE: ", mean_squared_error(real_value, svm_reg_tot.predict(newStudent)))
print("----ALL ATTRIBUTE: Params: ", svm_reg_tot.get_params()) | [
"[email protected]"
] | |
8a59cb38a53397dc4e6561f775085d240748723e | aa05f0676d8969d97520a30e4bc9671e6c23f4f6 | /Chapter 11 - PsychoPy入门 - 基本刺激材料的呈现/ioHub-example.py | ed1d5255865a02116c78d3c5cdd4732eb24a97ff | [] | no_license | hejibo/Python-for-social-scientists | 6c7ad858ed4faa02667300d33d68896a9f3ffe49 | 28b81e25b98a7836bc5a06c03faecb54ffe3281a | refs/heads/master | 2021-09-15T12:53:20.795539 | 2018-03-09T19:54:04 | 2018-03-09T19:54:04 | 100,620,818 | 12 | 5 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | '''
http://www.psychopy.org/api/iohub/starting.html
'''
from psychopy.iohub import launchHubServer
# Start the ioHub process. The return variable is what is used
# during the experiment to control the iohub process itself,
# as well as any running iohub devices.
io=launchHubServer()
# By default, ioHub will create Keyboard and Mouse devices and
# start monitoring for any events from these devices only.
keyboard=io.devices.keyboard
mouse=io.devices.mouse
current_mouse_position = mouse.getPosition()
print('current mouse position: ', current_mouse_position)
# As a simple example, use the keyboard to have the experiment
# wait until a key is pressed.
print "Press any Key to Exit Example....."
keys = keyboard.waitForKeys()
print "Key press detected, exiting experiment."
print "the keys pressed are:",keys | [
"[email protected]"
] | |
3b6223b97fe67858b854914186d80bf6ee010f70 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_01_01/aio/operations/_file_shares_operations.py | 49e789635d95f6ce5f8b69f63250a9f13496a15b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 39,965 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._file_shares_operations import (
build_create_request,
build_delete_request,
build_get_request,
build_list_request,
build_restore_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FileSharesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2021_01_01.aio.StorageManagementClient`'s
:attr:`file_shares` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self,
resource_group_name: str,
account_name: str,
maxpagesize: Optional[str] = None,
filter: Optional[str] = None,
expand: Optional[Union[str, _models.ListSharesExpand]] = None,
**kwargs: Any
) -> AsyncIterable["_models.FileShareItem"]:
"""Lists all shares.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param maxpagesize: Optional. Specified maximum number of shares that can be included in the
list. Default value is None.
:type maxpagesize: str
:param filter: Optional. When specified, only share names starting with the filter will be
listed. Default value is None.
:type filter: str
:param expand: Optional, used to expand the properties within share's properties. Known values
are: "deleted" and "snapshots". Default value is None.
:type expand: str or ~azure.mgmt.storage.v2021_01_01.models.ListSharesExpand
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FileShareItem or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2021_01_01.models.FileShareItem]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01")) # type: Literal["2021-01-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileShareItems]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
maxpagesize=maxpagesize,
filter=filter,
expand=expand,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("FileShareItems", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares"} # type: ignore
@overload
async def create(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: _models.FileShare,
expand: Optional[Union[str, _models.PutSharesExpand]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.FileShare:
"""Creates a new share under the specified account as described by request body. The share
resource includes metadata and properties for that share. It does not include a list of the
files contained by the share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param file_share: Properties of the file share to create. Required.
:type file_share: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:param expand: Optional, used to create a snapshot. "snapshots" Default value is None.
:type expand: str or ~azure.mgmt.storage.v2021_01_01.models.PutSharesExpand
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def create(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: IO,
expand: Optional[Union[str, _models.PutSharesExpand]] = None,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.FileShare:
"""Creates a new share under the specified account as described by request body. The share
resource includes metadata and properties for that share. It does not include a list of the
files contained by the share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param file_share: Properties of the file share to create. Required.
:type file_share: IO
:param expand: Optional, used to create a snapshot. "snapshots" Default value is None.
:type expand: str or ~azure.mgmt.storage.v2021_01_01.models.PutSharesExpand
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def create(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: Union[_models.FileShare, IO],
expand: Optional[Union[str, _models.PutSharesExpand]] = None,
**kwargs: Any
) -> _models.FileShare:
"""Creates a new share under the specified account as described by request body. The share
resource includes metadata and properties for that share. It does not include a list of the
files contained by the share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
        :param file_share: Properties of the file share to create. Is either a model type or an IO
         type. Required.
:type file_share: ~azure.mgmt.storage.v2021_01_01.models.FileShare or IO
:param expand: Optional, used to create a snapshot. "snapshots" Default value is None.
:type expand: str or ~azure.mgmt.storage.v2021_01_01.models.PutSharesExpand
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01")) # type: Literal["2021-01-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileShare]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(file_share, (IO, bytes)):
_content = file_share
else:
_json = self._serialize.body(file_share, "FileShare")
request = build_create_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
expand=expand,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.create.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize("FileShare", pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize("FileShare", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}"} # type: ignore
@overload
async def update(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: _models.FileShare,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.FileShare:
"""Updates share properties as specified in request body. Properties not mentioned in the request
will not be changed. Update fails if the specified share does not already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param file_share: Properties to update for the file share. Required.
:type file_share: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def update(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> _models.FileShare:
"""Updates share properties as specified in request body. Properties not mentioned in the request
will not be changed. Update fails if the specified share does not already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param file_share: Properties to update for the file share. Required.
:type file_share: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def update(
self,
resource_group_name: str,
account_name: str,
share_name: str,
file_share: Union[_models.FileShare, IO],
**kwargs: Any
) -> _models.FileShare:
"""Updates share properties as specified in request body. Properties not mentioned in the request
will not be changed. Update fails if the specified share does not already exist.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param file_share: Properties to update for the file share. Is either a model type or a IO
type. Required.
:type file_share: ~azure.mgmt.storage.v2021_01_01.models.FileShare or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01")) # type: Literal["2021-01-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileShare]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(file_share, (IO, bytes)):
_content = file_share
else:
_json = self._serialize.body(file_share, "FileShare")
request = build_update_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.update.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("FileShare", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}"} # type: ignore
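    # Usage sketch (not part of the generated SDK; resource names are hypothetical).
    # The operations group is reached through the management client, e.g.:
    #   share = await client.file_shares.update(
    #       "my-rg", "mystorageacct", "myshare",
    #       _models.FileShare(share_quota=1024))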
@distributed_trace_async
async def get(
self,
resource_group_name: str,
account_name: str,
share_name: str,
expand: Literal["stats"] = "stats",
x_ms_snapshot: Optional[str] = None,
**kwargs: Any
) -> _models.FileShare:
"""Gets properties of a specified share.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param expand: Optional, used to expand the properties within share's properties. Known values
are "stats" and None. Default value is "stats".
:type expand: str
:param x_ms_snapshot: Optional, used to retrieve properties of a snapshot. Default value is
None.
:type x_ms_snapshot: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FileShare or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2021_01_01.models.FileShare
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01")) # type: Literal["2021-01-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.FileShare]
request = build_get_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
expand=expand,
x_ms_snapshot=x_ms_snapshot,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("FileShare", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
share_name: str,
x_ms_snapshot: Optional[str] = None,
**kwargs: Any
) -> None:
"""Deletes specified share under its account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param x_ms_snapshot: Optional, used to delete a snapshot. Default value is None.
:type x_ms_snapshot: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01")) # type: Literal["2021-01-01"]
cls = kwargs.pop("cls", None) # type: ClsType[None]
request = build_delete_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
x_ms_snapshot=x_ms_snapshot,
api_version=api_version,
template_url=self.delete.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}"} # type: ignore
@overload
async def restore( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
share_name: str,
deleted_share: _models.DeletedShare,
*,
content_type: str = "application/json",
**kwargs: Any
) -> None:
"""Restore a file share within a valid retention days if share soft delete is enabled.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param deleted_share: Required.
:type deleted_share: ~azure.mgmt.storage.v2021_01_01.models.DeletedShare
:keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
@overload
async def restore( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
share_name: str,
deleted_share: IO,
*,
content_type: str = "application/json",
**kwargs: Any
) -> None:
"""Restore a file share within a valid retention days if share soft delete is enabled.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param deleted_share: Required.
:type deleted_share: IO
:keyword content_type: Body Parameter content-type. Content type parameter for binary body.
Default value is "application/json".
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
@distributed_trace_async
async def restore( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
account_name: str,
share_name: str,
deleted_share: Union[_models.DeletedShare, IO],
**kwargs: Any
) -> None:
"""Restore a file share within a valid retention days if share soft delete is enabled.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive. Required.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only. Required.
:type account_name: str
:param share_name: The name of the file share within the specified storage account. File share
names must be between 3 and 63 characters in length and use numbers, lower-case letters and
dash (-) only. Every dash (-) character must be immediately preceded and followed by a letter
or number. Required.
:type share_name: str
:param deleted_share: Is either a model type or a IO type. Required.
:type deleted_share: ~azure.mgmt.storage.v2021_01_01.models.DeletedShare or IO
:keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
Default value is None.
:paramtype content_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None or the result of cls(response)
:rtype: None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-01-01")) # type: Literal["2021-01-01"]
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
cls = kwargs.pop("cls", None) # type: ClsType[None]
content_type = content_type or "application/json"
_json = None
_content = None
if isinstance(deleted_share, (IO, bytes)):
_content = deleted_share
else:
_json = self._serialize.body(deleted_share, "DeletedShare")
request = build_restore_request(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
content_type=content_type,
json=_json,
content=_content,
template_url=self.restore.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
restore.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/fileServices/default/shares/{shareName}/restore"} # type: ignore
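    # Usage sketch (hypothetical names): restoring a soft-deleted share takes the
    # deleted share's name and version, e.g.:
    #   await client.file_shares.restore(
    #       "my-rg", "mystorageacct", "myshare",
    #       _models.DeletedShare(deleted_share_name="myshare",
    #                            deleted_share_version="01D64EB9886F00C4"))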
| [
"[email protected]"
] | |
308603f8349a5cc5bfcc057fbb89eb84b03a9878 | f87c86c4dcb9192717a06ec41caa5c49b1fd0201 | /adv/yukata_cassandra.py | 8dc435fad72e2a8adcdfe667606037d43be82aff | [
"Apache-2.0"
] | permissive | Caledor/dl | 5377982f31f0c1890aff487d5eefc1ffb6f7115f | fc5e9b6855afb40c4c499a70dfa0e0503e8c8f05 | refs/heads/master | 2023-04-01T19:15:04.372285 | 2021-03-28T23:25:06 | 2021-03-28T23:25:06 | 277,363,765 | 0 | 0 | Apache-2.0 | 2020-07-05T18:49:36 | 2020-07-05T18:49:36 | null | UTF-8 | Python | false | false | 582 | py | from core.advbase import *
class Yukata_Cassandra(Adv):
comment = "s1 overdamage team buff not considered"
def prerun(self):
self.a3_att_mod = Modifier("a3_att", "att", "passive", 0.30, get=self.a3_get)
def a3_get(self):
return self.s2.sp == self.s2.charged
def post_run(self, end):
try:
average_echo_att = self.sum_echo_att / g_logs.counts["s"]["s1"]
self.comment += f"; {average_echo_att:.2f} avg overdamage att"
except (KeyError, AttributeError):
pass
variants = {None: Yukata_Cassandra}
| [
"[email protected]"
] | |
ad4dca58852c4ae0ee83f2c284671f8f824fb71c | 6db9f9c1d0c282935bc52d1acac309ea6f183173 | /first.py | 4036d95429901a70f82d11b3ebf491f212c82591 | [] | no_license | timsl/DeepRL | a572fdc37a24199cf132488316bfec703615ca17 | 7d7a53dfb2cdea6f3d898af8c173b8faa6033f30 | refs/heads/master | 2021-01-22T03:58:01.295851 | 2017-04-24T14:08:26 | 2017-04-24T14:08:26 | 81,486,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | import gym
import universe # register the universe environments
env = gym.make('flashgames.DuskDrive-v0')
env.configure(remotes=1) # automatically creates a local docker container
observation_n = env.reset()
while True:
action_n = [[('KeyEvent', 'ArrowUp', True)] for ob in observation_n] # your agent here
observation_n, reward_n, done_n, info = env.step(action_n)
#env.render()
| [
"[email protected]"
] | |
e2d0b71910a531d634540fbe04b274c9751b2a10 | 4943d1a9f7dcaca8f6f674ec03e8a4abb5f6ec45 | /custom/telegram.py | 2bf21e4732b3ca0250099a962a876b364634954d | [] | no_license | krlosMata/token-check | 43317503a19febc45a03e3139b27a6cd60a10d6a | af88dc3d82c5853c754551ad2e0ff76c769d6110 | refs/heads/master | 2020-09-02T23:16:22.035306 | 2019-11-09T11:54:37 | 2019-11-09T11:54:37 | 219,329,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | import telegram
from telegram.ext import Updater, CommandHandler
class Ctelegram:
def __init__(self, _token, _chatId):
self.chatId = _chatId
self.bot = telegram.Bot(_token)
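    # Usage sketch (token and chat id are hypothetical placeholders):
    #   bot = Ctelegram('123456:ABC-token', '-1001122334455')
    #   bot.sendMessage('price alert triggered')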
def sendMessage(self, message):
self.bot.send_message(self.chatId, message) | [
"[email protected]"
] | |
c37590dea59a0dbf01de377374de3ac608339a88 | 40acb96668d2154f087207ce744af280afa2b387 | /temps/main_tom.py | e67a30647113e9c6381c27fde07aaf52262657fe | [] | no_license | TomBugnon/hill_tononi_synthesis | 61a69e1911e1c74ae0f5f2b0745329c0660d6044 | c63bc6fc3c0406795bcfd7db3ccbf152aaf8e358 | refs/heads/master | 2020-12-30T12:10:33.169075 | 2017-06-07T20:55:29 | 2017-06-07T20:55:29 | 91,501,507 | 0 | 1 | null | 2017-05-16T20:28:52 | 2017-05-16T20:28:52 | null | UTF-8 | Python | false | false | 6,318 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Simulation of figures 3 & 4 in Hill-Tononi paper.
# Author: Pablo Martinez Cañada ([email protected])
# Modified by Keiko Fujii and Leonardo S. Barbosa
import nest
import nest.topology as tp
import numpy as np
import time
import math
import figure_4_plot_tom
reload(figure_4_plot_tom)
import figure_3_plot_nothal_tom
reload(figure_3_plot_nothal_tom)
import figure_3_plot_tom
reload(figure_3_plot_tom)
import test_scrambled_intact
reload(test_scrambled_intact)
sim_fig_3 = True
sim_fig_4 = False
#p_ratio = 1.
p_ratio = 2.
# vis_size = [10, 7]
vis_size = [40, 30]
for run in range(1,2):
#for run in range(1,21):
#for structured_input in [True, False]:
for structured_input in [True]:
#for structured_input in [False]:
# for scramble in [True, False]:
# for scramble in [True]:
for scramble in [False]:
if sim_fig_4:
Params = {
'Np': 40, # cells in the primary visual area
'Ns': 30, # cells in the secondary visual area
'visSize': 8.0, # visual angle (degrees)
'ret_rate': 30.0, # average firing rate of retina ganglion cells (spikes^(-1))
'ret_amplitude': 0.0, # amplitude of the sinusoidal poisson generator
# used for retina ganglion cells (spikes^(-1))
'temporal_frequency': 0.0, # frequency of the generator (Hz)
'threads': 12, # threads to use in NEST simulation
#'intervals': [1000.0, 1000.0, 7500.0], # keiko
#'intervals': [500.0], # Intervals (in ms) of the waking,transition
'intervals': [500.0, 500.0, 500.0, 500.0, 3000.0], # Intervals (in ms) of the waking,transition
# and sleep modes
'resolution': 1.0, # Simulation step (in ms)
'p_ratio': p_ratio,
}
# Run simulation of figure 4
figure_4_plot_tom.simulation(Params)
if sim_fig_3:
root_folder = '/Users/Tom/Documents/projects/network_simulations/HEAD_version/full/'
# root_folder = '/Users/Tom/Desktop/garbage'
#network ='network_full_keiko'
#network ='network_full_keiko2'
# network = 'network_full_leonardo'
network = 'network_full_tom'
#network = 'network_full_leonardo2'
# scramble network connections? only works with network_full_leonardo!
# scramble = True
# scramble = False
# structured_input = True
structured_input = False
ret_rate = 100.0
synapse_keiko = True
NMDA_keiko = False
edge_wrap = True
#edge_wrap = False
net_folder = '/%s_%s_edge_wrap_%d_Np_%d_Ns_%d_p_ratio_%d_NMDA_%s_synapse_%s' % \
(network,
'scrambled' if scramble else 'intact',
1*edge_wrap, vis_size[0], vis_size[1], p_ratio,
'keiko' if NMDA_keiko else 'default',
'keiko' if synapse_keiko else 'default')
if structured_input:
# vertical
lambda_dg = 2.0
#lambda_dg = 8.0
input_flag = True
data_folder = '/vertical_rate%d_run%d' % (int(ret_rate), run)
else:
lambda_dg = -1.0
# input_flag = False
input_flag = True
data_folder = '/%s_rate%d_run%d' % (
'random' if input_flag else 'spontaneous',
int(ret_rate), run)
Params = {
'dump_connections' : False, # Takes a lot of disk space and time! half gigabyte...
'load_connections' : False, # Load connections from files GENERATED IN A PREVIOUS RUN
'show_main_figure' : False,
'start_membrane_potential' : 120.0,
'end_membrane_potential' : 130.0,
'show_V4_num_conn_figure' : False,
'show_V4_connectivity_figure' : False,
'show_center_connectivity_figure' : False,
'save_recorders' : True,
#'save_recorders' : False,
'network' : network,
#'Np': 8,
#'Ns': 4,
'Np': 40,
'Ns': 30,
'visSize': 8.0,
'ret_rate': ret_rate,#20.0,
'ret_amplitude': 0.0, # random
'temporal_frequency': 2.0, # (Hz)
'spatial_frequency' : 0.5, # (cpd)
'threads': 12,
#'intervals': [100.0, 250.0, 650.0], # original
#'intervals': [5000.0], # keiko
'intervals': [2000.0], # leonardo
'resolution': 1.0,
'phi_dg': 0.0, # vertical
#'phi_dg': 0.5*np.pi, # horizontal
'edge_wrap' : edge_wrap,
'scrambled' : scramble, # scramble the connectivity: no invariance for horizontal/vertical stimulus
'lambda_dg': lambda_dg, # visSize / number_of_lines
'input_flag': input_flag,
'root_folder': root_folder,
'net_folder': net_folder,
'data_folder': data_folder,
# for debugin
'p_ratio': p_ratio,
'dry_run': False,
#'dry_run': True
'plot_all_regions' : True,
'synapse_keiko' : synapse_keiko,
'NMDA_keiko' : NMDA_keiko
}
# Run simulation of figure 3
figure_3_plot_tom.simulation(Params)
# test_scrambled_intact.simulation(Params)
| [
"[email protected]"
] | |
4c1950d06935ea7fd2a092991a9f0c3bd7238253 | 3a76de34371839c5b86f9fdbf216204c558cca18 | /venv/bin/easy_install | 4e19c72d5bdcafcda271ae024f8d6b3fd1587cda | [] | no_license | aaogoltcov/Selenium | a3637bb5d9adaaa3a706ed6c6e8a76c7faa72a4b | f8bb741e139668e3ea5fb6901c0eecceed132667 | refs/heads/master | 2022-05-20T06:36:36.983648 | 2020-04-17T13:01:01 | 2020-04-17T13:01:01 | 256,491,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | #!/Users/alexeyogoltsov/PycharmProjects/SeleniumTests/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
cf04ec4ee20713b3c77053a60735d4274bcb3066 | 3b50b3663abd4c6b01ae667f4bc6aca2ecf3bbba | /app_payment/urls.py | 4e0fb72475aa5786d1ee7dd3a8127de22400e284 | [] | no_license | DSIW/sdf | 394d48da286fa3ac311b44cf390a0f94b75b5cb0 | 655ecadf59cadf44086322932328d91d65f7e9e4 | refs/heads/develop | 2021-01-10T17:57:04.658816 | 2016-01-18T19:41:09 | 2016-01-18T19:41:09 | 44,266,686 | 2 | 3 | null | 2016-01-17T14:29:17 | 2015-10-14T18:04:27 | JavaScript | UTF-8 | Python | false | false | 620 | py | from django.conf.urls import *
from . import views
urlpatterns = [
url(r'^offers/(?P<id>[0-9]+)/start_payment/$', views.start_paypal_payment, name='start_paypal_payment'),
url(r'^payments/(?P<id>[0-9]+)/redirection/$', views.paypal_redirection, name='paypal_redirection'),
url(r'^payments/(?P<id>[0-9]+)/success/$', views.paypal_complete, name='payment-success'),
url(r'^payments/(?P<id>[0-9]+)/cancel/$', views.paypal_abort, name='payment-cancel'),
url(r'^payments/(?P<id>[0-9]+)/rate/$', views.rate_seller, name='rate-seller'),
url(r'^paypal/ipn-api/', include('paypal.standard.ipn.urls')),
]
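# Reversal sketch (hypothetical pk): reverse('payment-success', kwargs={'id': 42})
# resolves to payments/42/success/ under whatever prefix includes this module.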
| [
"[email protected]"
] | |
806723070baf939c6680b67d3f641580da57af0b | fb242bc6565251d69dabb33fae91ac36f9ff95f3 | /cloudstack/src/python/dnsinfo.py | 5e2ae0e2fa727b3f0837999cb279a04b02a1ded4 | [
"MIT"
] | permissive | OpenSciViz/cloud | b12aaeefe971936d75be2c110aa278e5f25d33de | 50cba2c3869790e558171b7d10b5d84328046383 | refs/heads/master | 2022-05-02T08:14:55.542360 | 2022-03-11T21:20:29 | 2022-03-11T21:20:29 | 66,280,280 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,149 | py | #!/usr/bin/env python
"""
DNSinfo module should support forward and reverse lookups of one or more IPs
and domain names.
"""
from __future__ import print_function
import pprint, socket, sys, timeit
if(sys.version_info.major < 3):
import six # allows python3 syntax in python2
def dnsinfo(ip='10.101.8.92'):
"""
DNSinfo returns the domain name(s) found for an individual IP
"""
dns = None
try:
dns = socket.gethostbyaddr(ip)
  except socket.error: # narrower than a bare except; covers herror/gaierror lookup failures
print('oops no dns entry for: ', ip)
print(ip, dns)
return dns
#end dnsinfo
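# For reference: socket.gethostbyaddr() returns a (hostname, aliaslist,
# ipaddrlist) triple, so dns[0] is the primary domain name for the IP.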
def alldns(dnshash={}, subnet=[10,101,8]):
"""
Alldns returns a hash dict. of all domain-names for each IP (key) in a subnet.
  The subnet should be provided as a list. TBD: check whether the subnet is class A
or B or C by using len(subnet)
"""
i = 2
ips = str(subnet[0]) + '.' + str(subnet[1]) + '.' + str(subnet[2]) + '.'
while( i < 255):
ip = ips + str(i)
dns = dnsinfo(ip) ; dnshash[ip] = dns
i += 1
#endwhile
#endmain
if __name__ == '__main__':
info = {} ; net = [10,101,8]
# main() ; # main(info)
alldns(info, net)
# pp = pprint.PrettyPrinter(indent=2)
# pprint.pprint(info)
| [
"[email protected]"
] | |
1914ad0b73c9d0629afa0ba9729d9141ed746d6b | ebd78e20eacc840e52ebf2f0c44a779bc3d4ec42 | /blog/templatetags/blog_tags.py | c2077360adb572fe872e8f1cbc59941277371a55 | [] | no_license | Timehsw/djangoblogproject | ed22bfbf188881de27706475785d680cc3cfaafc | 019f0e1f2649b0f3d878498141d89e972fe67eaa | refs/heads/master | 2021-07-10T11:06:18.102644 | 2017-10-14T12:26:25 | 2017-10-14T12:26:25 | 104,549,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # -*- coding: utf-8 -*-
'''
Created by hushiwei on 2017/9/23.
Holds the blog app's custom template tags.
'''
from django import template
from django.db.models.aggregates import Count
from ..models import Post,Category,Tag
register=template.Library()
@register.simple_tag
def get_recent_posts(num=5):
return Post.objects.all().order_by('-created_time')[:num]
@register.simple_tag
def archives():
return Post.objects.dates('created_time','month',order='DESC')
@register.simple_tag()
def get_categories():
    # Count tallies the number of posts under each category; it takes the name
    # of the model to count as its argument.
return Category.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0)
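# Template usage sketch (simple_tag assignment needs Django >= 1.9; the load
# name matches this module's filename):
#   {% load blog_tags %}
#   {% get_recent_posts 3 as recent %}
#   {% get_categories as categories %}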
@register.simple_tag()
def get_tags():
return Tag.objects.annotate(num_posts=Count('post')).filter(num_posts__gt=0) | [
"[email protected]"
] | |
173c61dcde3d13a5d5b69dda6a1ef55260f9fd73 | 45ceea0823a81f47a996e598ecb08c53c68ef68d | /src/test/test_main.py | e6ba06d46257976c03f402443105979822761d10 | [] | no_license | mjapon/acme-test | aae94662fadda97618778ec7fad2800257d8c252 | 0cfb8d83499a332fa1ab495a6d4271018b279c51 | refs/heads/master | 2023-08-05T12:02:29.646411 | 2021-09-18T16:46:12 | 2021-09-18T16:46:12 | 407,908,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | import pytest
from main import compute_payment, get_payment_by_day
@pytest.mark.parametrize(
"input, expected",
[
('RENE=MO10:00-12:00,TU10:00-12:00,TH01:00-03:00,SA14:00-18:00,SU20:00-21:00',
'The amount to pay RENE is: 215 USD'),
('ASTRID=MO10:00-12:00,TH12:00-14:00,SU20:00-21:00', 'The amount to pay ASTRID is: 85 USD')
]
)
def test_payment_expected(input, expected):
balance = compute_payment(input)
assert balance == expected
@pytest.mark.parametrize(
"worked_day, expected",
[
('MO10:00-12:00', 30),
('TH12:00-14:00', 30)
]
)
def test_payment_expected_by_day(worked_day, expected):
day_payment = get_payment_by_day(worked_day)
assert day_payment == expected
| [
"[email protected]"
] | |
ed132bc2b3a89eb0b02afa38e70f953619164897 | 1ff7c07d526390c3b2a74ec0e1a860299dd45c49 | /online_course/4.1.advanced.feature.slice.py | 33b94a8f2076ad4186bab4b3683e96341bdb7f04 | [] | no_license | strange-uncle/python-study | d25741c674d3a04df4b054edb8a27611cd0deeb3 | 8d8c1d53bd7f8574d5b95317cf03147df5a14edc | refs/heads/master | 2021-07-12T21:23:46.874546 | 2020-05-25T14:46:30 | 2020-05-25T14:46:30 | 142,584,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
l = list(range(20))
print(l)
# Note: list indices start at 0
# In l[x:y] the end index y is exclusive
print(l[2:5])
print(l[2:])
print(l[:5])
print('Negative indices count from the end; the last element is at -1')
print(l[-3:])
print(l[-2:-1])
print('Specify a step size')
print(l[-6::2])
print('The start index must come before the end index, otherwise the slice is empty ([]), as shown:')
print(l[6:2])
print(l[-2:-6])
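# Two more common slice idioms:
print('Full copy and reversal')
print(l[:])    # shallow copy of the whole list
print(l[::-1]) # reversed copy via a negative step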
| [
"[email protected]"
] | |
93dcd149dcc82638dc801ae8162ab4b5d62ecea7 | d78d9361ca266295a9b20050025f311044a5c73a | /env/lib/python3.6/enum.py | 54a601b4e4bb0619329c6d404e163a3b3b5c21f6 | [] | no_license | diptinayan/Project1 | 8a2b61d29bcdc41ece1cb5f945ab42574d0fa375 | 822d7ce0da35a10232039e91931c6ab4553d077a | refs/heads/master | 2022-10-18T21:38:59.943783 | 2018-08-24T17:54:10 | 2018-08-24T17:54:10 | 145,874,360 | 0 | 1 | null | 2022-10-01T08:39:34 | 2018-08-23T15:40:29 | Python | UTF-8 | Python | false | false | 73 | py | C:/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/enum.py | [
"[email protected]"
] | |
8d00210f9368fa31f8f1faf90b5566dc764f3616 | 0415162c40a1e1af3bdf2bbc6b65457f5c0346a3 | /dashboard/urls.py | a3216d318d1ee482aeb1e5f67b6c35834726a634 | [] | no_license | rhrokib/code_aqua | 413316ecbe0e8cfde48bd5af4387f517403d2459 | 04c3e58072727a613ea918cc804bd6b23873828c | refs/heads/main | 2023-05-13T13:33:55.223096 | 2021-06-05T04:33:29 | 2021-06-05T04:33:29 | 356,197,318 | 0 | 1 | null | 2021-06-05T04:33:30 | 2021-04-09T08:33:26 | Python | UTF-8 | Python | false | false | 356 | py | from django.urls import path
from . import views
from . import forms
urlpatterns = [
path('', views.dashboard, name='dashboard'),
path('create', views.NewBudgetCreate.as_view(), name='budget_create'),
path('update/<str:pk>', views.BudgetUpdate.as_view(), name='budget_update'),
path('daily_spend', views.daily_spend, name='daily_spend'),
] | [
"[email protected]"
] | |
1bc320f2d0646971d9ce6eff7a3c4c40082dce9b | 08acec95bd1dc302633fadf7b47cd8ba3b749ff3 | /day-2018-04-02/myproject/venv/lib/python2.7/site-packages/zope/app/form/browser/metadirectives.py | 464769b9cd251dcf2a1e20c1e58f456edd5888c6 | [] | no_license | WeAreHus/StudyRecord | 74a312103ad2c037de23534160fa42d6a68ad174 | 047b7d9dcbee7c01ad2e8b888b160e66dfa9012d | refs/heads/master | 2022-12-16T14:47:15.984939 | 2019-04-29T15:16:15 | 2019-04-29T15:16:15 | 127,758,387 | 2 | 1 | null | 2022-11-22T02:50:30 | 2018-04-02T13:15:07 | Python | UTF-8 | Python | false | false | 9,031 | py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Form and Widget specific 'browser' ZCML namespace interfaces
$Id$
"""
__docformat__ = 'restructuredtext'
from zope.interface import Interface
from zope.configuration.fields import GlobalObject, GlobalInterface
from zope.configuration.fields import Tokens, Path, Bool, PythonIdentifier
from zope.configuration.fields import MessageID
from zope.schema import Text, TextLine, Id
from zope.security.zcml import Permission
from zope.browsermenu.field import MenuField
class ICommonInformation(Interface):
"""
Common information for all successive directives
"""
name = TextLine(
title=u"Name",
description=u"The name of the generated view.",
required=True
)
schema = GlobalInterface(
title=u"Schema",
description=u"The schema from which the form is generated.",
required=True
)
for_ = GlobalInterface(
title=u"Interface",
description=u"""
The interface this page (view) applies to.
The view will be for all objects that implement this
interface. The schema is used if the for attribute is not
specified.
        If the for attribute is specified, then the object's views must
implement or be adaptable to the schema.""",
required=False
)
permission = Permission(
title=u"Permission",
description=u"The permission needed to use the view.",
required=True
)
layer = GlobalInterface(
title=u"Layer",
description=u"The later the view is in. Default: 'default'",
required=False
)
template = Path(
title=u"Template",
description=u"An alternate template to use for the form.",
required=False
)
class_ = GlobalObject(
title=u"Class",
description=u"""
A class to provide custom widget definitions or methods to be
used by a custom template.
This class is used as a mix-in class. As a result, it needn't
subclass any special classes, such as BrowserView.""",
required=False
)
class ICommonFormInformation(ICommonInformation):
"""
Common information for browser forms
"""
label = MessageID(
title=u"Label",
description=u"A label to be used as the heading for the form.",
required=False
)
menu = MenuField(
title=u"The browser menu to include the form in.",
description=u"""
Many views are included in menus. It's convenient to name the
menu in the page directive, rather than having to give a
separate menuItem directive.""",
required=False
)
title = MessageID(
title=u"Menu title",
description=u"The browser menu label for the form.",
required=False
)
fields = Tokens(
title=u"Fields",
description=u"""
Here you can specify the names of the fields you wish to display.
The order in this list is also the order the fields will
be displayed in. If this attribute is not specified, all schema fields
will be displayed in the order specified in the schema itself.""",
required=False,
value_type=PythonIdentifier()
)
class ICommonAddInformation(Interface):
"""
Common information for add forms
"""
content_factory = GlobalObject(
title=u"Content factory",
description=u"""
An object to call to create new content objects.
This attribute isn't used if a class is specified that
implements createAndAdd.""",
required=False
)
content_factory_id = Id(
title=u"Content factory id",
description=u"A factory id to create new content objects",
required = False,
)
arguments = Tokens(
title=u"Arguments",
description=u"""
A list of field names to supply as positional arguments to the
factory.""",
required=False,
value_type=PythonIdentifier()
)
keyword_arguments = Tokens(
title=u"Keyword arguments",
description=u"""
A list of field names to supply as keyword arguments to the
factory.""",
required=False,
value_type=PythonIdentifier()
)
set_before_add = Tokens(
title=u"Set before add",
description=u"""
A list of fields to be assigned to the newly created object
before it is added.""",
required=False,
value_type=PythonIdentifier(),
)
set_after_add = Tokens(
title=u"Set after add",
description=u"""
A list of fields to be assigned to the newly created object
after it is added.""",
required=False,
value_type=PythonIdentifier()
)
class IFormDirective(ICommonFormInformation):
"""
Define an automatically generated form.
    The form directive does not require the data to be stored in its context,
    but leaves the storing procedure to a method.
"""
class_ = GlobalObject(
title=u"Class",
description=u"""
A class to provide the `getData()` and `setData()` methods or
completely custom methods to be used by a custom template.
This class is used as a mix-in class. As a result, it needn't
subclass any special classes, such as BrowserView.""",
required=True
)
class IEditFormDirective(ICommonFormInformation):
"""
Define an automatically generated edit form
The editform directive creates and registers a view for editing
an object based on a schema.
"""
class ISubeditFormDirective(ICommonInformation):
"""
Define a subedit form
"""
label = TextLine(
title=u"Label",
description=u"A label to be used as the heading for the form.",
required=False
)
fulledit_path = TextLine(
title=u"Path (relative URL) to the full edit form",
required=False
)
fulledit_label = MessageID(
title=u"Label of the full edit form",
required=False
)
class IAddFormDirective(ICommonFormInformation, ICommonAddInformation):
"""
Define an automatically generated add form
The addform directive creates and registers a view for adding an
object based on a schema.
Adding an object is a bit trickier than editing an object, because
the object the schema applies to isn't available when forms are
being rendered. The addform directive provides a customization
interface to overcome this difficulty.
See zope.app.form.browser.interfaces.IAddFormCustomization.
"""
description = MessageID(
title=u"A longer description of the add form.",
description=u"""
A UI may display this with the item or display it when the
user requests more assistance.""",
required=False
)
class ISchemaDisplayDirective(ICommonFormInformation):
"""
Define an automatically generated display form.
The schemadisplay directive creates and registers a view for
displaying an object based on a schema.
"""
title = MessageID(
title=u"The browser menu label for the edit form",
description=u"This attribute defaults to 'Edit'.",
required=False
)
class IWidgetSubdirective(Interface):
"""Register custom widgets for a form.
This directive allows you to quickly generate custom widget directives for
a form.
Besides the two required arguments, field and class, you can specify any
amount of keyword arguments, e.g. style='background-color:#fefefe;'.
The keywords will be stored as attributes on the widget instance. To see
which keywords are sensible, you should look at the code of the specified
widget class.
"""
field = TextLine(
title=u"Field Name",
description=u"""
The name of the field/attribute/property for which this widget will be
used.""",
required=True,
)
class_ = GlobalObject(
title=u"Widget Class",
description=u"""The class that will create the widget.""",
required=False,
)
# Arbitrary keys and values are allowed to be passed to the CustomWidget.
IWidgetSubdirective.setTaggedValue('keyword_arguments', True)
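# Example ZCML usage sketch (hypothetical schema/widget names):
#   <browser:editform schema=".interfaces.IDocument" name="edit.html"
#                     permission="zope.ManageContent">
#     <widget field="description" class=".widgets.FancyTextWidget"
#             style="background-color:#fefefe;"/>
#   </browser:editform>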
| [
"[email protected]"
] | |
dd5732d78973606d48ef3676f52ba5c7835bca04 | 59412824148807a9ed029619ab52c7cc2e50b91a | /check_elidb_upf_backlog.py | 61a784d4bc583df88fe6af0a009bd189da217463 | [] | no_license | drain166/Nagios-Scripts | 3ebb75c31c67a73b4570a812b56bbe8c162e19f4 | 98c4357c33955780943bf7f5e3cd6f411ed9dd8d | refs/heads/master | 2021-01-01T06:17:34.135873 | 2014-06-06T16:14:41 | 2014-06-06T16:14:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,457 | py | #!/usr/bin/python
import re
import sys
import glob
import commands
### Set these ###################################
threshold = 100 #
upf_dir = '/app/tnstaf/cedfiles/elidb-lookup' #
cfg_file = '/app/tnstaf/cfg/elidb-local.cfg' #
log_file = '/app/tnstaf/logs/elidb-updated.log' #
#################################################
# Get current dist set name
dist_set_param = commands.getstatusoutput("grep elidb_dist_set " + cfg_file)
current_dist_set = dist_set_param[1].split('-')[1]
# Search UPF directory for files matching current dist set. Then sort the list and extract the newest.
searchstring = upf_dir + "/*/*" + current_dist_set.strip() + "*.done"
filelist = glob.glob(searchstring)
ids = [i.split('-')[4] for i in filelist]
ids = [int(x) for x in ids]
ids.sort()
newest = ids[-1]
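# Assumption about the .done file layout: the UPF sequence number is the fifth
# dash-separated token (index 4) of the path, which is what split('-')[4]
# above relies on.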
# Find last loaded UPF.
logfile = re.findall('.*successfully loaded.*', open(log_file, 'r').read())
last_loaded = logfile[-1].split('-')[7]
# Determine drift and set it to 0 if the result is negative (can happen if the updated proc is faster than this script).
drift = int(newest) - int(last_loaded)
if drift < 0:
drift = 0
# Evaluate drift and exit accordingly.
if drift > threshold:
print "WARNING: ELIDB Updated is %s UPFs behind (threshold is %s)." % (drift,threshold)
sys.exit(1)
else:
print "OK: ELIDB Updated is %s UPFs behind (threshold is %s)." % (drift,threshold)
sys.exit(0)
| [
"[email protected]"
] | |
c0a350cbef560a6e15b5006690002b05789143b8 | 73a06c33e686f7e01055fdf8d7c69eca901fe40d | /indiaxrussia/day2/path.py | ba8cbdebfc459a6a593b582daa9d7d7b7335fdc7 | [] | no_license | hashkanna/codeforces | cb8eee2d19e354b32b84208920c8a91995da050f | fc56cb9caaa52aac157ba9e4b717c13d8f51ce1f | refs/heads/master | 2021-06-27T10:37:01.906835 | 2019-01-16T15:55:06 | 2019-01-16T15:55:06 | 112,921,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,732 | py | from collections import defaultdict
from collections import Counter
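# The dfs below records discovery/finish times (tin/tout) and each vertex's
# parent; the main code then reconstructs the a->b path by walking the
# parents chain backwards from b.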
def dfs(u,p):
# visited[u]=True
global timer
# global cycle
colors[u]=1
parents[u]=p
tin[u]=timer
timer+=1
# cc[u]=cc_num
# print(u)
for v in sorted(g[u]):
# if not visited[v]:
# if colors[v]==1:
# cycle=True
# return
if colors[v]==0:
dfs(v,u)
colors[u]=2
tout[u]=timer
timer+=1
g=defaultdict(list)
n,a,b=map(int, input().split(' '))
a-=1
b-=1
for edge in range(n):
g[edge]=[ind for ind,num in enumerate(list(map(int, input().split(' ')))) if num==1]
# print(g[n])
# a,b=map(int, input().split(' '))
# g[a].append(b)
# g[inp[1]-1].append(inp[0]-1)
# visited=[False]*n
colors=[0]*n
tin=[-1]*n
tout=[-1]*n
parents=[-2]*n
# cc=[-1]*n
# cc_num=0
timer=0
# for i in range(n):
if a==b:
print(0)
print(a+1)
else:
    if colors[a]==0: # a was already converted to 0-based above
# if not visited[i]:
# cc_num+=1
dfs(a,-2)
# if cycle==True:
# break
# dfs(0,0)
# cc_size=Counter(cc)
# print(*[cc_size[cc[i]] for i in range(n)])
# for i in range(n):
# print(i+1, parents[i],tin[i],tout[i])
path=[]
while parents[b]!=-2:
# print('kanna')
path.append(b+1)
b=parents[b]
if b==a:
# print('done')
path.append(a+1)
break
if path==[]:
print(-1)
else:
print(len(path)-1)
print(*path[::-1])
# print(tout)
# for ind,num in enumerate(sorted(tout)):
# print(ind, end=' ')
# if cycle==True:
# print('NO')
# else:
# print('YES')
# print(*[i[0] for i in sorted(enumerate(tout), key=lambda x:-x[1])])
| [
"[email protected]"
] | |
dcb51eed179018baa2f80a7d0e9555486ecaece9 | ea4069901aa42076f3edb3ebfd3d3fcbdd4d6635 | /project1/project1/urls.py | 3079c5c0202336176181979d83c9a8f332a8c97a | [] | no_license | sunnystory/RestaurantWeb_mulcam | cc04dcb7a1d3e897811f8aa76ebc9f68c53855f8 | db5efcea47f56a9fd747bd8312c4222fde451e6f | refs/heads/master | 2022-12-04T18:07:12.904533 | 2020-08-24T08:56:30 | 2020-08-24T08:56:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 827 | py | """project1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# import map1app.views
urlpatterns = [
path('admin/', admin.site.urls),
path('map1app/', include('map1app.urls')),
]
| [
"[email protected]"
] | |
145e6696b9b729046af44b249ac01f731bbd07ec | ed3d9adbca4c55ea53227b6a3b5d81d30d7313cb | /gcp/extract/lib/bundles.py | fbc60eedc56d4b94d8ab93c38b52eb3e1125b8f9 | [
"MIT"
] | permissive | delgadom/prospectus-tools | 112671bafa8a95df002b4dabdc3397b8f9a29ccf | 05adc34a54e0296af5e7516f3d5b69b53a53e989 | refs/heads/master | 2021-01-15T22:38:59.866068 | 2016-05-28T22:28:45 | 2016-05-28T22:28:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,752 | py | # -*- coding: utf-8 -*-
################################################################################
# Copyright 2014, Distributed Meta-Analysis System
################################################################################
"""
This file provides methods for extracting data from impact bundles (.nc4 files)
"""
__copyright__ = "Copyright 2014, Distributed Meta-Analysis System"
__author__ = "James Rising"
__credits__ = ["James Rising"]
__maintainer__ = "James Rising"
__email__ = "[email protected]"
__status__ = "Production"
__version__ = "$Revision$"
# $Source$
import os, csv
from netCDF4 import Dataset  # used by iterate_bundle() below
def get_yearses(fp, yearses):
    raise NotImplementedError('TODO')  # old CSV implementation preserved in the docstring below
"""
if yearses[0][0] < 1000:
# Just heads and tails
reader = csv.reader(fp)
reader.next()
values = [float(row[1]) for row in reader]
results = []
for years in yearses:
if years[0] > 0:
results.append(values[years[0]:years[1]])
elif years[1] == 0:
results.append(values[years[0]:])
else:
results.append(values[years[0]:years[1]])
return results
results = []
reader = csv.reader(fp)
yearses_ii = 0
found = False
values = []
for row in reader:
if not found:
try:
if int(row[0]) >= yearses[yearses_ii][0]:
found = True
except:
pass
if found:
if row[1] != 'NA':
values.append(float(row[1]))
if int(row[0]) == yearses[yearses_ii][1]:
found = False
results.append(values)
values = []
yearses_ii += 1
if found:
results.append(values)
return results
"""
def get_years(fp, years, column=2):
    raise NotImplementedError('TODO')  # old CSV implementation preserved in the docstring below
"""
results = []
reader = csv.reader(fp)
reader.next()
years_ii = 0
for row in reader:
while years_ii < len(years) and int(row[0]) > years[years_ii]:
results.append(None)
years_ii += 1
if years_ii == len(years):
break
if int(row[0]) == years[years_ii]:
if row[column-1] != 'NA':
results.append(float(row[column-1]))
else:
results.append(None)
years_ii += 1
else:
results.append(None) # row[0] < year
return results
"""
def iterate_bundle(targetdir, filename, column='debased'):
reader = Dataset(os.path.join(targetdir, filename), 'r', format='NETCDF4')
regions = reader.variables['regions'][:]
years = reader.variables['years'][:]
for ii in range(len(regions)):
yield regions[ii], years, reader.variables[column][:, ii]
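# Usage sketch (hypothetical paths; needs the netCDF4 package installed):
#   for region, years, values in iterate_bundle('/tmp/median', 'interpolated.nc4'):
#       print(region, values[:5])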
| [
"[email protected]"
] | |
be1cbd8e6125aa0ef337267c5865735cc9869c6a | bb7dfc95d7e02549a316995280475e2255a959b6 | /module/blenderQtIntegrationComm.py | 5830758cf4cdc781d6efeea06cddb743044298f9 | [] | no_license | subing85/packages_test | 508df542efecaed7a53ca9356f53b4fb290f3149 | 50a202e0bc6bfb199363622d07cbc858bcd277f2 | refs/heads/master | 2021-04-12T12:23:49.020561 | 2018-03-30T20:21:50 | 2018-03-30T20:21:50 | 126,643,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,296 | py | bl_info = { 'name': 'Qt Integration',
'author': 'Vincent Gires',
'description': 'Qt Integration',
'version': (0, 0, 0),
'blender': (2, 7, 9),
'location': '',
'warning': '',
'wiki_url': '',
'tracker_url': '',
'category': 'Qt'
}
import bpy
import sys
import os
import logging

# Qt binding import (assumption: PyQt5, the binding used by the original
# qt_integration add-on; use PySide2 instead if that is what is available).
from PyQt5 import QtWidgets, QtCore
logger = logging.getLogger(__name__)
class QtWindowEventLoop(bpy.types.Operator):
bl_idname = 'screen.qt_event_loop'
bl_label = 'PyQt Event Loop'
_timer = None
_close = None
def __init__(self, widget):
self._widget = widget
def close(self):
self._close = True
def modal(self, context, event):
wm = context.window_manager
if self._close:
logger.debug('cancel modal operator')
wm.event_timer_remove(self._timer)
return {'CANCELLED'}
else:
logger.debug('process the events for Qt window')
self.event_loop.processEvents()
self.app.sendPostedEvents(None, 0)
return {'PASS_THROUGH'}
def execute(self, context):
logger.debug('execute operator')
self.app = QtWidgets.QApplication.instance()
# instance() gives the possibility to have multiple windows
# and close it one by one
if not self.app:
self.app = QtWidgets.QApplication(['blender'])
self.event_loop = QtCore.QEventLoop()
self.widget = self._widget()
self.widget.context = context
self.widget.destroyed.connect(self.close)
logger.debug(self.app)
logger.debug(self.widget)
# run modal
wm = context.window_manager
self._timer = wm.event_timer_add(1/120, context.window)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def register():
bpy.utils.register_module(__name__)
from qt_integration import example
bpy.utils.register_class(example.CustomWindowOperator)
bpy.utils.register_class(example.QtPanelExample)
def unregister():
bpy.utils.unregister_module(__name__)
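# Usage sketch (assumption -- mirrors the referenced qt_integration.example):
#   class CustomWindowOperator(QtWindowEventLoop):
#       bl_idname = 'screen.custom_window'
#       bl_label = 'Custom window'
#       def __init__(self):
#           super().__init__(QtWidgets.QWidget)  # any QWidget subclass works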
if __name__ == '__main__':
register()
| [
"[email protected]"
] | |
00333ebb89e9f206b7a0a0e04c031045f5f683cd | 7949f96ee7feeaa163608dbd256b0b76d1b89258 | /toontown/coghq/DistributedGridAI.py | b09c5b13b7be520586cc519cc09d06bf0bc7ed04 | [] | no_license | xxdecryptionxx/ToontownOnline | 414619744b4c40588f9a86c8e01cb951ffe53e2d | e6c20e6ce56f2320217f2ddde8f632a63848bd6b | refs/heads/master | 2021-01-11T03:08:59.934044 | 2018-07-27T01:26:21 | 2018-07-27T01:26:21 | 71,086,644 | 8 | 10 | null | 2018-06-01T00:13:34 | 2016-10-17T00:39:41 | Python | UTF-8 | Python | false | false | 10,497 | py | # File: t (Python 2.4)
from CrateGlobals import *
from otp.level import DistributedEntityAI
from direct.directnotify import DirectNotifyGlobal
class DistributedGridAI(DistributedEntityAI.DistributedEntityAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGridAI')
def __init__(self, level, entId):
self.initialized = 0
DistributedEntityAI.DistributedEntityAI.__init__(self, level, entId)
self.activeCellList = []
def delete(self):
DistributedEntityAI.DistributedEntityAI.delete(self)
del self.activeCellList
def generate(self):
DistributedEntityAI.DistributedEntityAI.generate(self)
def initializeGrid(self):
if not self.initialized:
self.objPos = { }
self.gridCells = [
None] * self.numRow
for i in range(len(self.gridCells)):
self.gridCells[i] = [
None] * self.numCol
for j in range(len(self.gridCells[i])):
self.gridCells[i][j] = []
self.initialized = 1
def addActiveCell(self, cell):
self.activeCellList.append(cell)
def getObjPos(self, objId):
objPos = self.objPos.get(objId, None)
if objPos:
(row, col) = objPos
if row >= 0 and row < self.numRow and col >= 0 and col < self.numCol:
return [
(col + 1) * self.cellSize,
(row + 1) * self.cellSize,
0]
else:
self.notify.debug('row/col out of range %s/%s' % (row, col))
else:
self.notify.debug("didn't have record of obj")
def addObjectByPos(self, objId, pos, width = 1):
if not self.initialized:
self.initializeGrid()
if self.objPos.get(objId, None):
return 1
x = pos[0]
y = pos[1]
col = min(int(x / self.cellSize), self.numCol - width)
row = min(int(y / self.cellSize), self.numRow - width)
self.notify.debug('attempt add %d at %s, row,col = %d,%d' % (objId, pos, row, col))
while col >= 0 and col < self.numCol:
while row >= 0 and row < self.numRow:
if self.addObjectByRowCol(objId, row, col):
return 1
                row += 2 # step two cells at a time; each object occupies a 2x2 block
row = 0
col += 2
self.notify.debug('requestObjPos: row/col out of range %s/%s' % (row, col))
row = min(row, self.numRow)
row = max(0, row)
col = min(col, self.numRow)
col = max(0, col)
return self.addObjectByRowCol(objId, row, col)
def addObjectByRowCol(self, objId, row, col):
if row >= 0 and row < self.numRow - 1 and col >= 0 and col < self.numCol - 1:
self.notify.debug('adding obj %s to grid cell %s,%s' % (objId, row, col))
self.gridCells[row][col].append(objId)
self.gridCells[row + 1][col].append(objId)
self.gridCells[row][col + 1].append(objId)
self.gridCells[row + 1][col + 1].append(objId)
self.objPos[objId] = [
row,
col]
self._DistributedGridAI__setChangedActiveCells(onList = [
[
row,
col],
[
row + 1,
col],
[
row,
col + 1],
[
row + 1,
col + 1]], objId = objId)
return 1
self.notify.debug("couldn't obj to grid cell %s,%s" % (row, col))
return 0
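    # Note: every object occupies a 2x2 block of cells -- the add/remove code
    # above and the move checks below all touch (row, col) through
    # (row + 1, col + 1).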
def removeObject(self, objId):
objPos = self.objPos.get(objId)
if not objPos:
return None
(row, col) = objPos
self.notify.debug('removing obj %s from %s, %s' % (objId, row, col))
self.gridCells[row][col].remove(objId)
self.gridCells[row + 1][col].remove(objId)
self.gridCells[row][col + 1].remove(objId)
self.gridCells[row + 1][col + 1].remove(objId)
del self.objPos[objId]
self._DistributedGridAI__setChangedActiveCells(offList = [
[
row,
col],
[
row + 1,
col],
[
row,
col + 1],
[
row + 1,
col + 1]], objId = objId)
def checkMoveDir(self, objId, h):
if h > 225 and h < 315:
return self.checkMove(objId, 0, 1)
elif h > 45 and h < 135:
return self.checkMove(objId, 0, -1)
elif h < 45 or h > 315:
return self.checkMove(objId, 1, 0)
elif h > 135 and h < 225:
return self.checkMove(objId, -1, 0)
def doMoveDir(self, objId, h):
if h > 225 and h < 315:
return self.doMove(objId, 0, 1)
elif h > 45 and h < 135:
return self.doMove(objId, 0, -1)
elif h < 45 or h > 315:
return self.doMove(objId, 1, 0)
elif h > 135 and h < 225:
return self.doMove(objId, -1, 0)
def checkPush(self, objId, side):
if side == 0:
return self.checkMove(objId, 0, -1)
elif side == 1:
return self.checkMove(objId, 0, 1)
elif side == 2:
return self.checkMove(objId, -1, 0)
elif side == 3:
return self.checkMove(objId, 1, 0)
def doPush(self, objId, side):
if side == 0:
return self.doMove(objId, 0, -1)
elif side == 1:
return self.doMove(objId, 0, 1)
elif side == 2:
return self.doMove(objId, -1, 0)
elif side == 3:
return self.doMove(objId, 1, 0)
def checkMove(self, objId, dRow, dCol):
objPos = self.objPos.get(objId)
if not objPos:
return None
(row, col) = objPos
validMove = 1
if dRow < 0:
validMove = validMove & self._DistributedGridAI__isEmpty(row - 1, col) & self._DistributedGridAI__isEmpty(row - 1, col + 1)
elif dRow > 0:
validMove = validMove & self._DistributedGridAI__isEmpty(row + 2, col) & self._DistributedGridAI__isEmpty(row + 2, col + 1)
if dCol < 0:
validMove = validMove & self._DistributedGridAI__isEmpty(row, col - 1) & self._DistributedGridAI__isEmpty(row + 1, col - 1)
elif dCol > 0:
validMove = validMove & self._DistributedGridAI__isEmpty(row, col + 2) & self._DistributedGridAI__isEmpty(row + 1, col + 2)
return validMove
def doMove(self, objId, dRow, dCol):
objPos = self.objPos.get(objId)
if not objPos:
return 0
(row, col) = objPos
validMove = self.checkMove(objId, dRow, dCol)
if validMove:
self.gridCells[row][col].remove(objId)
self.gridCells[row + 1][col].remove(objId)
self.gridCells[row][col + 1].remove(objId)
self.gridCells[row + 1][col + 1].remove(objId)
newRow = row + dRow
newCol = col + dCol
self.gridCells[newRow][newCol].append(objId)
self.gridCells[newRow + 1][newCol].append(objId)
self.gridCells[newRow][newCol + 1].append(objId)
self.gridCells[newRow + 1][newCol + 1].append(objId)
self.objPos[objId] = [
newRow,
newCol]
self.updateActiveCells(objId, row, col, dRow, dCol)
return validMove
def updateActiveCells(self, objId, row, col, dRow, dCol):
newRow = row + dRow
newCol = col + dCol
newCells = [
[
newRow,
newCol],
[
newRow + 1,
newCol],
[
newRow,
newCol + 1],
[
newRow + 1,
newCol + 1]]
oldCells = [
[
row,
col],
[
row + 1,
col],
[
row,
col + 1],
[
row + 1,
col + 1]]
onList = []
offList = []
for cell in newCells:
if cell not in oldCells:
onList.append(cell)
continue
for cell in oldCells:
if cell not in newCells:
offList.append(cell)
continue
self._DistributedGridAI__setChangedActiveCells(onList, offList, objId)
def _DistributedGridAI__setChangedActiveCells(self, onList = [], offList = [], objId = None):
for cell in self.activeCellList:
self.notify.debug('onList = %s, offList = %s, cell = %s' % (onList, offList, cell.getRowCol()))
if cell.getRowCol() in onList:
cell.b_setState(1, objId)
continue
if cell.getRowCol() in offList:
cell.b_setState(0, objId)
continue
def _DistributedGridAI__isEmpty(self, row, col):
        if row < 0 or row >= self.numRow or col < 0 or col >= self.numCol:
return 0
if len(self.gridCells[row][col]) > 0:
return 0
return 1
def printGrid(self):
if not __debug__:
return None
for i in range(len(self.gridCells)):
str = ''
for j in range(len(self.gridCells[i])):
col = self.gridCells[i][j]
active = 0
for cell in self.activeCellList:
if cell.getRowCol() == [
i,
j]:
active = 1
continue
                if len(col) > 0:
                    if active:
                        str += '[X]'
                    else:
                        str += ' X '
                    continue
                if active:
                    str += '[.]'
                    continue
                str += ' . '
print str + ' : %d' % i
print ''
| [
"[email protected]"
] | |
669beaf3ea25605a65cd83b7b25f54b79271deb5 | d9351a62a2e90b3bc486d4a11a715735108e3851 | /ajaxDemo/ajaxdemo.py | cb4c2f4b7b612e5fee477bf3cd385150aa5baa46 | [] | no_license | mooonpark/flask-demo | b1f3af7692b531f30acc9fb4b05c0aa335ed8edd | 07f30428df72d3b7d663234f4b7510610ae48d5d | refs/heads/master | 2021-06-20T20:57:44.872229 | 2017-08-06T06:35:00 | 2017-08-06T06:35:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | from flask import Flask, jsonify, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/_add_numbers')
def add_numbers():
a = request.args.get('a', 0, type=int)
b = request.args.get('b', 0, type=int)
return jsonify(result=a+b)
if __name__ == '__main__':
app.run(debug=True, port=8080, host="127.0.0.2")
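# A sketch of the client side this endpoint assumes (hypothetical index.html):
#   $.getJSON('/_add_numbers', {a: 3, b: 4}, function(data) {
#       $('#result').text(data.result);   // displays 7
#   });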
| [
"[email protected]"
] | |
a49b1655f351657ebc0aa3a89a0e6034ae07f6ed | 439d363d13eb643cee2ef6027c016904ce6dfc4e | /gRNAtoOligo.py | 4f08aaf0bfaf3794413a08f19228cc48f3692a3f | [] | no_license | allgenesconsidered/gRNAtoOligo | 4cb3560e8dc8925f0112308b18502cd6529051dd | 9f44c5447cc90986738605fad1b9b16b6e2bac97 | refs/heads/master | 2021-01-11T06:24:47.316587 | 2017-02-23T22:05:48 | 2017-02-23T22:05:48 | 69,590,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,960 | py | from __future__ import print_function, division
from os.path import dirname, basename
import csv, re, argparse
def parseFile(doc):
"""
Input: Document of gRNAs (for now, a .csv).
    Output: list of [gRNA name, gRNA sequence] rows
"""
if not '.csv' in basename(doc):
raise IOError(
"Please convert your output to a .csv with:\
\n grna name | grna sequence ")
with open(doc, 'r') as f:
reader = csv.reader(f)
grnaList = [row for row in reader]
if not set(grnaList[0][1]) <= set('ATGCatgc'): # File has a header
grnaList.remove(grnaList[0])
return grnaList # File has no header, starts with first gRNA
def reverseComp(sequence):
"""
Input: string of DNA
Output: string of reverse complement DNA
"""
comp = {'A':"T","T":"A","C":"G","G":"C"}
    out = ''.join([comp[i] for i in sequence]) # complement each base
    return out[::-1] # reverse to get the reverse complement
def formatString(sequence):
"""
Format string to remove spaces and have all cases be upper.
"""
return re.sub(" ", "", sequence).upper()
def addAdapter(grna, backbone, delete_g):
"""
Function to add flanks to gRNAs depending on the backbone.
Input: gRNA and backbone.
    Output: Two lines of a dataframe corresponding to the forward and
reverse strand of the gRNA with flanks.
"""
flanks = {
'f_137':('TTGG','GTTTAAGAGC'),
'r_137':('TTAGCTCTTAAAC','CCAACAAG'),
'f_330':('CACCG', ''),
'r_330':('AAAC','C'),
'f_1010.1':('CTTG',''),
'r_1010.1':('','CAAA'),
'f_1010.2':('TTGG',''),
'r_1010.2':('','CAAA')}
if delete_g and grna[0] == 'G' and len(grna) == 20: #Too many G's
grna = grna[1:]
if backbone in ('p1371','p1372'):
line1 = flanks['f_137'][0] + grna + flanks['f_137'][1]
line2 = flanks['r_137'][0] + reverseComp(grna) + flanks['r_137'][1]
elif backbone == 'px330':
line1 = flanks['f_330'][0] + grna + flanks['f_330'][1]
line2 = flanks['r_330'][0] + reverseComp(grna) + flanks['r_330'][1]
elif backbone == 'p1010.1':
line1 = flanks['f_1010.1'][0] + grna + flanks['f_1010.1'][1]
line2 = flanks['r_1010.1'][0] + reverseComp(grna) + flanks['r_1010.1'][1]
elif backbone == 'p1010.2':
line1 = flanks['f_1010.2'][0] + grna + flanks['f_1010.2'][1]
line2 = flanks['r_1010.2'][0] + reverseComp(grna) + flanks['r_1010.2'][1]
else:
raise ValueError('Backbone argument must be either:\
\n p1371 \
\n p1372 \
\n px330 \
\n p1010.1\
\n px1010.2')
return line1, line2
def getFilename(file):
"""
Use os.path to grab the filename for use in generating the output csv
Output: The path to the original file and the file name.
"""
return dirname(file), basename(file).split('.')[0]
def saveCSV(output, file):
"""
Saves the dataframe
"""
path, filename = getFilename(file)
if path == '':
path = '.'
save = path + '/' + filename + '_oligo_output.csv'
with open(save, 'wb') as outfile:
csv_writer = csv.writer(outfile)
for row in output:
csv_writer.writerow(row)
return
def generateOutput(dat, backbone, delete_g):
"""
    Main function for gRNAtoOligo.py
Input: the file and the backbone argument
    Output: A .csv file with adapters for each gRNA in the list.
"""
output_csv = [['Name', 'Sequence']]
for row in dat:
oligos = addAdapter(formatString(row[1]), backbone, delete_g)
name = row[0]
output_csv.append([name + '_F', oligos[0]])
output_csv.append([name + '_R', oligos[1]])
return output_csv
def main():
parser = argparse.ArgumentParser(description='Autogenerate gRNA oligos')
parser.add_argument('backbone', help='Backbone: either p1371, p1372, px330, or p1010(.1,.2).')
parser.add_argument('guides', help='csv file of name : guide combos')
    parser.add_argument('-d','--delete_g', help='Delete front G, default False.', action='store_true')
args = parser.parse_args()
dat = parseFile(args.guides)
saveCSV(generateOutput(dat, args.backbone, args.delete_g), args.guides)
if __name__ == "__main__":
main()
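# Example run (guide name/sequence below are made up for illustration):
#   guides.csv contains:  guide1,GATTACAGATTACAGATTAC
#   $ python gRNAtoOligo.py p1371 guides.csv
#   -> writes guides_oligo_output.csv with guide1_F / guide1_R oligo rows.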
| [
"[email protected]"
] | |
018a7e833574ab840bbdb00dcefad15b440f8303 | f673c526701163c1ce94608a2af3181a78bdf40f | /examples/statemachine/documentSignoffDemo.py | 58aa2088fc67a4985acd454f80e238c85c402a04 | [
"MIT"
] | permissive | nikiluc/pyparsing | 01df25c5dd1f087fb5dacac834aa41c6e325c4a3 | adf8dd00b736ba1934914e1db78528af02662b65 | refs/heads/master | 2020-05-07T19:33:14.489282 | 2019-04-07T18:45:07 | 2019-04-07T18:45:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,523 | py | #
# documentSignoffDemo.py
#
# Example of a state machine modeling the state of a document in a document
# control system, using named state transitions
#
import statemachine
import documentsignoffstate
class Document:
def __init__(self):
        # a new document starts in the New state
self._state = documentsignoffstate.New()
@property
def state(self):
return self._state
# get behavior/properties from current state
def __getattr__(self, attrname):
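        # Delegate attribute lookups to the current state object; if the name
        # is a transition class on the documentsignoffstate module, return a
        # zero-argument callable that advances self._state when invoked.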
attr = getattr(self._state, attrname)
if isinstance(getattr(documentsignoffstate, attrname, None),
documentsignoffstate.DocumentRevisionStateTransition):
return lambda : setattr(self, '_state', attr())
return attr
def __str__(self):
return "{0}: {1}".format(self.__class__.__name__, self._state)
def run_demo():
import random
doc = Document()
print(doc)
# begin editing document
doc.create()
print(doc)
print(doc.state.description)
while not isinstance(doc._state, documentsignoffstate.Approved):
print('...submit')
doc.submit()
print(doc)
print(doc.state.description)
if random.randint(1,10) > 3:
print('...reject')
doc.reject()
else:
print('...approve')
doc.approve()
print(doc)
print(doc.state.description)
doc.activate()
print(doc)
print(doc.state.description)
if __name__ == '__main__':
run_demo()
| [
"[email protected]"
] | |
44f8298f84297e99a2d2c81e62f01a2c0c474552 | 7dfef87d8ea1436c3610cb221b24ed37f6722f8b | /backend/feature/database.py | 08c5cb842c7f0a03a1e9fd0456fb19dc24f26d7f | [
"MIT"
] | permissive | weihuan830/LAK-2020 | 240136e1b682411d80cfd9aa01d5099477fc730b | b36b3e3234f4589b6f3b7fd67e818b6eeca79776 | refs/heads/main | 2023-02-11T14:58:00.859433 | 2021-01-14T14:15:50 | 2021-01-14T14:15:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,124 | py | import pymongo
from pymongo import MongoClient
def userSequenceByUidPid(userid, problemid):
client = MongoClient("127.0.0.1", 27017)
db = client.trumptech
collection = db.records
cursor = collection.find({"userid":userid, "d_source":problemid}, {"_id":0,"pageX":1,"pageY":1,"type":1})
return list(cursor)
def problemSequenceById(problemid):
client = MongoClient("127.0.0.1", 27017)
db = client.trumptech
collection = db.records
cursor = collection.find({"d_source":problemid}, {"_id":0,"pageX":1,"pageY":1,"userid":1})
return list(cursor)
def userSequenceByProblem(problemid):
client = MongoClient("127.0.0.1", 27017)
db = client.trumptech
collection = db.records
cursor = list(collection.aggregate(
[
{"$match": {"d_source": problemid}},
{"$group": {
"_id":"$userid",
"data":{
"$push":{
"timestamp":"$dt_timestamp",
"pageX":"$pageX",
"pageY":"$pageY",
"type":"$type"
}
},
}
},
{ "$sort": { "data.timestamp": -1 } },
]))
return cursor
def userSequenceByProblemByEventTime(problemid):
client = MongoClient("127.0.0.1", 27017)
db = client.trumptech
# collection = db.records
collection = db.movements
cursor = list(collection.aggregate(
[
{"$match": {"d_source": problemid}},
{"$group": {
"_id":"$userid",
"data":{
"$push":{
"time":"$dt_timestamp",
"time2":"$timeStamp",
"x":"$pageX",
"y":"$pageY",
"type":"$type"
}
},
}
},
{ "$sort": { "data.time": -1, "data.time2": -1 } },
]))
return cursor
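# Sketch of intended use (assumes a local mongod with the trumptech data
# loaded; the problem id below is made up):
#   rows = userSequenceByProblemByEventTime("problem-1")
#   for row in rows:
#       print(row["_id"], len(row["data"]))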
| [
"[email protected]"
] | |
40cd31bcd64b58fe57787fb1f99f3698c6dae001 | 6b89e007ca00754e7578add541b771c389055d62 | /tests/splitDrillFileTest.py | e34fcbe9d794c641e29eafa189e72dd8d746ebdb | [] | no_license | andyalexander/gcodeParser | 28e0c093c62d8bb6ccc633425529d76e0fa3241c | 7ff7e3a589ff6e16d2d51fd8230a342bcb2c260a | refs/heads/master | 2021-01-10T13:50:09.082117 | 2015-09-23T21:57:22 | 2015-09-23T21:57:22 | 43,025,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | __author__ = 'Andrew'
from splitDrillFile import splitDrillFile
path = r"../files"
fn = "reflow_controller.top.drill.gcode"
fnIn = path + '/' + fn
splitDrillFile(fnIn, True)
| [
"[email protected]"
] | |
4647a6afa7a22d863f5a25b077b14361588c742e | 63f9a0d150cbef75f4e6e8246dc7ecac3f3b6d09 | /python/ray/tune/examples/mxnet_example.py | a8639c618f8ab3c596924d4383a087f4eb996877 | [
"Apache-2.0",
"MIT"
] | permissive | ray-project/maze-raylit | 79f0a5af9fe4bdc13a2d5b3919da867ed5439aab | a03cd14a50d87d58effea1d749391af530d7609c | refs/heads/master | 2023-01-23T04:23:35.178501 | 2020-12-04T22:34:14 | 2020-12-04T22:34:14 | 318,274,659 | 5 | 0 | Apache-2.0 | 2020-12-04T22:34:15 | 2020-12-03T17:47:58 | Python | UTF-8 | Python | false | false | 2,963 | py | import mxnet as mx
from ray import tune, logger
from ray.tune.integration.mxnet import TuneCheckpointCallback, \
TuneReportCallback
from ray.tune.schedulers import ASHAScheduler
def train_mnist_mxnet(config, mnist, num_epochs=10):
batch_size = config["batch_size"]
train_iter = mx.io.NDArrayIter(
mnist["train_data"], mnist["train_label"], batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"],
batch_size)
data = mx.sym.var("data")
data = mx.sym.flatten(data=data)
fc1 = mx.sym.FullyConnected(data=data, num_hidden=config["layer_1_size"])
act1 = mx.sym.Activation(data=fc1, act_type="relu")
fc2 = mx.sym.FullyConnected(data=act1, num_hidden=config["layer_2_size"])
act2 = mx.sym.Activation(data=fc2, act_type="relu")
# MNIST has 10 classes
fc3 = mx.sym.FullyConnected(data=act2, num_hidden=10)
# Softmax with cross entropy loss
mlp = mx.sym.SoftmaxOutput(data=fc3, name="softmax")
# create a trainable module on CPU
mlp_model = mx.mod.Module(symbol=mlp, context=mx.cpu())
mlp_model.fit(
train_iter,
eval_data=val_iter,
optimizer="sgd",
optimizer_params={"learning_rate": config["lr"]},
eval_metric="acc",
batch_end_callback=mx.callback.Speedometer(batch_size, 100),
eval_end_callback=TuneReportCallback({
"mean_accuracy": "accuracy"
}),
epoch_end_callback=TuneCheckpointCallback(
filename="mxnet_cp", frequency=3),
num_epoch=num_epochs)
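# (TuneReportCallback above reports mxnet's eval "accuracy" to Tune under the
# name "mean_accuracy", which is what the ASHA scheduler below optimizes;
# TuneCheckpointCallback snapshots the module every 3 epochs.)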
def tune_mnist_mxnet(num_samples=10, num_epochs=10):
logger.info("Downloading MNIST data...")
mnist_data = mx.test_utils.get_mnist()
logger.info("Got MNIST data, starting Ray Tune.")
config = {
"layer_1_size": tune.choice([32, 64, 128]),
"layer_2_size": tune.choice([64, 128, 256]),
"lr": tune.loguniform(1e-3, 1e-1),
"batch_size": tune.choice([32, 64, 128])
}
scheduler = ASHAScheduler(
max_t=num_epochs, grace_period=1, reduction_factor=2)
analysis = tune.run(
tune.with_parameters(
train_mnist_mxnet, mnist=mnist_data, num_epochs=num_epochs),
resources_per_trial={
"cpu": 1,
},
metric="mean_accuracy",
mode="max",
config=config,
num_samples=num_samples,
scheduler=scheduler,
name="tune_mnist_mxnet")
return analysis
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
if args.smoke_test:
analysis = tune_mnist_mxnet(num_samples=1, num_epochs=1)
else:
analysis = tune_mnist_mxnet(num_samples=10, num_epochs=10)
print("Best hyperparameters found were: ", analysis.best_config)
| [
"[email protected]"
] | |
22fe8f97d897364e3f5210e66debabccab2dde5d | 7aa031cb4d132bf0d27a800fcbce2b847cab4562 | /NBodySim.py | ba608d2758b877427d28d987351efe587c5d2e2a | [] | no_license | samirjohnson/Ph22_Repository | 7ef39d7d30cb5c2d3beb055c50ba0ad40cd22df1 | 90de66b608e9bb39640871df6576b66a3d7e8ff2 | refs/heads/master | 2021-05-24T16:12:55.421589 | 2020-06-05T22:34:17 | 2020-06-05T22:34:17 | 253,650,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,751 | py | from Vector import *
from SymplecticIntegrator import *
import math
import numpy as np
import matplotlib.pyplot as plt
from random import random
M = 1; G = 1; R = 1; a = 0.1; N = 100; tdyn = math.sqrt(R**3/(G*M*N));
tfinal = 6*math.pi*tdyn; timestep = 0.005; trelax = N / (10 * math.log(N)) * tdyn;
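# (Interpretation inferred from the usage below: M is the per-particle mass,
# a is a softening length, and tdyn / trelax are dynamical- and
# relaxation-time estimates for N bodies inside radius R.)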
def func(vec, t, h):
acc = lambda x1, x2: -G * M * (x1 - x2) / (Vector.magnitude(x1 - x2)**(3) + Vector.magnitude(x1 - x2)*a**2)
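    # Softened attraction: the denominator is |r|*(|r|**2 + a**2), i.e. a
    # Plummer-style softening that keeps close encounters finite.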
xvec = Vector([None]*int(len(vec)/2)); vvec = Vector([None]*len(xvec));
for i in range(len(xvec)):
f = Vector([0,0])
for j in range(len(vvec)):
if i!=j:
q = acc(vec[2*i], vec[2*j])
f[0] += q[0]; f[1] += q[1];
vvec[i] = f; xvec[i] = vec[2*i+1] + h*f
retvec = Vector([None]*len(vec)); retvec[::2] = xvec; retvec[1::2] = vvec;
return retvec
#initialize particles within ring of radius R
vecINIT = Vector([0] * 2 * N)
for i in range(2*N):
theta1 = 2 * math.pi * random(); theta2 = 2 * math.pi * random();
if i % 2 == 0:
vecINIT[i] = random()*R*Vector([math.cos(theta1), math.sin(theta1)])
else:
vecINIT[i] = Vector.magnitude(vecINIT[i-1])/10*Vector([math.cos(theta2),math.sin(theta2)])
vecList = evaluateODE(vecINIT, timestep, func, tfinal)
radialPos = []; radialV = []; xList = []; yList = [];
for v in vecList:
    # iterate by index: the original v.index(i) returns the first match and
    # can misclassify duplicate vectors; even slots hold positions, odd
    # slots hold velocities
    for k in range(len(v)):
        if k % 2 == 0:
            radialPos.append(Vector.magnitude(v[k]))
            xList.append(v[k][0]); yList.append(v[k][1]);
        else:
            radialV.append(Vector.magnitude(v[k]))
tList = np.linspace(0,tfinal, int(len(radialPos)/N))
for i in range(N):
plt.plot(tList,radialPos[i::N])
plt.xlabel('t'); plt.ylabel('Radial Position');
plt.show()
for i in range(N):
plt.plot(tList, radialV[i::N])
plt.xlabel('t'); plt.ylabel('Radial Velocity');
plt.show()
radialDist0 = []; radialDist1 = []; radialDist2 = []; radialDist3 = [];
xList0,yList0,xList1,yList1,xList2,yList2,xList3,yList3=[],[],[],[],[],[],[],[]
def plotSnapshot(radialDist, xl, yl, t):
for i in range(N):
radialDist.append(np.log(radialPos[int(t/timestep)*N + i]))
xl.append(xList[int(t/timestep)*N + i])
yl.append(yList[int(t/timestep)*N + i])
plt.hist(radialDist, color='blue', edgecolor='black',bins = int(N/4), log=True)
plt.xlabel('Log(Radial Distance)'); plt.ylabel('Log(Number of particles)')
plt.show()
plt.scatter(xl,yl,s=5)
plt.xlabel('X'); plt.ylabel('Y'); plt.show()
plotSnapshot(radialDist0,xList0,yList0,trelax/2)
plotSnapshot(radialDist1,xList1,yList1,trelax)
plotSnapshot(radialDist2,xList2,yList2,3*trelax/2)
plotSnapshot(radialDist3,xList3,yList3,2*trelax)
| [
"[email protected]"
] | |
a3cdc718709cd35dba2b09fedaa48fa75134f80d | fe4e2c7cab09446c6eb51d49f80ec8bbecea7068 | /Song-Processor/nfeibel_202_P5.py | 07fd47c3c264696e16e2ae004044f43c655ad950 | [] | no_license | nfeibel/Python | f2200873c4a353eb628ec01295579742431d7bfd | 9041029f8b936dd47ad52a1066f54637902872fc | refs/heads/main | 2023-02-15T00:16:21.378806 | 2021-01-14T02:30:58 | 2021-01-14T02:30:58 | 329,484,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,782 | py | #-------------------------------------------------------------------------------
# Name: Nick Feibel
# Project 5
# Due Date: 10/31/2019
#-------------------------------------------------------------------------------
# Honor Code Statement: I received no assistance on this assignment that
# violates the ethical guidelines set forth by professor and class syllabus.
#-------------------------------------------------------------------------------
# References: Class lectures
#-------------------------------------------------------------------------------
# Comments and assumptions: Nick Feibel, nfeibel, G01164484, CS-112-202, no
# collaboration partners. Thanks for grading this!
# What I found most useful was using the Python Visualizer with the test
# cases provided in the instructions.
#-------------------------------------------------------------------------------
# NOTE: width of source code should be <=80 characters to be readable on-screen.
#2345678901234567890123456789012345678901234567890123456789012345678901234567890
# 10 20 30 40 50 60 70 80
#-------------------------------------------------------------------------------
##########################################################################
#########################################################################
#Below function creates a dictionary of note durations. A base_tempo
#is provided.
#-----------------------------------------------------------------------
def generate_durations(base_tempo):
#The multiplier is determined and then the dictionary is returned.
multiplier = 60/base_tempo
return {'Whole': 4 * multiplier, 'Half': 2 * multiplier, 'Quarter': \
1 * multiplier, 'Eighth': 0.5 * multiplier, 'Sixteenth': 0.25*multiplier}
#########################################################################
#Below function creates a dictionary of frequency values provided a
#base_freq.
#-----------------------------------------------------------------------
def generate_frequencies(base_freq):
#Frequency keys are initialized as well as the dictionary
#that will be returned at the end.
frequency = {'C': -9,'C#':-8,'D':-7,'D#':-6,'E':-5,'F':-4,'F#':-3,\
'G':-2,'G#':-1,'A':0,'A#':1,'B':2}
newDict = {}
#Loop iterates through and determines the frequency for the octave's
#notes, octave by octave and adds the value to
for i in range(-3,4):
for letter, value in frequency.items():
newDict[letter + str(i+4)] = base_freq * (2**((value+(i*12))/12))
#The straggler notes we then assign their values outside a loop.
newDict['A0'] = base_freq * (2**((0+(-4*12))/12))
newDict['A#0'] = base_freq * (2**((1+(-4*12))/12))
newDict['B0'] = base_freq * (2**((2+(-4*12))/12))
newDict['C8'] = base_freq * (2**((-9+(4*12))/12))
return newDict
#########################################################################
#Below function finds either the highest or lowest note in a song
#provided as filename.
#-----------------------------------------------------------------------
def find_note(filename, highest_or_lowest):
#Below I open the file, copy the contents to a string and then
#split that string into a listOfLines. The notes list is initiated
#and the notes from the listOfLines are added to the notes list
#to keep track of all the notes.
fileObject = open(filename)
fileContents = fileObject.read()
fileObject.close()
listOfLines = fileContents.split("\n")
listOfLines[:] = listOfLines[2:-1]
notes = []
for i in range(len(listOfLines)):
notes.append(listOfLines[i].split(",")[0])
#Below a simple if-else statement is used to verify whether highest or
#lowest should be found.
if highest_or_lowest == True:
#The first note is initialized as the highest and it's octave
#is saved as well. I use a function I made called letterValue
#to confirm a numerical value for the note in an octave (0-11).
#This is called to confirm the current highestLetter value.
currentHighest = notes[0]
highestNumber = currentHighest[-1]
highestLetter = letterValue(currentHighest[0])
#A loop is used to iterate through the notes list and find the
#absolute highest note in the list. If-elif-else statement
#used to first check the octave, then check the letterValue.
for i in range(len(notes)):
if highestNumber < notes[i][-1]:
currentHighest = notes[i]
highestNumber = currentHighest[-1]
highestLetter = letterValue(currentHighest[0])
elif highestNumber > notes[i][-1]:
continue
else:
if highestLetter < letterValue(notes[i][0]):
currentHighest = notes[i]
highestNumber = currentHighest[-1]
highestLetter = letterValue(currentHighest[0])
return currentHighest
else:
#Same process as above except for the lowest note.
currentLowest = notes[0]
lowestNumber = currentLowest[-1]
lowestLetter = letterValue(currentLowest[0])
for i in range(len(notes)):
if lowestNumber > notes[i][-1]:
currentLowest = notes[i]
lowestNumber = currentLowest[-1]
lowestLetter = letterValue(currentLowest[0])
elif lowestNumber < notes[i][-1]:
continue
else:
if lowestLetter > letterValue(notes[i][0]):
currentLowest = notes[i]
lowestNumber = currentLowest[-1]
lowestLetter = letterValue(currentLowest[0])
return currentLowest
#return None statement included just incase.
return None
#########################################################################
#Below function creates a random_song provided a filename, tempo,
#tuning, and num_measures.
#-----------------------------------------------------------------------
def random_song(filename, tempo, tuning, num_measures):
#Random module of course is used for this so it is imported at the start.
import random
#File we are writing to is opened. Durations dictionary is defined
#and the tempo and tuning are written to the file at the start.
fileObject = open(filename, 'w')
durations = {'Whole': 4, 'Half': 2, 'Quarter': \
1, 'Eighth': 0.5, 'Sixteenth': 0.25}
fileObject.write(str(tempo) + '\n')
fileObject.write(str(tuning) + '\n')
#Below a loop iterates through the number of measures with the beats
#initialized as 4 at the start of every loop. Then a while loop is used
#to continue adding beats until the measure is filled. A string is
#made by using random.choice and random.randint and then the string is
#written to the file. If a duration chosen is too long for the beats left
#the loop continues so it continues until one that fits is selected.
for i in range(num_measures):
beats = 4
while beats > 0:
currentType = random.choice(["Sixteenth", "Eighth", 'Quarter', \
'Half','Whole'])
if beats < durations[currentType]:
continue
currentString = random.choice(["C","C#","D","D#","E","F","F#",\
"G","G#","A","A#","B"])+str(random.randint(1,7))
fileObject.write(currentString+","+currentType+"\n")
beats = beats - durations[currentType]
#Once the file has been completed, it is closed and we return None.
fileObject.close()
return None
#########################################################################
#Below function changes all the notes in a filename based on the confirmed
#dictionary of changes and the confirmed shift. New file is confirmed
#with the same name simply _changed is added to it.
#-----------------------------------------------------------------------
def change_notes(filename, changes, shift):
#The file is opened and contents copied and split into a list
#as lines. Then the file we are writing to is setup.
fileObject = open(filename)
fileContents = fileObject.read()
fileObject.close()
splitFile = fileContents.split('\n')
splitName = filename.split('.')
fileObject2 = open(splitName[0]+"_changed."+splitName[1], 'w')
#The last line is checked whether it is an empty space and if it is
#it is removed.
if splitFile[-1] == '':
splitFile.pop(-1)
#The tempo and tuning are added at the start to the output list.
output = [splitFile.pop(0)]
output.append(splitFile.pop(0))
#We then confirm the letterShift and octaveShift based on whether
#it is greater than or less than +-11.
if (shift > 11) or (shift < -11):
if shift < 0:
letterShift = (shift % -12)
octaveShift = (shift // -12)*-1
else:
letterShift = shift % 12
octaveShift = shift // 12
else:
octaveShift = 0
letterShift = shift
#A loop is then used to iterate through the splitFile line by line
#and make the confirmed changes.
for i in range(len(splitFile)):
currentLine = splitFile[i].split(',')
#First we instantly check whether the note is in the changes
#dictionary and if so we make the change and continue.
if changes.get(currentLine[0], 'ohno') != 'ohno':
output.append(changes[currentLine[0]]+','+currentLine[1])
continue
#Then there is an if-else statement to check whether the shift
#is positive or negative.
if shift < 0:
#Shift positive confirmed and the line's octave and
#value are initialized.
octave = currentLine[0][-1:]
value = letterValue(currentLine[0][:-1])
#If-else statement checks whether the shift is going
#to cause the note to go out of bounds, and if that
#is the case, assigns the output as the current note
#rather than shifting.
if ((((value + letterShift < 0) and \
(((int(octave)+octaveShift-1) < 0) or ((value <9) and \
((int(octave)+octaveShift-1) == 0)))) or \
(octaveShift + int(octave) < 0)) or \
((int(octave)+octaveShift) <= 0 and value + letterShift < 9))\
and not ((octaveShift + int(octave) -1 == 0) and \
12+(value + letterShift) >= 9):
output.append(currentLine[0]+','+currentLine[1])
else:
#If the shift is not going to be too much, an if-else
#statement is used to check whether the shift is going
#to cause the note to go to another octave and if so,
#it assigns the new octave value. If not, it simply
#assigns the new note with the current octave.
if value + letterShift < 0:
output.append(valueLetter(12 + (value+letterShift))+\
str(int(octave)+octaveShift-1)+','+currentLine[1])
else:
output.append(valueLetter(value+letterShift)+\
str(int(octave)+octaveShift)+','+currentLine[1])
else:
#The same is done for shifting in the positive direction.
octave = currentLine[0][-1:]
value = letterValue(currentLine[0][:-1])
if (((value + letterShift > 12) and \
(((int(octave)+octaveShift+1) > 8) or \
(((int(octave)+octaveShift+1) == 8))) \
or (octaveShift + int(octave)) > 8)) \
or ((int(octave)+octaveShift) == 8 and value + letterShift > 0):
output.append(currentLine[0]+','+currentLine[1])
else:
if value + letterShift > 11:
output.append(valueLetter((value+letterShift)-12)+\
str(int(octave)+octaveShift+1)+','+currentLine[1])
else:
output.append(valueLetter(value+letterShift)+\
str(int(octave)+octaveShift)+','+currentLine[1])
#Once the output list is complete, it is written to the new file.
for i in range(len(output)):
fileObject2.write(output[i]+'\n')
#File is closed and None is returned.
fileObject2.close()
return None
#########################################################################
#Below function makes a dictionary of the tempo, tuning, types, and
#notes in a confirmed file, filename.
#-----------------------------------------------------------------------
def song_as_dict(filename):
#The file is opened and contents copied and split into a list as lines.
fileObject = open(filename)
fileContents = fileObject.read()
fileObject.close()
splitFile = fileContents.split('\n')
splitName = filename.split('.')
#We check if the last line is an empty space and if that is the case,
#we remove it.
if splitFile[-1] == '':
splitFile.pop(-1)
#The tempo and tuning are confirmed and the types and
#notes are initialized.
output = {'tempo': int(splitFile.pop(0))}
output['tuning'] = float(splitFile.pop(0))
output['types'] = {}
output['notes'] = {}
#A loop goes through the splitFile and adds the note and type line by
#line.
for i in range(len(splitFile)):
#The currentLine is split into note and type.
currentLine = splitFile[i].split(',')
#If-else statement checks whether a note has been added previously,
#and if not, it initializes that dictionary for the note.
if output['notes'].get(currentLine[0][:-1], 'ohno') == 'ohno':
output['notes'][currentLine[0][:-1]] = \
{int(currentLine[0][-1:]): 1}
else:
#If a note has been found in the dictionary, we check whether the
#octave is in the dictionary for the note already, and if not
#it initializes it.
if output['notes'][currentLine[0][:-1]].get(\
int(currentLine[0][-1:]), 'ohno') == 'ohno':
output['notes'][currentLine[0][:-1]][int(\
currentLine[0][-1:])] = 1
else:
#If the octave is already in the dictionary, we simply iterate
#the counter for it.
output['notes'][currentLine[0][:-1]][int(\
currentLine[0][-1:])] += 1
#Then we check whether a type has been added previously,
#and if not, it initializes that dictionary for the type.
if output['types'].get(currentLine[1].strip(), 'ohno') == 'ohno':
output['types'][currentLine[1].strip()] = 1
else:
#If the type is already in the dictionary, we simply iterate
#the counter for it.
output['types'][currentLine[1].strip()] += 1
#The output dictionary is returned.
return output
#########################################################################
#Below function provides a value for a provided letter. Value is 0 - 11
#0 being C, the bottom of the scale, and 11 being B, the top of the scale.
#-----------------------------------------------------------------------
def letterValue(letter):
frequency = {'C': 0,'C#':1,'D':2,'D#':3,'E':4,'F':5,'F#':6,'G':7,'G#':8,\
'A':9,'A#':10,'B':11}
return frequency[letter]
#########################################################################
#Below function provides a letter for a provided value. Value is 0 - 11
#0 being C, the bottom of the scale, and 11 being B, the top of the scale.
#-----------------------------------------------------------------------
def valueLetter(randomValue):
frequency = {0: 'C',1: 'C#',2: 'D',3:'D#',4:'E',5:'F',6:'F#',7:'G',\
8:'G#',9:'A',10:'A#',11:'B'}
return frequency[randomValue] | [
"[email protected]"
] | |
e5b7ab5bbefba46b68568440aedf0a897e14c2cc | 02b48b9f515406da1c980c4feb9d268f69a4d24d | /tools/alf_plugins/write_to_file/write_to_file.py | 0c1578396b7c43defa656886ed42bdd564654346 | [] | no_license | dspking111/ossie-june | 3b2f12835021b9f26daed019ae637fe17b64dcdb | f14b2a133416df44f411c9d06115bb968cfb7c69 | refs/heads/master | 2020-03-26T07:20:15.300009 | 2012-01-27T20:52:46 | 2012-01-27T20:52:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,498 | py | #!/usr/bin/env python
## Copyright 2007 Virginia Polytechnic Institute and State University
##
## This file is part of the OSSIE ALF write_to_file tool.
##
## OSSIE ALF write_to_file is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## OSSIE ALF write_to_file is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with OSSIE ALF write_to_file; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''Write incoming packet(s) to file(s)'''
import wx #needed for display stuff
from omniORB import CORBA #use this for the CORBA orb stuff (pushing packets)
import sys #for system commands (e.g., argv and argc stuff)
import CosNaming #narrowing naming context stuff
from ossie.cf import CF, CF__POA #core framework stuff
from ossie.standardinterfaces import standardInterfaces__POA
from wx.lib.anchors import LayoutAnchors #used by splitter window
# import wx.lib.buttons as buttons #for special wx buttons
class write_to_file_short(standardInterfaces__POA.complexShort):
'''Writes I and Q data to 2 files'''
def __init__(self, orb, gui):
self.orb = orb
self.gui = gui #usually parent
self.delimiter = ','
def pushPacket(self, I_data, Q_data):
        '''Store the data to be written to file when the Write Packet
        button is pushed'''
self.gui.I_data = I_data
self.gui.Q_data = Q_data
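        # Only the most recent packet is kept; the GUI's write buttons dump
        # whatever was pushed last.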
def create(parent,namespace, interface, ns_name, port_name):
#return MainFrame(parent, -1, namespace, interface, ns_name, port_name)
return MainFrame(parent, -1, "Don't know what this should be",
namespace, interface, ns_name, port_name)
#generate wx ids for my wx controls
[wxID_MAINFRAME, wxID_SPLITTERWINDOW1, wxID_WRITEPACKETBTN, wxID_WRITEBUFFERBTN, wxID_IFILENAMEEDITOR, wxID_QFILENAMEEDITOR, wxID_IFILESTATICTEXT,wxID_QFILESTATICTEXT] = [wx.NewId() for _init_ctrls in range(8)]
class MainFrame(wx.Frame):
def __init__(self, parent, id, title, namespace, interface,
component_name, port_name):
self._init_ctrls(parent)
self.I_data = []
self.Q_data = []
self.parent = parent
self.namespace = namespace
self.interface = interface
self.my_local_controls = None
self.component_name = component_name
self.port_name = port_name
self.setup_graphics()
# Now Create the menu bar and items
self.mainmenu = wx.MenuBar()
menu = wx.Menu()
menu.Append(205, 'E&xit', 'Enough of this already!')
self.Bind(wx.EVT_MENU, self.OnFileExit, id=205)
self.mainmenu.Append(menu, '&File')
menu = wx.Menu()
menu.Append(300, '&About', 'About this thing...')
self.Bind(wx.EVT_MENU, self.OnHelpAbout, id=300)
self.mainmenu.Append(menu, '&Help')
self.SetMenuBar(self.mainmenu)
# Bind the close event so we can disconnect the ports
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
self.Show(True)
self.iFileNameEditor.write('i_out.dat')
self.qFileNameEditor.write('q_out.dat')
def _init_ctrls(self, prnt):
wx.Frame.__init__(self, id=wxID_MAINFRAME, name='', parent=prnt,
pos=wx.Point(530, 680), size=wx.Size(520, 320),
style=wx.DEFAULT_FRAME_STYLE, title='Write to File Tool')
self.splitterWindow1 = wx.SplitterWindow(id=wxID_SPLITTERWINDOW1,
name='splitterWindow1', parent=self, point=wx.Point(1, 1),
size=wx.Size(570, 270), style=wx.SP_3D)
self.splitterWindow1.SetConstraints(LayoutAnchors(self.splitterWindow1,
True, True, True, True))
self.WritePacketBtn = wx.Button(id=wxID_WRITEPACKETBTN, label='Write Packet',
name='WritePacketBtn', parent=self.splitterWindow1, pos=wx.Point(155, 150),
size=wx.Size(165, 50))
self.WritePacketBtn.SetFont(wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD, False))
self.WritePacketBtn.SetBackgroundColour("green")
self.WritePacketBtn.Bind(wx.EVT_BUTTON, self.OnWritePacketBtnButton,
id=wxID_WRITEPACKETBTN)
self.WriteBufferBtn = wx.Button(id=wxID_WRITEBUFFERBTN, label='Write BUFFER',
name='WriteBufferBtn', parent=self.splitterWindow1, pos=wx.Point(155, 210),
size=wx.Size(165, 50))
self.WriteBufferBtn.SetFont(wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD, False))
self.WriteBufferBtn.SetBackgroundColour("green")
self.WriteBufferBtn.Bind(wx.EVT_BUTTON, self.OnWriteBufferBtnButton,
id=wxID_WRITEBUFFERBTN)
#####
## if you want an image on the button
# button_bmp = wx.Image("../AWG/my_image.bmp", wx.BITMAP_TYPE_BMP).ConvertToBitmap()
# self.WritePacketBtn = buttons.GenBitmapTextButton(self.splitterWindow1,
# wxID_WRITEPACKETBTN,button_bmp, 'Write Packet',
# name='WritePacketBtn', pos=wx.Point(155, 250),
# size=wx.Size(300, 100), style=0)
# self.WritePacketBtn.Bind(wx.EVT_BUTTON, self.OnWritePacketBtnButton,
# id=wxID_WRITEPACKETBTN)
#####
self.iFileNameEditor = wx.TextCtrl(id=wxID_IFILENAMEEDITOR,
name=u'iFileNameEditor', parent=self.splitterWindow1, pos=wx.Point(215, 50),
size=wx.Size(250, 30), style=0, value=u'')
self.qFileNameEditor = wx.TextCtrl(id=wxID_QFILENAMEEDITOR,
name=u'qFileNameEditor', parent=self.splitterWindow1, pos=wx.Point(215, 100),
size=wx.Size(250, 30), style=0, value=u'')
self.iFileStaticText = wx.StaticText(id=wxID_IFILESTATICTEXT,
label=u'I channel file:', name='qFileStaticText', parent=self.splitterWindow1,
pos=wx.Point(55, 50), size= wx.Size(100, 20), style=0)
self.iFileStaticText.SetFont(wx.Font(10,wx.SWISS,wx.NORMAL,wx.BOLD,True,u'Sans'))
self.qFileStaticText = wx.StaticText(id=wxID_QFILESTATICTEXT,
label=u'Q channel file:', name='qFileStaticText', parent=self.splitterWindow1,
pos=wx.Point(55, 100), size= wx.Size(100, 20), style=0)
self.qFileStaticText.SetFont(wx.Font(10,wx.SWISS,wx.NORMAL,wx.BOLD,True,u'Sans'))
def OnWritePacketBtnButton(self,event):
#get the file names from the file name editors in the GUI
self.i_file_name = self.iFileNameEditor.GetLineText(0)
self.q_file_name = self.qFileNameEditor.GetLineText(0)
#write the data out
open_file = open(self.i_file_name, 'w') #open the file for writing
open_file.write(str(self.I_data))
open_file.close()
open_file = open(self.q_file_name, 'w') #open the file for writing
open_file.write(str(self.Q_data))
open_file.close()
def OnWriteBufferBtnButton(self,event):
#get the file names from the file name editors in the GUI
self.i_file_name = self.iFileNameEditor.GetLineText(0)
self.q_file_name = self.qFileNameEditor.GetLineText(0)
#write the data out
open_file = open(self.i_file_name, 'w') #open the file for writing
open_file.write(str(self.I_data))
open_file.close()
open_file = open(self.q_file_name, 'w') #open the file for writing
open_file.write(str(self.Q_data))
open_file.close()
def OnFileExit(self, event):
'''This is what will happen when you select File -> Exit
in the menu bar'''
self.Close() #close the frame
def OnHelpAbout(self, event):
'''Stuff that gets displayed when you select Help -> About in
the menu bar'''
from wx.lib.dialogs import ScrolledMessageDialog
about = ScrolledMessageDialog(self, "Write to file tool.\nA product of Wireless@VT.", "About...")
about.ShowModal()
def setup_graphics(self):
self.CORBA_being_used = False
if True:
self.CORBA_being_used = True
self.orb = CORBA.ORB_init(sys.argv, CORBA.ORB_ID)
obj = self.orb.resolve_initial_references("NameService")
rootContext = obj._narrow(CosNaming.NamingContext)
if rootContext is None:
print "Failed to narrow the root naming context"
sys.exit(1)
name = [CosNaming.NameComponent(self.component_name[0],""),
CosNaming.NameComponent(self.component_name[1],""),
CosNaming.NameComponent(self.component_name[2],"")]
try:
ResourceRef = rootContext.resolve(name)
except:
print "Required resource not found"
sys.exit(1)
ResourceHandle = ResourceRef._narrow(CF.Resource)
PortReference = ResourceHandle.getPort(self.port_name)
if PortReference is None:
print "Failed to get Port reference"
self.PortHandle = PortReference._narrow(CF.Port)
#create the class instance of the write_to_file class
self.my_file_writer = write_to_file_short(self.orb, self)
obj_poa = self.orb.resolve_initial_references("RootPOA")
poaManager = obj_poa._get_the_POAManager()
poaManager.activate()
obj_poa.activate_object(self.my_file_writer)
self.PortHandle.connectPort(self.my_file_writer._this(),
"thisismyconnectionid_w2file")
#orb.run()
def OnCloseWindow(self,event):
if hasattr(self.parent, 'removeToolFrame'):
self.parent.removeToolFrame(self)
self = None
event.Skip()
def __del__(self):
if self.CORBA_being_used:
self.PortHandle.disconnectPort("thisismyconnectionid_w2file")
            # _time / my_local_plot are not defined in this tool; a short
            # sleep keeps the intended settling pause before teardown
            import time
            time.sleep(1)
| [
"[email protected]"
] | |
60cf113ec864ba308dd3a9eb858b02fec6d69017 | 73b44c96ba8cb8221370fc9b517ba0dd6ae8b6b5 | /rhea/vendor/altera/device_serdes.py | a2e022b40b424c7c4f5491d338e0264b32510192 | [
"MIT"
] | permissive | Vikram9866/rhea | cf07e8f3075a605453b496714fccc08cb9e3af03 | 47edf23d975c3b04e8d4d0399137ead7580e0ef2 | refs/heads/master | 2021-01-17T22:55:15.214226 | 2016-06-19T16:04:00 | 2016-06-19T16:04:00 | 61,488,094 | 1 | 0 | null | 2016-06-20T20:03:40 | 2016-06-19T15:59:23 | Python | UTF-8 | Python | false | false | 376 | py |
from __future__ import absolute_import
from .._device_serdes_prim import device_serdes_input_prim
from .._device_serdes_prim import device_serdes_output_prim
def device_serdes_input(serdes):
prim_inst = device_serdes_input_prim(serdes)
return prim_inst
def device_serdes_output(serdes):
prim_inst = device_serdes_output_prim(serdes)
return prim_inst
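# (These thin wrappers just re-export the vendor-neutral serdes primitives
# for the Altera backend; sketch of use: prim = device_serdes_input(my_serdes))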
| [
"[email protected]"
] | |
d5b0a9a8aec526687311ce8c89804dd7cec1eedb | fd2125d194e6da6563f62863cf5fa4e9f7beac3a | /IPList.py | 1f483b15d23322dcabdd02a2107975b326010b23 | [] | no_license | myzhongguo/OpenSaaSAPI | 006e13f4b5271e1e382edfb8044ec84ae71a4de6 | c606ca0e9f6c824806ccae12e0092c1400860eb7 | refs/heads/master | 2020-03-18T01:13:02.220025 | 2017-06-02T08:58:27 | 2017-06-02T08:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55 | py | # -*- coding: utf-8 -*-
IPAccess = [
"0.0.0.0",
] | [
"[email protected]"
] | |
f7bf6095895c712679c0b8ed82e18d1a3b89e999 | 98f730ec6a43d8be4a34b0f2a44a9d35989d2287 | /tests/unit/api/test_policies_api.py | ed585f675a7b9a759f97a8a55947354b12753fd1 | [] | no_license | scottwr98/pynifi-client | 9337a4f322536ee466d419a788b8b5948cdc62d7 | 013ac2ffa591284a0d6cbb9ed552681cc6f91165 | refs/heads/master | 2020-04-18T08:47:03.680749 | 2017-11-04T23:59:58 | 2017-11-04T23:59:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,985 | py | # coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service. # noqa: E501
OpenAPI spec version: 1.4.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import pynifi_client
from pynifi_client.api.policies_api import PoliciesApi # noqa: E501
from pynifi_client.rest import ApiException
class TestPoliciesApi(unittest.TestCase):
"""PoliciesApi unit test stubs"""
def setUp(self):
self.api = pynifi_client.api.policies_api.PoliciesApi() # noqa: E501
def tearDown(self):
pass
def test_create_access_policy(self):
"""Test case for create_access_policy
Creates an access policy # noqa: E501
"""
pass
def test_get_access_policy(self):
"""Test case for get_access_policy
Gets an access policy # noqa: E501
"""
pass
def test_get_access_policy_for_resource(self):
"""Test case for get_access_policy_for_resource
Gets an access policy for the specified action and resource # noqa: E501
"""
pass
def test_remove_access_policy(self):
"""Test case for remove_access_policy
Deletes an access policy # noqa: E501
"""
pass
def test_update_access_policy(self):
"""Test case for update_access_policy
Updates a access policy # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
34a19e920f8df9aa9076fa26e7b77e8971382a7c | c0de3ab530bc9fcf673735f117f0a3cbee08efbb | /ejerci2.py | 6deb00bda68f933e49d0d6aeaea768d9e655dfbf | [] | no_license | matrix231993/tareapython2 | 857e9b4a3493646465c1d5c7733360ba2db84238 | 89e35d712de8e5f8a00ce2b19444cb77e69afe12 | refs/heads/main | 2023-07-30T07:17:32.353403 | 2021-09-20T04:30:22 | 2021-09-20T04:30:22 | 408,204,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py |
#2) Realiza un programa que lea un número impar por teclado. Si el usuario no introduce un número impar, debe repetise el proceso hasta que lo introduzca correctamente.
numero = 0
while numero % 2 == 0:
numero = int(input("Ingrese un número impar: ") ) | [
"[email protected]"
] | |
05329e10052643e0d41a74ee7c613e6d110d6847 | 79bceb0af5b658cd45d46bdf7f3fcffb17b7ddf5 | /apps/hello/apps.py | 51fd39919ac4c320019f041a89631d6473c4d8b0 | [] | no_license | 42cc/FortyTwoTestTask | a655d646955a1731ec44111a797e00b19fd5ebee | 5b71a1f8bf3fd15975986f30f8b17732b4805305 | refs/heads/master | 2021-07-05T20:47:48.733401 | 2021-04-23T18:37:41 | 2021-04-23T18:37:41 | 26,122,037 | 2 | 831 | null | 2021-04-23T18:37:42 | 2014-11-03T14:27:30 | Python | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class HelloConfig(AppConfig):
name = "apps.hello"
| [
"[email protected]"
] | |
b3641cc7b3263bdf1d532e504074816ce17a495a | bb7f97a9bf71d05cc4fb479389c31032b70d8e08 | /djangosnippets/cab/urls/bookmarks.py | 160c08f9af3ba841a4bdab77c8dce57a89caabed | [
"BSD-3-Clause"
] | permissive | williamratcliff/scisnippets | 352e584e67e5c3ad1ccbe46f71334eca3982885e | 255f9b80080f608cb015b78b97b95b4806519b31 | refs/heads/master | 2021-01-02T09:32:54.534426 | 2010-11-05T06:56:40 | 2010-11-05T06:56:40 | 1,049,021 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from django.conf.urls.defaults import *
from cab.views import bookmarks
urlpatterns = patterns('',
url(r'^$',
bookmarks.user_bookmarks,
name='cab_user_bookmarks'),
url(r'^add/(?P<snippet_id>\d+)/$',
bookmarks.add_bookmark,
name='cab_bookmark_add'),
url(r'^delete/(?P<snippet_id>\d+)/$',
bookmarks.delete_bookmark,
name='cab_bookmark_delete'),
)
| [
"[email protected]"
] | |
80d29ad699bb72bfe60efd725afef7cca6ac3d36 | 969bb0fb57e9d3a59009c327142fe63d96001daf | /app/auth/forms.py | bffd6406e5146d998e51ca7e2ae54ee7a5d1c7a2 | [] | no_license | AnumAsif/watch-list | 96acd78df2166b70c2455ecd463a3440fa438a96 | 446105a03531b106255b64e1285b00d33d74e787 | refs/heads/master | 2020-04-20T05:49:53.266049 | 2019-02-08T13:30:16 | 2019-02-08T13:30:16 | 168,666,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import Required, Email, EqualTo
from ..models import User
from wtforms import ValidationError
# from RegistrationForm (FLaskform)
class RegistrationForm(FlaskForm):
email = StringField('Your Email Address',validators=[Required(),Email()])
username = StringField('Enter your username',validators = [Required()])
password = PasswordField('Password',validators = [Required(), EqualTo('password_confirm',message = 'Passwords must match')])
password_confirm = PasswordField('Confirm Passwords',validators = [Required()])
submit = SubmitField('Sign Up')
def validate_email(self,data_field):
if User.query.filter_by(email =data_field.data).first():
raise ValidationError('There is an account with that email')
def validate_username(self,data_field):
if User.query.filter_by(username = data_field.data).first():
raise ValidationError('That username is taken')
class LoginForm(FlaskForm):
email = StringField('Your Email Address', validators=[Required(),Email()])
password=PasswordField('Password',validators=[Required()])
remember = BooleanField('Remember me')
submit= SubmitField('Sign In')
| [
"[email protected]"
] | |
b6a5232fb03423e27b2bc4dc2b9ee7c6575a3e89 | ebec4fb7b2eb36b0214e7c018dd1a2f9a9f7b67d | /SPLN/aulas_kiko/aula4/exer_aula3-cont MEH.py | c2bfa06a8a9d0195dd590924fe316eab3180774a | [] | no_license | MrBoas/PLC | cb09aaa2440a8711a6ba888a92cdcd2559b0b8cf | 3f77cf1ce96cf8c4bb21ee4ec05f1fc418e9b73e | refs/heads/master | 2022-02-02T10:45:16.322559 | 2019-08-05T15:51:51 | 2019-08-05T15:51:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | #!/usr/bin/python3
import re
# Limpar titulo e autor
def clean(file):
text=open(file).read()
text = re.sub(r'\n\d*\n?O SENHOR DOS ANÉIS(?: I)?\n\d*',"",text)
text = re.sub(r'\n\d+\nJ. R. R. TOLKIEN',"",text)
# text = re.sub(r'\n\d+\n',"\n",text)
text = re.sub(r' +',r' ',text)
return text
par_mark = '#P##'
frase_mark = '#F##'
lista_abrev = ['Sr','Sra','Dr','Dra']
# 1. fazer print do texto com 3 '###' no inicio de cada frase
def paragrafos(text):
return re.sub(r'([.!?]+\s*\n\s*)',r'\1' + par_mark,text).split(par_mark)
def paragrafos2(text):
return re.findall(r'.*?[.!?]+\s*\n\s*',text,re.DOTALL)
def frases(text):
lista_frases = []
abreviaturas = "("+ "|".join(lista_abrev)+ ")"
for par in text:
par = re.sub(abreviaturas+r'\. ',r'\1_ ',par)
par = re.sub(r'([-?!]+)',r'\1'+frase_mark,par)
par = re.sub(frase_mark+r"$","",par)
par = re.sub(abreviaturas+r'\_ ',r'\1. ',par)
lista_frases.append(par.split(frase_mark))
return ("\n"+par_mark+" "+frase_mark).join(lista_par)
def prettyprint(text):
text = re.sub(par_mark,"",text)
text = re.sub(frase_mark+r" ?","\n",text)
return text
text = clean("sda_irmandade.txt")
text = paragrafos(text)
text = frases(text)
text = prettyprint(text)
print(text)
# 2. encontrar e imprimir nomes proprios
| [
"[email protected]"
] | |
bb0f62e0a3e7fc8450eff2a2d172019dfd01fbdc | fea9a0aea71c1e4c49c78246faf9f2069b9e5d15 | /fullshelf/shop/migrations/0008_auto_20180606_1352.py | dc6c5884a8e056f6ed2255784bf4b72d06f78325 | [] | no_license | CBA222/shopping-aggregator | 3cb8c05751682c70455cbe0ef0142182131599e1 | 4b3c33a484519eb91c30eb29e37c5dc7048f023a | refs/heads/master | 2020-04-16T22:44:15.092997 | 2019-03-16T21:54:48 | 2019-03-16T21:54:48 | 165,981,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,174 | py | # Generated by Django 2.0.5 on 2018-06-06 13:52
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0007_auto_20180602_1129'),
]
operations = [
migrations.RemoveField(
model_name='product',
name='price',
),
migrations.RemoveField(
model_name='product',
name='url',
),
migrations.AddField(
model_name='product',
name='best_price',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10),
),
migrations.AddField(
model_name='product',
name='best_url',
field=models.URLField(default='no-url'),
),
migrations.AddField(
model_name='product',
name='best_vendor',
field=models.CharField(default='', max_length=50),
),
migrations.AddField(
model_name='product',
name='listings',
field=django.contrib.postgres.fields.jsonb.JSONField(default=[]),
),
]
| [
"[email protected]"
] | |
07e878d9a5f9f25899a092ea350dc879418b05b0 | fffdd07b462ff5df3e87511d8399328b67a52dfc | /run_script.py | 0aa35d6a460364aa7f2dd2a3ffa1774e40b59fe4 | [] | no_license | thebjm/HomeSecurity | 29f8506adb141deb90e6adc6acad7ff894d4ce3e | 199e42f0a530325b7a0b19e53067317eb486ef94 | refs/heads/master | 2020-08-21T00:36:29.763179 | 2019-10-18T18:33:13 | 2019-10-18T18:33:13 | 216,082,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,585 | py | #!/usr/bin/python3
import subprocess
import os
import time
import RPi.GPIO as GPIO
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
mail = os.path.join(BASE_DIR, 'mail.py')
target = os.path.join(BASE_DIR, 'recognize_video.py')
print(target)
GPIO.setmode(GPIO.BCM)
servo = 4
pir_sensor = 17
piezo = 27
pushbtn = 20
GPIO.setup(servo,GPIO.OUT)
pwm = GPIO.PWM(servo,50)
#pwm.start(7.5)
pwm.start(13)
pwm.ChangeDutyCycle(1)
time.sleep(1)
pwm.stop()
print('[INFO] locking Door..')
GPIO.setup(piezo,GPIO.OUT)
GPIO.setup(pir_sensor, GPIO.IN)
GPIO.setup(pushbtn, GPIO.IN, pull_up_down = GPIO.PUD_UP)
current_state = 0
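
# Poll the PIR once per call: motion fires a beep and runs the email alert
# script; otherwise the buzzer just chirps as a heartbeat.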
def siren():
current_state = GPIO.input(pir_sensor)
if current_state == 1:
print("GPIO pin %s is %s" % (pir_sensor, current_state))
GPIO.output(piezo,True)
time.sleep(0.5)
GPIO.output(piezo,False)
time.sleep(.5)
subprocess.call('python3 ' +mail, shell = True )
        current_state = 0
else:
print("GPIO pin %s is %s" % (pir_sensor, current_state))
GPIO.output(piezo,True)
time.sleep(0.5)
try:
GPIO.output(piezo,True)
time.sleep(2)
while True:
siren()
        input_state = GPIO.input(pushbtn)
if input_state == False:
GPIO.output(piezo,True)
time.sleep(0.5)
print('button Pressed')
break
except KeyboardInterrupt:
GPIO.output(piezo,True)
time.sleep(0.5)
pass
subprocess.call('python3 ' +target, shell = True )
| [
"[email protected]"
] | |
a5700105db1ed5b4cb1c283c8f04460173e54956 | 7dc1854d63a4d13e939354ee3fbbbdf0222b62cf | /train.py | 64a553da899439fa867ab46d77d1d408aad9f56e | [] | no_license | samuelvinicius777/classificador_dogs_cats | bf2e04793bd47c32d0232a0df52fc51356d5bb4c | 19d4e7ae0a2341163dbc2aad631d0b18bce8dba2 | refs/heads/master | 2023-07-15T15:32:27.337766 | 2021-08-30T01:22:30 | 2021-08-30T01:22:30 | 400,398,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,814 | py | # https://github.com/samuelvinicius777/classificador_dogs_cats.git
# Load imports
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Define the pixel rescaling (normalize pixel values to [0, 1])
train = ImageDataGenerator(rescale= 1./255)
validation = ImageDataGenerator(rescale= 1./255)
test = ImageDataGenerator(rescale=1./255)
# Load the train/validation/test datasets
train_dataset = train.flow_from_directory('data/training_set_dogs_cats/training_set',
target_size= (280,280),
color_mode = "rgb",
batch_size = 32,
class_mode = 'categorical')
validation_dataset = validation.flow_from_directory('data/test_set_dogs_cats/test_set_validation',
target_size= (280,280),
color_mode = "rgb",
batch_size = 32,
class_mode = 'categorical')
test_dataset = test.flow_from_directory('data/test_set_dogs_cats/test_set',
target_size= (280,280),
color_mode = "rgb",
batch_size = 32,
class_mode = 'categorical')
# Build the model
model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32,(3,3), activation= "relu", input_shape = (280,280,3)),
tf.keras.layers.Conv2D(64,(3,3), activation= "relu"),
tf.keras.layers.MaxPool2D(2,2),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64,activation= "relu"),
tf.keras.layers.Dense(2,activation='softmax')
])
# Compile the model
model.compile(loss= "categorical_crossentropy",
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001),
metrics = ["accuracy"])
# Train the model
model_fit = model.fit(train_dataset,
steps_per_epoch = 30,
batch_size = 32,
epochs = 50,
verbose = 1,
validation_data = validation_dataset)
# Save the model
model.save("models/model.h5")
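
# Sketch of scoring one image with the saved model (path is hypothetical):
#   from tensorflow.keras.preprocessing import image
#   img = image.load_img('data/sample.jpg', target_size=(280, 280))
#   x = image.img_to_array(img)[None] / 255.0
#   pred = model.predict(x)  # softmax over the 2 classes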
| [
"[email protected]"
] | |
1830bb394a72fc8c8712b10b63aabefbb67a8f1b | a1d34642f258cc3cdf58ba5f6cddee6e2e4ec76f | /Portfolio_VaR_Toolv4.py | aedae2f60afbc6ebe76681bcd3ef9f133a5c683b | [] | no_license | royopa/Python_Portfolio__VaR_Tool | a4133e7a4da11d357d9030589241d4ad8da068ef | e797d43e25f35b391b48c3697e0c0f68d9c10b6a | refs/heads/master | 2021-07-03T01:12:27.500950 | 2018-10-30T07:30:49 | 2018-10-30T07:30:49 | 169,900,791 | 1 | 1 | null | 2019-02-09T18:46:02 | 2019-02-09T18:46:02 | null | UTF-8 | Python | false | false | 33,428 | py |
import datetime
import pandas as pd
from pandas.tseries.offsets import BDay
import pandas_datareader.data as web
import numpy as np
import math
import matplotlib as plt
import matplotlib.dates as mdates
from matplotlib import cm as cm
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
import re
import scipy.stats
import time
import wx.lib.pubsub
from wx.lib.pubsub import pub
import wx
# First tab
class PageOne(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# Set first tab input fields + button
fontbold = wx.Font(18, wx.DEFAULT, wx.NORMAL, wx.BOLD)
title = wx.StaticText(self, wx.ID_ANY, 'Portfolio Tool')
title.SetFont(fontbold)
stock_a_label = wx.StaticText(self, -1, "Ticker Stock A", (20, 20))
stock_b_label = wx.StaticText(self, -1, "Ticker Stock B", (20, 20))
stock_c_label = wx.StaticText(self, -1, "Ticker Stock C", (20, 20))
stock_d_label = wx.StaticText(self, -1, "Ticker Stock D", (20, 20))
self.stock_a_ticker_input = wx.TextCtrl(self, size=(60, -1))
self.stock_b_ticker_input = wx.TextCtrl(self, size=(60, -1))
self.stock_c_ticker_input = wx.TextCtrl(self, size=(60, -1))
self.stock_d_ticker_input = wx.TextCtrl(self, size=(60, -1))
stock_a_weight_label = wx.StaticText(self, -1, "Initial weight Stock A", (20, 20))
stock_b_weight_label= wx.StaticText(self, -1, "Initial weight Stock B", (20, 20))
stock_c_weight_label = wx.StaticText(self, -1, "Initial weight Stock C", (20, 20))
stock_d_weight_label = wx.StaticText(self, -1, "Initial weight Stock D", (20, 20))
self.stock_a_weight_input = wx.TextCtrl(self, size=(60, -1))
self.stock_b_weight_input = wx.TextCtrl(self, size=(60, -1))
self.stock_c_weight_input = wx.TextCtrl(self, size=(60, -1))
self.stock_d_weight_input = wx.TextCtrl(self, size=(60, -1))
timeseries_label = wx.StaticText(self, -1, "Time series from: [dd/mm/yyyy]", (20, 20))
self.timeseries_input = wx.TextCtrl(self, size=(85, -1))
benchmark_label = wx.StaticText(self, -1, "Benchmark", (20, 20))
self.benchmark_input = wx.TextCtrl(self, size=(85, -1))
        background_a = wx.StaticText(self, -1, "> Stock weights should be decimals (e.g. 40% = 0.4)", (20, 20))
background_a.SetForegroundColour(wx.BLUE)
self.export = wx.CheckBox(self, label = 'Export data to CSV')
button = wx.Button(self, label="Retrieve data",)
self.Bind(wx.EVT_BUTTON, self.onRETRIEVE, button)
# Put all of the above in a Sizer
self.warning = wx.StaticText(self, -1, "", (20, 20))
sizer = wx.GridBagSizer(10, 15)
sizer.Add(title, (1, 0))
sizer.Add(stock_a_label, (3, 0))
sizer.Add(stock_b_label, (4, 0))
sizer.Add(stock_c_label, (5, 0))
sizer.Add(stock_d_label, (6, 0))
sizer.Add(self.stock_a_ticker_input, (3, 2))
sizer.Add(self.stock_b_ticker_input, (4, 2))
sizer.Add(self.stock_c_ticker_input, (5, 2))
sizer.Add(self.stock_d_ticker_input, (6, 2))
sizer.Add(stock_a_weight_label, (3, 5))
sizer.Add(stock_b_weight_label, (4, 5))
sizer.Add(stock_c_weight_label, (5, 5))
sizer.Add(stock_d_weight_label, (6, 5))
sizer.Add(self.stock_a_weight_input, (3, 7))
sizer.Add(self.stock_b_weight_input, (4, 7))
sizer.Add(self.stock_c_weight_input, (5, 7))
sizer.Add(self.stock_d_weight_input, (6, 7))
sizer.Add(timeseries_label, (3, 9))
sizer.Add(self.timeseries_input, (3, 11))
sizer.Add(benchmark_label, (4, 9))
sizer.Add(self.benchmark_input, (4, 11))
sizer.Add(background_a, (5, 9))
sizer.Add(self.export, (8, 9))
sizer.Add(button, (9, 0))
sizer.Add(self.warning, (11, 0))
self.border = wx.BoxSizer()
self.border.Add(sizer, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizerAndFit(self.border)
def onRETRIEVE(self, event):
# Get input values
stock_a_ticker = self.stock_a_ticker_input.GetValue()
stock_b_ticker = self.stock_b_ticker_input.GetValue()
stock_c_ticker = self.stock_c_ticker_input.GetValue()
stock_d_ticker = self.stock_d_ticker_input.GetValue()
stock_a_weight = self.stock_a_weight_input.GetValue()
stock_b_weight = self.stock_b_weight_input.GetValue()
stock_c_weight = self.stock_c_weight_input.GetValue()
stock_d_weight = self.stock_d_weight_input.GetValue()
stocks = [stock_a_ticker, stock_b_ticker, stock_c_ticker, stock_d_ticker, ]
# Check if the date was inserted correctly
try:
datetime.datetime.strptime(self.timeseries_input.GetValue(), '%d/%m/%Y')
# Check if all stock weights are floats
if re.match("^\d+?\.\d+?$", stock_a_weight) is None or re.match("^\d+?\.\d+?$", stock_b_weight) is None or re.match("^\d+?\.\d+?$", stock_c_weight) is None or re.match("^\d+?\.\d+?$", stock_d_weight) is None:
self.warning.SetLabel("Stock weight should be a digit")
# Check whether all fields are populated
elif any(x == '' for x in stocks) or any(x == None for x in stocks) or self.benchmark_input.GetValue() == '':
self.warning.SetLabel("One or more inputs are missing. Please insert all required values")
else:
weights = np.asarray([float(stock_a_weight), float(stock_b_weight), float(stock_c_weight), float(stock_d_weight), ])
# Check whether the portfolio weights sum up to 1
if sum(weights) != 1:
self.warning.SetLabel("Portfolio weights should sum up to 1")
else:
try:
self.warning.SetLabel("")
# Get stock data
data = web.DataReader(stocks, data_source='yahoo', start= self.timeseries_input.GetValue())['Adj Close']
data.sort_index(inplace=True, ascending=True)
data.index = pd.to_datetime(data.index)
time.sleep(5)
# Get benchmark data
benchmark = web.DataReader(self.benchmark_input.GetValue(), data_source='yahoo', start=self.timeseries_input.GetValue())['Adj Close']
benchmark.sort_index(inplace=True, ascending=True)
benchmark.index = pd.to_datetime(benchmark.index)
# Calculate headline metrics
returns = data.pct_change().dropna()
mean_daily_returns = returns.mean()
std = returns.std()
benchmark_returns = benchmark.pct_change().dropna()
benchmark_std = benchmark_returns.std()
# Create headers
mean_daily_return_label = wx.StaticText(self, -1, "Historical mean daily return (%)", (20, 20))
expected_annual_return_label = wx.StaticText(self, -1, "Historical annual return (%)", (20, 20))
daily_std_label = wx.StaticText(self, -1, "Hist. standard deviation (%, daily)", (20, 20))
annual_std_label = wx.StaticText(self, -1, "Hist. standard Deviation (%, annual)", (20, 20))
sharpe_label = wx.StaticText(self, -1, "Hist. Sharpe Ratio (annual)", (20, 20))
TE_label = wx.StaticText(self, -1, "Ex-post Tracking Error", (20, 20))
Beta_label = wx.StaticText(self, -1, "Beta", (20, 20))
stock_a_header = wx.StaticText(self, -1, str(stocks[0]), (20, 20))
stock_b_header = wx.StaticText(self, -1, str(stocks[1]), (20, 20))
stock_c_header = wx.StaticText(self, -1, str(stocks[2]), (20, 20))
stock_d_header = wx.StaticText(self, -1, str(stocks[3]), (20, 20))
portfolio_header = wx.StaticText(self, -1, "Portfolio", (20, 20))
benchmark_header = wx.StaticText(self, -1, "Benchmark("+self.benchmark_input.GetValue()+")", (20, 20))
# Calculate basis for portfolio metrics
positions = {}
positions[stocks[0]] = {returns[stocks[0]].index[0]: float(stock_a_weight)}
positions[stocks[1]] = {returns[stocks[1]].index[0]: float(stock_b_weight)}
positions[stocks[2]] = {returns[stocks[2]].index[0]: float(stock_c_weight)}
positions[stocks[3]] = {returns[stocks[3]].index[0]: float(stock_d_weight)}
pos = pd.DataFrame.from_dict(positions).reindex(returns.index).fillna(method="ffill")
portfolio = pos.shift() * (1 + returns).cumprod(axis=0)
portfolio['total_wealth'] = portfolio[[stocks[0], stocks[1], stocks[2], stocks[3]]].sum(axis=1)
portfolio.index = pd.to_datetime(portfolio.index)
date = datetime.datetime.strptime(self.timeseries_input.GetValue(), "%d/%m/%Y")
start_date = date + BDay(1)
portfolio.at[start_date, 'total_wealth'] = 1
portfolio["returns"] = portfolio['total_wealth'].pct_change()
# Calculate + insert specific stock, benchmark and portfolio metrics
stock_a_mean_daily_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[0]]*100, 2)), (20, 20))
stock_b_mean_daily_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[1]]*100, 2)), (20, 20))
stock_c_mean_daily_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[2]]*100, 2)), (20, 20))
stock_d_mean_daily_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[3]]*100, 2)), (20, 20))
portfolio_mean_daily_return = portfolio["returns"].mean()
portfolio_mean_daily_return_scr = wx.StaticText(self, -1, str(round(portfolio_mean_daily_return * 100, 2)), (20, 20))
benchmark_mean_daily_return = wx.StaticText(self, -1, str(round(benchmark_returns.mean() * 100, 2)), (20, 20))
stock_a_annual_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[0]]*100*252, 2)), (20, 20))
stock_b_annual_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[1]]*100*252, 2)), (20, 20))
stock_c_annual_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[2]]*100*252, 2)), (20, 20))
stock_d_annual_return = wx.StaticText(self, -1, str(round(mean_daily_returns[stocks[3]]*100*252, 2)), (20, 20))
portfolio_annual_return = wx.StaticText(self, -1, str(round(portfolio_mean_daily_return * 100 * 252, 2)), (20, 20))
benchmark_annual_return = wx.StaticText(self, -1, str(round(benchmark_returns.mean() * 100 * 252, 2)), (20, 20))
stock_a_daily_std = wx.StaticText(self, -1, str(round(std[stocks[0]]*100, 2)), (20, 20))
stock_b_daily_std = wx.StaticText(self, -1, str(round(std[stocks[1]]*100, 2)), (20, 20))
stock_c_daily_std = wx.StaticText(self, -1, str(round(std[stocks[2]]*100, 2)), (20, 20))
stock_d_daily_std = wx.StaticText(self, -1, str(round(std[stocks[3]]*100, 2)), (20, 20))
portfolio_daily_std = portfolio["returns"].std()
portfolio_daily_std_scr = wx.StaticText(self, -1, str(round(portfolio_daily_std * 100, 2)), (20, 20))
benchmark_daily_std = wx.StaticText(self, -1, str(round(benchmark_std * 100, 2)), (20, 20))
stock_a_annual_std = wx.StaticText(self, -1, str(round(std[stocks[0]] * 100 * np.sqrt(252), 2)), (20, 20))
stock_b_annual_std = wx.StaticText(self, -1, str(round(std[stocks[1]] * 100 * np.sqrt(252), 2)), (20, 20))
stock_c_annual_std = wx.StaticText(self, -1, str(round(std[stocks[2]] * 100 * np.sqrt(252), 2)), (20, 20))
stock_d_annual_std = wx.StaticText(self, -1, str(round(std[stocks[3]] * 100 * np.sqrt(252), 2)), (20, 20))
portfolio_annual_std = wx.StaticText(self, -1, str(round(portfolio_daily_std * 100 * np.sqrt(252), 2)), (20, 20))
benchmark_annual_std = wx.StaticText(self, -1, str(round(benchmark_std * 100 * np.sqrt(252), 2)), (20, 20))
risk_free_rate = 2.25 # 10 year US-treasury rate (annual)
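                        # Annualized Sharpe = (annual return - risk-free rate) / annual volatility.
                        # Example with assumed numbers: a 10% p.a. return at 16% vol gives (10 - 2.25) / 16 ≈ 0.48.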
sharpe_a = ((mean_daily_returns[stocks[0]] * 100 * 252) - risk_free_rate ) / (std[stocks[0]] * 100 * np.sqrt(252))
sharpe_b = ((mean_daily_returns[stocks[1]] * 100 * 252) - risk_free_rate) / (std[stocks[1]] * 100 * np.sqrt(252))
sharpe_c = ((mean_daily_returns[stocks[2]] * 100 * 252) - risk_free_rate) / (std[stocks[2]] * 100 * np.sqrt(252))
sharpe_d = ((mean_daily_returns[stocks[3]] * 100 * 252) - risk_free_rate) / (std[stocks[3]] * 100 * np.sqrt(252))
sharpe_portfolio = ((portfolio_mean_daily_return * 100 * 252) - risk_free_rate) / (portfolio_daily_std * 100 * np.sqrt(252))
sharpe_benchmark = ((benchmark_returns.mean() * 100 * 252) - risk_free_rate) / (benchmark_std * 100 * np.sqrt(252))
sharpe_a_scr = wx.StaticText(self, -1, str(round(sharpe_a, 2)),(20, 20))
sharpe_b_scr = wx.StaticText(self, -1, str(round(sharpe_b, 2)), (20, 20))
sharpe_c_scr = wx.StaticText(self, -1, str(round(sharpe_c, 2)), (20, 20))
sharpe_d_scr = wx.StaticText(self, -1, str(round(sharpe_d, 2)), (20, 20))
sharpe_portfolio_scr = wx.StaticText(self, -1, str(round(sharpe_portfolio, 2)), (20, 20))
sharpe_benchmark_scr = wx.StaticText(self, -1, str(round(sharpe_benchmark, 2)), (20, 20))
TE_a = (returns[stocks[0]] - benchmark.pct_change().dropna()).std()
TE_b = (returns[stocks[1]] - benchmark.pct_change().dropna()).std()
TE_c = (returns[stocks[2]] - benchmark.pct_change().dropna()).std()
TE_d = (returns[stocks[3]] - benchmark.pct_change().dropna()).std()
TE_p = (portfolio["returns"] - benchmark.pct_change().dropna()).std()
TE_stock_a = wx.StaticText(self, -1, str(round(TE_a * 100, 2)), (20, 20))
TE_stock_b = wx.StaticText(self, -1, str(round(TE_b * 100, 2)), (20, 20))
TE_stock_c = wx.StaticText(self, -1, str(round(TE_c * 100, 2)), (20, 20))
TE_stock_d = wx.StaticText(self, -1, str(round(TE_d * 100, 2)), (20, 20))
TE_portfolio = wx.StaticText(self, -1, str(round(TE_p * 100, 2)), (20, 20))
beta_a = (np.cov(returns[stocks[0]], benchmark_returns)[0][1]) / benchmark_returns.var()
beta_b = (np.cov(returns[stocks[1]], benchmark_returns)[0][1]) / benchmark_returns.var()
beta_c = (np.cov(returns[stocks[2]], benchmark_returns)[0][1]) / benchmark_returns.var()
beta_d = (np.cov(returns[stocks[3]], benchmark_returns)[0][1]) / benchmark_returns.var()
beta_p = (np.cov(portfolio["returns"].dropna(), benchmark_returns.iloc[1:])[0][1]) / benchmark_returns.var()
beta_a_lab = wx.StaticText(self, -1, str(round(beta_a, 2)), (20, 20))
beta_b_lab = wx.StaticText(self, -1, str(round(beta_b, 2)), (20, 20))
beta_c_lab = wx.StaticText(self, -1, str(round(beta_c, 2)), (20, 20))
beta_d_lab = wx.StaticText(self, -1, str(round(beta_d, 2)), (20, 20))
beta_p_lab = wx.StaticText(self, -1, str(round(beta_p, 2)), (20, 20))
# Put all the metrics in a Sizer
sizer = wx.GridBagSizer(10, 10)
sizer.Add(mean_daily_return_label, (12, 0))
sizer.Add(expected_annual_return_label, (13, 0))
sizer.Add(daily_std_label, (14, 0))
sizer.Add(annual_std_label, (15, 0))
sizer.Add(sharpe_label, (16, 0))
sizer.Add(TE_label, (17, 0))
sizer.Add(Beta_label, (18, 0))
sizer.Add(stock_a_header, (11, 2))
sizer.Add(stock_b_header, (11, 4))
sizer.Add(stock_c_header, (11, 6))
sizer.Add(stock_d_header, (11, 8))
sizer.Add(portfolio_header, (11, 11))
sizer.Add(benchmark_header, (11, 13))
sizer.Add(stock_a_mean_daily_return, (12, 2))
sizer.Add(stock_b_mean_daily_return, (12, 4))
sizer.Add(stock_c_mean_daily_return, (12, 6))
sizer.Add(stock_d_mean_daily_return, (12, 8))
sizer.Add(portfolio_mean_daily_return_scr, (12, 11))
sizer.Add(benchmark_mean_daily_return, (12, 13))
sizer.Add(stock_a_annual_return, (13, 2))
sizer.Add(stock_b_annual_return, (13, 4))
sizer.Add(stock_c_annual_return, (13, 6))
sizer.Add(stock_d_annual_return, (13, 8))
sizer.Add(portfolio_annual_return, (13, 11))
sizer.Add(benchmark_annual_return, (13, 13))
sizer.Add(stock_a_daily_std, (14, 2))
sizer.Add(stock_b_daily_std, (14, 4))
sizer.Add(stock_c_daily_std, (14, 6))
sizer.Add(stock_d_daily_std, (14, 8))
sizer.Add(portfolio_daily_std_scr, (14, 11))
sizer.Add(benchmark_daily_std, (14, 13))
sizer.Add(stock_a_annual_std, (15, 2))
sizer.Add(stock_b_annual_std, (15, 4))
sizer.Add(stock_c_annual_std, (15, 6))
sizer.Add(stock_d_annual_std, (15, 8))
sizer.Add(portfolio_annual_std, (15, 11))
sizer.Add(benchmark_annual_std, (15, 13))
sizer.Add(sharpe_a_scr, (16, 2))
sizer.Add(sharpe_b_scr, (16, 4))
sizer.Add(sharpe_c_scr, (16, 6))
sizer.Add(sharpe_d_scr, (16, 8))
sizer.Add(sharpe_portfolio_scr, (16, 11))
sizer.Add(sharpe_benchmark_scr, (16, 13))
sizer.Add(TE_stock_a, (17, 2))
sizer.Add(TE_stock_b, (17, 4))
sizer.Add(TE_stock_c, (17, 6))
sizer.Add(TE_stock_d, (17, 8))
sizer.Add(TE_portfolio, (17, 11))
sizer.Add(beta_a_lab, (18, 2))
sizer.Add(beta_b_lab, (18, 4))
sizer.Add(beta_c_lab, (18, 6))
sizer.Add(beta_d_lab, (18, 8))
sizer.Add(beta_p_lab, (18, 11))
self.border = wx.BoxSizer()
self.border.Add(sizer, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizerAndFit(self.border)
# Make the headline data available to the other tabs by means of PubSub
pub.sendMessage("panelListener", arg1 = data, arg2 = weights, arg3 = stocks, arg4 = portfolio)
# Export price-date from Yahoo to CSV if box is ticked
if self.export.GetValue() == True:
data.to_csv("data"+stock_a_ticker+"to"+stock_d_ticker+".csv", sep=';', encoding='utf-8')
else:
pass
except Exception as e:
self.warning.SetLabel(str(e))
except ValueError:
self.warning.SetLabel("Date not in the right format")
# Second tab
class PageTwo(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
pub.subscribe(self.myListener, "panelListener")
def myListener(self, arg1, arg2, arg3, arg4):
# import variables
data_2 = arg1
stocks_2 = arg3
portfolio_2 = arg4
portfolio_2.rename(columns={'returns': 'Portfolio',}, inplace=True)
returns = data_2.pct_change().dropna()
# Create histogram of daily returns
figure_1 = Figure(figsize=(7, 2.5))
canvas_1 = FigureCanvas(self, -1, figure_1)
axes_1 = figure_1.add_subplot(111)
axes_2 = figure_1.add_subplot(111)
axes_3 = figure_1.add_subplot(111)
axes_4 = figure_1.add_subplot(111)
axes_5 = figure_1.add_subplot(111)
axes_1.hist(returns[stocks_2[0]], bins=50, normed=True, histtype='stepfilled', alpha=0.5)
axes_2.hist(returns[stocks_2[1]], bins=50, normed=True, histtype='stepfilled', alpha=0.5)
axes_3.hist(returns[stocks_2[2]], bins=50, normed=True, histtype='stepfilled', alpha=0.5)
axes_4.hist(returns[stocks_2[3]], bins=50, normed=True, histtype='stepfilled', alpha=0.5)
axes_5.hist(portfolio_2["Portfolio"].dropna(), bins=50, normed=True, histtype='stepfilled', alpha=0.5)
axes_1.set_title(u"Historic return distribution", weight='bold')
axes_1.legend(loc='upper left')
# Create indexed performance chart
figure_2 = Figure(figsize=(7, 2.5))
canvas_2 = FigureCanvas(self, -1, figure_2)
axes_A = figure_2.add_subplot(111)
axes_B = figure_2.add_subplot(111)
axes_C = figure_2.add_subplot(111)
axes_D = figure_2.add_subplot(111)
axes_E = figure_2.add_subplot(111)
years = mdates.YearLocator()
yearsFmt = mdates.DateFormatter("'%y")
ret_index = (1 + returns).cumprod()
portfolio_cum = (1 + portfolio_2["Portfolio"].dropna()).cumprod()
axes_A.plot(ret_index.index, ret_index[stocks_2[0]])
axes_A.xaxis.set_major_locator(years)
axes_A.xaxis.set_major_formatter(yearsFmt)
axes_B.plot(ret_index.index, ret_index[stocks_2[1]])
axes_B.xaxis.set_major_locator(years)
axes_B.xaxis.set_major_formatter(yearsFmt)
axes_C.plot(ret_index.index, ret_index[stocks_2[2]])
axes_C.xaxis.set_major_locator(years)
axes_C.xaxis.set_major_formatter(yearsFmt)
axes_D.plot(ret_index.index, ret_index[stocks_2[3]])
axes_D.xaxis.set_major_locator(years)
axes_D.xaxis.set_major_formatter(yearsFmt)
axes_E.plot(portfolio_cum.index, portfolio_cum)
axes_E.xaxis.set_major_locator(years)
axes_E.xaxis.set_major_formatter(yearsFmt)
axes_A.set_title(u" Indexed Performance (base = 1)", weight='bold')
axes_A.legend(loc='upper left')
sizer = wx.GridBagSizer(7, 2.5)
sizer.Add(canvas_1, (1, 0))
sizer.Add(canvas_2, (2, 0))
self.border = wx.BoxSizer()
self.border.Add(sizer, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizerAndFit(self.border)
# Third tab
class PageThree(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
pub.subscribe(self.myListener, "panelListener")
def myListener(self, arg1, arg2, arg3, arg4):
fontbold = wx.Font(18, wx.DEFAULT, wx.NORMAL, wx.BOLD)
# import variables
data_3 = arg1
stocks_3 = arg3
weights_3 = arg2
portfolio_3 = arg4
returns = data_3.pct_change().dropna()
mean_daily_returns = returns.mean()
std = returns.std()
# Calculate daily 5% Historical Simulation VaR for individual stocks and portfolio
title_historical = wx.StaticText(self, wx.ID_ANY, 'VaR - Historical Simulation')
title_historical.SetFont(fontbold)
stock_a_hist_var_lab = wx.StaticText(self, -1, str(stocks_3[0]) + " - Daily VaR (5%)", (20, 20))
stock_b_hist_var_lab = wx.StaticText(self, -1, str(stocks_3[1]) + " - Daily VaR (5%)", (20, 20))
stock_c_hist_var_lab = wx.StaticText(self, -1, str(stocks_3[2]) + " - Daily VaR (5%)", (20, 20))
stock_d_hist_var_lab = wx.StaticText(self, -1, str(stocks_3[3]) + " - Daily VaR (5%)", (20, 20))
portfolio_hist_var_lab = wx.StaticText(self, -1, "Portfolio - Daily VaR (5%)", (20, 20))
stock_a_hist_var = wx.StaticText(self, -1, str(round(returns[stocks_3[0]].quantile(0.05) * 100, 2)), (20, 20))
stock_b_hist_var = wx.StaticText(self, -1, str(round(returns[stocks_3[1]].quantile(0.05) * 100, 2)), (20, 20))
stock_c_hist_var = wx.StaticText(self, -1, str(round(returns[stocks_3[2]].quantile(0.05) * 100, 2)), (20, 20))
stock_d_hist_var = wx.StaticText(self, -1, str(round(returns[stocks_3[3]].quantile(0.05) * 100, 2)), (20, 20))
portfolio_hist_ret = portfolio_3["returns"].dropna()
portfolio_hist_var = wx.StaticText(self, -1, str(round(portfolio_hist_ret.quantile(0.05) * 100, 2)), (20, 20))
# Calculate daily 5% Variance-Covariance VaR for individual stocks and portfolio
title_varcov = wx.StaticText(self, wx.ID_ANY, 'VaR - Variance Covariance')
title_varcov.SetFont(fontbold)
stock_a_cov_var_lab = wx.StaticText(self, -1, str(stocks_3[0]) + " - Daily VaR (5%)", (20, 20))
stock_b_cov_var_lab = wx.StaticText(self, -1, str(stocks_3[1]) + " - Daily VaR (5%)", (20, 20))
stock_c_cov_var_lab = wx.StaticText(self, -1, str(stocks_3[2]) + " - Daily VaR (5%)", (20, 20))
stock_d_cov_var_lab = wx.StaticText(self, -1, str(stocks_3[3]) + " - Daily VaR (5%)", (20, 20))
stock_a_cov_var = wx.StaticText(self, -1, str(round(scipy.stats.norm.ppf(0.05, mean_daily_returns[stocks_3[0]], std[stocks_3[0]]) * 100, 2)))
stock_b_cov_var = wx.StaticText(self, -1, str(round(scipy.stats.norm.ppf(0.05, mean_daily_returns[stocks_3[1]], std[stocks_3[1]]) * 100, 2)))
stock_c_cov_var = wx.StaticText(self, -1, str(round(scipy.stats.norm.ppf(0.05, mean_daily_returns[stocks_3[2]], std[stocks_3[2]]) * 100, 2)))
stock_d_cov_var = wx.StaticText(self, -1, str(round(scipy.stats.norm.ppf(0.05, mean_daily_returns[stocks_3[3]], std[stocks_3[3]]) * 100, 2)))
portfolio_return_daily = portfolio_3["returns"].dropna().mean()
portfolio_std = portfolio_3["returns"].dropna().std()
portfolio_cov_var_lab = wx.StaticText(self, -1, "Portfolio - Daily VaR (5%)", (20, 20))
portfolio_cov_var = wx.StaticText(self, -1, str(round(scipy.stats.norm.ppf(0.05, portfolio_return_daily, portfolio_std) * 100, 2)))
# Calculate 5% Monte Carlo Sim Daily VaR for individual stocks - along Geometric Brownian Motion
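        # MC sketch: each day's gross return is drawn as N(mu/T, sigma/sqrt(T)) + 1,
        # the Euler discretization of GBM with annualized drift mu and volatility sigma.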
        title_MC = wx.StaticText(self, wx.ID_ANY, 'VaR - Monte Carlo (along Geometric Brownian Motion)')
title_MC.SetFont(fontbold)
MC_return =[]
for item in range(len(stocks_3)):
result = []
S = data_3[stocks_3[item]].iloc[-1]
T = 252
mu = returns[stocks_3[item]].mean()*252
vol = returns[stocks_3[item]].std()*np.sqrt(252)
for i in range(1000):
daily_returns = np.random.normal(mu/T,vol/math.sqrt(T),T)+1
                # each simulated daily gross return maps to a one-day-ahead price S*r;
                # pooling all draws lets the 5th percentile below estimate daily VaR
                result.extend(S * daily_returns)
MC_return.append((np.percentile(result,5) - S) / S)
stock_a_MC_lab = wx.StaticText(self, -1, str(stocks_3[0]) + " - Daily VaR (5%)", (20, 20))
stock_b_MC_lab = wx.StaticText(self, -1, str(stocks_3[1]) + " - Daily VaR (5%)", (20, 20))
stock_c_MC_lab = wx.StaticText(self, -1, str(stocks_3[2]) + " - Daily VaR (5%)", (20, 20))
stock_d_MC_lab = wx.StaticText(self, -1, str(stocks_3[3]) + " - Daily VaR (5%)", (20, 20))
stock_a_MC = wx.StaticText(self, -1, str(round(MC_return[0] * 100, 2)), (20, 20))
stock_b_MC = wx.StaticText(self, -1, str(round(MC_return[1] * 100, 2)), (20, 20))
stock_c_MC = wx.StaticText(self, -1, str(round(MC_return[2] * 100, 2)), (20, 20))
stock_d_MC = wx.StaticText(self, -1, str(round(MC_return[3] * 100, 2)), (20, 20))
MC_assumptions_lab = wx.StaticText(self, -1, "Monte Carlo - Assumptions", (20, 20))
MC_assumption_1 = wx.StaticText(self, -1, "Geometric Brownian Motion", (20, 20))
MC_assumption_2 = wx.StaticText(self, -1, "N = 1000", (20, 20))
MC_assumption_3 = wx.StaticText(self, -1, "μ = mean daily stock return (i.e. drift)", (20, 20))
MC_assumption_4 = wx.StaticText(self, -1, "σ = standard deviation of returns", (20, 20))
MC_assumption_1.SetForegroundColour(wx.BLUE)
MC_assumption_2.SetForegroundColour(wx.BLUE)
MC_assumption_3.SetForegroundColour(wx.BLUE)
MC_assumption_4.SetForegroundColour(wx.BLUE)
# Put all metrics in a Sizer
sizer = wx.GridBagSizer(10, 15)
sizer.Add(title_historical, (1, 0))
sizer.Add(stock_a_hist_var_lab, (3, 0))
sizer.Add(stock_b_hist_var_lab, (4, 0))
sizer.Add(stock_c_hist_var_lab, (5, 0))
sizer.Add(stock_d_hist_var_lab, (6, 0))
sizer.Add(portfolio_hist_var_lab, (8, 0))
sizer.Add(stock_a_hist_var, (3, 1))
sizer.Add(stock_b_hist_var, (4, 1))
sizer.Add(stock_c_hist_var, (5, 1))
sizer.Add(stock_d_hist_var, (6, 1))
sizer.Add(portfolio_hist_var, (8, 1))
sizer.Add(title_varcov, (10, 0))
sizer.Add(stock_a_cov_var_lab, (12, 0))
sizer.Add(stock_b_cov_var_lab, (13, 0))
sizer.Add(stock_c_cov_var_lab, (14, 0))
sizer.Add(stock_d_cov_var_lab, (15, 0))
sizer.Add(portfolio_cov_var_lab, (17, 0))
sizer.Add(stock_a_cov_var, (12, 1))
sizer.Add(stock_b_cov_var, (13, 1))
sizer.Add(stock_c_cov_var, (14, 1))
sizer.Add(stock_d_cov_var, (15, 1))
sizer.Add(portfolio_cov_var, (17, 1))
sizer.Add(title_MC, (1, 8))
sizer.Add(stock_a_MC_lab, (3, 8))
sizer.Add(stock_b_MC_lab, (4, 8))
sizer.Add(stock_c_MC_lab, (5, 8))
sizer.Add(stock_d_MC_lab, (6, 8))
sizer.Add(stock_a_MC, (3, 9))
sizer.Add(stock_b_MC, (4, 9))
sizer.Add(stock_c_MC, (5, 9))
sizer.Add(stock_d_MC, (6, 9))
sizer.Add(MC_assumptions_lab, (8, 8))
sizer.Add(MC_assumption_1, (10, 8))
sizer.Add(MC_assumption_2, (11, 8))
sizer.Add(MC_assumption_3, (12, 8))
sizer.Add(MC_assumption_4, (13, 8))
self.border = wx.BoxSizer()
self.border.Add(sizer, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizerAndFit(self.border)
# Fourth tab
class PageFour(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
pub.subscribe(self.myListener, "panelListener")
def myListener(self, arg1, arg2, arg3, arg4):
# Import variables
data_4 = arg1
returns = data_4.pct_change().dropna()
# Construct correlation matrix
figure_3 = Figure(figsize=(6, 4))
canvas_3 = FigureCanvas(self, -1, figure_3)
axes_E = figure_3.add_subplot(111)
axes_E.pcolor(returns.corr(), cmap=plt.cm.Blues)
axes_E.set_xticks(np.arange(5)+0.5) # center x ticks
axes_E.set_yticks(np.arange(5)+0.5) # center y ticks
axes_E.set_xticklabels(returns.columns)
axes_E.set_yticklabels(returns.columns)
sizer = wx.GridBagSizer(7, 2.5)
sizer.Add(canvas_3, (1, 0))
self.border = wx.BoxSizer()
self.border.Add(sizer, 1, wx.ALL | wx.EXPAND, 5)
self.SetSizerAndFit(self.border)
# MainFrame
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title="Portfolio Tool")
# Wraps it all up and put everything together
p = wx.Panel(self)
nb = wx.Notebook(p)
page1 = PageOne(nb)
page2 = PageTwo(nb)
page3 = PageThree(nb)
page4 = PageFour(nb)
nb.AddPage(page1, "Portfolio Data")
nb.AddPage(page2, "Descriptive Data +")
nb.AddPage(page3, "VAR")
nb.AddPage(page4, "Correlation Matrix")
sizer = wx.BoxSizer()
sizer.Add(nb, 1, wx.EXPAND)
p.SetSizer(sizer)
if __name__ == "__main__":
app = wx.App()
frame = MainFrame()
frame.SetSize(0, 0, 1200, 750)
frame.Center()
frame.Show()
app.MainLoop() | [
"[email protected]"
] | |
f315bd7932192543dd844061a0c1e9ca384a68cb | fe039d4ba46deb5f184f5330ebd044663078bb50 | /nerTagger.py | 8d12fa592d21573e6518469a8451d7f15f11c209 | [] | no_license | anishsaha12/hindi-bank-chatbot | 81317dc2220897fe722e6eec907d2565cc0a33f5 | d16f553297a617098cc61c33c2cfa0121114d818 | refs/heads/master | 2022-11-15T22:13:29.804780 | 2020-07-15T14:46:39 | 2020-07-15T14:46:39 | 275,863,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,437 | py | import pickle
import numpy as np
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from posTagger import *
with open('nerTagger/max_len.pkl', 'rb') as f:
max_len = pickle.load(f)
with open('nerTagger/max_len_char_prefix.pkl', 'rb') as f:
max_len_char_prefix = pickle.load(f)
with open('nerTagger/max_len_char_suffix.pkl', 'rb') as f:
max_len_char_suffix = pickle.load(f)
with open('nerTagger/word2pos.pkl', 'rb') as f:
word2pos = pickle.load(f)
with open('nerTagger/word2idx.pkl', 'rb') as f:
word2idx = pickle.load(f)
with open('nerTagger/pos2idx.pkl', 'rb') as f:
pos2idx = pickle.load(f)
with open('nerTagger/char2idx.pkl', 'rb') as f:
char2idx = pickle.load(f)
with open('nerTagger/idx2tag.pkl', 'rb') as f:
idx2tag = pickle.load(f)
model = load_model('nerTagger/word_pos_char_emb_bilstm_ner_tagger.h5')
def nerTagger(line):
input_to_tag = line.rstrip()
input_tokens = input_to_tag.split(' ')
X_word = []
for w in input_tokens:
try:
X_word.append(word2idx[w])
except:
X_word.append(word2idx["UNK"])
    # POS-tag the input line
pos_tagged = posTagger(line)
X_pos = []
for w in pos_tagged:
try:
X_pos.append(pos2idx[w])
except:
X_pos.append(pos2idx["UNK"])
X_word = [X_word]
X_word = pad_sequences(maxlen=max_len, sequences=X_word, value=word2idx["PAD"], padding='post', truncating='post')
X_pos = [X_pos]
X_pos = pad_sequences(maxlen=max_len, sequences=X_pos, value=word2pos["PAD"], padding='post', truncating='post')
sent_seq = []
for i in range(max_len):
word_seq = []
for j in range(max_len_char_prefix):
try:
charIdx = char2idx.get(input_tokens[i][j])
if charIdx is not None:
word_seq.append(charIdx)
else:
word_seq.append(char2idx.get("UNK"))
except:
word_seq.append(char2idx.get("PAD"))
sent_seq.append(word_seq)
X_char_prefix = [sent_seq]
sent_seq = []
for i in range(max_len):
word_seq = []
for j in range(max_len_char_suffix):
try:
charIdx = char2idx.get(input_tokens[i][-max_len_char_suffix + j])
if charIdx is not None:
word_seq.append(charIdx)
else:
word_seq.append(char2idx.get("UNK"))
except:
word_seq.append(char2idx.get("PAD"))
sent_seq.append(word_seq)
X_char_suffix = [sent_seq]
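    # At this point each token is represented three ways: a word index, a POS
    # index, and fixed-length character windows over its prefix and suffix so
    # the model can exploit morphological cues in Hindi tokens.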
y_pred = model.predict([X_word,
X_pos,
np.array(X_char_prefix).reshape((len(X_char_prefix),
max_len, max_len_char_prefix)),
np.array(X_char_suffix).reshape((len(X_char_suffix),
max_len, max_len_char_suffix))
])
p = np.argmax(y_pred[0], axis=-1)
result = []
for w, pred in zip(input_tokens, p):
if w != 0:
result.append([w, idx2tag[pred]])
return result
# print(nerTagger("आपके मोबाइल पे बैंक से एक OTP आएगा . कृपया बताइये ."))
"[email protected]"
] | |
50f4234ba65b18f9731d6d8d9726096e4ff88174 | 89cd8b77ad5171c336cc60b2133fe6468a6cb53f | /AirBattle_Demo/07-抽取模型.py | 4ce4d8dedbe2faac02e51b1e65733fe0b97115f6 | [
"MIT"
] | permissive | fenglihanxiao/Python | 75178f6b6b0c53345e1ed54226ea645216572d6c | 872baf3a3a5ee42740161152605ca2b1ddf4cd30 | refs/heads/master | 2021-05-23T18:49:20.656433 | 2020-04-29T01:06:21 | 2020-04-29T01:06:21 | 253,199,073 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,598 | py | """
Plane Battle - produced by Itcast (Chuanzhi Boke) / Heima Programmer
"""
import sys  # import the system module
import pygame  # import the pygame module
import pygame.locals  # import pygame's local definitions
# from pygame.locals import *
APP_ICON = "res/app.ico"  # shared image-path constant, kept at module level for easy maintenance
IMG_BACKGROUND = "res/img_bg_level_1.jpg"  # shared image-path constant, kept at module level for easy maintenance
# TODO 2. create Model, the parent class of everything drawn on screen
class Model:
    window = None  # TODO 7. main-window handle, shared so every model can draw itself
    # TODO 3. constructor
    def __init__(self,img_path,x,y):
        self.img = pygame.image.load(img_path)  # TODO 4. the model's image
        self.x = x  # TODO 4. x coordinate inside the window
        self.y = y  # TODO 4. y coordinate inside the window
    # TODO 5. hoist the "add model to the window" method into the parent class
    def display(self):
        Model.window.blit(self.img, (self.x, self.y))  # TODO 8. reach the window through Model's class variable  # blit draws the image onto the window
# Background class
class Background(Model):
    """ TODO 1. logic hoisted into the parent class
    # original background constructor, taking an image path plus x, y
    def __init__(self,img_path,x,y):
        self.img = pygame.image.load(img_path)  # background image, loaded from the given path
        self.x = x  # x coordinate inside the window
        self.y = y  # y coordinate inside the window
    """
# Player class
class PlayerPlane(Model):
    pass
# Enemy class
class EnemyPlane(Model):
    pass
# Bullet class
class Bullet(Model):
    pass
# Game class
class Game:
    WINDOW_WIDTH = 512  # window-size constant, kept inside the class to limit its scope
    WINDOW_HEIGHT = 768  # window-size constant, kept inside the class to limit its scope
    # main program: the entry point that runs the game
def run(self):
        self.frame_init()  # initialize the window
        self.model_init()  # initialize the game objects
        while True:  # main loop: keep refreshing so the window stays on screen
            pygame.display.update()  # refresh the window
            self.event_init()  # poll the event handlers every frame
    # initialize the window
def frame_init(self):
        self.window = pygame.display.set_mode((Game.WINDOW_WIDTH, Game.WINDOW_HEIGHT))  # use the size constants  # create the window
        Model.window = self.window  # TODO 9. hand the window object to the Model class
        img = pygame.image.load(APP_ICON)  # use the icon-path constant  # load the icon file as an image
        pygame.display.set_icon(img)  # set the window icon to the image
        pygame.display.set_caption("Plane Battle v1.0 传智播客·黑马程序员出品")  # set the window title (brand string kept verbatim)
    # event-handling method
def event_init(self):
        for event in pygame.event.get():  # fetch all pending events
            if event.type == pygame.locals.QUIT :  # is this a click on the window's close button?
                sys.exit()  # quit the program
    # initialize the objects shown in the window
    def model_init(self):
        background = Background(IMG_BACKGROUND,0,0)  # use the image-path constant  # create the background at (0, 0)
        background.display()  # TODO 6. done via the display method hoisted into Model
        """ TODO 6. replaced by the shared method
        self.window.blit(background.img, (background.x, background.y))  # blit draws the image onto the window
        """
    # test entry point
if __name__ == "__main__":
Game().run()
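# A minimal sketch of what the extracted Model base class buys us: a new sprite
# needs no drawing code of its own (hypothetical subclass and asset path):
#   class Boss(Model):
#       pass
#   Boss("res/boss.png", 100, 0).display()  # display() is inherited from Model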
| [
"[email protected]"
] | |
babb4edc8375a09c12a27e9a4a0fca74a59ff456 | 0ec8af881e552cd1aeef75f9fbfe63172a72b6ff | /Chapter04/gather.py | 52f60f9d5f3e3268edda593a5ebe8fafee509e84 | [
"MIT"
] | permissive | gridl/Python-Parallel-Programming-Cookbook-Second-Edition | 3d26b7a1b3b9aef7975aa97922dfc7d9ff10dedd | cccf14fada24b9f2c12ace5ff9b8f4334cae1da2 | refs/heads/master | 2020-07-05T09:57:35.121344 | 2019-07-26T14:08:57 | 2019-07-26T14:08:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
data = (rank+1)**2
data = comm.gather(data, root=0)
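# comm.gather collects one value from every rank into a list on root (rank 0);
# all other ranks receive None. With 4 processes the root sees [1, 4, 9, 16] here.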
if rank == 0:
print ("rank = %s " %rank +\
"...receiving data to other process")
for i in range(1,size):
#value = data[i]
print(" process %s receiving %s from process %s"\
%(rank , value , i))
| [
"[email protected]"
] | |
8d291cdccecb664a81a8a9dc57ef536d88f22907 | b89474ef844f3b860b0027e46206ae0aa911dfd3 | /DataStructures and Algorithms Using Python/Day 1/store_marks.py | 74f7cb9e9e0260e97505fd42e74b3ccbcef39892 | [] | no_license | AgentT30/InfyTq-Foundation_Courses | 6e2574fca0dc5ff1126d1e910d5086e695921dce | a676478df0c083839ebe56ee3dcb9fffd2307390 | refs/heads/master | 2022-12-01T11:28:06.052658 | 2020-08-13T04:41:34 | 2020-08-13T04:41:34 | 287,181,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 450 | py | def update_mark_list(mark_list, new_element, pos):
mark_list.insert(pos, new_element)
return mark_list
def find_mark(mark_list, pos1, pos2):
return [mark_list[pos1], mark_list[pos2]]
# Provide different values for the variables and test your program
mark_list = [89, 78, 99, 76, 77, 72, 88, 99]
new_element = 69
pos = 2
pos1 = 5
pos2 = 8
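# With these sample values the two calls below print:
#   [89, 78, 69, 99, 76, 77, 72, 88, 99]   (69 inserted at index 2)
#   [77, 99]                               (marks at indices 5 and 8 of the updated list)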
print(update_mark_list(mark_list, new_element, pos))
print(find_mark(mark_list, pos1, pos2))
| [
"[email protected]"
] | |
7f9f050dabcf470350cad0530c1bacaedf380699 | c100b41ac9c3686a97a91ceaf38ac08654779c45 | /button | bcf4d9548db001d1785d5a97a533d60142d4e058 | [] | no_license | enderwing/raspberry-pi-projects | 2d3c33623c3ec6309669ee95311a0bff5425dc88 | d67e9b3c55b73dd9a4316d0a872d4c6683d384e1 | refs/heads/master | 2021-01-16T21:23:57.958808 | 2017-11-09T19:42:56 | 2017-11-09T19:42:56 | 38,697,244 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | #!/usr/bin/env python3
import RPi.GPIO as GPIO
import time as t
GPIO.setmode(GPIO.BCM)
GPIO.setup(26, GPIO.IN)
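# NOTE: no internal pull resistor is configured, so the pin floats unless the
# wiring provides one; GPIO.setup(26, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# would be the usual way to hold it low between presses.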
count = 0
while True:
    pressed = GPIO.input(26)  # read the button level on BCM pin 26 (avoid shadowing built-in input)
    if pressed:
count += 1
print("Pressed: " + str(count) + " times")
t.sleep(0.5)
| [
"[email protected]"
] | ||
9b99b2aeef5a36a5e2a204a7692ce3647e4a4ac5 | 79ee6fe7d3fcb4ca0df5501941e4e069bc3378b3 | /01-donar-search/visuals.py | d347dd1b5a6c77f1fecd91d9dfa4a51ca3bfebbe | [] | no_license | dingdl/ds-learning-projects | 501ef041e4b18408534fda3bf252fd2e1d4c322e | 7533581389f0ec1df86e7ccb22b3f92b5a4309df | refs/heads/master | 2022-12-31T12:46:14.144953 | 2020-10-20T07:26:47 | 2020-10-20T07:26:47 | 271,525,688 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,397 | py | ###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import matplotlib.pyplot as pl
import matplotlib.patches as mpatches
import numpy as np
import pandas as pd
from time import time
from sklearn.metrics import f1_score, accuracy_score
def distribution(data, transformed = False):
"""
Visualization code for displaying skewed distributions of features
"""
# Create figure
fig = pl.figure(figsize = (11,5));
# Skewed feature plotting
for i, feature in enumerate(['capital-gain','capital-loss']):
ax = fig.add_subplot(1, 2, i+1)
ax.hist(data[feature], bins = 25, color = '#00A0A0')
ax.set_title("'%s' Feature Distribution"%(feature), fontsize = 14)
ax.set_xlabel("Value")
ax.set_ylabel("Number of Records")
ax.set_ylim((0, 2000))
ax.set_yticks([0, 500, 1000, 1500, 2000])
ax.set_yticklabels([0, 500, 1000, 1500, ">2000"])
# Plot aesthetics
if transformed:
fig.suptitle("Log-transformed Distributions of Continuous Census Data Features", \
fontsize = 16, y = 1.03)
else:
fig.suptitle("Skewed Distributions of Continuous Census Data Features", \
fontsize = 16, y = 1.03)
fig.tight_layout()
fig.show()
def evaluate(results, accuracy, f1):
"""
Visualization code to display results of various learners.
inputs:
      - results: a dictionary of statistic results from 'train_predict()',
        keyed by learner name
      - accuracy: The accuracy score for the naive predictor
      - f1: The F-score for the naive predictor
"""
# Create figure
fig, ax = pl.subplots(2, 3, figsize = (15,12))
# Constants
bar_width = 0.3
colors = ['#A00000','#00A0A0','#00A000']
    # Super loop to plot six panels (2x3 grid) of data
for k, learner in enumerate(results.keys()):
for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):
for i in np.arange(3):
# Creative plot code
ax[j//3, j%3].bar(i+k*bar_width, results[learner][i][metric], width = bar_width, color = colors[k])
ax[j//3, j%3].set_xticks([0.45, 1.45, 2.45])
ax[j//3, j%3].set_xticklabels(["1%", "10%", "100%"])
ax[j//3, j%3].set_xlabel("Training Set Size")
ax[j//3, j%3].set_xlim((-0.1, 3.0))
# Add unique y-labels
ax[0, 0].set_ylabel("Time (in seconds)")
ax[0, 1].set_ylabel("Accuracy Score")
ax[0, 2].set_ylabel("F-score")
ax[1, 0].set_ylabel("Time (in seconds)")
ax[1, 1].set_ylabel("Accuracy Score")
ax[1, 2].set_ylabel("F-score")
# Add titles
ax[0, 0].set_title("Model Training")
ax[0, 1].set_title("Accuracy Score on Training Subset")
ax[0, 2].set_title("F-score on Training Subset")
ax[1, 0].set_title("Model Predicting")
ax[1, 1].set_title("Accuracy Score on Testing Set")
ax[1, 2].set_title("F-score on Testing Set")
# Add horizontal lines for naive predictors
ax[0, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 1].axhline(y = accuracy, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[0, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
ax[1, 2].axhline(y = f1, xmin = -0.1, xmax = 3.0, linewidth = 1, color = 'k', linestyle = 'dashed')
# Set y-limits for score panels
ax[0, 1].set_ylim((0, 1))
ax[0, 2].set_ylim((0, 1))
ax[1, 1].set_ylim((0, 1))
ax[1, 2].set_ylim((0, 1))
# Create patches for the legend
patches = []
for i, learner in enumerate(results.keys()):
patches.append(mpatches.Patch(color = colors[i], label = learner))
pl.legend(handles = patches, bbox_to_anchor = (-.80, 2.53), \
loc = 'upper center', borderaxespad = 0., ncol = 3, fontsize = 'x-large')
# Aesthetics
pl.suptitle("Performance Metrics for Three Supervised Learning Models", fontsize = 16, y = 1.10)
pl.tight_layout()
pl.show()
def feature_plot(importances, X_train, y_train):
# Display the five most important features
indices = np.argsort(importances)[::-1]
columns = X_train.columns.values[indices[:5]]
values = importances[indices][:5]
    # Create the plot
fig = pl.figure(figsize = (9,5))
pl.title("Normalized Weights for First Five Most Predictive Features", fontsize = 16)
pl.bar(np.arange(5), values, width = 0.6, align="center", color = '#00A000', \
label = "Feature Weight")
pl.bar(np.arange(5) - 0.3, np.cumsum(values), width = 0.2, align = "center", color = '#00A0A0', \
label = "Cumulative Feature Weight")
pl.xticks(np.arange(5), columns)
pl.xlim((-0.5, 4.5))
pl.ylabel("Weight", fontsize = 12)
pl.xlabel("Feature", fontsize = 12)
pl.legend(loc = 'upper center')
pl.tight_layout()
pl.show()
| [
"[email protected]"
] | |
8b84974960ab4670c153766baa45d4edbe1447d2 | ece82340214eb823a245a12aa15f86052ff93e90 | /app/accounts/serializers.py | 289095e73bf9ba7aa1a0066e0c2d9829c3ef6cb6 | [] | no_license | M-Moein-M/django-wechange | 37bde4f56773f144a3c2fc18e29efb2b3bda29a4 | e030e2e11e7b32d16a55a97055382b97c41c7b9c | refs/heads/master | 2023-07-28T04:52:21.975770 | 2021-09-10T06:07:17 | 2021-09-10T06:07:17 | 402,376,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | from rest_framework import serializers
from django.contrib.auth.models import User
from core.models import Profile
class UserSerializer(serializers.ModelSerializer):
"""Serializer for User model"""
class Meta:
model = User
fields = ['id', 'username', 'email']
read_only_fields = ['id']
class ProfileSerializer(serializers.ModelSerializer):
"""Serializer for Profile model"""
user = UserSerializer()
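    # Nested writable serializer: DRF won't save nested relations on its own,
    # which is why update() below applies the user fields by hand.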
class Meta:
model = Profile
fields = '__all__'
read_only_fields = ['id', 'picture']
def update(self, instance, validated_data):
user_data = validated_data.get('user')
if user_data:
user = instance.user
user.username = user_data.get('username', user.username)
user.email = user_data.get('email', user.email)
user.save()
instance.about = validated_data.get('about', instance.about)
instance.save()
return instance
| [
"[email protected]"
] | |
1e82fbb61dcfee01d4b4e3eeed7d289c4d825811 | 71c4e18cb0e059a47195a64e799629c86db54e76 | /tests/test_medleydb_pitch.py | f2e05d05b95d2078062a934a1f8b6b805580697c | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | TheagaoMIR/mirdata | 5b68a32de96033d44913e3e9259cb3e3f153cd04 | d1b7e2059762b7398e5fa7967947f4e25043c0be | refs/heads/master | 2022-05-27T21:53:57.144554 | 2019-06-26T20:04:49 | 2019-06-26T20:04:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | from __future__ import absolute_import
import os
import pytest
from mirdata import medleydb_pitch, utils
from tests.test_utils import mock_validated, mock_validator
@pytest.fixture
def mock_validate(mocker):
return mocker.patch.object(medleydb_pitch, 'validate')
@pytest.fixture
def data_home(tmpdir):
return str(tmpdir)
@pytest.fixture
def save_path(data_home):
return utils.get_save_path(data_home)
@pytest.fixture
def dataset_path(save_path):
return os.path.join(save_path, medleydb_pitch.DATASET_DIR)
def test_validate_valid(dataset_path, mocker, mock_validator):
mock_validator.return_value = (False, False)
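    # (False, False) means "no missing files, no invalid checksums" from the
    # mocked validator, so validate() should report a clean dataset.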
missing_files, invalid_checksums = medleydb_pitch.validate(dataset_path)
assert not (missing_files or invalid_checksums)
mock_validator.assert_called_once()
| [
"[email protected]"
] | |
14f645240d248751bd612aa5bfef0050ddf1eb72 | eb84ce60ea926e59ab299a2e1b74c80348bd1788 | /src/main.py | 8fe68ac16e9f10af843d3fe592fecbc3ad361fe4 | [] | no_license | zhvkgj/test-task | 83f6f2f8d3f0f6a987f8de089b7552251cfa7f44 | 598f0e9523f28de1894fb20b2bd90eabb4e342fe | refs/heads/main | 2023-03-16T12:33:55.708863 | 2021-03-07T20:26:56 | 2021-03-07T20:26:56 | 345,441,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,532 | py | import sys
from collections import namedtuple
from radon.raw import analyze, Module
from radon.metrics import mi_visit, mi_rank
from radon.complexity import cc_visit, cc_rank
from radon.cli.tools import iter_filenames
def analyze_directory(name: str, exclude=None, ignore=None):
"""Iter through filenames starting from the `name` directory
and collecting metrics info about each file into dictionary.
Optional `exclude` filters can be passed as a comma-separated
string of regexes, while `ignore` filters are a comma-separated list of
directory names to ignore.
"""
DirResult = namedtuple('Result', ['cc', 'mi', 'raw'])
dict_with_dir_results = {}
for filename in iter_filenames([name], exclude=exclude, ignore=ignore):
with open(filename) as f_obj:
source = f_obj.read()
# get Cyclomatic Complexity blocks
cyclomatic = extract_cyclomatic_complexity(cc_visit(source))
# get Maintainability score
mi = mi_rank(mi_visit(source, True))
# get raw metrics
raw = analyze(source)
dict_with_dir_results[filename] = DirResult(cc=cyclomatic, mi=mi, raw=raw)
return dict_with_dir_results
def extract_cyclomatic_complexity(blocks: list):
"""Extract cyclomatic complexity score from list of `Function` or `Class` namedtuples.
Return list of dictionaries corresponding to each function, method or class.
"""
list_of_cc = []
for block in blocks:
list_of_cc.append({'letter': block.letter, 'name': block.fullname,
'rank': cc_rank(block.complexity)})
return list_of_cc
def print_enumerated_result(dir_results: dict):
"""Prints formatted string consist of methods' and
classes' cyclomatic complexity, maintainability rank and
from `dir_results`.
"""
def format_cc_info(cc_info: list):
result = []
for cur_decl in cc_info:
result.append(f"\t{cur_decl['letter']} {cur_decl['name']} "
f"complexity rank: {cur_decl['rank']}\n")
return "".join(result)
def format_mi_info(mi_info: int):
return f"\tMaintainability rank: {mi_info}"
def format_raw_info(raw_info: Module):
return f"\tloc: {raw_info.loc}\n" \
f"\tlloc: {raw_info.lloc}\n" \
f"\tsloc: {raw_info.sloc}\n"
list_of_str_with_res = []
for num, (filename, dir_res) in enumerate(dir_results.items()):
list_of_str_with_res.append(f" {num + 1}. {filename}:\n"
f"{format_cc_info(dir_res.cc)}\n"
f"{format_mi_info(dir_res.mi)}\n"
f"{format_raw_info(dir_res.raw)}")
for res in list_of_str_with_res:
print(res)
def harvest_and_print_the_metrics(name: str, exclude, ignore):
"""Iter through filenames starting from the `name` directory
and printing metrics info about each file into dictionary.
Optional `exclude` filters can be passed as a comma-separated
string of regexes, while `ignore` filters are a comma-separated list of
directory names to ignore.
"""
metrics_res_under_dir = analyze_directory(name, exclude, ignore)
print_enumerated_result(metrics_res_under_dir)
directory = sys.argv[1] if len(sys.argv) > 1 else '.'
exclude_files = sys.argv[2] if len(sys.argv) > 2 else None
ignore_files = sys.argv[3] if len(sys.argv) > 3 else None
harvest_and_print_the_metrics(directory, exclude_files, ignore_files)
| [
"[email protected]"
] | |
dffed13817ad16821e54303c7bd4dcbc1398cce3 | 085be96bab2b8a9880d002c7cc089619983866f8 | /core/migrations/0011_auto_20200923_2215.py | f1746b5c8e6690b257844cafe309454223187ac0 | [] | no_license | Naman-Monga/forum-clone-django | 3d19b92af1bd2f2ce9409804221d4a420437c149 | 4e705f40696995a75b33bc01cb60a47923c1e60b | refs/heads/master | 2022-12-20T08:23:38.940467 | 2020-10-02T06:35:14 | 2020-10-02T06:35:14 | 297,880,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 569 | py | # Generated by Django 3.1.1 on 2020-09-23 16:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0010_userprofile_ans_upvoted'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='fname',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='userprofile',
name='lname',
field=models.CharField(max_length=100, null=True),
),
]
| [
"[email protected]"
] | |
d762d56e3d99ec560babfc608a2657f9fc44a751 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/v2019_09_01/models/__init__.py | 3167e9fc085a2acf7f385f26d438de6efe4e1593 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 5,397 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccessPolicyEntry
from ._models_py3 import Attributes
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import CloudErrorBody
from ._models_py3 import DeletedVault
from ._models_py3 import DeletedVaultListResult
from ._models_py3 import DeletedVaultProperties
from ._models_py3 import DimensionProperties
from ._models_py3 import IPRule
from ._models_py3 import Key
from ._models_py3 import KeyAttributes
from ._models_py3 import KeyCreateParameters
from ._models_py3 import KeyListResult
from ._models_py3 import KeyProperties
from ._models_py3 import LogSpecification
from ._models_py3 import MetricSpecification
from ._models_py3 import NetworkRuleSet
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import Permissions
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionItem
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import Resource
from ._models_py3 import ResourceListResult
from ._models_py3 import ServiceSpecification
from ._models_py3 import Sku
from ._models_py3 import Vault
from ._models_py3 import VaultAccessPolicyParameters
from ._models_py3 import VaultAccessPolicyProperties
from ._models_py3 import VaultCheckNameAvailabilityParameters
from ._models_py3 import VaultCreateOrUpdateParameters
from ._models_py3 import VaultListResult
from ._models_py3 import VaultPatchParameters
from ._models_py3 import VaultPatchProperties
from ._models_py3 import VaultProperties
from ._models_py3 import VirtualNetworkRule
from ._key_vault_management_client_enums import AccessPolicyUpdateKind
from ._key_vault_management_client_enums import CertificatePermissions
from ._key_vault_management_client_enums import CreateMode
from ._key_vault_management_client_enums import DeletionRecoveryLevel
from ._key_vault_management_client_enums import Enum10
from ._key_vault_management_client_enums import Enum11
from ._key_vault_management_client_enums import JsonWebKeyCurveName
from ._key_vault_management_client_enums import JsonWebKeyOperation
from ._key_vault_management_client_enums import JsonWebKeyType
from ._key_vault_management_client_enums import KeyPermissions
from ._key_vault_management_client_enums import NetworkRuleAction
from ._key_vault_management_client_enums import NetworkRuleBypassOptions
from ._key_vault_management_client_enums import PrivateEndpointConnectionProvisioningState
from ._key_vault_management_client_enums import PrivateEndpointServiceConnectionStatus
from ._key_vault_management_client_enums import Reason
from ._key_vault_management_client_enums import SecretPermissions
from ._key_vault_management_client_enums import SkuFamily
from ._key_vault_management_client_enums import SkuName
from ._key_vault_management_client_enums import StoragePermissions
from ._key_vault_management_client_enums import VaultProvisioningState
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"AccessPolicyEntry",
"Attributes",
"CheckNameAvailabilityResult",
"CloudErrorBody",
"DeletedVault",
"DeletedVaultListResult",
"DeletedVaultProperties",
"DimensionProperties",
"IPRule",
"Key",
"KeyAttributes",
"KeyCreateParameters",
"KeyListResult",
"KeyProperties",
"LogSpecification",
"MetricSpecification",
"NetworkRuleSet",
"Operation",
"OperationDisplay",
"OperationListResult",
"Permissions",
"PrivateEndpoint",
"PrivateEndpointConnection",
"PrivateEndpointConnectionItem",
"PrivateLinkResource",
"PrivateLinkResourceListResult",
"PrivateLinkServiceConnectionState",
"Resource",
"ResourceListResult",
"ServiceSpecification",
"Sku",
"Vault",
"VaultAccessPolicyParameters",
"VaultAccessPolicyProperties",
"VaultCheckNameAvailabilityParameters",
"VaultCreateOrUpdateParameters",
"VaultListResult",
"VaultPatchParameters",
"VaultPatchProperties",
"VaultProperties",
"VirtualNetworkRule",
"AccessPolicyUpdateKind",
"CertificatePermissions",
"CreateMode",
"DeletionRecoveryLevel",
"Enum10",
"Enum11",
"JsonWebKeyCurveName",
"JsonWebKeyOperation",
"JsonWebKeyType",
"KeyPermissions",
"NetworkRuleAction",
"NetworkRuleBypassOptions",
"PrivateEndpointConnectionProvisioningState",
"PrivateEndpointServiceConnectionStatus",
"Reason",
"SecretPermissions",
"SkuFamily",
"SkuName",
"StoragePermissions",
"VaultProvisioningState",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"[email protected]"
] | |
c86170ab17c8fbe779c49babd5e821ef631247f6 | f5a5d270d4d6e4a066359b0b43ba73f6511ed1de | /bin/classify.py | a0dd78c64c3d21edf05d70c6e44be078a51551d4 | [] | no_license | wkodate/Juminhyo | 72ad7f29f38880e0b4946d0793836e0c7c0f7de3 | c515e6b0ef4a19082d75ec80ffb0237aad007e95 | HEAD | 2016-09-06T02:17:35.534011 | 2014-12-13T10:42:58 | 2014-12-13T10:42:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append("/Users/wkodate/Develop/Juminhyo/classes")
import twitterapi
import classifier
argvs = sys.argv
argc = len(argvs)
def printUsage():
    print 'Usage: python %s <account>' % argvs[0]
    quit()
if argc != 2:
    printUsage()
account = argvs[1]
count = 200
dbpath = '/Users/wkodate/Develop/Juminhyo/db/test.db'
t=twitterapi.twitterapi()
print account+' is ...'
statuses = t.getUserTimeline(account, count)
normTextList = []
for s in statuses:
normTextList.append(t.normalizeTweet(s.text))
textString=twitterapi.list2String(normTextList)
cl=classifier.fisherclassifier(classifier.getwords)
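# fisherclassifier appears to follow the Fisher-method text classifier from
# Segaran's "Programming Collective Intelligence" (per-word probabilities
# combined via inverse chi-square); getwords is the feature extractor.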
cl.setdb(dbpath)
print cl.classify(textString)
| [
"[email protected]"
] | |
76074538499b5f4cf5f15686dc1159da4845c032 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/decalogu.py | 20a8a4306b46289efb9fa1b83691b0139bf4b807 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 100 | py | ii = [('CarlTFR.py', 1), ('GrimSLE.py', 1), ('CrokTPS.py', 1), ('MereHHB.py', 1), ('ThomWEC.py', 1)] | [
"[email protected]"
] | |
2f1a769560876d03cb82983a4c69c96188ce8cfa | 3fbca797017e49cd12209386fbbab5980aa04aa2 | /fabfile.py | deeb1f0ff19f411607574b2ef37a42963fe50876 | [] | no_license | fylaw/awesome-python-webapp | 65ebfb48d082b37cddaf0b33fb4cdd91c5f64c2a | 4fe83511bfcc3065b88486e0a3094f2c9b7b3e18 | refs/heads/master | 2020-03-15T04:15:53.946190 | 2018-05-22T08:16:16 | 2018-05-22T08:16:16 | 131,961,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 2,276 | py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'y'
import os, re
from datetime import datetime
from fabric.api import env, local, lcd, settings, put, run, cd, get, sudo
from invoke import task
# Server user and target hosts
env.user = 'root'
env.sudo_user = 'root'
env.hosts = ['awesome-python-webapp.mlb.com']
_TAR_FILE = 'dist-awesome.tar.gz'
def _current_path():
return os.path.abspath('.')
def _now():
return datetime.now().strftime('%y-%m-%d_%H.%M.%S')
def backup(user, password):
'''
Dump entire database on server and backup to local.
'''
dt = _now()
f = 'backup-awesome-%s.sql' % dt
with cd('/tmp'):
run('mysqldump --user=%s --password=%s --skip-opt --add-drop-table --default-character-set=utf8 --quick awesome > %s' % (user, password, f))
run('tar -czvf %s.tar.gz %s' % (f,f))
get('%s.tar.gz' % f, '%s/backup/' % _current_path())
run('rm -f %s' % f)
run('rm -f %s.tar.gz' % f)
def build():
'''
Build dist package
'''
includes = ['static', 'templates', 'favicon.ico', '*.py']
excludes = ['test', '.*', '*.pyc', '*.pyo','__pycache__']
local('rm -f dist/%s' % _TAR_FILE)
with lcd(os.path.join(os.path.abspath(''), 'www')):
cmd = ['tar', '--dereference', '-czvf', '../dist/%s' % _TAR_FILE]
cmd.extend(['--exclude=\'%s\'' % ex for ex in excludes])
cmd.extend(includes)
local(' '.join(cmd))
_REMOTE_TMP_TAR = '/tmp/%s' % _TAR_FILE
_REMOTE_BASE_DIR = '/var/www/awesome'
# Deploy to the remote server
def deploy():
newdir = 'www-%s' % datetime.now().strftime('%y-%m-%d_%H.%M.%S')
run('rm -f %s' % _REMOTE_TMP_TAR)
put('dist/%s' % _TAR_FILE, _REMOTE_TMP_TAR)
with cd(_REMOTE_BASE_DIR):
sudo('mkdir %s' % newdir)
with cd('%s/%s' % (_REMOTE_BASE_DIR, newdir)):
sudo('tar -xzvf %s' % _REMOTE_TMP_TAR)
with cd(_REMOTE_BASE_DIR):
sudo('rm -f www')
sudo('ln -s %s www' % newdir)
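        # Symlink swap gives near-atomic releases: each deploy lands in its own
        # timestamped dir and 'www' is re-pointed, so rollback is just re-linking.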
sudo('chown www-data:www-data www')
sudo('chown -R www-data:www-data %s' % newdir)
sudo('chmod a+x www/app.py')
with settings(warn_only=True):
sudo('supervisorctl stop awesome')
sudo('supervisorctl start awesome')
sudo('/usr/local/nginx/sbin/nginx -s reload') | [
"[email protected]"
] | |
46e78e315c056d7e98c0e40a4e9a68c78e975f54 | 009df7ad499b19a4df066160cf0c7d8b20355dfb | /src/the_tale/the_tale/accounts/friends/views.py | 56066cebec413f9bbcf9f42dad449e0df61434c3 | [
"BSD-3-Clause"
] | permissive | devapromix/the-tale | c0804c7475e877f12f29444ddbbba025561d3412 | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | refs/heads/develop | 2020-03-28T20:26:30.492292 | 2018-10-07T17:32:46 | 2018-10-07T17:32:46 | 149,070,887 | 1 | 0 | BSD-3-Clause | 2018-10-07T17:32:47 | 2018-09-17T04:57:50 | Python | UTF-8 | Python | false | false | 4,157 | py |
import smart_imports
smart_imports.all()
class FriendsResource(utils_resources.Resource):
@utils_decorators.login_required
@accounts_views.validate_fast_account()
def initialize(self, *args, **kwargs):
super(FriendsResource, self).initialize(*args, **kwargs)
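    # login_required plus validate_fast_account() presumably gate every handler
    # below to logged-in, fully registered (non-"fast") accounts.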
@dext_old_views.handler('', method='get')
def friends(self):
friends = prototypes.FriendshipPrototype.get_friends_for(self.account)
candidates = prototypes.FriendshipPrototype.get_candidates_for(self.account)
accounts_ids = [account.id for account in friends]
clans_ids = [model.clan_id for model in friends]
heroes = {hero.account_id: hero for hero in heroes_logic.load_heroes_by_account_ids(accounts_ids)}
clans = {clan.id: clan for clan in clans_prototypes.ClanPrototype.get_list_by_id(clans_ids)}
return self.template('friends/friends_list.html',
{'friends': friends,
'candidates': candidates,
'heroes': heroes,
'clans': clans})
@dext_old_views.handler('candidates', method='get')
def candidates(self):
candidates = prototypes.FriendshipPrototype.get_candidates_for(self.account)
accounts_ids = [account.id for account in candidates]
clans_ids = [model.clan_id for model in candidates]
heroes = {hero.account_id: hero for hero in heroes_logic.load_heroes_by_account_ids(accounts_ids)}
clans = {clan.id: clan for clan in clans_prototypes.ClanPrototype.get_list_by_id(clans_ids)}
return self.template('friends/friends_candidates.html',
{'candidates': candidates,
'heroes': heroes,
'clans': clans})
@dext_old_views.validate_argument('friend', accounts_prototypes.AccountPrototype.get_by_id, 'friends', 'Игрок не найден')
@dext_old_views.handler('request', method='get')
def request_dialog(self, friend):
if friend.id == accounts_logic.get_system_user().id:
return self.auto_error('friends.request_dialog.system_user', 'Вы не можете пригласить в друзья системного пользователя')
return self.template('friends/request_dialog.html',
{'friend': friend,
'form': forms.RequestForm()})
@dext_old_views.validate_argument('friend', accounts_prototypes.AccountPrototype.get_by_id, 'friends', 'Игрок не найден')
@dext_old_views.handler('request', method='post')
def request_friendship(self, friend):
if friend.is_fast:
return self.json_error('friends.request_friendship.fast_friend', 'Вы не можете пригласить в друзья игрока не завершившего регистрацию')
if friend.id == accounts_logic.get_system_user().id:
return self.json_error('friends.request_friendship.system_user', 'Вы не можете пригласить в друзья системного пользователя')
form = forms.RequestForm(self.request.POST)
if not form.is_valid():
return self.json_error('friends.request_friendship.form_errors', form.errors)
prototypes.FriendshipPrototype.request_friendship(self.account, friend, text=form.c.text)
return self.json_ok()
@dext_old_views.validate_argument('friend', accounts_prototypes.AccountPrototype.get_by_id, 'friends', 'Игрок не найден')
@dext_old_views.handler('accept', method='post')
def accept_friendship(self, friend):
prototypes.FriendshipPrototype.request_friendship(self.account, friend)
return self.json_ok()
@dext_old_views.validate_argument('friend', accounts_prototypes.AccountPrototype.get_by_id, 'friends', 'Игрок не найден')
@dext_old_views.handler('remove', method='post')
def remove(self, friend):
prototypes.FriendshipPrototype.remove_friendship(self.account, friend)
return self.json_ok()
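
# A hedged illustration, not part of the original module: the repeated
# `dext_old_views.validate_argument('friend', loader, prefix, message)` decorators above
# presumably resolve the raw request parameter into an AccountPrototype and abort with the
# given message when the loader finds nothing. A minimal sketch of that behaviour, with
# hypothetical names:
#
#     def validate_argument(name, loader, code_prefix, message):
#         def decorator(handler):
#             @functools.wraps(handler)
#             def wrapper(self, **kwargs):
#                 value = loader(kwargs.get(name))       # e.g. AccountPrototype.get_by_id
#                 if value is None:
#                     return self.auto_error('%s.%s.wrong_value' % (code_prefix, name), message)
#                 kwargs[name] = value                   # handler receives the loaded object
#                 return handler(self, **kwargs)
#             return wrapper
#         return decorator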
| [
"[email protected]"
] | |
00485b88423f11e87d3f8364cb04b9faf59ad77a | be72227efe777774aeb8acc3b0ef3133803871c1 | /timelapse.py | cc6fe9622ad6267e45f370836742cab7bedba8f4 | [] | no_license | nicgaudio/foundry-bot | 2f634f1b128fb8a02b5b39405e6cbe5520cd20c5 | 02b266400c98ec606a587721a1130e1229ccb484 | refs/heads/master | 2022-04-17T01:05:07.017995 | 2020-04-15T16:29:52 | 2020-04-15T16:29:52 | 255,972,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | #Timelapse script for 3D print captures
# Run on a Raspberry Pi
# Import peripheral modules
# from picamera import PiCamera
import dropbox
from dotenv import load_dotenv
#Standard imports
import os
from os import system
import argparse
from time import sleep
import logging as log
#Load .env
load_dotenv()
# #Define arg parser and arguments
# parser = argparse.ArgumentParser(description='Parser to define timelapse capture parameters')
# parser.add_argument('-p', help="Pass the directory name for the images to be written to (typically the print name).", dest='img_out_path', required=True)
# parser.add_argument('-t', help="Pass the estimated time of the print in 'DD:HH:MM:SS' format.", dest='print_time_est', required=True)
# args = parser.parse_args()
#Get Dropbox envs and instantiate class obj
DB_ACCESS_TOKEN = os.getenv('DROPBOX_ACCESS_TOKEN')
DB_OUTPUT_PATH = os.getenv('DROPBOX_OUTPUT_PATH')
dbx = dropbox.Dropbox(DB_ACCESS_TOKEN)
print(DB_ACCESS_TOKEN)
print(DB_OUTPUT_PATH)
# #Display useful info
# print('Image filepath: {}'.format(args.img_out_path))
# print('Print time: {}'.format(args.print_time_est))
# #Get args
# img_out_path = args.img_out_path
# print_time_est = args.print_time_est
# #Check if output directory exists. If not, create it!
# if not os.path.exists(img_out_path):
#     print('')
#     print('Output path {} does not exist. Creating it!'.format(img_out_path))
#     os.mkdir(img_out_path)
# camera = PiCamera()
# camera.resolution = (1024, 768)
# for i in range(500):
#     camera.capture('{0}/{1}_{2:04d}.jpg'.format(img_out_path, img_out_path, i))
#     print('Image {0}_{1:04d}.jpg saved to {0}/{0}_{1:04d}.jpg'.format(img_out_path, i))
#     sleep(30)
# print('')
# print('Converting images to .gif. This may take a moment')
# gif_filename = '{0}/{0}.gif'.format(img_out_path)
# system('convert -delay 10 -loop 0 {0}/{0}*.jpg {1}'.format(img_out_path, gif_filename))
# print('')
# print('Uploading file {} to Dropbox.'.format(gif_filename))
# with open(gif_filename) as f:
#     dbx.files_upload(f.read(), '{0}/{1}'.format(DB_OUTPUT_PATH, gif_filename))
# print('')
# print('Finished upload!')
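
# A hedged note, not part of the original script: `files_upload` in the Dropbox v2 SDK
# expects bytes and a destination path starting with '/', so if the commented-out upload
# above is re-enabled, the .gif should be opened in binary mode. A minimal sketch,
# assuming DB_OUTPUT_PATH already begins with '/':
#
#     with open(gif_filename, 'rb') as f:
#         dbx.files_upload(f.read(),
#                          '{0}/{1}'.format(DB_OUTPUT_PATH, gif_filename),
#                          mode=dropbox.files.WriteMode('overwrite'))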
| [
"[email protected]"
] | |
d84ce3c2229a57a35bcbab2aa2380cb1dbd8ac6a | 06e0c89781ae9c07a55090c43d8609e9dfefbb6f | /School_13/School_13/urls.py | e2644ac2d642e915d18468194c0d27885be0f873 | [] | no_license | mfarzamalam/django | d6d4302910301ae3e135a95a9982f3bd01218260 | 935a60d3ac874b7adb4287b4c2d172b89c6551b9 | refs/heads/master | 2023-04-10T21:06:11.601436 | 2021-04-27T21:31:47 | 2021-04-27T21:31:47 | 345,129,992 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | """School_13 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('staff.urls')),
]
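
# A hedged companion sketch, not part of this file: `include('staff.urls')` above implies
# a `staff/urls.py` module along these lines; the view name is hypothetical:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.index, name='index'),
#     ]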
| [
"[email protected]"
] |