# -*- coding: utf-8 -*-
"""
sogou (Videos)
@website https://v.sogou.com
@provide-api no
@using-api no
@results HTML
@stable no
@parse url, title, content, thumbnail
"""
from lxml.html.soupparser import fromstring
from searx.url_utils import urlencode
categories = ['videos']
paging = True
search_url = 'https://v.sogou.com/v?{query}&page={offset}'
# do search-request
def request(query, params):
offset = params['pageno']
# query and paging
params['url'] = search_url.format(query=urlencode({'query': query}),
offset=offset)
return params
# get response from search-request
def response(resp):
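    # sentry is imported lazily here, presumably to avoid a circular import with searx.webapp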
from searx.webapp import sentry
results = []
dom = fromstring(resp.text)
for result in dom.xpath('//div[@class="srch-all-result"]//li[@class="sort_lst_li"]'):
try:
url = 'https://v.sogou.com' + result.xpath('./a')[0].attrib.get('href')
title = result.xpath('./a')[0].attrib.get('title')
content = title
thumbnail = result.xpath('./a/img')[0].attrib.get('src')
results.append({'url': url,
'title': title,
'content': content,
'thumbnail': thumbnail,
'template': 'videos.html'})
        except Exception:
sentry.captureException()
return results
|
from io import BytesIO
import random
import tempfile
import pytest
from pybloom2.pybloom import BloomFilter, ScalableBloomFilter
TEST_SIZE = 12345
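# Each filter class is round-tripped through both a real temporary file and an
# in-memory buffer; the stacked parametrize decorators yield all four combinations.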
@pytest.mark.parametrize("file_class", [
tempfile.TemporaryFile,
BytesIO,
])
@pytest.mark.parametrize("filter_class,args", [
(BloomFilter, (TEST_SIZE,)),
(ScalableBloomFilter, ()),
])
def test_serialization(filter_class, args, file_class):
expected = set([random.randint(0, 10000100) for _ in range(TEST_SIZE)])
filter = filter_class(*args)
for item in expected:
filter.add(item)
file_obj = file_class()
filter.tofile(file_obj)
del filter
file_obj.seek(0)
filter = filter_class.fromfile(file_obj)
for item in expected:
assert item in filter
file_obj.close()
|
import time
import pyaudio
import wave
import sys
import os
from google_speech import Speech
class Call_APP():
def __init__(self, package_name):
self.chunk = 1024
result = self.find_name(package_name)
print(package_name, result)
        if result is None:
sys.exit(0)
self.call_name = result[1]
self.file_name = result[0]
self.f = wave.open(self.file_name,"rb")
self.p = pyaudio.PyAudio()
        self.stream = self.p.open(format=self.p.get_format_from_width(self.f.getsampwidth()),
channels = self.f.getnchannels(),
rate = self.f.getframerate(),
output = True)
self.lang = 'ko'
self.sox_effects = ("speed", "1.1")
def find_name(self, package_name):
path = '/home/kimsoohyun/00-Research/02-Graph/06-appexecute/wav_file/'
bixby = f'{package_name}_bixby.wav'
google = f'{package_name}_google.wav'
if os.path.isfile(os.path.join(path,bixby)):
return os.path.join(path,bixby), os.path.join(path,'hibixby.wav')
elif os.path.isfile(os.path.join(path, google)):
return os.path.join(path,google), os.path.join(path,'okgoogle.wav')
else:
return None
def call_bixby(self):
f = wave.open(self.call_name,"rb")
p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels = f.getnchannels(),
rate = f.getframerate(),
output = True)
data = f.readframes(self.chunk)
while data:
stream.write(data)
data = f.readframes(self.chunk)
stream.stop_stream()
p.terminate()
def call_appname(self):
f = wave.open(self.file_name,"rb")
p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels = f.getnchannels(),
rate = f.getframerate(),
output = True)
data = f.readframes(self.chunk)
while data:
stream.write(data)
data = f.readframes(self.chunk)
stream.stop_stream()
p.terminate()
def exit_appname(self):
        f = wave.open(self.file_name,"rb")
p = pyaudio.PyAudio()
        stream = p.open(format=p.get_format_from_width(f.getsampwidth()),
channels = f.getnchannels(),
rate = f.getframerate(),
output = True)
data = f.readframes(self.chunk)
while data:
stream.write(data)
data = f.readframes(self.chunk)
stream.stop_stream()
p.terminate()
def start_main(self):
self.call_bixby()
time.sleep(0.5)
self.call_appname()
def end_main(self):
self.call_bixby()
time.sleep(0.5)
self.exit_appname()
def main(self, startend):
if startend == 'start':
self.start_main()
elif startend == "end":
self.end_main()
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser(description='Play recorded voice prompts to start or end an app.')
parser.add_argument('--packagename','-p',
type=str,
required=True,
help='input appname')
parser.add_argument('--startend','-s',
type=str,
required=True,
help='input start or end message')
args = parser.parse_args()
c = Call_APP(args.packagename)
c.main(args.startend)
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from google.api_core import exceptions
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project="test-project", connection=None):
from google.cloud.bigquery.client import Client
if connection is None:
connection = _make_connection()
client = Client(project=project, credentials=_make_credentials(), _http=object())
client._connection = connection
return client
def _make_connection(*responses):
import google.cloud.bigquery._http
from google.cloud.exceptions import NotFound
mock_conn = mock.create_autospec(google.cloud.bigquery._http.Connection)
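    # any request beyond the scripted responses raises NotFound, so unexpected extra calls fail loudly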
mock_conn.api_request.side_effect = list(responses) + [NotFound("miss")]
return mock_conn
def _make_retriable_exception():
return exceptions.TooManyRequests(
"retriable exception", errors=[{"reason": "rateLimitExceeded"}]
)
def _make_job_resource(
creation_time_ms=1437767599006,
started_time_ms=1437767600007,
ended_time_ms=1437767601008,
started=False,
ended=False,
etag="abc-def-hjk",
endpoint="https://bigquery.googleapis.com",
job_type="load",
job_id="a-random-id",
location="US",
project_id="some-project",
user_email="[email protected]",
):
resource = {
"status": {"state": "PENDING"},
"configuration": {job_type: {}},
"statistics": {"creationTime": creation_time_ms, job_type: {}},
"etag": etag,
"id": "{}:{}".format(project_id, job_id),
"jobReference": {
"projectId": project_id,
"jobId": job_id,
"location": location,
},
"selfLink": "{}/bigquery/v2/projects/{}/jobs/{}".format(
endpoint, project_id, job_id
),
"user_email": user_email,
}
if started or ended:
resource["statistics"]["startTime"] = started_time_ms
resource["status"]["state"] = "RUNNING"
if ended:
resource["statistics"]["endTime"] = ended_time_ms
resource["status"]["state"] = "DONE"
if job_type == "query":
resource["configuration"]["query"]["destinationTable"] = {
"projectId": project_id,
"datasetId": "_temp_dataset",
"tableId": "_temp_table",
}
return resource
class _Base(unittest.TestCase):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import TableReference
ENDPOINT = "https://bigquery.googleapis.com"
PROJECT = "project"
SOURCE1 = "http://example.com/source1.csv"
DS_ID = "dataset_id"
DS_REF = DatasetReference(PROJECT, DS_ID)
TABLE_ID = "table_id"
TABLE_REF = TableReference(DS_REF, TABLE_ID)
JOB_ID = "JOB_ID"
JOB_TYPE = "unknown"
KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _setUpConstants(self):
import datetime
from google.cloud._helpers import UTC
self.WHEN_TS = 1437767599.006
self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)
self.ETAG = "ETAG"
self.FULL_JOB_ID = "%s:%s" % (self.PROJECT, self.JOB_ID)
self.RESOURCE_URL = "{}/bigquery/v2/projects/{}/jobs/{}".format(
self.ENDPOINT, self.PROJECT, self.JOB_ID
)
self.USER_EMAIL = "[email protected]"
def _table_ref(self, table_id):
from google.cloud.bigquery.table import TableReference
return TableReference(self.DS_REF, table_id)
def _make_resource(self, started=False, ended=False, location="US"):
self._setUpConstants()
return _make_job_resource(
creation_time_ms=int(self.WHEN_TS * 1000),
started_time_ms=int(self.WHEN_TS * 1000),
ended_time_ms=int(self.WHEN_TS * 1000) + 1000000,
started=started,
ended=ended,
etag=self.ETAG,
endpoint=self.ENDPOINT,
job_type=self.JOB_TYPE,
job_id=self.JOB_ID,
project_id=self.PROJECT,
user_email=self.USER_EMAIL,
location=location,
)
def _verifyInitialReadonlyProperties(self, job):
# root elements of resource
self.assertIsNone(job.etag)
self.assertIsNone(job.self_link)
self.assertIsNone(job.user_email)
# derived from resource['statistics']
self.assertIsNone(job.created)
self.assertIsNone(job.started)
self.assertIsNone(job.ended)
self.assertIsNone(job.transaction_info)
# derived from resource['status']
self.assertIsNone(job.error_result)
self.assertIsNone(job.errors)
self.assertIsNone(job.state)
def _verifyReadonlyResourceProperties(self, job, resource):
from datetime import timedelta
statistics = resource.get("statistics", {})
if "creationTime" in statistics:
self.assertEqual(job.created, self.WHEN)
else:
self.assertIsNone(job.created)
if "startTime" in statistics:
self.assertEqual(job.started, self.WHEN)
else:
self.assertIsNone(job.started)
if "endTime" in statistics:
self.assertEqual(job.ended, self.WHEN + timedelta(seconds=1000))
else:
self.assertIsNone(job.ended)
if "etag" in resource:
self.assertEqual(job.etag, self.ETAG)
else:
self.assertIsNone(job.etag)
if "selfLink" in resource:
self.assertEqual(job.self_link, self.RESOURCE_URL)
else:
self.assertIsNone(job.self_link)
if "user_email" in resource:
self.assertEqual(job.user_email, self.USER_EMAIL)
else:
self.assertIsNone(job.user_email)
|
# In Python 3, "/" is true division and mixed int/float arithmetic silently promotes the int to float.
print(3 / 4)
print(9.87654000)
print(8 / 2)
print(6 * 7.0)
print(4 + 1.65)
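# Expected output: every expression above yields a float:
# 0.75
# 9.87654
# 4.0
# 42.0
# 5.65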
|
#!/bin/env python
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance with the License.
A copy of the License is located at
http://www.apache.org/licenses/LICENSE-2.0
or in the "license" file accompanying this file. This file is distributed
on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. See the License for the specific language governing
permissions and limitations under the License.
"""
import argparse
import errno
import json
import logging
import os
import sys
from bson.json_util import dumps
from pymongo import MongoClient
from pymongo.errors import (ConnectionFailure, OperationFailure,
ServerSelectionTimeoutError)
class AutovivifyDict(dict):
"""N depth defaultdict."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
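# Illustrative usage (not from the source): nested keys spring into existence on
# first access, e.g.
#   d = AutovivifyDict()
#   d['db']['collection']['indexes'] = []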
class DocumentDbLimits(object):
"""
DocumentDB limits
"""
def __init__(self):
pass
COLLECTION_QUALIFIED_INDEX_NAME_MAX_LENGTH = 63
COLLECTION_NAME_MAX_LENGTH = 57
COMPOUND_INDEX_MAX_KEYS = 32
DATABASE_NAME_MAX_LENGTH = 63
FULLY_QUALIFIED_INDEX_NAME_MAX_LENGTH = 127
INDEX_KEY_MAX_LENGTH = 2048
INDEX_NAME_MAX_LENGTH = 63
NAMESPACE_MAX_LENGTH = 120
class DocumentDbUnsupportedFeatures(object):
"""
List of unsupported features in DocumentDB
"""
def __init__(self):
pass
UNSUPPORTED_INDEX_TYPES = [
'text', '2d', '2dsphere', 'geoHaystack', 'hashed'
]
    UNSUPPORTED_INDEX_OPTIONS = ['partialFilterExpression', 'storageEngine',
                                 'collation', 'dropDuplicates']
UNSUPPORTED_COLLECTION_OPTIONS = ['capped']
class IndexToolConstants(object):
"""
constants used in this tool
"""
def __init__(self):
pass
DATABASES_TO_SKIP = ['admin', 'local', 'system']
METADATA_FILE_SUFFIX_PATTERN = 'metadata.json'
CONNECT_TIMEOUT = 5000
EXCEEDED_LIMITS = 'exceeded_limits'
FILE_PATH = 'filepath'
ID = '_id_'
INDEXES = 'indexes'
INDEX_DEFINITION = 'definition'
INDEX_NAME = 'name'
INDEX_VERSION = 'v'
INDEX_KEY = 'key'
INDEX_NAMESPACE = 'ns'
NAMESPACE = 'ns'
OPTIONS = 'options'
UNSUPPORTED_INDEX_OPTIONS_KEY = 'unsupported_index_options'
UNSUPPORTED_COLLECTION_OPTIONS_KEY = 'unsupported_collection_options'
UNSUPPORTED_INDEX_TYPES_KEY = 'unsupported_index_types'
class DocumentDbIndexTool(IndexToolConstants):
"""
    Traverses a mongodump directory structure and performs discovery and index restore functions.
"""
def __init__(self, args):
super(DocumentDbIndexTool, self).__init__()
self.args = args
log_level = logging.INFO
if self.args.debug is True:
log_level = logging.DEBUG
root_logger = logging.getLogger()
root_logger.setLevel(log_level)
root_handler = logging.StreamHandler(sys.stdout)
root_handler.setLevel(log_level)
formatter = logging.Formatter('%(asctime)s: %(message)s')
root_handler.setFormatter(formatter)
root_logger.addHandler(root_handler)
def _mkdir_p(self, filepath):
try:
os.makedirs(filepath)
except OSError as ose:
if ose.errno == errno.EEXIST and os.path.isdir(filepath):
pass
else:
raise
def _get_db_connection(self, host, port, tls, tls_ca_file, username,
password, auth_db):
"""Connect to instance, returning a connection"""
logging.debug("Connecting to instance at %s:%s", host, port)
mongodb_client = MongoClient(
host=host,
port=port,
ssl=tls,
ssl_ca_certs=tls_ca_file,
connect=True,
connectTimeoutMS=DocumentDbIndexTool.CONNECT_TIMEOUT,
serverSelectionTimeoutMS=DocumentDbIndexTool.CONNECT_TIMEOUT)
# force the client to actually connect
mongodb_client.admin.command('ismaster')
if password is not None:
# deprecated in 3.6, but allows us to handle both auth types gracefully
mongodb_client[auth_db].authenticate(name=username,
password=password)
logging.info("Successfully authenticated to database: %s", auth_db)
logging.info("Successfully connected to instance %s:%s", host, port)
return mongodb_client
def _get_compatible_metadata(self, metadata, compatibility_issues):
compatible_metadata = metadata.copy()
for db_name in compatibility_issues:
if self.EXCEEDED_LIMITS in compatibility_issues[db_name]:
del compatible_metadata[db_name]
continue
for collection_name in compatibility_issues[db_name]:
if self.UNSUPPORTED_COLLECTION_OPTIONS_KEY in compatibility_issues[
db_name][collection_name]:
del compatible_metadata[db_name][collection_name]
continue
if self.EXCEEDED_LIMITS in compatibility_issues[db_name][
collection_name]:
del compatible_metadata[db_name][collection_name]
continue
for index_name in compatibility_issues[db_name][
collection_name]:
del compatible_metadata[db_name][collection_name][
self.INDEXES][index_name]
        return compatible_metadata
def _get_metadata_from_file(self, filepath):
"""
Given a path to a metadata file, return the JSON data structure parsed
from the contents, formatted.
"""
with open(filepath, 'rt') as metadata_file:
logging.debug("Getting metadata from file: %s", filepath)
file_metadata = json.load(metadata_file)
collection_metadata = AutovivifyDict()
indexes = file_metadata.get(self.INDEXES, None)
# every collection should have at least the _id_ index. If no indexes are listed, the
# metadata document is malformed and we should error out
if indexes is None:
raise Exception(
"Malformed metadata document {} has no indexes.".format(
filepath))
first_index = indexes[0]
if self.NAMESPACE in first_index:
first_index_namespace = first_index[self.NAMESPACE]
(db_name, collection_name) = first_index_namespace.split('.', 1)
else:
db_name = os.path.basename(os.path.dirname(filepath))
thisFileName = os.path.basename(filepath)
collection_name = thisFileName[0:(len(thisFileName)-len(self.METADATA_FILE_SUFFIX_PATTERN)-1)]
collection_metadata[self.FILE_PATH] = filepath
for index in indexes:
index_name = index.pop(self.INDEX_NAME)
collection_metadata[self.INDEXES][index_name] = index
if self.OPTIONS in file_metadata:
collection_metadata[self.OPTIONS] = file_metadata[
self.OPTIONS]
return db_name, collection_name, collection_metadata
def _find_metadata_files(self, start_dir):
"""Recurse through subdirectories looking for metadata files"""
metadata_files = []
for (dirpath, dirnames, files) in os.walk(start_dir):
for filename in files:
if filename.endswith(self.METADATA_FILE_SUFFIX_PATTERN):
metadata_files.append(os.path.join(dirpath, filename))
return metadata_files
def _dump_indexes_from_server(self, connection, output_dir, dry_run=False):
"""
Discover all indexes in a mongodb server and dump them
to files using the mongodump format
"""
logging.info("Retrieving indexes from server...")
try:
database_info = connection.admin.command({'listDatabases': 1})
for database_doc in database_info['databases']:
database_name = database_doc['name']
logging.debug("Database: %s", database_name)
if database_name in self.DATABASES_TO_SKIP:
continue
database_path = os.path.join(output_dir, database_name)
if dry_run is not True:
self._mkdir_p(database_path)
# Write out each collection's stats in this database
for collection_name in connection[
database_name].list_collection_names():
logging.debug("Collection: %s", collection_name)
collection_metadata = {}
collection_indexes = connection[database_name][
collection_name].list_indexes()
thisIndexes = []
for thisIndex in collection_indexes:
if "ns" not in thisIndex:
# mdb44+ eliminated the "ns" attribute
thisIndex["ns"] = "{}.{}".format(database_name,collection_name)
thisIndexes.append(thisIndex)
collection_metadata[self.INDEXES] = thisIndexes
collection_metadata[self.OPTIONS] = connection[
database_name][collection_name].options()
collection_metadata_filename = "{}.{}".format(
collection_name, self.METADATA_FILE_SUFFIX_PATTERN)
collection_metadata_filepath = os.path.join(
database_path, collection_metadata_filename)
if dry_run is True:
logging.info("\n%s.%s\n%s",
database_name, collection_name,
dumps(collection_metadata))
else:
logging.debug(
"Writing collection metadata for collection: %s",
collection_name)
with open(collection_metadata_filepath,
'wt') as collection_metadata_file:
collection_metadata_file.write(
dumps(collection_metadata,
separators=(',', ':')))
logging.info(
"Completed writing index metadata to local folder: %s",
output_dir)
except Exception:
logging.exception("Failed to dump indexes from server")
sys.exit()
def get_metadata(self, start_path):
"""
Recursively search the supplied start_path, discovering all JSON metadata files and adding the
information to our metadata data structure.
"""
try:
logging.debug(
"Beginning recursive discovery of metadata files, starting at %s",
start_path)
metadata_files = self._find_metadata_files(start_path)
if metadata_files == []:
logging.error("No metadata files found beneath directory: %s",
start_path)
sys.exit()
logging.debug("Metadata files found: {}%s", metadata_files)
metadata = AutovivifyDict()
for filepath in metadata_files:
(db_name, collection_name,
collection_metadata) = self._get_metadata_from_file(filepath)
metadata[db_name][collection_name] = collection_metadata
return metadata
except Exception:
logging.exception("Failed to discover dump indexes")
sys.exit()
def find_compatibility_issues(self, metadata):
"""Check db, collection and index data in metadata files for compatibility with DocumentDB"""
compatibility_issues = AutovivifyDict()
for db_name in metadata:
db_metadata = metadata[db_name]
if len(db_name) > DocumentDbLimits.DATABASE_NAME_MAX_LENGTH:
message = 'Database name greater than {} characters'.format(
DocumentDbLimits.DATABASE_NAME_MAX_LENGTH)
compatibility_issues[db_name][
self.EXCEEDED_LIMITS][message] = db_name
for collection_name in metadata[db_name]:
collection_metadata = db_metadata[collection_name]
if len(collection_name
) > DocumentDbLimits.COLLECTION_NAME_MAX_LENGTH:
message = 'Collection name greater than {} characters'.format(
DocumentDbLimits.COLLECTION_NAME_MAX_LENGTH)
compatibility_issues[db_name][collection_name][
self.EXCEEDED_LIMITS][message] = collection_name
collection_namespace = '{}.{}'.format(db_name, collection_name)
# <db>.<collection>
if len(collection_namespace
) > DocumentDbLimits.NAMESPACE_MAX_LENGTH:
message = 'Namespace greater than {} characters'.format(
DocumentDbLimits.NAMESPACE_MAX_LENGTH)
compatibility_issues[db_name][collection_name][
self.EXCEEDED_LIMITS][message] = collection_namespace
if self.OPTIONS in collection_metadata:
for option_key in collection_metadata[self.OPTIONS]:
if option_key in DocumentDbUnsupportedFeatures.UNSUPPORTED_COLLECTION_OPTIONS and collection_metadata[self.OPTIONS][option_key] is True:
if self.UNSUPPORTED_COLLECTION_OPTIONS_KEY not in compatibility_issues[
db_name][collection_name]:
compatibility_issues[db_name][collection_name][
self.
UNSUPPORTED_COLLECTION_OPTIONS_KEY] = []
compatibility_issues[db_name][collection_name][
self.
UNSUPPORTED_COLLECTION_OPTIONS_KEY].append(
option_key)
for index_name in collection_metadata[self.INDEXES]:
index = collection_metadata[self.INDEXES][index_name]
# <collection>$<index>
collection_qualified_index_name = '{}${}'.format(
collection_name, index_name)
if len(
collection_qualified_index_name
) > DocumentDbLimits.COLLECTION_QUALIFIED_INDEX_NAME_MAX_LENGTH:
message = '<collection>$<index> greater than {} characters'.format(
DocumentDbLimits.
COLLECTION_QUALIFIED_INDEX_NAME_MAX_LENGTH)
compatibility_issues[db_name][collection_name][
index_name][self.EXCEEDED_LIMITS][
message] = collection_qualified_index_name
# <db>.<collection>$<index>
fully_qualified_index_name = '{}${}'.format(
collection_namespace, index_name)
if len(
fully_qualified_index_name
) > DocumentDbLimits.FULLY_QUALIFIED_INDEX_NAME_MAX_LENGTH:
                        message = '<db>.<collection>$<index> greater than {} characters'.format(
                            DocumentDbLimits.FULLY_QUALIFIED_INDEX_NAME_MAX_LENGTH)
compatibility_issues[db_name][collection_name][
index_name][self.EXCEEDED_LIMITS][
message] = fully_qualified_index_name
# Check for indexes with too many keys
if len(index) > DocumentDbLimits.COMPOUND_INDEX_MAX_KEYS:
message = 'Index contains more than {} keys'.format(
DocumentDbLimits.COMPOUND_INDEX_MAX_KEYS)
compatibility_issues[db_name][collection_name][
index_name][self.EXCEEDED_LIMITS][message] = len(
index)
for key_name in index:
# Check for index key names that are too long
if len(key_name
) > DocumentDbLimits.INDEX_KEY_MAX_LENGTH:
message = 'Key name greater than {} characters'.format(
DocumentDbLimits.INDEX_KEY_MAX_LENGTH)
compatibility_issues[db_name][collection_name][
index_name][
self.EXCEEDED_LIMITS][message] = key_name
# Check for unsupported index options like collation
if key_name in DocumentDbUnsupportedFeatures.UNSUPPORTED_INDEX_OPTIONS:
if self.UNSUPPORTED_INDEX_OPTIONS_KEY not in compatibility_issues[
db_name][collection_name][index_name]:
compatibility_issues[db_name][collection_name][
index_name][
self.
UNSUPPORTED_INDEX_OPTIONS_KEY] = []
compatibility_issues[db_name][collection_name][
index_name][
self.UNSUPPORTED_INDEX_OPTIONS_KEY].append(
key_name)
# Check for unsupported index types like text
if key_name == self.INDEX_KEY:
for index_key_name in index[key_name]:
key_value = index[key_name][index_key_name]
if key_value in DocumentDbUnsupportedFeatures.UNSUPPORTED_INDEX_TYPES:
compatibility_issues[db_name][
collection_name][index_name][
self.
UNSUPPORTED_INDEX_TYPES_KEY] = key_value
return compatibility_issues
def _restore_indexes(self, connection, metadata):
"""Restore compatible indexes to a DocumentDB instance"""
for db_name in metadata:
for collection_name in metadata[db_name]:
for index_name in metadata[db_name][collection_name][
self.INDEXES]:
# convert the keys dict to a list of tuples as pymongo requires
index_keys = metadata[db_name][collection_name][
self.INDEXES][index_name][self.INDEX_KEY]
keys_to_create = []
index_options = {}
index_options[self.INDEX_NAME] = index_name
for key in index_keys:
index_direction = index_keys[key]
if type(index_direction) is float:
index_direction = int(index_direction)
elif type(index_direction) is dict and '$numberInt' in index_direction:
index_direction = int(index_direction['$numberInt'])
elif type(index_direction) is dict and '$numberDouble' in index_direction:
index_direction = int(float(index_direction['$numberDouble']))
keys_to_create.append((key, index_direction))
for k in metadata[db_name][collection_name][
self.INDEXES][index_name]:
if k != self.INDEX_KEY and k != self.INDEX_VERSION:
# this key is an additional index option
index_options[k] = metadata[db_name][
collection_name][self.INDEXES][index_name][k]
if self.args.dry_run is True:
logging.info(
"(dry run) %s.%s: would attempt to add index: %s",
db_name, collection_name, index_name)
logging.info(" (dry run) index options: %s", index_options)
logging.info(" (dry run) index keys: %s", keys_to_create)
else:
logging.debug("Adding index %s -> %s", keys_to_create,
index_options)
database = connection[db_name]
collection = database[collection_name]
collection.create_index(keys_to_create,
**index_options)
logging.info("%s.%s: added index: %s", db_name,
collection_name, index_name)
def run(self):
"""Entry point
"""
metadata = None
compatibility_issues = None
connection = None
# get a connection to our source mongodb or destination DocumentDb
if self.args.dump_indexes is True or self.args.restore_indexes is True:
try:
connection = self._get_db_connection(
host=self.args.host,
port=self.args.port,
tls=self.args.tls,
tls_ca_file=self.args.tls_ca_file,
username=self.args.username,
password=self.args.password,
auth_db=self.args.auth_db)
except (ConnectionFailure, ServerSelectionTimeoutError,
OperationFailure) as cex:
logging.error("Connection to instance %s:%s failed: %s",
self.args.host, self.args.port, cex)
sys.exit()
# dump indexes from a MongoDB server
if self.args.dump_indexes is True:
self._dump_indexes_from_server(connection, self.args.dir,
self.args.dry_run)
sys.exit()
# all non-dump operations require valid source metadata
try:
metadata = self.get_metadata(self.args.dir)
compatibility_issues = self.find_compatibility_issues(metadata)
except Exception as ex:
logging.error("Failed to load collection metadata: %s", ex)
sys.exit()
# Apply indexes to a DocumentDB instance
if self.args.restore_indexes is True:
metadata_to_restore = metadata
if self.args.skip_incompatible is not True:
if compatibility_issues:
logging.error(
"incompatible indexes exist and --skip-incompatible not specified."
)
sys.exit()
else:
metadata_to_restore = self._get_compatible_metadata(
metadata, compatibility_issues)
self._restore_indexes(connection, metadata_to_restore)
sys.exit()
        # find and print a summary or detail of compatibility issues
if self.args.show_issues is True:
if not compatibility_issues:
logging.info("No incompatibilities found.")
else:
logging.info(
json.dumps(compatibility_issues,
sort_keys=True,
indent=4,
separators=(',', ': ')))
sys.exit()
# print all compatible (restorable) collections and indexes
if self.args.show_compatible is True:
compatible_metadata = self._get_compatible_metadata(
metadata, compatibility_issues)
logging.info(
json.dumps(compatible_metadata,
sort_keys=True,
indent=4,
separators=(',', ': ')))
def main():
"""
parse command line arguments and
"""
parser = argparse.ArgumentParser(
description='Dump and restore indexes from MongoDB to DocumentDB.')
parser.add_argument('--debug',
required=False,
action='store_true',
help='output debugging information')
parser.add_argument(
'--dry-run',
required=False,
action='store_true',
help='Perform processing, but do not actually restore indexes')
parser.add_argument('--dir',
required=True,
type=str,
help='dump to or restore from DIR')
parser.add_argument('--show-compatible',
required=False,
action='store_true',
dest='show_compatible',
help='output all compatible indexes (without change)')
parser.add_argument(
'--show-issues',
required=False,
action='store_true',
dest='show_issues',
help='output a detailed structure of compatibility issues')
parser.add_argument('--dump-indexes',
required=False,
action='store_true',
help='Dump indexes from the specified host/port')
parser.add_argument(
'--restore-indexes',
required=False,
action='store_true',
help='Restore indexes found in metadata to the specified host/port')
parser.add_argument(
'--skip-incompatible',
required=False,
action='store_true',
help='Skip incompatible indexes while dumping or restoring')
parser.add_argument('--host',
required=False,
type=str,
default='localhost',
help='connect to host HOST (default: localhost)')
parser.add_argument('--port',
required=False,
type=int,
default=27017,
help='connect to port PORT (default: 27017)')
parser.add_argument('--username',
required=False,
type=str,
help='authenticate with username USERNAME')
parser.add_argument('--password',
required=False,
type=str,
help='authenticate with password PASSWORD')
parser.add_argument(
'--auth-db',
required=False,
type=str,
dest='auth_db',
help='authenticate using database AUTH_DB (default: admin)')
parser.add_argument('--tls',
required=False,
action='store_true',
help='connect using TLS')
parser.add_argument('--tls-ca-file',
required=False,
type=str,
help='path to CA file used for TLS connection')
args = parser.parse_args()
if not (args.dump_indexes or args.restore_indexes or args.show_issues
or args.show_compatible):
message = "must specify one of [--dump-indexes | --restore-indexes | --show-issues | --show-compatible]"
parser.error(message)
if args.dir is not None:
if not os.path.isdir(args.dir):
parser.error("--dir must specify a directory")
if args.dump_indexes is True:
if args.restore_indexes is True:
parser.error("cannot dump and restore indexes simultaneously")
if any([args.username, args.password]):
if not all([args.username, args.password]):
parser.error(
"both --username amd --password are required if providing MongoDB credentials."
)
if args.auth_db is not None and not all([args.username, args.password]):
parser.error("--auth-db requires both --username and --password.")
if args.auth_db is None and args.username is not None:
args.auth_db = 'admin'
indextool = DocumentDbIndexTool(args)
indextool.run()
if __name__ == "__main__":
main()
|
import cape_privacy as cape
from cape_privacy.spark import dtypes
from cape_privacy.spark.transformations import ColumnRedact
from cape_privacy.spark.transformations import DatePerturbation
from cape_privacy.spark.transformations import NumericPerturbation
from cape_privacy.spark.transformations import NumericRounding
from cape_privacy.spark.transformations import Tokenizer
from pyspark import sql
from pyspark.sql import functions
from dataset import load_dataset
# Set up your SparkSession as usual, but configure it for use with Cape.
# We do this because some transformations expect Arrow to be enabled.
sess = sql.SparkSession.builder \
.appName("cape.tutorial.maskPersonalInformation") \
.getOrCreate()
sess = cape.spark.configure_session(sess)
# Load Spark DataFrame
df = load_dataset(sess)
print("Original Dataset:")
df.show()
# Define the transformations
tokenize = Tokenizer(max_token_len=10, key=b"my secret")
perturb_numeric = NumericPerturbation(dtype=dtypes.Integer, min=-10, max=10)
perturb_date = DatePerturbation(frequency=("YEAR", "MONTH", "DAY"), min=(-10, -5, -5), max=(10, 5, 5))
round_numeric = NumericRounding(dtype=dtypes.Float, precision=-3)
redact_column = ColumnRedact(columns="ssn")
# Apply the transformation
df = redact_column(df)
df = df.select(tokenize(functions.col('name')).alias('name'),
               perturb_numeric(functions.col('age')).alias('age'),
round_numeric(functions.col('salary')).alias('salary'),
perturb_date(functions.col('birthdate')).alias('birthdate'))
print("Masked Dataset:")
df.show()
|
from ctypes import windll, c_void_p, POINTER, c_size_t, Structure, c_uint64, c_uint32, sizeof, c_wchar, c_wchar_p, byref
from ctypes.wintypes import DWORD
from pprint import pprint
from inc.errors import GWErrors
from inc.system_info import GWSystemInfo
class MEMORY_BASIC_INFORMATION(Structure):
"""https://msdn.microsoft.com/en-us/library/aa366775"""
_fields_ = (('BaseAddress', c_uint64),
('AllocationBase', c_uint64),
('AllocationProtect', DWORD),
('RegionSize', c_size_t),
('State', DWORD),
('Protect', DWORD),
('Type', DWORD))
MEM_COMMIT = 0x1000
MEM_FREE = 0x10000
MEM_RESERVE = 0x2000
MEM_IMAGE = 0x1000000
MEM_MAPPED = 0x40000
MEM_PRIVATE = 0x20000
PAGE_EXECUTE = 0x10
PAGE_EXECUTE_READ = 0x20
PAGE_EXECUTE_READWRITE = 0x40
PAGE_EXECUTE_WRITECOPY = 0x80
PAGE_NOACCESS = 0x01
PAGE_READONLY = 0x02
PAGE_READWRITE = 0x04
PAGE_WRITECOPY = 0x08
PAGE_TARGETS_INVALID = 0x40000000
PAGE_TARGETS_NO_UPDATE = 0x40000000
PAGE_GUARD = 0x100
PAGE_NOCACHE = 0x200
PAGE_WRITECOMBINE = 0x400
VirtualQueryEx = windll.kernel32.VirtualQueryEx
VirtualQueryEx.argtypes = [ c_void_p, c_void_p, POINTER(MEMORY_BASIC_INFORMATION), c_size_t ]
VirtualQueryEx.restype = c_size_t
# StrFormatByteSizeW
StrFormatByteSize = windll.shlwapi.StrFormatByteSizeW
StrFormatByteSize.argtypes = [ c_uint64, POINTER(c_wchar), c_uint32 ]
StrFormatByteSize.restype = c_wchar_p
class GWVirtualMemory:
si: GWSystemInfo = None
memory: dict = dict()
err: GWErrors = GWErrors()
handle = None
count: int = 0
size: c_uint64 = 0
# ##########################################################################
# Constructor
# ##########################################################################
def __init__(self, handle: c_void_p = None, si: GWSystemInfo = None):
self.clear_memory_list()
if handle:
self.handle = handle
if si is not None:
self.si = si
else:
self.si = GWSystemInfo()
# ##########################################################################
# Clear list
# ##########################################################################
def clear_memory_list(self):
self.memory = dict()
# ##########################################################################
# Set handle
# ##########################################################################
def handle_set(self, in_handle):
self.handle = in_handle
# ##########################################################################
# Removes handle
# ##########################################################################
def handle_remove(self):
self.handle = None
# ##########################################################################
    # Gets MEMORY_BASIC_INFORMATION by address
# ##########################################################################
def get_memory_information_by_address(self, in_address: c_uint64 = 0):
if not self.handle:
return False
mbi: MEMORY_BASIC_INFORMATION = MEMORY_BASIC_INFORMATION()
size = sizeof(mbi)
ret = VirtualQueryEx(self.handle, in_address, mbi, size)
if not ret:
return False
return mbi
# ##########################################################################
    # Gets list of MEMORY_BASIC_INFORMATION
# ##########################################################################
def enum_memory_from_to(self, in_from: c_uint64 = 0, in_to: c_uint64 = 0):
self.clear_memory_list()
if not self.handle:
return False
# print(self.si)
addr_max: c_uint64 = in_to
addr_min: c_uint64 = in_from
if addr_max < self.si.lpMaximumApplicationAddress:
addr_max = self.si.lpMaximumApplicationAddress - 1
if addr_min < self.si.lpMinimumApplicationAddress:
addr_min = self.si.lpMinimumApplicationAddress + 1
address = addr_min
pid = windll.kernel32.GetProcessId(self.handle)
while address < addr_max:
mbi = self.get_memory_information_by_address(address)
if mbi is not False:
addr_base: c_uint64 = c_uint64(mbi.BaseAddress)
addr_len: c_uint64 = c_uint64(mbi.RegionSize)
                if (mbi.State & MEM_COMMIT) and (mbi.Protect & PAGE_READWRITE):
self.memory[mbi.BaseAddress] = mbi
address = addr_base.value + addr_len.value + 1
else:
print("Error: {} Base: 0x{:016X}".format(
self.err.get_error_string(),
address
))
return False
self.count = len(self.memory)
self.size = 0
        for m in self.memory.values():
            self.size += m.RegionSize
# ##########################################################################
# Get count in list
# ##########################################################################
def get_count(self):
return self.count
# ##########################################################################
# Get Size in Bytes
# ##########################################################################
def get_size_in_byte(self):
# s = (c_wchar * 8192)(0)
# StrFormatByteSize(self.size, byref(c_wchar), 8192)
# print(c_wchar)
return self.get_sizeof_fmt(self.size)
# ##########################################################################
#
# ##########################################################################
def get_sizeof_fmt(self, num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
|
"""Custom pyplot style and palette."""
import matplotlib.pyplot as plt
import seaborn as sns
plt.rcParams.update(
{
"figure.dpi": 120,
"savefig.dpi": 300,
"figure.figsize": (7, 4.33),
"lines.linewidth": 2,
"axes.spines.bottom": False,
"axes.spines.top": False,
"axes.spines.left": False,
"axes.spines.right": False,
"axes.grid": True,
"axes.grid.axis": "y",
"ytick.left": False,
"grid.linewidth": 0.8,
"grid.alpha": 0.8,
"font.family": "Lato",
"font.weight": "regular",
}
)
glasbey_bw_50 = [
(0.843137, 0.0, 0.0, 1.0),
(0.54902, 0.235294, 1.0, 1.0),
(0.007843000000000044, 0.533333, 0.0, 1.0),
(0.0, 0.67451, 0.780392, 1.0),
(0.596078, 1.0, 0.0, 1.0),
(1.0, 0.498039, 0.819608, 1.0),
(0.42352900000000004, 0.0, 0.3098040000000001, 1.0),
(1.0, 0.647059, 0.188235, 1.0),
(0.0, 0.0, 0.615686, 1.0),
(0.52549, 0.439216, 0.407843, 1.0),
(0.0, 0.286275, 0.258824, 1.0),
(0.309804, 0.164706, 0.0, 1.0),
(0.0, 0.992157, 0.811765, 1.0),
(0.737255, 0.717647, 1.0, 1.0),
(0.584314, 0.705882, 0.47843100000000005, 1.0),
(0.752941, 0.015685999999999978, 0.72549, 1.0),
(0.14509800000000006, 0.4, 0.635294, 1.0),
(0.156863, 0.0, 0.254902, 1.0),
(0.862745, 0.701961, 0.686275, 1.0),
(0.996078, 0.960784, 0.564706, 1.0),
(0.31372500000000003, 0.27058799999999994, 0.356863, 1.0),
(0.643137, 0.486275, 0.0, 1.0),
(1.0, 0.443137, 0.4, 1.0),
(0.24705900000000003, 0.505882, 0.431373, 1.0),
(0.509804, 0.0, 0.050980000000000025, 1.0),
(0.639216, 0.482353, 0.701961, 1.0),
(0.203922, 0.305882, 0.0, 1.0),
(0.607843, 0.894118, 1.0, 1.0),
(0.921569, 0.0, 0.46666699999999994, 1.0),
(0.17647099999999993, 0.0, 0.03921599999999997, 1.0),
(0.368627, 0.564706, 1.0, 1.0),
(0.0, 0.780392, 0.12549, 1.0),
(0.345098, 0.003921999999999981, 0.666667, 1.0),
(0.0, 0.117647, 0.0, 1.0),
(0.603922, 0.278431, 0.0, 1.0),
(0.588235, 0.623529, 0.65098, 1.0),
(0.607843, 0.258824, 0.360784, 1.0),
(0.0, 0.12156899999999998, 0.196078, 1.0),
(0.784314, 0.768627, 0.0, 1.0),
(1.0, 0.815686, 1.0, 1.0),
(0.0, 0.745098, 0.603922, 1.0),
(0.215686, 0.08235300000000001, 1.0, 1.0),
(0.176471, 0.145098, 0.14509799999999995, 1.0),
(0.8745100000000001, 0.345098, 1.0, 1.0),
(0.745098, 0.905882, 0.752941, 1.0),
(0.498039, 0.27058799999999994, 0.596078, 1.0),
(0.321569, 0.309804, 0.235294, 1.0),
(0.847059, 0.4, 0.0, 1.0),
(0.392157, 0.454902, 0.219608, 1.0),
(0.756863, 0.45098, 0.533333, 1.0),
]
sns.set_palette(glasbey_bw_50)
palette = sns.color_palette()
|
def vsota_diagonal_spirale_nxn(n):
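    """Sum of the numbers on both diagonals of an n x n number spiral
    (cf. Project Euler problem 28). Assumes n is odd: for even n the
    diagonal walk never lands exactly on n*n and the loop would not
    terminate."""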
najvecje = n * n
element = 1
vsota = 1
korak = 0
while element != najvecje:
korak += 2
for x in range(4):
element += korak
vsota += element
return vsota
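# Illustrative check (mine, not from the source): for n = 5 the diagonal values
# are 1, 3, 5, 7, 9, 13, 17, 21, 25, so vsota_diagonal_spirale_nxn(5) == 101.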
|
antimony = '''
model myModel
S1 -> S2; k1*S1
S1 = 10; S2 = 0
k1 = 1
end
'''
phrasedml = '''
model1 = model "myModel"
sim1 = simulate uniform(0, 5, 100)
task1 = run sim1 on model1
plot "Figure 1" time vs S1, S2
'''
import tellurium as te
exp = te.experiment(antimony, phrasedml)
exp.execute()
exp.printpython()
### export testing - pass the full path of the zip file you want to create
#exp.exportAsCombine()
|
from django.apps import AppConfig
class EventTypeConfig(AppConfig):
name = 'event_type'
|
from distutils.core import setup
import py2exe
import sys
# This is a standalone script; default to the py2exe command when none is given.
if len(sys.argv) == 1:
sys.argv.append("py2exe")
setup(
name = "pop",
description = "The pop compiler.",
version = "0.1.1",
zipfile = None,
options = {
"py2exe": {
"compressed": True,
"optimize": 2,
"dist_dir": ".",
"bundle_files": 1
}
},
console = [
{
"script": "pop/__main__.py",
"icon_resources": [(0, "pop.ico")],
"dest_base": "pop"
}
]
)
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from setuptools import setup, find_packages
try:
with open('Readme.md') as f:
readme = f.read()
except IOError:
readme = ''
def _requires_from_file(filename):
return open(filename).read().splitlines()
setup(
name='voicetotext',
version='1.0.1',
url='https://github.com/kztka/voicetotext',
author='kztka',
author_email='[email protected]',
maintainer='kztka',
maintainer_email='[email protected]',
    description='Transcribe media files to text using the Google API.',
long_description=readme,
packages=find_packages(),
py_modules=['voicetotext'],
install_requires=_requires_from_file('requirements.txt'),
license="MIT",
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'License :: OSI Approved :: MIT License',
],
entry_points='''
# -*- Entry points: -*-
[console_scripts]
voicetotext=voicetotext.voicetotext:cli
splitvoice=voicetotext.splitvoice:cli
''',
)
|
import open3d as o3d
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage.measure import marching_cubes_lewiner
import numpy as np
import trimesh
import proxy_models_im as proxy_models
import pc_encoder_im as pc_encoder
import tqdm
import os
import shutil
import matplotlib.pyplot as plt
device = torch.device('cuda')
def newdir(name):
if os.path.isdir(name):
shutil.rmtree(name)
os.mkdir(name)
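# NOTE: write_gzip is called in test() below but is not defined or imported in
# this file. A minimal sketch of what it presumably does (gzip-compressed
# pickle of the exported dict); the original helper may differ:
import gzip
import pickle
def write_gzip(item, path):
    # persist a Python object as a gzip-compressed pickle
    with gzip.open(path, 'wb') as f:
        pickle.dump(item, f)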
class Generator(nn.Module):
def __init__(self,gf_dim,latent_dim,z_dim):
super(Generator,self).__init__()
self.network = nn.Sequential(
nn.Linear(z_dim,gf_dim),
nn.LeakyReLU(),
nn.Linear(gf_dim,latent_dim),
)
def forward(self,inputs):
return self.network(inputs)
class Decoder(nn.Module):
def __init__(self, z_dim, point_dim, gf_dim):
super(Decoder, self).__init__()
self.z_dim = z_dim
self.point_dim = point_dim
self.gf_dim = gf_dim
self.linear_1 = nn.Linear(self.z_dim+self.point_dim, self.gf_dim*8, bias=True)
self.linear_2 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_3 = nn.Linear(self.gf_dim*8, self.gf_dim*8, bias=True)
self.linear_4 = nn.Linear(self.gf_dim*8, self.gf_dim*4, bias=True)
self.linear_5 = nn.Linear(self.gf_dim*4, self.gf_dim*2, bias=True)
self.linear_6 = nn.Linear(self.gf_dim*2, self.gf_dim*1, bias=True)
self.linear_7 = nn.Linear(self.gf_dim*1, 1, bias=True)
nn.init.normal_(self.linear_1.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_1.bias,0)
nn.init.normal_(self.linear_2.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_2.bias,0)
nn.init.normal_(self.linear_3.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_3.bias,0)
nn.init.normal_(self.linear_4.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_4.bias,0)
nn.init.normal_(self.linear_5.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_5.bias,0)
nn.init.normal_(self.linear_6.weight, mean=0.0, std=0.02)
nn.init.constant_(self.linear_6.bias,0)
nn.init.normal_(self.linear_7.weight, mean=1e-5, std=0.02)
nn.init.constant_(self.linear_7.bias,0)
def forward(self, points, z):
pts = points.repeat(len(z),1,1)
zs = z.view(-1,1,self.z_dim).repeat(1,pts.shape[1],1)
pointz = torch.cat([pts,zs],2)
l1 = self.linear_1(pointz)
l1 = F.leaky_relu(l1, negative_slope=0.02, inplace=True)
l2 = self.linear_2(l1)
l2 = F.leaky_relu(l2, negative_slope=0.02, inplace=True)
l3 = self.linear_3(l2)
l3 = F.leaky_relu(l3, negative_slope=0.02, inplace=True)
l4 = self.linear_4(l3)
l4 = F.leaky_relu(l4, negative_slope=0.02, inplace=True)
l5 = self.linear_5(l4)
l5 = F.leaky_relu(l5, negative_slope=0.02, inplace=True)
l6 = self.linear_6(l5)
l6 = F.leaky_relu(l6, negative_slope=0.02, inplace=True)
l7 = self.linear_7(l6)
#l7 = torch.clamp(l7, min=0, max=1)
l7 = torch.max(torch.min(l7, l7*0.01+0.99), l7*0.01)
return l7
class Model(nn.Module):
def __init__(self):
super(Model,self).__init__()
self.bsz = 1
self.z_dim = 256
self.latent_dim = 256
self.g_hdim = 2048
self.d_hdim = 128
self.b_mean,self.b_std = torch.load('betas_params.pt')
self.frame_coords = torch.tensor(torch.load('frame_coords.pt')).to(device).unsqueeze(0)
self.N = 64
self.isosurface = 0.4
self.z_mean = 0.
self.z_std = 1.2
self.M = pc_encoder.FiLMNetwork(16,self.latent_dim)
self.G = Generator(self.g_hdim,self.latent_dim,self.z_dim)
self.G.load_state_dict(torch.load('modelG_dict.pt'))
for param in self.G.parameters():
param.requires_grad = False
self.G.eval()
self.D = Decoder(self.latent_dim,3,self.d_hdim)
self.D.load_state_dict(torch.load('modelD_dict.pt'))
for param in self.D.parameters():
param.requires_grad = False
self.D.eval()
self.proxy = proxy_models.LossProxy()
self.proxy.load_state_dict(torch.load('comf_dict_im.pt'))
for param in self.proxy.parameters():
param.requires_grad = False
self.proxy.eval()
self.optimizer = torch.optim.Adam(self.M.parameters(),1e-5)
def forward(self,cond=True):
# Random Inputs
        z_curr = self.z_mean + torch.randn(size=(self.bsz,self.z_dim)).to(device) * self.z_std
betas = (self.b_mean + torch.randn(size=(self.bsz,len(self.b_std))) * self.b_std).float().to(device)
if is_test:
z_curr = glb_zs[[idx]]
betas = glb_betas[[idx]]
# Condition
if cond:
z_curr = self.M(betas,z_curr)
# Generator
z_curr = self.G(z_curr)
# Decoder
net_out = self.D(self.frame_coords, z_curr)
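        # shift by the isosurface and negate so the surface sits at level 0 for
        # marching cubes below (the sign flip for an inside/outside convention is assumed)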
model_out = - (net_out - self.isosurface).view(self.N,self.N,self.N).detach().cpu().numpy()
verts,faces,_,_ = marching_cubes_lewiner(
model_out, level = 0., spacing = [1./self.N]*3
)
faces = np.concatenate([faces])
return z_curr,verts,faces,betas
def loss(self,z_curr,verts,faces,betas):
# Calculate dL/dp
xyz_upstream = torch.tensor(verts.astype(float), dtype=torch.float, device=device)
        faces_upstream = torch.tensor(faces.astype(np.int64), dtype=torch.long, device=device)
# Sample points
pts = self.sample_mesh_surface(xyz_upstream,faces_upstream)
# Points upstream
points_upstream = pts.clone().detach().requires_grad_(True)
comf = self.proxy(points_upstream.unsqueeze(0),betas,True)
comf.backward()
dL_dp = points_upstream.grad
# Calculate dL/ds
self.optimizer.zero_grad()
points = pts.clone().detach().requires_grad_(True)
pred_sdf = self.D(points, z_curr)
pred_sdf = - (pred_sdf - self.isosurface).squeeze(0)
loss_normals = torch.sum(pred_sdf)
loss_normals.backward(retain_graph = True)
normals = points.grad/torch.norm(points.grad, 2, 1).unsqueeze(-1)
self.optimizer.zero_grad()
dL_ds = -torch.matmul(dL_dp.unsqueeze(1), normals.unsqueeze(-1)).squeeze(-1)
# Calculate dL/ds(s)
loss_backward = (dL_ds * pred_sdf).mean()
return loss_backward
def optimize(self,loss):
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def transform_verts(self,verts):
if torch.is_tensor(verts):
mini = verts.min(dim=0)[0]
maxi = verts.max(dim=0)[0]
center = (mini + maxi)/2.
scale_factor = torch.prod(maxi - mini).pow(1./3.)
fn = lambda x: torch.stack(x,dim=-1)
else:
mini = verts.min(axis=0)
maxi = verts.max(axis=0)
center = (mini + maxi)/2.
scale_factor = np.prod(maxi - mini) ** (1./3.)
fn = lambda x: np.column_stack(x)
verts = (verts - center) / scale_factor
return fn((-verts[:,0],verts[:,1],-verts[:,2]))
def sample_mesh_surface(self,v,f,n_s=10000):
a,b,c = v[f].permute(1,0,2)
areas = torch.cross(a - b, b - c).norm(dim=-1)
weights = (areas / areas.sum()).detach().cpu().numpy()
choices = np.random.choice(a=len(weights),size=n_s,p=weights)
u,v = torch.rand(size=(2,n_s)).to(device)
pts = (1 - u**.5).view(-1,1) * a[choices]
pts += (u**.5 * (1 - v)).view(-1,1) * b[choices]
pts += (v * u ** .5).view(-1,1) * c[choices]
return pts
def writeObj(self, verts, faces, outfile):
faces += 1
with open(outfile, 'w') as f:
for a, b, c in verts.tolist():
f.write(f'v {a} {b} {c}\n')
for a, b, c in faces.tolist():
f.write(f"f {a} {b} {c}\n")
def export(self,cond):
z,v,f,betas = self.forward(cond)
v = torch.tensor(v).float()
f = torch.tensor(f).long()
epsilon = 0.
u_pts = torch.tensor(np.mgrid[0:64,0:64,0:64].T).view(-1,3) / 63.
a = v.min(dim=0)[0]
b = v.max(dim=0)[0]
a = a - .5
b = b - .5
pts = a + u_pts * (b - a)
sdf = self.D(pts.to(device),z).view(1,self.N,self.N,self.N).cpu().detach()
sdf = -(sdf - self.isosurface).flip((1,3))
v = self.transform_verts(v)
grid_min = v.min(dim=0)[0]
grid_max = v.max(dim=0)[0]
d = {
'sdf':sdf.detach().cpu().numpy(),
'grid_min':grid_min.detach().cpu().numpy(),
'grid_max':grid_max.detach().cpu().numpy(),
'vertices':v.detach().cpu().numpy(),
            'faces':f.detach().cpu().numpy(),
            'betas':betas.detach().cpu().numpy()
}
return d
def train(model):
global idx
idx = 0
iters = 100
for i in range(0,iters,model.bsz):
outputs = model()
loss = model.loss(*outputs)
model.optimize(loss)
idx += 1
def test(model,name,cond=True):
global idx
idx = 0
num_exp = 100
for i in tqdm.tqdm(range(num_exp)):
item = model.export(cond)
write_gzip(item,f'model_output_cond/{name}/{i}.gz')
idx += 1
def test_all():
model = Model().to(device)
newdir('model_output_cond')
names = [e for e in range(step,step*num_exp+1,step)]
for name in names:
exp_dir = f'model_output_cond/{name}'
newdir(exp_dir)
model.load_state_dict(torch.load(f'comf_model_{name}.pt'))
test(model,name)
newdir('model_output_cond/reg')
test(model,'reg',False)
def main():
global glb_betas
glb_betas = torch.load('gen_betas.pt').to(device)
global glb_zs
glb_zs = torch.load('gen_zs.pt').to(device)
global is_test
is_test = False
is_train = True
if is_train:
model = Model().to(device)
epochs = step * num_exp
for epoch in tqdm.tqdm(range(epochs)):
train(model)
if (epoch+1) % step == 0:
torch.save(model.state_dict(),f'comf_model_{epoch+1}.pt')
is_test = True
test_all()
if __name__ == "__main__":
global step,num_exp
step = 1
num_exp = 5
main()
|
from dagster_graphql import dauphin
from dagster import check
from dagster.core.snap import PipelineSnapshot
from dagster.core.types.dagster_type import DagsterTypeKind
from .config_types import DauphinConfigType, to_dauphin_config_type
def config_type_for_schema(pipeline_snapshot, schema_key):
return (
to_dauphin_config_type(pipeline_snapshot.config_schema_snapshot, schema_key)
if schema_key
else None
)
def to_dauphin_dagster_type(pipeline_snapshot, dagster_type_key):
check.str_param(dagster_type_key, "dagster_type_key")
    check.inst_param(pipeline_snapshot, "pipeline_snapshot", PipelineSnapshot)
dagster_type_meta = pipeline_snapshot.dagster_type_namespace_snapshot.get_dagster_type_snap(
dagster_type_key
)
base_args = dict(
key=dagster_type_meta.key,
name=dagster_type_meta.name,
display_name=dagster_type_meta.display_name,
description=dagster_type_meta.description,
is_builtin=dagster_type_meta.is_builtin,
is_nullable=dagster_type_meta.kind == DagsterTypeKind.NULLABLE,
is_list=dagster_type_meta.kind == DagsterTypeKind.LIST,
is_nothing=dagster_type_meta.kind == DagsterTypeKind.NOTHING,
input_schema_type=config_type_for_schema(
pipeline_snapshot, dagster_type_meta.loader_schema_key,
),
output_schema_type=config_type_for_schema(
pipeline_snapshot, dagster_type_meta.materializer_schema_key,
),
inner_types=list(
map(
lambda key: to_dauphin_dagster_type(pipeline_snapshot, key),
dagster_type_meta.type_param_keys,
)
),
)
if dagster_type_meta.kind == DagsterTypeKind.LIST:
base_args["of_type"] = to_dauphin_dagster_type(
pipeline_snapshot, dagster_type_meta.type_param_keys[0]
)
return DauphinListDagsterType(**base_args)
elif dagster_type_meta.kind == DagsterTypeKind.NULLABLE:
base_args["of_type"] = to_dauphin_dagster_type(
pipeline_snapshot, dagster_type_meta.type_param_keys[0]
)
return DauphinNullableDagsterType(**base_args)
else:
return DauphinRegularDagsterType(**base_args)
class DauphinDagsterType(dauphin.Interface):
class Meta(object):
name = "DagsterType"
key = dauphin.NonNull(dauphin.String)
name = dauphin.String()
display_name = dauphin.NonNull(dauphin.String)
description = dauphin.String()
is_nullable = dauphin.NonNull(dauphin.Boolean)
is_list = dauphin.NonNull(dauphin.Boolean)
is_builtin = dauphin.NonNull(dauphin.Boolean)
is_nothing = dauphin.NonNull(dauphin.Boolean)
input_schema_type = dauphin.Field(DauphinConfigType)
output_schema_type = dauphin.Field(DauphinConfigType)
inner_types = dauphin.non_null_list("DagsterType")
class DauphinRegularDagsterType(dauphin.ObjectType):
class Meta(object):
name = "RegularDagsterType"
interfaces = [DauphinDagsterType]
class DauphinWrappingDagsterType(dauphin.Interface):
class Meta(object):
name = "WrappingDagsterType"
of_type = dauphin.Field(dauphin.NonNull(DauphinDagsterType))
class DauphinListDagsterType(dauphin.ObjectType):
class Meta(object):
name = "ListDagsterType"
interfaces = [DauphinDagsterType, DauphinWrappingDagsterType]
class DauphinNullableDagsterType(dauphin.ObjectType):
class Meta(object):
name = "NullableDagsterType"
interfaces = [DauphinDagsterType, DauphinWrappingDagsterType]
|
import datetime
from .TwitterSearchException import TwitterSearchException
from .utils import py3k
try: from urllib.parse import parse_qs, quote_plus, unquote # python3
except ImportError: from urlparse import parse_qs; from urllib import quote_plus, unquote #python2
class TwitterSearchOrder(object):
"""
    This class configures all available arguments of the Twitter Search API (v1.1).
    It also creates valid query strings that follow the Twitter Search API syntax and can therefore be reused in other environments.
"""
# default value for count should be the maximum value to minimize traffic
# see https://dev.twitter.com/docs/api/1.1/get/search/tweets
_max_count = 100
# taken from http://www.loc.gov/standards/iso639-2/php/English_list.php
iso_6391 = ['aa', 'ab', 'ae', 'af', 'ak', 'am', 'an', 'ar', 'as', 'av', 'ay', 'az', 'ba', 'be', 'bg', 'bh', 'bi', 'bm', 'bn', 'bo', 'br', 'bs', 'ca', 'ce', 'ch', 'co', 'cr', 'cs', 'cu', 'cv', 'cy', 'da', 'de', 'dv', 'dz', 'ee', 'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'ff', 'fi', 'fj', 'fo', 'fr', 'fy', 'ga', 'gd', 'gl', 'gn', 'gu', 'gv', 'ha', 'he', 'hi', 'ho', 'hr', 'ht', 'hu', 'hy', 'hz', 'ia', 'id', 'ie', 'ig', 'ii', 'ik', 'io', 'is', 'it', 'iu', 'ja', 'jv', 'ka', 'kg', 'ki', 'kj', 'kk', 'kl', 'km', 'kn', 'ko', 'kr', 'ks', 'kv', 'kw', 'ky', 'la', 'lb', 'lg', 'li', 'ln', 'lo', 'lt', 'lu', 'lv', 'mg', 'mh', 'mi', 'mk', 'ml', 'mn', 'mr', 'ms', 'mt', 'my', 'na', 'nb', 'nd', 'ne', 'ng', 'nl', 'nn', 'no', 'nr', 'nv', 'ny', 'oc', 'oj', 'om', 'or', 'os', 'pa', 'pi', 'pl', 'ps', 'pt', 'qu', 'rm', 'rn', 'ro', 'ru', 'rw', 'sa', 'sc', 'sd', 'se', 'sg', 'si', 'sk', 'sl', 'sm', 'sn', 'so', 'sq', 'sr', 'ss', 'st', 'su', 'sv', 'sw', 'ta', 'te', 'tg', 'th', 'ti', 'tk', 'tl', 'tn', 'to', 'tr', 'ts', 'tt', 'tw', 'ty', 'ug', 'uk', 'ur', 'uz', 've', 'vi', 'vo', 'wa', 'wo', 'xh', 'yi', 'yo', 'za', 'zh', 'zu']
def __init__(self):
self.arguments = { 'count' : '%s' % self._max_count }
self.searchterms = []
self.url = ''
def addKeyword(self, word):
""" Adds a given string or list to the current keyword list """
if isinstance(word, str if py3k else basestring) and len(word) >= 2:
self.searchterms.append(word)
elif isinstance(word, list):
self.searchterms += word
else:
raise TwitterSearchException(1000)
def setKeywords(self, word):
""" Sets a given list as the new keyword list """
if not isinstance(word, list):
raise TwitterSearchException(1001)
self.searchterms = word
def setSearchURL(self, url):
""" Reads given query string and stores key-value tuples """
if url[0] == '?':
url = url[1:]
args = parse_qs(url)
self.searchterms = args['q']
del args['q']
        # urldecode keywords (rebuild the list; reassigning the loop variable alone would be a no-op)
        self.searchterms = [unquote(item) for item in self.searchterms]
self.arguments = {}
for key, value in args.items():
self.arguments.update({key : unquote(value[0])})
def createSearchURL(self):
""" Generates (urlencoded) query string from stored key-values tuples """
if len(self.searchterms) == 0:
raise TwitterSearchException(1015)
url = '?q='
url += '+'.join([ quote_plus(i) for i in self.searchterms])
for key, value in self.arguments.items():
url += '&%s=%s' % (quote_plus(key), (quote_plus(value) if key != 'geocode' else value) )
self.url = url
return self.url
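    # Illustrative example (mine, not from the source):
    #   tso = TwitterSearchOrder(); tso.addKeyword('#python')
    #   tso.createSearchURL()  ->  '?q=%23python&count=100'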
def setLanguage(self, lang):
""" Sets 'lang' paramater """
if lang in self.iso_6391:
self.arguments.update( { 'lang' : '%s' % lang } )
else:
raise TwitterSearchException(1002)
def setLocale(self, lang):
""" Sets 'locale' paramater """
if lang in self.iso_6391:
self.arguments.update( { 'locale' : '%s' % lang } )
else:
raise TwitterSearchException(1002)
def setResultType(self, tor):
""" Sets 'result_type' paramater """
        if tor in ('mixed', 'recent', 'popular'):
self.arguments.update( { 'result_type' : '%s' % tor } )
else:
raise TwitterSearchException(1003)
def setSinceID(self, twid):
""" Sets 'since_id' parameter """
if py3k:
if not isinstance(twid, int):
raise TwitterSearchException(1004)
else:
if not isinstance(twid, (int, long)):
raise TwitterSearchException(1004)
if twid > 0:
self.arguments.update( { 'since_id' : '%s' % twid } )
else:
raise TwitterSearchException(1004)
def setMaxID(self, twid):
""" Sets 'max_id' parameter """
if py3k:
if not isinstance(twid, int):
raise TwitterSearchException(1004)
else:
if not isinstance(twid, (int, long)):
raise TwitterSearchException(1004)
if twid > 0:
self.arguments.update( { 'max_id' : '%s' % twid } )
else:
raise TwitterSearchException(1004)
def setCount(self, cnt):
""" Sets 'count' paramater """
if isinstance(cnt, int) and cnt > 0 and cnt <= 100:
self.arguments.update( { 'count' : '%s' % cnt } )
else:
raise TwitterSearchException(1004)
def setGeocode(self, latitude, longitude, radius, km=True):
""" Sets geolocation paramaters """
if not isinstance(radius, (int) if py3k else (int, long) ) or radius <= 0:
raise TwitterSearchException(1004)
if isinstance(latitude, float) and isinstance(longitude, float):
if isinstance(km, bool):
self.arguments.update( { 'geocode' : '%s,%s,%s%s' % (latitude, longitude, radius, 'km' if km else 'mi') } )
else:
raise TwitterSearchException(1005)
else:
raise TwitterSearchException(1004)
def setCallback(self, func):
""" Sets 'callback' paramater """
if isinstance(func, str if py3k else basestring) and func:
self.arguments.update( { 'callback' : '%s' % func } )
else:
raise TwitterSearchException(1006)
def setUntil(self, date):
""" Sets 'until' parameter """
if isinstance(date, datetime.date) and date <= datetime.date.today():
self.arguments.update( { 'until' : '%s' % date.strftime('%Y-%m-%d') } )
else:
raise TwitterSearchException(1007)
def setIncludeEntities(self, include):
""" Sets 'include entities' paramater """
if not isinstance(include, bool):
raise TwitterSearchException(1008)
if include:
self.arguments.update( { 'include_entities' : 'True' } )
else:
self.arguments.update( { 'include_entities' : 'False' } )
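# Minimal usage sketch (illustrative only; the ordering of the non-keyword
# arguments in the generated URL depends on dict iteration order):
#
#   tso = TwitterSearchOrder()
#   tso.setKeywords(['guttenberg', '#plagiarism'])
#   tso.setLanguage('de')
#   tso.setCount(10)
#   tso.createSearchURL()  # -> '?q=guttenberg+%23plagiarism&count=10&lang=de'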
|
#!/usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
"""Based on the scheme and rank splits a layer"""
import copy
from torch import nn
# Import AIMET specific modules
from aimet_torch.svd.svd_splitter import WeightSvdModuleSplitter
from aimet_torch import layer_database as lad
from aimet_torch.svd import model_stats_calculator as MS
from aimet_common import statistics_util as stats_u
from aimet_common.utils import AimetLogger
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Svd)
class ModelPruner:
"""
Creates a compressed model by iterating over the selected layers in the model using the corresponding ranks
"""
@classmethod
def _copy_model(cls, model, model_layers):
"""
Creates a copy of the original model and its layers
:param model: the original model
:param model_layers: original model's layers
        :return: copy of the model and a corresponding copy of its layer dictionary
"""
# Create a deep-copy of the model to return
model_copy = copy.deepcopy(model)
# Create an empty model_layers to fill
model_layers_copy = {}
modules_in_copy = list(model_copy.modules())
# For all modules in the current model
for index, module in enumerate(model.modules()):
# If this module is included in the existing model_layers, we need to add a corresponding entry into
# model_layers_copy
if id(module) in model_layers:
existing_layer = model_layers[id(module)]
new_layer = lad.Layer(modules_in_copy[index], existing_layer.name,
existing_layer.output_shape)
new_layer.picked_for_compression = existing_layer.picked_for_compression
model_layers_copy[id(modules_in_copy[index])] = new_layer
# Now we need to set parent references
lad.LayerDatabase.set_reference_to_parent_module(model_copy, model_layers_copy)
return model_copy, model_layers_copy
def create_compressed_model(self, svd_rank_pair_dict, model, compressible_layers, svd_lib_ref, metric):
"""
        Creates and returns a compressed model from the given model using the given SVD ranks
        :param svd_rank_pair_dict: Dictionary with a layer name as the key and its SVD rank pair as the value
:param model: the original model
:param compressible_layers: all the layers that can be compressed
:param svd_lib_ref: Model Optimization library reference
:param metric: the cost metric being used
:return: Returns the compressed model and per_layer statistics list
"""
# pylint: disable=too-many-locals
# Create a copy of the model
compressed_model, compressed_model_layers = self._copy_model(model, compressible_layers)
# Layer attributes for compressed model
# Start with all layers that are not selected for compression
selected_layers = [layer for (key, layer) in compressed_model_layers.items()
if layer.picked_for_compression is True]
compressed_model_layers = {key: value for (key, value) in compressed_model_layers.items()
if value.picked_for_compression is False}
# List to hold the SVD Statistics for each selected layer
layer_stats_list = list()
# Loop over all the selected layers
for layer in selected_layers:
svd_rank_pair = svd_rank_pair_dict[layer.name]
# Split the layer
sequential_of_split_layers, layer_a_attr, layer_b_attr = DeprecatedSvdPruner.prune_layer(layer, svd_rank_pair[0],
svd_lib_ref)
# Replace original layer with sequential of split layers
setattr(layer.parent_module, layer.var_name_of_module_in_parent,
sequential_of_split_layers)
# Add layer attributes for the split layers
compressed_model_layers[id(layer_a_attr.module)] = layer_a_attr
compressed_model_layers[id(layer_b_attr.module)] = layer_b_attr
split_layers = list()
split_layers.append(layer_a_attr)
split_layers.append(layer_b_attr)
ms = MS.ModelStats
layer_compression_ratio = ms.compute_per_layer_compression_ratio(orig_layer=layer,
split_layers=split_layers,
metric=metric)
per_layer = stats_u.SvdStatistics.PerSelectedLayer(name=layer.name, rank=svd_rank_pair[0],
compression_ratio=layer_compression_ratio)
layer_stats_list.append(per_layer)
return compressed_model, compressed_model_layers, layer_stats_list
class DeprecatedSvdPruner:
"""
Splits layers based on SVD technique
"""
@staticmethod
def prune_layer(original_layer, rank, svd_lib_ref):
"""
Splits a layer based on the splitting scheme
:param original_layer: original layers attributes
:param rank: rank pair for a given layer
:param svd_lib_ref: Reference to Model optimization library
        :return: sequential of the split layers, plus the layer attributes of both split layers
"""
# Delegate to the right method to split the layer
if isinstance(original_layer.module, nn.Conv2d):
module_a, module_b = WeightSvdModuleSplitter.split_conv_module(original_layer.module, original_layer.name,
rank, svd_lib_ref)
elif isinstance(original_layer.module, nn.Linear):
module_a, module_b = WeightSvdModuleSplitter.split_fc_module(original_layer.module, original_layer.name,
rank, svd_lib_ref)
else:
raise TypeError("Only Conv and FC layers are currently supported")
# Create a sequential of the split layers
seq = nn.Sequential(module_a, module_b)
# layer_attr of split layers
layer_a = lad.Layer(module_a, original_layer.name + '_a', original_layer.output_shape)
layer_b = lad.Layer(module_b, original_layer.name + '_b', original_layer.output_shape)
return seq, layer_a, layer_b
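# Illustrative sketch (not part of AIMET) of the replacement pattern used in
# create_compressed_model() above: a layer is swapped for an nn.Sequential of
# its two SVD factors via setattr on the parent module. The shapes and rank
# below are hypothetical.
if __name__ == '__main__':
    parent = nn.Sequential(nn.Linear(16, 16))
    rank = 4
    factor_a = nn.Linear(16, rank, bias=False)  # first factor: weight is rank x in_features
    factor_b = nn.Linear(rank, 16)              # second factor: weight is out_features x rank
    setattr(parent, '0', nn.Sequential(factor_a, factor_b))
    print(parent)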
|
import datetime
from decimal import Decimal
from testil import eq
from dateutil.tz import tzoffset, tzlocal
from openpyxl.styles import numbers
from corehq.apps.export.const import MISSING_VALUE, EMPTY_VALUE
from couchexport.util import get_excel_format_value
def check(input_value, expected_value, expected_format, expected_type):
    excel_format, value = get_excel_format_value(input_value)
    eq(excel_format, expected_format)
    eq(value, expected_value)
    eq(type(value), expected_type)
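# The tests below use the nose-style "yield" pattern: each yielded tuple is
# invoked as check(input_value, expected_value, expected_format, expected_type).
# For example, the first case of test_integers() is equivalent to calling:
#     check('3423', 3423, numbers.FORMAT_NUMBER, int)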
def test_integers():
yield check, '3423', 3423, numbers.FORMAT_NUMBER, int
yield check, '-234', -234, numbers.FORMAT_NUMBER, int
yield check, 324, 324, numbers.FORMAT_NUMBER, int
def test_decimal():
yield check, '4.0345', 4.0345, numbers.FORMAT_NUMBER_00, float
yield check, '-3.234', -3.234, numbers.FORMAT_NUMBER_00, float
yield check, 5.032, 5.032, numbers.FORMAT_NUMBER_00, float
yield check, Decimal('3.00'), 3.00, numbers.FORMAT_NUMBER_00, float
def test_boolean():
yield check, 'TRUE', True, numbers.FORMAT_GENERAL, bool
yield check, 'True', True, numbers.FORMAT_GENERAL, bool
yield check, 'true', True, numbers.FORMAT_GENERAL, bool
yield check, True, True, numbers.FORMAT_GENERAL, bool
yield check, 'FALSE', False, numbers.FORMAT_GENERAL, bool
yield check, 'False', False, numbers.FORMAT_GENERAL, bool
yield check, 'false', False, numbers.FORMAT_GENERAL, bool
yield check, False, False, numbers.FORMAT_GENERAL, bool
def test_decimal_eur():
yield check, '6,9234', 6.9234, numbers.FORMAT_NUMBER_00, float
yield check, '-5,342', -5.342, numbers.FORMAT_NUMBER_00, float
def test_percentage():
yield check, '80%', 0.8, numbers.FORMAT_PERCENTAGE, float
yield check, '-50%', -0.5, numbers.FORMAT_PERCENTAGE, float
yield check, '3.45%', 0.0345, numbers.FORMAT_PERCENTAGE_00, float
yield check, '-4.35%', -0.0435, numbers.FORMAT_PERCENTAGE_00, float
def test_comma_separated_us():
yield check, '3,000.0234', 3000.0234, numbers.FORMAT_NUMBER_COMMA_SEPARATED1, float
yield check, '3,234,000.342', 3234000.342, numbers.FORMAT_NUMBER_COMMA_SEPARATED1, float
yield check, '5,000,343', 5000343, numbers.FORMAT_NUMBER_COMMA_SEPARATED1, float
yield check, '-5,334.32', -5334.32, numbers.FORMAT_NUMBER_COMMA_SEPARATED1, float
def test_comma_separated_eur():
yield check, '5.600,0322', 5600.0322, numbers.FORMAT_NUMBER_COMMA_SEPARATED2, float
yield check, '5.600,0322', 5600.0322, numbers.FORMAT_NUMBER_COMMA_SEPARATED2, float
yield check, '8.435.600,0322', 8435600.0322, numbers.FORMAT_NUMBER_COMMA_SEPARATED2, float
yield check, '5.555.600', 5555600, numbers.FORMAT_NUMBER_COMMA_SEPARATED2, float
yield check, '-2.433,032', -2433.032, numbers.FORMAT_NUMBER_COMMA_SEPARATED2, float
def test_currency_usd():
yield check, '$3,534.02', 3534.02, numbers.FORMAT_CURRENCY_USD_SIMPLE, float
yield check, '$99', 99, numbers.FORMAT_CURRENCY_USD_SIMPLE, float
yield check, '$5,000', 5000, numbers.FORMAT_CURRENCY_USD_SIMPLE, float
yield check, '-$234.02', -234.02, numbers.FORMAT_CURRENCY_USD_SIMPLE, float
yield check, '$4.4302', 4.4302, numbers.FORMAT_CURRENCY_USD_SIMPLE, float
def test_currency_eur():
yield check, '€5.323,09', 5323.09, numbers.FORMAT_CURRENCY_EUR_SIMPLE, float
yield check, '-€303,03', -303.03, numbers.FORMAT_CURRENCY_EUR_SIMPLE, float
yield check, '€22', 22, numbers.FORMAT_CURRENCY_EUR_SIMPLE, float
yield check, '€3.303', 3303, numbers.FORMAT_CURRENCY_EUR_SIMPLE, float
yield check, '€3,003', 3.003, numbers.FORMAT_CURRENCY_EUR_SIMPLE, float
def test_date():
yield check, '2020-01-20', datetime.datetime(2020, 1, 20, 0, 0), \
numbers.FORMAT_DATE_YYYYMMDD2, datetime.datetime
yield check, '2020/01/20', datetime.datetime(2020, 1, 20, 0, 0), \
numbers.FORMAT_DATE_YYYYMMDD2, datetime.datetime
yield check, '2020.01.20', datetime.datetime(2020, 1, 20, 0, 0), \
numbers.FORMAT_DATE_YYYYMMDD2, datetime.datetime
yield check, datetime.date(2020, 1, 20), datetime.date(2020, 1, 20), \
numbers.FORMAT_DATE_YYYYMMDD2, datetime.date
def test_datetime():
yield check, '2020-01-20 12:33', \
datetime.datetime(2020, 1, 20, 12, 33), \
numbers.FORMAT_DATE_DATETIME, datetime.datetime
yield check, '2020-01-20 12:33:22', \
datetime.datetime(2020, 1, 20, 12, 33, 22), \
numbers.FORMAT_DATE_DATETIME, datetime.datetime
yield check, '2020-01-20 1:33:22PM', \
datetime.datetime(2020, 1, 20, 13, 33, 22), \
numbers.FORMAT_DATE_DATETIME, datetime.datetime
yield check, '2020-01-20 09:33:22.890000-6:00', \
datetime.datetime(2020, 1, 20, 9, 33, 22, 890000,
tzinfo=tzoffset(None, -21600)), \
numbers.FORMAT_DATE_DATETIME, datetime.datetime
yield check, '2020-01-20 09:33:22.890000-6', \
datetime.datetime(2020, 1, 20, 9, 33, 22, 890000,
tzinfo=tzoffset(None, -21600)), \
numbers.FORMAT_DATE_DATETIME, datetime.datetime
yield check, datetime.datetime(2020, 1, 20, 11, 11), \
datetime.datetime(2020, 1, 20, 11, 11), \
numbers.FORMAT_DATE_DATETIME, datetime.datetime
yield check, '2020-01-17T15:45:37.268000Z', \
datetime.datetime(2020, 1, 17, 15, 45, 37, 268000, tzinfo=tzlocal()), \
numbers.FORMAT_DATE_DATETIME, datetime.datetime
def test_time():
yield check, '12:33', '12:33', numbers.FORMAT_DATE_TIME4, str
yield check, '12:33:66', '12:33:66', numbers.FORMAT_DATE_TIME4, str
yield check, '09:33:22.890000-6:00', '09:33:22.890000-6:00', numbers.FORMAT_DATE_TIME4, str
yield check, '09:33:22.890000-6', '09:33:22.890000-6', numbers.FORMAT_DATE_TIME4, str
def test_missing():
yield check, MISSING_VALUE, MISSING_VALUE, numbers.FORMAT_TEXT, str
def test_empty():
yield check, EMPTY_VALUE, EMPTY_VALUE, numbers.FORMAT_TEXT, str
def test_text():
yield check, 'hi this is text', 'hi this is text', numbers.FORMAT_TEXT, str
yield check, '1241234eeeesffsfs', '1241234eeeesffsfs', numbers.FORMAT_TEXT, str
yield check, {'en': 'Thanks', 'de': 'Danke'}, "{'en': 'Thanks', 'de': 'Danke'}", \
numbers.FORMAT_TEXT, str
def test_bad_date_string():
yield check, '112020-02-2609', '112020-02-2609', numbers.FORMAT_TEXT, str
|
# -*- coding: utf-8 -*-
"""
Scrape Website Data | Cannlytics
Copyright © 2021 Cannlytics
Author: Keegan Skeate <[email protected]>
Created: 1/10/2021
License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Resources:
https://stackoverflow.com/questions/54416896/how-to-scrape-email-and-phone-numbers-from-a-list-of-websites
https://hackersandslackers.com/scraping-urls-with-beautifulsoup/
TODO:
Improve with requests-html - https://github.com/psf/requests-html
- Get #about
- Get absolute URLs
- Search for text (prices/analyses)
r.html.search('Python is a {} language')[0]
"""
import re
import requests
from bs4 import BeautifulSoup
def get_page_metadata(url):
"""Scrape target URL for metadata."""
headers = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "GET",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Max-Age": "3600",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
}
# Handle URLs without http beginning
if not url.startswith("http"):
url = "http://" + url
response = requests.get(url, headers=headers)
html = BeautifulSoup(response.content, "html.parser")
metadata = {
"description": get_description(html),
"image_url": get_image(html), # FIXME: Append URL if relative path.
"favicon": get_favicon(html, url),
"brand_color": get_theme_color(html),
}
return response, html, metadata
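# Sketch of the metadata returned for a typical page (each value may be None
# if the corresponding tag is missing; the values below are placeholders):
# {
#     'description': 'Page description...',
#     'image_url': 'https://example.com/share.png',
#     'favicon': 'https://example.com/favicon.ico',
#     'brand_color': '#50fa7b',
# }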
def get_description(html):
"""Scrape page description."""
description = None
if html.find("meta", property="description"):
description = html.find("meta", property="description").get("content")
elif html.find("meta", property="og:description"):
description = html.find("meta", property="og:description").get("content")
elif html.find("meta", property="twitter:description"):
description = html.find("meta", property="twitter:description").get("content")
elif html.find("p"):
description = html.find("p").contents
if isinstance(description, list):
try:
description = description[0]
except IndexError:
pass
return description
def get_image(html):
"""Scrape share image."""
image = None
if html.find("meta", property="image"):
image = html.find("meta", property="image").get("content")
elif html.find("meta", property="og:image"):
image = html.find("meta", property="og:image").get("content")
elif html.find("meta", property="twitter:image"):
image = html.find("meta", property="twitter:image").get("content")
elif html.find("img", src=True):
image = html.find_all("img")[0].get("src")
return image
def get_favicon(html, url):
"""Scrape favicon."""
if html.find("link", attrs={"rel": "icon"}):
favicon = html.find("link", attrs={"rel": "icon"}).get("href")
elif html.find("link", attrs={"rel": "shortcut icon"}):
favicon = html.find("link", attrs={"rel": "shortcut icon"}).get("href")
else:
favicon = f'{url.rstrip("/")}/favicon.ico'
return favicon
def get_theme_color(html):
"""Scrape brand color."""
if html.find("meta", property="theme-color"):
color = html.find("meta", property="theme-color").get("content")
return color
return None
def get_phone(html, response):
"""Scrape phone number."""
try:
phone = html.select("a[href*=callto]")[0].text
return phone
    except IndexError:
pass
try:
phone = re.findall(
r"\(?\b[2-9][0-9]{2}\)?[-][2-9][0-9]{2}[-][0-9]{4}\b", response.text
)[0]
return phone
    except IndexError:
pass
try:
phone = re.findall(
r"\(?\b[2-9][0-9]{2}\)?[-. ]?[2-9][0-9]{2}[-. ]?[0-9]{4}\b", response.text
)[-1]
return phone
    except IndexError:
print("Phone number not found")
phone = ""
return phone
def get_email(html, response):
"""Get email."""
try:
email = re.findall(
r"([a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)", response.text
)[-1]
return email
    except IndexError:
pass
try:
email = html.select("a[href*=mailto]")[-1].text
    except IndexError:
print("Email not found")
email = ""
return email
def find_lab_address():
"""
TODO: Tries to find a lab's address from their website, then Google Maps.
"""
street, city, state, zipcode = None, None, None, None
return street, city, state, zipcode
def find_lab_linkedin():
"""
TODO: Tries to find a lab's LinkedIn URL. (Try to find LinkedIn on homepage?)
"""
return ""
def find_lab_url():
"""
TODO: Find a lab's website URL. (Google search for name?)
"""
return ""
def clean_string_columns(df):
"""Clean string columns in a dataframe."""
for column in df.columns:
try:
df[column] = df[column].str.title()
df[column] = df[column].str.replace("Llc", "LLC")
df[column] = df[column].str.replace("L.L.C.", "LLC")
df[column] = df[column].str.strip()
except AttributeError:
pass
return df
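if __name__ == '__main__':
    # Minimal usage sketch; the URL is a placeholder, not from this module.
    response, html, metadata = get_page_metadata('example.com')
    print(metadata)
    print('Email:', get_email(html, response))
    print('Phone:', get_phone(html, response))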
|
#!/usr/bin/env python3
import ffai.core.model as m
import ffai.core.table as t
import ffai.core.procedure as p
import ffai.ai.pathfinding as pf
import ffai.ai.proc_bot as pb
from typing import Optional, List, Dict
import ffai.core.game as g
from ffai.ai.registry import register_bot
import numpy as np
class ActionSequence:
def __init__(self, action_steps: List[m.Action], score: float = 0, description: str = ''):
""" Creates a new ActionSequence - an ordered list of sequential Actions to attempt to undertake.
:param action_steps: Sequence of action steps that form this action.
:param score: A score representing the attractiveness of the move (default: 0)
:param description: A debug string (default: '')
"""
        # Note the intention of this object is that when the object is acting, as steps are completed,
        # they are removed from the move_sequence so the next move is always at the top of the
        # move_sequence list.
self.action_steps = action_steps
self.score = score
self.description = description
def is_valid(self, game: g.Game) -> bool:
pass
def popleft(self):
return self.action_steps.pop(0)
# val = self.action_steps[0]
# del self.action_steps[0]
# return val
def is_empty(self):
return not self.action_steps
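# Usage sketch: an ActionSequence is consumed front-to-back while acting.
# For a hypothetical two-step move:
#   seq = ActionSequence([m.Action(t.ActionType.START_MOVE),
#                         m.Action(t.ActionType.END_PLAYER_TURN)],
#                        score=10.0, description='example move')
#   seq.popleft()   # -> START_MOVE; END_PLAYER_TURN stays queued
#   seq.is_empty()  # False until the last step is popped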
class FfHeatMap:
""" A heat map of a Blood Bowl field.
A class for analysing zones of control for both teams
"""
def __init__(self, game: g.Game, team: m.Team):
self.game = game
self.team = team
# Note that the edges are not on the field, but represent crowd squares
self.units_friendly: List[List[float]] = [[0.0 for y in range(game.state.pitch.height)] for x in range(game.state.pitch.width)]
self.units_opponent: List[List[float]] = [[0.0 for y in range(game.state.pitch.height)] for x in range(game.state.pitch.width)]
def add_unit_paths(self, player: m.Player, paths: List[pf.Path]):
is_friendly: bool = player.team == self.team
for path in paths:
if is_friendly:
self.units_friendly[path.steps[-1].x][path.steps[-1].y] += path.prob * path.prob
else:
self.units_opponent[path.steps[-1].x][path.steps[-1].y] += path.prob * path.prob
def add_unit_by_paths(self, game: g.Game, paths: Dict[m.Player, List[pf.Path]]):
for player in paths.keys():
self.add_unit_paths(player, paths[player])
def add_players_moved(self, game: g.Game, players: List[m.Player]):
for player in players:
adjacents: List[m.Square] = game.get_adjacent_squares(player.position, occupied=True)
self.units_friendly[player.position.x][player.position.y] += 1.0
            for adjacent in adjacents:
                self.units_friendly[adjacent.x][adjacent.y] += 0.5
def get_ball_move_square_safety_score(self, square: m.Square) -> float:
# Basic idea - identify safe regions to move the ball towards
# friendly_heat: float = self.units_friendly[square.x][square.y]
opponent_heat: float = self.units_opponent[square.x][square.y]
score: float = 30.0 * max(0.0, (1.0 - opponent_heat / 2))
# score: float=0.0
# if opponent_heat < 0.25: score += 15.0
# if opponent_heat < 0.05: score += 15.0
# if opponent_heat < 1.5: score += 5
# if friendly_heat > 3.5: score += 10.0
# score += max(30.0, 5.0*(friendly_heat-opponent_heat))
return score
def get_cage_necessity_score(self, square: m.Square) -> float:
# opponent_friendly: float = self.units_friendly[square.x][square.y]
opponent_heat: float = self.units_opponent[square.x][square.y]
score: float = 0.0
if opponent_heat < 0.4:
score -= 80.0
# if opponent_friendly > opponent_heat: score -= max(30.0, 10.0*(opponent_friendly-opponent_heat))
# if opponent_heat <1.5: score -=5
# if opponent_heat > opponent_friendly: score += 10.0*(opponent_friendly-opponent_heat)
return score
def blitz_used(game: g.Game) -> bool:
for action in game.state.available_actions:
if action.action_type == t.ActionType.START_BLITZ:
return False
return True
def handoff_used(game: g.Game) -> bool:
for action in game.state.available_actions:
if action.action_type == t.ActionType.START_HANDOFF:
return False
return True
def foul_used(game: g.Game) -> bool:
for action in game.state.available_actions:
if action.action_type == t.ActionType.START_FOUL:
return False
return True
def pass_used(game: g.Game) -> bool:
for action in game.state.available_actions:
if action.action_type == t.ActionType.START_PASS:
return False
return True
def get_players(game: g.Game, team: m.Team, include_own: bool = True, include_opp: bool = True, include_stunned: bool = True, include_used: bool = True, include_off_pitch: bool = False, only_blockable: bool = False, only_used: bool = False) -> List[m.Player]:
players: List[m.Player] = []
selected_players: List[m.Player] = []
for iteam in game.state.teams:
if iteam == team and include_own:
players.extend(iteam.players)
if iteam != team and include_opp:
players.extend(iteam.players)
for player in players:
if only_blockable and not player.state.up:
continue
if only_used and not player.state.used:
continue
if include_stunned or not player.state.stunned:
if include_used or not player.state.used:
if include_off_pitch or (player.position is not None and not game.is_out_of_bounds(player.position)):
selected_players.append(player)
return selected_players
def caging_squares_north_east(game: g.Game, protect_square: m.Square) -> List[m.Square]:
    # * At its simplest, a cage requires 4 players in the North-East, South-East, South-West and North-West
# * positions, relative to the ball carrier, such that there is no more than 3 squares between the players in
# * each of those adjacent compass directions.
# *
    # * 1   2
    # * xx-xx
    # * xx-xx
    # * --o--
    # * xx-xx
    # * xx-xx
    # * 3   4
# *
# * pitch is 26 long
# *
# *
# * Basically we need one player in each of the corners: 1-4, but spaced such that there is no gap of 3 squares.
    # * If the caging player is in 1-4, but next to the ball carrier, he ensures this will automatically be met.
# *
# * The only exception to this is when the ball carrier is on, or near, the sideline. Then return the squares
# * that can otherwise form the cage.
# *
caging_squares: List[m.Square] = []
x = protect_square.x
y = protect_square.y
if x <= game.state.pitch.width - 3:
if y == game.state.pitch.height - 2:
caging_squares.append(game.get_square(x + 1, y + 1))
caging_squares.append(game.get_square(x + 2, y + 1))
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
elif y == game.state.pitch.height - 1:
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
else:
caging_squares.append(game.get_square(x + 1, y + 1))
caging_squares.append(game.get_square(x + 1, y + 2))
caging_squares.append(game.get_square(x + 2, y + 1))
# caging_squares.append(game.state.pitch.get_square(x + 3, y + 3))
return caging_squares
def caging_squares_north_west(game: g.Game, protect_square: m.Square) -> List[m.Square]:
caging_squares: List[m.Square] = []
x = protect_square.x
y = protect_square.y
if x >= 3:
if y == game.state.pitch.height-2:
caging_squares.append(game.get_square(x - 1, y + 1))
caging_squares.append(game.get_square(x - 2, y + 1))
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
elif y == game.state.pitch.height-1:
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
else:
caging_squares.append(game.get_square(x - 1, y + 1))
caging_squares.append(game.get_square(x - 1, y + 2))
caging_squares.append(game.get_square(x - 2, y + 1))
# caging_squares.append(game.state.pitch.get_square(x - 3, y + 3))
return caging_squares
def caging_squares_south_west(game: g.Game, protect_square: m.Square) -> List[m.Square]:
caging_squares: List[m.Square] = []
x = protect_square.x
y = protect_square.y
if x >= 3:
if y == 2:
caging_squares.append(game.get_square(x - 1, y - 1))
caging_squares.append(game.get_square(x - 2, y - 1))
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
elif y == 1:
caging_squares.append(game.get_square(x - 1, y))
caging_squares.append(game.get_square(x - 2, y))
else:
caging_squares.append(game.get_square(x - 1, y - 1))
caging_squares.append(game.get_square(x - 1, y - 2))
caging_squares.append(game.get_square(x - 2, y - 1))
# caging_squares.append(game.state.pitch.get_square(x - 3, y - 3))
return caging_squares
def caging_squares_south_east(game: g.Game, protect_square: m.Square) -> List[m.Square]:
caging_squares: List[m.Square] = []
x = protect_square.x
y = protect_square.y
if x <= game.state.pitch.width - 3:
if y == 2:
caging_squares.append(game.get_square(x + 1, y - 1))
caging_squares.append(game.get_square(x + 2, y - 1))
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
elif y == 1:
caging_squares.append(game.get_square(x + 1, y))
caging_squares.append(game.get_square(x + 2, y))
else:
caging_squares.append(game.get_square(x + 1, y - 1))
caging_squares.append(game.get_square(x + 1, y - 2))
caging_squares.append(game.get_square(x + 2, y - 1))
# caging_squares.append(game.get_square(x + 3, y - 3))
return caging_squares
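# Sketch: the full set of candidate cage corners around a ball carrier is the
# union of the four quadrant helpers, e.g.:
#   cage_squares = (caging_squares_north_east(game, carrier.position)
#                   + caging_squares_north_west(game, carrier.position)
#                   + caging_squares_south_east(game, carrier.position)
#                   + caging_squares_south_west(game, carrier.position))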
def is_caging_position(game: g.Game, player: m.Player, protect_player: m.Player) -> bool:
return player.position.distance(protect_player.position) <= 2 and not is_castle_position_of(game, player, protect_player)
def has_player_within_n_squares(game: g.Game, units: List[m.Player], square: m.Square, num_squares: int) -> bool:
for cur in units:
if cur.position.distance(square) <= num_squares:
return True
return False
def has_adjacent_player(game: g.Game, square: m.Square) -> bool:
    return bool(game.get_adjacent_players(square))
def is_castle_position_of(game: g.Game, player1: m.Player, player2: m.Player) -> bool:
return player1.position.x == player2.position.x or player1.position.y == player2.position.y
def is_bishop_position_of(game: g.Game, player1: m.Player, player2: m.Player) -> bool:
return abs(player1.position.x - player2.position.x) == abs(player1.position.y - player2.position.y)
def attacker_would_surf(game: g.Game, attacker: m.Player, defender: m.Player) -> bool:
if (defender.has_skill(t.Skill.SIDE_STEP) and not attacker.has_skill(t.Skill.GRAB)) or defender.has_skill(t.Skill.STAND_FIRM):
return False
if not attacker.position.is_adjacent(defender.position):
return False
return direct_surf_squares(game, attacker.position, defender.position)
def direct_surf_squares(game: g.Game, attack_square: m.Square, defend_square: m.Square) -> bool:
defender_on_sideline: bool = on_sideline(game, defend_square)
defender_in_endzone: bool = on_endzone(game, defend_square)
if defender_on_sideline and defend_square.x == attack_square.x:
return True
if defender_in_endzone and defend_square.y == attack_square.y:
return True
if defender_in_endzone and defender_on_sideline:
return True
return False
def reverse_x_for_right(game: g.Game, team: m.Team, x: int) -> int:
if not game.is_team_side(m.Square(13, 3), team):
res = game.state.pitch.width - 1 - x
else:
res = x
return res
def reverse_x_for_left(game: g.Game, team: m.Team, x: int) -> int:
if game.is_team_side(m.Square(13, 3), team):
res = game.state.pitch.width - 1 - x
else:
res = x
return res
def on_sideline(game: g.Game, square: m.Square) -> bool:
return square.y == 1 or square.y == game.state.pitch.height - 1
def on_endzone(game: g.Game, square: m.Square) -> bool:
return square.x == 1 or square.x == game.state.pitch.width - 1
def on_los(game: g.Game, team: m.Team, square: m.Square) -> bool:
    return (reverse_x_for_right(game, team, square.x) == 13) and 4 < square.y < 12
def los_squares(game: g.Game, team: m.Team) -> List[m.Square]:
    # Line-of-scrimmage squares in the centre field (x=13 mirrored per team, y=5..11)
    x = reverse_x_for_right(game, team, 13)
    return [game.get_square(x, y) for y in range(5, 12)]
def distance_to_sideline(game: g.Game, square: m.Square) -> int:
return min(square.y - 1, game.state.pitch.height - square.y - 2)
def is_endzone(game, square: m.Square) -> bool:
return square.x == 1 or square.x == game.state.pitch.width - 1
def last_block_proc(game) -> Optional[p.Block]:
for i in range(len(game.state.stack.items) - 1, -1, -1):
if isinstance(game.state.stack.items[i], p.Block):
block_proc = game.state.stack.items[i]
return block_proc
return None
def is_adjacent_ball(game: g.Game, square: m.Square) -> bool:
ball_square = game.get_ball_position()
return ball_square is not None and ball_square.is_adjacent(square)
def squares_within(game: g.Game, square: m.Square, distance: int) -> List[m.Square]:
squares: List[m.Square] = []
for i in range(-distance, distance + 1):
for j in range(-distance, distance + 1):
cur_square = game.get_square(square.x + i, square.y + j)
if cur_square != square and not game.is_out_of_bounds(cur_square):
squares.append(cur_square)
return squares
def distance_to_defending_endzone(game: g.Game, team: m.Team, position: m.Square) -> int:
res = reverse_x_for_right(game, team, position.x) - 1
return res
def distance_to_scoring_endzone(game: g.Game, team: m.Team, position: m.Square) -> int:
res = reverse_x_for_left(game, team, position.x) - 1
return res
# return game.state.pitch.width - 1 - reverse_x_for_right(game, team, position.x)
def players_in_scoring_endzone(game: g.Game, team: m.Team, include_own: bool = True, include_opp: bool = False) -> List[m.Player]:
players: List[m.Player] = get_players(game, team, include_own=include_own, include_opp=include_opp)
selected_players: List[m.Player] = []
for player in players:
if in_scoring_endzone(game, team, player.position):
selected_players.append(player)
return selected_players
def in_scoring_endzone(game: g.Game, team: m.Team, square: m.Square) -> bool:
return reverse_x_for_left(game, team, square.x) == 1
def players_in_scoring_distance(game: g.Game, team: m.Team, include_own: bool = True, include_opp: bool = True, include_stunned: bool = False) -> List[m.Player]:
players: List[m.Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_stunned=include_stunned)
selected_players: List[m.Player] = []
for player in players:
if distance_to_scoring_endzone(game, team, player.position) <= player.num_moves_left():
selected_players.append(player)
return selected_players
def distance_to_nearest_player(game: g.Game, team: m.Team, square: m.Square, include_own: bool = True, include_opp: bool = True, only_used: bool = False, include_used: bool = True, include_stunned: bool = True, only_blockable: bool = False) -> int:
opps: List[m.Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, only_used=only_used, include_used=include_used, include_stunned=include_stunned, only_blockable=only_blockable)
    cur_min = 100
    for opp in opps:
        dist = opp.position.distance(square)
        cur_min = min(cur_min, dist)
    return cur_min
def screening_distance(game: g.Game, from_square: m.Square, to_square: m.Square) -> float:
# Return the "screening distance" between 3 squares. (To complete)
# float dist =math.sqrt(math.pow(m.Square.x - cur.position.x, 3) + math.pow(m.Square.y - cur.position.y, 3))
return 0.0
def num_opponents_can_reach(game: g.Game, team: m.Team, square: m.Square) -> int:
opps: List[m.Player] = get_players(game, team, include_own=False, include_opp=True)
num_opps_reach: int = 0
for cur in opps:
        dist = max(abs(square.x - cur.position.x), abs(square.y - cur.position.y))
if cur.state.stunned:
continue
move_allowed = cur.get_ma() + 2
if not cur.state.up:
move_allowed -= 3
if dist < move_allowed:
num_opps_reach += 1
return num_opps_reach
def num_opponents_on_field(game: g.Game, team: m.Team) -> int:
opps: List[m.Player] = get_players(game, team, include_own=False, include_opp=True)
num_opponents = 0
for cur in opps:
if cur.position is not None:
num_opponents += 1
return num_opponents
def number_opponents_closer_than_to_endzone(game: g.Game, team: m.Team, square: m.Square) -> int:
opponents: List[m.Player] = get_players(game, team, include_own=False, include_opp=True)
num_opps = 0
distance_square_endzone = distance_to_defending_endzone(game, team, square)
for opponent in opponents:
distance_opponent_endzone = distance_to_defending_endzone(game, team, opponent.position)
if distance_opponent_endzone < distance_square_endzone:
num_opps += 1
return num_opps
def in_scoring_range(game: g.Game, player: m.Player) -> bool:
return player.num_moves_left() >= distance_to_scoring_endzone(game, player.team, player.position)
def players_in_scoring_range(game: g.Game, team: m.Team, include_own=True, include_opp=True, include_used=True, include_stunned=True) -> List[m.Player]:
players: List[m.Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_stunned=include_stunned, include_used=include_used)
res: List[m.Player] = []
for player in players:
if in_scoring_range(game, player):
res.append(player)
return res
def players_in(game: g.Game, team: m.Team, squares: List[m.Square], include_own=True, include_opp=True, include_used=True, include_stunned=True, only_blockable=False) -> List[m.Player]:
allowed_players: List[m.Player] = get_players(game, team, include_own=include_own, include_opp=include_opp, include_used=include_used, include_stunned=include_stunned, only_blockable=only_blockable)
res: List[m.Player] = []
for square in squares:
player: Optional[m.Player] = game.get_player_at(square)
if player is None:
continue
if player in allowed_players:
res.append(player)
return res
class GrodBot(pb.Agent):
"""
A Bot that uses path finding to evaluate all possibilities.
WIP!!! Hand-offs and Pass actions going a bit funny.
"""
mean_actions_available = []
steps = []
BASE_SCORE_BLITZ = 60.0
BASE_SCORE_FOUL = -50.0
BASE_SCORE_BLOCK = 65 # For a two dice block
BASE_SCORE_HANDOFF = 40.0
BASE_SCORE_PASS = 40.0
BASE_SCORE_MOVE_TO_OPPONENT = 45.0
BASE_SCORE_MOVE_BALL = 45.0
BASE_SCORE_MOVE_TOWARD_BALL = 45.0
BASE_SCORE_MOVE_TO_SWEEP = 0.0
BASE_SCORE_CAGE_BALL = 70.0
BASE_SCORE_MOVE_TO_BALL = 60.0
BASE_SCORE_BALL_AND_CHAIN = 75.0
BASE_SCORE_DEFENSIVE_SCREEN = 0.0
ADDITIONAL_SCORE_DODGE = 0.0 # Lower this value to dodge more.
ADDITIONAL_SCORE_NEAR_SIDELINE = -20.0
ADDITIONAL_SCORE_SIDELINE = -40.0
def __init__(self, name):
super().__init__(name)
self.my_team = None
self.opp_team = None
self.current_move: Optional[ActionSequence] = None
self.verbose = True
self.debug = False
self.heat_map: Optional[FfHeatMap] = None
self.actions_available = []
def set_verbose(self, verbose):
self.verbose = verbose
def set_debug(self, debug):
self.debug = debug
def act(self, game):
# Refresh my_team and opp_team (they seem to be copies)
proc = game.state.stack.peek()
available_actions = game.state.available_actions
available_action_types = [available_action.action_type for available_action in available_actions]
# Update local my_team and opp_team variables to latest copy (to ensure fresh data)
if hasattr(proc, 'team'):
assert proc.team == self.my_team
self.my_team = proc.team
self.opp_team = game.get_opp_team(self.my_team)
# For statistical purposes, keeps a record of # action choices.
available = 0
for action_choice in available_actions:
if len(action_choice.positions) == 0 and len(action_choice.players) == 0:
available += 1
elif len(action_choice.positions) > 0:
available += len(action_choice.positions)
else:
available += len(action_choice.players)
self.actions_available.append(available)
# Evaluate appropriate action for each possible procedure
if isinstance(proc, p.CoinTossFlip):
action = self.coin_toss_flip(game)
elif isinstance(proc, p.CoinTossKickReceive):
action = self.coin_toss_kick_receive(game)
elif isinstance(proc, p.Setup):
action = self.setup(game)
elif isinstance(proc, p.PlaceBall):
action = self.place_ball(game)
elif isinstance(proc, p.HighKick):
action = self.high_kick(game)
elif isinstance(proc, p.Touchback):
action = self.touchback(game)
elif isinstance(proc, p.Turn):
if proc.quick_snap:
action = self.quick_snap(game)
elif proc.blitz:
action = self.blitz(game)
else:
action = self.turn(game)
elif isinstance(proc, p.PlayerAction):
action = self.player_action(game)
elif isinstance(proc, p.Block):
action = self.block(game)
elif isinstance(proc, p.Push):
action = self.push(game)
elif isinstance(proc, p.FollowUp):
action = self.follow_up(game)
elif isinstance(proc, p.Apothecary):
action = self.apothecary(game)
elif isinstance(proc, p.PassAction):
action = self.pass_action(game)
elif isinstance(proc, p.Catch):
action = self.catch(game)
elif isinstance(proc, p.Interception):
action = self.interception(game)
elif isinstance(proc, p.Reroll):
action = self.reroll(game)
elif isinstance(proc, p.Shadowing):
action = self.shadowing(game)
else:
if self.debug:
raise Exception("Unknown procedure: ", proc)
elif t.ActionType.USE_SKILL in available_action_types:
# Catch-all for things like Break Tackle, Diving Tackle etc
return m.Action(t.ActionType.USE_SKILL)
else:
# Ugly catch-all -> simply pick an action
action_choice = available_actions[0]
player = action_choice.players[0] if action_choice.players else None
position = action_choice.positions[0] if action_choice.positions else None
action = m.Action(action_choice.action_type, position=position, player=player)
# raise Exception("Unknown procedure: ", proc)
# Check returned Action is valid
action_found = False
for available_action in available_actions:
            if action.action_type == available_action.action_type:
                if available_action.players and available_action.positions:
                    action_found = (action.player in available_action.players) and (action.position in available_action.positions)
elif available_action.players:
action_found = action.player in available_action.players
elif available_action.positions:
action_found = action.position in available_action.positions
else:
action_found = True
if not action_found:
if self.debug:
raise Exception('Invalid action')
else:
# Ugly catch-all -> simply pick an action
action_choice = available_actions[0]
player = action_choice.players[0] if action_choice.players else None
position = action_choice.positions[0] if action_choice.positions else None
action = m.Action(action_choice.action_type, position=position, player=player)
if self.verbose:
current_team = game.state.current_team.name if game.state.current_team is not None else available_actions[0].team.name
print(' Turn=H' + str(game.state.half) + 'R' + str(game.state.round) + ', Team=' + current_team + ', Action=' + action.action_type.name)
return action
def reroll(self, game):
proc = game.state.stack.peek()
# target_roll = proc.context.roll.target
# target_higher = proc.context.roll.target_higher
# dice = proc.context.roll.dice
# num_dice = len(dice)
if proc.can_use_pro:
return m.Action(t.ActionType.USE_SKILL)
if isinstance(proc.context, p.GFI):
return m.Action(t.ActionType.USE_REROLL)
if isinstance(proc.context, p.Dodge):
return m.Action(t.ActionType.USE_REROLL)
if isinstance(proc.context, p.Catch):
return m.Action(t.ActionType.USE_REROLL)
if isinstance(proc.context, p.Pickup):
return m.Action(t.ActionType.USE_REROLL)
else:
return m.Action(t.ActionType.USE_REROLL)
def new_game(self, game: g.Game, team):
"""
Called when a new game starts.
"""
self.my_team = team
self.opp_team = game.get_opp_team(team)
self.actions_available = []
def coin_toss_flip(self, game: g.Game):
"""
Select heads/tails and/or kick/receive
"""
return m.Action(t.ActionType.TAILS)
# return Action(ActionType.HEADS)
def coin_toss_kick_receive(self, game: g.Game):
"""
Select heads/tails and/or kick/receive
"""
return m.Action(t.ActionType.RECEIVE)
# return Action(ActionType.KICK)
def setup(self, game: g.Game) -> m.Action:
"""
Move players from the reserves to the pitch
"""
if isinstance(game.state.stack.peek(), p.Setup):
proc: p.Setup = game.state.stack.peek()
else:
raise ValueError('Setup procedure expected')
if proc.reorganize:
# We are dealing with perfect defence. For now do nothing, but we could send all players back to reserve box
action_steps: List[m.Action] = [m.Action(t.ActionType.END_SETUP)]
self.current_move = ActionSequence(action_steps, description='Perfect Defence do nothing')
else:
if not get_players(game, self.my_team, include_own=True, include_opp=False, include_off_pitch=False):
# If no players are on the pitch yet, create a new ActionSequence for the setup.
action_steps: List[m.Action] = []
turn = game.state.round
half = game.state.half
opp_score = 0
for team in game.state.teams:
if team != self.my_team:
opp_score = max(opp_score, team.state.score)
score_diff = self.my_team.state.score - opp_score
# Choose 11 best players to field
players_available: List[m.Player] = []
for available_action in game.state.available_actions:
if available_action.action_type == t.ActionType.PLACE_PLAYER:
players_available = available_action.players
players_sorted_value = sorted(players_available, key=lambda x: player_value(game, x), reverse=True)
n_keep: int = min(11, len(players_sorted_value))
players_available = players_sorted_value[:n_keep]
# Are we kicking or receiving?
if game.state.receiving_this_drive:
place_squares: List[m.Square] = [
game.get_square(reverse_x_for_right(game, self.my_team, 13), 7),
game.get_square(reverse_x_for_right(game, self.my_team, 13), 8),
game.get_square(reverse_x_for_right(game, self.my_team, 13), 9),
# Receiver next
game.get_square(reverse_x_for_right(game, self.my_team, 8), 8),
# Support line players
game.get_square(reverse_x_for_right(game, self.my_team, 13), 10),
game.get_square(reverse_x_for_right(game, self.my_team, 13), 11),
game.get_square(reverse_x_for_right(game, self.my_team, 13), 5),
game.get_square(reverse_x_for_right(game, self.my_team, 13), 13),
# A bit wide semi-defensive
game.get_square(reverse_x_for_right(game, self.my_team, 11), 4),
game.get_square(reverse_x_for_right(game, self.my_team, 11), 12),
# Extra help at the back
game.get_square(reverse_x_for_right(game, self.my_team, 10), 8)
]
players_sorted_bash = sorted(players_available, key=lambda x: player_bash_ability(game, x), reverse=True)
players_sorted_blitz = sorted(players_available, key=lambda x: player_blitz_ability(game, x), reverse=True)
else:
place_squares: List[m.Square] = [
# LOS squares first
game.get_square(reverse_x_for_right(game, self.my_team, 13), 7),
game.get_square(reverse_x_for_right(game, self.my_team, 13), 8),
game.get_square(reverse_x_for_right(game, self.my_team, 13), 9),
game.get_square(reverse_x_for_right(game, self.my_team, 12), 3),
game.get_square(reverse_x_for_right(game, self.my_team, 12), 13),
game.get_square(reverse_x_for_right(game, self.my_team, 11), 2),
game.get_square(reverse_x_for_right(game, self.my_team, 11), 14),
game.get_square(reverse_x_for_right(game, self.my_team, 12), 5),
game.get_square(reverse_x_for_right(game, self.my_team, 12), 10),
game.get_square(reverse_x_for_right(game, self.my_team, 11), 11),
game.get_square(reverse_x_for_right(game, self.my_team, 11), 5)
]
players_sorted_bash = sorted(players_available, key=lambda x: player_bash_ability(game, x), reverse=True)
players_sorted_blitz = sorted(players_available, key=lambda x: player_blitz_ability(game, x), reverse=True)
for i in range(len(players_available)):
action_steps.append(m.Action(t.ActionType.PLACE_PLAYER, player=players_sorted_bash[i], position=place_squares[i]))
action_steps.append(m.Action(t.ActionType.END_SETUP))
self.current_move = ActionSequence(action_steps, description='Setup')
# We must have initialised the action sequence, lets execute it
if self.current_move.is_empty():
            raise Exception('Setup produced an empty ActionSequence')
else:
next_action: m.Action = self.current_move.popleft()
return next_action
def place_ball(self, game: g.Game):
"""
Place the ball when kicking.
"""
# Note left_center square is 7,8
center_opposite: m.Square = m.Square(reverse_x_for_left(game, self.my_team, 7), 8)
return m.Action(t.ActionType.PLACE_BALL, position=center_opposite)
def high_kick(self, game: g.Game):
"""
Select player to move under the ball.
"""
ball_pos = game.get_ball_position()
        if game.is_team_side(ball_pos, self.my_team) and game.get_player_at(ball_pos) is None:
players_available = game.get_players_on_pitch(self.my_team, up=True)
if players_available:
players_sorted = sorted(players_available, key=lambda x: player_blitz_ability(game, x), reverse=True)
player = players_sorted[0]
return m.Action(t.ActionType.PLACE_PLAYER, player=player, position=ball_pos)
return m.Action(t.ActionType.SELECT_NONE)
def touchback(self, game: g.Game):
"""
Select player to give the ball to.
"""
players_available = game.get_players_on_pitch(self.my_team, up=True)
if players_available:
players_sorted = sorted(players_available, key=lambda x: player_blitz_ability(game, x), reverse=True)
player = players_sorted[0]
return m.Action(t.ActionType.SELECT_PLAYER, player=player)
return m.Action(t.ActionType.SELECT_NONE)
def set_next_move(self, game: g.Game):
""" Set self.current_move
:param game:
"""
self.current_move = None
players_moved: List[m.Player] = get_players(game, self.my_team, include_own=True, include_opp=False, include_used=True, only_used=False)
players_to_move: List[m.Player] = get_players(game, self.my_team, include_own=True, include_opp=False, include_used=False)
paths_own: Dict[m.Player, List[pf.Path]] = dict()
for player in players_to_move:
paths = pf.get_all_paths(game, player, from_position=None, num_moves_used=None, allow_skill_reroll=True, max_search_distance=player.num_moves_left())
paths_own[player] = paths
players_opponent: List[m.Player] = get_players(game, self.my_team, include_own=False, include_opp=True, include_stunned=False)
paths_opposition: Dict[m.Player, List[pf.Path]] = dict()
for player in players_opponent:
paths = pf.get_all_paths(game, player, from_position=None, num_moves_used=None, allow_skill_reroll=True, max_search_distance=player.num_moves_left())
paths_opposition[player] = paths
# Create a heat-map of control zones
heat_map: FfHeatMap = FfHeatMap(game, self.my_team)
heat_map.add_unit_by_paths(game, paths_opposition)
heat_map.add_unit_by_paths(game, paths_own)
heat_map.add_players_moved(game, get_players(game, self.my_team, include_own=True, include_opp=False, only_used=True))
self.heat_map = heat_map
all_actions: List[ActionSequence] = []
for action_choice in game.state.available_actions:
if action_choice.action_type == t.ActionType.START_MOVE:
players_available: List[m.Player] = action_choice.players
for player in players_available:
paths = paths_own[player]
all_actions.extend(potential_move_actions(game, heat_map, player, paths))
elif action_choice.action_type == t.ActionType.START_BLITZ:
players_available: List[m.Player] = action_choice.players
for player in players_available:
paths = pf.get_all_paths(game, player, from_position=None, num_moves_used=None, allow_skill_reroll=True, max_search_distance=player.num_moves_left()-1)
all_actions.extend(potential_blitz_actions(game, heat_map, player, paths))
elif action_choice.action_type == t.ActionType.START_FOUL:
players_available: List[m.Player] = action_choice.players
for player in players_available:
paths = paths_own[player]
all_actions.extend(potential_foul_actions(game, heat_map, player, paths))
elif action_choice.action_type == t.ActionType.START_BLOCK:
players_available: List[m.Player] = action_choice.players
for player in players_available:
all_actions.extend(potential_block_actions(game, heat_map, player))
elif action_choice.action_type == t.ActionType.START_PASS:
players_available: List[m.Player] = action_choice.players
for player in players_available:
player_square: m.Square = player.position
if game.get_ball_position() == player_square:
paths = paths_own[player]
all_actions.extend(potential_pass_actions(game, heat_map, player, paths))
elif action_choice.action_type == t.ActionType.START_HANDOFF:
players_available: List[m.Player] = action_choice.players
for player in players_available:
player_square: m.Square = player.position
if game.get_ball_position() == player_square:
paths = paths_own[player]
all_actions.extend(potential_handoff_actions(game, heat_map, player, paths))
elif action_choice.action_type == t.ActionType.END_TURN:
all_actions.extend(potential_end_turn_action(game))
if all_actions:
all_actions.sort(key=lambda x: x.score, reverse=True)
self.current_move = all_actions[0]
if self.verbose:
print(' Turn=H' + str(game.state.half) + 'R' + str(game.state.round) + ', Team=' + game.state.current_team.name + ', Action=' + self.current_move.description + ', Score=' + str(self.current_move.score))
def set_continuation_move(self, game: g.Game):
""" Set self.current_move
:param game:
"""
self.current_move = None
player: m.Player = game.state.active_player
paths = pf.get_all_paths(game, player, from_position=None, num_moves_used=None, allow_skill_reroll=True, max_search_distance=player.num_moves_left() - 1)
all_actions: List[ActionSequence] = []
for action_choice in game.state.available_actions:
if action_choice.action_type == t.ActionType.MOVE:
players_available: List[m.Player] = action_choice.players
all_actions.extend(potential_move_actions(game, self.heat_map, player, paths, is_continuation=True))
elif action_choice.action_type == t.ActionType.END_PLAYER_TURN:
all_actions.extend(potential_end_player_turn_action(game, self.heat_map, player))
if all_actions:
all_actions.sort(key=lambda x: x.score, reverse=True)
self.current_move = all_actions[0]
if self.verbose:
print(' Turn=H' + str(game.state.half) + 'R' + str(game.state.round) + ', Team=' + game.state.current_team.name + ', Action=Continue Move + ' + self.current_move.description + ', Score=' + str(self.current_move.score))
def turn(self, game: g.Game) -> m.Action:
"""
Start a new player action / turn.
"""
# Simple algorithm:
# Loop through all available (yet to move) players.
# Compute all possible moves for all players.
# Assign a score to each action for each player.
# The player/play with the highest score is the one the Bot will attempt to use.
# Store a representation of this turn internally (for use by player-action) and return the action to begin.
self.set_next_move(game)
next_action: m.Action = self.current_move.popleft()
return next_action
def quick_snap(self, game: g.Game):
self.current_move = None
return m.Action(t.ActionType.END_TURN)
def blitz(self, game: g.Game):
self.current_move = None
return m.Action(t.ActionType.END_TURN)
def player_action(self, game: g.Game):
"""
Take the next action from the current stack and execute
"""
if self.current_move.is_empty():
self.set_continuation_move(game)
action_step = self.current_move.popleft()
return action_step
def shadowing(self, game: g.Game):
"""
Select block die or reroll.
"""
# Loop through available dice results
proc = game.state.stack.peek()
return m.Action(t.ActionType.USE_SKILL)
def block(self, game: g.Game):
"""
Select block die or reroll.
"""
# Loop through available dice results
proc = game.state.stack.peek()
if proc.waiting_juggernaut:
return m.Action(t.ActionType.USE_SKILL)
if proc.waiting_wrestle_attacker or proc.waiting_wrestle_defender:
return m.Action(t.ActionType.USE_SKILL)
active_player: m.Player = game.state.active_player
attacker: m.Player = game.state.stack.items[-1].attacker
defender: m.Player = game.state.stack.items[-1].defender
favor: m.Team = game.state.stack.items[-1].favor
actions: List[ActionSequence] = []
check_reroll = False
for action_choice in game.state.available_actions:
if action_choice.action_type == t.ActionType.USE_REROLL:
check_reroll = True
continue
action_steps: List[m.Action] = [
m.Action(action_choice.action_type)
]
score = block_favourability(action_choice.action_type, self.my_team, active_player, attacker, defender, favor)
actions.append(ActionSequence(action_steps, score=score, description='Block die choice'))
if check_reroll and check_reroll_block(game, self.my_team, actions, favor):
return m.Action(t.ActionType.USE_REROLL)
else:
actions.sort(key=lambda x: x.score, reverse=True)
current_move = actions[0]
return current_move.action_steps[0]
def push(self, game: g.Game):
"""
Select square to push to.
"""
# Loop through available squares
block_proc: Optional[p.Block] = last_block_proc(game)
attacker: m.Player = block_proc.attacker
defender: m.Player = block_proc.defender
is_blitz_action = block_proc.blitz
        score: float = -100.0
        push_square: Optional[m.Square] = None
        for to_square in game.state.available_actions[0].positions:
            cur_score = score_push(game, defender.position, to_square)
            if push_square is None or cur_score > score:
                score = cur_score
                push_square = to_square
return m.Action(t.ActionType.PUSH, position=push_square)
def follow_up(self, game: g.Game):
"""
Follow up or not. ActionType.FOLLOW_UP must be used together with a position.
"""
player = game.state.active_player
do_follow = check_follow_up(game)
for position in game.state.available_actions[0].positions:
if do_follow and player.position != position:
return m.Action(t.ActionType.FOLLOW_UP, position=position)
elif not do_follow and player.position == position:
return m.Action(t.ActionType.FOLLOW_UP, position=position)
def apothecary(self, game: g.Game):
"""
Use apothecary?
"""
# Update here -> apothecary BH in first half, KO or BH in second half
return m.Action(t.ActionType.USE_APOTHECARY)
# return Action(ActionType.DONT_USE_APOTHECARY)
def interception(self, game: g.Game):
"""
Select interceptor.
"""
for action in game.state.available_actions:
if action.action_type == t.ActionType.SELECT_PLAYER:
for player, agi_rolls in zip(action.players, action.agi_rolls):
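                    # Just take the first eligible interceptor; agi_rolls could be used to pick the best one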
return m.Action(t.ActionType.SELECT_PLAYER, player=player)
return m.Action(t.ActionType.SELECT_NONE)
def pass_action(self, game: g.Game):
"""
Reroll or not.
"""
return m.Action(t.ActionType.USE_REROLL)
# return Action(ActionType.DONT_USE_REROLL)
def end_game(self, game: g.Game):
"""
Called when a game end.
"""
print(f'''Result for {self.name}''')
print('------------------')
print(f'''Num steps: {len(self.actions_available)}''')
print(f'''Avg. branching factor: {np.mean(self.actions_available)}''')
GrodBot.steps.append(len(self.actions_available))
GrodBot.mean_actions_available.append(np.mean(self.actions_available))
print(f'''Avg. Num steps: {np.mean(GrodBot.steps)}''')
print(f'''Avg. overall branching factor: {np.mean(GrodBot.mean_actions_available)}''')
winner = game.get_winner()
print(f'''Casualties: {game.state.home_team.name} ({game.home_agent.name}): {game.num_casualties(game.state.home_team)} ... {game.state.away_team.name} ({game.away_agent.name}): {game.num_casualties(game.state.away_team)}''')
print(f'''Score: {game.state.home_team.name} ({game.home_agent.name}): {game.state.home_team.state.score} ... {game.state.away_team.name} ({game.away_agent.name}): {game.state.away_team.state.score}''')
        if winner is None:
            print("It's a draw")
        elif winner == self:
            print('I won')
        else:
            print('I lost')
print('------------------')
def block_favourability(block_result: m.ActionType, team: m.Team, active_player: m.Player, attacker: m.Player, defender: m.Player, favor: m.Team) -> float:
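    """
    Rate a block die result on a rough 1.0 (worst) to 6.0 (best) scale from the
    perspective of the side choosing the result.
    """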
if attacker.team == active_player.team:
if block_result == t.ActionType.SELECT_DEFENDER_DOWN:
return 6.0
elif block_result == t.ActionType.SELECT_DEFENDER_STUMBLES:
if defender.has_skill(t.Skill.DODGE) and not attacker.has_skill(t.Skill.TACKLE):
return 4.0 # push back
else:
return 6.0
elif block_result == t.ActionType.SELECT_PUSH:
return 4.0
elif block_result == t.ActionType.SELECT_BOTH_DOWN:
if defender.has_skill(t.Skill.BLOCK) and not attacker.has_skill(t.Skill.BLOCK):
return 1.0 # skull
elif not attacker.has_skill(t.Skill.BLOCK):
return 2 # both down
elif attacker.has_skill(t.Skill.BLOCK) and defender.has_skill(t.Skill.BLOCK):
return 3.0 # nothing happens
else:
return 5.0 # only defender is down
elif block_result == t.ActionType.SELECT_ATTACKER_DOWN:
return 1.0 # skull
else:
if block_result == t.ActionType.SELECT_DEFENDER_DOWN:
return 1.0 # least favourable
elif block_result == t.ActionType.SELECT_DEFENDER_STUMBLES:
if defender.has_skill(t.Skill.DODGE) and not attacker.has_skill(t.Skill.TACKLE):
return 3 # not going down, so I like this.
else:
return 1.0 # splat. No good.
elif block_result == t.ActionType.SELECT_PUSH:
return 3.0
elif block_result == t.ActionType.SELECT_BOTH_DOWN:
if not attacker.has_skill(t.Skill.BLOCK) and defender.has_skill(t.Skill.BLOCK):
return 6.0 # Attacker down, I am not.
if not attacker.has_skill(t.Skill.BLOCK) and not defender.has_skill(t.Skill.BLOCK):
return 5.0 # Both down is pretty good.
if attacker.has_skill(t.Skill.BLOCK) and not defender.has_skill(t.Skill.BLOCK):
return 2.0 # Just I splat
else:
return 4.0 # Nothing happens (both have block).
elif block_result == t.ActionType.SELECT_ATTACKER_DOWN:
return 6.0 # most favourable!
return 0.0
def potential_end_player_turn_action(game: g.Game, heat_map: FfHeatMap, player: m.Player) -> List[ActionSequence]:
actions: List[ActionSequence] = []
action_steps: List[m.Action] = [
m.Action(t.ActionType.END_PLAYER_TURN)
]
# End turn happens on a score of 1.0. Any actions with a lower score are never selected.
actions.append(ActionSequence(action_steps, score=1.0, description='End Turn'))
return actions
def potential_end_turn_action(game: g.Game) -> List[ActionSequence]:
actions: List[ActionSequence] = []
action_steps: List[m.Action] = [
m.Action(t.ActionType.END_TURN)
]
# End turn happens on a score of 1.0. Any actions with a lower score are never selected.
actions.append(ActionSequence(action_steps, score=1.0, description='End Turn'))
return actions
def potential_block_actions(game: g.Game, heat_map: FfHeatMap, player: m.Player) -> List[ActionSequence]:
    # Note to self: need a "stand up and end move" option.
move_actions: List[ActionSequence] = []
if not player.state.up:
# There is currently a bug in the controlling logic. Prone players shouldn't be able to block
return move_actions
blockable_players: List[m.Player] = game.get_adjacent_opponents(player, standing=True, stunned=False, down=False)
for blockable_player in blockable_players:
action_steps: List[m.Action] = [
m.Action(t.ActionType.START_BLOCK, player=player),
m.Action(t.ActionType.BLOCK, position=blockable_player.position),
m.Action(t.ActionType.END_PLAYER_TURN)
]
action_score = score_block(game, heat_map, player, blockable_player)
score = action_score
move_actions.append(ActionSequence(action_steps, score=score, description='Block ' + player.name + ' to (' + str(blockable_player.position.x) + ',' + str(blockable_player.position.y) + ')'))
# potential action -> sequence of steps such as "START_MOVE, MOVE (to square) etc
return move_actions
def potential_blitz_actions(game: g.Game, heat_map: FfHeatMap, player: m.Player, paths: List[pf.Path]) -> List[ActionSequence]:
move_actions: List[ActionSequence] = []
for path in paths:
path_steps = path.steps
end_square: m.Square = game.get_square(path.steps[-1].x, path.steps[-1].y)
blockable_players = game.get_adjacent_players(end_square, team=game.get_opp_team(player.team), down=False, stunned=False)
blockable_squares = [player.position for player in blockable_players]
for blockable_square in blockable_squares:
action_steps: List[m.Action] = []
action_steps.append(m.Action(t.ActionType.START_BLITZ, player=player))
if not player.state.up:
action_steps.append(m.Action(t.ActionType.STAND_UP))
for step in path_steps:
# Note we need to add 1 to x and y because the outermost layer of squares is not actually reachable
action_steps.append(m.Action(t.ActionType.MOVE, position=game.get_square(step.x, step.y), player=player))
action_steps.append(m.Action(t.ActionType.BLOCK, position=blockable_square))
# action_steps.append(m.Action(t.ActionType.END_PLAYER_TURN))
action_score = score_blitz(game, heat_map, player, end_square, game.get_player_at(blockable_square))
            path_score = path_cost_to_score(path)  # TODO: if the block requires an extra GFI, increase the cost here.
score = action_score + path_score
move_actions.append(ActionSequence(action_steps, score=score, description='Blitz ' + player.name + ' to ' + str(blockable_square.x) + ',' + str(blockable_square.y)))
# potential action -> sequence of steps such as "START_MOVE, MOVE (to square) etc
return move_actions
def potential_pass_actions(game: g.Game, heat_map: FfHeatMap, player: m.Player, paths: List[pf.Path]) -> List[ActionSequence]:
move_actions: List[ActionSequence] = []
for path in paths:
path_steps = path.steps
end_square: m.Square = game.get_square(path.steps[-1].x, path.steps[-1].y)
        # Need possible receiving players
to_squares, distances = game.get_pass_distances_at(player, end_square)
for to_square in to_squares:
action_steps: List[m.Action] = []
action_steps.append(m.Action(t.ActionType.START_PASS, player=player))
receiver: Optional[m.Player] = game.get_player_at(to_square)
if not player.state.up:
action_steps.append(m.Action(t.ActionType.STAND_UP))
for step in path_steps:
# Note we need to add 1 to x and y because the outermost layer of squares is not actually reachable
action_steps.append(m.Action(t.ActionType.MOVE, position=game.get_square(step.x, step.y)))
action_steps.append(m.Action(t.ActionType.PASS, position=to_square))
action_steps.append(m.Action(t.ActionType.END_PLAYER_TURN))
action_score = score_pass(game, heat_map, player, end_square, to_square)
            path_score = path_cost_to_score(path)  # TODO: if the pass requires an extra GFI, increase the cost here.
score = action_score + path_score
move_actions.append(ActionSequence(action_steps, score=score, description='Pass ' + player.name + ' to ' + str(to_square.x) + ',' + str(to_square.y)))
# potential action -> sequence of steps such as "START_MOVE, MOVE (to square) etc
return move_actions
def potential_handoff_actions(game: g.Game, heat_map: FfHeatMap, player: m.Player, paths: List[pf.Path]) -> List[ActionSequence]:
move_actions: List[ActionSequence] = []
for path in paths:
path_steps = path.steps
end_square: m.Square = game.get_square(path.steps[-1].x, path.steps[-1].y)
handoffable_players = game.get_adjacent_players(end_square, team=player.team, standing=True, down=False, stunned=False)
for handoffable_player in handoffable_players:
action_steps: List[m.Action] = []
action_steps.append(m.Action(t.ActionType.START_HANDOFF, player=player))
for step in path_steps:
# Note we need to add 1 to x and y because the outermost layer of squares is not actually reachable
action_steps.append(m.Action(t.ActionType.MOVE, position=game.get_square(step.x, step.y)))
action_steps.append(m.Action(t.ActionType.HANDOFF, position=handoffable_player.position))
action_steps.append(m.Action(t.ActionType.END_PLAYER_TURN))
action_score = score_handoff(game, heat_map, player, handoffable_player, end_square)
            path_score = path_cost_to_score(path)  # TODO: if the handoff requires an extra GFI, increase the cost here.
score = action_score + path_score
move_actions.append(ActionSequence(action_steps, score=score, description='Handoff ' + player.name + ' to ' + str(handoffable_player.position.x) + ',' + str(handoffable_player.position.y)))
# potential action -> sequence of steps such as "START_MOVE, MOVE (to square) etc
return move_actions
def potential_foul_actions(game: g.Game, heat_map: FfHeatMap, player: m.Player, paths: List[pf.Path]) -> List[ActionSequence]:
move_actions: List[ActionSequence] = []
for path in paths:
path_steps = path.steps
end_square: m.Square = game.get_square(path.steps[-1].x, path.steps[-1].y)
foulable_players = game.get_adjacent_players(end_square, team=game.get_opp_team(player.team), standing=False, stunned=True, down=True)
for foulable_player in foulable_players:
action_steps: List[m.Action] = []
action_steps.append(m.Action(t.ActionType.START_FOUL, player=player))
if not player.state.up:
action_steps.append(m.Action(t.ActionType.STAND_UP))
for step in path_steps:
# Note we need to add 1 to x and y because the outermost layer of squares is not actually reachable
action_steps.append(m.Action(t.ActionType.MOVE, position=game.get_square(step.x, step.y)))
            action_steps.append(m.Action(t.ActionType.FOUL, position=foulable_player.position))
action_steps.append(m.Action(t.ActionType.END_PLAYER_TURN))
action_score = score_foul(game, heat_map, player, foulable_player, end_square)
            path_score = path_cost_to_score(path)  # TODO: if the foul requires an extra GFI, increase the cost here.
score = action_score + path_score
move_actions.append(ActionSequence(action_steps, score=score, description='Foul ' + player.name + ' to ' + str(foulable_player.position.x) + ',' + str(foulable_player.position.y)))
# potential action -> sequence of steps such as "START_MOVE, MOVE (to square) etc
return move_actions
def potential_move_actions(game: g.Game, heat_map: FfHeatMap, player: m.Player, paths: List[pf.Path], is_continuation: bool = False) -> List[ActionSequence]:
move_actions: List[ActionSequence] = []
ball_square: m.Square = game.get_ball_position()
for path in paths:
path_steps = path.steps
action_steps: List[m.Action] = []
if not is_continuation:
action_steps.append(m.Action(t.ActionType.START_MOVE, player=player))
if not player.state.up:
action_steps.append(m.Action(t.ActionType.STAND_UP))
for step in path_steps:
# Note we need to add 1 to x and y because the outermost layer of squares is not actually reachable
action_steps.append(m.Action(t.ActionType.MOVE, position=game.get_square(step.x, step.y)))
to_square: m.Square = game.get_square(path_steps[-1].x, path_steps[-1].y)
action_score, is_complete, description = score_move(game, heat_map, player, to_square)
if is_complete:
action_steps.append(m.Action(t.ActionType.END_PLAYER_TURN))
        path_score = path_cost_to_score(path)  # TODO: if the move requires an extra GFI, increase the cost here.
if is_continuation and path_score > 0:
# Continuing actions (after a Blitz block for example) may choose risky options, so penalise
path_score = -10 + path_score * 2
score = action_score + path_score
move_actions.append(ActionSequence(action_steps, score=score, description='Move: ' + description + ' ' + player.name + ' to ' + str(path_steps[-1].x) + ',' + str(path_steps[-1].y)))
# potential action -> sequence of steps such as "START_MOVE, MOVE (to square) etc
return move_actions
def score_blitz(game: g.Game, heat_map: FfHeatMap, attacker: m.Player, block_from_square: m.Square, defender: m.Player) -> float:
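    """
    Score the attractiveness of blitzing `defender` with `attacker` from
    `block_from_square`: more block dice, ball involvement and surf chances all
    raise the score.
    """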
score: float = GrodBot.BASE_SCORE_BLITZ
ball_carrier: Optional[m.Player] = game.get_ball_carrier()
is_ball_carrier = attacker == ball_carrier
num_block_dice: int = game.num_block_dice_at(attacker, defender, block_from_square, blitz=True, dauntless_success=False)
    ball_position: m.Square = game.get_ball_position()
if num_block_dice == 3:
score += 30.0
if num_block_dice == 2:
score += 10.0
if num_block_dice == 1:
score += -30.0
if num_block_dice == -2:
score += -75.0
if num_block_dice == -3:
score += -100.0
if attacker.has_skill(t.Skill.BLOCK):
score += 20.0
if defender.has_skill(t.Skill.DODGE) and not attacker.has_skill(t.Skill.TACKLE):
score -= 10.0
if defender.has_skill(t.Skill.BLOCK):
score += -10.0
if ball_position == attacker.position:
if attacker.position.is_adjacent(defender.position) and block_from_square == attacker.position:
score += 20.0 # Favour blitzing with ball carrier at start of move
else:
score += -40.0 # But don't blitz with ball carrier after that
if defender.position == ball_position:
score += 50.0 # Blitzing ball carrier
if defender.position.is_adjacent(ball_position):
score += 20.0 # Blitzing someone adjacent to ball carrier
if direct_surf_squares(game, block_from_square, defender.position):
score += 25.0 # A surf
if game.get_adjacent_opponents(attacker, stunned=False, down=False) and not is_ball_carrier:
score -= 10.0
if attacker.position == block_from_square:
score -= 20.0 # A Blitz where the block is the starting square is unattractive
if in_scoring_range(game, defender):
score += 10.0 # Blitzing players closer to the endzone is attractive
return score
def score_foul(game: g.Game, heat_map: FfHeatMap, attacker: m.Player, defender: m.Player, to_square: m.Square) -> float:
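    """
    Score the attractiveness of fouling `defender` with `attacker` acting from
    `to_square`, weighing assists, bribes and fouling skills.
    """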
score = GrodBot.BASE_SCORE_FOUL
ball_carrier: Optional[m.Player] = game.get_ball_carrier()
if ball_carrier == attacker:
score = score - 30.0
if attacker.has_skill(t.Skill.DIRTY_PLAYER):
score = score + 10.0
if attacker.has_skill(t.Skill.SNEAKY_GIT):
score = score + 10.0
if defender.state.stunned:
score = score - 15.0
assists_for, assists_against = game.num_assists_at(attacker, defender, to_square, foul=True)
score = score + (assists_for-assists_against) * 15.0
if attacker.team.state.bribes > 0:
score += 40.0
if attacker.has_skill(t.Skill.CHAINSAW):
score += 30.0
# TVdiff = defender.GetBaseTV() - attacker.GetBaseTV()
tv_diff = 10.0
score = score + tv_diff
return score
def score_move(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool, str):
    scores: List[Tuple[float, bool, str]] = [
[*score_receiving_position(game, heat_map, player, to_square), 'move to receiver'],
[*score_move_towards_ball(game, heat_map, player, to_square), 'move toward ball'],
[*score_move_to_ball(game, heat_map, player, to_square), 'move to ball'],
[*score_move_ball(game, heat_map, player, to_square), 'move ball'],
[*score_sweep(game, heat_map, player, to_square), 'move to sweep'],
[*score_defensive_screen(game, heat_map, player, to_square), 'move to defensive screen'],
        [*score_offensive_screen(game, heat_map, player, to_square), 'move to offensive screen'],
[*score_caging(game, heat_map, player, to_square), 'move to cage'],
[*score_mark_opponent(game, heat_map, player, to_square), 'move to mark opponent']
]
scores.sort(key=lambda tup: tup[0], reverse=True)
score, is_complete, description = scores[0]
# All moves should avoid the sideline
if distance_to_sideline(game, to_square) == 0:
score += GrodBot.ADDITIONAL_SCORE_SIDELINE
if distance_to_sideline(game, to_square) == 1:
score += GrodBot.ADDITIONAL_SCORE_NEAR_SIDELINE
return score, is_complete, description
def score_receiving_position(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
ball_carrier = game.get_ball_carrier()
    if ball_carrier is not None and (player.team != ball_carrier.team or player == ball_carrier):
return 0.0, True
receivingness = player_receiver_ability(game, player)
score = receivingness - 30.0
if in_scoring_endzone(game, player.team, to_square):
num_in_range = len(players_in_scoring_endzone(game, player.team, include_own=True, include_opp=False))
if player.team.state.turn == 8:
score += 40 # Pretty damned urgent to get to end zone!
score -= num_in_range * num_in_range * 40 # Don't want too many catchers in the endzone ...
score += 5.0 * (max(distance_to_scoring_endzone(game, player.team, player.position), player.get_ma()) - max(distance_to_scoring_endzone(game, player.team, to_square), player.get_ma()))
# Above score doesn't push players to go closer than their MA from the endzone.
if distance_to_scoring_endzone(game, player.team, to_square) > player.get_ma() + 2:
score -= 30.0
opp_team = game.get_opp_team(player.team)
opps: List[m.Player] = game.get_adjacent_players(player.position, opp_team, stunned=False, down=False)
if opps:
score -= 40.0 + 20.0 * len(opps)
score -= 10.0 * len(game.get_adjacent_players(to_square, opp_team, stunned=False, down=False))
num_in_range = len(players_in_scoring_distance(game, player.team, include_own=True, include_opp=False))
score -= num_in_range * num_in_range * 20.0 # Lower the score if we already have some receivers.
if players_in(game, player.team, squares_within(game, to_square, 2), include_opp=False, include_own=True):
score -= 20.0
return score, True
def score_move_towards_ball(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
ball_square: m.Square = game.get_ball_position()
ball_carrier = game.get_ball_carrier()
if ball_carrier is not None:
ball_team = ball_carrier.team
else:
ball_team = None
if (to_square == ball_square) or ((ball_team is not None) and (ball_team == player.team)):
return 0.0, True
score = GrodBot.BASE_SCORE_MOVE_TOWARD_BALL
if ball_carrier is None:
score += 20.0
player_distance_to_ball = ball_square.distance(player.position)
destination_distance_to_ball = ball_square.distance(to_square)
score += (player_distance_to_ball - destination_distance_to_ball)
    # Disabled experiments, kept for reference:
    # if destination_distance_to_ball > 3:
    #     score -= 50
    # ma_allowed = player.move_allowed()
    # current_distance_to_ball = ball_square.distance(player.position)
    # Cancel the penalty for being near the sideline if the ball is on the sideline
    # if distance_to_sideline(game, ball_square) <= 1:
    #     if distance_to_sideline(game, to_square): score += 10.0
    # Increase score if moving closer to the ball
    # score += (current_distance_to_ball - distance_to_ball) * 3
return score, True
def score_move_to_ball(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
ball_square: m.Square = game.get_ball_position()
ball_carrier = game.get_ball_carrier()
if (ball_square != to_square) or (ball_carrier is not None):
return 0.0, True
score = GrodBot.BASE_SCORE_MOVE_TO_BALL
if player.has_skill(t.Skill.SURE_HANDS) or not player.team.state.reroll_used:
score += 15.0
if player.get_ag() < 2:
score += -10.0
if player.get_ag() == 3:
score += 5.0
if player.get_ag() > 3:
score += 10.0
num_tz = game.num_tackle_zones_at(player, ball_square)
    score -= 10.0 * num_tz  # Lower score if there are many tackle zones on the ball
    # If there are only 1 or 2 players left to move, improve the score of trying to pick the ball up
players_to_move: List[m.Player] = get_players(game, player.team, include_own=True, include_opp=False, include_used=False, include_stunned=False)
if len(players_to_move) == 1:
score += 25
if len(players_to_move) == 2:
score += 15
# If the current player is the best player to pick up the ball, increase the score
players_sorted_blitz = sorted(players_to_move, key=lambda x: player_blitz_ability(game, x), reverse=True)
if players_sorted_blitz[0] == player:
score += 9
# Cancel the penalty for being near the sideline if the ball is on/near the sideline (it's applied later)
if distance_to_sideline(game, ball_square) == 1:
score -= GrodBot.ADDITIONAL_SCORE_NEAR_SIDELINE
if distance_to_sideline(game, ball_square) == 0:
score -= GrodBot.ADDITIONAL_SCORE_SIDELINE
# Need to increase score if no other player is around to get the ball (to do)
return score, False
def score_move_ball(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
# ball_square: m.Square = game.get_ball_position()
ball_carrier = game.get_ball_carrier()
if (ball_carrier is None) or player != ball_carrier:
return 0.0, True
score = GrodBot.BASE_SCORE_MOVE_BALL
if in_scoring_endzone(game, player.team, to_square):
if player.team.state.turn == 8:
score += 115.0 # Make overwhelmingly attractive
else:
score += 60.0 # Make scoring attractive
elif player.team.state.turn == 8:
        score -= 100.0  # If it's the last turn, heavily penalise a non-scoring action
else:
score += heat_map.get_ball_move_square_safety_score(to_square)
opps: List[m.Player] = game.get_adjacent_players(to_square, team=game.get_opp_team(player.team), stunned=False)
if opps:
score -= (40.0 + 20.0 * len(opps))
opps_close_to_destination = players_in(game, player.team, squares_within(game, to_square, 2), include_own=False, include_opp=True, include_stunned=False)
if opps_close_to_destination:
score -= (20.0 + 5.0 * len(opps_close_to_destination))
if not blitz_used(game):
        score -= 30.0  # Let's avoid moving the ball until the Blitz has been used (it often helps to free the move)
dist_player = distance_to_scoring_endzone(game, player.team, player.position)
dist_destination = distance_to_scoring_endzone(game, player.team, to_square)
score += 5.0 * (dist_player - dist_destination) # Increase score the closer we get to the scoring end zone
# Try to keep the ball central
if distance_to_sideline(game, to_square) < 3:
score -= 30
return score, True
def score_sweep(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
ball_carrier = game.get_ball_carrier()
if ball_carrier is not None:
ball_team = ball_carrier.team
else:
ball_team = None
if ball_team == player.team:
return 0.0, True # Don't sweep unless the other team has the ball
if distance_to_defending_endzone(game, player.team, game.get_ball_position()) < 9:
return 0.0, True # Don't sweep when the ball is close to the endzone
if players_in_scoring_distance(game, player.team, include_own=False, include_opp=True):
return 0.0, True # Don't sweep when there are opponent units in scoring range
score = GrodBot.BASE_SCORE_MOVE_TO_SWEEP
blitziness = player_blitz_ability(game, player)
score += blitziness - 60.0
score -= 30.0 * len(game.get_adjacent_opponents(player, standing=True, down=False, stunned=False))
    # Now to evaluate the ideal square for sweeping:
    x_preferred = int(reverse_x_for_left(game, player.team, (game.state.pitch.width - 2) / 4))
    y_preferred = int((game.state.pitch.height - 2) / 2)
    score -= abs(y_preferred - to_square.y) * 10.0  # Subtract 10 points per square away vertically
    score -= abs(x_preferred - to_square.x) * 5.0   # Subtract 5 points per square away horizontally
    # Check if a team-mate is already sweeping (the original shadowed `player` here, making the check a no-op):
    for i in range(-2, 3):
        for j in range(-2, 3):
            cur: m.Square = game.get_square(x_preferred + i, y_preferred + j)
            occupant: Optional[m.Player] = game.get_player_at(cur)
            if occupant is not None and occupant != player and occupant.team == player.team:
                score -= 90.0
return score, True
def score_defensive_screen(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
ball_square = game.get_ball_position()
ball_carrier = game.get_ball_carrier()
if ball_carrier is not None:
ball_team = ball_carrier.team
else:
ball_team = None
if ball_team is None or ball_team == player.team:
return 0.0, True # Don't screen if we have the ball or ball is on the ground
# This one is a bit trickier by nature, because it involves combinations of two or more players...
# Increase score if square is close to ball carrier.
# Decrease if far away.
# Decrease if square is behind ball carrier.
# Increase slightly if square is 1 away from sideline.
# Decrease if close to a player on the same team WHO IS ALREADY screening.
# Increase slightly if most of the players movement must be used to arrive at the screening square.
score = GrodBot.BASE_SCORE_DEFENSIVE_SCREEN
distance_ball_carrier_to_end = distance_to_defending_endzone(game, player.team, ball_square)
distance_square_to_end = distance_to_defending_endzone(game, player.team, to_square)
if distance_square_to_end + 1.0 < distance_ball_carrier_to_end:
score += 30.0 # Increase score defending on correct side of field.
distance_to_ball = ball_square.distance(to_square)
score += 4.0*max(5.0 - distance_to_ball, 0.0) # Increase score defending in front of ball carrier
score += distance_square_to_end/10.0 # Increase score a small amount to screen closer to opponents.
distance_to_closest_opponent = distance_to_nearest_player(game, player.team, to_square, include_own=False, include_opp=True, include_stunned=False)
if distance_to_closest_opponent <= 1.5:
score -= 30.0
elif distance_to_closest_opponent <= 2.95:
score += 10.0
elif distance_to_closest_opponent > 2.95:
score += 5.0
if distance_to_sideline(game, to_square) == 1:
score -= GrodBot.ADDITIONAL_SCORE_NEAR_SIDELINE # Cancel the negative score of being 1 from sideline.
    distance_to_closest_friendly_used = distance_to_nearest_player(game, player.team, to_square, include_own=True, include_opp=False, only_used=True)
    if distance_to_closest_friendly_used >= 4:
        score += 2.0   # Too far apart to link up - small bonus only
    elif distance_to_closest_friendly_used >= 3:
        score += 40.0  # Increase score if the square links with another friendly (hopefully also screening)
    elif distance_to_closest_friendly_used > 2:
        score += 10.0  # A bit close to another defender - reduced bonus
    else:
        score -= 10.0  # Decrease score if too close to another defender
    distance_to_closest_friendly_unused = distance_to_nearest_player(game, player.team, to_square, include_own=True, include_opp=False, include_used=True)
    if distance_to_closest_friendly_unused >= 4:
        score += 3.0  # Too far apart to link up - small bonus only
    elif distance_to_closest_friendly_unused >= 3:
        score += 8.0  # Increase score if the square links with another friendly (hopefully also screening)
    elif distance_to_closest_friendly_unused > 2:
        score += 3.0  # A bit close to another defender - reduced bonus
    else:
        score -= 10.0  # Decrease score if too close to another defender
return score, True
def score_offensive_screen(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
# Another subtle one. Basically if the ball carrier "breaks out", I want to screen him from
# behind, rather than cage him. I may even want to do this with an important receiver.
# Want my players to be 3 squares from each other, not counting direct diagonals.
# Want my players to be hampering the movement of opponent ball or players.
# Want my players in a line between goal line and opponent.
#
    ball_carrier: Optional[m.Player] = game.get_ball_carrier()
    ball_square: m.Square = game.get_ball_position()
if ball_carrier is None or ball_carrier.team != player.team:
return 0.0, True
score = 0.0 # Placeholder - not implemented yet.
return score, True
def score_caging(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
ball_carrier: m.Player = game.get_ball_carrier()
    if ball_carrier is None or ball_carrier.team != player.team or ball_carrier == player:
        return 0.0, True  # Only cage around a team-mate who is carrying the ball
ball_square: m.Square = game.get_ball_position()
cage_square_groups: List[List[m.Square]] = [
caging_squares_north_east(game, ball_square),
caging_squares_north_west(game, ball_square),
caging_squares_south_east(game, ball_square),
caging_squares_south_west(game, ball_square)
]
dist_opp_to_ball = distance_to_nearest_player(game, player.team, ball_square, include_own=False, include_opp=True, include_stunned=False)
avg_opp_ma = average_ma(game, get_players(game, player.team, include_own=False, include_opp=True, include_stunned=False))
    for curGroup in cage_square_groups:
        if to_square in curGroup and not players_in(game, player.team, curGroup, include_opp=False, include_own=True, only_blockable=True):
            # Target square is inside a cage corner and no team-mate occupies that corner
            score = GrodBot.BASE_SCORE_CAGE_BALL
            dist = distance_to_nearest_player(game, player.team, to_square, include_own=False, include_stunned=False, include_opp=True)
            score += dist_opp_to_ball - dist
            if dist_opp_to_ball > avg_opp_ma:
                score -= 30.0
            if not ball_carrier.state.used:
                score -= 30.0
            if to_square.is_adjacent(game.get_ball_position()):
                score += 5.0
            if is_bishop_position_of(game, player, ball_carrier):
                score -= 2.0
            score += heat_map.get_cage_necessity_score(to_square)
            if not ball_carrier.state.used:
                score = max(0.0, score - GrodBot.BASE_SCORE_CAGE_BALL)  # Penalise forming a cage if the ball carrier has yet to move
            if not player.state.up:
                score += 5.0
            return score, True
    return 0.0, True
def score_mark_opponent(game: g.Game, heat_map: FfHeatMap, player: m.Player, to_square: m.Square) -> (float, bool):
    # TODO: no need to mark prone opponents that are already marked
ball_carrier = game.get_ball_carrier()
opp_team = game.get_opp_team(player.team)
if ball_carrier is not None:
ball_team = ball_carrier.team
else:
ball_team = None
ball_square = game.get_ball_position()
    if ball_square == player.position:
        return 0.0, True  # The ball carrier shouldn't move just to mark opponents
all_opponents: List[m.Player] = game.get_adjacent_players(to_square, team=opp_team)
if not all_opponents:
return 0.0, True
if (ball_carrier is not None) and (ball_carrier == player):
return 0.0, True
score = GrodBot.BASE_SCORE_MOVE_TO_OPPONENT
if to_square.is_adjacent(game.get_ball_position()):
if ball_team == player.team:
score += 20.0
else:
score += 30.0
for opp in all_opponents:
if distance_to_scoring_endzone(game, opp.team, to_square) < opp.get_ma() + 2:
score += 10.0 # Mark opponents in scoring range first.
break # Only add score once.
if len(all_opponents) == 1:
score += 20.0
num_friendly_next_to = game.num_tackle_zones_in(all_opponents[0])
if all_opponents[0].state.up:
if num_friendly_next_to == 1:
score += 5.0
else:
score -= 10.0 * num_friendly_next_to
if not all_opponents[0].state.up:
if num_friendly_next_to == 0:
score += 5.0
else:
score -= 10.0 * num_friendly_next_to # Unless we want to start fouling ...
if not player.state.up:
score += 25.0
if not player.has_skill(t.Skill.GUARD):
score -= len(all_opponents) * 10.0
else:
score += len(all_opponents) * 10.0
ball_is_near = False
for current_opponent in all_opponents:
if current_opponent.position.is_adjacent(game.get_ball_position()):
ball_is_near = True
if ball_is_near:
score += 8.0
if player.position != to_square and game.num_tackle_zones_in(player) > 0:
score -= 40.0
if ball_square is not None:
distance_to_ball = ball_square.distance(to_square)
score -= distance_to_ball / 5.0 # Mark opponents closer to ball when possible
    if ball_team is not None and ball_team != player.team:
        # TODO: currently unused - intended to prefer marking with the most advanced (distance-wise) players
        distance_to_other_endzone = distance_to_scoring_endzone(game, player.team, to_square)
return score, True
def score_handoff(game: g.Game, heat_map: FfHeatMap, ball_carrier: m.Player, receiver: m.Player, from_square: m.Square) -> float:
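    """
    Score handing the ball off to `receiver`, weighing catch odds, reroll
    availability and progress towards the endzone.
    """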
if receiver == ball_carrier:
return 0.0
score = GrodBot.BASE_SCORE_HANDOFF
score += probability_fail_to_score(probability_catch_fail(game, receiver))
    if not ball_carrier.team.state.reroll_used:
        score += 10.0
score -= 5.0 * (distance_to_scoring_endzone(game, ball_carrier.team, receiver.position) - distance_to_scoring_endzone(game, ball_carrier.team, ball_carrier.position))
if receiver.state.used:
score -= 30.0
if (game.num_tackle_zones_in(ball_carrier) > 0 or game.num_tackle_zones_in(receiver) > 0) and not blitz_used(game):
score -= 50.0 # Don't try a risky hand-off if we haven't blitzed yet
if in_scoring_range(game, receiver) and not in_scoring_range(game, ball_carrier):
score += 40.0
# score += heat_map.get_ball_move_square_safety_score(receiver.position)
return score
def score_pass(game: g.Game, heat_map: FfHeatMap, passer: m.Player, from_square: m.Square, to_square: m.Square) -> float:
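    """
    Score a pass from `from_square` to `to_square`, weighing pass and catch
    odds, reroll availability and progress towards the endzone.
    """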
receiver = game.get_player_at(to_square)
if receiver is None:
return 0.0
if receiver.team != passer.team:
return 0.0
if receiver == passer:
return 0.0
score = GrodBot.BASE_SCORE_PASS
score += probability_fail_to_score(probability_catch_fail(game, receiver))
dist: t.PassDistance = game.get_pass_distance(from_square, receiver.position)
score += probability_fail_to_score(probability_pass_fail(game, passer, from_square, dist))
    if not passer.team.state.reroll_used:
        score += 10.0
score = score - 5.0 * (distance_to_scoring_endzone(game, receiver.team, receiver.position) - distance_to_scoring_endzone(game, passer.team, passer.position))
if receiver.state.used:
score -= 30.0
    if (game.num_tackle_zones_in(passer) > 0 or game.num_tackle_zones_in(receiver) > 0) and not blitz_used(game):
        score -= 50.0  # Don't try a risky pass if we haven't blitzed yet
if in_scoring_range(game, receiver) and not in_scoring_range(game, passer):
score += 40.0
return score
def score_block(game: g.Game, heat_map: FfHeatMap, attacker: m.Player, defender: m.Player) -> float:
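    """
    Score a block of `defender` by `attacker`, weighing block dice, relevant
    skills, surf chances and ball possession.
    """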
score = GrodBot.BASE_SCORE_BLOCK
ball_carrier = game.get_ball_carrier()
ball_square = game.get_ball_position()
if attacker.has_skill(t.Skill.CHAINSAW):
score += 15.0
score += 20.0 - 2 * defender.get_av()
# Add something in case the defender is really valuable?
else:
num_block_dice = game.num_block_dice(attacker, defender)
if num_block_dice == 3:
score += 15.0
if num_block_dice == 2:
score += 0.0
if num_block_dice == 1:
score += -66.0 # score is close to zero.
if num_block_dice == -2:
score += -95.0
if num_block_dice == -3:
score += -150.0
if not attacker.team.state.reroll_used and not attacker.has_skill(t.Skill.LONER):
score += 10.0
if attacker.has_skill(t.Skill.BLOCK) or attacker.has_skill(t.Skill.WRESTLE):
score += 20.0
if defender.has_skill(t.Skill.DODGE) and not attacker.has_skill(t.Skill.TACKLE):
score += -10.0
if defender.has_skill(t.Skill.BLOCK):
score += -10.0
if attacker_would_surf(game, attacker, defender):
score += 32.0
if attacker.has_skill(t.Skill.LONER):
score -= 10.0
if attacker == ball_carrier:
score += -45.0
if defender == ball_carrier:
score += 35.0
if defender.position.is_adjacent(ball_square):
score += 15.0
return score
def score_push(game: g.Game, from_square: m.Square, to_square: m.Square) -> float:
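    """
    Score a candidate push-back square: prefer pushing towards the sideline and
    away from the ball, and reward crowd-surf chances.
    """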
score = 0.0
ball_square = game.get_ball_position()
if distance_to_sideline(game, to_square) == 0:
score = score + 10.0 # Push towards sideline
    if ball_square is not None and to_square.is_adjacent(ball_square):
score = score - 15.0 # Push away from ball
if direct_surf_squares(game, from_square, to_square):
score = score + 10.0
return score
def check_follow_up(game: g.Game) -> bool:
    # TODO: the logic here is faulty for the current game state, in terms of how and when actions are
    # evaluated: the check appears to happen before the defending player is placed prone (but after the
    # player is pushed?). In general, follow up when the defender ends up prone, and not otherwise.
active_player: m.Player = game.state.active_player
block_proc = last_block_proc(game)
attacker: m.Player = block_proc.attacker
defender: m.Player = block_proc.defender
is_blitz_action = block_proc.blitz
for position in game.state.available_actions[0].positions:
if active_player.position != position:
follow_up_square: m.Square = position
defender_prone = (block_proc.selected_die == t.BBDieResult.DEFENDER_DOWN) or ((block_proc.selected_die == t.BBDieResult.DEFENDER_STUMBLES) and (attacker.has_skill(t.Skill.TACKLE) or not defender.has_skill(t.Skill.DODGE)))
num_tz_cur = game.num_tackle_zones_in(active_player)
num_tz_new = game.num_tackle_zones_at(active_player, follow_up_square)
opp_adj_cur = game.get_adjacent_opponents(active_player, stunned=False, down=False)
opp_adj_new = game.get_adjacent_players(follow_up_square, team=game.get_opp_team(active_player.team), stunned=False, down=False)
num_tz_new -= defender_prone
# If blitzing (with squares of movement left) always follow up if the new square is not in any tackle zone.
if is_blitz_action and attacker.num_moves_left() > 0 and num_tz_new == 0:
return True
# If Attacker has the ball, strictly follow up only if there are less opponents next to new square.
            if game.get_ball_carrier() == attacker:
                if len(opp_adj_new) - defender_prone < len(opp_adj_cur):
                    return True
                return False
            if game.get_ball_carrier() == defender:
                return True  # Always follow up if defender has ball
if distance_to_sideline(game, follow_up_square) == 0:
return False # No if moving to sideline
if distance_to_sideline(game, defender.position) == 0:
return True # Follow up if opponent is on sideline
if follow_up_square.is_adjacent(game.get_ball_position()):
return True # Follow if moving next to ball
if attacker.position.is_adjacent(game.get_ball_position()):
return False # Don't follow if already next to ball
# Follow up if less standing opponents in the next square or equivalent, but defender is now prone
if (num_tz_new == 0) or (num_tz_new < num_tz_cur) or (num_tz_new == num_tz_cur and not defender_prone):
return True
if attacker.has_skill(t.Skill.GUARD) and num_tz_new > num_tz_cur:
return True # Yes if attacker has guard
if attacker.get_st() > defender.get_st() + num_tz_new - num_tz_cur:
return True # Follow if stronger
if is_blitz_action and attacker.num_moves_left() == 0:
return True # If blitzing but out of moves, follow up to prevent GFIing...
return False
def check_reroll_block(game: g.Game, team: m.Team, block_results: List[ActionSequence], favor: m.Team) -> bool:
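    """
    Decide whether to reroll the block dice: reroll when the best result the
    choosing side can keep is still poor, or when the targeted ball carrier
    would stay on his feet.
    """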
block_proc: Optional[p.Block] = last_block_proc(game)
attacker: m.Player = block_proc.attacker
defender: m.Player = block_proc.defender
is_blitz_action = block_proc.blitz
ball_carrier: Optional[m.Player] = game.get_ball_carrier()
best_block_score: float = 0
cur_block_score: float = -1
if len(block_results) > 0:
best_block_score = block_results[0].score
if len(block_results) > 1:
cur_block_score = block_results[1].score
if favor == team and cur_block_score > best_block_score:
best_block_score = cur_block_score
if favor != team and cur_block_score < best_block_score:
best_block_score = cur_block_score
if len(block_results) > 2:
cur_block_score = block_results[2].score
if favor == team and cur_block_score > best_block_score:
best_block_score = cur_block_score
if favor != team and cur_block_score < best_block_score:
best_block_score = cur_block_score
if best_block_score < 4:
return True
elif ball_carrier == defender and best_block_score < 5:
return True # Reroll if target has ball and not knocked over.
else:
return False
def scoring_urgency_score(game: g.Game, heat_map: FfHeatMap, player: m.Player) -> float:
if player.team.state.turn == 8:
return 40
return 0
def path_cost_to_score(path: pf.Path) -> float:
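    """
    Convert a path's failure probability into a (negative) score penalty that
    grows quadratically with risk.
    """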
cost: float = 1-path.prob
# assert 0 <= cost <= 1
score = -(cost * cost * (250.0 + GrodBot.ADDITIONAL_SCORE_DODGE))
return score
def probability_fail_to_score(probability: float) -> float:
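    """
    Convert a failure probability into a (negative) score penalty that grows
    quadratically with risk.
    """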
score = -(probability * probability * (250.0 + GrodBot.ADDITIONAL_SCORE_DODGE))
return score
def probability_catch_fail(game: g.Game, receiver: m.Player) -> float:
num_tz = 0.0
if not receiver.has_skill(t.Skill.NERVES_OF_STEEL):
num_tz = game.num_tackle_zones_in(receiver)
probability_success = min(5.0, receiver.get_ag()+1.0-num_tz)/6.0
if receiver.has_skill(t.Skill.CATCH):
probability_success += (1.0-probability_success)*probability_success
probability = 1.0 - probability_success
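    # Illustrative example: an AG3 receiver in one tackle zone (no Nerves of Steel)
    # succeeds on min(5, 3 + 1 - 1)/6 = 0.5; with Catch this becomes
    # 0.5 + 0.5 * 0.5 = 0.75, i.e. a 0.25 failure probability.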
return probability
def probability_pass_fail(game: g.Game, passer: m.Player, from_square: m.Square, dist: t.PassDistance) -> float:
num_tz = 0.0
if not passer.has_skill(t.Skill.NERVES_OF_STEEL):
num_tz = game.num_tackle_zones_at(passer, from_square)
if passer.has_skill(t.Skill.ACCURATE):
num_tz -= 1
    if passer.has_skill(t.Skill.STRONG_ARM) and dist != t.PassDistance.QUICK_PASS:
        num_tz -= 1
    if dist == t.PassDistance.HAIL_MARY:
        return -100.0  # Sentinel: the squared penalty downstream makes Hail Mary passes hopeless
if dist == t.PassDistance.QUICK_PASS:
num_tz -= 1
if dist == t.PassDistance.SHORT_PASS:
num_tz -= 0
if dist == t.PassDistance.LONG_PASS:
num_tz += 1
if dist == t.PassDistance.LONG_BOMB:
num_tz += 2
probability_success = min(5.0, passer.get_ag()-num_tz)/6.0
if passer.has_skill(t.Skill.PASS):
probability_success += (1.0-probability_success)*probability_success
probability = 1.0 - probability_success
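    # Illustrative example: an AG3 passer with no tackle zones attempting a short
    # pass succeeds on min(5, 3 - 0)/6 = 0.5; with the Pass skill this becomes
    # 0.5 + 0.5 * 0.5 = 0.75, i.e. a 0.25 failure probability.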
return probability
def choose_gaze_victim(game: g.Game, player: m.Player) -> m.Player:
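    """
    Pick the best adjacent target for Hypnotic Gaze: favour low-AG opponents,
    especially one standing next to the ball.
    """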
best_victim: Optional[m.Player] = None
best_score = 0.0
ball_square: m.Square = game.get_ball_position()
potentials: List[m.Player] = game.get_adjacent_players(player, team=game.get_opp_team(player.team), down=False, standing=True, stunned=False)
for unit in potentials:
current_score = 5.0
current_score += 6.0 - unit.get_ag()
if unit.position.is_adjacent(ball_square):
current_score += 5.0
if current_score > best_score:
best_score = current_score
best_victim = unit
return best_victim
def average_st(game: g.Game, players: List[m.Player]) -> float:
values = [player.get_st() for player in players]
return sum(values)*1.0 / len(values)
def average_av(game: g.Game, players: List[m.Player]) -> float:
values = [player.get_av() for player in players]
return sum(values)*1.0 / len(values)
def average_ma(game: g.Game, players: List[m.Player]) -> float:
values = [player.get_ma() for player in players]
return sum(values)*1.0 / len(values)
def player_bash_ability(game: g.Game, player: m.Player) -> float:
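    """
    Heuristic rating of how useful a player is in a fight, based on ST, AV and
    fighting skills.
    """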
bashiness: float = 0.0
bashiness += 10.0 * player.get_st()
bashiness += 5.0 * player.get_av()
if player.has_skill(t.Skill.BLOCK):
bashiness += 10.0
if player.has_skill(t.Skill.WRESTLE):
bashiness += 10.0
if player.has_skill(t.Skill.MIGHTY_BLOW):
bashiness += 5.0
if player.has_skill(t.Skill.CLAWS):
bashiness += 5.0
if player.has_skill(t.Skill.PILING_ON):
bashiness += 5.0
if player.has_skill(t.Skill.GUARD):
bashiness += 15.0
if player.has_skill(t.Skill.DAUNTLESS):
bashiness += 10.0
if player.has_skill(t.Skill.FOUL_APPEARANCE):
bashiness += 5.0
if player.has_skill(t.Skill.TENTACLES):
bashiness += 5.0
if player.has_skill(t.Skill.STUNTY):
bashiness -= 10.0
if player.has_skill(t.Skill.REGENERATION):
bashiness += 10.0
if player.has_skill(t.Skill.THICK_SKULL):
bashiness += 3.0
return bashiness
def team_bash_ability(game: g.Game, players: List[m.Player]) -> float:
total = 0.0
for player in players:
total += player_bash_ability(game, player)
return total
def player_pass_ability(game: g.Game, player: m.Player) -> float:
passing_ability = 0.0
passing_ability += player.get_ag() * 15.0 # Agility most important.
passing_ability += player.get_ma() * 2.0 # Fast movements make better ball throwers.
if player.has_skill(t.Skill.PASS):
passing_ability += 10.0
if player.has_skill(t.Skill.SURE_HANDS):
passing_ability += 5.0
if player.has_skill(t.Skill.EXTRA_ARMS):
passing_ability += 3.0
if player.has_skill(t.Skill.NERVES_OF_STEEL):
passing_ability += 3.0
if player.has_skill(t.Skill.ACCURATE):
passing_ability += 5.0
if player.has_skill(t.Skill.STRONG_ARM):
passing_ability += 5.0
if player.has_skill(t.Skill.BONE_HEAD):
passing_ability -= 15.0
if player.has_skill(t.Skill.REALLY_STUPID):
passing_ability -= 15.0
if player.has_skill(t.Skill.WILD_ANIMAL):
passing_ability -= 15.0
if player.has_skill(t.Skill.ANIMOSITY):
passing_ability -= 10.0
if player.has_skill(t.Skill.LONER):
passing_ability -= 15.0
if player.has_skill(t.Skill.DUMP_OFF):
passing_ability += 5.0
if player.has_skill(t.Skill.SAFE_THROW):
passing_ability += 5.0
if player.has_skill(t.Skill.NO_HANDS):
passing_ability -= 100.0
return passing_ability
def player_blitz_ability(game: g.Game, player: m.Player) -> float:
blitzing_ability = player_bash_ability(game, player)
blitzing_ability += player.get_ma() * 10.0
if player.has_skill(t.Skill.TACKLE):
blitzing_ability += 5.0
if player.has_skill(t.Skill.SPRINT):
blitzing_ability += 5.0
if player.has_skill(t.Skill.SURE_FEET):
blitzing_ability += 5.0
if player.has_skill(t.Skill.STRIP_BALL):
blitzing_ability += 5.0
if player.has_skill(t.Skill.DIVING_TACKLE):
blitzing_ability += 5.0
if player.has_skill(t.Skill.MIGHTY_BLOW):
blitzing_ability += 5.0
if player.has_skill(t.Skill.CLAWS):
blitzing_ability += 5.0
if player.has_skill(t.Skill.PILING_ON):
blitzing_ability += 5.0
if player.has_skill(t.Skill.BONE_HEAD):
blitzing_ability -= 15.0
if player.has_skill(t.Skill.REALLY_STUPID):
blitzing_ability -= 15.0
if player.has_skill(t.Skill.WILD_ANIMAL):
blitzing_ability -= 10.0
if player.has_skill(t.Skill.LONER):
blitzing_ability -= 15.0
if player.has_skill(t.Skill.SIDE_STEP):
blitzing_ability += 5.0
if player.has_skill(t.Skill.JUMP_UP):
blitzing_ability += 5.0
if player.has_skill(t.Skill.HORNS):
blitzing_ability += 10.0
if player.has_skill(t.Skill.JUGGERNAUT):
blitzing_ability += 10.0
if player.has_skill(t.Skill.LEAP):
blitzing_ability += 5.0
return blitzing_ability
def player_receiver_ability(game: g.Game, player: m.Player) -> float:
receiving_ability = 0.0
receiving_ability += player.get_ma() * 5.0
receiving_ability += player.get_ag() * 10.0
if player.has_skill(t.Skill.CATCH):
receiving_ability += 15.0
if player.has_skill(t.Skill.EXTRA_ARMS):
receiving_ability += 10.0
if player.has_skill(t.Skill.NERVES_OF_STEEL):
receiving_ability += 5.0
if player.has_skill(t.Skill.DIVING_CATCH):
receiving_ability += 5.0
if player.has_skill(t.Skill.DODGE):
receiving_ability += 10.0
if player.has_skill(t.Skill.SIDE_STEP):
receiving_ability += 5.0
if player.has_skill(t.Skill.BONE_HEAD):
receiving_ability -= 15.0
if player.has_skill(t.Skill.REALLY_STUPID):
receiving_ability -= 15.0
if player.has_skill(t.Skill.WILD_ANIMAL):
receiving_ability -= 15.0
if player.has_skill(t.Skill.LONER):
receiving_ability -= 15.0
if player.has_skill(t.Skill.NO_HANDS):
receiving_ability -= 100.0
return receiving_ability
def player_run_ability(game: g.Game, player: m.Player) -> float:
running_ability = 0.0
running_ability += player.get_ma() * 10.0 # Really favour fast units
running_ability += player.get_ag() * 10.0 # Agility to be prized
running_ability += player.get_st() * 5.0 # Doesn't hurt to be strong!
if player.has_skill(t.Skill.SURE_HANDS):
running_ability += 10.0
if player.has_skill(t.Skill.BLOCK):
running_ability += 10.0
if player.has_skill(t.Skill.EXTRA_ARMS):
running_ability += 5.0
if player.has_skill(t.Skill.DODGE):
running_ability += 10.0
if player.has_skill(t.Skill.SIDE_STEP):
running_ability += 5.0
if player.has_skill(t.Skill.STAND_FIRM):
running_ability += 3.0
if player.has_skill(t.Skill.BONE_HEAD):
running_ability -= 15.0
if player.has_skill(t.Skill.REALLY_STUPID):
running_ability -= 15.0
if player.has_skill(t.Skill.WILD_ANIMAL):
running_ability -= 15.0
if player.has_skill(t.Skill.LONER):
running_ability -= 15.0
if player.has_skill(t.Skill.ANIMOSITY):
running_ability -= 5.0
if player.has_skill(t.Skill.DUMP_OFF):
running_ability += 5.0
if player.has_skill(t.Skill.NO_HANDS):
running_ability -= 100.0
return running_ability
def player_value(game: g.Game, player: m.Player) -> float:
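    """
    Crude overall player value derived from raw stats and skill count.
    """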
value = player.get_ag()*40 + player.get_av()*30 + player.get_ma()*30 + player.get_st()*50 + len(player.get_skills())*20
return value
# Register bot
register_bot('GrodBot', GrodBot)
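# Illustrative usage (an assumption: the exact helper name depends on the ffai
# framework version):
# import ffai
# bot = ffai.make_bot('GrodBot')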
|
import os
from codenode.frontend._settings import *
HOME_PATH = os.path.join(PROJECT_PATH, '..', '..', 'devel', 'env')
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = os.path.join(HOME_PATH, 'codenode.db') # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
SEARCH_INDEX = os.path.join(HOME_PATH, 'search_index')
PLOT_IMAGES = os.path.join(HOME_PATH, 'plot_images')
INSTALLED_APPS = INSTALLED_APPS + ('django_nose', 'django_extensions')
TEST_RUNNER = 'django_nose.run_tests'
|
from flask import Flask
import subprocess as sp
import os
app = Flask(__name__)
@app.route("/on-push", methods=['POST'])
def on_push():
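    """Run the shell command configured in the PUSH_COMMAND env var when a push webhook arrives."""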
    command = os.environ.get('PUSH_COMMAND')
    if command:
        try:
            # Fire-and-forget: run the configured command in a shell
            sp.Popen(command, shell=True, executable='/bin/bash')
            return ''
        except Exception:
            return ('An exception occurred while handling the request', 503)
    return ('', 204)
if __name__ == "__main__":
app.run()
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys
import os
from xml.etree import ElementTree
from xml.etree.ElementTree import Element, SubElement
from lxml import etree
import codecs
from libs.constants import DEFAULT_ENCODING
TXT_EXT = '.txt'
ENCODE_METHOD = DEFAULT_ENCODING
class YOLOWriter:
def __init__(self, folder_name, filename, img_size, database_src='Unknown', local_img_path=None):
self.folder_name = folder_name
self.filename = filename
self.database_src = database_src
self.img_size = img_size
self.box_list = []
self.local_img_path = local_img_path
self.verified = False
def add_bnd_box(self, x_min, y_min, x_max, y_max, name, difficult):
bnd_box = {'xmin': x_min, 'ymin': y_min, 'xmax': x_max, 'ymax': y_max}
bnd_box['name'] = name
bnd_box['difficult'] = difficult
self.box_list.append(bnd_box)
    def bnd_box_to_yolo_line(self, box, class_list=None):
        # Avoid a shared mutable default argument
        if class_list is None:
            class_list = []
x_min = box['xmin']
x_max = box['xmax']
y_min = box['ymin']
y_max = box['ymax']
x_center = float((x_min + x_max)) / 2 / self.img_size[1]
y_center = float((y_min + y_max)) / 2 / self.img_size[0]
w = float((x_max - x_min)) / self.img_size[1]
h = float((y_max - y_min)) / self.img_size[0]
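        # Illustrative example (made-up numbers): in a 200x100 image
        # (img_size = [100, 200, 3]), a box with xmin=10, ymin=20, xmax=50, ymax=80 gives
        # x_center = 30/200 = 0.15, y_center = 50/100 = 0.5, w = 40/200 = 0.2, h = 60/100 = 0.6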
# PR387
box_name = box['name']
if box_name not in class_list:
class_list.append(box_name)
class_index = class_list.index(box_name)
return class_index, x_center, y_center, w, h
    def save(self, class_list=None, target_file=None):
        # Avoid a shared mutable default argument
        if class_list is None:
            class_list = []
out_file = None # Update yolo .txt
out_class_file = None # Update class list .txt
if target_file is None:
out_file = open(
self.filename + TXT_EXT, 'w', encoding=ENCODE_METHOD)
classes_file = os.path.join(os.path.dirname(os.path.abspath(self.filename)), "classes.txt")
out_class_file = open(classes_file, 'w')
else:
out_file = codecs.open(target_file, 'w', encoding=ENCODE_METHOD)
classes_file = os.path.join(os.path.dirname(os.path.abspath(target_file)), "classes.txt")
out_class_file = open(classes_file, 'w')
for box in self.box_list:
class_index, x_center, y_center, w, h = self.bnd_box_to_yolo_line(box, class_list)
# print (classIndex, x_center, y_center, w, h)
out_file.write("%d %.9f %.9f %.9f %.9f\n" % (class_index, x_center, y_center, w, h))
# print (classList)
# print (out_class_file)
for c in class_list:
out_class_file.write(c+'\n')
out_class_file.close()
out_file.close()
class YoloReader:
def __init__(self, file_path, image, class_list_path=None):
# shapes type:
        # [label, [(x1,y1), (x2,y2), (x3,y3), (x4,y4)], color, color, difficult]
self.shapes = []
self.file_path = file_path
if class_list_path is None:
dir_path = os.path.dirname(os.path.realpath(self.file_path))
self.class_list_path = os.path.join(dir_path, "classes.txt")
else:
self.class_list_path = class_list_path
# print (file_path, self.class_list_path)
        with open(self.class_list_path, 'r') as classes_file:
            self.classes = classes_file.read().strip('\n').split('\n')
# print (self.classes)
img_size = [image.height(), image.width(),
1 if image.isGrayscale() else 3]
self.img_size = img_size
self.verified = False
# try:
self.parse_yolo_format()
# except:
# pass
def get_shapes(self):
return self.shapes
def add_shape(self, label, x_min, y_min, x_max, y_max, difficult):
points = [(x_min, y_min), (x_max, y_min), (x_max, y_max), (x_min, y_max)]
self.shapes.append((label, points, None, None, difficult))
def yolo_line_to_shape(self, class_index, x_center, y_center, w, h):
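        """Convert one normalized YOLO line back to a label and pixel corner coordinates, clamped to the image."""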
label = self.classes[int(class_index)]
x_min = max(float(x_center) - float(w) / 2, 0)
x_max = min(float(x_center) + float(w) / 2, 1)
y_min = max(float(y_center) - float(h) / 2, 0)
y_max = min(float(y_center) + float(h) / 2, 1)
x_min = round(self.img_size[1] * x_min)
x_max = round(self.img_size[1] * x_max)
y_min = round(self.img_size[0] * y_min)
y_max = round(self.img_size[0] * y_max)
return label, x_min, y_min, x_max, y_max
    def parse_yolo_format(self):
        with open(self.file_path, 'r') as bnd_box_file:
            for bnd_box in bnd_box_file:
                class_index, x_center, y_center, w, h = bnd_box.strip().split(' ')
                label, x_min, y_min, x_max, y_max = self.yolo_line_to_shape(class_index, x_center, y_center, w, h)
                # Caveat: difficult flag is discarded when saved as yolo format.
                self.add_shape(label, x_min, y_min, x_max, y_max, False)
|
import os
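# Walk the current directory and count characters and files, skipping VCS and bytecode artifacts.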
total_chars = 0
total_files = 0
for root, dirs, files in os.walk(os.path.abspath(""), topdown=False):
for name in files:
file_name = os.path.join(root, name)
if not (('.git' in file_name) or ('pycache' in file_name) or ('pyc' in file_name)):
try:
with open(file_name, 'r') as f:
data = f.read()
total_chars += len(data)
            except Exception:
                print(f"error : {file_name}")
total_files += 1
print(f"Your app contain {total_chars} characters")
print(f"Your app contain {total_files} files")
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional tests for models.models."""
__author__ = [
'[email protected] (John Cox)',
]
import datetime
from models import config
from models import entities
from models import models
from models import services
from modules.notifications import notifications
from tests.functional import actions
from google.appengine.ext import db
# Disable complaints about docstrings for self-documenting tests.
# pylint: disable=g-missing-docstring
class EventEntityTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly(self):
event = models.EventEntity(source='source', user_id='1')
key = event.put()
exported = event.for_export(self.transform)
self.assert_blacklisted_properties_removed(event, exported)
self.assertEqual('source', event.source)
self.assertEqual('transformed_1', exported.user_id)
self.assertEqual(key, models.EventEntity.safe_key(key, self.transform))
class ContentChunkTestCase(actions.ExportTestBase):
"""Tests ContentChunkEntity|DAO|DTO."""
# Allow access to protected members under test.
# pylint: disable=protected-access
def setUp(self):
super(ContentChunkTestCase, self).setUp()
config.Registry.test_overrides[models.CAN_USE_MEMCACHE.name] = True
self.content_type = 'content_type'
self.contents = 'contents'
self.id = 1
self.memcache_key = models.ContentChunkDAO._get_memcache_key(self.id)
self.resource_id = 'resource:id' # To check colons are preserved.
self.supports_custom_tags = True
self.type_id = 'type_id'
self.uid = models.ContentChunkDAO.make_uid(
self.type_id, self.resource_id)
def tearDown(self):
config.Registry.test_overrides = {}
super(ContentChunkTestCase, self).tearDown()
def assert_fuzzy_equal(self, first, second):
"""Assert doesn't check last_modified, allowing clock skew."""
self.assertTrue(isinstance(first, models.ContentChunkDTO))
self.assertTrue(isinstance(second, models.ContentChunkDTO))
self.assertEqual(first.content_type, second.content_type)
self.assertEqual(first.contents, second.contents)
self.assertEqual(first.id, second.id)
self.assertEqual(first.resource_id, second.resource_id)
self.assertEqual(
first.supports_custom_tags, second.supports_custom_tags)
self.assertEqual(first.type_id, second.type_id)
def assert_list_fuzzy_equal(self, first, second):
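        """Pairwise fuzzy-compare two equal-length lists of DTOs."""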
self.assertEqual(len(first), len(second))
for f, s in zip(first, second):
self.assert_fuzzy_equal(f, s)
def test_dao_delete_deletes_entity_and_cached_dto(self):
key = models.ContentChunkDAO.save(models.ContentChunkDTO({
'content_type': self.content_type,
'contents': self.contents,
'id': self.id,
'resource_id': self.resource_id,
'supports_custom_tags': self.supports_custom_tags,
'type_id': self.type_id,
}))
entity = db.get(key)
dto = models.ContentChunkDAO.get(key.id())
self.assertIsNotNone(entity)
self.assertIsNotNone(dto)
models.ContentChunkDAO.delete(key.id())
entity = db.get(key)
dto = models.ContentChunkDAO.get(key.id())
self.assertIsNone(entity)
self.assertIsNone(dto)
def test_dao_delete_runs_successfully_when_no_entity_present(self):
self.assertIsNone(models.ContentChunkDAO.delete(self.id))
def test_dao_get_returns_cached_entity(self):
key = models.ContentChunkDAO.save(models.ContentChunkDTO({
'content_type': self.content_type,
'contents': self.contents,
'resource_id': self.resource_id,
'supports_custom_tags': self.supports_custom_tags,
'type_id': self.type_id,
}))
entity = db.get(key)
entity.contents = 'patched'
patched_dto = models.ContentChunkDAO._make_dto(entity)
models.MemcacheManager.set(self.memcache_key, patched_dto)
from_datastore = models.ContentChunkEntity.get_by_id(self.id)
from_cache = models.MemcacheManager.get(self.memcache_key)
self.assert_fuzzy_equal(patched_dto, from_cache)
self.assertNotEqual(patched_dto.contents, from_datastore.contents)
def test_dao_get_returns_datastore_entity_and_populates_cache(self):
self.assertIsNone(models.MemcacheManager.get(self.memcache_key))
key = models.ContentChunkDAO.save(models.ContentChunkDTO({
'content_type': self.content_type,
'contents': self.contents,
'resource_id': self.resource_id,
'supports_custom_tags': self.supports_custom_tags,
'type_id': self.type_id,
}))
expected_dto = models.ContentChunkDAO._make_dto(db.get(key))
from_datastore = models.ContentChunkEntity.get_by_id(self.id)
from_cache = models.MemcacheManager.get(self.memcache_key)
self.assert_fuzzy_equal(
expected_dto, models.ContentChunkDAO._make_dto(from_datastore))
self.assert_fuzzy_equal(expected_dto, from_cache)
def test_dao_get_returns_none_when_entity_id_none(self):
self.assertIsNone(models.ContentChunkDAO.get(None))
def test_dao_get_returns_none_when_no_entity_in_datastore(self):
self.assertIsNone(models.MemcacheManager.get(self.memcache_key))
self.assertIsNone(models.ContentChunkDAO.get(self.id))
self.assertEqual(
models.NO_OBJECT, models.MemcacheManager.get(self.memcache_key))
def test_dao_get_by_uid_returns_empty_list_if_no_matches(self):
self.assertEqual([], models.ContentChunkDAO.get_by_uid(self.uid))
def test_dao_get_by_uid_returns_matching_dtos_sorted_by_id(self):
different_uid = models.ContentChunkDAO.make_uid(
'other', self.resource_id)
first_key = models.ContentChunkEntity(
content_type=self.content_type, contents=self.contents,
supports_custom_tags=self.supports_custom_tags, uid=self.uid).put()
second_key = models.ContentChunkEntity(
content_type=self.content_type, contents=self.contents + '2',
supports_custom_tags=self.supports_custom_tags, uid=self.uid).put()
unused_different_uid_key = models.ContentChunkEntity(
content_type=self.content_type, contents=self.contents,
supports_custom_tags=self.supports_custom_tags,
uid=different_uid).put()
expected_dtos = [
models.ContentChunkDAO.get(first_key.id()),
models.ContentChunkDAO.get(second_key.id())]
actual_dtos = models.ContentChunkDAO.get_by_uid(self.uid)
self.assert_list_fuzzy_equal(expected_dtos, actual_dtos)
def test_dao_make_dto(self):
key = models.ContentChunkEntity(
content_type=self.content_type, contents=self.contents,
supports_custom_tags=self.supports_custom_tags, uid=self.uid).put()
entity = db.get(key) # Refetch to avoid timestamp skew.
dto = models.ContentChunkDAO._make_dto(entity)
self.assertEqual(entity.content_type, dto.content_type)
self.assertEqual(entity.contents, dto.contents)
self.assertEqual(entity.key().id(), dto.id)
self.assertEqual(entity.last_modified, dto.last_modified)
self.assertEqual(entity.supports_custom_tags, dto.supports_custom_tags)
entity_type_id, entity_resource_id = models.ContentChunkDAO._split_uid(
entity.uid)
self.assertEqual(entity_resource_id, dto.resource_id)
self.assertEqual(entity_type_id, dto.type_id)
def test_dao_make_uid(self):
self.assertEqual(None, models.ContentChunkDAO.make_uid(None, None))
self.assertEqual(
'foo:bar', models.ContentChunkDAO.make_uid('foo', 'bar'))
def test_dao_make_uid_requires_both_args_disallows_colons_in_type_id(self):
        bad_pairs = [
            (None, 'foo'),
            ('foo', None),
            (':', None),
            (':', 'foo'),
            ('', ''),
            ('', 'foo'),
            ('foo', ''),
            (':', ''),
        ]
for bad_pair in bad_pairs:
with self.assertRaises(AssertionError):
models.ContentChunkDAO.make_uid(*bad_pair)
def test_dao_split_uid(self):
self.assertEqual(
(None, None), models.ContentChunkDAO._split_uid(None))
self.assertEqual(
('foo', 'bar'), models.ContentChunkDAO._split_uid('foo:bar'))
self.assertEqual(
('foo', 'http://bar'),
models.ContentChunkDAO._split_uid('foo:http://bar'))
def test_dao_split_uid_requires_colon_and_both_values_are_truthy(self):
with self.assertRaises(AssertionError):
models.ContentChunkDAO._split_uid('foo')
with self.assertRaises(AssertionError):
models.ContentChunkDAO._split_uid(':')
with self.assertRaises(AssertionError):
models.ContentChunkDAO._split_uid('foo:')
with self.assertRaises(AssertionError):
models.ContentChunkDAO._split_uid(':foo')
def test_dao_save_creates_new_object_and_populates_cache(self):
self.assertIsNone(models.MemcacheManager.get(self.memcache_key))
key = models.ContentChunkDAO.save(models.ContentChunkDTO({
'content_type': self.content_type,
'contents': self.contents,
'id': self.id,
'resource_id': self.resource_id,
'supports_custom_tags': self.supports_custom_tags,
'type_id': self.type_id,
}))
expected_dto = models.ContentChunkDAO._make_dto(db.get(key))
self.assert_fuzzy_equal(
expected_dto, models.MemcacheManager.get(self.memcache_key))
def test_dao_save_updates_existing_object_and_populates_cache(self):
key = models.ContentChunkDAO.save(models.ContentChunkDTO({
'content_type': self.content_type,
'contents': self.contents,
'id': self.id,
'resource_id': self.resource_id,
'supports_custom_tags': self.supports_custom_tags,
'type_id': self.type_id,
}))
original_dto = models.ContentChunkDAO._make_dto(db.get(key))
self.assert_fuzzy_equal(
original_dto, models.MemcacheManager.get(self.memcache_key))
original_dto.content_type = 'new_content_type'
original_dto.contents = 'new_contents'
original_dto.supports_custom_tags = True
original_dto.uid = 'new_system_id:new_resource:id'
models.ContentChunkDAO.save(original_dto)
expected_dto = models.ContentChunkDAO._make_dto(db.get(key))
self.assert_fuzzy_equal(
expected_dto, models.MemcacheManager.get(self.memcache_key))
class PersonalProfileTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly_and_sets_safe_key(self):
date_of_birth = datetime.date.today()
email = '[email protected]'
legal_name = 'legal_name'
nick_name = 'nick_name'
user_id = '1'
profile = models.PersonalProfile(
date_of_birth=date_of_birth, email=email, key_name=user_id,
legal_name=legal_name, nick_name=nick_name)
profile.put()
exported = profile.for_export(self.transform)
self.assert_blacklisted_properties_removed(profile, exported)
self.assertEqual(
self.transform(user_id), exported.safe_key.name())
class MemcacheManagerTestCase(actions.TestBase):
def setUp(self):
super(MemcacheManagerTestCase, self).setUp()
config.Registry.test_overrides = {models.CAN_USE_MEMCACHE.name: True}
def tearDown(self):
config.Registry.test_overrides = {}
super(MemcacheManagerTestCase, self).tearDown()
def test_set_multi(self):
data = {'a': 'A', 'b': 'B'}
models.MemcacheManager.set_multi(data)
self.assertEquals('A', models.MemcacheManager.get('a'))
self.assertEquals('B', models.MemcacheManager.get('b'))
def test_get_multi(self):
models.MemcacheManager.set('a', 'A')
models.MemcacheManager.set('b', 'B')
data = models.MemcacheManager.get_multi(['a', 'b', 'c'])
self.assertEquals(2, len(data.keys()))
self.assertEquals('A', data['a'])
self.assertEquals('B', data['b'])
def test_set_multi_no_memcache(self):
config.Registry.test_overrides = {}
data = {'a': 'A', 'b': 'B'}
models.MemcacheManager.set_multi(data)
self.assertEquals(None, models.MemcacheManager.get('a'))
self.assertEquals(None, models.MemcacheManager.get('b'))
def test_get_multi_no_memcache(self):
config.Registry.test_overrides = {}
models.MemcacheManager.set('a', 'A')
models.MemcacheManager.set('b', 'B')
data = models.MemcacheManager.get_multi(['a', 'b', 'c'])
self.assertEquals(0, len(data.keys()))
class TestEntity(entities.BaseEntity):
data = db.TextProperty(indexed=False)
class TestDto(object):
def __init__(self, the_id, the_dict):
self.id = the_id
self.dict = the_dict
class TestDao(models.BaseJsonDao):
DTO = TestDto
ENTITY = TestEntity
ENTITY_KEY_TYPE = models.BaseJsonDao.EntityKeyTypeName
class BaseJsonDaoTestCase(actions.TestBase):
def setUp(self):
super(BaseJsonDaoTestCase, self).setUp()
config.Registry.test_overrides = {models.CAN_USE_MEMCACHE.name: True}
def tearDown(self):
config.Registry.test_overrides = {}
super(BaseJsonDaoTestCase, self).tearDown()
def test_bulk_load(self):
key_0 = 'dto_0'
key_1 = 'dto_1'
mc_key_0 = '(entity:TestEntity:dto_0)'
mc_key_1 = '(entity:TestEntity:dto_1)'
dto = TestDto(key_0, {'a': 0})
TestDao.save(dto)
dto = TestDto(key_1, {'a': 1})
TestDao.save(dto)
def assert_bulk_load_succeeds():
dtos = TestDao.bulk_load([key_0, key_1, 'dto_2'])
self.assertEquals(3, len(dtos))
self.assertEquals(key_0, dtos[0].id)
self.assertEquals({'a': 0}, dtos[0].dict)
self.assertEquals(key_1, dtos[1].id)
self.assertEquals({'a': 1}, dtos[1].dict)
self.assertIsNone(dtos[2])
# Confirm entities in memcache
memcache_entities = models.MemcacheManager.get_multi(
[mc_key_0, mc_key_1])
self.assertEquals(2, len(memcache_entities))
self.assertIn(mc_key_0, memcache_entities)
self.assertIn(mc_key_1, memcache_entities)
assert_bulk_load_succeeds()
# Evict one from memcache
models.MemcacheManager.delete(mc_key_0)
memcache_entities = models.MemcacheManager.get_multi(
[mc_key_0, mc_key_1])
self.assertEquals(1, len(memcache_entities))
self.assertIn(mc_key_1, memcache_entities)
assert_bulk_load_succeeds()
# Evict both from memcache
models.MemcacheManager.delete(mc_key_0)
models.MemcacheManager.delete(mc_key_1)
memcache_entities = models.MemcacheManager.get_multi(
[mc_key_0, mc_key_1])
self.assertEquals(0, len(memcache_entities))
assert_bulk_load_succeeds()
class QuestionDAOTestCase(actions.TestBase):
"""Functional tests for QuestionDAO."""
# Name determined by parent. pylint: disable=g-bad-name
def setUp(self):
"""Sets up datastore contents."""
super(QuestionDAOTestCase, self).setUp()
self.used_twice_question_dto = models.QuestionDTO(None, {})
self.used_twice_question_id = models.QuestionDAO.save(
self.used_twice_question_dto)
self.used_once_question_dto = models.QuestionDTO(None, {})
self.used_once_question_id = models.QuestionDAO.save(
self.used_once_question_dto)
self.unused_question_dto = models.QuestionDTO(None, {})
self.unused_question_id = models.QuestionDAO.save(
self.unused_question_dto)
# Handcoding the dicts. This is dangerous because they're handcoded
# elsewhere, the implementations could fall out of sync, and these tests
# may then pass erroneously.
self.first_question_group_description = 'first_question_group'
self.first_question_group_dto = models.QuestionGroupDTO(
None,
{'description': self.first_question_group_description,
'items': [{'question': str(self.used_once_question_id)}]})
self.first_question_group_id = models.QuestionGroupDAO.save(
self.first_question_group_dto)
self.second_question_group_description = 'second_question_group'
self.second_question_group_dto = models.QuestionGroupDTO(
None,
{'description': self.second_question_group_description,
'items': [{'question': str(self.used_twice_question_id)}]})
self.second_question_group_id = models.QuestionGroupDAO.save(
self.second_question_group_dto)
self.third_question_group_description = 'third_question_group'
self.third_question_group_dto = models.QuestionGroupDTO(
None,
{'description': self.third_question_group_description,
'items': [{'question': str(self.used_twice_question_id)}]})
self.third_question_group_id = models.QuestionGroupDAO.save(
self.third_question_group_dto)
def test_used_by_returns_single_question_group(self):
self.assertEqual(
long(self.first_question_group_id),
models.QuestionDAO.used_by(self.used_once_question_id)[0].id)
def test_used_by_returns_multiple_question_groups(self):
used_by = models.QuestionDAO.used_by(self.used_twice_question_id)
self.assertEqual(long(self.second_question_group_id), used_by[0].id)
self.assertEqual(long(self.third_question_group_id), used_by[1].id)
def test_used_by_returns_empty_list_for_unused_question(self):
not_found_id = 7
self.assertFalse(models.QuestionDAO.load(not_found_id))
self.assertEqual([], models.QuestionDAO.used_by(not_found_id))
class StudentTestCase(actions.ExportTestBase):
def test_for_export_transforms_correctly(self):
user_id = '1'
        student = models.Student(key_name='name', user_id=user_id, is_enrolled=True)
key = student.put()
exported = student.for_export(self.transform)
self.assert_blacklisted_properties_removed(student, exported)
self.assertTrue(exported.is_enrolled)
self.assertEqual('transformed_1', exported.user_id)
self.assertEqual(
'transformed_' + user_id, exported.key_by_user_id.name())
self.assertEqual(
models.Student.safe_key(key, self.transform), exported.safe_key)
def test_get_key_does_not_transform_by_default(self):
user_id = 'user_id'
student = models.Student(key_name='name', user_id=user_id)
student.put()
self.assertEqual(user_id, student.get_key().name())
def test_safe_key_transforms_name(self):
key = models.Student(key_name='name').put()
self.assertEqual(
'transformed_name',
models.Student.safe_key(key, self.transform).name())
class StudentProfileDAOTestCase(actions.ExportTestBase):
# Allow tests of protected state. pylint: disable=protected-access
def test_can_send_welcome_notifications_false_if_config_value_false(self):
self.swap(services.notifications, 'enabled', lambda: True)
self.swap(services.unsubscribe, 'enabled', lambda: True)
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {'send_welcome_notifications': False}
}))
self.assertFalse(
models.StudentProfileDAO._can_send_welcome_notifications(handler))
def test_can_send_welcome_notifications_false_notifications_disabled(self):
self.swap(services.notifications, 'enabled', lambda: False)
self.swap(services.unsubscribe, 'enabled', lambda: True)
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {'send_welcome_notifications': True}
}))
self.assertFalse(
models.StudentProfileDAO._can_send_welcome_notifications(handler))
def test_can_send_welcome_notifications_false_unsubscribe_disabled(self):
self.swap(services.notifications, 'enabled', lambda: True)
self.swap(services.unsubscribe, 'enabled', lambda: False)
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {'send_welcome_notifications': True}
}))
self.assertFalse(
models.StudentProfileDAO._can_send_welcome_notifications(handler))
def test_can_send_welcome_notifications_true_if_all_true(self):
self.swap(services.notifications, 'enabled', lambda: True)
self.swap(services.unsubscribe, 'enabled', lambda: True)
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {'send_welcome_notifications': True}
}))
self.assertTrue(
models.StudentProfileDAO._can_send_welcome_notifications(handler))
def test_get_send_welcome_notifications(self):
handler = actions.MockHandler(app_context=actions.MockAppContext())
self.assertFalse(
models.StudentProfileDAO._get_send_welcome_notifications(handler))
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {}
}))
self.assertFalse(
models.StudentProfileDAO._get_send_welcome_notifications(handler))
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {'send_welcome_notifications': False}
}))
self.assertFalse(
models.StudentProfileDAO._get_send_welcome_notifications(handler))
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {'send_welcome_notifications': True}
}))
self.assertTrue(
models.StudentProfileDAO._get_send_welcome_notifications(handler))
def test_send_welcome_notification_enqueues_and_sends(self):
nick_name = 'No Body'
email = '[email protected]'
sender = '[email protected]'
title = 'title'
student = models.Student(key_name=email, name=nick_name)
self.swap(services.notifications, 'enabled', lambda: True)
self.swap(services.unsubscribe, 'enabled', lambda: True)
handler = actions.MockHandler(
app_context=actions.MockAppContext(environ={
'course': {
'send_welcome_notifications': True,
'title': title,
'welcome_notifications_sender': sender,
},
}))
models.StudentProfileDAO._send_welcome_notification(handler, student)
self.execute_all_deferred_tasks()
notification = notifications.Notification.all().get()
payload = notifications.Payload.all().get()
audit_trail = notification.audit_trail
self.assertEqual(title, audit_trail['course_title'])
self.assertEqual(
'http://mycourse.appspot.com/slug/',
audit_trail['course_url'])
self.assertTrue(audit_trail['unsubscribe_url'].startswith(
'http://mycourse.appspot.com/slug/modules/unsubscribe'))
self.assertTrue(notification._done_date)
self.assertEqual(email, notification.to)
self.assertEqual(sender, notification.sender)
self.assertEqual('Welcome to ' + title, notification.subject)
self.assertTrue(payload)
class StudentAnswersEntityTestCase(actions.ExportTestBase):
def test_safe_key_transforms_name(self):
student_key = models.Student(key_name='name').put()
answers = models.StudentAnswersEntity(key_name=student_key.name())
answers_key = answers.put()
self.assertEqual(
'transformed_name',
models.StudentAnswersEntity.safe_key(
answers_key, self.transform).name())
class StudentPropertyEntityTestCase(actions.ExportTestBase):
def test_safe_key_transforms_user_id_component(self):
user_id = 'user_id'
student = models.Student(key_name='[email protected]', user_id=user_id)
student.put()
property_name = 'property-name'
student_property_key = models.StudentPropertyEntity.create(
student, property_name).put()
self.assertEqual(
'transformed_%s-%s' % (user_id, property_name),
models.StudentPropertyEntity.safe_key(
student_property_key, self.transform).name())
|
#!/usr/bin/env python
# encoding: utf-8
"""
Drive the Robot
"""
import sched, time
import signal, sys
import requests
from urllib.request import urlopen
from enum import Enum
import constants
from arduino import Arduino
from navigation import Navigation
class State(Enum):
COLLECT_spin_and_search_cone = 1
COLLECT_wander_and_search_cone = 2
COLLECT_approach_cone = 3
COLLECT_acquire_cone = 4
COLLECT_open_claw = 5
DELIVER_spin_and_search_target = 6
DELIVER_wander_and_search_target = 7
DELIVER_approach_target = 8
DELIVER_verify_target = 9
DELIVER_release_cone = 10
class Driver():
    # Wait `looprate` seconds between loops (0.2 s by default).
    # The wait happens after loop() runs; the time loop() takes is not
    # subtracted from the total wait time.
def __init__(self, webserver_queue=None, looprate=0.2):
self.arduino = Arduino()
self.navigation = Navigation(self.arduino)
self.state = State.COLLECT_spin_and_search_cone
# Define constants
self.looprate = looprate
# Variables updated from webserver
self.webserver_queue = webserver_queue
self.mode = "auto"
self.manual_direction = "stop"
# Variables used by states
self.start_time = None
self.ready_to_deliver = False
def start(self):
self.stop = False
s = sched.scheduler(time.time, self.arduino.board.sleep)
s.enter(0, 1, self.loop, (s,))
s.run()
def stop_on_next_loop(self):
self.stop = True
def loop(self, sc):
# Read webserver queue for new messages
while((self.webserver_queue != None) and (len(self.webserver_queue) > 0)):
self.process_message(self.webserver_queue.popleft())
# If stopped, just loop
if (self.stop):
sc.enter(self.looprate, 1, self.loop, (sc,))
return
if (self.mode == "auto"):
# ---- Collect Spin and Search Cone ----
if (self.state == State.COLLECT_spin_and_search_cone):
if (self.start_time == None):
self.start_time = time.time()
if (time.time() - self.start_time >= constants.SPIN_TIME):
self.start_time = None
self.navigation.stop()
self.change_state(State.COLLECT_wander_and_search_cone)
# TODO: Use signatures with pixy
else:
status = self.navigation.spin_and_search_cone()
if (status == "CONE_FOUND"):
self.start_time = None
self.change_state(State.COLLECT_approach_cone)
# ---- Collect Wander and Search ----
elif (self.state == State.COLLECT_wander_and_search_cone):
if (self.start_time == None):
self.start_time = time.time()
if (time.time() - self.start_time >= constants.WANDER_TIME):
self.start_time = None
self.navigation.stop()
self.change_state(State.COLLECT_spin_and_search_cone)
# TODO: Use signatures with pixy
else:
status = self.navigation.wander_and_search_cone()
if (status == "CONE_FOUND"):
self.start_time = None
self.change_state(State.COLLECT_approach_cone)
# ---- Collect Approach Cone ----
elif (self.state == State.COLLECT_approach_cone):
status = self.navigation.approach_cone()
if (status == "LOST_CONE"):
self.change_state(State.COLLECT_wander_and_search_cone)
elif (status == "CONE_IN_RANGE"):
self.change_state(State.COLLECT_acquire_cone)
# ---- Collect Acquire Cone ----
elif (self.state == State.COLLECT_acquire_cone):
self.arduino.close_claw()
ping = self.arduino.get_ping()
if (ping <= constants.PING_CONE_THRESHOLD and ping != 0):
if (self.ready_to_deliver == True):
self.change_state(State.DELIVER_spin_and_search_target)
self.ready_to_deliver = False
else:
print("Waiting for inter-bot command to deliver...")
else:
self.change_state(State.COLLECT_open_claw)
# ---- Collect Open Claw ----
elif (self.state == State.COLLECT_open_claw):
self.arduino.open_claw()
self.change_state(State.COLLECT_approach_cone)
# ---- Deliver Spin and Search Target ----
elif (self.state == State.DELIVER_spin_and_search_target):
if (self.start_time == None):
self.start_time = time.time()
if (time.time() - self.start_time >= constants.SPIN_TIME):
self.start_time = None
self.navigation.stop()
self.change_state(State.DELIVER_wander_and_search_target)
# TODO: Use signatures with pixy
else:
status = self.navigation.spin_and_search_target()
if (status == "TARGET_FOUND"):
self.start_time = None
self.change_state(State.DELIVER_approach_target)
# ---- Deliver Wander and Search Target ----
elif (self.state == State.DELIVER_wander_and_search_target):
if (self.start_time == None):
self.start_time = time.time()
if (time.time() - self.start_time >= constants.WANDER_TIME):
self.start_time = None
self.navigation.stop()
self.change_state(State.DELIVER_spin_and_search_target)
# TODO: Use signatures with pixy
else:
status = self.navigation.wander_and_search_target()
if (status == "TARGET_FOUND"):
self.start_time = None
self.change_state(State.DELIVER_approach_target)
# ---- Deliver Approach Target ----
elif (self.state == State.DELIVER_approach_target):
status = self.navigation.approach_target()
if (status == "LOST_TARGET"):
self.change_state(State.DELIVER_wander_and_search_target)
elif (status == "TARGET_IN_RANGE"):
self.change_state(State.DELIVER_verify_target)
# ---- Deliver Verify Target ----
elif (self.state == State.DELIVER_verify_target):
self.change_state(State.DELIVER_release_cone)
# ---- Deliver Release Cone ----
elif (self.state == State.DELIVER_release_cone):
self.arduino.open_claw()
self.arduino.board.sleep(1)
self.navigation.reverse()
self.arduino.board.sleep(2)
self.navigation.spin_clockwise()
self.arduino.board.sleep(3)
print("Starting over...")
self.change_state(State.COLLECT_spin_and_search_cone)
try:
endpoint = "http://" + constants.GROUP10_IP + constants.GROUP10_ENDPOINT_READY
print("Hitting endpoint: " + endpoint)
# requests.get(endpoint, timeout=0.001)
urlopen(endpoint)
                except Exception:
print("Failed to hit Group10 Endpoint, trying again...")
print("Just kidding, I'm giving up!")
elif (self.mode == "manual"):
# print("In manual mode")
if (self.manual_direction == "stop"):
(left_motor, right_motor) = (0.0, 0.0)
elif (self.manual_direction == "forward"):
(left_motor, right_motor) = (0.2, 0.2)
elif (self.manual_direction == "backward"):
(left_motor, right_motor) = (-0.2, -0.2)
elif (self.manual_direction == "right"):
(left_motor, right_motor) = (0.2, 0.0)
elif (self.manual_direction == "left"):
(left_motor, right_motor) = (0.0, 0.2)
print("L: " + str(left_motor) + " R: " + str(right_motor))
self.arduino.set_motors(left_motor, right_motor)
elif (self.mode == "kill"):
self.shutdown()
sys.exit(0)
# Loop again after delay
sc.enter(self.looprate, 1, self.loop, (sc,))
def change_state(self, new_state):
print("[State Change] " + str(self.state) + " -> " + str(new_state))
self.state = new_state
def process_message(self, message):
if (message == "stop"):
self.stop_on_next_loop()
elif (message == "start"):
self.start()
elif (message == "print"):
print("Print!")
# Modes
elif (message == "auto"):
self.mode = "auto"
elif (message == "manual"):
self.mode = "manual"
elif (message == "kill"):
self.mode = "kill"
# Inter-Bot commands
elif (message == "bot_ready_to_deliver"):
print("Received inter-bot communication: ready_to_deliver")
self.ready_to_deliver = True
# Manual Directions
elif (message == "manual_forward"):
self.manual_direction = "forward"
elif (message == "manual_backward"):
self.manual_direction = "backward"
elif (message == "manual_right"):
self.manual_direction = "right"
elif (message == "manual_left"):
self.manual_direction = "left"
elif (message == "manual_stop"):
self.manual_direction = "stop"
elif (message == "manual_claw_open"):
self.arduino.open_claw()
elif (message == "manual_claw_close"):
self.arduino.close_claw()
def shutdown(self, signal=None, frame=None):
self.arduino.shutdown()
if __name__ == '__main__':
driver = Driver()
# Graceful shutdown on ctrl-c
signal.signal(signal.SIGINT, driver.shutdown)
driver.start()
|
import torch
def onehot(y, num_classes):
y_onehot = torch.zeros(y.size(0), num_classes).to(y.device)
if len(y.size()) == 1:
y_onehot = y_onehot.scatter_(1, y.unsqueeze(-1), 1)
elif len(y.size()) == 2:
y_onehot = y_onehot.scatter_(1, y, 1)
else:
raise ValueError("[onehot]: y should be in shape [B], or [B, C]")
return y_onehot
def sum(tensor, dim=None, keepdim=False):
if dim is None:
# sum up all dim
return torch.sum(tensor)
else:
if isinstance(dim, int):
dim = [dim]
dim = sorted(dim)
for d in dim:
tensor = tensor.sum(dim=d, keepdim=True)
if not keepdim:
for i, d in enumerate(dim):
tensor.squeeze_(d-i)
return tensor
def mean(tensor, dim=None, keepdim=False):
if dim is None:
# mean all dim
return torch.mean(tensor)
else:
if isinstance(dim, int):
dim = [dim]
dim = sorted(dim)
for d in dim:
tensor = tensor.mean(dim=d, keepdim=True)
if not keepdim:
for i, d in enumerate(dim):
tensor.squeeze_(d-i)
return tensor
def split_feature(tensor, type="split"):
"""
type = ["split", "cross"]
"""
C = tensor.size(1)
if type == "split":
return tensor[:, :C // 2, ...], tensor[:, C // 2:, ...]
elif type == "cross3":
return tensor[:, 0::3, ...], tensor[:, 1::3, ...], tensor[:, 2::3, ...]
elif type == "cross":
return tensor[:, 0::2, ...], tensor[:, 1::2, ...]
def cat_feature(tensor_a, tensor_b):
return torch.cat((tensor_a, tensor_b), dim=1)
def timesteps(tensor):
return int(tensor.size(2))
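# A minimal, hedged demo of the helpers above; the tensors and shapes are
# illustrative assumptions, not fixtures from any test suite.
if __name__ == "__main__":
    y = torch.tensor([0, 2, 1])
    print(onehot(y, num_classes=3))                # [3, 3] one-hot rows
    x = torch.arange(24, dtype=torch.float32).view(2, 4, 3)
    print(sum(x, dim=[1, 2]).shape)                # torch.Size([2])
    print(mean(x, dim=1, keepdim=True).shape)      # torch.Size([2, 1, 3])
    a, b = split_feature(x, type="split")          # halves along channel dim
    print(a.shape, b.shape)                        # [2, 2, 3] each
    print(cat_feature(a, b).shape)                 # back to [2, 4, 3]
    print(timesteps(x))                            # 3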
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('donations', '0003_goal'),
]
operations = [
migrations.AddField(
model_name='goal',
name='source_type',
field=models.CharField(default=b'', help_text=b'Limit donations to a specific type of donation for this goal.', max_length=100, choices=[(b'', b'All types'), (b'extralife', b'Extra Life'), (b'fanfunding', b'Fan Funding'), (b'streamtip', b'Stream Tip'), (b'streamjar', b'Stream Jar'), (b'imraising', b'Imraising')]),
),
]
|
# from .resnet import (
# ResNet,
# ResNet18,
# ResNet34,
# ResNet50,
# ResNet101,
# ResNet152,
# ResNet200,
# )
# __all__ = [
# "ResNet",
# "ResNet18",
# "ResNet34",
# "ResNet50",
# "ResNet101",
# "ResNet152",
# "ResNet200",
# ]
|
#!/usr/bin/env python
#@# vim: set filetype=python:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "./lib"))
import similar_image_search as SimilarImageSearch
# NOTE: This is a test query using a '4.png' in 'database_example.json'
test_query = {
"ahash": "82b00fef8410041f8006f43f8000800fc000e023fc640003fc001001d841c000e80fe000001ff00f043dec00ec1fce00f8009e00f0001f00dbc01fc13bf00ffff7be8fffceff7fffb3ff27f777ff9fffefffbfffeffffffdcfdffffdf7ff3cfcdff41ff98ffb2fe307fc7f8de0619ff1f00c4383f1fff00bc0fffe3d81f9ff1d",
"dhash": "1d266c4c2b346d382b6dac702eb509de030d80c731a8636b20e1f1b33297d3cc563bc17e527ca1fc997bc97a19fb9c8bf2a7b825c31bfa08320d751b66253e3bae70bac139fcc7ac67e84f06cd3c3e329d9434409ccd29991e3624f12f9a79e1a7c5770332064e079f12581983c330c34379cf3b2330e2ba03b390710fb39a39",
"phash": "df4be3d8783f77902bb3f7d4a4ec5f93a7c1fbb152c921f18d598ff18c3044e3a176fe2bf2849835769e066c7e2b2744827c03d3a284d1d71ba04c4e59b7403f5cc99b3fc189a60c91bf24e0d3689de06a50ff3b920ef91fc87728bc94ea0055c7021ecb914b4613898ed3229ca932e6dd294b63d2309382b90b75e9e9627cba",
"whash": "82900fef8410001f8006e43f8000800fc0006023dc600003fc001001d8004000a80fe000001ff006041dec00cc1fcc00f8009e00f0001f009bc01fc133f00fff53be0eff8eff7fffb3ff27f777ff9fffefffbffccfffbffdc7dffffcd7fd1cf81ff01ff90ffb2fe107fc0f8de0619ff1f00c0301f1fdf00bc0fffe1980f9ff1c"
}
with SimilarImageSearch.Client() as client:
print("Testing a reload API")
if client.reload():
print("... OK!!")
else:
print("... NG!!")
print("Testing a search API")
results = client.search(
hash=test_query[ "phash" ],
type=SimilarImageSearch.SEARCH_TYPE_PHASH,
max_matches=3
)
for result in results:
print("ID: {} | distance: {}".format(result.id, result.distance))
|
import os
from pathlib import Path
import gensim.downloader as api
from gensim.models import Word2Vec
import spacy
from yasmin.constants import SPACY_MODEL_NAME
from yasmin.core import WSD
from yasmin.helpers import custom_tokenizer, hash_types, make_type_matrix
model_path = str(Path(__file__).parents[1] /
'tests' / 'fixtures' / 'data' / 'text8.model')
if os.path.isfile(model_path):
model = Word2Vec.load(model_path)
else:
dataset = api.load("text8")
model = Word2Vec(dataset)
model_types = {
'furniture': ['sofa', 'desk', 'chair', 'stool', 'bed', 'table', 'cabinet'],
'data': ['figure', 'diagram', 'chart', 'illustration', 'image', 'table']
}
nlp = spacy.load(SPACY_MODEL_NAME, create_make_doc=custom_tokenizer)
type_matrix = make_type_matrix(
model_types=model_types, model=model
)
type_cache = {hash_types(model_types): type_matrix}
wsd = WSD(nlp, model, model_types, type_cache)
sentences = {
'I sit at the table': 'furniture',
'I ate breakfast on the kitchen table in the morning': 'furniture',
'The table above shows the results of our study': 'data',
    'They wrote the results in the table on page 4': 'data',
}
for sent, type_ in sentences.items():
predicted_type = wsd.disambiguate(
sent=sent, word='table', types=list(model_types.keys())
)
print(f'{sent}\n'
f'Real type: {type_}\n'
f'Predicted type: {predicted_type[0]["type"]}\n'
f'Prediction probability: {predicted_type[0]["prob"]}\n'
f'--------------------')
|
from visions.typesets.complete_set import CompleteSet
from visions.typesets.geometry_set import GeometrySet
from visions.typesets.standard_set import StandardSet
from visions.typesets.typeset import VisionsTypeset
__all__ = ["VisionsTypeset", "CompleteSet", "StandardSet", "GeometrySet"]
|
from django.apps import AppConfig
class GestionpedidosConfig(AppConfig):
name = 'gestionpedidos'
|
import pytest
import elaspic2_rest_api
@pytest.mark.parametrize("attribute", ["__version__"])
def test_attribute(attribute):
assert getattr(elaspic2_rest_api, attribute)
def test_main():
import elaspic2_rest_api
assert elaspic2_rest_api
|
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
occurrences = defaultdict(int)
__author__ = 'Webdeziner.se'
searchUrl = raw_input('Which url to scan: ')
def make_soup(url):
response = requests.get(url)
return BeautifulSoup(response.content, 'lxml')
def search(url):
soup = make_soup(url)
tags = soup.find_all()
for tag in tags:
occurrences[tag.name] += 1
print len(tags)
search(searchUrl)
### print occurrences
for tag_count in occurrences:
print tag_count, occurrences[tag_count]
|
""" Get information related to instrument such as performance """
# Copyright (c) 2018-present, Taatu Ltd.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
import datetime
from datetime import timedelta
import pymysql.cursors
PDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.abspath(PDIR))
from settings import SmartAlphaPath, debug, get_portf_suffix
SETT = SmartAlphaPath()
sys.path.append(os.path.abspath(SETT.get_path_pwd()))
from sa_access import sa_db_access
ACCESS_OBJ = sa_db_access()
DB_USR = ACCESS_OBJ.username()
DB_PWD = ACCESS_OBJ.password()
DB_NAME = ACCESS_OBJ.db_name()
DB_SRV = ACCESS_OBJ.db_server()
def get_pct_from_date(date_from, sql_select, last_price):
"""
Get percentage performance from specified date to current date.
Args:
String: date in string format YYYYMMDD
String: SQL query to select the appropriate column
Double: Last price
Returns:
Double: Percentage performance
"""
pct = 0
initial_price = 0
connection = pymysql.connect(host=DB_SRV,
user=DB_USR,
password=DB_PWD,
db=DB_NAME,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor(pymysql.cursors.SSCursor)
sql = sql_select + "AND date <= '"+ str(date_from) +"' ORDER BY date DESC LIMIT 1"
debug(sql)
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
initial_price = row[0]
cursor.close()
connection.close()
debug('pp: ' + str(initial_price) + ' date='+ str(date_from))
if initial_price != 0:
pct = ((last_price - initial_price) / initial_price)
debug(str(pct) + ' = '+ '('+ str(last_price) +' - '+\
str(initial_price) +') / '+ str(initial_price))
return pct
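# Worked example of the arithmetic above (hypothetical numbers): with an
# initial price of 100.0 on the requested date and a last price of 104.5,
# get_pct_from_date returns (104.5 - 100.0) / 100.0 = 0.045, i.e. +4.5%.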
def get_prev_session_date(symbol):
"""
Get the last date of the last trading session
Args:
String: Instrument symbol
Returns:
Datetime: last trading session date
"""
ret = datetime.datetime(2000, 1, 1, 1, 1)
connection = pymysql.connect(host=DB_SRV,
user=DB_USR,
password=DB_PWD,
db=DB_NAME,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor)
cursor = connection.cursor(pymysql.cursors.SSCursor)
sql = "SELECT date from price_instruments_data WHERE symbol = '"+\
str(symbol) +"' ORDER BY date DESC LIMIT 2"
cursor.execute(sql)
res = cursor.fetchall()
i = 1
for row in res:
if i == 2:
ret = row[0]
i += 1
cursor.close()
connection.close()
return ret
class InstrumentSummaryData:
"""
Provide information related to trading instrument
Args:
String: Instrument symbol
Int: id of the symbol
"""
symbol_selection = ""
uid = ""
sql_select = ""
sql_select_signal = ""
last_date = datetime.datetime(2000, 1, 1, 1, 1)
d_1yp = datetime.datetime(2000, 1, 1, 1, 1)
d_6Mp = datetime.datetime(2000, 1, 1, 1, 1)
d_3Mp = datetime.datetime(2000, 1, 1, 1, 1)
d_1Mp = datetime.datetime(2000, 1, 1, 1, 1)
d_1Wp = datetime.datetime(2000, 1, 1, 1, 1)
d_1Dp = datetime.datetime(2000, 1, 1, 1, 1)
d_1Wf = datetime.datetime(2000, 1, 1, 1, 1)
last_price = 0
lp_signal = 0
def __init__(self, symbol, uid, connection):
""" Select and initialize instrument data according to args """
self.symbol_selection = symbol
cursor = connection.cursor(pymysql.cursors.SSCursor)
sql = "SELECT symbol from symbol_list WHERE uid=" + str(uid)
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
symbol_is_portf = row[0]
if symbol_is_portf.find(get_portf_suffix()) > -1:
self.sql_select = "SELECT price_close, date FROM chart_data "+\
"WHERE symbol='"+ self.symbol_selection +"' "
else:
self.sql_select = "SELECT price_close, date "+\
"FROM price_instruments_data WHERE symbol='"+ self.symbol_selection + "' "
self.sql_select_signal = "SELECT signal_price, date FROM chart_data "+\
"WHERE symbol='"+ self.symbol_selection +"' AND forecast = 0 "
sql = self.sql_select_signal+" ORDER BY Date DESC LIMIT 1"
debug(sql)
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
self.lp_signal = row[0]
sql = self.sql_select+" ORDER BY Date DESC LIMIT 1"
cursor.execute(sql)
res = cursor.fetchall()
for row in res:
self.last_price = row[0]
self.last_date = row[1]
cursor.close()
self.uid = uid
self.d_1_year_perf = self.last_date - (timedelta(days=365))
self.d_6_month_perf = self.last_date - (timedelta(days=180))
self.d_3_month_perf = self.last_date - (timedelta(days=90))
self.d_1_month_perf = self.last_date - (timedelta(days=30))
self.d_1_week_perf = self.last_date - (timedelta(days=7))
self.d_1_day_perf = get_prev_session_date(self.symbol_selection)
self.d_1_week_forcast = 0
def get_last_price(self):
""" get last trading session price """
return self.last_price
def get_ticker(self):
""" get instrument symbol """
return self.symbol_selection
def get_pct_1_year_performance(self):
""" get instrument 1-year performance """
str_date = self.d_1_year_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select, self.last_price))
def get_pct_6_month_performance(self):
""" get instrument 6-month performance """
str_date = self.d_6_month_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select, self.last_price))
def get_pct_3_month_performance(self):
""" get instrument 3-month performance """
str_date = self.d_3_month_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select, self.last_price))
def get_pct_1_month_performance(self):
""" get instrument 1-month performance """
str_date = self.d_1_month_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select, self.last_price))
def get_pct_1_week_performance(self):
""" get instrument 1-week performance """
str_date = self.d_1_week_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select, self.last_price))
def get_pct_1_day_performance(self):
""" get instrument 1-day performance """
str_date = self.d_1_day_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select, self.last_price))
def get_pct_1_year_signal(self):
""" get instrument 1-year signal performance """
str_date = self.d_1_year_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select_signal, self.lp_signal))
def get_pct_6_month_signal(self):
""" get instrument 6-month signal performance """
str_date = self.d_6_month_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select_signal, self.lp_signal))
def get_pct_3_month_signal(self):
""" get instrument 3-month signal performance """
str_date = self.d_3_month_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select_signal, self.lp_signal))
def get_pct_1_month_signal(self):
""" get instrument 1-month signal performance """
str_date = self.d_1_month_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select_signal, self.lp_signal))
def get_pct_1_week_signal(self):
""" get instrument 1-week signal performance """
str_date = self.d_1_week_perf.strftime("%Y%m%d")
return str(get_pct_from_date(str_date, self.sql_select_signal, self.lp_signal))
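# A hedged usage sketch (symbol and uid below are illustrative assumptions;
# this requires a live MySQL instance with the schema referenced above):
#
#   connection = pymysql.connect(host=DB_SRV, user=DB_USR, password=DB_PWD,
#                                db=DB_NAME, charset='utf8mb4',
#                                cursorclass=pymysql.cursors.DictCursor)
#   summary = InstrumentSummaryData('AAPL', 42, connection)
#   print(summary.get_ticker(), summary.get_pct_1_year_performance())
#   connection.close()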
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 8 21:45:27 2018
@author: pilla
"""
|
from flask import request
class HTTPError(Exception):
status_code = 400
code = NotImplemented
message = None
def __init__(self, code=None, status_code=None, payload=None):
Exception.__init__(self)
if code is not None:
if self.code is NotImplemented:
self.code = code
else:
name = self.__class__.__name__
raise Exception("cannot override default code for {}".format(name))
if self.code is NotImplemented and code is None:
raise Exception("code required")
if status_code is not None:
self.status_code = status_code
self.payload = payload
if self.message is not None:
if payload is None:
self.payload = {}
self.payload['message'] = self.message
def to_dict(self):
rv = dict(self.payload or ())
rv['code'] = self.code
return rv
class JSONDataRequired(HTTPError):
code = 'json_data_required'
class InvalidField(HTTPError):
code = 'invalid_field'
def __init__(self, field, message):
assert request.url_rule.input_schema is not None
assert field.name in request.url_rule.input_schema.fields
super(InvalidField, self).__init__(payload={
'field_name': field.serialized_name or field.name,
'message': message,
})
class InvalidInput(HTTPError):
code = 'invalid_input'
def __init__(self, reason):
super(InvalidInput, self).__init__(payload={
'invalid_input': reason
})
class MissingFields(HTTPError):
code = 'missing_fields'
def __init__(self, field_names):
super(MissingFields, self).__init__(payload={
'fields': field_names
})
class ResourceNotFound(HTTPError):
status_code = 404
code = 'not_found'
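# A minimal, hedged sketch of wiring these errors into a Flask app; the app,
# route and handler below are illustrative assumptions, not part of this module.
if __name__ == "__main__":
    from flask import Flask, jsonify

    app = Flask(__name__)

    @app.errorhandler(HTTPError)
    def handle_http_error(err):
        # Serialize the error envelope and reuse the class's status code.
        return jsonify(err.to_dict()), err.status_code

    @app.route("/missing")
    def missing():
        raise ResourceNotFound()

    app.run()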
|
__version__ = "0.0.1"
from .block import Block
from .service import SVCObj, SVC
|
"""
James Park, [email protected]
seoulai.com
2018
"""
import seoulai_gym as gym
import numpy as np
from seoulai_gym.envs.market.agents import Agent
from seoulai_gym.envs.market.base import Constants
from itertools import count
class RandomAgent(Agent):
def algo(
self,
state,
):
# print(state.keys())
return self.action(np.random.choice(range(self.action_spaces)))
def postprocess(
self,
obs,
action,
next_obs,
rewards,
):
pass
if __name__ == "__main__":
your_id = "random"
mode = Constants.HACKATHON # participants can select mode
""" 1. You must define dictionary of actions! (key = action_name, value = order_parameters)
your_actions = dict(
action_name1 = order_parameters 1,
action_name2 = order_parameters 2,
...
)
    2. Order parameters
        order_parameters = +10 means that your agent will buy 10 bitcoins.
        order_parameters = -20 means that your agent will sell 20 bitcoins.
        order_parameters = (+10, '%') means buying 10% of the available amount.
        order_parameters = (-20, '%') means selling 20% of the available amount.
3. If you want to add "hold" action, just define "your_hold_action_name = 0"
4. You must return dictionary of actions.
"""
    your_actions = dict(
holding = 0,
buy_all = (+100, '%'),
sell_all= (-100, '%'),
)
a1 = RandomAgent(
your_id,
your_actions,
)
env = gym.make("Market")
env.participate(your_id, mode)
obs = env.reset()
for t in count(): # Online RL
print(f"step {t}")
action = a1.act(obs) # Local function
next_obs, rewards, done, _= env.step(**action)
a1.postprocess(obs, action, next_obs, rewards)
print("ACTION", action)
print("REWARDS", rewards)
if done:
break
obs = next_obs
print(f"==========================================================================================")
|
#1) seqmut-1-1: Which of these is a correct reference diagram following the execution of the following code?
#A. I.
#2) seqmut-1-4: What will be the value of a after the following code has executed?
#a = ["holiday", "celebrate!"]
#quiet = a
#quiet.append("company")
#["holiday", "celebrate!", "company"]
#3) seqmut-1-5: Could aliasing cause potential confusion in this problem?
#b = ['q', 'u', 'i']
#z = b
#b[1] = 'i'
#z.remove('i')
#print(z)
#A. yes
#4) seqmut-1-13: Given that we want to accumulate the total sum of a list of numbers, which of the following accumulator patterns would be appropriate?
#C. III.
#5) seqmut-1-14: Given that we want to accumulate the total number of strings in the list, which of the following accumulator patterns would be appropriate?
#lst = ['plan', 'answer', 5, 9.29, 'order, items', [4]]
#s = 0
#for item in lst:
# if type(item) == type("string"):
# s = s + 1
#D. 4.
#6)seqmut-1-15: Which of these are good names for an accumulator variable? Select as many as apply.
#C. total
#D. accum
#7) seqmut-1-16: Which of these are good names for an iterator (loop) variable? Select as many as apply.
#A. item
#C. elem
#D. char
#8) seqmut-1-17: Which of these are good names for a sequence variable? Select as many as apply.
#A. num_lst
#C. sentence
#D. names
#9)seqmut-1-18: Given the following scenario, what are good names for the accumulator variable, iterator variable, and sequence variable? You are writing code that
# uses a list of sentences and accumulates the total number of sentences that have the word ‘happy’ in them.
# D. accumulator variable: total | iterator variable: sentence |sequence variable: sentence_lst
#10) For each character in the string saved in ael, append that character to a list that should be saved in a variable app.
#ael = "python!"
#app = list()
#for ch in ael:
# app.append(ch)
#11) For each string in wrds, add ‘ed’ to the end of the word (to make the word past tense). Save these past tense words to a list called past_wrds.
#wrds = ["end", 'work', "play", "start", "walk", "look", "open", "rain", "learn", "clean"]
#past_wrds = list()
#for word in wrds:
# past_wrds.append( word + "ed" )
#print(past_wrds)
|
#TODO
import numpy as np
import pywt
from pywt import wavedec
import matplotlib
from matplotlib import pyplot as plt
from skimage.restoration import (denoise_wavelet, estimate_sigma)
from skimage import data, img_as_float
from skimage.util import random_noise
from skimage.measure import compare_psnr
import logging

logger = logging.getLogger(__name__)  # used by coef_shrinkage_1D below
def decomp_vis(coefs, signal=None):
"""
    This function visualizes 1D wavelet decompositions.
coefs (list of np arrays): the decomposition coefficients from wavedec
signal (optional 1d np array): the original signal if desired to be plotted
"""
if signal is not None:
fig, ax = plt.subplots(len(coefs)+1, 1,constrained_layout =True)
        ax[0].plot(signal, c='orange')
ax[0].set_title('Original Signal')
else:
fig, ax = plt.subplots(len(coefs), 1,constrained_layout =True)
for i,coef in reversed(list(enumerate(coefs))):
if signal is not None: ind = len(coefs) -i
else: ind = i
ax[ind].plot(coef)
ax[ind].set_title('Decomposition Level ' + str(len(coefs) - i - 1) + ' Plot')
plt.show()
def decomp_2vis(coefs, im = None):
"""
    This function visualizes 2D wavelet decompositions.
coefs (list of np arrays): the decomposition coefficients from wavedec
im (optional 2d np array): the original image if desired to be plotted
"""
if im is not None:
fig, ax = plt.subplots(1, 2,constrained_layout =True)
        pos = ax[0].imshow(im)
ax[0].set_title('Original Image')
cbar = fig.colorbar(pos, ax=ax[0])
im_ = concat_2d_coefs(coefs)
pos = ax[1].imshow(im_ )
#pos = ax[1].imshow(im_, vmin = cbar.vmin, vmax = cbar.vmax)
        ax[1].set_title('Multiresolution 2D transformation')
fig.colorbar(pos, ax=ax[1])
else:
        plt.imshow(concat_2d_coefs(coefs))
        plt.title('Multiresolution 2D transformation')
        plt.colorbar()
        print('show plot now')
plt.show()
def concat_2d_coefs(coefs,verbose = False, limit = 100):
if verbose :
print('The shape of coef[0] = ' , coefs[0].shape)
print('The shape of coef[1][0] = ' , coefs[1][0].shape)
print('The shape of coef[1][1] = ' , coefs[1][1].shape)
print('The shape of coef[1][2] = ' , coefs[1][2].shape)
output = np.concatenate([
np.concatenate([coefs[0], coefs[1][0]] ,axis =1 ),
np.concatenate([coefs[1][1], coefs[1][2]] ,axis =1) ],
axis = 0 )
return output
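# A minimal, hedged sketch of feeding the helpers above; the camera image and
# the 'haar' level-1 decomposition are illustrative choices, not defaults.
def _demo_decomp_2vis():
    im = img_as_float(data.camera())
    coefs = pywt.wavedec2(im, 'haar', level=1)
    decomp_2vis(coefs, im)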
def naive_denoising(im):
"""
    This function uses a naive (built-in) approach to denoising images.
    Its performance can be improved considerably with a better choice of wavelet and threshold.
"""
im_bayes = denoise_wavelet(im, multichannel=True,method='BayesShrink', mode='soft')
return im_bayes
def coef_shrinkage_1D(cube,
baseline,
channel,
polarization,
wavelet,
n,
threshold,
tfix,
ttype):
"""
    This function performs 1D wavelet coefficient shrinkage on a slice of a hypercube.
    cube (np.array): the hypercube to be acted on
    baseline (int): the baseline number to be acted on
    channel (int): the channel number to be acted on
    polarization (int): the polarization to be acted on
    wavelet (str): the wavelet type to be used in analysis and synthesis
    n (int): the DWT depth
    threshold (str): the type of threshold to be used
    tfix (int): the fixed threshold, if used
    ttype (str): the type of thresholding to be applied (hard, soft)
"""
slice = cube[baseline,channel,:,polarization]
# Decomposition
coefs = pywt.wavedec(slice, wavelet,level =n)
# Theshold
if threshold == 'fixed':
denoised = coefs
for i,coef in enumerate(coefs):
denoised[i] = pywt.threshold(coef,tfix,ttype)
else:
logger.warning('No other wavelet thresholds have been impleted yet')
return
# Resynthesis
return pywt.waverec(denoised,wavelet)[:slice.shape[0]]
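# A minimal, hedged usage sketch for coef_shrinkage_1D. The hypercube shape
# (2 baselines, 4 channels, 256 samples, 2 polarizations), the 'db4' wavelet,
# n=3 and the fixed threshold value are illustrative assumptions, not defaults.
def _demo_coef_shrinkage():
    cube = np.random.randn(2, 4, 256, 2)
    denoised = coef_shrinkage_1D(cube, baseline=0, channel=1, polarization=0,
                                 wavelet='db4', n=3, threshold='fixed',
                                 tfix=0.5, ttype='soft')
    print(denoised.shape)  # (256,)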
if __name__ == "__main__":
main()
|
# third party lib
from flask import jsonify, make_response
import requests
import time
# internal lib
from lib.settings import ServerHTTP
from lib.client_registry import Registry
from client.routes.dispatcher import dispatcher
def create_response(code, msg=None):
"""
Convenience method for flask.make_response(flask.jsonify(msg), code)
If no message is supplied, a generic message will be returned based on the code provided
:param code: The HTTP status code
:param msg: Any message to be included in the response
"""
status = 'OK'
if code != 200:
status = 'error'
if msg is None:
if code == 200:
msg = 'success'
elif code == 404:
msg = 'Not found'
elif code == 400:
msg = 'User error; please check request'
else:
msg = 'Unknown error'
envelope = {
'status': status,
'msg': msg
}
return make_response(jsonify(envelope), code)
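# For example (hypothetical calls, shown for illustration only):
#   create_response(404)           -> {"status": "error", "msg": "Not found"}, 404
#   create_response(200, "saved")  -> {"status": "OK", "msg": "saved"}, 200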
def respond_to_server(path, verb, msg=None, client_name=None, env='production'):
if env == "production":
url = f'{get_protocol(env)}://{ServerHTTP.external_host}/{path}'
else:
url = f'{get_protocol(env)}://{ServerHTTP.external_host}:{ServerHTTP.port}/{path}'
s = requests.Session()
req = requests.Request(method=verb, url=url, data=msg, params={'client_name': client_name})
prepped = req.prepare()
s.send(prepped)
def get_protocol(env='production'):
if env == 'development':
return 'http'
return 'https'
def ask_til_answered(client, gap=5, msg=None, env="production"):
sleep_t = gap
tasks_so_far = [None]
def ready(msg):
if "task" not in msg:
return False
task_id = (msg["task"], msg["subtask"])
if task_id == tasks_so_far[-1]:
return False
tasks_so_far.append(task_id)
return True
answered = False
#client = Registry.get_instance().list_clients()[1]
if env == "production":
url = f"{get_protocol(env)}://{ServerHTTP.external_host}/api/process/info"
else:
url = f"{get_protocol(env)}://{ServerHTTP.external_host}:{ServerHTTP.port}/api/process/info"
while not answered:
try:
response = requests.get(url).json()
msg = response["msg"]
new_task = ready(msg)
if new_task:
dispatcher((msg["task"], msg["subtask"]), client, env, msg.get("data",None))
sleep_t = gap
            else:
                # exponential backoff: double the poll interval, capped at 60 s
                sleep_t = min(2 * sleep_t, 60)
except Exception as e:
print(e)
#logging.error("Error getting results")
#logging.error(e)
time.sleep(sleep_t)
return response
def message_clients(address, client_name=None, args=None, env='production', data=None):
"""depricated. Uses http to connect to the clients"""
pass
#clients = Registry.get_instance().list_clients()
#if client_name is None:
# client_list = clients
#else:
# client_list = list(filter(lambda x: x["name"] == client_name, clients))
#for client in client_list:
# if args is None:
# requests.post(f'{get_protocol(env)}://{client["external_host"]}:{client["port"]}/api/{address}',
# data=data)
# else:
# requests.post(f'{get_protocol(env)}://{client["external_host"]}:{client["port"]}/api/{address}',
# params=args, data=data)
|
from PIL import Image
import numpy
class PIC(object):
def __init__(self,fileN):
#initializes
self.fileN=fileN
self.pic=Image.open(fileN)
self.colors=numpy.array(self.pic)
self.ogColors=numpy.array(self.pic)
def ultra(self):
        #makes the largest color the only color if it is dominant
for x in range(0,len(self.colors)):
for y in range(0,len(self.colors[x])):
for z in range(0,3):
temp=self.colors[x][y][:3:]
if(((max(temp)-self.colors[x][y][z])<max(temp)/2) \
and (max(temp)!=self.colors[x][y][z])):
self.colors[x][y][z]=0
self.pic=Image.fromarray(self.colors)
self.pic.save("out/og/ultra"+self.fileN)
        #colors need to be set back to their initial values to prevent every
        #effect being applied
self.colors=self.ogColors.copy()
def ultra2(self):
        #makes the largest color the only color if it is dominant
for x in range(0,len(self.colors)):
for y in range(0,len(self.colors[x])):
for z in range(0,3):
temp=self.colors[x][y][:3:]
if(max(temp)!=self.colors[x][y][z]):
self.colors[x][y][z]= \
self.colors[x][y][z]-(self.colors[x][y][z]/2)
self.pic=Image.fromarray(self.colors)
self.pic.save("out/alt/altUltra"+self.fileN)
        #colors need to be set back to their initial values to prevent every
        #effect being applied
self.colors=self.ogColors.copy()
def halfColor(self):
#halves every pixel color
for x in range(0,len(self.colors)):
for y in range(0,len(self.colors[x])):
self.colors[x][y]=list(map(lambda x:x/2,self.colors[x][y]))
self.pic=Image.fromarray(self.colors)
self.pic.save("out/half/half"+self.fileN)
        #colors need to be set back to their initial values to prevent every
        #effect being applied
self.colors=self.ogColors.copy()
def gray(self):
#makes the image black and white
for x in range(0,len(self.colors)):
for y in range(0,len(self.colors[x])):
for z in range(0,len(self.colors[x][y])):
self.colors[x][y][z]= \
max(self.colors[x][y])/len(self.colors[x][y])
self.pic=Image.fromarray(self.colors)
self.pic.save("out/gray/gray"+self.fileN)
        #colors need to be set back to their initial values to prevent every
        #effect being applied
self.colors=self.ogColors.copy()
if __name__=="__main__":
#takes input
print("enter the file you want to edit")
fileN=input()
pic=PIC(fileN)
    #loop to keep running until stop is entered
command=""
while(command.lower()!="stop"):
print(
"choose an effect:\ndom for the dominant effect\n"+
"ext for the extreme effect\n"+
"half for the half effect\n"+
"gray for gray\n"+
"all for all\n"+
"enter stop to stop the program"
)
command=input()
#pick a function
if(command.lower()=="dom"):
pic.ultra()
elif(command.lower()=="ext"):
pic.ultra2()
elif(command.lower()=="half"):
pic.halfColor()
elif(command.lower()=="gray"):
pic.gray()
elif(command.lower()=="all"):
pic.ultra()
pic.ultra2()
pic.halfColor()
pic.gray()
|
"""
TCKDB backend app tests models test_freq module
"""
from tckdb.backend.app.models.freq import Freq
def test_freq_model():
"""Test creating an instance of Freq"""
freq1 = Freq(factor=0.99 * 1.014,
source='J.A. Montgomery, M.J. Frisch, J. Chem. Phys. 1999, 110, 2822–2827, DOI: 10.1063/1.477924')
assert freq1.factor == 0.99 * 1.014
assert freq1.source == 'J.A. Montgomery, M.J. Frisch, J. Chem. Phys. 1999, 110, 2822–2827, DOI: 10.1063/1.477924'
assert repr(freq1) == "<Freq(id=None, factor=1.00386, level_id=None, source='J.A. Montgomery, M.J. Frisch, " \
"J. Chem. Phys. 1999, 110, 2822–2827, DOI: 10.1063/1.477924')>"
assert str(freq1) == "<Freq(factor=1.00386, level_id=None, source='J.A. Montgomery, " \
"M.J. Frisch, J. Chem. Phys. 1999, 110, 2822–2827, DOI: 10.1063/1.477924')>"
freq2 = Freq(factor=0.98, source='Calculated using the Truhlar method')
assert str(freq2) == "<Freq(factor=0.98, level_id=None, source='Calculated using the Truhlar method')>"
freq3 = Freq(factor=0.98, source='Calculated using the Truhlar method')
assert str(freq3) == "<Freq(factor=0.98, level_id=None, source='Calculated using the Truhlar method')>"
freq4 = Freq(factor=0.98, source='Calculated using the Truhlar method')
assert str(freq4) == "<Freq(factor=0.98, level_id=None, source='Calculated using the Truhlar method')>"
|
import os
from flask import Flask
# Instantiate the app; the constructor parameters are:
# import_name,
# static_url_path=None,
# static_folder="static",
# static_host=None,
# host_matching=False,
# subdomain_matching=False,
# template_folder="templates",
# instance_path=None,
# instance_relative_config=False,
# root_path=None,
BASE_DIR = os.getcwd()
root_folder_path = os.path.abspath(os.path.join(BASE_DIR, ''))
static_folder_root = os.path.join(root_folder_path, "static")
# tip: why an absolute path is needed here - most apps are initialized in the
# startup script, so lookups relative to the project root work fine; this
# startup script lives elsewhere, so the absolute path must be given explicitly
app = Flask(__name__, static_folder = static_folder_root)
# app.register_routes_to_resources(static_folder_root)
app.config['JSON_AS_ASCII'] = False  # let jsonify return JSON strings containing Chinese (non-ASCII) characters
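# A minimal, hedged entry point for local testing; the host and port are
# illustrative assumptions, not part of the original configuration.
if __name__ == "__main__":
    app.run(host="127.0.0.1", port=5000)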
|
from transformer.harvardnlp_transformer import *
def data_gen(V, batch, nbatches, max_words_in_sentence):
"""
Generate random data for a src-tgt copy task.
# 5: # of sentences per batch == batch(2nd arg)
# 4: # of words in each sentence
# 7: size of word dictionary
np.random.randint(low=1, high=7, size=(5, 4)) # 5 by 4 matrix
>>> gen = data_gen(7, 5, 2, 4)
>>> batch0 = next(gen)
>>> batch0.src
>>> batch0.trg
>>> batch0.src.shape # [5, 4]
>>> batch0.ntokens # 15
tensor([[1, 2, 3, 2],
[1, 2, 1, 4],
[1, 2, 4, 5],
[1, 1, 2, 1],
[1, 2, 5, 5]]) # [5, 4]
>>> batch0.src_mask
tensor([[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]]], dtype=torch.uint8) # [5, 1, 4]
>>> batch0.trg
tensor([[1, 2, 3],
[1, 2, 1],
[1, 2, 4],
[1, 1, 2],
[1, 2, 5]]) # [5, 3]
>>> batch0.trg_y
tensor([[2, 3, 2],
[2, 1, 4],
[2, 4, 5],
[1, 2, 1],
[2, 5, 5]]) # [5, 3]
>>> batch0.trg_mask
tensor([[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]],
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]]], dtype=torch.uint8) # [5, 3, 3]
>>> batch0.ntokens # 15
>>> batch0.src.shape # (5, 4)
"""
for _ in range(nbatches):
data = torch.from_numpy(np.random.randint(low=1, high=V, size=(batch, max_words_in_sentence)))
data[:, 0] = 1 # 1 for first column
src = Variable(data, requires_grad=False).type(torch.long)
tgt = Variable(data, requires_grad=False).type(torch.long)
yield BatchP(src, tgt, 0)
def first_example():
class SimpleLossCompute:
"""A simple loss compute and train function."""
def __init__(self, generator, criterion, opt=None):
self.generator = generator
"""
converter from output of transformer unit to linear and log_softmax
shape converted
from (batch_size, max_words_in_sentence - 1, d_model)
to (batch_size, max_words_in_sentence - 1, size_dict)
"""
self.criterion = criterion # LabelSmoothing
self.opt = opt # NoamOpt
def __call__(self, x, y, norm):
"""
Parameters
----------
x: output of the transformer unit
y: batch.trg_y which has shape of (batch size, 'max number of tokens in sentence' - 1)
            norm: normalization factor, typically batch.ntokens (the number of target tokens in the batch)
Returns
-------
"""
            x = self.generator(x)  # log_softmax output with shape (batch_size, max_words_in_sentence - 1, V)
norm = norm.type(torch.FloatTensor) # added by kdw
loss = self.criterion(x.contiguous().view(-1, x.size(-1)), y.contiguous().view(-1)) / norm
loss.backward()
if self.opt is not None:
self.opt.step()
self.opt.optimizer.zero_grad()
# return loss.data[0] * norm
return loss.data.item() * norm
# Train the simple copy task.
V = size_dict # size of word dictionary
criterion = LabelSmoothingP(size=V, padding_idx=0, smoothing=0.0)
model = make_model_p(src_vocab=V, tgt_vocab=V, N=2)
model_opt = NoamOptP(
model_size=model.src_embed[0].d_model,
factor=1,
warmup=400, # lr will grow until 'warmup' steps and then shrink
optimizer=torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)
)
for epoch in range(10):
model.train()
run_epoch_p(
data_iter=data_gen(V=V, batch=batch_size, nbatches=3, max_words_in_sentence=max_words_in_sentence),
model=model,
loss_compute=SimpleLossCompute(model.generator, criterion, model_opt)
)
model.eval()
print(run_epoch_p(data_gen(V=V, batch=batch_size, nbatches=3, max_words_in_sentence=max_words_in_sentence), model,
SimpleLossCompute(model.generator, criterion, None)))
return model
# encoder layer v1 ##############################
def get_encoder_output(num_head, d_model):
"""
Encoder
Args:
num_head:
d_model:
Returns:
Examples:
>>> get_encoder_output(num_head, d_model)
"""
# 1st sublayer of 1st encoder
mha = MultiHeadedAttentionP(h=num_head, d_model=d_model)
norm = LayerNormP(d_model)
o_sub_layer1 = mha(norm(o_pe), norm(o_pe), norm(o_pe), batch0.src_mask) # [5, 4, 12]
# 2nd sublayer of 1st encoder
pff = PositionwiseFeedForwardP(d_model=d_model, d_ff=hidden_size_pff)
o_sub_layer2 = o_sub_layer1 + nn.Dropout(dropout_rate)(pff(norm(o_sub_layer1))) # [5, 4, 12]
return o_sub_layer2
##############################
# encoder layer v2 ##############################
def get_encoder_output(x, num_head, d_model, dropout_rate):
"""
Args:
x:
num_head:
d_model:
Returns:
Examples:
>>> get_encoder_output(o_pe, num_head, d_model, dropout_rate)
"""
class MySublayerConnection(nn.Module):
def __init__(self, size, dropout_rate):
super(MySublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x, sublayer):
return x + self.dropout(sublayer(self.norm(x)))
sublayers = clones_p(MySublayerConnection(d_model, dropout_rate), 2)
mha = MultiHeadedAttentionP(h=num_head, d_model=d_model) # 1st sublayer
o_sub_layer1 = sublayers[0](x, lambda x: mha(x, x, x, batch0.src_mask))
pff = PositionwiseFeedForwardP(d_model=d_model, d_ff=hidden_size_pff) # 2nd sublayer
o_sub_layer2 = sublayers[1](o_sub_layer1, pff)
return o_sub_layer2
##############################
# encoder layer v3 ##############################
class MySublayerConnection(nn.Module):
def __init__(self, size, dropout_rate):
super(MySublayerConnection, self).__init__()
self.norm = nn.LayerNorm(size)
self.dropout = nn.Dropout(dropout_rate)
def forward(self, x, sublayer):
return x + self.dropout(sublayer(self.norm(x)))
class MyEncoderLayer(nn.Module):
"""
>>> el = MyEncoderLayer(d_model, dropout_rate, num_head, hidden_size_pff)
>>> el.forward(o_pe, batch0.src_mask) # [5, 4, 12]
"""
def __init__(self, size, dropout_rate, num_head, hidden_size_pff):
"""
:param size: last dim of input and output
:param dropout_rate:
:param num_head:
:param hidden_size_pff:
"""
super(MyEncoderLayer, self).__init__()
self.size = size
self.sublayers = clones_p(MySublayerConnection(size, dropout_rate), 2)
self.mha = MultiHeadedAttentionP(h=num_head, d_model=size) # 1st sublayer
self.pff = PositionwiseFeedForwardP(d_model=size, d_ff=hidden_size_pff) # 2nd sublayer
def forward(self, x, mask):
o_sub_layer1 = self.sublayers[0](x, lambda x: self.mha(x, x, x, mask))
return self.sublayers[1](o_sub_layer1, self.pff)
##############################
# encoder v1 ##############################
class MyEncoder(nn.Module):
"""
>>> el = MyEncoderLayer(d_model, dropout_rate, num_head, hidden_size_pff)
>>> encoder = MyEncoder(el, 2)
>>> encoder.forward(o_pe, batch0.src_mask) # [5, 4, 12]
"""
def __init__(self, encoder, num_encoder):
super(MyEncoder, self).__init__()
self.encoders = clones_p(encoder, num_encoder)
self.norm = LayerNormP(encoder.size)
def forward(self, x, mask):
for encoder in self.encoders:
x = encoder(x, mask)
return self.norm(x)
# decoder layer v2 ##############################
class MyDecoderLayer(nn.Module):
"""
>>> dl = MyDecoderLayer(d_model, num_head)
>>> o_decoder_layer = dl.forward(o_pe_decoder=o_pe_decoder, src_mask=batch0.src_mask, trg_mask=batch0.trg_mask)
>>> o_decoder_layer.shape
"""
def __init__(self, size, num_head):
super(MyDecoderLayer, self).__init__()
self.size = size
self.self_attn = MultiHeadedAttentionP(h=num_head, d_model=size)
self.src_attn = MultiHeadedAttentionP(h=num_head, d_model=size)
self.pff = PositionwiseFeedForwardP(d_model=size, d_ff=hidden_size_pff)
self.sublayers = clones_p(MySublayerConnection(size, dropout_rate), 3)
def forward(self, o_prev_decoder, o_encoder, src_mask, trg_mask):
"""
o_prev_decoder could be output of embedding(+pe)
or output of previous decoder
"""
o_sublayer0 = self.sublayers[0](o_prev_decoder, lambda x: self.self_attn(
query=o_prev_decoder,
key=o_prev_decoder,
value=o_prev_decoder,
mask=trg_mask))
o_sublayer1 = self.sublayers[1](o_sublayer0, lambda x: self.src_attn(
query=o_sublayer0,
key=o_encoder,
value=o_encoder,
mask=src_mask)) # why use src_mask, not trg_mask???
        # -> the target-sentence information was already filtered in sublayers[0],
        #    so no further filtering seems necessary here.
        return self.sublayers[2](o_sublayer1, self.pff)
##############################
# decoder
class MyDecoder(nn.Module):
"""
>>> el = MyEncoderLayer(d_model, dropout_rate, num_head, hidden_size_pff)
>>> encoder = MyEncoder(el, 6)
>>> o_encoder = encoder.forward(o_pe_encoder, batch0.src_mask) # [5, 4, 12]
>>> decoder_layer = MyDecoderLayer(size=d_model, num_head=num_head)
>>> o_decoder = decoder_layer.forward(o_pe_decoder, o_encoder, batch0.src_mask, batch0.trg_mask)
>>> o_decoder.shape # [5, 3, 12]
"""
def __init__(self, decoder_layer, num_decoder):
super(MyDecoder, self).__init__()
self.decoders = clones_p(decoder_layer, num_decoder)
self.norm = LayerNormP(decoder_layer.size)
def forward(self, o_prev_decoder, o_encoder, src_mask, trg_mask):
for decoder in self.decoders:
o_prev_decoder = decoder(o_prev_decoder, o_encoder, src_mask, trg_mask)
return o_prev_decoder
if __name__ == '__main__':
# ##### params
max_words_in_sentence = 4 # of words in each sentence
batch_size = 5 # of sentences
size_dict = 7 # size of word dictionary
d_model = 12
hidden_size_pff = 11
num_head = 2
dropout_rate = 0.1
num_encoder_layer = 2
# small training example
first_example()
# ##### data that is composed of sentence which is sequence of word index.
np.random.seed(0)
gen_batch = data_gen(V=size_dict, batch=batch_size, nbatches=2, max_words_in_sentence=max_words_in_sentence)
batch0 = next(gen_batch)
# batch0.src; batch0.src.shape
# batch0.src_mask; batch0.src_mask.shape
# batch0.trg
# batch0.trg_mask
src = torch.tensor([[0, 3, 0, 2],
[1, 0, 3, 2],
[0, 0, 0, 1],
[1, 0, 0, 1],
[3, 2, 2, 1]]); src
src_mask = torch.tensor([[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]],
[[1, 1, 1, 1]]]); src_mask
# making input to encoder
em = EmbeddingsP(d_model=d_model, vocab=size_dict)
pe = PositionalEncodingP(d_model=d_model, dropout=0.)
o_pe = pe(em(src)) # input to encoder, [5, 4, 12]
# data for decoder
batch0.trg
batch0.trg_mask
el = MyEncoderLayer(d_model, dropout_rate, num_head, hidden_size_pff)
encoder = MyEncoder(el, 6)
o_encoder = encoder.forward(o_pe, batch0.src_mask) # [5, 4, 12]
    # making input to the decoder
em = EmbeddingsP(d_model=d_model, vocab=size_dict)
pe = PositionalEncodingP(d_model=d_model, dropout=dropout_rate)
    o_pe_decoder = pe(em(batch0.trg))  # input to decoder, [5, 3, 12]
# decoder layer v1 ##############################
self_attn = MultiHeadedAttentionP(h=num_head, d_model=d_model)
src_attn = MultiHeadedAttentionP(h=num_head, d_model=d_model)
pff = PositionwiseFeedForwardP(d_model=d_model, d_ff=hidden_size_pff)
    # first input: output of embedding (+ positional encoding) for the decoder
    # second input: output of the encoder, used as key and value;
    #               the output of the previous sublayer is used as query
sublayers = clones_p(MySublayerConnection(d_model, dropout_rate), 3)
o_sublayer0 = sublayers[0](o_pe_decoder, lambda x: self_attn(
query=o_pe_decoder,
key=o_pe_decoder,
value=o_pe_decoder,
mask=batch0.trg_mask))
o_sublayer1 = sublayers[1](o_sublayer0, lambda x: src_attn(
query=o_sublayer0,
key=o_encoder,
value=o_encoder,
        mask=batch0.src_mask))  # why use src_mask, not trg_mask???
    # -> the target-sentence information was already filtered in sublayers[0],
    #    so no further filtering seems necessary here.
o_sublayer2 = sublayers[2](o_sublayer1, pff)
o_sublayer2.shape
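    # --- hedged wrap-up sketch (not in the original file): the same decoder
    # computation expressed via the classes defined above.
    # decoder_layer = MyDecoderLayer(size=d_model, num_head=num_head)
    # decoder = MyDecoder(decoder_layer, num_decoder=2)
    # o_decoder = decoder(o_pe_decoder, o_encoder, batch0.src_mask, batch0.trg_mask)  # [5, 3, 12]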
|
from pathlib import Path
import pybullet as p
ASSETS_PATH = Path(__file__).resolve().parent / 'assets'
class EndEffector:
"""
A base class for UR5 end effectors.
"""
def __init__(self, urdf_path, load_position, load_orientation, ur5_install_joints, ee_tip_idx):
"""
The initialization of the UR5 end effector.
:param urdf_path: the path of the urdf that describes the end effector
:param load_position: the position to load the end effector
:param load_orientation: the orientation to load the end effector
:param ur5_install_joints: the home joints of the UR5 robot when installing this end effector
:param ee_tip_idx: the body id of this end effector
"""
self.urdf_path = urdf_path
self.load_position = load_position
self.load_orientation = load_orientation
self.ur5_install_joints = ur5_install_joints
self.ee_tip_idx = ee_tip_idx
# Loads the model.
self.body_id = p.loadURDF(str(urdf_path), load_position, load_orientation)
def get_body_id(self):
"""
Gets the body id of this end effector in PyBullet.
:return: the body id
"""
return self.body_id
def get_position_offset(self):
"""
Gets the position offset of this end effector.
:return: the position offset
"""
return 0, 0, 0
def get_ur5_install_joints(self):
"""
        Gets the home joints of the UR5 robot when installing this end effector.
:return: the UR5 home joints for this end effector
"""
return self.ur5_install_joints
def get_base_pose(self):
"""
Gets the base position and orientation of this end effector.
:return: the position and orientation of the base
"""
return p.getBasePositionAndOrientation(self.body_id)
def get_tip_pose(self):
"""
Gets the tip position and orientation of this end effector.
:return: the position and orientation of the tip
"""
state = p.getLinkState(self.body_id, self.ee_tip_idx)
return state[4], state[5]
def reset(self, reset_base=False):
"""
Resets this end effector.
:param reset_base: True if resetting the base pose, False otherwise
"""
if reset_base:
p.resetBasePositionAndOrientation(bodyUniqueId=self.body_id,
posObj=self.load_position,
ornObj=self.load_orientation)
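# --- hedged usage sketch (not part of the original file): exercising the base
# class headlessly; the URDF path and joint values below are illustrative
# assumptions, not real assets.
if __name__ == '__main__':
    p.connect(p.DIRECT)  # headless physics server
    ee = EndEffector(urdf_path=ASSETS_PATH / 'suction' / 'suction-head.urdf',  # hypothetical asset
                     load_position=(0.5, 0.0, 0.3),
                     load_orientation=p.getQuaternionFromEuler((0.0, 0.0, 0.0)),
                     ur5_install_joints=[0.0] * 6,  # placeholder home joints
                     ee_tip_idx=0)
    print(ee.get_base_pose())
    ee.reset(reset_base=True)
    p.disconnect()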
|
import numpy as np
import tensorflow as tf
import roi_pooling_layer.roi_pooling_op as roi_pool_op
import roi_pooling_layer.roi_pooling_op_grad
from rpn_msr.proposal_layer_tf import proposal_layer as proposal_layer_py
from rpn_msr.anchor_target_layer_tf import anchor_target_layer as anchor_target_layer_py
from rpn_msr.proposal_target_layer_tf import proposal_target_layer as proposal_target_layer_py
DEFAULT_PADDING = 'SAME'
def layer(op):
def layer_decorated(self, *args, **kwargs):
# Automatically set a name if not provided.
        # op.__name__ is the name of the wrapped op function, e.g. conv or max_pool
        # get_unique_name returns something like conv_4, stored in kwargs as name='conv_4'
name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
# Figure out the layer inputs.
if len(self.inputs)==0:
raise RuntimeError('No input variables found for layer %s.'%name)
        # exactly one pending input (the input layer), so take it directly
elif len(self.inputs)==1:
layer_input = self.inputs[0]
else:
layer_input = list(self.inputs)
# Perform the operation and get the output.
        # The actual conv/pool computation starts here, not in the decorated function definitions - that is why those definitions appear to take one argument fewer: the input is supplied here
layer_output = op(self, layer_input, *args, **kwargs)
# Add to layer LUT.
        # record this layer's output in self.layers under its name
self.layers[name] = layer_output
# This output is now the input for the next layer.
        # make this output the input of the next layer
self.feed(layer_output)
# Return self for chained calls.
return self
return layer_decorated
class Network(object):
def __init__(self, inputs, trainable=True):
self.inputs = []
self.layers = dict(inputs)
self.trainable = trainable
self.setup()
def setup(self):
raise NotImplementedError('Must be subclassed.')
def load(self, data_path, session, saver, ignore_missing=False):
if data_path.endswith('.ckpt'):
saver.restore(session, data_path)
else:
data_dict = np.load(data_path).item()
for key in data_dict:
with tf.variable_scope(key, reuse=True):
for subkey in data_dict[key]:
try:
var = tf.get_variable(subkey)
session.run(var.assign(data_dict[key][subkey]))
print "assign pretrain model "+subkey+ " to "+key
except ValueError:
print "ignore "+key
if not ignore_missing:
raise
    # *args collects extra unnamed arguments in a tuple; to accept keyword
    # arguments as well the signature would have to be feed(self, *args, **kwargs)
    # (**kwargs would be a dict); self.layers is a dict, self.inputs is a list
def feed(self, *args):
        # raise an error if no arguments were given
assert len(args)!=0
self.inputs = []
for layer in args:
            # if the given argument is a string, look the layer up by name
if isinstance(layer, basestring):
                # self.layers is overridden in VGGnet_train as a populated dict
try:
                    # resolve the name to the actual variable (still just a node in the dataflow graph; nothing runs yet)
layer = self.layers[layer]
print layer
except KeyError:
print self.layers.keys()
raise KeyError('Unknown layer name fed: %s'%layer)
            # store the resolved layer in the inputs list
self.inputs.append(layer)
return self
def get_output(self, layer):
try:
            # self.layers is overridden in VGGnet_train.py as a dict recording each layer's output
layer = self.layers[layer]
except KeyError:
print self.layers.keys()
raise KeyError('Unknown layer name fed: %s'%layer)
return layer
    # build a unique name; prefix is e.g. conv or max_pool
    # self.layers is a dict; items() makes it iterable for counting
def get_unique_name(self, prefix):
        # startswith() checks whether a string begins with the given prefix
        # count the existing keys starting with the prefix and add 1 to get the id
id = sum(t.startswith(prefix) for t,_ in self.layers.items())+1
        # returns something like conv_4
return '%s_%d'%(prefix, id)
    # create a variable the TensorFlow way
def make_var(self, name, shape, initializer=None, trainable=True):
return tf.get_variable(name, shape, initializer=initializer, trainable=trainable)
    # check that the padding type is acceptable
def validate_padding(self, padding):
assert padding in ('SAME', 'VALID')
@layer
    # because of the @layer decorator above, the real conv op is not executed here but inside layer_decorated
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True):
        # padding must be SAME or VALID
self.validate_padding(padding)
        # the last element of the shape is the depth
        # input has shape [batch, in_height, in_width, in_channels]
        # c_i and c_o are the input depth and the output depth (i.e. the number of kernels)
c_i = input.get_shape()[-1]
assert c_i%group==0
assert c_o%group==0
        # In conv2d, strides[0] and strides[3] must be 1 (strides over batch and depth);
        # i and k below stand for the input and the kernel
        # the lambda only defines the convolution (like a def). Internally conv2d:
        # 1. reshapes the filter to [filter_height*filter_width*in_channels, output_channels]
        # 2. extracts input patches into [batch, out_height, out_width, filter_height*filter_width*in_channels]
        # 3. computes sum_{di,dj,q} input[b, strides[1]*i+di, strides[2]*j+dj, q] * filter[di,dj,q,k]
convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
with tf.variable_scope(name) as scope:
            # truncated-normal weight initializer (just one choice of initializer), mean=0, stddev=0.01
init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
            # bias initializer, all zeros
init_biases = tf.constant_initializer(0.0)
            # make_var just wraps tf.get_variable
            # weight shape: [height, width, depth, number of kernels]
kernel = self.make_var('weights', [k_h, k_w, c_i/group, c_o], init_weights, trainable)
biases = self.make_var('biases', [c_o], init_biases, trainable)
if group==1:
conv = convolve(input, kernel)
else:
                # if group > 1, split input and kernel evenly into group groups along the 4th (depth) dimension
input_groups = tf.split(3, group, input)
kernel_groups = tf.split(3, group, kernel)
output_groups = [convolve(i, k) for i,k in zip(input_groups, kernel_groups)]
                # concatenate the per-group results back together
conv = tf.concat(3, output_groups)
if relu:
bias = tf.nn.bias_add(conv, biases)
return tf.nn.relu(bias, name=scope.name)
return tf.nn.bias_add(conv, biases, name=scope.name)
@layer
def relu(self, input, name):
return tf.nn.relu(input, name=name)
@layer
def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.max_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
name=name)
@layer
def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
self.validate_padding(padding)
return tf.nn.avg_pool(input,
ksize=[1, k_h, k_w, 1],
strides=[1, s_h, s_w, 1],
padding=padding,
                              name=name)
@layer
def proposal_layer(self, input, _feat_stride, anchor_scales, cfg_key, name):
"""
        Proposal layer: combines the anchor regression predictions with the fg
        anchors to produce proposals, then prunes invalid ones.
        """
        # cfg_key is TRAIN
if isinstance(input[0], tuple):
input[0] = input[0][0]
        # returns a blob [proposal index (all zeros), proposal] of shape (proposals.shape[0], 5): one index column plus 4 proposal coordinates
return tf.reshape(tf.py_func(proposal_layer_py,[input[0],input[1],input[2], cfg_key, _feat_stride, anchor_scales], [tf.float32]),[-1,5],name =name)
@layer
def roi_pool(self, input, pooled_height, pooled_width, spatial_scale, name):
"""
        The RoI pooling layer has two inputs:
        1. the original feature maps
        2. the proposal boxes output by the RPN (of varying sizes)
"""
# only use the first input
if isinstance(input[0], tuple):
input[0] = input[0][0]
if isinstance(input[1], tuple):
input[1] = input[1][1]
print input
return roi_pool_op.roi_pool(input[0], input[1], pooled_height, pooled_width, spatial_scale, name = name)[0]
@layer
def anchor_target_layer(self, input, _feat_stride, anchor_scales, name):
"""
        anchor_target_layer: corresponds to rpn-data; at training time it produces the inputs for rpn_cls_loss and rpn_loss_bbox:
        rpn_labels: [anchors.shape[0], 1], fg/bg label of every anchor (-1, 0, 1); -1 means excluded from training
        rpn_bbox_targets: [anchors.shape[0], 4], regression targets between each anchor and its gt box
        rpn_inside_weights, rpn_outside_weights: initial weights for fg/bg anchors
        input is a list of 'rpn_cls_score', 'gt_boxes', 'im_info', 'data'; input[0] is the rpn_cls_score
"""
if isinstance(input[0], tuple):
input[0] = input[0][0]
with tf.variable_scope(name) as scope:
            # tf.py_func wraps an arbitrary python function as a TensorFlow op: tf.py_func(func, inp, Tout, stateful=True, name=None)
            # func is the python function, inp the inputs (ndarrays), Tout the declared output types;
            # here inp is [input[0], input[1], input[2], input[3], _feat_stride, anchor_scales] and Tout is [tf.float32]*4
            # rpn_labels holds every anchor's label (-1, 0, 1)
            # rpn_bbox_targets holds every anchor's 4 regression values; for anchors labelled -1 all 4 are 0
            # rpn_bbox_inside_weights and rpn_bbox_outside_weights are two weight arrays with different initialization
            # convert the py_func outputs into proper tf.Tensors via tf.convert_to_tensor
rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(anchor_target_layer_py, [input[0], input[1], input[2], input[3], _feat_stride, anchor_scales], [tf.float32, tf.float32, tf.float32, tf.float32])
            rpn_labels = tf.convert_to_tensor(tf.cast(rpn_labels, tf.int32), name = 'rpn_labels')
            rpn_bbox_targets = tf.convert_to_tensor(rpn_bbox_targets, name = 'rpn_bbox_targets')
            rpn_bbox_inside_weights = tf.convert_to_tensor(rpn_bbox_inside_weights, name = 'rpn_bbox_inside_weights')
            rpn_bbox_outside_weights = tf.convert_to_tensor(rpn_bbox_outside_weights, name = 'rpn_bbox_outside_weights')
return rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights
@layer
def proposal_target_layer(self, input, classes, name):
"""
        proposal_target_layer: at training time produces the final classified proposals, plus bbox_targets (offsets to the gt boxes, used to refine the proposals) and the bbox_inside_weights / bbox_outside_weights matrices, each of size (len(rois), 4*21)
"""
if isinstance(input[0], tuple):
input[0] = input[0][0]
with tf.variable_scope(name) as scope:
            # produce the filtered rois and their labels, plus three (len(rois), 4*21) matrices: one gets (dx,dy,dw,dh) at the 4 slots of each fg-roi's class, the other two get the initial weights (1,1,1,1) at the same slots
rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = tf.py_func(proposal_target_layer_py, [input[0], input[1], classes], [tf.float32,tf.float32,tf.float32,tf.float32,tf.float32])
rois = tf.reshape(rois, [-1, 5], name = 'rois')
            # convert to proper tf.Tensors via tf.convert_to_tensor to make sure we are handling tensors
labels = tf.convert_to_tensor(tf.cast(labels,tf.int32), name = 'labels')
bbox_targets = tf.convert_to_tensor(bbox_targets, name = 'bbox_targets')
bbox_inside_weights = tf.convert_to_tensor(bbox_inside_weights, name = 'bbox_inside_weights')
bbox_outside_weights = tf.convert_to_tensor(bbox_outside_weights, name = 'bbox_outside_weights')
return rois, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights
@layer
def reshape_layer(self, input, d, name):
"""
        In caffe's basic blob data structure the layout is blob=[batch_size, channel*2, height, width].
        For the fg/bg binary softmax the blob is reshaped to [1, 2, channel*H, W],
        i.e. a dedicated dimension is freed up for the softmax, and it is reshaped back afterwards.
"""
input_shape = tf.shape(input)
if name == 'rpn_cls_prob_reshape':
            # restore the original layout of rpn_cls_prob_reshape
return tf.transpose(tf.reshape(tf.transpose(input,[0,3,1,2]),[input_shape[0],
int(d),tf.cast(tf.cast(input_shape[1],tf.float32)/tf.cast(d,tf.float32)*tf.cast(input_shape[3],tf.float32),tf.int32),input_shape[2]]),[0,2,3,1],name=name)
else:
            # e.g. an rpn_cls_score of shape [1,n,n,18] is reshaped to [1,9n,n,2]
            # with rpn_cls_score.shape [1,3,3,18] and elements range(3*3*18), the result pairs up as [0,81],[1,82],[2,83]...: the first 81 elements (3*3*9) are bg scores and the latter 81 the matching fg scores for each receptive field
            # a further softmax is still needed to turn these into fg/bg probabilities
return tf.transpose(tf.reshape(tf.transpose(input,[0,3,1,2]),[input_shape[0],
int(d),tf.cast(tf.cast(input_shape[1],tf.float32)*(tf.cast(input_shape[3],tf.float32)/tf.cast(d,tf.float32)),tf.int32),input_shape[2]]),[0,2,3,1],name=name)
@layer
def feature_extrapolating(self, input, scales_base, num_scale_base, num_per_octave, name):
return feature_extrapolating_op.feature_extrapolating(input, scales_base, num_scale_base, num_per_octave, name = name)
@layer
    def lrn(self, input, radius, alpha, beta, name, bias = 1.0):
        """
        Local response normalization, a regularizing normalization method
        """
return tf.nn.local_response_normalization(input, depth_radius=radius, alpha=alpha, beta=beta, bias=bias, name=name)
@layer
def concat(self, inputs, axis, name):
"""
        Returns a tensor
"""
return tf.concat(concat_dim = axis, values = inputs, name = name)
@layer
def fc(self, input, num_out, name, relu = True, trainable = True):
with tf.variable_scope(name) as scope:
# only use the first input
if isinstance(input, tuple):
input = input[0]
input_shape = input.get_shape()
if input_shape.ndims == 4:
                # flatten the feature maps into feature vectors
dim = 1
for d in input_shape[1:].as_list():
dim *= d
feed_in = tf.reshape(tf.transpose(input, [0, 3, 1, 2]), [-1, dim])
else:
                # the input is already a feature vector
feed_in, dim = (input, int(input_shape[-1]))
if name == 'bbox_pred':
init_weights = tf.truncated_normal_initializer(0.0, stddev = 0.001)
init_biases = tf.constant_initializer(0.0)
else:
init_weights = tf.truncated_normal_initializer(0.0, stddev = 0.01)
init_biases = tf.constant_initializer(0.0)
weights = self.make_var('weights', [dim, num_out], init_weights, trainable)
biases = self.make_var('biases', [num_out], init_biases, trainable)
op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
fc = op(feed_in, weights, biases, name = scope.name)
return fc
@layer
    # multinomial logistic regression
def softmax(self, input, name):
input_shape = tf.shape(input)
if name == 'rpn_cls_prob':
            # apply softmax to the raw scores
return tf.reshape(tf.nn.softmax(tf.reshape(input,[-1,input_shape[3]])),[-1,input_shape[1],input_shape[2],input_shape[3]],name=name)
else:
return tf.nn.softmax(input, name = name)
@layer
    # dropout to prevent overfitting
def dropout(self, input, keep_prob, name):
return tf.nn.dropout(input, keep_prob, name = name) |
import math
from cvat.apps.engine.ddln.geometry import Line, Point, PolarPoint, get_angle_between
from ..models import Runway
def iterate_runways(reader, reporter):
for row in reader._reader:
runway_id, *lines_data = row
if len(lines_data) != 12: # 6 lines, each line is represented by 2 values
reporter.report_wrong_values_amount(12, len(lines_data))
continue
left = from_row(lines_data[0:2], reader.image_width, reader.image_height)
right = from_row(lines_data[2:4], reader.image_width, reader.image_height)
center = from_row(lines_data[4:6], reader.image_width, reader.image_height)
start = from_row(lines_data[6:8], reader.image_width, reader.image_height)
end = from_row(lines_data[8:10], reader.image_width, reader.image_height)
designator = from_row(lines_data[10:12], reader.image_width, reader.image_height)
runway = Runway(runway_id, left, right, center, start, end, designator)
runway.calculate_vanishing_points(reporter)
try:
fake_invisible_lines(runway)
except ValueError as e:
reporter._report(e.args[0])
continue
yield runway
def write_runway(runway: Runway, writer):
writer._writer.writerow((
runway.id,
*as_row(runway.left_line, writer.image_width, writer.image_height),
*as_row(runway.right_line, writer.image_width, writer.image_height),
*as_row(runway.center_line, writer.image_width, writer.image_height),
*as_row(runway.start_line, writer.image_width, writer.image_height),
*as_row(runway.end_line, writer.image_width, writer.image_height),
*as_row(runway.designator_line, writer.image_width, writer.image_height),
))
def from_row(row, width, height):
angle, distance = row
if angle == '' and distance == '':
return None
unit = height / 2
center = Point(width / 2, height / 2)
angle = -float(angle)
distance = float(distance)
distance = unit * distance
angle = angle * math.pi / 180
polar_point = PolarPoint(distance, angle + math.pi / 2)
touch_point = polar_point.to_cartesian_coordinates(center)
return Line.by_point_and_angle(touch_point, angle)
def as_row(line, width, height):
if not line:
return '', ''
unit = height / 2
center = Point(width / 2, height / 2)
horizontal_line = Line.by_point_and_angle(center, 0)
distance = center.distance_to(line)
# need minus here, because CVAT y axis is opposite to output y axis
angle = -line.get_angle()
intersection = line.intersect(horizontal_line)
if intersection:
if intersection.x < width / 2:
angle += math.pi
else:
vertical_line = Line.by_point_and_angle(center, math.pi / 2)
vertical_intersection = line.intersect(vertical_line)
if vertical_intersection.y < height / 2:
angle += math.pi
distance = distance / unit
angle = angle % (2 * math.pi)
angle = angle * 180 / math.pi
angle = format(angle, ".2f")
distance = format(distance, ".6f")
return angle, distance
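# --- hedged round-trip sketch (not in the original file; the 1280x720 frame
# size and the row values are illustrative assumptions) ---
# line = from_row(("12.50", "0.400000"), 1280, 720)   # (angle, distance) row -> Line
# row = as_row(line, 1280, 720)                       # Line -> (angle, distance) strings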
def fake_invisible_lines(runway):
# Coordinates for non-visible lines are not stored in csv,
# so have to guess them
lon_lines = guess_lines(runway.lon_vanishing_point, runway.left_line, runway.center_line, runway.right_line)
runway.left_line, runway.center_line, runway.right_line = lon_lines
lat_lines = guess_lines(runway.lat_vanishing_point, runway.start_line, runway.designator_line, runway.end_line)
runway.start_line, runway.designator_line, runway.end_line = lat_lines
def guess_lines(vanishing_point, first, second, third):
missing_lines_amount = sum(line is None for line in [first, second, third])
if missing_lines_amount == 0:
return first, second, third
if missing_lines_amount == 1:
if vanishing_point:
if not first:
dphi = get_angle_between(second.get_angle(), third.get_angle())
first = Line.by_point_and_angle(vanishing_point, second.get_angle() - dphi)
elif not second:
dphi = get_angle_between(first.get_angle(), third.get_angle()) / 2
second = Line.by_point_and_angle(vanishing_point, first.get_angle() + dphi)
elif not third:
dphi = get_angle_between(first.get_angle(), second.get_angle())
third = Line.by_point_and_angle(vanishing_point, second.get_angle() + dphi)
else:
if not first:
dx = third.c - second.c
first = Line(second.a, second.b, second.c - dx)
elif not second:
dx = (third.c - first.c) / 2
second = Line(third.a, third.b, third.c - dx)
elif not third:
dx = second.c - first.c
third = Line(second.a, second.b, second.c + dx)
return first, second, third
if missing_lines_amount == 2:
if vanishing_point:
dphi = 0.175
if first:
second = Line.by_point_and_angle(vanishing_point, first.get_angle() + dphi)
third = Line.by_point_and_angle(vanishing_point, second.get_angle() + dphi)
elif second:
first = Line.by_point_and_angle(vanishing_point, second.get_angle() - dphi)
third = Line.by_point_and_angle(vanishing_point, second.get_angle() + dphi)
elif third:
second = Line.by_point_and_angle(vanishing_point, third.get_angle() - dphi)
first = Line.by_point_and_angle(vanishing_point, second.get_angle() - dphi)
else:
dx = 10
if first:
second = Line(first.a, first.b, first.c + dx)
third = Line(second.a, second.b, second.c + dx)
elif second:
first = Line(second.a, second.b, second.c - dx)
third = Line(second.a, second.b, second.c + dx)
elif third:
second = Line(third.a, third.b, third.c - dx)
first = Line(second.a, second.b, second.c - dx)
return first, second, third
raise ValueError("All lines cannot be invisible")
|
from ipywidgets import Box, Layout
class MenpoWidget(Box):
r"""
Base class for defining a Menpo widget.
The widget has a `selected_values` trait that can be used in order to
inspect any changes that occur to its children. It also has functionality
for adding, removing, replacing or calling the handler callback function of
the `selected_values` trait.
Parameters
----------
children : `list` of `ipywidgets`
The `list` of `ipywidgets` objects to be set as children in the
`ipywidgets.Box`.
trait : `traitlets.TraitType` subclass
The type of the `selected_values` object that gets added as a trait
in the widget. Possible options from `traitlets` are {``Int``, ``Float``,
``Dict``, ``List``, ``Tuple``}.
trait_initial_value : `int` or `float` or `dict` or `list` or `tuple`
The initial value of the `selected_values` trait.
render_function : `callable` or ``None``, optional
The render function that behaves as a callback handler of the
`selected_values` trait for the `change` event. Its signature can be
``render_function()`` or ``render_function(change)``, where ``change``
is a `dict` with the following keys:
- ``owner`` : the `HasTraits` instance
- ``old`` : the old value of the modified trait attribute
- ``new`` : the new value of the modified trait attribute
- ``name`` : the name of the modified trait attribute.
- ``type`` : ``'change'``
If ``None``, then nothing is added.
"""
def __init__(self, children, trait, trait_initial_value, render_function=None):
# Create box object
super(MenpoWidget, self).__init__(children=children)
# Add trait for selected values
selected_values = trait(default_value=trait_initial_value)
selected_values_trait = {"selected_values": selected_values}
self.add_traits(**selected_values_trait)
self.selected_values = trait_initial_value
# Set render function
self._render_function = None
self.add_render_function(render_function)
def add_render_function(self, render_function):
r"""
Method that adds the provided `render_function()` as a callback handler
to the `selected_values` trait of the widget. The given function is
also stored in `self._render_function`.
Parameters
----------
render_function : `callable` or ``None``, optional
The render function that behaves as a callback handler of the
`selected_values` trait for the `change` event. Its signature can be
``render_function()`` or ``render_function(change)``, where
``change`` is a `dict` with the following keys:
- ``owner`` : the `HasTraits` instance
- ``old`` : the old value of the modified trait attribute
- ``new`` : the new value of the modified trait attribute
- ``name`` : the name of the modified trait attribute.
- ``type`` : ``'change'``
If ``None``, then nothing is added.
"""
self._render_function = render_function
if self._render_function is not None:
self.observe(self._render_function, names="selected_values", type="change")
def remove_render_function(self):
r"""
Method that removes the current `self._render_function()` as a callback
handler to the `selected_values` trait of the widget and sets
``self._render_function = None``.
"""
if self._render_function is not None:
self.unobserve(
self._render_function, names="selected_values", type="change"
)
self._render_function = None
def replace_render_function(self, render_function):
r"""
Method that replaces the current `self._render_function()` with the
given `render_function()` as a callback handler to the `selected_values`
trait of the widget.
Parameters
----------
render_function : `callable` or ``None``, optional
The render function that behaves as a callback handler of the
`selected_values` trait for the `change` event. Its signature can be
``render_function()`` or ``render_function(change)``, where
``change`` is a `dict` with the following keys:
- ``owner`` : the `HasTraits` instance
- ``old`` : the old value of the modified trait attribute
- ``new`` : the new value of the modified trait attribute
- ``name`` : the name of the modified trait attribute.
- ``type`` : ``'change'``
If ``None``, then nothing is added.
"""
# remove old function
self.remove_render_function()
# add new function
self.add_render_function(render_function)
def call_render_function(self, old_value, new_value, type_value="change"):
r"""
Method that calls the existing `render_function()` callback handler.
Parameters
----------
old_value : `int` or `float` or `dict` or `list` or `tuple`
The old `selected_values` value.
new_value : `int` or `float` or `dict` or `list` or `tuple`
The new `selected_values` value.
type_value : `str`, optional
The trait event type.
"""
if self._render_function is not None:
change_dict = {
"type": "change",
"old": old_value,
"name": type_value,
"new": new_value,
"owner": self.__str__(),
}
self._render_function(change_dict)
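# --- hedged usage sketch (not part of the original module): a minimal
# MenpoWidget subclass; the widget name and layout are illustrative.
# from traitlets import Int
# import ipywidgets as ipy
#
# class CounterWidget(MenpoWidget):
#     def __init__(self, render_function=None):
#         self.button = ipy.Button(description='+1')
#         super(CounterWidget, self).__init__(
#             children=[self.button], trait=Int, trait_initial_value=0,
#             render_function=render_function)
#         self.button.on_click(lambda _: setattr(
#             self, 'selected_values', self.selected_values + 1))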
|
r"""
Neighbor Lookups
================
"""
import numpy as np
from openpnm.utils import logging
logger = logging.getLogger(__name__)
__all__ = ['from_neighbor_throats', 'from_neighbor_pores']
def from_neighbor_throats(target, prop, mode='min', ignore_nans=True):
r"""
Adopt a value from the values found in neighboring throats
Parameters
----------
target : Base
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
prop : str
The dictionary key of the array containing the throat property to be
used in the calculation.
mode : str
Controls how the pore property is calculated. Options are 'min',
        'max' and 'mean'.
    ignore_nans : bool (default is ``True``)
        If ``True`` the result will ignore ``nans`` in the neighbors.
Returns
-------
value : ndarray
Array containing customized values based on those of adjacent throats.
"""
prj = target.project
network = prj.network
boss = prj.find_full_domain(target)
    data = np.copy(boss[prop])  # copy so the nan substitution below does not mutate the stored array
nans = np.isnan(data)
im = network.create_incidence_matrix()
if mode == 'min':
if ignore_nans:
data[nans] = np.inf
values = np.ones((network.Np, ))*np.inf
np.minimum.at(values, im.row, data[im.col])
if mode == 'max':
if ignore_nans:
data[nans] = -np.inf
values = np.ones((network.Np, ))*-np.inf
np.maximum.at(values, im.row, data[im.col])
if mode == 'mean':
if ignore_nans:
data[nans] = 0
values = np.zeros((network.Np, ))
np.add.at(values, im.row, data[im.col])
counts = np.zeros((network.Np, ))
np.add.at(counts, im.row, np.ones((network.Nt, ))[im.col])
if ignore_nans:
np.subtract.at(counts, im.row, nans[im.col])
values = values/counts
Ps = boss.pores(target.name)
return np.array(values)[Ps]
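# --- hedged usage sketch (assumes OpenPNM ~2.x; class and property names
# follow its documented examples and are not part of this file) ---
# import openpnm as op
# pn = op.network.Cubic(shape=[3, 3, 3])
# geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
# geo.add_model(propname='pore.max_throat_diameter',
#               model=from_neighbor_throats,
#               prop='throat.diameter', mode='max')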
def from_neighbor_pores(target, prop, mode='min', ignore_nans=True):
r"""
Adopt a value based on the values in neighboring pores
Parameters
----------
target : Base
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
prop : str
The dictionary key to the array containing the pore property to be
used in the calculation.
mode : str
Controls how the throat property is calculated. Options are 'min',
'max' and 'mean'.
ignore_nans : bool (default is ``True``)
If ``True`` the result will ignore ``nans`` in the neighbors
Returns
-------
value : ndarray
Array containing customized values based on those of adjacent pores.
"""
prj = target.project
lookup = prj.find_full_domain(target)
network = prj.network
domain = target._domain
throats = domain.throats(target.name)
P12 = network.find_connected_pores(throats)
pvalues = lookup[prop][P12]
if ignore_nans:
pvalues = np.ma.MaskedArray(data=pvalues, mask=np.isnan(pvalues))
try: # If pvalues is not empty
if mode == 'min':
value = np.amin(pvalues, axis=1)
if mode == 'max':
value = np.amax(pvalues, axis=1)
if mode == 'mean':
value = np.mean(pvalues, axis=1)
except np.AxisError: # Handle case of empty pvalues
value = []
return np.array(value)
|
from typing import List


class Solution:
def repeatedNTimes(self, nums: List[int]) -> int:
        seen = set()
        for num in nums:
            if num not in seen:
                seen.add(num)
else:
return num |
#!/usr/bin/env python
import sys
from setuptools import setup
import os
os.environ['COPY_EXTENDED_ATTRIBUTES_DISABLE'] = 'true'
os.environ['COPYFILE_DISABLE'] = 'true'
version = "1.0.8"
config = dict(
name = 'pydot3',
packages = ['pydot'],
version = version,
description = 'Python 3 interface to Graphviz\'s Dot',
author = 'Eric Chio',
author_email = '[email protected]',
url = 'http://www.github.com/log0/pydot3/',
download_url = 'https://github.com/log0/pydot3/archive/%s.zip' % version,
license = 'MIT',
keywords = 'graphviz dot graphs visualization pydot',
platforms = ['any'],
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Scientific/Engineering :: Visualization',
'Topic :: Software Development :: Libraries :: Python Modules'],
install_requires = ['pyparsing', 'setuptools'],
data_files = [('.', ['LICENSE', 'README.md'])])
if sys.version_info >= (3,):
config.update(dict(
use_2to3=True,
))
setup(**config)
|
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_processing_id import v3ProcessingID as v3ProcessingID_
__all__ = ["v3ProcessingID"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3ProcessingID(v3ProcessingID_):
"""
v3 Code System ProcessingID
Codes used to specify whether a message is part of a production,
training, or debugging system.
Status: active - Version: 2018-08-12
http://terminology.hl7.org/ValueSet/v3-ProcessingID
"""
class Meta:
resource = _resource
|
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.special import erf
###########################################
# load neutrino spectra
###########################################
PATH_abs = os.path.dirname(os.path.abspath(__file__))
flist_neuflux = [
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_pp.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_hep.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_8B.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_13N.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_15O.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_17F.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_lines.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/DSNB_spec.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/Galactic_SN_spec.dat",
PATH_abs + "/Background_inputs/NeutrinoFluxes/CAJOH_Atm.dat",
]
Enu_vec = np.logspace(-3, 6, 9001)
Fnu_vec_tot = np.zeros(Enu_vec.shape)
# interpolate and sum solar fluxes
Fnu_vec_solar = np.zeros(Enu_vec.shape)
for i in range(0, 6):
in_flux = np.loadtxt(flist_neuflux[i])
Fnu_vec_solar += np.exp(
np.interp(
np.log(Enu_vec),
np.log(in_flux[:, 0]),
np.log(in_flux[:, 1]),
left=-999.0,
right=-999.0,
)
)
# add monochromatic spectra
in_lines = np.loadtxt(flist_neuflux[6])
for i in range(len(in_lines[:, 0])):
Fnu_vec_temp = np.zeros(Enu_vec.shape)
Eind = np.argmin(np.abs(Enu_vec - in_lines[i, 0]))
Fnu_vec_temp[Eind] = 1.0
Fnu_vec_solar += in_lines[i, 1] / np.trapz(Fnu_vec_temp, x=Enu_vec) * Fnu_vec_temp
Fnu_vec_tot += Fnu_vec_solar
# DSNB flux
in_flux = np.loadtxt(flist_neuflux[7])
Fnu_vec_DSNB = np.exp(
np.interp(
np.log(Enu_vec),
np.log(in_flux[:, 0]),
np.log(in_flux[:, 1]),
left=-999.0,
right=-999.0,
)
)
Fnu_vec_tot += Fnu_vec_DSNB
# Galactic SNB flux
in_flux = np.loadtxt(flist_neuflux[8])
Fnu_vec_GSNB = np.exp(
np.interp(
np.log(Enu_vec),
np.log(in_flux[:, 0]),
np.log(in_flux[:, 1]),
left=-999.0,
right=-999.0,
)
)
Fnu_vec_tot += Fnu_vec_GSNB
# atmospheric flux
in_flux = np.loadtxt(flist_neuflux[9])
Fnu_vec_Atm = np.exp(
np.interp(
np.log(Enu_vec),
np.log(in_flux[:, 0]),
np.log(in_flux[:, 1]),
left=-999.0,
right=-999.0,
)
)
Fnu_vec_tot += Fnu_vec_Atm
###########################################
# functions
###########################################
from paleoSpec.helper_functions import F2helm
def diff_xsec(ER, Enu, mN, AN, ZN):
"""
returns the differential scattering cross section
per nuclear recoil energy ER for a neutrino with
energy Enu scattering elastically (CEvNS) off a
nucleus with N
input:
ER - nuclear recoil energy in [keV]
Enu - neutrino energy in [MeV]
mN - mass of N in [GeV]
AN - atomic number of N
ZN - number of protons in N
output:
differential cross section in [cm^2/keV]
"""
# constants
GF = 1.1663787e-5 # Fermi constant in [1/GeV^2]
sin2W = 0.23121 # weak mixing angle
hbarc_GeVcm = 1.97e-14 # in [GeV cm]
# unit conversion
GeVPERkeV = 1e-6
#
unitconv = hbarc_GeVcm ** 2 * GeVPERkeV
prefac = GF ** 2 / (4.0 * np.pi)
QN2 = (AN - ZN - (1.0 - 4.0 * sin2W) * ZN) ** 2
xsec = (
QN2 * mN * (1.0 - mN * ER / (2.0 * Enu ** 2)) * F2helm(2.0 * 1e6 * mN * ER, AN)
)
xsec = np.clip(xsec, 0, 1e30)
return unitconv * prefac * xsec
def dRdE(mN, AN, ZN, xiN, Fnu=Fnu_vec_tot):
"""
returns a tuple
[recoil energies, differential rate of recoil events
per unit recoil energy and unit target mass]
for the target nucleus N from the neutrino background
inputs:
mN - mass of N in [GeV]
AN - atomic number of N
ZN - number of protons in N
xiN - mass fraction N comprises of the target
---- optional
Fnu - spectral neutrino flux,
default is the total flux computed above
output:
E [keV]
(dR/dE)_N in [dru = 1/keV/kg/day]
"""
# unit conversion
GeVPERkg = 5.61e26
sPERday = 8.64e4
# let's go
unitconv = GeVPERkg * sPERday
prefac = xiN / mN
Evec = np.logspace(-3, 3, 1201) # grid of recoil energies in keV
dRdE = np.zeros(len(Evec)) # create variable for result
for i in range(len(dRdE)):
Enu_min = np.sqrt(mN * Evec[i] / 2.0)
inds = np.where(Enu_vec > Enu_min)[0]
integrand = diff_xsec(Evec[i], Enu_vec[inds], mN, AN, ZN) * Fnu[inds]
dRdE[i] = np.trapz(integrand, x=Enu_vec[inds])
dRdE = unitconv * prefac * dRdE
return Evec, dRdE
def dRdE_solar(mN, AN, ZN, xiN):
return dRdE(mN, AN, ZN, xiN, Fnu_vec_solar)
def dRdE_DSNB(mN, AN, ZN, xiN):
return dRdE(mN, AN, ZN, xiN, Fnu_vec_DSNB)
def dRdE_GSNB(mN, AN, ZN, xiN):
return dRdE(mN, AN, ZN, xiN, Fnu_vec_GSNB)
def dRdE_atm(mN, AN, ZN, xiN):
return dRdE(mN, AN, ZN, xiN, Fnu_vec_Atm)
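# --- hedged usage sketch (not in the original file): solar-neutrino CEvNS
# rate on a Xe-131 target; mN ~ 131 * 0.9315 GeV is an approximation and
# xiN=1.0 assumes a single-isotope target, both illustrative.
if __name__ == "__main__":
    Evec, rate = dRdE_solar(mN=131 * 0.9315, AN=131, ZN=54, xiN=1.0)
    plt.loglog(Evec, rate)
    plt.xlabel("E_R [keV]")
    plt.ylabel("dR/dE [1/keV/kg/day]")
    plt.show()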
|
'''
Runtime variables are kept here temporarily
'''
# tells the whole program whether downloading has stopped
Down_Satuation = False
# directory pool
DirPool = {}
# file pool
FilePool = {}
# number of directories already browsed
seen_dir_count = 0
# number of directories currently being browsed
seeing_dir_count = 0
# number of directories found
Dir_count = 0
# number of files found
File_count = 0
# number of files processed
seeing_file_count = 0
# RPC response text; convert to str first, then replace "'" with '"' before use
RPC_satuation = {}
RPC_error = {}
# number of download-info entries
RPC_count = 0
# download info (url, path, name)
# name, total size, downloaded size, path, status
# {name, total_size, d_size, path, url, satuation}
RPC_down = {}
# current page number
seeing_page = 1
# number of completed downloads
DdCount = 0
# running count of downloads
dingcount = 1
pageMutiTask = 1
# download threads
DownThread = 5
# max concurrent download tasks
DownMaxTask = 3
REV = '3.6'
about = '''
NTG 2021 - all rights reserved (personal)
QQ:3052381496
Version: ''' + REV + '''
Ad space for rent
Licensed to: MoeFireStudio
Special thanks: YuanTuo666
Test period ends September 12
'''
UpdateUrl = 'https://ntgpro.lanzoui.com/iROFHst1abe'
sayUrl = 'https://ntgpro.lanzoui.com/iCpKZst1aha'
msgBox = 'https://ntgpro.lanzoui.com/iBA6jst1ave' |
class TaskInstanceConfig(object):
def __init__(self, task_config):
self.cpu = task_config.cpu
self.memory = task_config.memory
self.disk = task_config.disk
self.gpu = task_config.gpu
self.gpu_memory = task_config.gpu_memory
self.duration = task_config.duration
self.submit_time = task_config.submit_time
self.gpu_type_require = task_config.gpu_type_require
class TaskConfig(object):
def __init__(self, task_index, instances_number, cpu, memory, disk, gpu, gpu_memory, duration, submit_time, gpu_type_require,parent_indices=None):
self.task_index = task_index
self.instances_number = instances_number
self.cpu = cpu
self.memory = memory
self.disk = disk
self.gpu = gpu
self.gpu_memory = gpu_memory
self.duration = duration
self.submit_time = submit_time
self.gpu_type_require = gpu_type_require
self.parent_indices = parent_indices
def printState(self):
checkPrefix = "[检查task]: "
taskIndexStr = "task_index: " + str(self.task_index) + '; '
durationStr = "duration: " + str(self.duration) + '; '
cpuStr = "cpu: " + str(self.cpu) + '; '
cpuMemoryStr = "memory: " + str(self.memory) + '; '
gpuStr = "gpu: " + str(self.gpu) + '; '
gpuMemoryStr = "gpu_memory: " + str(self.gpu_memory) + '; '
submitTimeStr = "submit_time: " + str(self.submit_time) + '; '
gpuTypeRequireStr = 'gpu_type_require: ' + str(self.gpu_type_require.name) + ' with id: ' + str(self.gpu_type_require.value) + '; '
return checkPrefix + taskIndexStr + durationStr + cpuStr + cpuMemoryStr + gpuStr + gpuMemoryStr + submitTimeStr + gpuTypeRequireStr
class JobConfig(object):
def __init__(self, idx, submit_time, task_configs):
self.submit_time = submit_time
self.task_configs = task_configs
self.id = idx
def printState(self):
return "[检查job]: job_id: " + str(self.id) + " 当前job提交时间: " + str(self.submit_time) + " taskConfig:" + str(self.printTasksDetail())
def printTasksDetail(self):
res = "\n"
for task in self.task_configs:
res += ("\t" + task.printState() + "\n")
return res |
from flask_restful import Resource
from flask import jsonify
from injector import inject
from backend_application.service import ProjectService
class ProjectList(Resource):
@inject
def __init__(self, project_service: ProjectService):
self.project_service = project_service
""" returns list of projects with their id name, and description"""
def get(self, portfolio_key: str):
projects = self.project_service.get_projects_by_portfolio(portfolio_key)
result = jsonify([{
'id': p['_key'],
'name': p['name'],
'description': p['description'],
'language': p['language'],
'homePage': p['home_page'],
'htmlUrl': p['html_url']
} for p in projects])
return result
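# --- hedged wiring sketch (assumes a flask_restful Api instance and an
# injector binding for ProjectService exist elsewhere; the route path is
# illustrative) ---
# api.add_resource(ProjectList, '/portfolios/<string:portfolio_key>/projects')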
|
from openpyxl import load_workbook
filename = 'aalh_iit_buildings_011.xlsx'
wb = load_workbook(filename)
ws = wb['Metadata Template']
minimumcol = 2
maximumcol = 2
minimumrow = 7
maximumrow = 1343
iterationrow = 7
titlecol = 2
covcol = 10
for row in ws.iter_rows(min_row=minimumrow, min_col=minimumcol, max_row=maximumrow, max_col=maximumcol):
testvar = ws.cell(row=iterationrow, column=covcol).value
for cell in row:
if testvar.find('St. Clair') != -1:
title = testvar.replace('St. Clair St.','St. Clair Street')
ws.cell(row=iterationrow, column=covcol).value = title
elif testvar.find('St.') != -1:
title = testvar.replace('St.','Street')
ws.cell(row=iterationrow, column=covcol).value = title
elif testvar.find('Dr.') != -1:
title = testvar.replace('Dr.','Drive')
ws.cell(row=iterationrow, column=covcol).value = title
elif testvar.find('Rd.') != -1:
title = testvar.replace('Rd.','Road')
ws.cell(row=iterationrow, column=covcol).value = title
elif testvar.find('Ave.') != -1:
title = testvar.replace('Ave.','Avenue')
ws.cell(row=iterationrow, column=covcol).value = title
elif testvar.find('Blvd.') != -1:
title = testvar.replace('Blvd.','Boulevard')
ws.cell(row=iterationrow, column=covcol).value = title
else:
continue
print(iterationrow,'|',testvar,'|',ws.cell(row=iterationrow, column=covcol).value)
iterationrow = iterationrow + 1
wb.save('aalh_iit_buildings_011.xlsx') |
#! /usr/bin/env python
from rakali import VideoPlayer, VideoStream
stream = VideoStream(src=5)
player = VideoPlayer(stream=stream)
with player:
player.autoplay()
|
print('''
::
:;J7, :, ::;7:
,ivYi, , ;LLLFS:
:iv7Yi :7ri;j5PL
,:ivYLvr ,ivrrirrY2X,
:;[email protected]: :ivu@kexianli.
:iL7::,:::iiirii:ii;::::,,irvF7rvvLujL7ur
ri::,:,::i:iiiiiii:i:irrv177JX7rYXqZEkvv17
;i:, , ::::iirrririi:i:::iiir2XXvii;L8OGJr71i
:,, ,,: ,::[email protected]:i:::j1jri7ZBOS7ivv,
,::, ::rv77iiiriii:iii:i::,[email protected]
,, ,, ,:ir7ir::,:::i;ir:::i:i::rSGGYri712:
::: ,v7r:: ::rrv77:, ,, ,:i7rrii:::::, ir7ri7Lri
, 2OBBOi,iiir;r:: ,irriiii::,, ,iv7Luur:
,, i78MBBi,:,:::,:, :7FSL: ,iriii:::i::,,:rLqXv::
: iuMMP: :,:::,:ii;2GY7OBB0viiii:i:iii:i:::iJqL;::
, ::::i ,,,,, ::LuBBu BBBBBErii:i:i:i:i:i:i:r77ii
, : , ,,:::rruBZ1MBBqi, :,,,:::,::::::iiriri:
, ,,,,::::i: @arqiao. ,:,, ,:::ii;i7:
:, rjujLYLi ,,:::::,:::::::::,, ,:i,:,,,,,::i:iii
:: BBBBBBBBB0, ,,::: , ,:::::: , ,,,, ,,:::::::
i, , ,8BMMBBBBBBi ,,:,, ,,, , , , , , :,::ii::i::
: iZMOMOMBBM2::::::::::,,,, ,,,,,,:,,,::::i:irr:i:::,
i ,,:;u0MBMOG1L:::i:::::: ,,,::, ,,, ::::::i:i:iirii:i:i:
: ,iuUuuXUkFu7i:iii:i:::, :,:,: ::::::::i:i:::::iirr7iiri::
: :[email protected]:::::, ,:ii:::::::i:::::i::,::::iirrriiiri::,
: 5BMBBBBBBSr:,::rv2kuii:::iii::,:i:,, , ,,:,:i@petermu.,
, :r50EZ8MBBBBGOBBBZP7::::i::,:::::,: :,:,::i;rrririiii::
:jujYY7LS0ujJL7r::,::i::,::::::::::::::iirirrrrrrr:ii:
,: :@kevensun.:,:,,,::::i:i:::::,,::::::iir;ii;7v77;ii;i,
,,, ,,:,::::::i:iiiii:i::::,, ::::[email protected];7:i,
, , ,,,:,,::::::::iiiiiiiiii:,:,:::::::::iiir;ri7vL77rrirri::
:,, , ::::::::i:::i:::i:i::,,,,,:,::i:i:::iir;@Secbone.ii:::
''') |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# replace can be "mm" or "none": mm = mode for categorical features and median for numeric features
# n_r = null ratio, s_r = skewness ratio, c_r = correlation ratio, n_f = number of features, t_s = test size, n = drop rows with more than n outlier features, cat_count = drop categorical columns with more than cat_count levels
def main (train,test,target,Id,n_r=0.6,s_r=0.75,c_r=1,n_f="full",t_s=0.25,r_s=42,replace="mm",cat_count=100,n=2):
#-----------------------------------------------------------------------------------------------------------------------------------------------------
dataset = pd.concat(objs=[train, test], axis=0,sort=False).reset_index(drop=True)
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def check_skew(train,target):
if train[target].skew()>=s_r :
train[target]= np.log1p(train[target])
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def drop_na(dataset,target):
dataset_isna=dataset.isna()
dataset_isna_sum=dataset_isna.sum()
dataset_isna_ratio=dataset_isna_sum/len(dataset)
if target in dataset_isna_ratio:
dataset_isna_ratio.drop(target,inplace=True)
remove_columns=dataset_isna_ratio[dataset_isna_ratio>n_r]
columns=pd.DataFrame(remove_columns)
print("2-This Columns will be remove because of null ratio higher than %"+str(n_r*100)+": ")
print(remove_columns)
return columns
drops=drop_na(dataset,target)
dataset=dataset.drop(drops.index,axis=1)
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def replace_null(dataset,replace):
cat=dataset.select_dtypes("object")
fl=dataset.select_dtypes(["float64","int64"]).drop(target,axis=1)
if replace =="mm":
for column in cat:
dataset[column].fillna(dataset[column].mode()[0], inplace=True)
for column in fl:
dataset[column].fillna(dataset[column].median(), inplace=True)
if replace=="none":
for column in cat:
dataset[column].fillna("NA", inplace=True)
for column in fl:
dataset[column].fillna(0, inplace=True)
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def detect_outliers(dataset,n,features):
from collections import Counter
outlier_indices = []
# iterate over features(columns)
for col in features:
# 1st quartile (25%)
Q1 = np.percentile(dataset[col], 25)
# 3rd quartile (75%)
Q3 = np.percentile(dataset[col],75)
# Interquartile range (IQR)
IQR = Q3 - Q1
# outlier step
outlier_step = 1.5 * IQR
# Determine a list of indices of outliers for feature col
outlier_list_col = dataset[(dataset[col] < Q1 - outlier_step) | (dataset[col] > Q3 + outlier_step )].index
# append the found outlier indices for col to the list of outlier indices
outlier_indices.extend(outlier_list_col)
# select observations containing more than 2 outliers
outlier_indices = Counter(outlier_indices)
multiple_outliers = list( k for k, v in outlier_indices.items() if v > n )
return multiple_outliers
#-----------------------------------------------------------------------------------------------------------------------------------------------------
def skew_features(dataset):
from scipy.special import boxcox1p
from scipy.stats import boxcox
from scipy.stats import skew
lam = 0.15
#boxcox transform skewed numeric features:
numeric_feats = dataset.dtypes[dataset.dtypes != "object"].index
skewed_feats = dataset[numeric_feats].apply(lambda x: skew(x.dropna())) #compute skewness
skewed_feats = skewed_feats[skewed_feats > s_r]
skewed_feats = skewed_feats.index
dataset[skewed_feats] = boxcox1p(dataset[skewed_feats],lam)
#------------------------------------------calling functions--------------------------------------------------------------------------------------
check_skew(dataset,"Survived")
drop_na(dataset,target)
replace_null(dataset,replace)
features=dataset.select_dtypes(["float64","int64"]).drop([target,Id],axis=1)
    Outliers_to_drop = detect_outliers(dataset, n, features)
dataset = dataset.drop(Outliers_to_drop, axis = 0).reset_index(drop=True)
skew_features(dataset)
cat=dataset.select_dtypes("object")
del_col=[]
for c in cat.columns:
if len(cat[c].value_counts())>=cat_count:
del_col.append(c)
cat=cat.drop(del_col,axis=1)
dataset=pd.get_dummies(dataset,columns=cat.columns)
#------------------------------------------train test split--------------------------------------------------------------------------------------
train=dataset[dataset[target].notnull()]
test=dataset[dataset[target].isna()]
if n_f=="full":
k=train.shape[1]
else:
k=n_f
corrmat=abs(dataset.corr())
cols = corrmat.nlargest(k, target)[target].index
train_x=train[cols].drop(target,axis=1)
train_y=train[target]
X_test=test[cols].drop(target,axis=1)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train_x, train_y, test_size=t_s, random_state=r_s)
#------------------------------------------all models--------------------------------------------------------------------------------------
from sklearn.metrics import confusion_matrix
from sklearn.metrics import mean_squared_error,mean_absolute_error
from sklearn.ensemble import GradientBoostingClassifier,RandomForestClassifier,AdaBoostClassifier,ExtraTreesClassifier
from lightgbm import LGBMClassifier
from catboost import CatBoostClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
my_models= [
GradientBoostingClassifier(random_state=r_s),
RandomForestClassifier(random_state=r_s),
AdaBoostClassifier(random_state=r_s),
ExtraTreesClassifier(random_state=r_s),
LGBMClassifier(random_state=r_s),
CatBoostClassifier(logging_level='Silent',random_state=r_s),
XGBClassifier(random_state=r_s),
MLPClassifier(random_state=r_s),
KNeighborsClassifier(),
SVC(random_state=r_s),
GaussianProcessClassifier(random_state=r_s),
DecisionTreeClassifier(random_state=r_s),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
LogisticRegression(random_state=r_s)
]
scores_val=[]
scores_train=[]
MAE=[]
MSE=[]
RMSE=[]
for model in my_models:
    # fit once per model, then score on both splits
    model.fit(X_train,y_train)
    scores_val.append(model.score(X_val,y_val))
    scores_train.append(model.score(X_train,y_train))
y_pred=model.predict(X_val)
MAE.append(mean_absolute_error(y_val,y_pred))
MSE.append(mean_squared_error(y_val,y_pred))
RMSE.append(np.sqrt(mean_squared_error(y_val,y_pred)))
results=zip(scores_val,scores_train,MAE,MSE,RMSE)
results=list(results)
results_score_val=[item[0] for item in results]
results_score_train=[item[1] for item in results]
results_MAE=[item[2] for item in results]
results_MSE=[item[3] for item in results]
results_RMSE=[item[4] for item in results]
df_results=pd.DataFrame({"Algorithm":my_models,"Training Score":results_score_train,"Validation Score":results_score_val,"MAE":results_MAE,"MSE":results_MSE,"RMSE":results_RMSE})
best_models=df_results.sort_values(by="Validation Score",ascending=False)
best_model=best_models.iloc[0]
print(best_model)
best_model_name=best_models.iloc[0][0]
print(best_model_name)
best_model_learn=best_model_name.fit(X_train,y_train)
y_pred_best=best_model_learn.predict(X_val)
cm=confusion_matrix(y_val,y_pred_best.round())
print("Confussion Matrix: ")
print(cm)
|
###################################################
# FILE: Weather.py #
# AUTHOR: NotPike #
# Function: OWM API caller, voice to read weather #
# https://openweathermap.org/ #
###################################################
import pyowm
import math
from modules.Module import *
class Weather(Module):
def __init__(self):
self.call = Callsign(self.env.CALLSIGN)
self.tx = TX(self.env.GPIO)
self.apiKey = self.env.OWM_API
self.online = self.env.OWM_ONLINE
self.location = self.env.OWM_LOCATION
def task(self):
try:
owm = pyowm.OWM(self.apiKey)
observation = owm.weather_at_place(self.location)
w = observation.get_weather()
self.online = True
        except Exception:
logging.warning("Weather Offline")
self.online = False
self.voice.buildAudio("Sorry. The weather is Offline")
if(self.online):
            temp = round(((w.get_temperature()['temp'] - 273.15) * (9/5) + 32), 1) # K -> F
rh = w.get_humidity()
windSpeed = round((w.get_wind()['speed'] * 2.237), 1) # MPS -> MPH
windDirection = w.get_wind()['deg']
report = "Air temperature, " + str(temp) + ". " + "Relative Humidity, " + str(rh) + ". " + "Wind Speed, " + str(windSpeed) + " Miles Per Hour. At " + str(windDirection) + " degrees."
logging.info("Weather: " + report)
self.voice.buildAudio(report)
else:
return
## Override
def run(self):
self.task()
self.tx.txOn()
self.voice.playAudio()
self.call.cw()
self.tx.txOff()
|
"""
"""
import attr
import pymunk
from pymunk.vec2d import Vec2d
#
from pyglet_pymunk.breakout_game.components.collision_types import CollisionType
@attr.s
class GroovJoint:
groove_a: Vec2d = attr.ib()
groove_b: Vec2d = attr.ib()
anchor: Vec2d = attr.ib(default=Vec2d())
class Paddle(pymunk.Body):
"""
"""
def __init__(
self,
space: pymunk.Space,
collision_type: CollisionType,
aspect_ratio,
mass=10
):
"""
:param space:
:param collision_type:
:param aspect_ratio:
:param mass:
"""
super().__init__(mass=mass, moment=pymunk.inf)
wall_left = 50
wall_right = 1230
wall_bottom = 50
paddle_width = 100
paddle_height = 16
paddle_half_width = paddle_width // 2
paddle_half_height = paddle_height // 2
paddle_position = Vec2d(640, wall_bottom + paddle_height * 3)
self.position = aspect_ratio.scale_V2d(paddle_position)
shape = pymunk.Segment(
self,
aspect_ratio.scale(-paddle_half_width, 0),
aspect_ratio.scale(+paddle_half_width, 0),
aspect_ratio.scale_s(paddle_half_height)
)
        # Doesn't work with the custom pre_solve collision handler (that
        # handler assumes a segment shape, not a polygonal one). Needs a fix!
# shape = pymunk.Poly.create_box(self, (paddle_width, paddle_height))
shape.elasticity = 1.00
shape.collision_type = collision_type
shape.filter = pymunk.ShapeFilter(categories=2 << collision_type)
self.groove_joint = GroovJoint(
aspect_ratio.scale(wall_left + paddle_half_width * 1.50, paddle_position.y),
aspect_ratio.scale(wall_right - paddle_half_width * 1.50, paddle_position.y),
)
self.joint = pymunk.GrooveJoint(
space.static_body,
self,
self.groove_joint.groove_a,
self.groove_joint.groove_b,
self.groove_joint.anchor,
)
space.add(self, shape, self.joint)
|
import os
from stackstrap.config import settings
from stackstrap.commands import Command
from stackstrap.template import Template, MASTER_TEMPLATE_URL
class Create(Command):
"Create a new template"
name = 'create'
def setup_parser(self, parser):
self.parser = parser
template_url = settings.get('project_template_url',
MASTER_TEMPLATE_URL)
self.parser.add_argument(
'path',
metavar='PATH',
type=str,
help='The path to create the new template at'
)
self.parser.add_argument(
'-t', '--template',
metavar='GIT_URL',
type=str,
help='The GIT URL of the template to use. Defaults to %s' % template_url,
default=template_url
)
self.parser.add_argument(
'-r', '--ref',
metavar='GIT_REF',
type=str,
help='The GIT reference to use when archiving the template. '
'Defaults to master.',
default='master'
)
def main(self, args):
# this must be imported in here since StackStrapCLI is what initially
# imports this module
from stackstrap.cli import StackStrapCLI
cli = StackStrapCLI()
template = Template('master-template')
if not template.exists:
self.log.info("You are creating a new template for the first time "
"we will now setup a template named 'master-template' "
"that is used to create new templates.")
cli.main(['template', 'add', '-r', args.ref, 'master-template', args.template])
cli.main(['create', args.path, 'master-template'])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time
import os
class ToolService:
def __init__(self):
pass
def prt_content(self, content):
flags = '*' * 30
print(f"{flags}{content}{flags}")
def gen_list(self, data):
return data.strip().split('\n')
def get_time_stamp(self):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def read_file(self, filename):
content = ''
with open(filename, encoding='utf-8') as f:
content = f.read().strip()
return content
def write_file(self, filename, content=""):
        with open(filename, 'w', encoding='utf-8') as f:
f.write(content)
def mkdirs(self, path):
if not os.path.exists(path):
os.makedirs(path)
def mkfile(self, path, content=''):
if not os.path.exists(path):
self.write_file(path, content)
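# Usage sketch (illustrative; the file and directory names below are hypothetical):
if __name__ == '__main__':
    svc = ToolService()
    svc.prt_content('demo')
    svc.mkdirs('tmp_demo')
    svc.mkfile('tmp_demo/note.txt', svc.get_time_stamp())
    print(svc.read_file('tmp_demo/note.txt'))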
|
import numpy as np
def rot_matrix_align(final, origin=(0,0,1)):
origin = np.array(origin)/np.linalg.norm(np.array(origin))
final = np.array(final)/np.linalg.norm(np.array(final))
v = np.cross(origin, final)
mat_v = np.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
    if np.linalg.norm(v) == 0:
        # origin and final are parallel (note: the antiparallel case also
        # lands here and is not handled)
        rot = np.identity(3)
    else:
        # Rodrigues' formula: R = I + [v]x + [v]x^2 * (1 - cos)/sin^2,
        # with sin = |v| and cos = origin . final
        rot = np.identity(3) + mat_v + np.linalg.matrix_power(mat_v, 2) * (1 - np.dot(origin, final)) / np.linalg.norm(v)**2
return rot
def rot_matrix_z(angle):
rot = np.array([[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]])
return rot
def rotate_align_z(coord, angle, center=(0, 0, 0), align=(0, 0, 1), radial=False):
if radial:
        #Transformation: cylindrical -> cartesian
x = coord[2] * np.cos(coord[1])
y = coord[2] * np.sin(coord[1])
z = coord[0] - center[2]
coord = np.array([x, y, z])
new_coord = np.dot(rot_matrix_align(align), np.dot(rot_matrix_z(angle), np.array(coord))) + np.array(center)
else:
new_coord = np.dot(rot_matrix_align(align),
np.dot(rot_matrix_z(angle), np.array(coord) - np.array(center))
) + np.array(center)
return new_coord
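# Quick self-check of rot_matrix_align (illustrative, not part of the original
# module): the returned matrix should rotate `origin` onto `final`.
if __name__ == '__main__':
    v_from = np.array([0.0, 0.0, 1.0])
    v_to = np.array([1.0, 1.0, 0.0]) / np.sqrt(2)
    R = rot_matrix_align(v_to, origin=v_from)
    assert np.allclose(R.dot(v_from), v_to)
    print('rot_matrix_align OK')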
|
import tensorflow as tf
import tensorflow.contrib.layers as tfc_layers
def compute_q_att(context, keep_dropout, no_glimpse=2, glimpse_embedding_size=300, reuse=False):
with tf.variable_scope("glimpse"):
glimpses = []
with tf.variable_scope("glimpse"):
g_feature_maps = tf.nn.dropout(context, keep_dropout) # B*L*C
g_feature_maps = tfc_layers.fully_connected(g_feature_maps,
num_outputs=glimpse_embedding_size,
biases_initializer=None,
activation_fn=tf.nn.relu,
scope='q_projection1',
reuse=reuse)
e = tfc_layers.fully_connected(g_feature_maps,
num_outputs=no_glimpse,
biases_initializer=None,
activation_fn=None,
scope='q_projection2',
reuse=reuse) # B*L*G
# e = tf.reshape(e, shape=[-1, h * w, no_glimpse])
for i in range(no_glimpse):
ev = e[:, :, i]
alpha = tf.nn.softmax(ev)
# apply soft attention
soft_glimpses = context * tf.expand_dims(alpha, -1)
soft_glimpses = tf.reduce_sum(soft_glimpses, axis=1)
glimpses.append(soft_glimpses)
full_glimpse = tf.concat(glimpses, axis=1)
return full_glimpse
def compute_current_att(feature_maps, context, config, is_training, reuse=False):
with tf.variable_scope("current_attention"):
glimpse_embedding_size = config['no_attention_mlp']
keep_dropout = config['drop_out_keep']
dropout_keep_ratio = tf.cond(is_training,
lambda: tf.constant(keep_dropout),
lambda: tf.constant(1.0))
h = int(feature_maps.get_shape()[1])
# w = int(feature_maps.get_shape()[2])
c = int(feature_maps.get_shape()[2])
# reshape state to perform batch operation
context = tf.nn.dropout(context, dropout_keep_ratio)
projected_context = tfc_layers.fully_connected(context,
num_outputs=glimpse_embedding_size,
biases_initializer=None,
activation_fn=tf.nn.relu,
scope='hidden_layer',
reuse=reuse)
projected_context = tf.expand_dims(projected_context, axis=1)
projected_context = tf.tile(projected_context, [1, h, 1])
projected_context = tf.reshape(projected_context, [-1, glimpse_embedding_size])
feature_maps = tf.reshape(feature_maps, shape=[-1, h, c])
        g_feature_maps = tf.reshape(feature_maps, shape=[-1, c])  # linearise the feature map as a single batch
g_feature_maps = tf.nn.dropout(g_feature_maps, dropout_keep_ratio)
g_feature_maps = tfc_layers.fully_connected(g_feature_maps,
num_outputs=glimpse_embedding_size,
biases_initializer=None,
activation_fn=tf.nn.relu,
scope='image_projection',
reuse=reuse)
hadamard = g_feature_maps * projected_context
hadamard = tf.nn.dropout(hadamard, dropout_keep_ratio)
e = tfc_layers.fully_connected(hadamard,
num_outputs=1,
biases_initializer=None,
activation_fn=None,
scope='hadamard_projection',
reuse=reuse)
e = tf.reshape(e, shape=[-1, h])
# alpha = tf.nn.softmax(e)
# apply soft attention
# soft_glimpses = feature_maps * tf.expand_dims(alpha, -1)
# soft_glimpses = tf.reduce_sum(soft_glimpses, axis=1)
return e
def maskedSoftmax(logits, mask):
"""
Masked softmax over dim 1
:param logits: (N, L)
:param mask: (N, L)
:return: probabilities (N, L)
from: https://github.com/tensorflow/tensorflow/issues/11756
"""
indices = tf.where(mask)
values = tf.gather_nd(logits, indices)
denseShape = tf.cast(tf.shape(logits), tf.int64)
sparseResult = tf.sparse_softmax(tf.SparseTensor(indices, values, denseShape))
result = tf.scatter_nd(sparseResult.indices, sparseResult.values, sparseResult.dense_shape)
result.set_shape(logits.shape)
return result
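# Illustrative check of maskedSoftmax in a TF1 session (values are examples):
# with tf.Session() as sess:
#     logits = tf.constant([[1.0, 2.0, 3.0]])
#     mask = tf.constant([[True, True, False]])
#     print(sess.run(maskedSoftmax(logits, mask)))
#     # -> approx [[0.269, 0.731, 0.0]]; masked entries get probability 0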
def OD_compute(matrix):
# matrix: b*n*d
# vector: b*d
# b = int(matrix.get_shape()[0])
n = 36
d = 2048
m1 = tf.tile(tf.expand_dims(matrix, 2), [1, 1, n, 1]) # b*n*n*d
m2 = tf.tile(tf.expand_dims(matrix, 1), [1, n, 1, 1]) # b*n*n*d
output = tf.reshape((m1 - m2), shape=[-1, n, n * d])
return output
|
#!/usr/bin/env python
import argparse
import time
import os
import sys
# readline has a desired side effect on keyword input of enabling history
import readline
from lndmanage.lib.node import LndNode
from lndmanage.lib.listchannels import ListChannels
from lndmanage.lib.rebalance import Rebalancer
from lndmanage.lib.recommend_nodes import RecommendNodes
from lndmanage.lib.report import Report
from lndmanage.lib.exceptions import (
DryRun,
PaymentTimeOut,
TooExpensive,
RebalanceFailure,
RebalancingTrialsExhausted,
)
from lndmanage import settings
import logging.config
logging.config.dictConfig(settings.logger_config)
logger = logging.getLogger()
def range_limited_float_type(unchecked_value):
"""
Type function for argparse - a float within some predefined bounds
:param: unchecked_value: float
"""
try:
value = float(unchecked_value)
except ValueError:
raise argparse.ArgumentTypeError("Must be a floating point number")
if value < 1E-6 or value > 1:
        raise argparse.ArgumentTypeError(
            "Argument must be between " + str(1E-6) + " and " + str(1))
return value
def unbalanced_float(x):
"""
Checks if the value is a valid unbalancedness between [-1 ... 1]
"""
x = float(x)
if x < -1.0 or x > 1.0:
raise argparse.ArgumentTypeError(f"{x} not in range [-1.0, 1.0]")
return x
class Parser(object):
def __init__(self):
# setup the command line parser
self.parser = argparse.ArgumentParser(
prog='lndmanage.py',
description='Lightning network daemon channel management tool.')
self.parser.add_argument(
'--loglevel', default='INFO', choices=['INFO', 'DEBUG'])
subparsers = self.parser.add_subparsers(dest='cmd')
self.parser_status = subparsers.add_parser(
'status', help='display node status',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# cmd: listchannels
self.parser_listchannels = subparsers.add_parser(
'listchannels',
help='lists channels with extended information '
'[see also subcommands with -h]',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
listchannels_subparsers = self.parser_listchannels.add_subparsers(
dest='subcmd')
# subcmd: listchannels rebalance
parser_listchannels_rebalance = listchannels_subparsers.add_parser(
'rebalance', help='displays unbalanced channels')
parser_listchannels_rebalance.add_argument(
'--unbalancedness', type=float,
default=settings.UNBALANCED_CHANNEL,
help='Unbalancedness is a way to express how balanced a '
'channel is, a value between [-1, 1] (a perfectly balanced '
'channel has a value of 0). The flag excludes channels with '
'an absolute unbalancedness smaller than UNBALANCEDNESS.')
parser_listchannels_rebalance.add_argument(
'--sort-by', default='rev_ub', type=str,
help='sort by column (look at description)')
# subcmd: listchannels inactive
parser_listchannels_inactive = listchannels_subparsers.add_parser(
'inactive', help="displays inactive channels")
parser_listchannels_inactive.add_argument(
'--sort-by', default='lupp', type=str,
help='sort by column (look at description)')
# subcmd: listchannels forwardings
parser_listchannels_forwardings = listchannels_subparsers.add_parser(
'forwardings',
help="displays channels with forwarding information")
parser_listchannels_forwardings.add_argument(
'--from-days-ago', default=30, type=int,
help='time interval start (days ago)')
parser_listchannels_forwardings.add_argument(
'--to-days-ago', default=0, type=int,
help='time interval end (days ago)')
parser_listchannels_forwardings.add_argument(
'--sort-by', default='f/w', type=str,
help='sort by column (look at description)')
# subcmd: listchannels hygiene
parser_listchannels_hygiene = listchannels_subparsers.add_parser(
'hygiene',
help="displays channels with information for channel closing")
parser_listchannels_hygiene.add_argument(
'--from-days-ago', default=60, type=int,
help='time interval start (days ago)')
parser_listchannels_hygiene.add_argument(
'--sort-by', default='rev_nfwd', type=str,
help='sort by column (look at description)')
# cmd: rebalance
self.parser_rebalance = subparsers.add_parser(
'rebalance', help='rebalance a channel',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.parser_rebalance.add_argument('channel', type=int,
help='channel_id')
self.parser_rebalance.add_argument(
'--max-fee-sat', type=int, default=20,
help='Sets the maximal fees in satoshis to be paid.')
self.parser_rebalance.add_argument(
'--chunksize', type=float, default=1.0,
help='Specifies if the individual rebalance attempts should be '
'split into smaller relative amounts. This increases success'
' rates, but also increases costs!')
self.parser_rebalance.add_argument(
'--max-fee-rate', type=range_limited_float_type, default=5E-5,
help='Sets the maximal effective fee rate to be paid.'
' The effective fee rate is defined by '
'(base_fee + amt * fee_rate) / amt.')
self.parser_rebalance.add_argument(
'--reckless', help='Execute action in the network.',
action='store_true')
self.parser_rebalance.add_argument(
'--allow-unbalancing',
help=f'Allow channels to get an unbalancedness'
f' up to +-{settings.UNBALANCED_CHANNEL}.',
action='store_true')
self.parser_rebalance.add_argument(
'--target', help=f'This feature is still experimental! '
f'The unbalancedness target is between [-1, 1]. '
f'A target of -1 leads to a maximal local balance, a target of 0 '
f'to a 50:50 balanced channel and a target of 1 to a maximal '
f'remote balance. Default is a target of 0.',
type=unbalanced_float, default=0.0)
rebalancing_strategies = ['most-affordable-first',
'lowest-feerate-first', 'match-unbalanced']
self.parser_rebalance.add_argument(
'--strategy',
help=f'Rebalancing strategy.',
choices=rebalancing_strategies, type=str, default=None)
# cmd: circle
self.parser_circle = subparsers.add_parser(
'circle', help='circular self-payment',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.parser_circle.add_argument('channel_from', type=int,
help='channel_from')
        self.parser_circle.add_argument('channel_to', type=int,
                                        help='channel_to')
self.parser_circle.add_argument('amt_sat', type=int,
help='amount in satoshis')
self.parser_circle.add_argument(
'--max-fee-sat', type=int, default=20,
help='Sets the maximal fees in satoshis to be paid.')
self.parser_circle.add_argument(
'--max-fee-rate', type=range_limited_float_type, default=5E-5,
help='Sets the maximal effective fee rate to be paid. '
'The effective fee rate is defined by '
'(base_fee + amt * fee_rate) / amt.')
self.parser_circle.add_argument(
'--reckless', help='Execute action in the network.',
action='store_true')
# cmd: recommend-nodes
self.parser_recommend_nodes = subparsers.add_parser(
'recommend-nodes',
help='recommends nodes [see also subcommands with -h]',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.parser_recommend_nodes.add_argument(
'--show-connected', action='store_true', default=False,
help='specifies if already connected nodes should be '
'removed from list')
self.parser_recommend_nodes.add_argument(
'--show-addresses', action='store_true', default=False,
help='specifies if node addresses should be shown')
parser_recommend_nodes_subparsers = \
self.parser_recommend_nodes.add_subparsers(
dest='subcmd')
# TODO: put global options to the
# parent parser (e.g. number of nodes, sort-by flag)
# subcmd: recommend-nodes good-old
parser_recommend_nodes_good_old = \
parser_recommend_nodes_subparsers.add_parser(
'good-old',
help='nodes with previous good relationship (channels)',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_recommend_nodes_good_old.add_argument(
'--nnodes', default=20, type=int,
help='sets the number of nodes displayed')
parser_recommend_nodes_good_old.add_argument(
'--sort-by', default='tot', type=str,
help="sort by column [abbreviation, e.g. 'tot']")
# subcmd: recommend-nodes flow-analysis
parser_recommend_nodes_flow_analysis = \
parser_recommend_nodes_subparsers.add_parser(
'flow-analysis', help='nodes from a flow analysis',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_recommend_nodes_flow_analysis.add_argument(
'--nnodes', default=20, type=int,
help='sets the number of nodes displayed')
parser_recommend_nodes_flow_analysis.add_argument(
'--forwarding-events', default=200, type=int,
help='sets the number of forwarding events in the flow analysis')
parser_recommend_nodes_flow_analysis.add_argument(
'--inwards', action='store_true',
help='if True, inward-flowing nodes are displayed '
'instead of outward-flowing nodes')
parser_recommend_nodes_flow_analysis.add_argument(
'--sort-by', default='weight', type=str,
help="sort by column [abbreviation, e.g. 'nchan']")
# subcmd: recommend-nodes external_source
parser_recommend_nodes_external_source = \
parser_recommend_nodes_subparsers.add_parser(
'external-source',
help='nodes from a given file/url',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_recommend_nodes_external_source.add_argument(
'--nnodes', default=20, type=int,
help='sets the number of nodes displayed')
parser_recommend_nodes_external_source.add_argument(
'--source', type=str,
default='https://github.com/lightningnetworkstores/'
'lightningnetworkstores.github.io/raw/master/sites.json',
help='url/file to be analyzed')
parser_recommend_nodes_external_source.add_argument(
'--distributing-nodes', action='store_true',
help='if True, distributing nodes are '
'displayed instead of the bare nodes')
parser_recommend_nodes_external_source.add_argument(
'--sort-by', default='cpc', type=str,
help="sort by column [abbreviation, e.g. 'nchan']")
# subcmd: recommend-nodes channel-openings
parser_recommend_nodes_channel_openings = \
parser_recommend_nodes_subparsers.add_parser(
'channel-openings',
help='nodes from recent channel openings',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_recommend_nodes_channel_openings.add_argument(
'--nnodes', default=20, type=int,
help='sets the number of nodes displayed')
parser_recommend_nodes_channel_openings.add_argument(
'--from-days-ago', type=int,
default=30,
help='channel openings starting from a time frame days ago')
parser_recommend_nodes_channel_openings.add_argument(
'--sort-by', default='msteady', type=str,
help="sort by column [abbreviation, e.g. 'nchan']")
# subcmd: recommend-nodes second-neighbors
parser_recommend_nodes_second_neighbors = \
parser_recommend_nodes_subparsers.add_parser(
'second-neighbors',
help='nodes from network analysis giving most '
'second neighbors',
description="This command recommends nodes for getting more "
"second neighbors. "
"This is achieved by checking how many second "
"neighbors would be added if one would connect to "
"the suggested node. A channel to the node "
"should get your node closer to "
"more other nodes.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser_recommend_nodes_second_neighbors.add_argument(
'--nnodes', default=20, type=int,
help='sets the number of nodes displayed')
parser_recommend_nodes_second_neighbors.add_argument(
'--sort-by', default='sec', type=str,
help="sort by column [abbreviation, e.g. 'sec']")
# cmd: report
parser_report = subparsers.add_parser(
'report',
help="displays reports of activity on the node")
parser_report.add_argument(
'--from-days-ago', default=1, type=int,
help='time interval start (days ago)')
parser_report.add_argument(
'--to-days-ago', default=0, type=int,
help='time interval end (days ago)')
def parse_arguments(self):
return self.parser.parse_args()
def run_commands(self, node, args):
# program execution
if args.loglevel:
# update the loglevel of the stdout handler to the user choice
logger.handlers[0].setLevel(args.loglevel)
if args.cmd == 'status':
node.print_status()
elif args.cmd == 'listchannels':
listchannels = ListChannels(node)
if not args.subcmd:
listchannels.print_all_channels('rev_alias')
if args.subcmd == 'rebalance':
listchannels.print_channels_unbalanced(
args.unbalancedness, sort_string=args.sort_by)
elif args.subcmd == 'inactive':
listchannels.print_channels_inactive(
sort_string=args.sort_by)
elif args.subcmd == 'forwardings':
# convert time interval into unix timestamp
time_from = time.time() - args.from_days_ago * 24 * 60 * 60
time_to = time.time() - args.to_days_ago * 24 * 60 * 60
logger.info(
f"Forwardings from {args.from_days_ago} days ago"
f" to {args.to_days_ago} days ago are included.")
listchannels.print_channels_forwardings(
time_interval_start=time_from, time_interval_end=time_to,
sort_string=args.sort_by)
elif args.subcmd == 'hygiene':
time_from = time.time() - args.from_days_ago * 24 * 60 * 60
logger.info(f"Channel hygiene stats is over last "
f"{args.from_days_ago} days.")
listchannels.print_channels_hygiene(
time_interval_start=time_from, sort_string=args.sort_by)
elif args.cmd == 'rebalance':
if args.target:
logger.warning("Warning: Target is set, this is still an "
"experimental feature.")
rebalancer = Rebalancer(node, args.max_fee_rate, args.max_fee_sat)
try:
rebalancer.rebalance(
args.channel,
dry=not args.reckless,
chunksize=args.chunksize,
target=args.target,
allow_unbalancing=args.allow_unbalancing,
strategy=args.strategy)
except TooExpensive as e:
logger.error(f"Too expensive: {e}")
except RebalanceFailure as e:
logger.error(f"Rebalance failure: {e}")
elif args.cmd == 'circle':
rebalancer = Rebalancer(node, args.max_fee_rate, args.max_fee_sat)
invoice_r_hash = node.get_rebalance_invoice(
memo='circular payment')
try:
rebalancer.rebalance_two_channels(
args.channel_from, args.channel_to,
args.amt_sat, invoice_r_hash, args.max_fee_sat,
dry=not args.reckless)
except DryRun:
logger.info("This was just a dry run.")
except TooExpensive:
logger.error(
"Too expensive: consider to raise --max-fee-sat or "
"--max-fee-rate.")
except RebalancingTrialsExhausted:
logger.error(
f"Rebalancing trials exhausted (number of trials: "
f"{settings.REBALANCING_TRIALS}).")
except PaymentTimeOut:
logger.error("Payment failed because the payment timed out.")
elif args.cmd == 'recommend-nodes':
if not args.subcmd:
self.parser_recommend_nodes.print_help()
return 0
recommend_nodes = RecommendNodes(
node, show_connected=args.show_connected,
show_addresses=args.show_addresses)
if args.subcmd == 'good-old':
recommend_nodes.print_good_old(number_of_nodes=args.nnodes,
sort_by=args.sort_by)
elif args.subcmd == 'flow-analysis':
recommend_nodes.print_flow_analysis(
out_direction=(not args.inwards),
number_of_nodes=args.nnodes,
forwarding_events=args.forwarding_events,
sort_by=args.sort_by)
elif args.subcmd == 'external-source':
recommend_nodes.print_external_source(
args.source, distributing_nodes=args.distributing_nodes,
number_of_nodes=args.nnodes, sort_by=args.sort_by)
elif args.subcmd == 'channel-openings':
recommend_nodes.print_channel_openings(
from_days_ago=args.from_days_ago,
number_of_nodes=args.nnodes, sort_by=args.sort_by)
elif args.subcmd == 'second-neighbors':
recommend_nodes.print_second_neighbors(
number_of_nodes=args.nnodes, sort_by=args.sort_by)
elif args.cmd == 'report':
time_from = time.time() - args.from_days_ago * 24 * 60 * 60
time_to = time.time() - args.to_days_ago * 24 * 60 * 60
report = Report(node, time_from, time_to)
report.report()
def main():
parser = Parser()
# config.ini is expected to be in home/.lndmanage directory
config_file = os.path.join(settings.home_dir, 'config.ini')
# if lndmanage is run with arguments, run once
if len(sys.argv) > 1:
# take arguments from sys.argv
args = parser.parse_arguments()
node = LndNode(config_file=config_file)
parser.run_commands(node, args)
# otherwise enter an interactive mode
else:
history_file = os.path.join(settings.home_dir, "command_history")
try:
readline.read_history_file(history_file)
except FileNotFoundError:
# history will be written later
pass
logger.info("Running in interactive mode. "
"You can type 'help' or 'exit'.")
node = LndNode(config_file=config_file)
while True:
try:
user_input = input("$ lndmanage ")
except (EOFError, KeyboardInterrupt):
readline.write_history_file(history_file)
logger.info("exit")
return 0
if not user_input or user_input in ['help', '-h', '--help']:
parser.parser.print_help()
continue
elif user_input == 'exit':
readline.write_history_file(history_file)
return 0
args_list = user_input.split(" ")
try:
                # argparse raises SystemExit on invalid input; the bare except
                # below also catches that, keeping the interactive loop alive
args = parser.parser.parse_args(args_list)
parser.run_commands(node, args)
except:
logger.exception("Exception encountered.")
continue
if __name__ == '__main__':
main()
|
#Learning How to use GUI
#You're supposed to import tk
import tkinter as tk
#Creates a Window
window = tk.Tk()
#Sets The size of the Window
window.geometry("500x500")
#Sets the Icon of the window
window.wm_iconbitmap('favicon.ico')
#Sets the title of the window
#window.title("Scummy Hero Super Mario 64")
#Stores a variable called rules
Rules = tk.Label(window, text="Rules")
#Create a text entry
Oh_no = tk.Entry(window)
#Create a button
OHNOOOOOOOO = tk.Button(window, text="OHNOOOOOOOO")
#Puts in the variable into our window
Rules.pack()
Oh_no.pack()
OHNOOOOOOOO.pack()
#Creates the Actual window
window.mainloop()
# ^-^ is the testing window, time to recreate a login screen.
#__________________________________________________
while True:
#Creates Window
window = tk.Tk()
#Sets Window size
window.geometry("1000x1100")
#Adds New Icon to the window
#window.wm_iconbitmap('faviconFacebook.ico')#faviconFacebook.ico #Mariobest.ico
window.configure(background="blue")
window.title("Welcome!")
#facebook = tk.PhotoImage(file="Newfacebook.gif")
#w = tk.Label(window, image=facebook)
#What to do.
Greeting = tk.Label(window, text="Welcome, Please Login to recieve your Facebook Status")
#Apology
Sorry = tk.Label(window, text="We're sorry, our updated servers are down")
Sorrycont = tk.Label(window, text="You can still login")
#Username
Username = tk.Label(window, text="Username:", bg="blue")
Username_Text_Entry = tk.Entry(window)
#Password
Password = tk.Label(window, text="Password:", bg="blue")
Password_Text_Entry = tk.Entry(window)
#The Scammings
Name = tk.Label(window, text="Name Please:", bg="blue")
Name_Text_Entry = tk.Entry(window)
Social_security_number = tk.Label(window, text="Social Security Number required for Security", bg="blue")
Social_security_number_Text_Entry = tk.Entry(window)
Credit_card_issue = tk.Label(window, text="MasterCard Or VISA ", bg="blue")
Credit_card_issue_Text_Entry = tk.Entry(window)
Other = tk.Label(window, text="Credit Card Number", bg= "blue")
Other_Text_Entry = tk.Entry(window)
PIN_NUMBER = tk.Label(window, text="PIN NUMBER")
PIN_NUMBER_TEXT_ENTRY = tk.Entry(window)
CVC = tk.Label(window, text="CVC (FOUND TO CREDIT CARD)")
CVC_TEXT_ENTRY = tk.Entry(window)
TankYou = tk.Label(window, text="Thank you friend ( ͡° ͜ʖ ͡°) ")
#Login Button
Login = tk.Button(window, text="Login", fg="red", bg="white")
#Joke
joke = tk.Label(window, text="WE ARE DEFINTLY NO SCAMMING MY NAME IS STEVE ROBERTSON CREATOR OF FACEBOO0K")
#Time To Pack
    #w.pack()  # 'w' exists only when the Facebook image above is enabled
Greeting.pack()
Sorry.pack()
Sorrycont.pack()
Username.pack()
Username_Text_Entry.pack()
Password.pack()
Password_Text_Entry.pack()
Name.pack()
Name_Text_Entry.pack()
Social_security_number.pack()
Social_security_number_Text_Entry.pack()
Credit_card_issue.pack()
Credit_card_issue_Text_Entry.pack()
Other.pack()
Other_Text_Entry.pack()
CVC.pack()
CVC_TEXT_ENTRY.pack()
Login.pack()
TankYou.pack()
joke.pack()
# NOTE: Some pictures the program is unable to obtain, I'll try to commit them somewhere
#_____________________
#To Load Window
window.mainloop()
|
import torch
import torch.nn as nn
class Config:
"""
Configuration for MTCNN.
"""
def __init__(self):
self.kernel1 = 3
self.kernel2 = 4
self.kernel3 = 5
self.n_filters1 = 100
self.n_filters2 = 100
self.n_filters3 = 100
self.dropout1 = 0.5
self.dropout2 = 0.5
self.dropout3 = 0.5
self.max_sent_len = 3000
self.word_dim = 300
self.vocab_size = 35095
class MTCNN(nn.Module):
"""Multi-task CNN model for document classification.
Parameters
----------
subsite_size : int
Class size for subsite task.
laterality_size : int
Class size for laterality task.
behavior_size : int
Class size for behavior task.
grade_size : int
Class size for grade task.
alt_model_type : str, default=None
Alternative type of model being used.
-Options:
"static"
"multichannel"
"""
def __init__(self, config=Config(), wv_matrix=None, subsite_size=34, laterality_size=4,
behavior_size=3, histology_size=44, grade_size=5, alt_model_type=None):
        super(MTCNN, self).__init__()
        self.config = config
        self.wv_matrix = wv_matrix
        self.subsite_size = subsite_size
        self.laterality_size = laterality_size
        self.behavior_size = behavior_size
        self.histology_size = histology_size
        self.grade_size = grade_size
        self.alt_model_type = alt_model_type
        self.IN_CHANNEL = 1
        self._filter_sum = None
        self._sum_filters()
        self.embedding = nn.Embedding(self.config.vocab_size + 2, self.config.word_dim, padding_idx=0)
        if self.alt_model_type == 'static':
            self.embedding.weight.requires_grad = False
        elif self.alt_model_type == 'multichannel':
            self.embedding2 = nn.Embedding(self.config.vocab_size + 2, self.config.word_dim, padding_idx=self.config.vocab_size + 1)
            self.embedding2.weight.data.copy_(torch.from_numpy(self.wv_matrix))
            self.embedding2.weight.requires_grad = False
            self.IN_CHANNEL = 2
        self.convblock1 = nn.Sequential(
            nn.Conv1d(self.IN_CHANNEL, self.config.n_filters1, self.config.kernel1),
            nn.ReLU(),
            nn.AdaptiveMaxPool1d(1),
            nn.Dropout(p=self.config.dropout1)
        )
        self.convblock2 = nn.Sequential(
            nn.Conv1d(self.IN_CHANNEL, self.config.n_filters2, self.config.kernel2),
            nn.ReLU(),
            nn.AdaptiveMaxPool1d(1),
            nn.Dropout(p=self.config.dropout2)
        )
        self.convblock3 = nn.Sequential(
            nn.Conv1d(self.IN_CHANNEL, self.config.n_filters3, self.config.kernel3),
            nn.ReLU(),
            nn.AdaptiveMaxPool1d(1),
            nn.Dropout(p=self.config.dropout3)
        )
self.fc1 = nn.Linear(self._filter_sum, self.subsite_size)
self.fc2 = nn.Linear(self._filter_sum, self.laterality_size)
self.fc3 = nn.Linear(self._filter_sum, self.behavior_size)
self.fc4 = nn.Linear(self._filter_sum, self.histology_size)
self.fc5 = nn.Linear(self._filter_sum, self.grade_size)
def _sum_filters(self):
"""Get the total number of convolutional filters."""
        self._filter_sum = self.config.n_filters1 + self.config.n_filters2 + self.config.n_filters3
    def forward(self, x):
        embedded = self.embedding(x).view(-1, 1, self.config.word_dim * self.config.max_sent_len)
        if self.alt_model_type == "multichannel":
            # embed the raw token ids with the second (frozen) channel
            embedded2 = self.embedding2(x).view(-1, 1, self.config.word_dim * self.config.max_sent_len)
            embedded = torch.cat((embedded, embedded2), 1)
        conv_results = []
        conv_results.append(self.convblock1(embedded).view(-1, self.config.n_filters1))
        conv_results.append(self.convblock2(embedded).view(-1, self.config.n_filters2))
        conv_results.append(self.convblock3(embedded).view(-1, self.config.n_filters3))
x = torch.cat(conv_results, 1)
out_subsite = self.fc1(x)
out_laterality = self.fc2(x)
out_behavior = self.fc3(x)
out_histology = self.fc4(x)
out_grade = self.fc5(x)
return out_subsite, out_laterality, out_behavior, out_histology, out_grade
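# Smoke test (a sketch): shrink max_sent_len so the forward pass is quick.
if __name__ == '__main__':
    cfg = Config()
    cfg.max_sent_len = 50
    model = MTCNN(config=cfg)
    tokens = torch.randint(0, cfg.vocab_size, (2, cfg.max_sent_len))
    outputs = model(tokens)
    print([tuple(o.shape) for o in outputs])
    # -> [(2, 34), (2, 4), (2, 3), (2, 44), (2, 5)]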
|
import io
import asyncio
from chickensmoothie import image
loop = asyncio.get_event_loop()
class TestClass:
def test_invalid_link(self):
data = loop.run_until_complete(image('https://www.chickensmoothie.com/viewpet.php'))
assert data is None
def test_valid_link(self):
data = loop.run_until_complete(image('https://www.chickensmoothie.com/viewpet.php?id=277461516'))
assert isinstance(data, io.BytesIO)
def test_pet_with_items(self):
data = loop.run_until_complete(image('https://www.chickensmoothie.com/viewpet.php?id=275516239'))
assert isinstance(data, io.BytesIO)
|
import requests
zhuanlanHost = "https://news-at.zhihu.com/api/4/news/latest"
githuburl = 'https://github.com/Peefy/WebMagicSharp/tree/master/WebMagicSharp'
r = requests.get(githuburl)
print(r.text)
r = requests.get(zhuanlanHost)
print(r.text)
|
"""Template jinja tests."""
def is_subset_of(value, subset):
    """Check if `subset` is a subset of the value."""
    return set(value) >= set(subset)
def is_superset_of(value, superset):
    """Check if `superset` is a superset of the value."""
    return set(value) <= set(superset)
def create_builtin_tests():
"""Tests standard for the template rendering."""
return (
('subset', is_subset_of),
('superset', is_superset_of),
)
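# Example wiring into a jinja2 environment (a sketch; jinja2 itself is not
# imported by this module):
if __name__ == '__main__':
    from jinja2 import Environment
    env = Environment()
    env.tests.update(dict(create_builtin_tests()))
    # True: [1, 2] is a subset of the value [1, 2, 3]
    print(env.from_string('{{ [1, 2, 3] is subset([1, 2]) }}').render())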
|
from .worker_manager import WorkerManager
worker_manager = WorkerManager()
|
import ast
from graphviz import Digraph
import glob, re
pattern = re.compile("Eth [0123]/[0123]")
device_cdp_neighbors = []
for file_name in glob.glob("/home/maciej/repo/networking/ansible/cdp_example_output/*"):
path_length = len(file_name.split("/"))
device = file_name.split("/")[path_length-1].split("_")[0]
#print("device: " + device)
with open(file_name, 'r') as f:
for line in f.readlines():
            line = ast.literal_eval(line)  # parse the repr'd line safely instead of eval
for item in line[0]:
if re.search(pattern, item):
device2 = item.split()[0].split(".")[0]
device_cdp_neighbors.append((device, device2))
print(device_cdp_neighbors)
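# Optional visualization sketch using the imported Digraph (rendering needs
# the graphviz binaries; the output file name is arbitrary):
# dot = Digraph('cdp_topology')
# for a, b in set(device_cdp_neighbors):
#     dot.edge(a, b)
# dot.render('cdp_topology', format='png', cleanup=True)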
|
'''Write a program that computes the sum of all odd numbers that are
multiples of three in the range 1 to 500.'''
soma = 0
for c in range(1, 501, 2):
if c % 3 == 0:
soma = soma + c
print(c, end=' ')
print('\nThe sum of the numbers above is {}'.format(soma))
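# The same total as a single expression: odd multiples of 3 in 1..500
# are exactly 3, 9, 15, ..., i.e. range(3, 501, 6).
assert soma == sum(range(3, 501, 6))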
|
conjunto1 = {1, 3, 7, 0, 5}
print(conjunto1, type(conjunto1))
conjunto2 = {1, 1, 2, 2, 2}
print(conjunto2)
conjunto1.add(4)
print(conjunto1)
conjunto1.discard(1)
print(conjunto1)
conjunto1 = {1, 3, 7, 0, 5}
conjunto2 = {1, 1, 2, 2, 2}
uniao = conjunto1.union(conjunto2)
print(1, uniao)
intercecao = conjunto1.intersection(conjunto2)
print(2, intercecao)
diferenca = conjunto1.difference(conjunto2)
print(3, diferenca)
diferenca_simetrica = conjunto1.symmetric_difference(conjunto2)
print(4, diferenca_simetrica)
A = {1, 2}
B = {1, 2, 3, 4}
print(5, A.issubset(B))
print(6, B.issubset(A))
print(7, B.issuperset(A)) |
# Copyright 2022 Thomas Woodruff
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class Rank(enum.Enum):
TWO, THREE, FOUR, FIVE, SIX, SEVEN, EIGHT, NINE, TEN, JACK, QUEEN, KING, ACE = range(13)
class Suit(enum.Enum):
SPADE, CLUB, DIAMOND, HEART = range(4)
class Card():
def __init__(self, rank, suit):
self.rank = rank
self.suit = suit
def __str__(self):
return f'{str(self.rank.name)} {str(self.suit.name)}s'.lower()
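# Example usage (a sketch, not part of the original module):
if __name__ == '__main__':
    deck = [Card(rank, suit) for suit in Suit for rank in Rank]
    assert len(deck) == 52
    print(deck[0])  # -> two spades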
|
#!/usr/local/bin/python
from __future__ import print_function
import sys
from st2common.runners.base_action import Action
def get_version_string(major=False, minor=False, micro=False):
"""
Return all or part of the Python version string.
    If all parameters are False-y, returns the entire, unmangled Python version
    string.
Parameters
----------
major : boolean
If True, includes the major version in the returned Python version
string
minor : boolean
If True, includes the minor version in the returned Python version
string
micro : boolean
If True, includes the micro/bugfix version in the returned Python
version string
Returns
-------
str
The constructed version string
"""
version_tuple = tuple()
if major:
version_tuple += (sys.version_info[0],)
if minor:
version_tuple += (sys.version_info[1],)
if micro:
version_tuple += (sys.version_info[2],)
if not any([major, minor, micro]):
        version_tuple = tuple(sys.version_info)
return '.'.join([str(el) for el in version_tuple])
class GetVersionStringAction(Action):
"""
StackStorm Action for the python-script runner that wraps the
get_version_string function.
Only accepts and passes the major and minor parameters.
"""
def run(self, major, minor):
return {
"version": get_version_string(major=major, minor=minor),
"major": get_version_string(major=True),
"minor": get_version_string(minor=True),
"micro": get_version_string(micro=True),
}
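# Standalone illustration of get_version_string (importing this module still
# requires st2common, but no StackStorm runtime is needed to call it):
if __name__ == '__main__':
    print(get_version_string())                        # full version string
    print(get_version_string(major=True, minor=True))  # e.g. '3.8'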
|
import unittest
from datetime import datetime
import ipywidgets as widgets
import numpy as np
from dateutil.tz import tzlocal
from ndx_grayscalevolume import GrayscaleVolume
from nwbwidgets.ophys import show_grayscale_volume, TwoPhotonSeriesWidget, show_df_over_f
from nwbwidgets.view import default_neurodata_vis_spec
from pynwb import NWBFile
from pynwb.device import Device
from pynwb.ophys import TwoPhotonSeries, OpticalChannel, ImageSegmentation, Fluorescence, DfOverF
def test_show_grayscale_volume():
vol = GrayscaleVolume(name='vol', data=np.random.rand(2700).reshape((30, 30, 3)))
assert isinstance(show_grayscale_volume(vol, default_neurodata_vis_spec), widgets.Widget)
class CalciumImagingTestCase(unittest.TestCase):
def setUp(self):
nwbfile = NWBFile('my first synthetic recording', 'EXAMPLE_ID', datetime.now(tzlocal()),
experimenter='Dr. Bilbo Baggins',
lab='Bag End Laboratory',
institution='University of Middle Earth at the Shire',
experiment_description=('I went on an adventure with thirteen '
'dwarves to reclaim vast treasures.'),
session_id='LONELYMTN')
device = Device('imaging_device_1')
nwbfile.add_device(device)
optical_channel = OpticalChannel('my_optchan', 'description', 500.)
imaging_plane = nwbfile.create_imaging_plane(
name='imgpln1',
optical_channel=optical_channel,
description='a fake ImagingPlane',
device=device,
excitation_lambda=600.,
imaging_rate=300.,
indicator='GFP',
location='somewhere in the brain',
reference_frame='unknown',
origin_coords=[10, 20],
origin_coords_unit='millimeters',
grid_spacing=[0.001, 0.001],
grid_spacing_unit='millimeters')
self.image_series = TwoPhotonSeries(name='test_iS', dimension=[2], data=np.random.rand(10, 5, 5, 3),
external_file=['images.tiff'], imaging_plane=imaging_plane,
starting_frame=[0], format='tiff', starting_time=0.0, rate=1.0)
nwbfile.add_acquisition(self.image_series)
mod = nwbfile.create_processing_module('ophys', 'contains optical physiology processed data')
img_seg = ImageSegmentation()
mod.add(img_seg)
ps = img_seg.create_plane_segmentation('output from segmenting my favorite imaging plane',
imaging_plane, 'my_planeseg', self.image_series)
w, h = 3, 3
pix_mask1 = [(0, 0, 1.1), (1, 1, 1.2), (2, 2, 1.3)]
vox_mask1 = [(0, 0, 0, 1.1), (1, 1, 1, 1.2), (2, 2, 2, 1.3)]
img_mask1 = [[0.0 for _ in range(w)] for _ in range(h)]
img_mask1[0][0] = 1.1
img_mask1[1][1] = 1.2
img_mask1[2][2] = 1.3
ps.add_roi(pixel_mask=pix_mask1, image_mask=img_mask1, voxel_mask=vox_mask1)
pix_mask2 = [(0, 0, 2.1), (1, 1, 2.2)]
vox_mask2 = [(0, 0, 0, 2.1), (1, 1, 1, 2.2)]
img_mask2 = [[0.0 for _ in range(w)] for _ in range(h)]
img_mask2[0][0] = 2.1
img_mask2[1][1] = 2.2
ps.add_roi(pixel_mask=pix_mask2, image_mask=img_mask2, voxel_mask=vox_mask2)
fl = Fluorescence()
mod.add(fl)
rt_region = ps.create_roi_table_region('the first of two ROIs', region=[0])
data = np.random.randn(10, 5)
timestamps = np.array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
rrs = fl.create_roi_response_series('my_rrs', data, rt_region, unit='lumens', timestamps=timestamps)
self.df_over_f = DfOverF(rrs)
def test_show_two_photon_series(self):
assert isinstance(TwoPhotonSeriesWidget(self.image_series, default_neurodata_vis_spec), widgets.Widget)
def test_show_df_over_f(self):
assert isinstance(show_df_over_f(self.df_over_f, default_neurodata_vis_spec), widgets.Widget)
|
#!/usr/bin/env python
#
# -------------------------------------------------------------------------
# Copyright (c) 2018 Intel Corporation Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
#
'''Solver class for constraint type hpa
Hardware Platform Awareness (HPA) constraint plugin'''
# Conductor imports
import conductor.common.prometheus_metrics as PC
from conductor.i18n import _LE, _LI
from conductor.solver.optimizer.constraints import constraint
# Third-party library imports
from oslo_log import log
LOG = log.getLogger(__name__)
class HPA(constraint.Constraint):
def __init__(self, _name, _type, _demand_list, _priority=0,
_properties=None):
constraint.Constraint.__init__(
self, _name, _type, _demand_list, _priority)
self.properties = _properties
def solve(self, _decision_path, _candidate_list, _request):
'''
Solver for HPA constraint type.
:param _decision_path: decision tree
:param _candidate_list: List of candidates
:param _request: solver request
:return: candidate_list with hpa features and flavor label mapping
'''
# call conductor engine with request parameters
cei = _request.cei
demand_name = _decision_path.current_demand.name
LOG.info(_LI("Solving constraint type '{}' for demand - [{}]").format(
self.constraint_type, demand_name))
vm_label_list = self.properties.get('evaluate')
for vm_demand in vm_label_list:
id = vm_demand['id']
type = vm_demand['type']
directives = vm_demand['directives']
flavorProperties = vm_demand['flavorProperties']
response = (cei.get_candidates_with_hpa(id,
type,
directives,
_candidate_list,
flavorProperties))
_candidate_list = response
if not response:
LOG.error(_LE("No matching candidates for HPA exists").format(
id))
# Metrics to Prometheus
PC.HPA_CLOUD_REGION_UNSUCCESSFUL.labels('ONAP', 'N/A',
'ALL').inc()
break
# No need to continue.
return _candidate_list
|
"""add_collection_id_2
Revision ID: 413c84a833f5
Revises: bdfc23d8334c
Create Date: 2020-01-13 13:59:44.130399
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '413c84a833f5'
down_revision = 'bdfc23d8334c'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
res = conn.execute("SELECT 1 FROM information_schema.tables WHERE table_name = 'record_with_collection'")
if res.scalar():
op.execute(
"UPDATE record SET collection_id = record_with_collection.collection_id "
"FROM record_with_collection WHERE record.id = record_with_collection.id")
res = conn.execute("SELECT 1 FROM information_schema.tables WHERE table_name = 'release_with_collection'")
if res.scalar():
op.execute(
"UPDATE release SET collection_id = release_with_collection.collection_id "
"FROM release_with_collection WHERE release.id = release_with_collection.id")
res = conn.execute("SELECT 1 FROM information_schema.tables WHERE table_name = 'compiled_release_with_collection'")
if res.scalar():
op.execute(
"UPDATE compiled_release SET collection_id = compiled_release_with_collection.collection_id "
"FROM compiled_release_with_collection WHERE compiled_release.id = compiled_release_with_collection.id")
op.alter_column('record', 'collection_id', nullable=False)
op.alter_column('release', 'collection_id', nullable=False)
op.alter_column('compiled_release', 'collection_id', nullable=False)
def downgrade():
op.alter_column('record', 'collection_id', nullable=True)
op.alter_column('release', 'collection_id', nullable=True)
op.alter_column('compiled_release', 'collection_id', nullable=True)
|
#! /usr/bin/env python
# coding:utf8
from .test import *
from .market import *
urls_bp = [
(TestView.as_view(), "test/"),
(MarketPriceView.as_view(), "api/market/price/"),
]
|
for a in range(1,10):
    for b in range(1,10):
        print('{}, {}'.format(a, b))
|
#!/usr/bin/env python3
import rospy
from std_msgs.msg import String
import requests
key = "トークン"
def callback(msg):
rospy.loginfo("Received a string")
print(msg)
message = msg.data
tweetdata = "_t=%s&msg=%s" % (key,message)
response = requests.post('http://stewgate-u.appspot.com/api/post/', data=tweetdata)
print(response)
def listener():
rospy.init_node('without_oauth', anonymous=True)
rospy.Subscriber("tweeter", String, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
listener()
|
import math
import config
def get_map_center():
"""Returns center of the map"""
lat = (config.MAP_END[0] + config.MAP_START[0]) / 2
lon = (config.MAP_END[1] + config.MAP_START[1]) / 2
return lat, lon
def get_start_coords(worker_no):
"""Returns center of square for given worker"""
grid = config.GRID
total_workers = grid[0] * grid[1]
    per_column = total_workers // grid[0]
    column = worker_no % per_column
    row = worker_no // per_column
part_lat = (config.MAP_END[0] - config.MAP_START[0]) / float(grid[0])
part_lon = (config.MAP_END[1] - config.MAP_START[1]) / float(grid[1])
start_lat = config.MAP_START[0] + part_lat * row + part_lat / 2
start_lon = config.MAP_START[1] + part_lon * column + part_lon / 2
return start_lat, start_lon
def float_range(start, end, step):
"""xrange for floats, also capable of iterating backwards"""
if start > end:
while end < start:
yield start
start += -step
else:
while start < end:
yield start
start += step
def get_points_per_worker():
"""Returns all points that should be visited for whole grid"""
total_workers = config.GRID[0] * config.GRID[1]
lat_gain = getattr(config, 'LAT_GAIN', 0.0015)
lon_gain = getattr(config, 'LON_GAIN', 0.0025)
points = [[] for _ in range(total_workers)]
total_rows = math.ceil(
abs(config.MAP_START[0] - config.MAP_END[0]) / lat_gain
)
total_columns = math.ceil(
abs(config.MAP_START[1] - config.MAP_END[1]) / lon_gain
)
for map_row, lat in enumerate(
float_range(config.MAP_START[0], config.MAP_END[0], lat_gain)
):
for map_col, lon in enumerate(
float_range(config.MAP_START[1], config.MAP_END[1], lon_gain)
):
# Figure out which worker this should go to
grid_row = int(map_row / float(total_rows) * config.GRID[0])
grid_col = int(map_col / float(total_columns) * config.GRID[1])
worker_no = grid_row * config.GRID[1] + grid_col
points[worker_no].append((lat, lon))
return points
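# Usage sketch (depends on the user's config module defining MAP_START,
# MAP_END and GRID):
# points = get_points_per_worker()
# for worker_no, worker_points in enumerate(points):
#     print(worker_no, len(worker_points))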
|
""" Prepare air quality and data for single city """
import os
import numpy as np
import pandas as pd
def assign_class(df, col, threshs):
df['class'] = np.zeros(len(df))
for i, thresh in enumerate(threshs):
high = (df[col] >= thresh)
        df.loc[high, 'class'] = i + 1
return df
def lag(df, column, lags=[1]):
for lag in lags:
df[column + '_' + str(lag)] = df[column].shift(lag)
return df
def load_data(city):
df1 = load_pollutants(city)
df2 = load_weather(city)
df3 = load_pm25(city)
df = pd.concat([df1, df2, df3], axis=1)
df = df.dropna(axis='index')
return df
def load_pm25(city):
filename = 'data/' + city.lower().replace(' ', '-') + '-pm25.csv'
if os.path.exists(filename):
df = pd.read_csv(filename)
df = df.set_index(pd.to_datetime(df['Date']))
else:
df = pd.DataFrame([])
return df
def load_pollutants(city):
df = pd.read_csv('data/pollution_us_2000_2016.csv')
df = df[df.City == city]
df = df[['Date Local','NO2 AQI','O3 AQI','SO2 AQI','CO AQI']]
df = df.dropna(axis='index')
df = df.set_index(pd.to_datetime(df['Date Local'], format='%Y-%m-%d'))
    df = df.groupby(pd.Grouper(freq='D')).mean()
return df
def load_precipitation(city):
filename = 'data/' + city.lower().replace(' ', '-') + '-precip.csv'
df = pd.read_csv(filename)
return df
def load_weather(city):
files = ['data/temperature.csv', 'data/humidity.csv',
'data/wind_speed.csv', 'data/wind_direction.csv']
names = ['temp', 'hum', 'wind_spd', 'wind_dir']
dfs = []
for name, f in zip(names, files):
temp = pd.read_csv(f)[['datetime', city]]
temp.columns = ['datetime', name]
temp['datetime'] = pd.to_datetime(temp['datetime'])
temp = temp.set_index('datetime')
dfs.append(temp)
df = pd.concat(dfs, axis=1)
    df_mean = df.groupby(pd.Grouper(freq='D')).mean()
df_mean.columns = ['temp_mean', 'hum_mean', 'ws_mean', 'wd_mean']
    df_min = df.groupby(pd.Grouper(freq='D')).min()[['temp']]
df_min.columns = ['temp_min']
    df_max = df.groupby(pd.Grouper(freq='D')).max()[['temp', 'hum', 'wind_spd']]
df_max.columns = ['temp_max', 'hum_max', 'ws_max']
df = pd.concat([df_mean, df_min, df_max], axis=1)
return df
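# Quick illustration of lag() with synthetic data (a sketch):
if __name__ == '__main__':
    demo = pd.DataFrame({'pm25': [1.0, 2.0, 3.0]})
    print(lag(demo, 'pm25', lags=[1]))  # adds a shifted 'pm25_1' column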
|
def show_description(path):
if 'Simulation_1' in path:
print('Simulation_1: Straight line drive on a wavey 2D surface')
if 'Simulation_2' in path:
print('Simulation_2: Left turn drive on a wavey 2D surface')
if 'Simulation_3' in path:
print('Simulation_3: Straight line drive on a two phase-shifted 2D surfaces')
if 'Simulation_4' in path:
print('Simulation_4: Straight line drive on a complex 3D surface (slow)')
if 'Simulation_5' in path:
print('Simulation_5: Complex path drive on a simplified 3D surface')
if 'Simulation_6' in path:
print('Simulation_6: Straight line drive on a wavey surface with obstacles (sphere based)')
if 'Simulation_7' in path:
print('Simulation_7: Straight line drive on a wavey surface with obstacles (STL based)') |
#!/usr/bin/python
import sys
from fps import py2cpp
def main():
    t1 = py2cpp.Type('uint64_t')
    t2 = py2cpp.Type('std::string')
    t3 = py2cpp.Type('std::map<std::string, std::vector<std::string>>')
    td1 = py2cpp.Typedef(t1, 'my_uint64_t')
    td2 = py2cpp.Typedef(t2, 'my_string_t')
    td3 = py2cpp.Typedef(t3, 'my_map_t')
    print(td1)
    print(td2)
    print(td3)
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
import pybamm
import numpy as np
import pandas as pd
import os
import unittest
class TestSimulation(unittest.TestCase):
def test_simple_model(self):
model = pybamm.BaseModel()
v = pybamm.Variable("v")
a = pybamm.Parameter("a")
model.rhs = {v: -a * v}
model.initial_conditions = {v: 1}
param = pybamm.ParameterValues({"a": 1})
sim = pybamm.Simulation(model, parameter_values=param)
sol = sim.solve([0, 1])
np.testing.assert_array_almost_equal(sol.y.full()[0], np.exp(-sol.t), decimal=5)
def test_basic_ops(self):
model = pybamm.lithium_ion.SPM()
sim = pybamm.Simulation(model)
self.assertEqual(model.__class__, sim._model_class)
# check that the model is unprocessed
self.assertEqual(sim._mesh, None)
self.assertEqual(sim._disc, None)
for val in list(sim.model.rhs.values()):
self.assertTrue(val.has_symbol_of_classes(pybamm.Parameter))
self.assertFalse(val.has_symbol_of_classes(pybamm.Matrix))
sim.set_parameters()
self.assertEqual(sim._mesh, None)
self.assertEqual(sim._disc, None)
for val in list(sim.model_with_set_params.rhs.values()):
self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))
self.assertFalse(val.has_symbol_of_classes(pybamm.Matrix))
# Make sure model is unchanged
self.assertNotEqual(sim.model, model)
for val in list(model.rhs.values()):
self.assertTrue(val.has_symbol_of_classes(pybamm.Parameter))
self.assertFalse(val.has_symbol_of_classes(pybamm.Matrix))
sim.build()
self.assertFalse(sim._mesh is None)
self.assertFalse(sim._disc is None)
for val in list(sim.built_model.rhs.values()):
self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))
# skip test for scalar variables (e.g. discharge capacity)
if val.size > 1:
self.assertTrue(val.has_symbol_of_classes(pybamm.Matrix))
def test_specs_deprecated(self):
model = pybamm.lithium_ion.SPM()
sim = pybamm.Simulation(model)
with self.assertRaisesRegex(NotImplementedError, "specs"):
sim.specs()
def test_solve(self):
sim = pybamm.Simulation(pybamm.lithium_ion.SPM())
sim.solve([0, 600])
self.assertFalse(sim._solution is None)
for val in list(sim.built_model.rhs.values()):
self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))
# skip test for scalar variables (e.g. discharge capacity)
if val.size > 1:
self.assertTrue(val.has_symbol_of_classes(pybamm.Matrix))
# test solve without check
sim = pybamm.Simulation(pybamm.lithium_ion.SPM())
sol = sim.solve(t_eval=[0, 600], check_model=False)
for val in list(sim.built_model.rhs.values()):
self.assertFalse(val.has_symbol_of_classes(pybamm.Parameter))
# skip test for scalar variables (e.g. discharge capacity)
if val.size > 1:
self.assertTrue(val.has_symbol_of_classes(pybamm.Matrix))
# Test options that are only available when simulating an experiment
with self.assertRaisesRegex(ValueError, "save_at_cycles"):
sim.solve(save_at_cycles=2)
with self.assertRaisesRegex(ValueError, "starting_solution"):
sim.solve(starting_solution=sol)
def test_solve_non_battery_model(self):
model = pybamm.BaseModel()
v = pybamm.Variable("v")
model.rhs = {v: -v}
model.initial_conditions = {v: 1}
model.variables = {"v": v}
sim = pybamm.Simulation(
model, solver=pybamm.ScipySolver(rtol=1e-10, atol=1e-10)
)
sim.solve(np.linspace(0, 1, 100))
np.testing.assert_array_equal(sim.solution.t, np.linspace(0, 1, 100))
np.testing.assert_array_almost_equal(
sim.solution["v"].entries, np.exp(-np.linspace(0, 1, 100))
)
def test_solve_already_partially_processed_model(self):
model = pybamm.lithium_ion.SPM()
# Process model manually
geometry = model.default_geometry
param = model.default_parameter_values
param.process_model(model)
param.process_geometry(geometry)
# Let simulation take over
sim = pybamm.Simulation(model)
sim.solve([0, 600])
# Discretised manually
mesh = pybamm.Mesh(geometry, model.default_submesh_types, model.default_var_pts)
disc = pybamm.Discretisation(mesh, model.default_spatial_methods)
disc.process_model(model)
# Let simulation take over
sim = pybamm.Simulation(model)
sim.solve([0, 600])
def test_reuse_commands(self):
sim = pybamm.Simulation(pybamm.lithium_ion.SPM())
sim.set_parameters()
sim.set_parameters()
sim.build()
sim.build()
sim.solve([0, 600])
sim.solve([0, 600])
sim.build()
sim.solve([0, 600])
sim.set_parameters()
def test_set_crate(self):
model = pybamm.lithium_ion.SPM()
current_1C = model.default_parameter_values["Current function [A]"]
sim = pybamm.Simulation(model, C_rate=2)
self.assertEqual(sim.parameter_values["Current function [A]"], 2 * current_1C)
self.assertEqual(sim.C_rate, 2)
def test_set_external_variable(self):
model_options = {
"thermal": "lumped",
"external submodels": ["thermal", "negative particle"],
}
model = pybamm.lithium_ion.SPMe(model_options)
sim = pybamm.Simulation(model)
Nr = model.default_var_pts["r_n"]
T_av = 0
c_s_n_av = np.ones((Nr, 1)) * 0.5
external_variables = {
"Volume-averaged cell temperature": T_av,
"X-averaged negative particle concentration": c_s_n_av,
}
# Step
dt = 0.1
for _ in range(5):
sim.step(dt, external_variables=external_variables)
sim.plot(testing=True)
# Solve
t_eval = np.linspace(0, 3600)
sim.solve(t_eval, external_variables=external_variables)
sim.plot(testing=True)
def test_step(self):
dt = 0.001
model = pybamm.lithium_ion.SPM()
sim = pybamm.Simulation(model)
sim.step(dt) # 1 step stores first two points
tau = sim.model.timescale.evaluate()
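# (solution times are stored non-dimensionally, hence the division by the
# model timescale tau in the assertions below)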
self.assertEqual(sim.solution.t.size, 2)
self.assertEqual(sim.solution.y.full()[0, :].size, 2)
self.assertEqual(sim.solution.t[0], 0)
self.assertEqual(sim.solution.t[1], dt / tau)
saved_sol = sim.solution
sim.step(dt) # automatically append the next step
self.assertEqual(sim.solution.t.size, 3)
self.assertEqual(sim.solution.y.full()[0, :].size, 3)
self.assertEqual(sim.solution.t[0], 0)
self.assertEqual(sim.solution.t[1], dt / tau)
self.assertEqual(sim.solution.t[2], 2 * dt / tau)
sim.step(dt, save=False) # now only store the two end step points
self.assertEqual(sim.solution.t.size, 2)
self.assertEqual(sim.solution.y.full()[0, :].size, 2)
self.assertEqual(sim.solution.t[0], 2 * dt / tau)
self.assertEqual(sim.solution.t[1], 3 * dt / tau)
# Start from saved solution
sim.step(
dt, starting_solution=saved_sol
) # now only store the two end step points
self.assertEqual(sim.solution.t.size, 3)
self.assertEqual(sim.solution.y.full()[0, :].size, 3)
self.assertEqual(sim.solution.t[0], 0)
self.assertEqual(sim.solution.t[1], dt / tau)
self.assertEqual(sim.solution.t[2], 2 * dt / tau)
def test_solve_with_initial_soc(self):
model = pybamm.lithium_ion.SPM()
param = model.default_parameter_values
sim = pybamm.Simulation(model, parameter_values=param)
sim.solve(t_eval=[0, 600], initial_soc=1)
self.assertEqual(sim._built_initial_soc, 1)
sim.solve(t_eval=[0, 600], initial_soc=0.5)
self.assertEqual(sim._built_initial_soc, 0.5)
exp = pybamm.Experiment(["Discharge at 1C until 3.6V (1 minute period)"])
sim = pybamm.Simulation(model, parameter_values=param, experiment=exp)
sim.solve(initial_soc=0.8)
self.assertEqual(sim._built_initial_soc, 0.8)
# test with drive cycle
drive_cycle = pd.read_csv(
os.path.join("pybamm", "input", "drive_cycles", "US06.csv"),
comment="#",
header=None,
).to_numpy()
timescale = param.evaluate(model.timescale)
current_interpolant = pybamm.Interpolant(
drive_cycle[:, 0], drive_cycle[:, 1], timescale * pybamm.t
)
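# (pybamm.t is dimensionless in these models, so it is multiplied by the
# evaluated timescale to match the drive-cycle time data, which is in seconds)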
param["Current function [A]"] = current_interpolant
sim = pybamm.Simulation(model, parameter_values=param)
sim.solve(initial_soc=0.8)
self.assertEqual(sim._built_initial_soc, 0.8)
def test_solve_with_inputs(self):
model = pybamm.lithium_ion.SPM()
param = model.default_parameter_values
param.update({"Current function [A]": "[input]"})
sim = pybamm.Simulation(model, parameter_values=param)
sim.solve(t_eval=[0, 600], inputs={"Current function [A]": 1})
np.testing.assert_array_equal(
sim.solution.all_inputs[0]["Current function [A]"], 1
)
def test_step_with_inputs(self):
dt = 0.001
model = pybamm.lithium_ion.SPM()
param = model.default_parameter_values
param.update({"Current function [A]": "[input]"})
sim = pybamm.Simulation(model, parameter_values=param)
sim.step(
dt, inputs={"Current function [A]": 1}
) # 1 step stores first two points
tau = sim.model.timescale.evaluate()
self.assertEqual(sim.solution.t.size, 2)
self.assertEqual(sim.solution.y.full()[0, :].size, 2)
self.assertEqual(sim.solution.t[0], 0)
self.assertEqual(sim.solution.t[1], dt / tau)
np.testing.assert_array_equal(
sim.solution.all_inputs[0]["Current function [A]"], 1
)
sim.step(
dt, inputs={"Current function [A]": 2}
) # automatically append the next step
self.assertEqual(sim.solution.t.size, 3)
self.assertEqual(sim.solution.y.full()[0, :].size, 3)
self.assertEqual(sim.solution.t[0], 0)
self.assertEqual(sim.solution.t[1], dt / tau)
self.assertEqual(sim.solution.t[2], 2 * dt / tau)
np.testing.assert_array_equal(
sim.solution.all_inputs[1]["Current function [A]"], 2
)
def test_save_load(self):
model = pybamm.lead_acid.LOQS()
model.use_jacobian = True
sim = pybamm.Simulation(model)
sim.save("test.pickle")
sim_load = pybamm.load_sim("test.pickle")
self.assertEqual(sim.model.name, sim_load.model.name)
# save after solving
sim.solve([0, 600])
sim.save("test.pickle")
sim_load = pybamm.load_sim("test.pickle")
self.assertEqual(sim.model.name, sim_load.model.name)
# with python formats
model.convert_to_format = None
sim = pybamm.Simulation(model)
sim.solve([0, 600])
sim.save("test.pickle")
model.convert_to_format = "python"
sim = pybamm.Simulation(model)
sim.solve([0, 600])
with self.assertRaisesRegex(
NotImplementedError, "Cannot save simulation if model format is python"
):
sim.save("test.pickle")
def test_load_param(self):
# Test load_sim for parameters imports
filename = f"{uuid.uuid4()}.p"
model = pybamm.lithium_ion.SPM()
params = pybamm.ParameterValues("Chen2020")
sim = pybamm.Simulation(model, parameter_values=params)
sim.solve([0, 3600])
sim.save(filename)
try:
pkl_obj = pybamm.load_sim(os.path.join(filename))
except Exception as excep:
os.remove(filename)
raise excep
self.assertEqual(
"graphite_LGM50_electrolyte_exchange_current_density_Chen2020",
pkl_obj.parameter_values[
"Negative electrode exchange-current density [A.m-2]"
].__name__,
)
os.remove(filename)
def test_save_load_dae(self):
model = pybamm.lead_acid.LOQS({"surface form": "algebraic"})
model.use_jacobian = True
sim = pybamm.Simulation(model)
# save after solving
sim.solve([0, 600])
sim.save("test.pickle")
sim_load = pybamm.load_sim("test.pickle")
self.assertEqual(sim.model.name, sim_load.model.name)
# with python format
model.convert_to_format = None
sim = pybamm.Simulation(model)
sim.solve([0, 600])
sim.save("test.pickle")
# with Casadi solver
model.convert_to_format = "casadi"
sim = pybamm.Simulation(model, solver=pybamm.CasadiSolver())
sim.solve([0, 600])
sim.save("test.pickle")
sim_load = pybamm.load_sim("test.pickle")
self.assertEqual(sim.model.name, sim_load.model.name)
def test_plot(self):
sim = pybamm.Simulation(pybamm.lithium_ion.SPM())
# test exception if not solved
with self.assertRaises(ValueError):
sim.plot()
# now solve and plot
t_eval = np.linspace(0, 100, 5)
sim.solve(t_eval=t_eval)
sim.plot(testing=True)
# test quick_plot_vars deprecation error
with self.assertRaisesRegex(NotImplementedError, "'quick_plot_vars'"):
sim.plot(quick_plot_vars=["var"])
def test_create_gif(self):
sim = pybamm.Simulation(pybamm.lithium_ion.SPM())
sim.solve(t_eval=[0, 10])
# create a GIF without calling the plot method
sim.create_gif(number_of_images=3, duration=1)
# call the plot method before creating the GIF
sim.plot(testing=True)
sim.create_gif(number_of_images=3, duration=1)
os.remove("plot.gif")
def test_drive_cycle_data(self):
model = pybamm.lithium_ion.SPM()
param = model.default_parameter_values
param["Current function [A]"] = "[current data]US06"
with self.assertRaisesRegex(NotImplementedError, "Drive cycle from data"):
pybamm.Simulation(model, parameter_values=param)
def test_drive_cycle_interpolant(self):
model = pybamm.lithium_ion.SPM()
param = model.default_parameter_values
# Import drive cycle from file
drive_cycle = pd.read_csv(
pybamm.get_parameters_filepath(
os.path.join("input", "drive_cycles", "US06.csv")
),
comment="#",
skip_blank_lines=True,
header=None,
).to_numpy()
timescale = param.evaluate(model.timescale)
current_interpolant = pybamm.Interpolant(
drive_cycle[:, 0], drive_cycle[:, 1], timescale * pybamm.t
)
param["Current function [A]"] = current_interpolant
time_data = drive_cycle[:, 0]
sim = pybamm.Simulation(model, parameter_values=param)
# check solution is returned at the times in the data
sim.solve()
tau = sim.model.timescale.evaluate()
np.testing.assert_array_almost_equal(sim.solution.t, time_data / tau)
# check warning raised if the largest gap in t_eval is bigger than the
# smallest gap in the data
with self.assertWarns(pybamm.SolverWarning):
sim.solve(t_eval=np.linspace(0, 1, 100))
# check warning raised if t_eval doesn't contain time_data, but has a finer
# resolution (can still solve, but good for users to know they don't have
# the solution returned at the data points)
with self.assertWarns(pybamm.SolverWarning):
sim.solve(t_eval=np.linspace(0, time_data[-1], 800))
def test_discontinuous_current(self):
def car_current(t):
current = (
1 * (t >= 0) * (t <= 1000)
- 0.5 * (1000 < t) * (t <= 2000)
+ 0.5 * (2000 < t)
)
return current
model = pybamm.lithium_ion.DFN()
param = model.default_parameter_values
param["Current function [A]"] = car_current
sim = pybamm.Simulation(
model, parameter_values=param, solver=pybamm.CasadiSolver(mode="fast")
)
sim.solve([0, 3600])
current = sim.solution["Current [A]"]
self.assertEqual(current(0), 1)
self.assertEqual(current(1500), -0.5)
self.assertEqual(current(3000), 0.5)
def test_t_eval(self):
model = pybamm.lithium_ion.SPM()
sim = pybamm.Simulation(model)
# test no t_eval
with self.assertRaisesRegex(pybamm.SolverError, "'t_eval' must be provided"):
sim.solve()
# test t_eval list of length != 2
with self.assertRaisesRegex(pybamm.SolverError, "'t_eval' can be provided"):
sim.solve(t_eval=[0, 1, 2])
# test that a two-element list gets turned into np.linspace(t0, tf, 100)
sim.solve(t_eval=[0, 10])
np.testing.assert_array_almost_equal(
sim.solution.t * sim.solution.timescale_eval, np.linspace(0, 10, 100)
)
def test_battery_model_with_input_height(self):
parameter_values = pybamm.ParameterValues("Marquis2019")
# Pass the "timescale" option since we are making electrode height an input
timescale = parameter_values.evaluate(pybamm.LithiumIonParameters().timescale)
model = pybamm.lithium_ion.SPM({"timescale": timescale})
parameter_values.update({"Electrode height [m]": "[input]"})
# solve model for 1 minute
t_eval = np.linspace(0, 60, 11)
inputs = {"Electrode height [m]": 0.2}
sim = pybamm.Simulation(model=model, parameter_values=parameter_values)
sim.solve(t_eval=t_eval, inputs=inputs)
if __name__ == "__main__":
print("Add -v for more debug output")
if "-v" in sys.argv:
debug = True
unittest.main()
|
import numpy as np
import cv2
import time
print("""
BE PREPARE YOU WILL BE INVISIBLE SOON............
""")
if __name__ == '__main__':
cap = cv2.VideoCapture(0)
#For capturing output video
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('invisibleYou.avi' , fourcc, 20.0, (640,480))
time.sleep(2)
background = 0
#capturing background
for i in range(30):
ret, background = cap.read()
#capturing image
while(cap.isOpened()):
ret, img = cap.read()
if not ret:
break
# HSV stands for Hue, Saturation, Value
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# You can change the color values below to match the color of your cloth
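# (Note: red wraps around the hue axis in OpenCV's 0-180 hue scale, so two
# ranges, one near 0 and one near 180, are combined into a single mask)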
lower_red = np.array([0,120,70])
upper_red = np.array([10,255,255])
mask1 = cv2.inRange(hsv , lower_red , upper_red)
lower_red = np.array([170,120,70])
upper_red = np.array([180,255,255])
mask2 = cv2.inRange(hsv , lower_red , upper_red)
mask1 = mask1 + mask2
# Clean the mask with morphological opening, then dilate to close small holes
# (the original assigned the dilation to mask2 and immediately discarded it)
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8), iterations=2)
mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3, 3), np.uint8), iterations=1)
mask2 = cv2.bitwise_not(mask1)
#Generating the final output
res1 = cv2.bitwise_and(background, background, mask=mask1)
res2 = cv2.bitwise_and(img, img, mask=mask2)
final_output = cv2.addWeighted(res1, 1, res2, 1, 0)
out.write(final_output)  # write the composited frame (assumes a 640x480 capture to match the VideoWriter)
cv2.imshow('Invisibility Game', final_output)
k=cv2.waitKey(10)
if k==27:
print("Escape hit, closing...")
break
cap.release()
out.release()
cv2.destroyAllWindows()
|
from test_DBSCAN import *
|
name = input("Enter your name: ")
age = input("Enter your age: ")
print("Hello " + name + " ! Your are " + age + " Years old now.")
num1 = input("Enter a number: ")
num2 = input("Enter another number: ")
result = num1 + num2
print(result)  # input() returns strings, so this prints the two inputs concatenated
# We need to cast num1 and num2 to int to add them numerically
result = int(num1) + int(num2)
print(result)
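# A hedged aside (not part of the original exercise): int() raises ValueError
# on non-numeric text, so a defensive version of the cast could look like this:
try:
    total = int(num1) + int(num2)
    print(total)
except ValueError:
    print("Please enter whole numbers only.")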
# Adding two float numbers
num1 = input("Enter a number: ")
num2 = input("Enter another number: ")
result = float(num1) + float(num2)
print(result)
|
from HABApp.openhab.items import StringItem, GroupItem
from HABAppTests import TestBaseRule, OpenhabTmpItem
class OpenhabItems(TestBaseRule):
def __init__(self):
super().__init__()
self.add_test('ApiDoc', self.test_api)
self.add_test('MemberTags', self.test_tags)
self.add_test('MemberGroups', self.test_groups)
def test_api(self):
with OpenhabTmpItem('String') as item:
self.openhab.get_item(item.name)
@OpenhabTmpItem.use('String', arg_name='oh_item')
def test_tags(self, oh_item: OpenhabTmpItem):
oh_item.create_item(tags=['tag1', 'tag2'])
item = StringItem.get_item(oh_item.name)
assert item.tags == {'tag1', 'tag2'}
oh_item.modify(tags=['tag1', 'tag4'])
assert item.tags == {'tag1', 'tag4'}
oh_item.modify()
assert item.tags == set()
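# (Explanatory note, an assumption about the test helpers: the decorators
# below create temporary openHAB items that exist only for this test run)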
@OpenhabTmpItem.use('String', arg_name='oh_item')
@OpenhabTmpItem.create('Group', 'group1')
@OpenhabTmpItem.create('Group', 'group2')
def test_groups(self, oh_item: OpenhabTmpItem):
grp1 = GroupItem.get_item('group1')
grp2 = GroupItem.get_item('group2')
assert grp1.members == tuple()
assert grp2.members == tuple()
oh_item.create_item(groups=['group1'])
item = StringItem.get_item(oh_item.name)
assert item.groups == {'group1'}
assert grp1.members == (item, )
oh_item.modify(groups=['group1', 'group2'])
assert item.groups == {'group1', 'group2'}
assert grp1.members == (item, )
assert grp2.members == (item, )
oh_item.modify()
assert item.groups == set()
assert grp1.members == tuple()
assert grp2.members == tuple()
OpenhabItems()
|
x = set()
for i in range(int(input())):
x.add(input())
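# A set keeps only distinct values, so len(x) is the number of unique lines entered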
print(len(x))
|
from typing import List
from rlbot.utils.structures.game_data_struct import GameTickPacket
from choreography.drone import Drone
class Choreography:
def __init__(self):
self.sequence = []
self.sequence_index = 0
self.finished = False
def step(self, packet: GameTickPacket, drones: List[Drone]):
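# Perform the current step each tick and advance to the next one
# once it reports that it has finished.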
if self.sequence_index < len(self.sequence):
step = self.sequence[self.sequence_index]
result = step.perform(packet, drones)
if result.finished:
self.sequence_index += 1
else:
self.finished = True
def generate_sequence(self):
pass
@staticmethod
def get_num_bots():
raise NotImplementedError
|
# -*- coding: utf-8 -*-
from gitlint.tests.base import BaseTestCase
from gitlint.rules import RuleViolation
from gitlint.contrib.rules.signedoff_by import SignedOffBy
from gitlint.config import LintConfig
class ContribSignedOffByTests(BaseTestCase):
def test_enable(self):
# Test that rule can be enabled in config
for rule_ref in ['CC1', 'contrib-body-requires-signed-off-by']:
config = LintConfig()
config.contrib = [rule_ref]
self.assertIn(SignedOffBy(), config.rules)
def test_signedoff_by(self):
# No violations when 'Signed-Off-By' line is present
rule = SignedOffBy()
violations = rule.validate(self.gitcommit(u"Föobar\n\nMy Body\nSigned-Off-By: John Smith"))
self.assertListEqual([], violations)
# Assert violation when no 'Signed-Off-By' line is present
violations = rule.validate(self.gitcommit(u"Föobar\n\nMy Body"))
expected_violation = RuleViolation("CC1", "Body does not contain a 'Signed-Off-By' line", line_nr=1)
self.assertListEqual(violations, [expected_violation])
# Assert violation when 'Signed-Off-By' is in the title but not in the body
violations = rule.validate(self.gitcommit(u"Signed-Off-By\n\nFöobar"))
self.assertListEqual(violations, [expected_violation])
|