| column | dtype | stats |
|---|---|---|
| max_stars_repo_path | string | lengths 4–286 |
| max_stars_repo_name | string | lengths 5–119 |
| max_stars_count | int64 | 0–191k |
| id | string | lengths 1–7 |
| content | string | lengths 6–1.03M |
| content_cleaned | string | lengths 6–1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03–1 |
| comments | string | lengths 0–556k |
| edu_score | float64 | 0.32–5.03 |
| edu_int_score | int64 | 0–5 |

Each sample row below lists its cells in this same order, separated by `|`: max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | content_cleaned | language | language_score | comments | edu_score | edu_int_score.
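For reference, a minimal sketch of how a table with this schema might be loaded and filtered with pandas. The file name `samples.parquet` and the filter values are assumptions chosen for illustration, not part of the dataset itself.

```python
# Minimal sketch: load a table with the columns above and keep well-scored samples.
# Assumptions: the data is stored locally as "samples.parquet"; the filter values
# (language "en", edu_int_score >= 3) are illustrative, mirroring values seen in the rows.
import pandas as pd

df = pd.read_parquet("samples.parquet")

# Keep rows whose comments were detected as English and whose integer edu score is high.
keep = df[(df["language"] == "en") & (df["edu_int_score"] >= 3)]

# Inspect one sample: repository path, star count, and the start of the cleaned source.
sample = keep.iloc[0]
print(sample["max_stars_repo_path"], sample["max_stars_count"])
print(sample["content_cleaned"][:300])
```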
toontown/fishing/FishCollection.py | TheFamiliarScoot/open-toontown | 99 | 9200 | from . import FishBase
from . import FishGlobals
class FishCollection:
def __init__(self):
self.fishList = []
def __len__(self):
return len(self.fishList)
def getFish(self):
return self.fishList
def makeFromNetLists(self, genusList, speciesList, weightList):
self.fishList = []
for genus, species, weight in zip(genusList, speciesList, weightList):
self.fishList.append(FishBase.FishBase(genus, species, weight))
def getNetLists(self):
genusList = []
speciesList = []
weightList = []
for fish in self.fishList:
genusList.append(fish.getGenus())
speciesList.append(fish.getSpecies())
weightList.append(fish.getWeight())
return [genusList, speciesList, weightList]
def hasFish(self, genus, species):
for fish in self.fishList:
if fish.getGenus() == genus and fish.getSpecies() == species:
return 1
return 0
def hasGenus(self, genus):
for fish in self.fishList:
if fish.getGenus() == genus:
return 1
return 0
def __collect(self, newFish, updateCollection):
for fish in self.fishList:
if fish.getGenus() == newFish.getGenus() and fish.getSpecies() == newFish.getSpecies():
if fish.getWeight() < newFish.getWeight():
if updateCollection:
fish.setWeight(newFish.getWeight())
return FishGlobals.COLLECT_NEW_RECORD
else:
return FishGlobals.COLLECT_NO_UPDATE
if updateCollection:
self.fishList.append(newFish)
return FishGlobals.COLLECT_NEW_ENTRY
def collectFish(self, newFish):
return self.__collect(newFish, updateCollection=1)
def getCollectResult(self, newFish):
return self.__collect(newFish, updateCollection=0)
def __str__(self):
numFish = len(self.fishList)
txt = 'Fish Collection (%s fish):' % numFish
for fish in self.fishList:
txt += '\n' + str(fish)
return txt
| from . import FishBase
from . import FishGlobals
class FishCollection:
def __init__(self):
self.fishList = []
def __len__(self):
return len(self.fishList)
def getFish(self):
return self.fishList
def makeFromNetLists(self, genusList, speciesList, weightList):
self.fishList = []
for genus, species, weight in zip(genusList, speciesList, weightList):
self.fishList.append(FishBase.FishBase(genus, species, weight))
def getNetLists(self):
genusList = []
speciesList = []
weightList = []
for fish in self.fishList:
genusList.append(fish.getGenus())
speciesList.append(fish.getSpecies())
weightList.append(fish.getWeight())
return [genusList, speciesList, weightList]
def hasFish(self, genus, species):
for fish in self.fishList:
if fish.getGenus() == genus and fish.getSpecies() == species:
return 1
return 0
def hasGenus(self, genus):
for fish in self.fishList:
if fish.getGenus() == genus:
return 1
return 0
def __collect(self, newFish, updateCollection):
for fish in self.fishList:
if fish.getGenus() == newFish.getGenus() and fish.getSpecies() == newFish.getSpecies():
if fish.getWeight() < newFish.getWeight():
if updateCollection:
fish.setWeight(newFish.getWeight())
return FishGlobals.COLLECT_NEW_RECORD
else:
return FishGlobals.COLLECT_NO_UPDATE
if updateCollection:
self.fishList.append(newFish)
return FishGlobals.COLLECT_NEW_ENTRY
def collectFish(self, newFish):
return self.__collect(newFish, updateCollection=1)
def getCollectResult(self, newFish):
return self.__collect(newFish, updateCollection=0)
def __str__(self):
numFish = len(self.fishList)
txt = 'Fish Collection (%s fish):' % numFish
for fish in self.fishList:
txt += '\n' + str(fish)
return txt
| none | 1 |  | 3.201076 | 3 |
lead/strategies/strategy_base.py | M4gicT0/Distribute | 0 | 9201 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
#
# Distributed under terms of the MIT license.
"""
Strategy base class
"""
from abc import ABCMeta, abstractmethod
from tinydb import TinyDB, Query
from node import Node
import json
class Strategy(object):
def __init__(self, this_controller, this_description=None):
self.description = this_description
self.controller = this_controller
self.ledger = TinyDB("ledger.json")
self.db = TinyDB("nodes.json")
self.nodes = []
@abstractmethod
def store_file(self, file_bytes, file_name):
pass
@abstractmethod
def retrieve_file(self, file_name, locations):
pass
@abstractmethod
def get_time(self):
pass
def getNodes(self):
self.nodes = []
for item in self.db:
node = Node(item['mac'],item['ip'],item['port'],item['units'])
self.nodes.append(node)
return self.nodes
def getNodesWithFile(self,filename):
macs = self.ledger.search(Query().file_name == filename)
self.nodes = []
for item in macs:
mac = item["location"]
dbnode = self.db.get(Query().mac == mac)
if(dbnode == None):
continue
node = Node(dbnode['mac'],dbnode['ip'],dbnode['port'],dbnode['units'])
self.nodes.append(node)
return self.nodes
def getFileSize(self, filename):
file = self.ledger.get(Query().file_name == filename)
return file['size']
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
#
# Distributed under terms of the MIT license.
"""
Strategy base class
"""
from abc import ABCMeta, abstractmethod
from tinydb import TinyDB, Query
from node import Node
import json
class Strategy(object):
def __init__(self, this_controller, this_description=None):
self.description = this_description
self.controller = this_controller
self.ledger = TinyDB("ledger.json")
self.db = TinyDB("nodes.json")
self.nodes = []
@abstractmethod
def store_file(self, file_bytes, file_name):
pass
@abstractmethod
def retrieve_file(self, file_name, locations):
pass
@abstractmethod
def get_time(self):
pass
def getNodes(self):
self.nodes = []
for item in self.db:
node = Node(item['mac'],item['ip'],item['port'],item['units'])
self.nodes.append(node)
return self.nodes
def getNodesWithFile(self,filename):
macs = self.ledger.search(Query().file_name == filename)
self.nodes = []
for item in macs:
mac = item["location"]
dbnode = self.db.get(Query().mac == mac)
if(dbnode == None):
continue
node = Node(dbnode['mac'],dbnode['ip'],dbnode['port'],dbnode['units'])
self.nodes.append(node)
return self.nodes
def getFileSize(self, filename):
file = self.ledger.get(Query().file_name == filename)
return file['size']
| en | 0.668957 | #! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # # Distributed under terms of the MIT license. Strategy base class | 2.685189 | 3 |
project_euler/problem_01/sol6.py | mudit-chopra/Python | 0 | 9202 | <gh_stars>0
'''
Problem Statement:
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3,5,6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below N.
'''
from __future__ import print_function
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
'''store multiples of 3 and 5 in a set and then add'''
n = int(input().strip())
l = set()
x = 3
y = 5
while(x<n):
l.add(x)
x+=3
while(y<n):
l.add(y)
y+=5
print(sum(l))
| '''
Problem Statement:
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3,5,6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below N.
'''
from __future__ import print_function
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
'''store multiples of 3 and 5 in a set and then add'''
n = int(input().strip())
l = set()
x = 3
y = 5
while(x<n):
l.add(x)
x+=3
while(y<n):
l.add(y)
y+=5
print(sum(l)) | en | 0.720321 | Problem Statement: If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3,5,6 and 9. The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below N. # Python 2 # Python 3 store multiples of 3 and 5 in a set and then add | 4.352981 | 4 |
jsonfallback/functions.py | laymonage/django-jsonfallback | 0 | 9203 | <reponame>laymonage/django-jsonfallback
import copy
from django.db import NotSupportedError
from django.db.models import Expression
from .fields import mysql_compile_json_path, postgres_compile_json_path, FallbackJSONField
class JSONExtract(Expression):
def __init__(self, expression, *path, output_field=FallbackJSONField(), **extra):
super().__init__(output_field=output_field)
self.path = path
self.source_expression = self._parse_expressions(expression)[0]
self.extra = extra
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.source_expression = c.source_expression.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_postgresql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
params = []
arg_sql, arg_params = compiler.compile(self.source_expression)
params.extend(arg_params)
json_path = postgres_compile_json_path(self.path)
params.append(json_path)
template = '{} #> %s'.format(arg_sql)
return template, params
def as_mysql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
params = []
arg_sql, arg_params = compiler.compile(self.source_expression)
params.extend(arg_params)
json_path = mysql_compile_json_path(self.path)
params.append(json_path)
template = 'JSON_EXTRACT({}, %s)'.format(arg_sql)
return template, params
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
raise NotSupportedError(
'Functions on JSONFields are only supported on PostgreSQL and MySQL at the moment.'
)
def copy(self):
c = super().copy()
c.source_expression = copy.copy(self.source_expression)
c.extra = self.extra.copy()
return c
| import copy
from django.db import NotSupportedError
from django.db.models import Expression
from .fields import mysql_compile_json_path, postgres_compile_json_path, FallbackJSONField
class JSONExtract(Expression):
def __init__(self, expression, *path, output_field=FallbackJSONField(), **extra):
super().__init__(output_field=output_field)
self.path = path
self.source_expression = self._parse_expressions(expression)[0]
self.extra = extra
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.source_expression = c.source_expression.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_postgresql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
params = []
arg_sql, arg_params = compiler.compile(self.source_expression)
params.extend(arg_params)
json_path = postgres_compile_json_path(self.path)
params.append(json_path)
template = '{} #> %s'.format(arg_sql)
return template, params
def as_mysql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
params = []
arg_sql, arg_params = compiler.compile(self.source_expression)
params.extend(arg_params)
json_path = mysql_compile_json_path(self.path)
params.append(json_path)
template = 'JSON_EXTRACT({}, %s)'.format(arg_sql)
return template, params
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
raise NotSupportedError(
'Functions on JSONFields are only supported on PostgreSQL and MySQL at the moment.'
)
def copy(self):
c = super().copy()
c.source_expression = copy.copy(self.source_expression)
c.extra = self.extra.copy()
return c | zh | 0.159832 | #> %s'.format(arg_sql) | 2.099116 | 2 |
excelify/tests.py | pmbaumgartner/excelify | 11 | 9204 | <reponame>pmbaumgartner/excelify
import unittest
import tempfile
import pathlib
import datetime
import warnings
from IPython.testing.globalipapp import start_ipython, get_ipython
import pandas.util.testing as tm
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import read_excel
import pytest
ip = get_ipython()
ip.magic('load_ext excelify')
class TestMagicExportImport(unittest.TestCase):
def setUp(self):
self.tempexcel = tempfile.NamedTemporaryFile(suffix='.xlsx')
def test_series(self):
series = Series()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
loaded_series = read_excel(excel_name, squeeze=True, dtype=series.dtype)
tm.assert_series_equal(series, loaded_series, check_names=False)
def test_dataframe(self):
df = DataFrame()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'df -f {filepath}'.format(filepath=excel_name))
loaded_df = read_excel(excel_name, dtype=df.dtypes)
tm.assert_frame_equal(df, loaded_df, check_names=False)
def test_sheet_name(self):
series = Series()
excel_name = self.tempexcel.name
sheetname = 'test_sheet_name'
ip.run_line_magic('excel', 'series -f {filepath} -s {sheetname}'.format(filepath=excel_name, sheetname=sheetname))
loaded_excel = read_excel(excel_name, sheet_name=None)
assert 'test_sheet_name' in loaded_excel
def test_all_pandas_objects(self):
df1 = DataFrame()
df2 = DataFrame()
series1 = Series()
series2 = Series()
pandas_objects = [(name, obj) for (name, obj) in locals().items()
if isinstance(obj, (DataFrame, Series))]
excel_name = self.tempexcel.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
for (name, obj) in pandas_objects:
if isinstance(obj, Series):
loaded_data = read_excel(excel_name, sheet_name=name, squeeze=True, dtype=obj.dtype)
tm.assert_series_equal(obj, loaded_data, check_names=False)
elif isinstance(obj, DataFrame):
loaded_data = read_excel(excel_name, sheet_name=name, dtype=obj.dtypes)
tm.assert_frame_equal(obj, loaded_data, check_names=False)
def test_sheet_timestamp(self):
series = Series()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
loaded_excel = read_excel(excel_name, sheet_name=None)
sheet_names = list(loaded_excel.keys())
for sheet in sheet_names:
_, date_string = sheet.split('_')
saved_date = datetime.datetime.strptime(date_string, "%Y%m%d-%H%M%S")
load_to_read = datetime.datetime.now() - saved_date
# there is probably a better way to test this
assert load_to_read.seconds < 10
def test_all_long_name(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
locals().update({'a' * 33 : Series()})
excel_name = self.tempexcel.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def test_long_name_provided(self):
with warnings.catch_warnings(record=True) as w:
series = Series()
excel_name = self.tempexcel.name
longsheet = 'a' * 33
ip.run_line_magic('excel', 'series -f {filepath} -s {longsheet}'.format(filepath=excel_name, longsheet=longsheet))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def test_long_name_default(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
longsheet = 'a' * 33
locals().update({longsheet : Series()})
excel_name = self.tempexcel.name
ip.run_line_magic('excel', '{longsheet} -f {filepath}'.format(longsheet=longsheet, filepath=excel_name))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def tearDown(self):
self.tempexcel.close()
def test_filename():
series = Series()
ip.run_line_magic('excel', 'series')
excel_name = list(pathlib.Path().glob('series_*.xlsx'))[0]
assert excel_name.exists()
excel_name.unlink()
def test_all_filename():
series = Series()
df = DataFrame()
ip.run_line_magic('excel_all', '')
excel_name = list(pathlib.Path().glob('all_data_*.xlsx'))[0]
assert excel_name.exists()
excel_name.unlink()
@pytest.fixture
def no_extension_file():
file = tempfile.NamedTemporaryFile()
yield file
file.close()
def test_filepath_append(no_extension_file):
series = Series()
excel_name = no_extension_file.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
exported_filepath = pathlib.PurePath(excel_name + '.xlsx')
assert exported_filepath.suffix == '.xlsx'
def test_all_filepath_append(no_extension_file):
series = Series()
df = DataFrame()
excel_name = no_extension_file.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
exported_filepath = pathlib.Path(excel_name + '.xlsx')
exported_filepath = pathlib.PurePath(excel_name + '.xlsx')
assert exported_filepath.suffix == '.xlsx'
def test_no_object():
with pytest.raises(NameError):
ip.run_line_magic('excel', 'nonexistantobject')
def test_non_pandas_object():
integer = 3
with pytest.raises(TypeError):
ip.run_line_magic('excel', 'integer')
string = 'string'
with pytest.raises(TypeError):
ip.run_line_magic('excel', 'string')
def test_all_no_objects():
with pytest.raises(RuntimeError):
ip.run_line_magic('excel_all', '')
def test_all_too_many_objects():
# this seems like a bad idea...
for i in range(102):
locals().update({'series' + str(i) : Series()})
with pytest.raises(RuntimeError):
ip.run_line_magic('excel_all', '')
| import unittest
import tempfile
import pathlib
import datetime
import warnings
from IPython.testing.globalipapp import start_ipython, get_ipython
import pandas.util.testing as tm
from pandas.core.frame import DataFrame
from pandas.core.series import Series
from pandas import read_excel
import pytest
ip = get_ipython()
ip.magic('load_ext excelify')
class TestMagicExportImport(unittest.TestCase):
def setUp(self):
self.tempexcel = tempfile.NamedTemporaryFile(suffix='.xlsx')
def test_series(self):
series = Series()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
loaded_series = read_excel(excel_name, squeeze=True, dtype=series.dtype)
tm.assert_series_equal(series, loaded_series, check_names=False)
def test_dataframe(self):
df = DataFrame()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'df -f {filepath}'.format(filepath=excel_name))
loaded_df = read_excel(excel_name, dtype=df.dtypes)
tm.assert_frame_equal(df, loaded_df, check_names=False)
def test_sheet_name(self):
series = Series()
excel_name = self.tempexcel.name
sheetname = 'test_sheet_name'
ip.run_line_magic('excel', 'series -f {filepath} -s {sheetname}'.format(filepath=excel_name, sheetname=sheetname))
loaded_excel = read_excel(excel_name, sheet_name=None)
assert 'test_sheet_name' in loaded_excel
def test_all_pandas_objects(self):
df1 = DataFrame()
df2 = DataFrame()
series1 = Series()
series2 = Series()
pandas_objects = [(name, obj) for (name, obj) in locals().items()
if isinstance(obj, (DataFrame, Series))]
excel_name = self.tempexcel.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
for (name, obj) in pandas_objects:
if isinstance(obj, Series):
loaded_data = read_excel(excel_name, sheet_name=name, squeeze=True, dtype=obj.dtype)
tm.assert_series_equal(obj, loaded_data, check_names=False)
elif isinstance(obj, DataFrame):
loaded_data = read_excel(excel_name, sheet_name=name, dtype=obj.dtypes)
tm.assert_frame_equal(obj, loaded_data, check_names=False)
def test_sheet_timestamp(self):
series = Series()
excel_name = self.tempexcel.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
loaded_excel = read_excel(excel_name, sheet_name=None)
sheet_names = list(loaded_excel.keys())
for sheet in sheet_names:
_, date_string = sheet.split('_')
saved_date = datetime.datetime.strptime(date_string, "%Y%m%d-%H%M%S")
load_to_read = datetime.datetime.now() - saved_date
# there is probably a better way to test this
assert load_to_read.seconds < 10
def test_all_long_name(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
locals().update({'a' * 33 : Series()})
excel_name = self.tempexcel.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def test_long_name_provided(self):
with warnings.catch_warnings(record=True) as w:
series = Series()
excel_name = self.tempexcel.name
longsheet = 'a' * 33
ip.run_line_magic('excel', 'series -f {filepath} -s {longsheet}'.format(filepath=excel_name, longsheet=longsheet))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def test_long_name_default(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
longsheet = 'a' * 33
locals().update({longsheet : Series()})
excel_name = self.tempexcel.name
ip.run_line_magic('excel', '{longsheet} -f {filepath}'.format(longsheet=longsheet, filepath=excel_name))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert "truncated" in str(w[-1].message)
def tearDown(self):
self.tempexcel.close()
def test_filename():
series = Series()
ip.run_line_magic('excel', 'series')
excel_name = list(pathlib.Path().glob('series_*.xlsx'))[0]
assert excel_name.exists()
excel_name.unlink()
def test_all_filename():
series = Series()
df = DataFrame()
ip.run_line_magic('excel_all', '')
excel_name = list(pathlib.Path().glob('all_data_*.xlsx'))[0]
assert excel_name.exists()
excel_name.unlink()
@pytest.fixture
def no_extension_file():
file = tempfile.NamedTemporaryFile()
yield file
file.close()
def test_filepath_append(no_extension_file):
series = Series()
excel_name = no_extension_file.name
ip.run_line_magic('excel', 'series -f {filepath}'.format(filepath=excel_name))
exported_filepath = pathlib.PurePath(excel_name + '.xlsx')
assert exported_filepath.suffix == '.xlsx'
def test_all_filepath_append(no_extension_file):
series = Series()
df = DataFrame()
excel_name = no_extension_file.name
ip.run_line_magic('excel_all', '-f {filepath}'.format(filepath=excel_name))
exported_filepath = pathlib.Path(excel_name + '.xlsx')
exported_filepath = pathlib.PurePath(excel_name + '.xlsx')
assert exported_filepath.suffix == '.xlsx'
def test_no_object():
with pytest.raises(NameError):
ip.run_line_magic('excel', 'nonexistantobject')
def test_non_pandas_object():
integer = 3
with pytest.raises(TypeError):
ip.run_line_magic('excel', 'integer')
string = 'string'
with pytest.raises(TypeError):
ip.run_line_magic('excel', 'string')
def test_all_no_objects():
with pytest.raises(RuntimeError):
ip.run_line_magic('excel_all', '')
def test_all_too_many_objects():
# this seems like a bad idea...
for i in range(102):
locals().update({'series' + str(i) : Series()})
with pytest.raises(RuntimeError):
ip.run_line_magic('excel_all', '') | en | 0.979553 | # there is probably a better way to test this # this seems like a bad idea... | 2.228477 | 2 |
Easy/233/233.py | lw7360/dailyprogrammer | 0 | 9205 | # https://www.reddit.com/r/dailyprogrammer/comments/3ltee2/20150921_challenge_233_easy_the_house_that_ascii/
import random
import sys
def main():
data = open(sys.argv[1]).read().splitlines()[1::]
door = random.randrange(len(data[-1]))
wideData = []
for row in data:
curStr = ''
for ast in row:
if ast == '*':
curStr += '*****'
else:
curStr += ' '
wideData.append(curStr)
longData = []
for row in wideData:
longData.append(row[:])
longData.append(row[:])
longData.append(row[:])
for row in longData:
print row
if __name__ == "__main__":
main()
| # https://www.reddit.com/r/dailyprogrammer/comments/3ltee2/20150921_challenge_233_easy_the_house_that_ascii/
import random
import sys
def main():
data = open(sys.argv[1]).read().splitlines()[1::]
door = random.randrange(len(data[-1]))
wideData = []
for row in data:
curStr = ''
for ast in row:
if ast == '*':
curStr += '*****'
else:
curStr += ' '
wideData.append(curStr)
longData = []
for row in wideData:
longData.append(row[:])
longData.append(row[:])
longData.append(row[:])
for row in longData:
print row
if __name__ == "__main__":
main()
| en | 0.620112 | # https://www.reddit.com/r/dailyprogrammer/comments/3ltee2/20150921_challenge_233_easy_the_house_that_ascii/ | 3.38701 | 3 |
usr/callbacks/action/tools.py | uwitec/LEHome | 151 | 9206 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import division
from decimal import Decimal
import subprocess
import threading
import urllib2
import urllib
import httplib
import json
import re
import hashlib
import base64
# import zlib
from lib.command.runtime import UserInput
from lib.helper.CameraHelper import CameraHelper
from lib.sound import Sound
from util import Util
from util.Res import Res
from util.log import *
from lib.model import Callback
class timer_callback(Callback.Callback):
def callback(self, cmd, action, target, msg):
if msg is None:
self._home.publish_msg(cmd, u"时间格式错误")
return False, None
if msg.endswith(u'点') or \
msg.endswith(u'分'):
t = Util.gap_for_timestring(msg)
elif msg.endswith(u"秒"):
t = int(Util.cn2dig(msg[:-1]))
elif msg.endswith(u"分钟"):
t = int(Util.cn2dig(msg[:-2]))*60
elif msg.endswith(u"小时"):
t = int(Util.cn2dig(msg[:-2]))*60*60
else:
self._home.publish_msg(cmd, u"时间格式错误")
return False
if t is None:
self._home.publish_msg(cmd, u"时间格式错误")
return False, None
DEBUG("thread wait for %d sec" % (t, ))
self._home.publish_msg(cmd, action + target + msg)
threading.current_thread().waitUtil(t)
if threading.current_thread().stopped():
return False
self._home.setResume(True)
count = 7
Sound.notice( Res.get_res_path("sound/com_bell"), True, count)
self._home.setResume(False)
return True
class translate_callback(Callback.Callback):
base_url = "http://fanyi.youdao.com/openapi.do"
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入内容, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if msg is None:
self._home.publish_msg(cmd, u"无翻译内容")
elif len(msg) > 200:
self._home.publish_msg(cmd, u"翻译内容过长(<200字)")
else:
try:
values = {
"keyfrom":"11111testt111",
"key":"2125866912",
"type":"data",
"doctype":"json",
"version":"1.1",
"q":msg.encode("utf-8")
}
url = translate_callback.base_url + "?" + urllib.urlencode(values)
res = urllib2.urlopen(url).read()
res = " ".join(json.loads(res)["translation"])
self._home.publish_msg(cmd, u"翻译结果:\n" + res)
except Exception, ex:
ERROR("request error:", ex)
self._home.publish_msg(cmd, u"翻译失败")
return True
return True
class baidu_wiki_callback(Callback.Callback):
base_url = "http://wapbaike.baidu.com"
def searchWiki(self, word, time=10):
value = {"word": word.encode("utf-8")}
url = baidu_wiki_callback.base_url + \
"/search?" + urllib.urlencode(value)
try:
response = urllib2.urlopen(url, timeout=time)
html = response.read().encode("utf-8")
response.close()
real_url = None
content = None
m = re.compile(r"URL=(.+)'>").search(html)
if m:
real_url = m.group(1)
else:
return None, None
real_url = real_url[:real_url.index("?")]
if not real_url is None:
url = baidu_wiki_callback.base_url + real_url
response = urllib2.urlopen(url, timeout=time)
html = response.read()
response.close()
m = re.compile(
r'<p class="summary"><p>(.+)<div class="card-info">',
re.DOTALL
).search(html)
if m:
content = m.group(1)
return Util.strip_tags(content), url
else:
return None, None
except Exception, ex:
ERROR("wiki error: ", ex)
return None, None
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入内容, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if not msg is None:
self._home.publish_msg(cmd, u"正在搜索...")
res, url = self.searchWiki(msg)
if res is None:
self._home.publish_msg(cmd, u"无百科内容")
else:
res = res.decode("utf-8")
if len(res) > 140:
res = res[:140]
msg = u"百度百科:\n %s...\n%s" \
% (res, url)
self._home.publish_msg(cmd, msg)
else:
self._home.publish_msg(cmd, u"无百科内容")
return True
class cal_callback(Callback.Callback):
_ops = {
u'加':'+',
u'减':'-',
u'乘':'*',
u'除':'/',
u'+':'+',
u'-':'-',
u'*':'*',
u'/':'/',
u'(':'(',
u'(':'(',
u')':')',
u')':')',
}
def _parse_tokens(self, src):
tokens = []
cur_t = u''
for term in src:
if term in cal_callback._ops:
if cur_t != u'':
tokens.append(cur_t)
cur_t = u''
tokens.append(term)
else:
cur_t += term
if cur_t != u'':
tokens.append(cur_t)
return tokens
def _parse_expression(self, tokens):
expression = u''
for token in tokens:
if token in cal_callback._ops:
expression += cal_callback._ops[token]
else:
num = Util.cn2dig(token)
if num is None:
return None
expression += str(num)
res = None
INFO("expression: " + expression)
try:
res = eval(expression)
res = Decimal.from_float(res).quantize(Decimal('0.00'))
except Exception, ex:
ERROR("cal expression error:", ex)
return res
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入公式, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if msg is None:
self._home.publish_msg(cmd, u"无公式内容")
else:
tokens = self._parse_tokens(msg)
if not tokens is None:
res = self._parse_expression(tokens)
if not res is None:
self._home.publish_msg(cmd, u"%s = %s" % (msg, str(res)))
return True, res
else:
self._home.publish_msg(cmd, u"计算出错")
return True, None
else:
self._home.publish_msg(cmd, u"格式有误")
return True, None
class camera_quickshot_callback(Callback.Callback):
IMAGE_SERVER_URL = "http://lehome.sinaapp.com/image"
IMAGE_HOST_URL = "http://lehome-image.stor.sinaapp.com/"
def _upload_image(self, img_src, thumbnail_src):
if img_src is None or len(img_src) == 0:
return None, None
INFO("uploading: %s %s" % (img_src, thumbnail_src))
# swift --insecure upload image data/capture/2015_05_23_001856.jpg
proc = subprocess.Popen(
[
"swift",
"--insecure",
"upload",
"image",
thumbnail_src,
img_src
],
stdout=subprocess.PIPE
)
read_img = None
read_thumbnail = None
for i in range(2) :
try:
data = proc.stdout.readline().strip() #block / wait
INFO("swift readline: %s" % data)
if data.endswith(".thumbnail.jpg"):
INFO("save to storage:%s" % data)
read_thumbnail = camera_quickshot_callback.IMAGE_HOST_URL + data
elif data.endswith(".jpg"):
INFO("save to storage:%s" % data)
read_img = camera_quickshot_callback.IMAGE_HOST_URL + data
if not read_img is None and not read_thumbnail is None:
return read_img, read_thumbnail
except (KeyboardInterrupt, SystemExit):
raise
except Exception, ex:
ERROR(ex)
break
return None, None
def callback(self, cmd, msg):
self._home.publish_msg(cmd, u"正在截图...")
Sound.notice(Res.get_res_path("sound/com_shoot"))
save_path="data/capture/"
save_name, thumbnail_name = CameraHelper().take_a_photo(save_path)
# for test
# save_name = "2015_05_02_164052.jpg"
if save_name is None:
self._home.publish_msg(cmd, u"截图失败")
INFO("capture faild.")
return True
img_url, thumbnail_url = self._upload_image(
save_path + save_name,
save_path + thumbnail_name,
)
if img_url is None:
self._home.publish_msg(cmd, u"截图失败")
INFO("upload capture faild.")
return True
else:
self._home.publish_msg(
cmd,
msg=img_url,
cmd_type="capture"
)
return True
class push_info_callback(Callback.Callback):
def callback(self, cmd, target, msg):
if target is None or len(target) == 0:
if msg is None or len(msg) == 0:
self._home.publish_msg(cmd, u"请输入内容")
return True, None
self._home.publish_msg(cmd, msg)
DEBUG("show_callback: %s" % msg)
return True, msg
return True, "push"
| #!/usr/bin/env python
# encoding: utf-8
from __future__ import division
from decimal import Decimal
import subprocess
import threading
import urllib2
import urllib
import httplib
import json
import re
import hashlib
import base64
# import zlib
from lib.command.runtime import UserInput
from lib.helper.CameraHelper import CameraHelper
from lib.sound import Sound
from util import Util
from util.Res import Res
from util.log import *
from lib.model import Callback
class timer_callback(Callback.Callback):
def callback(self, cmd, action, target, msg):
if msg is None:
self._home.publish_msg(cmd, u"时间格式错误")
return False, None
if msg.endswith(u'点') or \
msg.endswith(u'分'):
t = Util.gap_for_timestring(msg)
elif msg.endswith(u"秒"):
t = int(Util.cn2dig(msg[:-1]))
elif msg.endswith(u"分钟"):
t = int(Util.cn2dig(msg[:-2]))*60
elif msg.endswith(u"小时"):
t = int(Util.cn2dig(msg[:-2]))*60*60
else:
self._home.publish_msg(cmd, u"时间格式错误")
return False
if t is None:
self._home.publish_msg(cmd, u"时间格式错误")
return False, None
DEBUG("thread wait for %d sec" % (t, ))
self._home.publish_msg(cmd, action + target + msg)
threading.current_thread().waitUtil(t)
if threading.current_thread().stopped():
return False
self._home.setResume(True)
count = 7
Sound.notice( Res.get_res_path("sound/com_bell"), True, count)
self._home.setResume(False)
return True
class translate_callback(Callback.Callback):
base_url = "http://fanyi.youdao.com/openapi.do"
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入内容, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if msg is None:
self._home.publish_msg(cmd, u"无翻译内容")
elif len(msg) > 200:
self._home.publish_msg(cmd, u"翻译内容过长(<200字)")
else:
try:
values = {
"keyfrom":"11111testt111",
"key":"2125866912",
"type":"data",
"doctype":"json",
"version":"1.1",
"q":msg.encode("utf-8")
}
url = translate_callback.base_url + "?" + urllib.urlencode(values)
res = urllib2.urlopen(url).read()
res = " ".join(json.loads(res)["translation"])
self._home.publish_msg(cmd, u"翻译结果:\n" + res)
except Exception, ex:
ERROR("request error:", ex)
self._home.publish_msg(cmd, u"翻译失败")
return True
return True
class baidu_wiki_callback(Callback.Callback):
base_url = "http://wapbaike.baidu.com"
def searchWiki(self, word, time=10):
value = {"word": word.encode("utf-8")}
url = baidu_wiki_callback.base_url + \
"/search?" + urllib.urlencode(value)
try:
response = urllib2.urlopen(url, timeout=time)
html = response.read().encode("utf-8")
response.close()
real_url = None
content = None
m = re.compile(r"URL=(.+)'>").search(html)
if m:
real_url = m.group(1)
else:
return None, None
real_url = real_url[:real_url.index("?")]
if not real_url is None:
url = baidu_wiki_callback.base_url + real_url
response = urllib2.urlopen(url, timeout=time)
html = response.read()
response.close()
m = re.compile(
r'<p class="summary"><p>(.+)<div class="card-info">',
re.DOTALL
).search(html)
if m:
content = m.group(1)
return Util.strip_tags(content), url
else:
return None, None
except Exception, ex:
ERROR("wiki error: ", ex)
return None, None
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入内容, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if not msg is None:
self._home.publish_msg(cmd, u"正在搜索...")
res, url = self.searchWiki(msg)
if res is None:
self._home.publish_msg(cmd, u"无百科内容")
else:
res = res.decode("utf-8")
if len(res) > 140:
res = res[:140]
msg = u"百度百科:\n %s...\n%s" \
% (res, url)
self._home.publish_msg(cmd, msg)
else:
self._home.publish_msg(cmd, u"无百科内容")
return True
class cal_callback(Callback.Callback):
_ops = {
u'加':'+',
u'减':'-',
u'乘':'*',
u'除':'/',
u'+':'+',
u'-':'-',
u'*':'*',
u'/':'/',
u'(':'(',
u'(':'(',
u')':')',
u')':')',
}
def _parse_tokens(self, src):
tokens = []
cur_t = u''
for term in src:
if term in cal_callback._ops:
if cur_t != u'':
tokens.append(cur_t)
cur_t = u''
tokens.append(term)
else:
cur_t += term
if cur_t != u'':
tokens.append(cur_t)
return tokens
def _parse_expression(self, tokens):
expression = u''
for token in tokens:
if token in cal_callback._ops:
expression += cal_callback._ops[token]
else:
num = Util.cn2dig(token)
if num is None:
return None
expression += str(num)
res = None
INFO("expression: " + expression)
try:
res = eval(expression)
res = Decimal.from_float(res).quantize(Decimal('0.00'))
except Exception, ex:
ERROR("cal expression error:", ex)
return res
def callback(self, cmd, msg):
if Util.empty_str(msg):
cancel_flag = u"取消"
finish_flag = u"完成"
self._home.publish_msg(
cmd
, u"请输入公式, 输入\"%s\"或\"%s\"结束:" % (finish_flag, cancel_flag)
, cmd_type="input"
)
msg = UserInput(self._home).waitForInput(
finish=finish_flag,
cancel=cancel_flag)
if msg is None:
self._home.publish_msg(cmd, u"无公式内容")
else:
tokens = self._parse_tokens(msg)
if not tokens is None:
res = self._parse_expression(tokens)
if not res is None:
self._home.publish_msg(cmd, u"%s = %s" % (msg, str(res)))
return True, res
else:
self._home.publish_msg(cmd, u"计算出错")
return True, None
else:
self._home.publish_msg(cmd, u"格式有误")
return True, None
class camera_quickshot_callback(Callback.Callback):
IMAGE_SERVER_URL = "http://lehome.sinaapp.com/image"
IMAGE_HOST_URL = "http://lehome-image.stor.sinaapp.com/"
def _upload_image(self, img_src, thumbnail_src):
if img_src is None or len(img_src) == 0:
return None, None
INFO("uploading: %s %s" % (img_src, thumbnail_src))
# swift --insecure upload image data/capture/2015_05_23_001856.jpg
proc = subprocess.Popen(
[
"swift",
"--insecure",
"upload",
"image",
thumbnail_src,
img_src
],
stdout=subprocess.PIPE
)
read_img = None
read_thumbnail = None
for i in range(2) :
try:
data = proc.stdout.readline().strip() #block / wait
INFO("swift readline: %s" % data)
if data.endswith(".thumbnail.jpg"):
INFO("save to storage:%s" % data)
read_thumbnail = camera_quickshot_callback.IMAGE_HOST_URL + data
elif data.endswith(".jpg"):
INFO("save to storage:%s" % data)
read_img = camera_quickshot_callback.IMAGE_HOST_URL + data
if not read_img is None and not read_thumbnail is None:
return read_img, read_thumbnail
except (KeyboardInterrupt, SystemExit):
raise
except Exception, ex:
ERROR(ex)
break
return None, None
def callback(self, cmd, msg):
self._home.publish_msg(cmd, u"正在截图...")
Sound.notice(Res.get_res_path("sound/com_shoot"))
save_path="data/capture/"
save_name, thumbnail_name = CameraHelper().take_a_photo(save_path)
# for test
# save_name = "2015_05_02_164052.jpg"
if save_name is None:
self._home.publish_msg(cmd, u"截图失败")
INFO("capture faild.")
return True
img_url, thumbnail_url = self._upload_image(
save_path + save_name,
save_path + thumbnail_name,
)
if img_url is None:
self._home.publish_msg(cmd, u"截图失败")
INFO("upload capture faild.")
return True
else:
self._home.publish_msg(
cmd,
msg=img_url,
cmd_type="capture"
)
return True
class push_info_callback(Callback.Callback):
def callback(self, cmd, target, msg):
if target is None or len(target) == 0:
if msg is None or len(msg) == 0:
self._home.publish_msg(cmd, u"请输入内容")
return True, None
self._home.publish_msg(cmd, msg)
DEBUG("show_callback: %s" % msg)
return True, msg
return True, "push"
| en | 0.562729 | #!/usr/bin/env python # encoding: utf-8 # import zlib # swift --insecure upload image data/capture/2015_05_23_001856.jpg #block / wait # for test # save_name = "2015_05_02_164052.jpg" | 2.105484 | 2 |
src/fix_code_1.py | Athenian-Computer-Science/numeric-operations-1-practice-template | 0 | 9207 | <gh_stars>0
#############################
# Collaborators: (enter people or resources who/that helped you)
# If none, write none
#
#
#############################
base = float(input("Enter the base: "))
height = float(input("Enter the height: "))
area = 0.5 * base * height  # Calculate the area of the triangle
print(f"The area of the triangle is {area}.") | #############################
# Collaborators: (enter people or resources who/that helped you)
# If none, write none
#
#
#############################
base = float(input("Enter the base: "))
height = float(input("Enter the height: "))
area = 0.5 * base * height  # Calculate the area of the triangle
print(f"The area of the triangle is {area}.") | en | 0.367387 | ############################# # Collaborators: (enter people or resources who/that helped you) # If none, write none # # ############################# # Calculate the area of the triangle | 3.955991 | 4 |
gputools/core/oclmultireduction.py | gmazzamuto/gputools | 89 | 9208 | <reponame>gmazzamuto/gputools
"""
an adaptation of pyopencl's reduction kernel for weighted avarages
like sum(a*b)
<EMAIL>
"""
from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import zip
import pyopencl as cl
from pyopencl.tools import (
context_dependent_memoize,
dtype_to_ctype, KernelTemplateBase,
_process_code_for_macro)
import numpy as np
from gputools import get_device
import sys
# {{{ kernel source
KERNEL = r"""//CL//
<%
inds = range(len(map_exprs))
%>
#define GROUP_SIZE ${group_size}
% for i,m in enumerate(map_exprs):
#define READ_AND_MAP_${i}(i) (${m})
% endfor
#define REDUCE(a, b) (${reduce_expr})
% if double_support:
#if __OPENCL_C_VERSION__ < 120
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
#define PYOPENCL_DEFINE_CDOUBLE
% endif
#include <pyopencl-complex.h>
${preamble}
typedef ${out_type} out_type;
__kernel void ${name}(
% for i in inds:
__global out_type *out__base_${i},
% endfor
long out__offset, ${arguments},
unsigned int seq_count, unsigned int n)
{
% for i in inds:
__global out_type *out_${i} = (__global out_type *) (
(__global char *) out__base_${i} + out__offset);
% endfor
${arg_prep}
% for i in inds:
__local out_type ldata_${i}[GROUP_SIZE];
out_type acc_${i} = ${neutral};
% endfor
unsigned int lid = get_local_id(0);
unsigned int i = get_group_id(0)*GROUP_SIZE*seq_count + lid;
//printf("seq: %d\tlid = %d\ti=%d\n",seq_count,lid,i);
for (unsigned s = 0; s < seq_count; ++s)
{
if (i >= n)
break;
% for i in inds:
acc_${i} = REDUCE(acc_${i}, READ_AND_MAP_${i}(i));
% endfor
i += GROUP_SIZE;
}
% for i in inds:
ldata_${i}[lid] = acc_${i};
% endfor
<%
cur_size = group_size
%>
% while cur_size > 1:
barrier(CLK_LOCAL_MEM_FENCE);
<%
new_size = cur_size // 2
assert new_size * 2 == cur_size
%>
if (lid < ${new_size})
{
% for i in inds:
ldata_${i}[lid] = REDUCE(
ldata_${i}[lid],
ldata_${i}[lid + ${new_size}]);
% endfor
}
<% cur_size = new_size %>
% endwhile
if (lid == 0) {
% for i in inds:
out_${i}[get_group_id(0)] = ldata_${i}[0];
% endfor
//printf("result: %.4f\n",out_0[get_group_id(0)] );
}
}
"""
def _get_reduction_source(
ctx, out_type, out_type_size,
neutral, reduce_expr, map_exprs, parsed_args,
name="reduce_kernel", preamble="", arg_prep="",
device=None, max_group_size=None):
if device is not None:
devices = [device]
else:
devices = ctx.devices
# {{{ compute group size
def get_dev_group_size(device):
# dirty fix for the RV770 boards
max_work_group_size = device.max_work_group_size
if "RV770" in device.name:
max_work_group_size = 64
# compute lmem limit
from pytools import div_ceil
lmem_wg_size = div_ceil(max_work_group_size, out_type_size)
result = min(max_work_group_size, lmem_wg_size)
# round down to power of 2
from pyopencl.tools import bitlog2
return 2**bitlog2(result)
group_size = min(get_dev_group_size(dev) for dev in devices)
if max_group_size is not None:
group_size = min(max_group_size, group_size)
# }}}
from mako.template import Template
from pytools import all
from pyopencl.characterize import has_double_support
src = str(Template(KERNEL).render(
out_type=out_type,
arguments=", ".join(arg.declarator() for arg in parsed_args),
group_size=group_size,
neutral=neutral,
reduce_expr=_process_code_for_macro(reduce_expr),
map_exprs=[_process_code_for_macro(m) for m in map_exprs],
name=name,
preamble=preamble,
arg_prep=arg_prep,
double_support=all(has_double_support(dev) for dev in devices),
))
# sys.exit()
from pytools import Record
class ReductionInfo(Record):
pass
return ReductionInfo(
context=ctx,
source=src,
group_size=group_size)
def get_reduction_kernel(stage,
ctx, dtype_out,
neutral, reduce_expr, arguments=None,
name="reduce_kernel", preamble="",
map_exprs = None,
device=None, options=[], max_group_size=None):
if map_exprs is None:
raise ValueError("map_exprs has to be given!")
for i, m in enumerate(map_exprs):
if m is None:
if stage==2:
map_exprs[i] = "pyopencl_reduction_inp_%i[i]"%i
else:
map_exprs[i] = "in[i]"
from pyopencl.tools import (
parse_arg_list, get_arg_list_scalar_arg_dtypes,
get_arg_offset_adjuster_code, VectorArg)
arg_prep = ""
if stage==1 and arguments is not None:
arguments = parse_arg_list(arguments, with_offset=True)
arg_prep = get_arg_offset_adjuster_code(arguments)
if stage==2 and arguments is not None:
arguments = parse_arg_list(arguments)
arguments = (
[VectorArg(dtype_out, "pyopencl_reduction_inp_%i"%i) for i in range(len(map_exprs))]
+arguments)
inf = _get_reduction_source(
ctx, dtype_to_ctype(dtype_out), dtype_out.itemsize,
neutral, reduce_expr, map_exprs, arguments,
name, preamble, arg_prep, device, max_group_size)
inf.program = cl.Program(ctx, inf.source)
inf.program.build(options)
inf.kernel = getattr(inf.program, name)
inf.arg_types = arguments
inf.kernel.set_scalar_arg_dtypes(
[None, ]*len(map_exprs)+[np.int64]
+get_arg_list_scalar_arg_dtypes(inf.arg_types)
+[np.uint32]*2)
return inf
# }}}
# {{{ main reduction kernel
class OCLMultiReductionKernel:
"""
simultanous reduction of a weighted sum of severalbuffers
example:
k = OCLMultiReduction(np.float32,
neutral="0",reduce_expr="a+b",
map_exprs = ["x[i]", "x[i]*y[i]"],
arguments="__global float *x,__global float *y")
k(a,b, out1 = out1, out2 = out2)
"""
def __init__(self, dtype_out,
neutral, reduce_expr, arguments=None,
map_exprs=[None],
name="reduce_kernel", options=[], preamble=""):
ctx = get_device().context
dtype_out = self.dtype_out = np.dtype(dtype_out)
max_group_size = None
trip_count = 0
self.n_exprs = len(map_exprs)
assert self.n_exprs>0
while True:
self.stage_1_inf = get_reduction_kernel(1, ctx,
dtype_out,
neutral, reduce_expr, arguments,
name=name+"_stage1", options=options, preamble=preamble,
map_exprs=map_exprs,
max_group_size=max_group_size)
kernel_max_wg_size = self.stage_1_inf.kernel.get_work_group_info(
cl.kernel_work_group_info.WORK_GROUP_SIZE,
ctx.devices[0])
if self.stage_1_inf.group_size<=kernel_max_wg_size:
break
else:
max_group_size = kernel_max_wg_size
trip_count += 1
assert trip_count<=2
self.stage_2_inf = get_reduction_kernel(2, ctx,
dtype_out,
neutral, reduce_expr, arguments=arguments,
name=name+"_stage2", options=options,
map_exprs = [None]*self.n_exprs,
preamble=preamble,
max_group_size=max_group_size)
from pytools import any
from pyopencl.tools import VectorArg
assert any(
isinstance(arg_tp, VectorArg)
for arg_tp in self.stage_1_inf.arg_types), \
"ReductionKernel can only be used with functions " \
"that have at least one vector argument"
def __call__(self, *args, **kwargs):
MAX_GROUP_COUNT = 1024 # noqa
SMALL_SEQ_COUNT = 4 # noqa
from pyopencl.array import empty
stage_inf = self.stage_1_inf
queue = kwargs.pop("queue", None)
wait_for = kwargs.pop("wait_for", None)
return_event = kwargs.pop("return_event", False)
outs = kwargs.pop("outs", [None]*self.n_exprs)
if kwargs:
raise TypeError("invalid keyword argument to reduction kernel")
stage1_args = args
while True:
invocation_args = []
vectors = []
from pyopencl.tools import VectorArg
for arg, arg_tp in zip(args, stage_inf.arg_types):
if isinstance(arg_tp, VectorArg):
if not arg.flags.forc:
raise RuntimeError("ReductionKernel cannot "
"deal with non-contiguous arrays")
vectors.append(arg)
invocation_args.append(arg.base_data)
if arg_tp.with_offset:
invocation_args.append(arg.offset)
else:
invocation_args.append(arg)
repr_vec = vectors[0]
sz = repr_vec.size
if queue is not None:
use_queue = queue
else:
use_queue = repr_vec.queue
if sz<=stage_inf.group_size*SMALL_SEQ_COUNT*MAX_GROUP_COUNT:
total_group_size = SMALL_SEQ_COUNT*stage_inf.group_size
group_count = (sz+total_group_size-1)//total_group_size
seq_count = SMALL_SEQ_COUNT
else:
group_count = MAX_GROUP_COUNT
macrogroup_size = group_count*stage_inf.group_size
seq_count = (sz+macrogroup_size-1)//macrogroup_size
if group_count==1:
results = [empty(use_queue,
(), self.dtype_out,
allocator=repr_vec.allocator) if out is None else out for out in outs]
else:
results = [empty(use_queue,
(group_count,), self.dtype_out,
allocator=repr_vec.allocator) for out in outs]
last_evt = stage_inf.kernel(
use_queue,
(group_count*stage_inf.group_size,),
(stage_inf.group_size,),
*([r.base_data for r in results]+[results[0].offset,]
+invocation_args+[seq_count, sz]),
**dict(wait_for=wait_for))
wait_for = [last_evt]
#print "ooooo ", group_count, len(args)
if group_count==1:
if return_event:
return results, last_evt
else:
return results
else:
stage_inf = self.stage_2_inf
args = tuple(results)+stage1_args
#args = (results[0],)+stage1_args
if __name__=='__main__':
from gputools import OCLArray, OCLReductionKernel
k1 = OCLReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_expr="x[i]",
arguments="__global float *x")
k2 = OCLMultiReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_exprs=["y[i]*x[i]","x[i]"],
arguments="__global float *x, __global float *y")
N = 512
a = OCLArray.from_array(np.ones((N,N),np.float32))
b = OCLArray.from_array(2.*np.ones((N,N),np.float32))
o1 = OCLArray.empty((),np.float32)
o2 = OCLArray.empty((),np.float32)
from time import time
t = time()
for _ in range(400):
k1(a)
k1(b)
k1(a).get()
k1(b).get()
print(time()-t)
t = time()
#print k2(a,b, outs = [o1,o2])
for _ in range(400):
k2(a[0],b[0], outs = [o1,o2])
o1.get()
print(time()-t)
# open("kern_new_1.txt","w").write(("%s"%k2.stage_1_inf).replace("\\n","\n"))
# open("kern_new_2.txt","w").write(("%s"%k2.stage_2_inf).replace("\\n","\n"))
| """
an adaptation of pyopencl's reduction kernel for weighted avarages
like sum(a*b)
<EMAIL>
"""
from __future__ import print_function, unicode_literals, absolute_import, division
from six.moves import zip
import pyopencl as cl
from pyopencl.tools import (
context_dependent_memoize,
dtype_to_ctype, KernelTemplateBase,
_process_code_for_macro)
import numpy as np
from gputools import get_device
import sys
# {{{ kernel source
KERNEL = r"""//CL//
<%
inds = range(len(map_exprs))
%>
#define GROUP_SIZE ${group_size}
% for i,m in enumerate(map_exprs):
#define READ_AND_MAP_${i}(i) (${m})
% endfor
#define REDUCE(a, b) (${reduce_expr})
% if double_support:
#if __OPENCL_C_VERSION__ < 120
#pragma OPENCL EXTENSION cl_khr_fp64: enable
#endif
#define PYOPENCL_DEFINE_CDOUBLE
% endif
#include <pyopencl-complex.h>
${preamble}
typedef ${out_type} out_type;
__kernel void ${name}(
% for i in inds:
__global out_type *out__base_${i},
% endfor
long out__offset, ${arguments},
unsigned int seq_count, unsigned int n)
{
% for i in inds:
__global out_type *out_${i} = (__global out_type *) (
(__global char *) out__base_${i} + out__offset);
% endfor
${arg_prep}
% for i in inds:
__local out_type ldata_${i}[GROUP_SIZE];
out_type acc_${i} = ${neutral};
% endfor
unsigned int lid = get_local_id(0);
unsigned int i = get_group_id(0)*GROUP_SIZE*seq_count + lid;
//printf("seq: %d\tlid = %d\ti=%d\n",seq_count,lid,i);
for (unsigned s = 0; s < seq_count; ++s)
{
if (i >= n)
break;
% for i in inds:
acc_${i} = REDUCE(acc_${i}, READ_AND_MAP_${i}(i));
% endfor
i += GROUP_SIZE;
}
% for i in inds:
ldata_${i}[lid] = acc_${i};
% endfor
<%
cur_size = group_size
%>
% while cur_size > 1:
barrier(CLK_LOCAL_MEM_FENCE);
<%
new_size = cur_size // 2
assert new_size * 2 == cur_size
%>
if (lid < ${new_size})
{
% for i in inds:
ldata_${i}[lid] = REDUCE(
ldata_${i}[lid],
ldata_${i}[lid + ${new_size}]);
% endfor
}
<% cur_size = new_size %>
% endwhile
if (lid == 0) {
% for i in inds:
out_${i}[get_group_id(0)] = ldata_${i}[0];
% endfor
//printf("result: %.4f\n",out_0[get_group_id(0)] );
}
}
"""
def _get_reduction_source(
ctx, out_type, out_type_size,
neutral, reduce_expr, map_exprs, parsed_args,
name="reduce_kernel", preamble="", arg_prep="",
device=None, max_group_size=None):
if device is not None:
devices = [device]
else:
devices = ctx.devices
# {{{ compute group size
def get_dev_group_size(device):
# dirty fix for the RV770 boards
max_work_group_size = device.max_work_group_size
if "RV770" in device.name:
max_work_group_size = 64
# compute lmem limit
from pytools import div_ceil
lmem_wg_size = div_ceil(max_work_group_size, out_type_size)
result = min(max_work_group_size, lmem_wg_size)
# round down to power of 2
from pyopencl.tools import bitlog2
return 2**bitlog2(result)
group_size = min(get_dev_group_size(dev) for dev in devices)
if max_group_size is not None:
group_size = min(max_group_size, group_size)
# }}}
from mako.template import Template
from pytools import all
from pyopencl.characterize import has_double_support
src = str(Template(KERNEL).render(
out_type=out_type,
arguments=", ".join(arg.declarator() for arg in parsed_args),
group_size=group_size,
neutral=neutral,
reduce_expr=_process_code_for_macro(reduce_expr),
map_exprs=[_process_code_for_macro(m) for m in map_exprs],
name=name,
preamble=preamble,
arg_prep=arg_prep,
double_support=all(has_double_support(dev) for dev in devices),
))
# sys.exit()
from pytools import Record
class ReductionInfo(Record):
pass
return ReductionInfo(
context=ctx,
source=src,
group_size=group_size)
def get_reduction_kernel(stage,
ctx, dtype_out,
neutral, reduce_expr, arguments=None,
name="reduce_kernel", preamble="",
map_exprs = None,
device=None, options=[], max_group_size=None):
if map_exprs is None:
raise ValueError("map_exprs has to be given!")
for i, m in enumerate(map_exprs):
if m is None:
if stage==2:
map_exprs[i] = "pyopencl_reduction_inp_%i[i]"%i
else:
map_exprs[i] = "in[i]"
from pyopencl.tools import (
parse_arg_list, get_arg_list_scalar_arg_dtypes,
get_arg_offset_adjuster_code, VectorArg)
arg_prep = ""
if stage==1 and arguments is not None:
arguments = parse_arg_list(arguments, with_offset=True)
arg_prep = get_arg_offset_adjuster_code(arguments)
if stage==2 and arguments is not None:
arguments = parse_arg_list(arguments)
arguments = (
[VectorArg(dtype_out, "pyopencl_reduction_inp_%i"%i) for i in range(len(map_exprs))]
+arguments)
inf = _get_reduction_source(
ctx, dtype_to_ctype(dtype_out), dtype_out.itemsize,
neutral, reduce_expr, map_exprs, arguments,
name, preamble, arg_prep, device, max_group_size)
inf.program = cl.Program(ctx, inf.source)
inf.program.build(options)
inf.kernel = getattr(inf.program, name)
inf.arg_types = arguments
inf.kernel.set_scalar_arg_dtypes(
[None, ]*len(map_exprs)+[np.int64]
+get_arg_list_scalar_arg_dtypes(inf.arg_types)
+[np.uint32]*2)
return inf
# }}}
# {{{ main reduction kernel
class OCLMultiReductionKernel:
"""
simultanous reduction of a weighted sum of severalbuffers
example:
k = OCLMultiReduction(np.float32,
neutral="0",reduce_expr="a+b",
map_exprs = ["x[i]", "x[i]*y[i]"],
arguments="__global float *x,__global float *y")
k(a,b, out1 = out1, out2 = out2)
"""
def __init__(self, dtype_out,
neutral, reduce_expr, arguments=None,
map_exprs=[None],
name="reduce_kernel", options=[], preamble=""):
ctx = get_device().context
dtype_out = self.dtype_out = np.dtype(dtype_out)
max_group_size = None
trip_count = 0
self.n_exprs = len(map_exprs)
assert self.n_exprs>0
while True:
self.stage_1_inf = get_reduction_kernel(1, ctx,
dtype_out,
neutral, reduce_expr, arguments,
name=name+"_stage1", options=options, preamble=preamble,
map_exprs=map_exprs,
max_group_size=max_group_size)
kernel_max_wg_size = self.stage_1_inf.kernel.get_work_group_info(
cl.kernel_work_group_info.WORK_GROUP_SIZE,
ctx.devices[0])
if self.stage_1_inf.group_size<=kernel_max_wg_size:
break
else:
max_group_size = kernel_max_wg_size
trip_count += 1
assert trip_count<=2
self.stage_2_inf = get_reduction_kernel(2, ctx,
dtype_out,
neutral, reduce_expr, arguments=arguments,
name=name+"_stage2", options=options,
map_exprs = [None]*self.n_exprs,
preamble=preamble,
max_group_size=max_group_size)
from pytools import any
from pyopencl.tools import VectorArg
assert any(
isinstance(arg_tp, VectorArg)
for arg_tp in self.stage_1_inf.arg_types), \
"ReductionKernel can only be used with functions " \
"that have at least one vector argument"
def __call__(self, *args, **kwargs):
MAX_GROUP_COUNT = 1024 # noqa
SMALL_SEQ_COUNT = 4 # noqa
from pyopencl.array import empty
stage_inf = self.stage_1_inf
queue = kwargs.pop("queue", None)
wait_for = kwargs.pop("wait_for", None)
return_event = kwargs.pop("return_event", False)
outs = kwargs.pop("outs", [None]*self.n_exprs)
if kwargs:
raise TypeError("invalid keyword argument to reduction kernel")
stage1_args = args
while True:
invocation_args = []
vectors = []
from pyopencl.tools import VectorArg
for arg, arg_tp in zip(args, stage_inf.arg_types):
if isinstance(arg_tp, VectorArg):
if not arg.flags.forc:
raise RuntimeError("ReductionKernel cannot "
"deal with non-contiguous arrays")
vectors.append(arg)
invocation_args.append(arg.base_data)
if arg_tp.with_offset:
invocation_args.append(arg.offset)
else:
invocation_args.append(arg)
repr_vec = vectors[0]
sz = repr_vec.size
if queue is not None:
use_queue = queue
else:
use_queue = repr_vec.queue
if sz<=stage_inf.group_size*SMALL_SEQ_COUNT*MAX_GROUP_COUNT:
total_group_size = SMALL_SEQ_COUNT*stage_inf.group_size
group_count = (sz+total_group_size-1)//total_group_size
seq_count = SMALL_SEQ_COUNT
else:
group_count = MAX_GROUP_COUNT
macrogroup_size = group_count*stage_inf.group_size
seq_count = (sz+macrogroup_size-1)//macrogroup_size
if group_count==1:
results = [empty(use_queue,
(), self.dtype_out,
allocator=repr_vec.allocator) if out is None else out for out in outs]
else:
results = [empty(use_queue,
(group_count,), self.dtype_out,
allocator=repr_vec.allocator) for out in outs]
last_evt = stage_inf.kernel(
use_queue,
(group_count*stage_inf.group_size,),
(stage_inf.group_size,),
*([r.base_data for r in results]+[results[0].offset,]
+invocation_args+[seq_count, sz]),
**dict(wait_for=wait_for))
wait_for = [last_evt]
#print "ooooo ", group_count, len(args)
if group_count==1:
if return_event:
return results, last_evt
else:
return results
else:
stage_inf = self.stage_2_inf
args = tuple(results)+stage1_args
#args = (results[0],)+stage1_args
if __name__=='__main__':
from gputools import OCLArray, OCLReductionKernel
k1 = OCLReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_expr="x[i]",
arguments="__global float *x")
k2 = OCLMultiReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_exprs=["y[i]*x[i]","x[i]"],
arguments="__global float *x, __global float *y")
N = 512
a = OCLArray.from_array(np.ones((N,N),np.float32))
b = OCLArray.from_array(2.*np.ones((N,N),np.float32))
o1 = OCLArray.empty((),np.float32)
o2 = OCLArray.empty((),np.float32)
from time import time
t = time()
for _ in range(400):
k1(a)
k1(b)
k1(a).get()
k1(b).get()
print(time()-t)
t = time()
#print k2(a,b, outs = [o1,o2])
for _ in range(400):
k2(a[0],b[0], outs = [o1,o2])
o1.get()
print(time()-t)
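    # A hedged follow-up check (illustrative, not in the original file): combine the two
    # partial sums returned through `outs` on the host to form the weighted average
    # sum(x*y)/sum(x); with a = ones and b = 2*ones this should print 2.0.
    k2(a, b, outs=[o1, o2])
    print("weighted average sum(x*y)/sum(x) = %s" % (o1.get() / o2.get()))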
# open("kern_new_1.txt","w").write(("%s"%k2.stage_1_inf).replace("\\n","\n"))
# open("kern_new_2.txt","w").write(("%s"%k2.stage_2_inf).replace("\\n","\n")) | en | 0.36346 | an adaptation of pyopencl's reduction kernel for weighted avarages like sum(a*b) <EMAIL> # {{{ kernel source //CL// <% inds = range(len(map_exprs)) %> #define GROUP_SIZE ${group_size} % for i,m in enumerate(map_exprs): #define READ_AND_MAP_${i}(i) (${m}) % endfor #define REDUCE(a, b) (${reduce_expr}) % if double_support: #if __OPENCL_C_VERSION__ < 120 #pragma OPENCL EXTENSION cl_khr_fp64: enable #endif #define PYOPENCL_DEFINE_CDOUBLE % endif #include <pyopencl-complex.h> ${preamble} typedef ${out_type} out_type; __kernel void ${name}( % for i in inds: __global out_type *out__base_${i}, % endfor long out__offset, ${arguments}, unsigned int seq_count, unsigned int n) { % for i in inds: __global out_type *out_${i} = (__global out_type *) ( (__global char *) out__base_${i} + out__offset); % endfor ${arg_prep} % for i in inds: __local out_type ldata_${i}[GROUP_SIZE]; out_type acc_${i} = ${neutral}; % endfor unsigned int lid = get_local_id(0); unsigned int i = get_group_id(0)*GROUP_SIZE*seq_count + lid; //printf("seq: %d\tlid = %d\ti=%d\n",seq_count,lid,i); for (unsigned s = 0; s < seq_count; ++s) { if (i >= n) break; % for i in inds: acc_${i} = REDUCE(acc_${i}, READ_AND_MAP_${i}(i)); % endfor i += GROUP_SIZE; } % for i in inds: ldata_${i}[lid] = acc_${i}; % endfor <% cur_size = group_size %> % while cur_size > 1: barrier(CLK_LOCAL_MEM_FENCE); <% new_size = cur_size // 2 assert new_size * 2 == cur_size %> if (lid < ${new_size}) { % for i in inds: ldata_${i}[lid] = REDUCE( ldata_${i}[lid], ldata_${i}[lid + ${new_size}]); % endfor } <% cur_size = new_size %> % endwhile if (lid == 0) { % for i in inds: out_${i}[get_group_id(0)] = ldata_${i}[0]; % endfor //printf("result: %.4f\n",out_0[get_group_id(0)] ); } } # {{{ compute group size # dirty fix for the RV770 boards # compute lmem limit # round down to power of 2 # }}} # sys.exit() # }}} # {{{ main reduction kernel simultanous reduction of a weighted sum of severalbuffers example: k = OCLMultiReduction(np.float32, neutral="0",reduce_expr="a+b", map_exprs = ["x[i]", "x[i]*y[i]"], arguments="__global float *x,__global float *y") k(a,b, out1 = out1, out2 = out2) # noqa # noqa #print "ooooo ", group_count, len(args) #args = (results[0],)+stage1_args #print k2(a,b, outs = [o1,o2]) # open("kern_new_1.txt","w").write(("%s"%k2.stage_1_inf).replace("\\n","\n")) # open("kern_new_2.txt","w").write(("%s"%k2.stage_2_inf).replace("\\n","\n")) | 2.175873 | 2 |
mineshaft.py | DidymusRex/PiCraft | 0 | 9209 | #! /usr/bin/env python
import mcpi.minecraft as minecraft
import mcpi.block as block
import random
import time
mc = minecraft.Minecraft.create()
# ----------------------------------------------------------------------
# S E T U P
# ----------------------------------------------------------------------
# Where Am I?
pos = mc.player.getTilePos()
print "Game center point is %d, %d, %d" % (pos.x, pos.y, pos.z)
limit=256
mc.setBlocks(pos.x, pos.y, pos.z, pos.x+10, pos.y-256, pos.z+10, block.AIR.id)
mc.setBlocks(pos.x, pos.y, pos.z, pos.x-10, pos.y+256, pos.z-10, block.DIAMOND_ORE.id)
| #! /usr/bin/env python
import mcpi.minecraft as minecraft
import mcpi.block as block
import random
import time
mc = minecraft.Minecraft.create()
# ----------------------------------------------------------------------
# S E T U P
# ----------------------------------------------------------------------
# Where Am I?
pos = mc.player.getTilePos()
print "Game center point is %d, %d, %d" % (pos.x, pos.y, pos.z)
limit=256
mc.setBlocks(pos.x, pos.y, pos.z, pos.x+10, pos.y-256, pos.z+10, block.AIR.id)
mc.setBlocks(pos.x, pos.y, pos.z, pos.x-10, pos.y+256, pos.z-10, block.DIAMOND_ORE.id)
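# A small illustrative follow-up (assumes the standard mcpi API, which provides
# postToChat and player.setTilePos): announce the carved shaft in chat and move the
# player beside the diamond column so the setBlocks calls above are easy to inspect.
mc.postToChat("Mineshaft carved at %d, %d, %d" % (pos.x, pos.y, pos.z))
mc.player.setTilePos(pos.x - 1, pos.y + 1, pos.z - 1)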
| en | 0.162243 | #! /usr/bin/env python # ---------------------------------------------------------------------- # S E T U P # ---------------------------------------------------------------------- # Where Am I? | 2.566413 | 3 |
example/example01.py | ChenglongChen/TextRank4ZH | 2 | 9210 | #-*- encoding:utf-8 -*-
from __future__ import print_function
import sys
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
import codecs
from textrank4zh import TextRank4Keyword, TextRank4Sentence
text = codecs.open('../test/doc/01.txt', 'r', 'utf-8').read()
tr4w = TextRank4Keyword()
tr4w.analyze(text=text, lower=True, window=2)  # in py2, text must be a UTF-8 encoded str or a unicode object; in py3, it must be UTF-8 encoded bytes or a str object
print( '关键词:' )
for item in tr4w.get_keywords(20, word_min_len=1):
print(item.word, item.weight)
print()
print( '关键短语:' )
for phrase in tr4w.get_keyphrases(keywords_num=20, min_occur_num= 2):
print(phrase)
tr4s = TextRank4Sentence()
tr4s.analyze(text=text, lower=True, source = 'all_filters')
print()
print( '摘要:' )
for item in tr4s.get_key_sentences(num=3):
print(item.weight, item.sentence) | #-*- encoding:utf-8 -*-
from __future__ import print_function
import sys
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
import codecs
from textrank4zh import TextRank4Keyword, TextRank4Sentence
text = codecs.open('../test/doc/01.txt', 'r', 'utf-8').read()
tr4w = TextRank4Keyword()
tr4w.analyze(text=text, lower=True, window=2)  # in py2, text must be a UTF-8 encoded str or a unicode object; in py3, it must be UTF-8 encoded bytes or a str object
print( '关键词:' )
for item in tr4w.get_keywords(20, word_min_len=1):
print(item.word, item.weight)
print()
print( '关键短语:' )
for phrase in tr4w.get_keyphrases(keywords_num=20, min_occur_num= 2):
print(phrase)
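# Illustrative aside (not in the original example): the keyword items can also be
# collected into plain Python structures for downstream use; the call mirrors the
# get_keywords invocation above.
keyword_list = [item.word for item in tr4w.get_keywords(20, word_min_len=1)]
print('/'.join(keyword_list))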
tr4s = TextRank4Sentence()
tr4s.analyze(text=text, lower=True, source = 'all_filters')
print()
print( '摘要:' )
for item in tr4s.get_key_sentences(num=3):
print(item.weight, item.sentence) | zh | 0.312402 | #-*- encoding:utf-8 -*- # py2中text必须是utf8编码的str或者unicode对象,py3中必须是utf8编码的bytes或者str对象 | 3.023639 | 3 |
scripts/anonymize_dumpdata.py | suutari-ai/respa | 1 | 9211 | import random
import uuid
import sys
import json
from faker import Factory
from faker.providers.person.fi_FI import Provider as PersonProvider
fake = Factory.create('fi_FI')
email_by_user = {}
users_by_id = {}
def anonymize_users(users):
usernames = set()
emails = set()
for data in users:
if data['model'] != 'users.user':
continue
user = data['fields']
user['password'] = "!"
username = fake.user_name()
while username in usernames:
username = fake.user_name()
usernames.add(username)
user['username'] = username
user['uuid'] = str(uuid.uuid4())
if user['first_name']:
user['first_name'] = fake.first_name()
if user['last_name']:
user['last_name'] = fake.last_name()
user['email'] = fake.email()
email_by_user[data['pk']] = user['email']
users_by_id[data['pk']] = user
def remove_secrets(data):
for model in data:
fields = model['fields']
if model['model'] == 'socialaccount.socialapp':
fields['client_id'] = fake.md5()
fields['secret'] = fake.md5()
        elif model['model'] == 'socialaccount.socialtoken':  # scrub stored OAuth tokens
fields['token_secret'] = fake.md5()
fields['token'] = fake.md5()
elif model['model'] == 'account.emailaddress':
fields['email'] = email_by_user[fields['user']]
elif model['model'] == 'socialaccount.socialaccount':
fields['extra_data'] = '{}'
fields['uid'] = users_by_id[fields['user']]['uuid']
elif model['model'] == 'sessions.session':
fields['session_data'] = "!"
model['pk'] = fake.md5()
data = json.load(sys.stdin)
anonymize_users(data)
remove_secrets(data)
json.dump(data, sys.stdout, indent=4)
| import random
import uuid
import sys
import json
from faker import Factory
from faker.providers.person.fi_FI import Provider as PersonProvider
fake = Factory.create('fi_FI')
email_by_user = {}
users_by_id = {}
def anonymize_users(users):
usernames = set()
emails = set()
for data in users:
if data['model'] != 'users.user':
continue
user = data['fields']
user['password'] = "!"
username = fake.user_name()
while username in usernames:
username = fake.user_name()
usernames.add(username)
user['username'] = username
user['uuid'] = str(uuid.uuid4())
if user['first_name']:
user['first_name'] = fake.first_name()
if user['last_name']:
user['last_name'] = fake.last_name()
user['email'] = fake.email()
email_by_user[data['pk']] = user['email']
users_by_id[data['pk']] = user
def remove_secrets(data):
for model in data:
fields = model['fields']
if model['model'] == 'socialaccount.socialapp':
fields['client_id'] = fake.md5()
fields['secret'] = fake.md5()
        elif model['model'] == 'socialaccount.socialtoken':  # scrub stored OAuth tokens
fields['token_secret'] = fake.md5()
fields['token'] = fake.md5()
elif model['model'] == 'account.emailaddress':
fields['email'] = email_by_user[fields['user']]
elif model['model'] == 'socialaccount.socialaccount':
fields['extra_data'] = '{}'
fields['uid'] = users_by_id[fields['user']]['uuid']
elif model['model'] == 'sessions.session':
fields['session_data'] = "!"
model['pk'] = fake.md5()
data = json.load(sys.stdin)
anonymize_users(data)
remove_secrets(data)
json.dump(data, sys.stdout, indent=4)
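# Typical invocation (illustrative; the file names are placeholders, not part of the
# project): the script is a stdin -> stdout filter over a Django dumpdata export, e.g.
#   python manage.py dumpdata > dump.json
#   python anonymize_dumpdata.py < dump.json > dump_anonymized.json
#   python manage.py loaddata dump_anonymized.json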
| none | 1 | 2.461199 | 2 |
|
torch_geometric/utils/negative_sampling.py | NucciTheBoss/pytorch_geometric | 2,350 | 9212 | import random
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch_geometric.utils import coalesce, degree, remove_self_loops
from .num_nodes import maybe_num_nodes
def negative_sampling(edge_index: Tensor,
num_nodes: Optional[Union[int, Tuple[int, int]]] = None,
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False) -> Tensor:
r"""Samples random negative edges of a graph given by :attr:`edge_index`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int or Tuple[int, int], optional): The number of nodes,
*i.e.* :obj:`max_val + 1` of :attr:`edge_index`.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`.
(default: :obj:`None`)
num_neg_samples (int, optional): The (approximate) number of negative
samples to return.
If set to :obj:`None`, will try to return a negative edge for every
positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
assert method in ['sparse', 'dense']
size = num_nodes
bipartite = isinstance(size, (tuple, list))
size = maybe_num_nodes(edge_index) if size is None else size
size = (size, size) if not bipartite else size
force_undirected = False if bipartite else force_undirected
idx, population = edge_index_to_vector(edge_index, size, bipartite,
force_undirected)
if idx.numel() >= population:
return edge_index.new_empty((2, 0))
if num_neg_samples is None:
num_neg_samples = edge_index.size(1)
if force_undirected:
num_neg_samples = num_neg_samples // 2
prob = 1. - idx.numel() / population # Probability to sample a negative.
sample_size = int(1.1 * num_neg_samples / prob) # (Over)-sample size.
neg_idx = None
if method == 'dense':
# The dense version creates a mask of shape `population` to check for
# invalid samples.
mask = idx.new_ones(population, dtype=torch.bool)
mask[idx] = False
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, idx.device)
rnd = rnd[mask[rnd]] # Filter true negatives.
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
mask[neg_idx] = False
else: # 'sparse'
# The sparse version checks for invalid samples via `np.isin`.
idx = idx.to('cpu')
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, device='cpu')
mask = np.isin(rnd, idx)
if neg_idx is not None:
mask |= np.isin(rnd, neg_idx.to('cpu'))
mask = torch.from_numpy(mask).to(torch.bool)
rnd = rnd[~mask].to(edge_index.device)
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
return vector_to_edge_index(neg_idx, size, bipartite, force_undirected)
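# Usage sketch (illustrative, not part of the module): sample one negative edge per
# positive edge on a toy 4-node graph; the tensor literals are placeholders.
#
#   edge_index = torch.tensor([[0, 0, 1, 2],
#                              [1, 2, 3, 3]])
#   neg_edge_index = negative_sampling(edge_index, num_nodes=4)
#   # neg_edge_index holds (row, col) pairs that do not appear in edge_index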
def batched_negative_sampling(
edge_index: Tensor,
batch: Union[Tensor, Tuple[Tensor, Tensor]],
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False,
) -> Tensor:
r"""Samples random negative edges of multiple graphs given by
:attr:`edge_index` and :attr:`batch`.
Args:
edge_index (LongTensor): The edge indices.
batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph connecting two different node types.
num_neg_samples (int, optional): The number of negative samples to
return. If set to :obj:`None`, will try to return a negative edge
for every positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
if isinstance(batch, Tensor):
src_batch, dst_batch = batch, batch
else:
src_batch, dst_batch = batch[0], batch[1]
split = degree(src_batch[edge_index[0]], dtype=torch.long).tolist()
edge_indices = torch.split(edge_index, split, dim=1)
num_src = degree(src_batch, dtype=torch.long)
cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]])
if isinstance(batch, Tensor):
num_nodes = num_src.tolist()
cumsum = cum_src
else:
num_dst = degree(dst_batch, dtype=torch.long)
cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]])
num_nodes = torch.stack([num_src, num_dst], dim=1).tolist()
cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1)
neg_edge_indices = []
for i, edge_index in enumerate(edge_indices):
edge_index = edge_index - cumsum[i]
neg_edge_index = negative_sampling(edge_index, num_nodes[i],
num_neg_samples, method,
force_undirected)
neg_edge_index += cumsum[i]
neg_edge_indices.append(neg_edge_index)
return torch.cat(neg_edge_indices, dim=1)
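# Usage sketch (illustrative): two disjoint 3-node graphs batched together; `batch`
# assigns nodes 0-2 to graph 0 and nodes 3-5 to graph 1, and negatives are drawn
# within each graph only.
#
#   edge_index = torch.tensor([[0, 1, 3, 4],
#                              [1, 2, 4, 5]])
#   batch = torch.tensor([0, 0, 0, 1, 1, 1])
#   neg_edge_index = batched_negative_sampling(edge_index, batch)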
def structured_negative_sampling(edge_index, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True):
r"""Samples a negative edge :obj:`(i,k)` for every positive edge
:obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a
tuple of the form :obj:`(i,j,k)`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: (LongTensor, LongTensor, LongTensor)
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index.cpu()
pos_idx = row * num_nodes + col
if not contains_neg_self_loops:
loop_idx = torch.arange(num_nodes) * (num_nodes + 1)
pos_idx = torch.cat([pos_idx, loop_idx], dim=0)
rand = torch.randint(num_nodes, (row.size(0), ), dtype=torch.long)
neg_idx = row * num_nodes + rand
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = mask.nonzero(as_tuple=False).view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.randint(num_nodes, (rest.size(0), ), dtype=torch.long)
rand[rest] = tmp
neg_idx = row[rest] * num_nodes + tmp
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = rest[mask]
return edge_index[0], edge_index[1], rand.to(edge_index.device)
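# Usage sketch (illustrative): for every positive pair (i, j) a corrupted tail k is
# drawn, yielding (i, j, k) triplets of the kind used by margin/triplet losses.
#
#   i, j, k = structured_negative_sampling(edge_index, num_nodes=4)
#   # i, j are the original rows/cols of edge_index; (i[e], k[e]) is never a positive edge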
def structured_negative_sampling_feasible(
edge_index: Tensor, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True) -> bool:
r"""Returns :obj:`True` if
:meth:`~torch_geometric.utils.structured_negative_sampling` is feasible
on the graph given by :obj:`edge_index`.
:obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible
    if at least one node is connected to all other nodes.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: bool
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
max_num_neighbors = num_nodes
edge_index = coalesce(edge_index, num_nodes=num_nodes)
if not contains_neg_self_loops:
edge_index, _ = remove_self_loops(edge_index)
max_num_neighbors -= 1 # Reduce number of valid neighbors
deg = degree(edge_index[0], num_nodes)
# True if there exists no node that is connected to all other nodes.
return bool(torch.all(deg < max_num_neighbors))
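# Usage sketch (illustrative): a directed star in which node 0 points to every other
# node becomes infeasible once self-loops are disallowed, because no corrupted tail
# remains for node 0.
#
#   star = torch.tensor([[0, 0, 0], [1, 2, 3]])
#   structured_negative_sampling_feasible(star, num_nodes=4,
#                                         contains_neg_self_loops=False)  # -> False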
###############################################################################
def sample(population: int, k: int, device=None) -> Tensor:
if population <= k:
return torch.arange(population, device=device)
else:
return torch.tensor(random.sample(range(population), k), device=device)
def edge_index_to_vector(
edge_index: Tensor,
size: Tuple[int, int],
bipartite: bool,
force_undirected: bool = False,
) -> Tuple[Tensor, int]:
row, col = edge_index
if bipartite: # No need to account for self-loops.
idx = (row * size[1]).add_(col)
population = size[0] * size[1]
return idx, population
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
# We only operate on the upper triangular matrix:
mask = row < col
row, col = row[mask], col[mask]
offset = torch.arange(1, num_nodes, device=row.device).cumsum(0)[row]
idx = row.mul_(num_nodes).add_(col).sub_(offset)
population = (num_nodes * (num_nodes + 1)) // 2 - num_nodes
return idx, population
else:
assert size[0] == size[1]
num_nodes = size[0]
# We remove self-loops as we do not want to take them into account
# when sampling negative values.
mask = row != col
row, col = row[mask], col[mask]
col[row < col] -= 1
idx = row.mul_(num_nodes - 1).add_(col)
population = num_nodes * num_nodes - num_nodes
return idx, population
def vector_to_edge_index(idx: Tensor, size: Tuple[int, int], bipartite: bool,
force_undirected: bool = False) -> Tensor:
if bipartite: # No need to account for self-loops.
row = idx.div(size[1], rounding_mode='floor')
col = idx % size[1]
return torch.stack([row, col], dim=0)
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
offset = torch.arange(1, num_nodes, device=idx.device).cumsum(0)
end = torch.arange(num_nodes, num_nodes * num_nodes, num_nodes,
device=idx.device)
row = torch.bucketize(idx, end.sub_(offset), right=True)
col = offset[row].add_(idx) % num_nodes
return torch.stack([torch.cat([row, col]), torch.cat([col, row])], 0)
else:
assert size[0] == size[1]
num_nodes = size[0]
row = idx.div(num_nodes - 1, rounding_mode='floor')
col = idx % (num_nodes - 1)
col[row <= col] += 1
return torch.stack([row, col], dim=0)
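# Round-trip sketch (illustrative): edge_index_to_vector and vector_to_edge_index map
# edge pairs to linear indices and back, which is what lets the samplers draw plain
# integers and convert the survivors into negative edges.
#
#   idx, population = edge_index_to_vector(edge_index, (4, 4), bipartite=False)
#   recovered = vector_to_edge_index(idx, (4, 4), bipartite=False)
#   # recovered equals edge_index with any self-loops removed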
| import random
from typing import Optional, Tuple, Union
import numpy as np
import torch
from torch import Tensor
from torch_geometric.utils import coalesce, degree, remove_self_loops
from .num_nodes import maybe_num_nodes
def negative_sampling(edge_index: Tensor,
num_nodes: Optional[Union[int, Tuple[int, int]]] = None,
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False) -> Tensor:
r"""Samples random negative edges of a graph given by :attr:`edge_index`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int or Tuple[int, int], optional): The number of nodes,
*i.e.* :obj:`max_val + 1` of :attr:`edge_index`.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`.
(default: :obj:`None`)
num_neg_samples (int, optional): The (approximate) number of negative
samples to return.
If set to :obj:`None`, will try to return a negative edge for every
positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
assert method in ['sparse', 'dense']
size = num_nodes
bipartite = isinstance(size, (tuple, list))
size = maybe_num_nodes(edge_index) if size is None else size
size = (size, size) if not bipartite else size
force_undirected = False if bipartite else force_undirected
idx, population = edge_index_to_vector(edge_index, size, bipartite,
force_undirected)
if idx.numel() >= population:
return edge_index.new_empty((2, 0))
if num_neg_samples is None:
num_neg_samples = edge_index.size(1)
if force_undirected:
num_neg_samples = num_neg_samples // 2
prob = 1. - idx.numel() / population # Probability to sample a negative.
sample_size = int(1.1 * num_neg_samples / prob) # (Over)-sample size.
neg_idx = None
if method == 'dense':
# The dense version creates a mask of shape `population` to check for
# invalid samples.
mask = idx.new_ones(population, dtype=torch.bool)
mask[idx] = False
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, idx.device)
rnd = rnd[mask[rnd]] # Filter true negatives.
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
mask[neg_idx] = False
else: # 'sparse'
# The sparse version checks for invalid samples via `np.isin`.
idx = idx.to('cpu')
for _ in range(3): # Number of tries to sample negative indices.
rnd = sample(population, sample_size, device='cpu')
mask = np.isin(rnd, idx)
if neg_idx is not None:
mask |= np.isin(rnd, neg_idx.to('cpu'))
mask = torch.from_numpy(mask).to(torch.bool)
rnd = rnd[~mask].to(edge_index.device)
neg_idx = rnd if neg_idx is None else torch.cat([neg_idx, rnd])
if neg_idx.numel() >= num_neg_samples:
neg_idx = neg_idx[:num_neg_samples]
break
return vector_to_edge_index(neg_idx, size, bipartite, force_undirected)
def batched_negative_sampling(
edge_index: Tensor,
batch: Union[Tensor, Tuple[Tensor, Tensor]],
num_neg_samples: Optional[int] = None,
method: str = "sparse",
force_undirected: bool = False,
) -> Tensor:
r"""Samples random negative edges of multiple graphs given by
:attr:`edge_index` and :attr:`batch`.
Args:
edge_index (LongTensor): The edge indices.
batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector
:math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each
node to a specific example.
If given as a tuple, then :obj:`edge_index` is interpreted as a
bipartite graph connecting two different node types.
num_neg_samples (int, optional): The number of negative samples to
return. If set to :obj:`None`, will try to return a negative edge
for every positive edge. (default: :obj:`None`)
method (string, optional): The method to use for negative sampling,
*i.e.*, :obj:`"sparse"` or :obj:`"dense"`.
This is a memory/runtime trade-off.
:obj:`"sparse"` will work on any graph of any size, while
:obj:`"dense"` can perform faster true-negative checks.
(default: :obj:`"sparse"`)
force_undirected (bool, optional): If set to :obj:`True`, sampled
negative edges will be undirected. (default: :obj:`False`)
:rtype: LongTensor
"""
if isinstance(batch, Tensor):
src_batch, dst_batch = batch, batch
else:
src_batch, dst_batch = batch[0], batch[1]
split = degree(src_batch[edge_index[0]], dtype=torch.long).tolist()
edge_indices = torch.split(edge_index, split, dim=1)
num_src = degree(src_batch, dtype=torch.long)
cum_src = torch.cat([src_batch.new_zeros(1), num_src.cumsum(0)[:-1]])
if isinstance(batch, Tensor):
num_nodes = num_src.tolist()
cumsum = cum_src
else:
num_dst = degree(dst_batch, dtype=torch.long)
cum_dst = torch.cat([dst_batch.new_zeros(1), num_dst.cumsum(0)[:-1]])
num_nodes = torch.stack([num_src, num_dst], dim=1).tolist()
cumsum = torch.stack([cum_src, cum_dst], dim=1).unsqueeze(-1)
neg_edge_indices = []
for i, edge_index in enumerate(edge_indices):
edge_index = edge_index - cumsum[i]
neg_edge_index = negative_sampling(edge_index, num_nodes[i],
num_neg_samples, method,
force_undirected)
neg_edge_index += cumsum[i]
neg_edge_indices.append(neg_edge_index)
return torch.cat(neg_edge_indices, dim=1)
def structured_negative_sampling(edge_index, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True):
r"""Samples a negative edge :obj:`(i,k)` for every positive edge
:obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a
tuple of the form :obj:`(i,j,k)`.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: (LongTensor, LongTensor, LongTensor)
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
row, col = edge_index.cpu()
pos_idx = row * num_nodes + col
if not contains_neg_self_loops:
loop_idx = torch.arange(num_nodes) * (num_nodes + 1)
pos_idx = torch.cat([pos_idx, loop_idx], dim=0)
rand = torch.randint(num_nodes, (row.size(0), ), dtype=torch.long)
neg_idx = row * num_nodes + rand
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = mask.nonzero(as_tuple=False).view(-1)
while rest.numel() > 0: # pragma: no cover
tmp = torch.randint(num_nodes, (rest.size(0), ), dtype=torch.long)
rand[rest] = tmp
neg_idx = row[rest] * num_nodes + tmp
mask = torch.from_numpy(np.isin(neg_idx, pos_idx)).to(torch.bool)
rest = rest[mask]
return edge_index[0], edge_index[1], rand.to(edge_index.device)
def structured_negative_sampling_feasible(
edge_index: Tensor, num_nodes: Optional[int] = None,
contains_neg_self_loops: bool = True) -> bool:
r"""Returns :obj:`True` if
:meth:`~torch_geometric.utils.structured_negative_sampling` is feasible
on the graph given by :obj:`edge_index`.
:obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible
    if at least one node is connected to all other nodes.
Args:
edge_index (LongTensor): The edge indices.
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
contains_neg_self_loops (bool, optional): If set to
:obj:`False`, sampled negative edges will not contain self loops.
(default: :obj:`True`)
:rtype: bool
"""
num_nodes = maybe_num_nodes(edge_index, num_nodes)
max_num_neighbors = num_nodes
edge_index = coalesce(edge_index, num_nodes=num_nodes)
if not contains_neg_self_loops:
edge_index, _ = remove_self_loops(edge_index)
max_num_neighbors -= 1 # Reduce number of valid neighbors
deg = degree(edge_index[0], num_nodes)
# True if there exists no node that is connected to all other nodes.
return bool(torch.all(deg < max_num_neighbors))
###############################################################################
def sample(population: int, k: int, device=None) -> Tensor:
if population <= k:
return torch.arange(population, device=device)
else:
return torch.tensor(random.sample(range(population), k), device=device)
def edge_index_to_vector(
edge_index: Tensor,
size: Tuple[int, int],
bipartite: bool,
force_undirected: bool = False,
) -> Tuple[Tensor, int]:
row, col = edge_index
if bipartite: # No need to account for self-loops.
idx = (row * size[1]).add_(col)
population = size[0] * size[1]
return idx, population
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
# We only operate on the upper triangular matrix:
mask = row < col
row, col = row[mask], col[mask]
offset = torch.arange(1, num_nodes, device=row.device).cumsum(0)[row]
idx = row.mul_(num_nodes).add_(col).sub_(offset)
population = (num_nodes * (num_nodes + 1)) // 2 - num_nodes
return idx, population
else:
assert size[0] == size[1]
num_nodes = size[0]
# We remove self-loops as we do not want to take them into account
# when sampling negative values.
mask = row != col
row, col = row[mask], col[mask]
col[row < col] -= 1
idx = row.mul_(num_nodes - 1).add_(col)
population = num_nodes * num_nodes - num_nodes
return idx, population
def vector_to_edge_index(idx: Tensor, size: Tuple[int, int], bipartite: bool,
force_undirected: bool = False) -> Tensor:
if bipartite: # No need to account for self-loops.
row = idx.div(size[1], rounding_mode='floor')
col = idx % size[1]
return torch.stack([row, col], dim=0)
elif force_undirected:
assert size[0] == size[1]
num_nodes = size[0]
offset = torch.arange(1, num_nodes, device=idx.device).cumsum(0)
end = torch.arange(num_nodes, num_nodes * num_nodes, num_nodes,
device=idx.device)
row = torch.bucketize(idx, end.sub_(offset), right=True)
col = offset[row].add_(idx) % num_nodes
return torch.stack([torch.cat([row, col]), torch.cat([col, row])], 0)
else:
assert size[0] == size[1]
num_nodes = size[0]
row = idx.div(num_nodes - 1, rounding_mode='floor')
col = idx % (num_nodes - 1)
col[row <= col] += 1
return torch.stack([row, col], dim=0)
| en | 0.682934 | Samples random negative edges of a graph given by :attr:`edge_index`. Args: edge_index (LongTensor): The edge indices. num_nodes (int or Tuple[int, int], optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. If given as a tuple, then :obj:`edge_index` is interpreted as a bipartite graph with shape :obj:`(num_src_nodes, num_dst_nodes)`. (default: :obj:`None`) num_neg_samples (int, optional): The (approximate) number of negative samples to return. If set to :obj:`None`, will try to return a negative edge for every positive edge. (default: :obj:`None`) method (string, optional): The method to use for negative sampling, *i.e.*, :obj:`"sparse"` or :obj:`"dense"`. This is a memory/runtime trade-off. :obj:`"sparse"` will work on any graph of any size, while :obj:`"dense"` can perform faster true-negative checks. (default: :obj:`"sparse"`) force_undirected (bool, optional): If set to :obj:`True`, sampled negative edges will be undirected. (default: :obj:`False`) :rtype: LongTensor # Probability to sample a negative. # (Over)-sample size. # The dense version creates a mask of shape `population` to check for # invalid samples. # Number of tries to sample negative indices. # Filter true negatives. # 'sparse' # The sparse version checks for invalid samples via `np.isin`. # Number of tries to sample negative indices. Samples random negative edges of multiple graphs given by :attr:`edge_index` and :attr:`batch`. Args: edge_index (LongTensor): The edge indices. batch (LongTensor or Tuple[LongTensor, LongTensor]): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. If given as a tuple, then :obj:`edge_index` is interpreted as a bipartite graph connecting two different node types. num_neg_samples (int, optional): The number of negative samples to return. If set to :obj:`None`, will try to return a negative edge for every positive edge. (default: :obj:`None`) method (string, optional): The method to use for negative sampling, *i.e.*, :obj:`"sparse"` or :obj:`"dense"`. This is a memory/runtime trade-off. :obj:`"sparse"` will work on any graph of any size, while :obj:`"dense"` can perform faster true-negative checks. (default: :obj:`"sparse"`) force_undirected (bool, optional): If set to :obj:`True`, sampled negative edges will be undirected. (default: :obj:`False`) :rtype: LongTensor Samples a negative edge :obj:`(i,k)` for every positive edge :obj:`(i,j)` in the graph given by :attr:`edge_index`, and returns it as a tuple of the form :obj:`(i,j,k)`. Args: edge_index (LongTensor): The edge indices. num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) contains_neg_self_loops (bool, optional): If set to :obj:`False`, sampled negative edges will not contain self loops. (default: :obj:`True`) :rtype: (LongTensor, LongTensor, LongTensor) # pragma: no cover Returns :obj:`True` if :meth:`~torch_geometric.utils.structured_negative_sampling` is feasible on the graph given by :obj:`edge_index`. :obj:`~torch_geometric.utils.structured_negative_sampling` is infeasible if atleast one node is connected to all other nodes. Args: edge_index (LongTensor): The edge indices. num_nodes (int, optional): The number of nodes, *i.e.* :obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`) contains_neg_self_loops (bool, optional): If set to :obj:`False`, sampled negative edges will not contain self loops. 
(default: :obj:`True`) :rtype: bool # Reduce number of valid neighbors # True if there exists no node that is connected to all other nodes. ############################################################################### # No need to account for self-loops. # We only operate on the upper triangular matrix: # We remove self-loops as we do not want to take them into account # when sampling negative values. # No need to account for self-loops. | 2.7012 | 3 |
venv/Lib/site-packages/pafy/g.py | DavidJohnKelly/YoutubeDownloader | 2 | 9213 | import sys
if sys.version_info[:2] >= (3, 0):
# pylint: disable=E0611,F0401,I0011
from urllib.request import build_opener
else:
from urllib2 import build_opener
from . import __version__
urls = {
'gdata': "https://www.googleapis.com/youtube/v3/",
'watchv': "http://www.youtube.com/watch?v=%s",
'playlist': ('http://www.youtube.com/list_ajax?'
'style=json&action_get_list=1&list=%s'),
'thumb': "http://i.ytimg.com/vi/%s/default.jpg",
'bigthumb': "http://i.ytimg.com/vi/%s/mqdefault.jpg",
'bigthumbhd': "http://i.ytimg.com/vi/%s/hqdefault.jpg",
# For internal backend
'vidinfo': ('https://www.youtube.com/get_video_info?video_id=%s&'
'eurl=https://youtube.googleapis.com/v/%s&sts=%s'),
'embed': "https://youtube.com/embed/%s"
}
api_key = "<KEY>"
user_agent = "pafy " + __version__
lifespan = 60 * 60 * 5 # 5 hours
opener = build_opener()
opener.addheaders = [('User-Agent', user_agent)]
cache = {}
def_ydl_opts = {'quiet': True, 'prefer_insecure': False, 'no_warnings': True}
# The following are specific to the internal backend
UEFSM = 'url_encoded_fmt_stream_map'
AF = 'adaptive_fmts'
jsplayer = r';ytplayer\.config\s*=\s*({.*?});'
itags = {
'5': ('320x240', 'flv', "normal", ''),
'17': ('176x144', '3gp', "normal", ''),
'18': ('640x360', 'mp4', "normal", ''),
'22': ('1280x720', 'mp4', "normal", ''),
'34': ('640x360', 'flv', "normal", ''),
'35': ('854x480', 'flv', "normal", ''),
'36': ('320x240', '3gp', "normal", ''),
'37': ('1920x1080', 'mp4', "normal", ''),
'38': ('4096x3072', 'mp4', "normal", '4:3 hi-res'),
'43': ('640x360', 'webm', "normal", ''),
'44': ('854x480', 'webm', "normal", ''),
'45': ('1280x720', 'webm', "normal", ''),
'46': ('1920x1080', 'webm', "normal", ''),
'82': ('640x360-3D', 'mp4', "normal", ''),
'83': ('640x480-3D', 'mp4', 'normal', ''),
'84': ('1280x720-3D', 'mp4', "normal", ''),
'100': ('640x360-3D', 'webm', "normal", ''),
'102': ('1280x720-3D', 'webm', "normal", ''),
'133': ('426x240', 'm4v', 'video', ''),
'134': ('640x360', 'm4v', 'video', ''),
'135': ('854x480', 'm4v', 'video', ''),
'136': ('1280x720', 'm4v', 'video', ''),
'137': ('1920x1080', 'm4v', 'video', ''),
'138': ('4096x3072', 'm4v', 'video', ''),
'139': ('48k', 'm4a', 'audio', ''),
'140': ('128k', 'm4a', 'audio', ''),
'141': ('256k', 'm4a', 'audio', ''),
'160': ('256x144', 'm4v', 'video', ''),
'167': ('640x480', 'webm', 'video', ''),
'168': ('854x480', 'webm', 'video', ''),
'169': ('1280x720', 'webm', 'video', ''),
'170': ('1920x1080', 'webm', 'video', ''),
'171': ('128k', 'ogg', 'audio', ''),
'172': ('192k', 'ogg', 'audio', ''),
'218': ('854x480', 'webm', 'video', 'VP8'),
'219': ('854x480', 'webm', 'video', 'VP8'),
'242': ('360x240', 'webm', 'video', 'VP9'),
'243': ('480x360', 'webm', 'video', 'VP9'),
'244': ('640x480', 'webm', 'video', 'VP9 low'),
'245': ('640x480', 'webm', 'video', 'VP9 med'),
'246': ('640x480', 'webm', 'video', 'VP9 high'),
'247': ('720x480', 'webm', 'video', 'VP9'),
'248': ('1920x1080', 'webm', 'video', 'VP9'),
'249': ('48k', 'opus', 'audio', 'Opus'),
'250': ('56k', 'opus', 'audio', 'Opus'),
'251': ('128k', 'opus', 'audio', 'Opus'),
'256': ('192k', 'm4a', 'audio', '6-channel'),
'258': ('320k', 'm4a', 'audio', '6-channel'),
'264': ('2560x1440', 'm4v', 'video', ''),
'266': ('3840x2160', 'm4v', 'video', 'AVC'),
'271': ('1920x1280', 'webm', 'video', 'VP9'),
'272': ('3414x1080', 'webm', 'video', 'VP9'),
'278': ('256x144', 'webm', 'video', 'VP9'),
'298': ('1280x720', 'm4v', 'video', '60fps'),
'299': ('1920x1080', 'm4v', 'video', '60fps'),
'302': ('1280x720', 'webm', 'video', 'VP9'),
'303': ('1920x1080', 'webm', 'video', 'VP9'),
}
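# Usage sketch (illustrative, not part of the module): each itag entry is a
# (resolution_or_bitrate, extension, kind, note) tuple keyed by YouTube's format code,
# so a stream's properties can be looked up directly, e.g.
#
#   res, ext, kind, note = itags['22']    # ('1280x720', 'mp4', 'normal', '')
#   audio_only = [k for k, v in itags.items() if v[2] == 'audio']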
| import sys
if sys.version_info[:2] >= (3, 0):
# pylint: disable=E0611,F0401,I0011
from urllib.request import build_opener
else:
from urllib2 import build_opener
from . import __version__
urls = {
'gdata': "https://www.googleapis.com/youtube/v3/",
'watchv': "http://www.youtube.com/watch?v=%s",
'playlist': ('http://www.youtube.com/list_ajax?'
'style=json&action_get_list=1&list=%s'),
'thumb': "http://i.ytimg.com/vi/%s/default.jpg",
'bigthumb': "http://i.ytimg.com/vi/%s/mqdefault.jpg",
'bigthumbhd': "http://i.ytimg.com/vi/%s/hqdefault.jpg",
# For internal backend
'vidinfo': ('https://www.youtube.com/get_video_info?video_id=%s&'
'eurl=https://youtube.googleapis.com/v/%s&sts=%s'),
'embed': "https://youtube.com/embed/%s"
}
api_key = "<KEY>"
user_agent = "pafy " + __version__
lifespan = 60 * 60 * 5 # 5 hours
opener = build_opener()
opener.addheaders = [('User-Agent', user_agent)]
cache = {}
def_ydl_opts = {'quiet': True, 'prefer_insecure': False, 'no_warnings': True}
# The following are specific to the internal backend
UEFSM = 'url_encoded_fmt_stream_map'
AF = 'adaptive_fmts'
jsplayer = r';ytplayer\.config\s*=\s*({.*?});'
itags = {
'5': ('320x240', 'flv', "normal", ''),
'17': ('176x144', '3gp', "normal", ''),
'18': ('640x360', 'mp4', "normal", ''),
'22': ('1280x720', 'mp4', "normal", ''),
'34': ('640x360', 'flv', "normal", ''),
'35': ('854x480', 'flv', "normal", ''),
'36': ('320x240', '3gp', "normal", ''),
'37': ('1920x1080', 'mp4', "normal", ''),
'38': ('4096x3072', 'mp4', "normal", '4:3 hi-res'),
'43': ('640x360', 'webm', "normal", ''),
'44': ('854x480', 'webm', "normal", ''),
'45': ('1280x720', 'webm', "normal", ''),
'46': ('1920x1080', 'webm', "normal", ''),
'82': ('640x360-3D', 'mp4', "normal", ''),
'83': ('640x480-3D', 'mp4', 'normal', ''),
'84': ('1280x720-3D', 'mp4', "normal", ''),
'100': ('640x360-3D', 'webm', "normal", ''),
'102': ('1280x720-3D', 'webm', "normal", ''),
'133': ('426x240', 'm4v', 'video', ''),
'134': ('640x360', 'm4v', 'video', ''),
'135': ('854x480', 'm4v', 'video', ''),
'136': ('1280x720', 'm4v', 'video', ''),
'137': ('1920x1080', 'm4v', 'video', ''),
'138': ('4096x3072', 'm4v', 'video', ''),
'139': ('48k', 'm4a', 'audio', ''),
'140': ('128k', 'm4a', 'audio', ''),
'141': ('256k', 'm4a', 'audio', ''),
'160': ('256x144', 'm4v', 'video', ''),
'167': ('640x480', 'webm', 'video', ''),
'168': ('854x480', 'webm', 'video', ''),
'169': ('1280x720', 'webm', 'video', ''),
'170': ('1920x1080', 'webm', 'video', ''),
'171': ('128k', 'ogg', 'audio', ''),
'172': ('192k', 'ogg', 'audio', ''),
'218': ('854x480', 'webm', 'video', 'VP8'),
'219': ('854x480', 'webm', 'video', 'VP8'),
'242': ('360x240', 'webm', 'video', 'VP9'),
'243': ('480x360', 'webm', 'video', 'VP9'),
'244': ('640x480', 'webm', 'video', 'VP9 low'),
'245': ('640x480', 'webm', 'video', 'VP9 med'),
'246': ('640x480', 'webm', 'video', 'VP9 high'),
'247': ('720x480', 'webm', 'video', 'VP9'),
'248': ('1920x1080', 'webm', 'video', 'VP9'),
'249': ('48k', 'opus', 'audio', 'Opus'),
'250': ('56k', 'opus', 'audio', 'Opus'),
'251': ('128k', 'opus', 'audio', 'Opus'),
'256': ('192k', 'm4a', 'audio', '6-channel'),
'258': ('320k', 'm4a', 'audio', '6-channel'),
'264': ('2560x1440', 'm4v', 'video', ''),
'266': ('3840x2160', 'm4v', 'video', 'AVC'),
'271': ('1920x1280', 'webm', 'video', 'VP9'),
'272': ('3414x1080', 'webm', 'video', 'VP9'),
'278': ('256x144', 'webm', 'video', 'VP9'),
'298': ('1280x720', 'm4v', 'video', '60fps'),
'299': ('1920x1080', 'm4v', 'video', '60fps'),
'302': ('1280x720', 'webm', 'video', 'VP9'),
'303': ('1920x1080', 'webm', 'video', 'VP9'),
}
| en | 0.747965 | # pylint: disable=E0611,F0401,I0011 # For internal backend # 5 hours # The following are specific to the internal backend | 2.257034 | 2 |
src/cowrie/telnet/userauth.py | uwacyber/cowrie | 2,316 | 9214 | <gh_stars>1000+
# Copyright (C) 2015, 2016 GoSecure Inc.
"""
Telnet Transport and Authentication for the Honeypot
@author: <NAME> <<EMAIL>>
"""
from __future__ import annotations
import struct
from twisted.conch.telnet import (
ECHO,
LINEMODE,
NAWS,
SGA,
AuthenticatingTelnetProtocol,
ITelnetProtocol,
)
from twisted.python import log
from cowrie.core.config import CowrieConfig
from cowrie.core.credentials import UsernamePasswordIP
class HoneyPotTelnetAuthProtocol(AuthenticatingTelnetProtocol):
"""
TelnetAuthProtocol that takes care of Authentication. Once authenticated this
protocol is replaced with HoneyPotTelnetSession.
"""
loginPrompt = b"login: "
passwordPrompt = b"Password: "
windowSize = [40, 80]
def connectionMade(self):
# self.transport.negotiationMap[NAWS] = self.telnet_NAWS
        # Initial option negotiation. Want something at least for Mirai
# for opt in (NAWS,):
# self.transport.doChain(opt).addErrback(log.err)
# I need to doubly escape here since my underlying
# CowrieTelnetTransport hack would remove it and leave just \n
self.transport.write(self.factory.banner.replace(b"\n", b"\r\r\n"))
self.transport.write(self.loginPrompt)
def connectionLost(self, reason):
"""
Fires on pre-authentication disconnects
"""
AuthenticatingTelnetProtocol.connectionLost(self, reason)
def telnet_User(self, line):
"""
Overridden to conditionally kill 'WILL ECHO' which confuses clients
that don't implement a proper Telnet protocol (most malware)
"""
self.username = line # .decode()
# only send ECHO option if we are chatting with a real Telnet client
self.transport.willChain(ECHO)
# FIXME: this should be configurable or provided via filesystem
self.transport.write(self.passwordPrompt)
return "Password"
def telnet_Password(self, line):
username, password = self.username, line # .decode()
del self.username
def login(ignored):
self.src_ip = self.transport.getPeer().host
creds = UsernamePasswordIP(username, password, self.src_ip)
d = self.portal.login(creds, self.src_ip, ITelnetProtocol)
d.addCallback(self._cbLogin)
d.addErrback(self._ebLogin)
# are we dealing with a real Telnet client?
if self.transport.options:
# stop ECHO
# even if ECHO negotiation fails we still want to attempt a login
# this allows us to support dumb clients which is common in malware
# thus the addBoth: on success and on exception (AlreadyNegotiating)
self.transport.wontChain(ECHO).addBoth(login)
else:
# process login
login("")
return "Discard"
def telnet_Command(self, command):
self.transport.protocol.dataReceived(command + b"\r")
return "Command"
def _cbLogin(self, ial):
"""
Fired on a successful login
"""
interface, protocol, logout = ial
protocol.windowSize = self.windowSize
self.protocol = protocol
self.logout = logout
self.state = "Command"
self.transport.write(b"\n")
# Remove the short timeout of the login prompt.
self.transport.setTimeout(
CowrieConfig.getint("honeypot", "interactive_timeout", fallback=300)
)
# replace myself with avatar protocol
protocol.makeConnection(self.transport)
self.transport.protocol = protocol
def _ebLogin(self, failure):
# TODO: provide a way to have user configurable strings for wrong password
self.transport.wontChain(ECHO)
self.transport.write(b"\nLogin incorrect\n")
self.transport.write(self.loginPrompt)
self.state = "User"
def telnet_NAWS(self, data):
"""
From TelnetBootstrapProtocol in twisted/conch/telnet.py
"""
if len(data) == 4:
width, height = struct.unpack("!HH", b"".join(data))
self.windowSize = [height, width]
else:
log.msg("Wrong number of NAWS bytes")
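    # Worked example (illustrative, not in the upstream file): the NAWS payload is the
    # four raw bytes sent by the client, so data = [b'\x00', b'\x50', b'\x00', b'\x18']
    # unpacks to width=80, height=24 and sets windowSize to [24, 80].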
def enableLocal(self, opt):
if opt == ECHO:
return True
# TODO: check if twisted now supports SGA (see git commit c58056b0)
elif opt == SGA:
return False
else:
return False
def enableRemote(self, opt):
# TODO: check if twisted now supports LINEMODE (see git commit c58056b0)
if opt == LINEMODE:
return False
elif opt == NAWS:
return True
elif opt == SGA:
return True
else:
return False
| # Copyright (C) 2015, 2016 GoSecure Inc.
"""
Telnet Transport and Authentication for the Honeypot
@author: <NAME> <<EMAIL>>
"""
from __future__ import annotations
import struct
from twisted.conch.telnet import (
ECHO,
LINEMODE,
NAWS,
SGA,
AuthenticatingTelnetProtocol,
ITelnetProtocol,
)
from twisted.python import log
from cowrie.core.config import CowrieConfig
from cowrie.core.credentials import UsernamePasswordIP
class HoneyPotTelnetAuthProtocol(AuthenticatingTelnetProtocol):
"""
TelnetAuthProtocol that takes care of Authentication. Once authenticated this
protocol is replaced with HoneyPotTelnetSession.
"""
loginPrompt = b"login: "
passwordPrompt = b"Password: "
windowSize = [40, 80]
def connectionMade(self):
# self.transport.negotiationMap[NAWS] = self.telnet_NAWS
        # Initial option negotiation. Want something at least for Mirai
# for opt in (NAWS,):
# self.transport.doChain(opt).addErrback(log.err)
# I need to doubly escape here since my underlying
# CowrieTelnetTransport hack would remove it and leave just \n
self.transport.write(self.factory.banner.replace(b"\n", b"\r\r\n"))
self.transport.write(self.loginPrompt)
def connectionLost(self, reason):
"""
Fires on pre-authentication disconnects
"""
AuthenticatingTelnetProtocol.connectionLost(self, reason)
def telnet_User(self, line):
"""
Overridden to conditionally kill 'WILL ECHO' which confuses clients
that don't implement a proper Telnet protocol (most malware)
"""
self.username = line # .decode()
# only send ECHO option if we are chatting with a real Telnet client
self.transport.willChain(ECHO)
# FIXME: this should be configurable or provided via filesystem
self.transport.write(self.passwordPrompt)
return "Password"
def telnet_Password(self, line):
username, password = self.username, line # .decode()
del self.username
def login(ignored):
self.src_ip = self.transport.getPeer().host
creds = UsernamePasswordIP(username, password, self.src_ip)
d = self.portal.login(creds, self.src_ip, ITelnetProtocol)
d.addCallback(self._cbLogin)
d.addErrback(self._ebLogin)
# are we dealing with a real Telnet client?
if self.transport.options:
# stop ECHO
# even if ECHO negotiation fails we still want to attempt a login
# this allows us to support dumb clients which is common in malware
# thus the addBoth: on success and on exception (AlreadyNegotiating)
self.transport.wontChain(ECHO).addBoth(login)
else:
# process login
login("")
return "Discard"
def telnet_Command(self, command):
self.transport.protocol.dataReceived(command + b"\r")
return "Command"
def _cbLogin(self, ial):
"""
Fired on a successful login
"""
interface, protocol, logout = ial
protocol.windowSize = self.windowSize
self.protocol = protocol
self.logout = logout
self.state = "Command"
self.transport.write(b"\n")
# Remove the short timeout of the login prompt.
self.transport.setTimeout(
CowrieConfig.getint("honeypot", "interactive_timeout", fallback=300)
)
# replace myself with avatar protocol
protocol.makeConnection(self.transport)
self.transport.protocol = protocol
def _ebLogin(self, failure):
# TODO: provide a way to have user configurable strings for wrong password
self.transport.wontChain(ECHO)
self.transport.write(b"\nLogin incorrect\n")
self.transport.write(self.loginPrompt)
self.state = "User"
def telnet_NAWS(self, data):
"""
From TelnetBootstrapProtocol in twisted/conch/telnet.py
"""
if len(data) == 4:
width, height = struct.unpack("!HH", b"".join(data))
self.windowSize = [height, width]
else:
log.msg("Wrong number of NAWS bytes")
def enableLocal(self, opt):
if opt == ECHO:
return True
# TODO: check if twisted now supports SGA (see git commit c58056b0)
elif opt == SGA:
return False
else:
return False
def enableRemote(self, opt):
# TODO: check if twisted now supports LINEMODE (see git commit c58056b0)
if opt == LINEMODE:
return False
elif opt == NAWS:
return True
elif opt == SGA:
return True
else:
return False | en | 0.76378 | # Copyright (C) 2015, 2016 GoSecure Inc. Telnet Transport and Authentication for the Honeypot @author: <NAME> <<EMAIL>> TelnetAuthProtocol that takes care of Authentication. Once authenticated this protocol is replaced with HoneyPotTelnetSession. # self.transport.negotiationMap[NAWS] = self.telnet_NAWS # Initial option negotation. Want something at least for Mirai # for opt in (NAWS,): # self.transport.doChain(opt).addErrback(log.err) # I need to doubly escape here since my underlying # CowrieTelnetTransport hack would remove it and leave just \n Fires on pre-authentication disconnects Overridden to conditionally kill 'WILL ECHO' which confuses clients that don't implement a proper Telnet protocol (most malware) # .decode() # only send ECHO option if we are chatting with a real Telnet client # FIXME: this should be configurable or provided via filesystem # .decode() # are we dealing with a real Telnet client? # stop ECHO # even if ECHO negotiation fails we still want to attempt a login # this allows us to support dumb clients which is common in malware # thus the addBoth: on success and on exception (AlreadyNegotiating) # process login Fired on a successful login # Remove the short timeout of the login prompt. # replace myself with avatar protocol # TODO: provide a way to have user configurable strings for wrong password From TelnetBootstrapProtocol in twisted/conch/telnet.py # TODO: check if twisted now supports SGA (see git commit c58056b0) # TODO: check if twisted now supports LINEMODE (see git commit c58056b0) | 1.775027 | 2 |
authcheck/app/model/exception.py | flyr4nk/secscan-authcheck | 572 | 9215 | class WebException(Exception):
pass
class ParserException(Exception):
    """
    Parsing exception
    """
    pass
class ApiException(Exception):
    """
    API exception
    """
    pass
class WsException(Exception):
    """
    Polling exception
    """
    pass
class SsoException(Exception):
    """
    SSO exception
    """
    pass
class LibException(Exception):
    """
    Lib exception
    """
    pass
class AccountException(Exception):
    """
    Account exception (the account is no longer valid)
    """
    pass
class FlowException(Exception):
    """
    Authentication traffic exception
    """
    pass
| class WebException(Exception):
pass
class ParserException(Exception):
    """
    Parsing exception
    """
    pass
class ApiException(Exception):
    """
    API exception
    """
    pass
class WsException(Exception):
    """
    Polling exception
    """
    pass
class SsoException(Exception):
    """
    SSO exception
    """
    pass
class LibException(Exception):
    """
    Lib exception
    """
    pass
class AccountException(Exception):
    """
    Account exception (the account is no longer valid)
    """
    pass
class FlowException(Exception):
    """
    Authentication traffic exception
    """
    pass
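# Illustrative sketch (not part of the original module): callers are expected to map
# failure domains onto these classes and handle them separately, e.g.
#
#   try:
#       result = poll_auth_state(token)      # poll_auth_state is a hypothetical helper
#   except AccountException:
#       pass  # stored account is no longer valid -> trigger a re-login
#   except (SsoException, FlowException):
#       pass  # SSO handshake / authentication traffic problems
#   except WebException:
#       pass  # generic web-layer failure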
| zh | 0.641399 | 解析异常 api异常 轮询异常 sso异常 lib异常 账号异常(账号失效) 认证流量异常 | 1.955626 | 2 |
p_io.py | JeremyBuchanan/psf-photometry-pipeline | 0 | 9216 | <filename>p_io.py<gh_stars>0
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import obj_data as od
import saphires as saph
from astropy.time import Time
from astropy.visualization import ZScaleInterval, SqrtStretch, ImageNormalize, simple_norm
from matplotlib.backends.backend_pdf import PdfPages
ra = od.ra
dec = od.dec
pmra = od.pmra
pmdec = od.pmdec
plx = od.plx
epoch = od.epoch
matplotlib.rcParams.update({'font.size': 12})
def write_fits(fn, data, im_headers, wcs_header):
'''
Writes a new fits file including the image data and
and updated header for the new image
Parameters
----------
fn: string
The desired file name of the new fits file
data: array-like
Contains all the image data
Returns
-------
avg_airmass: float
the amount of atmosphere obscuring the target, found in image header. Here
the airmass for all images is averaged
bjd: float
Barycentric Julian Date, found in the image header
header: Header
'''
for keys in wcs_header:
if keys not in ['HISTORY', 'COMMENT']:
im_headers[0][keys] = wcs_header[keys]
airmass = []
for i in im_headers:
airmass.append(i['AIRMASS'])
avg_airmass = np.mean(airmass)
im_headers[0]['AIRMASS'] = avg_airmass
jd_middle = np.zeros(len(im_headers))
for i in range(len(im_headers)):
jd_middle[i] = Time(im_headers[i]['DATE-OBS'], format='isot').jd
exptime = im_headers[i]['EXPTIME']
jd_middle[i] = jd_middle[i] + (exptime/2.0)/3600.0/24.0
isot_date_obs = Time(np.mean(jd_middle), format='jd').isot
tele = im_headers[0]['SITEID']
brv,bjd,bvcorr = saph.utils.brvc(isot_date_obs,0.0,tele,ra=ra,dec=dec,epoch=epoch, pmra=pmra, pmdec=pmdec, px=plx)
im_headers[0]['BJD'] = bjd[0]
header = im_headers[0]
hdu_p = fits.PrimaryHDU(data=data, header=header)
hdu = fits.HDUList([hdu_p])
hdu.writeto(fn)
return avg_airmass, bjd, header
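# Usage sketch (illustrative; the file name and arrays are placeholders): after
# stacking a set of exposures, write the combined frame together with the solved WCS.
#
#   avg_airmass, bjd, header = write_fits('epoch01_stacked.fits', stacked_data,
#                                         im_headers, wcs_header)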
def write_pdf(name, images, model=None, final_stars=None, residual_stars=None, fluxes=None, plot_res=None):
pp = PdfPages(name)
for i in range(len(images)):
fig, ax = plt.subplots(1, figsize=(10, 10))
norm = ImageNormalize(images[i], interval=ZScaleInterval(), stretch=SqrtStretch())
im = ax.imshow(images[i], norm=norm)
plt.colorbar(im)
plt.tight_layout()
pp.savefig()
plt.close()
if model is not None:
fig, ax = plt.subplots(1, figsize=(10, 10))
psf = ax.imshow(model)
plt.colorbar(psf)
ax.set_title('PSF Model')
plt.tight_layout()
pp.savefig()
plt.close()
if final_stars is not None:
if plot_res == 'y':
nrows = len(final_stars)
ncols = 2
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 800), squeeze=True)
ax = ax.ravel()
index = 0
for i in range(0, nrows*ncols, 2):
norm = simple_norm(final_stars[index],'log')
norm2 = simple_norm(residual_stars[index], 'linear')
im = ax[i].imshow(final_stars[index], norm=norm, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im, ax = ax[i])
ax[i].set_title(np.str(fluxes[index]))
im_r = ax[i+1].imshow(residual_stars[index], norm=norm2, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im_r, ax = ax[i+1])
index = index + 1
plt.tight_layout()
pp.savefig()
plt.close()
pp.close()
def write_csv(name, im_name, bjd, filt, airmass, results, sky):
f = open(name, 'w')
f.write('NAME, ID, BJD, FLUX, FLUX ERROR, MAG, MAG ERROR, FILTER, X POSITION, Y POSITION, AIRMASS, RA, DEC\n')
for i in range(sky.size):
if results['flux_fit'][i] > 0:
star_id = results['id'][i]
flux = results['flux_fit'][i]
fluxerr = results['flux_unc'][i]
mag = -2.5*np.log10(flux)
magerr = (1.08574*fluxerr)/(flux)
x_pos = results['x_fit'][i]
y_pos = results['y_fit'][i]
ra = sky[i].ra.degree
dec = sky[i].dec.degree
f.write(im_name+','+np.str(i)+','+np.str(bjd)+','+np.str(flux)+','+np.str(fluxerr)+','+np.str(mag)+','+np.str(magerr)
+','+filt+','+np.str(x_pos)+','+np.str(y_pos)+','+str(airmass)+','+np.str(ra)+','+np.str(dec)+'\n')
f.close()
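# Usage sketch (illustrative; every name below is a placeholder): one CSV row is
# written per fitted star with positive flux, so a typical call right after the
# photometry step is
#
#   write_csv('epoch01_phot.csv', 'epoch01_stacked.fits', bjd, filt, avg_airmass,
#             results, sky)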
def write_txt(name, sources, stars_tbl, fwhm, results=None, t0=None,t1=None,t2=None,t3=None,t4=None,t5=None):
'''
Short text file with diagnostic info about each image set, specifically
for a successful run of the image set
Parameters
----------
name: string
name of the saved file
sources: Table
tabulated info about all the stars found on the image
stars_tbl: Table
tabulated info about all the stars used to form a psf
results: Table
tabulated info about all the stars found with the photometry routine
'''
f = open(name, 'w')
f.write('Number of stars in sources: '+np.str(len(sources))+'\nNumber of stars in stars_tbl: '+np.str(len(stars_tbl))
+'\nNumbers of stars in results: '+np.str(len(results))+'\nMin, Max, Median peaks in sources: '
+np.str(np.min(sources['peak']))+', '+np.str(np.max(sources['peak']))+', '+np.str(np.median(sources['peak']))
+'\nMin, Max, Median fluxes in results: '+np.str(np.min(results['flux_fit']))+', '+np.str(np.max(results['flux_fit']))+', '
+np.str(np.median(results['flux_fit']))+'\nFWHM: '+np.str(fwhm)+'\n')
if t5:
t_1 = t1-t0
t_2 = t2-t1
t_3 = t3-t2
t_4 = t4-t3
t_5 = t5-t4
t_f = t5-t0
f.write('Time to combine images: '+np.str(t_1)+'\nTime to find stars: '+np.str(t_2)+'\nTime to build psf: '
+np.str(t_3)+'\nTime to run photometry: '+np.str(t_4)+'\nTime to get wcs: '+np.str(t_5)+'\nTotal time: '
+np.str(t_f)+'\n')
f.close()
| <filename>p_io.py<gh_stars>0
import astropy.io.fits as fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import obj_data as od
import saphires as saph
from astropy.time import Time
from astropy.visualization import ZScaleInterval, SqrtStretch, ImageNormalize, simple_norm
from matplotlib.backends.backend_pdf import PdfPages
ra = od.ra
dec = od.dec
pmra = od.pmra
pmdec = od.pmdec
plx = od.plx
epoch = od.epoch
matplotlib.rcParams.update({'font.size': 12})
def write_fits(fn, data, im_headers, wcs_header):
'''
Writes a new fits file including the image data and an
updated header for the new image
Parameters
----------
fn: string
The desired file name of the new fits file
data: array-like
Contains all the image data
im_headers: list of Header
Headers of the input images; the first one is updated in place and reused
wcs_header: Header
WCS solution whose keywords (except HISTORY and COMMENT) are copied into the output header
Returns
-------
avg_airmass: float
the mean of the AIRMASS values found in the input image headers
bjd: float
Barycentric Julian Date computed for the mean mid-exposure time and written into the output header
header: Header
the updated header written to the new fits file
'''
for keys in wcs_header:
if keys not in ['HISTORY', 'COMMENT']:
im_headers[0][keys] = wcs_header[keys]
airmass = []
for i in im_headers:
airmass.append(i['AIRMASS'])
avg_airmass = np.mean(airmass)
im_headers[0]['AIRMASS'] = avg_airmass
jd_middle = np.zeros(len(im_headers))
for i in range(len(im_headers)):
jd_middle[i] = Time(im_headers[i]['DATE-OBS'], format='isot').jd
exptime = im_headers[i]['EXPTIME']
jd_middle[i] = jd_middle[i] + (exptime/2.0)/3600.0/24.0
isot_date_obs = Time(np.mean(jd_middle), format='jd').isot
tele = im_headers[0]['SITEID']
brv,bjd,bvcorr = saph.utils.brvc(isot_date_obs,0.0,tele,ra=ra,dec=dec,epoch=epoch, pmra=pmra, pmdec=pmdec, px=plx)
im_headers[0]['BJD'] = bjd[0]
header = im_headers[0]
hdu_p = fits.PrimaryHDU(data=data, header=header)
hdu = fits.HDUList([hdu_p])
hdu.writeto(fn)
return avg_airmass, bjd, header
def write_pdf(name, images, model=None, final_stars=None, residual_stars=None, fluxes=None, plot_res=None):
pp = PdfPages(name)
for i in range(len(images)):
fig, ax = plt.subplots(1, figsize=(10, 10))
norm = ImageNormalize(images[i], interval=ZScaleInterval(), stretch=SqrtStretch())
im = ax.imshow(images[i], norm=norm)
plt.colorbar(im)
plt.tight_layout()
pp.savefig()
plt.close()
if model is not None:
fig, ax = plt.subplots(1, figsize=(10, 10))
psf = ax.imshow(model)
plt.colorbar(psf)
ax.set_title('PSF Model')
plt.tight_layout()
pp.savefig()
plt.close()
if final_stars is not None:
if plot_res == 'y':
nrows = len(final_stars)
ncols = 2
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 800), squeeze=True)
ax = ax.ravel()
index = 0
for i in range(0, nrows*ncols, 2):
norm = simple_norm(final_stars[index],'log')
norm2 = simple_norm(residual_stars[index], 'linear')
im = ax[i].imshow(final_stars[index], norm=norm, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im, ax = ax[i])
ax[i].set_title(np.str(fluxes[index]))
im_r = ax[i+1].imshow(residual_stars[index], norm=norm2, origin='lower', cmap='viridis', interpolation='none')
fig.colorbar(im_r, ax = ax[i+1])
index = index + 1
plt.tight_layout()
pp.savefig()
plt.close()
pp.close()
def write_csv(name, im_name, bjd, filt, airmass, results, sky):
f = open(name, 'w')
f.write('NAME, ID, BJD, FLUX, FLUX ERROR, MAG, MAG ERROR, FILTER, X POSITION, Y POSITION, AIRMASS, RA, DEC\n')
for i in range(sky.size):
if results['flux_fit'][i] > 0:
star_id = results['id'][i]
flux = results['flux_fit'][i]
fluxerr = results['flux_unc'][i]
mag = -2.5*np.log10(flux)
magerr = (1.08574*fluxerr)/(flux)
x_pos = results['x_fit'][i]
y_pos = results['y_fit'][i]
ra = sky[i].ra.degree
dec = sky[i].dec.degree
f.write(im_name+','+np.str(i)+','+np.str(bjd)+','+np.str(flux)+','+np.str(fluxerr)+','+np.str(mag)+','+np.str(magerr)
+','+filt+','+np.str(x_pos)+','+np.str(y_pos)+','+str(airmass)+','+np.str(ra)+','+np.str(dec)+'\n')
f.close()
def write_txt(name, sources, stars_tbl, fwhm, results=None, t0=None,t1=None,t2=None,t3=None,t4=None,t5=None):
'''
Short text file with diagnostic info about each image set, specifically
for a successful run of the image set
Parameters
----------
name: string
name of the saved file
sources: Table
tabulated info about all the stars found on the image
stars_tbl: Table
tabulated info about all the stars used to form a psf
results: Table
tabulated info about all the stars found with the photometry routine
'''
f = open(name, 'w')
f.write('Number of stars in sources: '+np.str(len(sources))+'\nNumber of stars in stars_tbl: '+np.str(len(stars_tbl))
+'\nNumbers of stars in results: '+np.str(len(results))+'\nMin, Max, Median peaks in sources: '
+np.str(np.min(sources['peak']))+', '+np.str(np.max(sources['peak']))+', '+np.str(np.median(sources['peak']))
+'\nMin, Max, Median fluxes in results: '+np.str(np.min(results['flux_fit']))+', '+np.str(np.max(results['flux_fit']))+', '
+np.str(np.median(results['flux_fit']))+'\nFWHM: '+np.str(fwhm)+'\n')
if t5:
t_1 = t1-t0
t_2 = t2-t1
t_3 = t3-t2
t_4 = t4-t3
t_5 = t5-t4
t_f = t5-t0
f.write('Time to combine images: '+np.str(t_1)+'\nTime to find stars: '+np.str(t_2)+'\nTime to build psf: '
+np.str(t_3)+'\nTime to run photometry: '+np.str(t_4)+'\nTime to get wcs: '+np.str(t_5)+'\nTotal time: '
+np.str(t_f)+'\n')
f.close()
| en | 0.824411 | Writes a new fits file including the image data and and updated header for the new image Parameters ---------- fn: string The desired file name of the new fits file data: array-like Contains all the image data Returns ------- avg_airmass: float the amount of atmosphere obscuring the target, found in image header. Here the airmass for all images is averaged bjd: float Barycentric Julian Date, found in the image header header: Header Short text file with diagnostic info about each image set, specifically for a successful run of the image set Parameters ---------- name: string name of the saved file sources: Table tabulated info about all the stars found on the image stars_tbl: Table tabulated info about all the stars used to form a psf results: Table tabulated info about all the stars found with the photometry routine | 2.455753 | 2 |
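The write_fits routine above shifts each frame's DATE-OBS to its exposure midpoint before averaging the epochs; a minimal standalone sketch of that timing step (assuming astropy is installed, EXPTIME is in seconds, and the sample timestamps below are purely illustrative) is:
import numpy as np
from astropy.time import Time

def mean_mid_exposure_jd(date_obs_list, exptime_list):
    # JD of each exposure start, shifted by half the exposure time (seconds -> days)
    jd = Time(date_obs_list, format='isot').jd + np.asarray(exptime_list) / 2.0 / 86400.0
    return np.mean(jd)

mid_jd = mean_mid_exposure_jd(['2021-03-01T04:00:00.000', '2021-03-01T04:10:00.000'], [120.0, 120.0])
print(Time(mid_jd, format='jd').isot)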
sympy/polys/tests/test_monomialtools.py | ichuang/sympy | 1 | 9217 | <gh_stars>1-10
"""Tests for tools and arithmetics for monomials of distributed polynomials. """
from sympy.polys.monomialtools import (
monomials, monomial_count,
monomial_key, lex, grlex, grevlex,
monomial_mul, monomial_div,
monomial_gcd, monomial_lcm,
monomial_max, monomial_min,
monomial_divides,
Monomial,
InverseOrder, ProductOrder
)
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.abc import a, b, c, x, y, z
from sympy.utilities.pytest import raises
def test_monomials():
assert sorted(monomials([], 0)) == [1]
assert sorted(monomials([], 1)) == [1]
assert sorted(monomials([], 2)) == [1]
assert sorted(monomials([], 3)) == [1]
assert sorted(monomials([x], 0)) == [1]
assert sorted(monomials([x], 1)) == [1, x]
assert sorted(monomials([x], 2)) == [1, x, x**2]
assert sorted(monomials([x], 3)) == [1, x, x**2, x**3]
assert sorted(monomials([x, y], 0)) == [1]
assert sorted(monomials([x, y], 1)) == [1, x, y]
assert sorted(monomials([x, y], 2)) == [1, x, y, x**2, y**2, x*y]
assert sorted(monomials([x, y], 3)) == [1, x, y, x**2, x**3, y**2, y**3, x*y, x*y**2, y*x**2]
def test_monomial_count():
assert monomial_count(2, 2) == 6
assert monomial_count(2, 3) == 10
def test_lex_order():
assert lex((1,2,3)) == (1,2,3)
assert str(lex) == 'lex'
assert lex((1,2,3)) == lex((1,2,3))
assert lex((2,2,3)) > lex((1,2,3))
assert lex((1,3,3)) > lex((1,2,3))
assert lex((1,2,4)) > lex((1,2,3))
assert lex((0,2,3)) < lex((1,2,3))
assert lex((1,1,3)) < lex((1,2,3))
assert lex((1,2,2)) < lex((1,2,3))
def test_grlex_order():
assert grlex((1,2,3)) == (6, (1,2,3))
assert str(grlex) == 'grlex'
assert grlex((1,2,3)) == grlex((1,2,3))
assert grlex((2,2,3)) > grlex((1,2,3))
assert grlex((1,3,3)) > grlex((1,2,3))
assert grlex((1,2,4)) > grlex((1,2,3))
assert grlex((0,2,3)) < grlex((1,2,3))
assert grlex((1,1,3)) < grlex((1,2,3))
assert grlex((1,2,2)) < grlex((1,2,3))
assert grlex((2,2,3)) > grlex((1,2,4))
assert grlex((1,3,3)) > grlex((1,2,4))
assert grlex((0,2,3)) < grlex((1,2,2))
assert grlex((1,1,3)) < grlex((1,2,2))
def test_grevlex_order():
assert grevlex((1,2,3)) == (6, (-3,-2,-1))
assert str(grevlex) == 'grevlex'
assert grevlex((1,2,3)) == grevlex((1,2,3))
assert grevlex((2,2,3)) > grevlex((1,2,3))
assert grevlex((1,3,3)) > grevlex((1,2,3))
assert grevlex((1,2,4)) > grevlex((1,2,3))
assert grevlex((0,2,3)) < grevlex((1,2,3))
assert grevlex((1,1,3)) < grevlex((1,2,3))
assert grevlex((1,2,2)) < grevlex((1,2,3))
assert grevlex((2,2,3)) > grevlex((1,2,4))
assert grevlex((1,3,3)) > grevlex((1,2,4))
assert grevlex((0,2,3)) < grevlex((1,2,2))
assert grevlex((1,1,3)) < grevlex((1,2,2))
assert grevlex((0,1,1)) > grevlex((0,0,2))
assert grevlex((0,3,1)) < grevlex((2,2,1))
def test_InverseOrder():
ilex = InverseOrder(lex)
igrlex = InverseOrder(grlex)
assert ilex((1,2,3)) > ilex((2, 0, 3))
assert igrlex((1, 2, 3)) < igrlex((0, 2, 3))
assert str(ilex) == "ilex"
assert str(igrlex) == "igrlex"
def test_ProductOrder():
P = ProductOrder((grlex, lambda m: m[:2]), (grlex, lambda m: m[2:]))
assert P((1, 3, 3, 4, 5)) > P((2, 1, 5, 5, 5))
assert str(P) == "ProductOrder(grlex, grlex)"
def test_monomial_key():
assert monomial_key() == lex
assert monomial_key('lex') == lex
assert monomial_key('grlex') == grlex
assert monomial_key('grevlex') == grevlex
raises(ValueError, "monomial_key('foo')")
raises(ValueError, "monomial_key(1)")
def test_monomial_mul():
assert monomial_mul((3,4,1), (1,2,0)) == (4,6,1)
def test_monomial_div():
assert monomial_div((3,4,1), (1,2,0)) == (2,2,1)
def test_monomial_gcd():
assert monomial_gcd((3,4,1), (1,2,0)) == (1,2,0)
def test_monomial_lcm():
assert monomial_lcm((3,4,1), (1,2,0)) == (3,4,1)
def test_monomial_max():
assert monomial_max((3,4,5), (0,5,1), (6,3,9)) == (6,5,9)
def test_monomial_min():
assert monomial_min((3,4,5), (0,5,1), (6,3,9)) == (0,3,1)
def test_monomial_divides():
assert monomial_divides((1,2,3), (4,5,6)) is True
assert monomial_divides((1,2,3), (0,5,6)) is False
def test_Monomial():
m = Monomial((3, 4, 1), (x, y, z))
n = Monomial((1, 2, 0), (x, y, z))
assert m.as_expr() == x**3*y**4*z
assert n.as_expr() == x**1*y**2
assert m.as_expr(a, b, c) == a**3*b**4*c
assert n.as_expr(a, b, c) == a**1*b**2
assert m.exponents == (3, 4, 1)
assert m.gens == (x, y, z)
assert n.exponents == (1, 2, 0)
assert n.gens == (x, y, z)
assert m == (3, 4, 1)
assert n != (3, 4, 1)
assert m != (1, 2, 0)
assert n == (1, 2, 0)
assert m[0] == m[-3] == 3
assert m[1] == m[-2] == 4
assert m[2] == m[-1] == 1
assert n[0] == n[-3] == 1
assert n[1] == n[-2] == 2
assert n[2] == n[-1] == 0
assert m[:2] == (3, 4)
assert n[:2] == (1, 2)
assert m*n == Monomial((4, 6, 1))
assert m/n == Monomial((2, 2, 1))
assert m*(1, 2, 0) == Monomial((4, 6, 1))
assert m/(1, 2, 0) == Monomial((2, 2, 1))
assert m.gcd(n) == Monomial((1, 2, 0))
assert m.lcm(n) == Monomial((3, 4, 1))
assert m.gcd((1, 2, 0)) == Monomial((1, 2, 0))
assert m.lcm((1, 2, 0)) == Monomial((3, 4, 1))
assert m**0 == Monomial((0, 0, 0))
assert m**1 == m
assert m**2 == Monomial((6, 8, 2))
assert m**3 == Monomial((9,12, 3))
raises(ExactQuotientFailed, "m/Monomial((5, 2, 0))")
| """Tests for tools and arithmetics for monomials of distributed polynomials. """
from sympy.polys.monomialtools import (
monomials, monomial_count,
monomial_key, lex, grlex, grevlex,
monomial_mul, monomial_div,
monomial_gcd, monomial_lcm,
monomial_max, monomial_min,
monomial_divides,
Monomial,
InverseOrder, ProductOrder
)
from sympy.polys.polyerrors import ExactQuotientFailed
from sympy.abc import a, b, c, x, y, z
from sympy.utilities.pytest import raises
def test_monomials():
assert sorted(monomials([], 0)) == [1]
assert sorted(monomials([], 1)) == [1]
assert sorted(monomials([], 2)) == [1]
assert sorted(monomials([], 3)) == [1]
assert sorted(monomials([x], 0)) == [1]
assert sorted(monomials([x], 1)) == [1, x]
assert sorted(monomials([x], 2)) == [1, x, x**2]
assert sorted(monomials([x], 3)) == [1, x, x**2, x**3]
assert sorted(monomials([x, y], 0)) == [1]
assert sorted(monomials([x, y], 1)) == [1, x, y]
assert sorted(monomials([x, y], 2)) == [1, x, y, x**2, y**2, x*y]
assert sorted(monomials([x, y], 3)) == [1, x, y, x**2, x**3, y**2, y**3, x*y, x*y**2, y*x**2]
def test_monomial_count():
assert monomial_count(2, 2) == 6
assert monomial_count(2, 3) == 10
def test_lex_order():
assert lex((1,2,3)) == (1,2,3)
assert str(lex) == 'lex'
assert lex((1,2,3)) == lex((1,2,3))
assert lex((2,2,3)) > lex((1,2,3))
assert lex((1,3,3)) > lex((1,2,3))
assert lex((1,2,4)) > lex((1,2,3))
assert lex((0,2,3)) < lex((1,2,3))
assert lex((1,1,3)) < lex((1,2,3))
assert lex((1,2,2)) < lex((1,2,3))
def test_grlex_order():
assert grlex((1,2,3)) == (6, (1,2,3))
assert str(grlex) == 'grlex'
assert grlex((1,2,3)) == grlex((1,2,3))
assert grlex((2,2,3)) > grlex((1,2,3))
assert grlex((1,3,3)) > grlex((1,2,3))
assert grlex((1,2,4)) > grlex((1,2,3))
assert grlex((0,2,3)) < grlex((1,2,3))
assert grlex((1,1,3)) < grlex((1,2,3))
assert grlex((1,2,2)) < grlex((1,2,3))
assert grlex((2,2,3)) > grlex((1,2,4))
assert grlex((1,3,3)) > grlex((1,2,4))
assert grlex((0,2,3)) < grlex((1,2,2))
assert grlex((1,1,3)) < grlex((1,2,2))
def test_grevlex_order():
assert grevlex((1,2,3)) == (6, (-3,-2,-1))
assert str(grevlex) == 'grevlex'
assert grevlex((1,2,3)) == grevlex((1,2,3))
assert grevlex((2,2,3)) > grevlex((1,2,3))
assert grevlex((1,3,3)) > grevlex((1,2,3))
assert grevlex((1,2,4)) > grevlex((1,2,3))
assert grevlex((0,2,3)) < grevlex((1,2,3))
assert grevlex((1,1,3)) < grevlex((1,2,3))
assert grevlex((1,2,2)) < grevlex((1,2,3))
assert grevlex((2,2,3)) > grevlex((1,2,4))
assert grevlex((1,3,3)) > grevlex((1,2,4))
assert grevlex((0,2,3)) < grevlex((1,2,2))
assert grevlex((1,1,3)) < grevlex((1,2,2))
assert grevlex((0,1,1)) > grevlex((0,0,2))
assert grevlex((0,3,1)) < grevlex((2,2,1))
def test_InverseOrder():
ilex = InverseOrder(lex)
igrlex = InverseOrder(grlex)
assert ilex((1,2,3)) > ilex((2, 0, 3))
assert igrlex((1, 2, 3)) < igrlex((0, 2, 3))
assert str(ilex) == "ilex"
assert str(igrlex) == "igrlex"
def test_ProductOrder():
P = ProductOrder((grlex, lambda m: m[:2]), (grlex, lambda m: m[2:]))
assert P((1, 3, 3, 4, 5)) > P((2, 1, 5, 5, 5))
assert str(P) == "ProductOrder(grlex, grlex)"
def test_monomial_key():
assert monomial_key() == lex
assert monomial_key('lex') == lex
assert monomial_key('grlex') == grlex
assert monomial_key('grevlex') == grevlex
raises(ValueError, "monomial_key('foo')")
raises(ValueError, "monomial_key(1)")
def test_monomial_mul():
assert monomial_mul((3,4,1), (1,2,0)) == (4,6,1)
def test_monomial_div():
assert monomial_div((3,4,1), (1,2,0)) == (2,2,1)
def test_monomial_gcd():
assert monomial_gcd((3,4,1), (1,2,0)) == (1,2,0)
def test_monomial_lcm():
assert monomial_lcm((3,4,1), (1,2,0)) == (3,4,1)
def test_monomial_max():
assert monomial_max((3,4,5), (0,5,1), (6,3,9)) == (6,5,9)
def test_monomial_min():
assert monomial_min((3,4,5), (0,5,1), (6,3,9)) == (0,3,1)
def test_monomial_divides():
assert monomial_divides((1,2,3), (4,5,6)) is True
assert monomial_divides((1,2,3), (0,5,6)) is False
def test_Monomial():
m = Monomial((3, 4, 1), (x, y, z))
n = Monomial((1, 2, 0), (x, y, z))
assert m.as_expr() == x**3*y**4*z
assert n.as_expr() == x**1*y**2
assert m.as_expr(a, b, c) == a**3*b**4*c
assert n.as_expr(a, b, c) == a**1*b**2
assert m.exponents == (3, 4, 1)
assert m.gens == (x, y, z)
assert n.exponents == (1, 2, 0)
assert n.gens == (x, y, z)
assert m == (3, 4, 1)
assert n != (3, 4, 1)
assert m != (1, 2, 0)
assert n == (1, 2, 0)
assert m[0] == m[-3] == 3
assert m[1] == m[-2] == 4
assert m[2] == m[-1] == 1
assert n[0] == n[-3] == 1
assert n[1] == n[-2] == 2
assert n[2] == n[-1] == 0
assert m[:2] == (3, 4)
assert n[:2] == (1, 2)
assert m*n == Monomial((4, 6, 1))
assert m/n == Monomial((2, 2, 1))
assert m*(1, 2, 0) == Monomial((4, 6, 1))
assert m/(1, 2, 0) == Monomial((2, 2, 1))
assert m.gcd(n) == Monomial((1, 2, 0))
assert m.lcm(n) == Monomial((3, 4, 1))
assert m.gcd((1, 2, 0)) == Monomial((1, 2, 0))
assert m.lcm((1, 2, 0)) == Monomial((3, 4, 1))
assert m**0 == Monomial((0, 0, 0))
assert m**1 == m
assert m**2 == Monomial((6, 8, 2))
assert m**3 == Monomial((9,12, 3))
raises(ExactQuotientFailed, "m/Monomial((5, 2, 0))") | en | 0.920156 | Tests for tools and arithmetics for monomials of distributed polynomials. | 2.569867 | 3 |
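The grlex assertions above reduce to comparing exponent tuples by total degree first and lexicographically on ties; a small self-contained illustration of that rule, written without the sympy API, is:
def grlex_key(monom):
    # graded lexicographic order: total degree first, then plain lexicographic comparison
    return (sum(monom), monom)

monoms = [(1, 2, 4), (2, 2, 3), (0, 2, 3), (1, 2, 3)]
print(sorted(monoms, key=grlex_key))
# ascending: (0, 2, 3) < (1, 2, 3) < (1, 2, 4) < (2, 2, 3), matching the assertions above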
Solutions/TenableIO/Data Connectors/azure_sentinel.py | johnbilliris/Azure-Sentinel | 2,227 | 9218 | <reponame>johnbilliris/Azure-Sentinel
import re
import base64
import hmac
import hashlib
import logging
import requests
from datetime import datetime
class AzureSentinel:
def __init__(self, workspace_id, workspace_key, log_type, log_analytics_url=''):
self._workspace_id = workspace_id
self._workspace_key = workspace_key
self._log_type = log_type
if ((log_analytics_url in (None, '') or str(log_analytics_url).isspace())):
log_analytics_url = 'https://' + self._workspace_id + '.ods.opinsights.azure.com'
pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$"
if not re.match(pattern, str(log_analytics_url)):
raise Exception("Invalid Log Analytics Uri.")
self._log_analytics_url = log_analytics_url
def build_signature(self, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + \
str(content_length) + "\n" + content_type + \
"\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(self._workspace_key)
encoded_hash = base64.b64encode(hmac.new(
decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(
self._workspace_id, encoded_hash)
return authorization
def post_data(self, body):
logging.info('constructing post to send to Azure Sentinel.')
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
logging.info('build signature.')
signature = self.build_signature(
rfc1123date, content_length, method, content_type, resource)
logging.info('signature built.')
uri = self._log_analytics_url + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': self._log_type,
'x-ms-date': rfc1123date
}
logging.info('sending post to Azure Sentinel.')
response = requests.post(uri, data=body, headers=headers)
logging.info(response.status_code)
if (response.status_code >= 200 and response.status_code <= 299):
return response.status_code
else:
logging.warn("Events are not processed into Azure. Response code: {}".format(
response.status_code))
raise Exception(
f'Sending to Azure Sentinel failed with status code {response.status_code}')
| import re
import base64
import hmac
import hashlib
import logging
import requests
from datetime import datetime
class AzureSentinel:
def __init__(self, workspace_id, workspace_key, log_type, log_analytics_url=''):
self._workspace_id = workspace_id
self._workspace_key = workspace_key
self._log_type = log_type
if ((log_analytics_url in (None, '') or str(log_analytics_url).isspace())):
log_analytics_url = 'https://' + self._workspace_id + '.ods.opinsights.azure.com'
pattern = r"https:\/\/([\w\-]+)\.ods\.opinsights\.azure.([a-zA-Z\.]+)$"
if not re.match(pattern, str(log_analytics_url)):
raise Exception("Invalid Log Analytics Uri.")
self._log_analytics_url = log_analytics_url
def build_signature(self, date, content_length, method, content_type, resource):
x_headers = 'x-ms-date:' + date
string_to_hash = method + "\n" + \
str(content_length) + "\n" + content_type + \
"\n" + x_headers + "\n" + resource
bytes_to_hash = bytes(string_to_hash, encoding="utf-8")
decoded_key = base64.b64decode(self._workspace_key)
encoded_hash = base64.b64encode(hmac.new(
decoded_key, bytes_to_hash, digestmod=hashlib.sha256).digest()).decode()
authorization = "SharedKey {}:{}".format(
self._workspace_id, encoded_hash)
return authorization
def post_data(self, body):
logging.info('constructing post to send to Azure Sentinel.')
method = 'POST'
content_type = 'application/json'
resource = '/api/logs'
rfc1123date = datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
content_length = len(body)
logging.info('build signature.')
signature = self.build_signature(
rfc1123date, content_length, method, content_type, resource)
logging.info('signature built.')
uri = self._log_analytics_url + resource + '?api-version=2016-04-01'
headers = {
'content-type': content_type,
'Authorization': signature,
'Log-Type': self._log_type,
'x-ms-date': rfc1123date
}
logging.info('sending post to Azure Sentinel.')
response = requests.post(uri, data=body, headers=headers)
logging.info(response.status_code)
if (response.status_code >= 200 and response.status_code <= 299):
return response.status_code
else:
logging.warn("Events are not processed into Azure. Response code: {}".format(
response.status_code))
raise Exception(
f'Sending to Azure Sentinel failed with status code {response.status_code}') | none | 1 | 2.413461 | 2 |
|
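build_signature above implements the Log Analytics shared-key scheme: an HMAC-SHA256 over a newline-joined request description, base64-encoded into an Authorization header. A minimal standalone sketch of the same construction, using dummy workspace credentials that are illustrative only, is:
import base64
import hashlib
import hmac
from datetime import datetime, timezone

def shared_key_signature(workspace_id, workspace_key_b64, content_length, date_rfc1123,
                         method='POST', content_type='application/json', resource='/api/logs'):
    string_to_hash = '\n'.join([method, str(content_length), content_type,
                                'x-ms-date:' + date_rfc1123, resource])
    digest = hmac.new(base64.b64decode(workspace_key_b64),
                      string_to_hash.encode('utf-8'), hashlib.sha256).digest()
    return 'SharedKey {}:{}'.format(workspace_id, base64.b64encode(digest).decode())

now = datetime.now(timezone.utc).strftime('%a, %d %b %Y %H:%M:%S GMT')
print(shared_key_signature('00000000-0000-0000-0000-000000000000',
                           base64.b64encode(b'dummy-key').decode(), 128, now))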
MiniProject.py | siddharths067/CNN-Based-Agent-Modelling-for-Humanlike-Driving-Simulaion | 0 | 9219 | from tkinter import *
from PIL import ImageGrab
import numpy as np
import cv2
import time
import pyautogui as pg
import DirectInputRoutines as DIR
from LogKey import key_check
last_time = time.time()
one_hot = [0, 0, 0, 0, 0, 0]
hash_dict = {'w':0, 's':1, 'a':2, 'd':3, 'c':4, 'v':5}
X = []
y = []
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def process_img(original_image):
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
#processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
], np.int32)
processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
processed_img = roi(processed_img, [vertices])
# more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# edges rho theta thresh # min length, max gap:
#lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, 20, 15)
#draw_lines(processed_img,lines)
return processed_img
def roi(img, vertices):
#blank mask:
mask = np.zeros_like(img)
# fill the mask
cv2.fillPoly(mask, vertices, 255)
# now only show the area that is the mask
masked = cv2.bitwise_and(img, mask)
return masked
def draw_lines(img,lines):
for line in lines:
coords = line[0]
cv2.line(img, (coords[0], coords[1]), (coords[2], coords[3]), [255,255,255], 3)
def change_tab():
pg.hotkey("alt","tab")
def send_key(e):
hash = {"w":DIR.W, "a":DIR.A, "s":DIR.S, "d":DIR.D}
return hash[e.keysym]
def keyup(e):
if(e.keysym == "Alt_L" or e.keysym == "Tab"):
return
#print('up', e.keysym)
change_tab()
DIR.ReleaseKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 0
temp = list(one_hot)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen, temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
#cv2.imshow("image", printscreen)
def keydown(e):
#print('down', e.keysym)
if(e.keysym == "Alt_L" or e.keysym == "Tab"):
return
change_tab()
DIR.ReleaseKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 1
temp = list(one_hot)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,680)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen,temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
root = Tk()
frame = Frame(root, width=100, height=100)
frame.bind("<KeyPress>", keydown)
frame.bind("<KeyRelease>", keyup)
frame.pack()
frame.focus_set()
root.mainloop()
np.save("X.npy", X)
np.save("y.npy", y) | from tkinter import *
from PIL import ImageGrab
import numpy as np
import cv2
import time
import pyautogui as pg
import DirectInputRoutines as DIR
from LogKey import key_check
last_time = time.time()
one_hot = [0, 0, 0, 0, 0, 0]
hash_dict = {'w':0, 's':1, 'a':2, 'd':3, 'c':4, 'v':5}
X = []
y = []
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
def process_img(original_image):
processed_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
#processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300)
vertices = np.array([[10,500],[10,300],[300,200],[500,200],[800,300],[800,500],
], np.int32)
processed_img = cv2.GaussianBlur(processed_img,(5,5),0)
processed_img = roi(processed_img, [vertices])
# more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html
# edges rho theta thresh # min length, max gap:
#lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, 20, 15)
#draw_lines(processed_img,lines)
return processed_img
def roi(img, vertices):
#blank mask:
mask = np.zeros_like(img)
# fill the mask
cv2.fillPoly(mask, vertices, 255)
# now only show the area that is the mask
masked = cv2.bitwise_and(img, mask)
return masked
def draw_lines(img,lines):
for line in lines:
coords = line[0]
cv2.line(img, (coords[0], coords[1]), (coords[2], coords[3]), [255,255,255], 3)
def change_tab():
pg.hotkey("alt","tab")
def send_key(e):
hash = {"w":DIR.W, "a":DIR.A, "s":DIR.S, "d":DIR.D}
return hash[e.keysym]
def keyup(e):
if(e.keysym == "Alt_L" or e.keysym == "Tab"):
return
#print('up', e.keysym)
change_tab()
DIR.ReleaseKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 0
temp = list(one_hot)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,640)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen, temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
#cv2.imshow("image", printscreen)
def keydown(e):
#print('down', e.keysym)
if(e.keysym == "Alt_L" or e.keysym == "Tab"):
return
change_tab()
DIR.ReleaseKey(send_key(e))
change_tab()
global last_time
one_hot[hash_dict[e.keysym]] = 1
temp = list(one_hot)
printscreen = np.array(ImageGrab.grab(bbox=(0,40,800,680)))
printscreen = process_img(printscreen)
print('loop took {} seconds'.format(time.time()-last_time))
print([printscreen,temp])
last_time = time.time()
X.append(printscreen)
y.append(temp)
root = Tk()
frame = Frame(root, width=100, height=100)
frame.bind("<KeyPress>", keydown)
frame.bind("<KeyRelease>", keyup)
frame.pack()
frame.focus_set()
root.mainloop()
np.save("X.npy", X)
np.save("y.npy", y) | en | 0.530488 | # compute the median of the single channel pixel intensities # apply automatic Canny edge detection using the computed median # return the edged image #processed_img = cv2.Canny(processed_img, threshold1=200, threshold2=300) # more info: http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.html # edges rho theta thresh # min length, max gap: #lines = cv2.HoughLinesP(processed_img, 1, np.pi/180, 180, 20, 15) #draw_lines(processed_img,lines) #blank mask: # fill the mask # now only show the area that is the mask #print('down', e.keysym) #cv2.imshow("image", printscreen) #print('up', e.keysym) | 2.577406 | 3 |
src/code/djangotest/migrations/0001_initial.py | jielyu/notebook | 2 | 9220 | # Generated by Django 2.2.5 on 2019-10-05 23:22
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Password',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('website', models.CharField(max_length=128)),
('username', models.CharField(max_length=128)),
('pwd', models.CharField(max_length=128)),
('time_add', models.DateTimeField(auto_now_add=True, null=True)),
('time_modify', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'password_tab',
},
),
]
| # Generated by Django 2.2.5 on 2019-10-05 23:22
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Password',
fields=[
('id', models.IntegerField(primary_key=True, serialize=False, unique=True)),
('website', models.CharField(max_length=128)),
('username', models.CharField(max_length=128)),
('pwd', models.CharField(max_length=128)),
('time_add', models.DateTimeField(auto_now_add=True, null=True)),
('time_modify', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'password_tab',
},
),
]
| en | 0.765609 | # Generated by Django 2.2.5 on 2019-10-05 23:22 | 1.878663 | 2 |
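The CreateModel operation above corresponds to a model declaration along these lines; this is a sketch for orientation only (the surrounding app and settings module are assumed), not code taken from the repository:
from django.db import models

class Password(models.Model):
    # mirrors the fields declared in the 0001_initial migration above
    id = models.IntegerField(primary_key=True, unique=True)
    website = models.CharField(max_length=128)
    username = models.CharField(max_length=128)
    pwd = models.CharField(max_length=128)
    time_add = models.DateTimeField(auto_now_add=True, null=True)
    time_modify = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'password_tab'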
filer/tests/utils/__init__.py | pbs/django-filer | 1 | 9221 | <reponame>pbs/django-filer<filename>filer/tests/utils/__init__.py
from django.template.loaders.base import Loader as BaseLoader
from django.template.base import TemplateDoesNotExist
class Mock():
pass
class MockLoader(BaseLoader):
is_usable = True
def load_template_source(self, template_name, template_dirs=None):
if template_name == 'cms_mock_template.html':
return '<div></div>', 'template.html'
elif template_name == '404.html':
return "404 Not Found", "404.html"
else:
raise TemplateDoesNotExist()
| from django.template.loaders.base import Loader as BaseLoader
from django.template.base import TemplateDoesNotExist
class Mock():
pass
class MockLoader(BaseLoader):
is_usable = True
def load_template_source(self, template_name, template_dirs=None):
if template_name == 'cms_mock_template.html':
return '<div></div>', 'template.html'
elif template_name == '404.html':
return "404 Not Found", "404.html"
else:
raise TemplateDoesNotExist() | none | 1 | 2.205818 | 2 |
|
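A loader like MockLoader above is normally wired in through the template engine's 'loaders' option so tests never touch the filesystem; a hedged sketch of that configuration (settings layout assumed, dotted path matching the file location above) is:
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [],
    'OPTIONS': {
        # replace the default filesystem/app-directory loaders with the test mock
        'loaders': ['filer.tests.utils.MockLoader'],
    },
}]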
utils/arrival_overlaps.py | davmre/sigvisa | 0 | 9222 | <gh_stars>0
import sigvisa.database.db
from sigvisa.database.dataset import *
import sigvisa.utils.geog
cursor = database.db.connect().cursor()
detections, arid2num = read_detections(cursor, 1237680000, 1237680000 + 168 * 3600, arrival_table="leb_arrival", noarrays=False)
last_det = dict()
overlaps = 0
for det in detections:
site = det[0]
time = det[2]
if site in last_det:
gap = time - last_det[site]
if gap < 5:
print " arrival %d at siteid %d occured %f seconds after previous at %f : phase %s" % (det[1], site, gap, last_det[site], det[DET_PHASE_COL])
overlaps = overlaps + 1
last_det[site] = time
print "total overlaps: ", overlaps, " out of ", len(detections), " detections"
| import sigvisa.database.db
from sigvisa.database.dataset import *
import sigvisa.utils.geog
cursor = database.db.connect().cursor()
detections, arid2num = read_detections(cursor, 1237680000, 1237680000 + 168 * 3600, arrival_table="leb_arrival", noarrays=False)
last_det = dict()
overlaps = 0
for det in detections:
site = det[0]
time = det[2]
if site in last_det:
gap = time - last_det[site]
if gap < 5:
print " arrival %d at siteid %d occured %f seconds after previous at %f : phase %s" % (det[1], site, gap, last_det[site], det[DET_PHASE_COL])
overlaps = overlaps + 1
last_det[site] = time
print "total overlaps: ", overlaps, " out of ", len(detections), " detections" | none | 1 | 2.524465 | 3 |
|
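The overlap scan above is just a per-station comparison of consecutive arrival times; an equivalent self-contained sketch on plain (site, time) tuples, with made-up numbers, is:
def count_overlaps(detections, min_gap=5.0):
    # detections: iterable of (site_id, arrival_time) pairs in time order
    last_seen = {}
    overlaps = 0
    for site, arrival_time in detections:
        if site in last_seen and arrival_time - last_seen[site] < min_gap:
            overlaps += 1
        last_seen[site] = arrival_time
    return overlaps

print(count_overlaps([(1, 0.0), (1, 3.0), (2, 10.0), (1, 20.0)]))  # -> 1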
tests/transformation/streamline/test_move_identical_op_past_join_op.py | mmrahorovic/finn | 109 | 9223 | import pytest
from onnx import TensorProto
from onnx import helper as oh
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline.reorder import MoveTransposePastJoinAdd
from finn.util.basic import gen_finn_dt_tensor
def create_model(perm):
if perm == [0, 3, 1, 2]:
in_shape = [1, 128, 1, 256]
out_shape = [1, 256, 128, 1]
if perm == [0, 2, 3, 1]:
in_shape = [1, 256, 128, 1]
out_shape = [1, 128, 1, 256]
Transpose1_node = oh.make_node(
"Transpose", inputs=["in_transpose1"], outputs=["out_transpose1"], perm=perm
)
Transpose2_node = oh.make_node(
"Transpose", inputs=["in_transpose2"], outputs=["out_transpose2"], perm=perm
)
Join1_node = oh.make_node(
"Add", inputs=["out_transpose1", "out_transpose2"], outputs=["out_join1"]
)
in_transpose1 = oh.make_tensor_value_info(
"in_transpose1", TensorProto.FLOAT, in_shape
)
in_transpose2 = oh.make_tensor_value_info(
"in_transpose2", TensorProto.FLOAT, in_shape
)
out_transpose1 = oh.make_tensor_value_info(
"out_transpose1", TensorProto.FLOAT, out_shape
)
out_transpose2 = oh.make_tensor_value_info(
"out_transpose2", TensorProto.FLOAT, out_shape
)
out_join1 = oh.make_tensor_value_info("out_join1", TensorProto.FLOAT, out_shape)
graph = oh.make_graph(
nodes=[Transpose1_node, Transpose2_node, Join1_node],
name="test_graph",
inputs=[in_transpose1, in_transpose2],
outputs=[out_join1],
value_info=[
out_transpose1,
out_transpose2,
],
)
onnx_model = oh.make_model(graph, producer_name="test_model")
model = ModelWrapper(onnx_model)
return model
# Permutation of transpose node
@pytest.mark.parametrize("perm", [[0, 3, 1, 2], [0, 2, 3, 1]])
def test_move_identical_op_past_join_op(perm):
model = create_model(perm)
# Create input data
input0_tensor_name = model.graph.input[0].name
input1_tensor_name = model.graph.input[1].name
# Note: it is assumed that both tensors have the same shape and data type
input_shape = model.get_tensor_shape(input0_tensor_name)
input_dtype = model.get_tensor_datatype(input0_tensor_name)
input_val = gen_finn_dt_tensor(input_dtype, input_shape)
input_dict = {}
input_dict[input0_tensor_name] = input_val
input_dict[input1_tensor_name] = input_val
model_transformed = model.transform(MoveTransposePastJoinAdd())
assert oxe.compare_execution(model, model_transformed, input_dict)
# Check if order changed
node0_input0_model = model.find_consumers(model.graph.input[0].name)[0].op_type
node1_input1_model = model.find_consumers(model.graph.input[1].name)[0].op_type
node0_input0_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[0].name
)[0].op_type
node1_input1_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[1].name
)[0].op_type
assert node0_input0_model != node0_input0_model_transformed
assert node1_input1_model != node1_input1_model_transformed
| import pytest
from onnx import TensorProto
from onnx import helper as oh
import finn.core.onnx_exec as oxe
from finn.core.modelwrapper import ModelWrapper
from finn.transformation.streamline.reorder import MoveTransposePastJoinAdd
from finn.util.basic import gen_finn_dt_tensor
def create_model(perm):
if perm == [0, 3, 1, 2]:
in_shape = [1, 128, 1, 256]
out_shape = [1, 256, 128, 1]
if perm == [0, 2, 3, 1]:
in_shape = [1, 256, 128, 1]
out_shape = [1, 128, 1, 256]
Transpose1_node = oh.make_node(
"Transpose", inputs=["in_transpose1"], outputs=["out_transpose1"], perm=perm
)
Transpose2_node = oh.make_node(
"Transpose", inputs=["in_transpose2"], outputs=["out_transpose2"], perm=perm
)
Join1_node = oh.make_node(
"Add", inputs=["out_transpose1", "out_transpose2"], outputs=["out_join1"]
)
in_transpose1 = oh.make_tensor_value_info(
"in_transpose1", TensorProto.FLOAT, in_shape
)
in_transpose2 = oh.make_tensor_value_info(
"in_transpose2", TensorProto.FLOAT, in_shape
)
out_transpose1 = oh.make_tensor_value_info(
"out_transpose1", TensorProto.FLOAT, out_shape
)
out_transpose2 = oh.make_tensor_value_info(
"out_transpose2", TensorProto.FLOAT, out_shape
)
out_join1 = oh.make_tensor_value_info("out_join1", TensorProto.FLOAT, out_shape)
graph = oh.make_graph(
nodes=[Transpose1_node, Transpose2_node, Join1_node],
name="test_graph",
inputs=[in_transpose1, in_transpose2],
outputs=[out_join1],
value_info=[
out_transpose1,
out_transpose2,
],
)
onnx_model = oh.make_model(graph, producer_name="test_model")
model = ModelWrapper(onnx_model)
return model
# Permutation of transpose node
@pytest.mark.parametrize("perm", [[0, 3, 1, 2], [0, 2, 3, 1]])
def test_move_identical_op_past_join_op(perm):
model = create_model(perm)
# Create input data
input0_tensor_name = model.graph.input[0].name
input1_tensor_name = model.graph.input[1].name
# Note: it is assumed that both tensors have the same shape and data type
input_shape = model.get_tensor_shape(input0_tensor_name)
input_dtype = model.get_tensor_datatype(input0_tensor_name)
input_val = gen_finn_dt_tensor(input_dtype, input_shape)
input_dict = {}
input_dict[input0_tensor_name] = input_val
input_dict[input1_tensor_name] = input_val
model_transformed = model.transform(MoveTransposePastJoinAdd())
assert oxe.compare_execution(model, model_transformed, input_dict)
# Check if order changed
node0_input0_model = model.find_consumers(model.graph.input[0].name)[0].op_type
node1_input1_model = model.find_consumers(model.graph.input[1].name)[0].op_type
node0_input0_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[0].name
)[0].op_type
node1_input1_model_transformed = model_transformed.find_consumers(
model_transformed.graph.input[1].name
)[0].op_type
assert node0_input0_model != node0_input0_model_transformed
assert node1_input1_model != node1_input1_model_transformed
| en | 0.865581 | # Permutation of transpose node # Create input data # Note: it is assumed that both tensors have the same shape and data type # Check if order changed | 2.119054 | 2 |
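The transformation under test relies on a transpose commuting with an elementwise join: transposing both inputs and adding equals adding first and transposing once. A quick numpy check of that identity (shapes and seed chosen arbitrarily) is:
import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(1, 128, 1, 256))
b = rng.normal(size=(1, 128, 1, 256))
perm = (0, 3, 1, 2)

lhs = np.transpose(a, perm) + np.transpose(b, perm)  # Transpose nodes feeding the Add
rhs = np.transpose(a + b, perm)                      # Add first, single Transpose after
print(np.allclose(lhs, rhs))                         # True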
app/lib/ncr_util.py | jchrisfarris/antiope-scorecards | 1 | 9224 | <filename>app/lib/ncr_util.py<gh_stars>1-10
import json
from lib import authz
from lib.logger import logger
from lib.exclusions import exclusions, state_machine
def get_allowed_actions(user, account_id, requirement, exclusion):
allowed_actions = {
'remediate': False,
'requestExclusion': False,
'requestExclusionChange': False,
}
current_state = exclusions.get_state(exclusion)
valid_state_transitions = state_machine.USER_STATE_TRANSITIONS.get(current_state, {}).keys()
logger.debug('Current state: %s', current_state)
logger.debug('Valid state transitions: %s', str(valid_state_transitions))
logger.debug('User: %s', json.dumps(user))
if authz.can_request_exclusion(user, account_id)[0]:
if set(valid_state_transitions) & set(exclusions.REQUEST_EXCLUSION_STATES):
allowed_actions['requestExclusion'] = True
if set(valid_state_transitions) & set(exclusions.REQUEST_EXCLUSION_CHANGE_STATES):
allowed_actions['requestExclusionChange'] = True
# Determine If can remediate
if can_requirement_be_remediated(requirement):
allowed_actions['remediate'] = authz.can_remediate(user, account_id)[0]
return allowed_actions
def can_requirement_be_remediated(requirement):
"""
Method to validate whether a requirement is capable of being remediated.
:param requirement: The dict representing the requirement to check.
:returns bool: A boolean representing whether requirement can or cannot be remediated.
"""
return 'remediation' in requirement
| <filename>app/lib/ncr_util.py<gh_stars>1-10
import json
from lib import authz
from lib.logger import logger
from lib.exclusions import exclusions, state_machine
def get_allowed_actions(user, account_id, requirement, exclusion):
allowed_actions = {
'remediate': False,
'requestExclusion': False,
'requestExclusionChange': False,
}
current_state = exclusions.get_state(exclusion)
valid_state_transitions = state_machine.USER_STATE_TRANSITIONS.get(current_state, {}).keys()
logger.debug('Current state: %s', current_state)
logger.debug('Valid state transitions: %s', str(valid_state_transitions))
logger.debug('User: %s', json.dumps(user))
if authz.can_request_exclusion(user, account_id)[0]:
if set(valid_state_transitions) & set(exclusions.REQUEST_EXCLUSION_STATES):
allowed_actions['requestExclusion'] = True
if set(valid_state_transitions) & set(exclusions.REQUEST_EXCLUSION_CHANGE_STATES):
allowed_actions['requestExclusionChange'] = True
# Determine If can remediate
if can_requirement_be_remediated(requirement):
allowed_actions['remediate'] = authz.can_remediate(user, account_id)[0]
return allowed_actions
def can_requirement_be_remediated(requirement):
"""
Method to validate whether a requirement is capable of being remediated.
:param requirement: The dict representing the requirement to check.
:returns bool: A boolean representing whether requirement can or cannot be remediated.
"""
return 'remediation' in requirement
| en | 0.848518 | # Determine If can remediate Mehtod to validate whether a requirement is capable of being remediated. :param requirement: The dict representing the requirement to check. :returns bool: A boolean representing whether requirement can or cannot be remediated. | 2.263177 | 2 |
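get_allowed_actions above keys the two exclusion flags off the intersection between the current state's valid transitions and two fixed state sets; the core of that check, reduced to plain sets with state names invented purely for illustration, is:
USER_STATE_TRANSITIONS = {'initial': {'requested'}, 'approved': {'change_requested'}}
REQUEST_EXCLUSION_STATES = {'requested'}
REQUEST_EXCLUSION_CHANGE_STATES = {'change_requested'}

def exclusion_flags(current_state):
    valid_next = USER_STATE_TRANSITIONS.get(current_state, set())
    return {
        'requestExclusion': bool(valid_next & REQUEST_EXCLUSION_STATES),
        'requestExclusionChange': bool(valid_next & REQUEST_EXCLUSION_CHANGE_STATES),
    }

print(exclusion_flags('initial'))   # {'requestExclusion': True, 'requestExclusionChange': False}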
ambari-server/src/test/python/stacks/2.6/SPARK2/test_spark_livy2.py | Syndra/Ambari-source | 1 | 9225 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.5.0.0-1597"))
class TestSparkClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "SPARK2/2.0.0/package"
STACK_VERSION = "2.6"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/livy2_server.py",
classname = "LivyServer",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_start_default()
self.assertNoMoreResources()
def assert_start_default(self):
self.assertResourceCalled('Directory', '/var/run/livy2',
owner = 'livy',
group = 'hadoop',
create_parents = True,
mode = 0775
)
self.assertResourceCalled('Directory', '/var/log/livy2',
owner = 'livy',
group = 'hadoop',
create_parents = True,
mode = 0775
)
self.assertResourceCalled('HdfsResource', '/user/livy',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
owner = 'livy',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
mode = 0775,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
)
self.assertResourceCalled('HdfsResource', '/livy2-recovery',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
owner = 'livy',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
mode = 0700,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
)
self.assertResourceCalled('File', '/usr/hdp/current/livy2-server/conf/livy-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['livy2-env']['content']),
owner = 'livy',
group = 'livy',
mode = 0644,
)
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/livy2-server/conf/livy.conf',
owner = 'livy',
key_value_delimiter = ' ',
group = 'livy',
properties = self.getConfig()['configurations']['livy2-conf'],
)
self.assertResourceCalled('File', '/usr/hdp/current/livy2-server/conf/log4j.properties',
content = '\n # Set everything to be logged to the console\n log4j.rootCategory=INFO, console\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n log4j.logger.org.eclipse.jetty=WARN',
owner = 'livy',
group = 'livy',
mode = 0644,
)
self.assertResourceCalled('File', '/usr/hdp/current/livy2-server/conf/spark-blacklist.conf',
content = self.getConfig()['configurations']['livy2-spark-blacklist']['content'],
owner = 'livy',
group = 'livy',
mode = 0644,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/livy2-server/logs',
owner = 'livy',
group = 'livy',
mode = 0755,
)
self.assertResourceCalled('Execute', '/usr/hdp/current/livy2-server/bin/livy-server start',
environment = {'JAVA_HOME': '/usr/jdk64/jdk1.7.0_45'},
not_if = 'ls /var/run/livy2/livy-livy-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/livy2/livy-livy-server.pid` >/dev/null 2>&1',
user = 'livy'
)
| #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
from only_for_platform import not_for_platform, PLATFORM_WINDOWS
@not_for_platform(PLATFORM_WINDOWS)
@patch("resource_management.libraries.functions.get_stack_version", new=MagicMock(return_value="2.5.0.0-1597"))
class TestSparkClient(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "SPARK2/2.0.0/package"
STACK_VERSION = "2.6"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/livy2_server.py",
classname = "LivyServer",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_start_default()
self.assertNoMoreResources()
def assert_start_default(self):
self.assertResourceCalled('Directory', '/var/run/livy2',
owner = 'livy',
group = 'hadoop',
create_parents = True,
mode = 0775
)
self.assertResourceCalled('Directory', '/var/log/livy2',
owner = 'livy',
group = 'hadoop',
create_parents = True,
mode = 0775
)
self.assertResourceCalled('HdfsResource', '/user/livy',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
owner = 'livy',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
mode = 0775,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
)
self.assertResourceCalled('HdfsResource', '/livy2-recovery',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
owner = 'livy',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
mode = 0700,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/2.5.0.0-1235/hadoop/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://c6401.ambari.apache.org:8020',
hdfs_site = {u'a': u'b'},
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
dfs_type = '',
hadoop_conf_dir = '/usr/hdp/2.5.0.0-1235/hadoop/conf',
)
self.assertResourceCalled('File', '/usr/hdp/current/livy2-server/conf/livy-env.sh',
content = InlineTemplate(self.getConfig()['configurations']['livy2-env']['content']),
owner = 'livy',
group = 'livy',
mode = 0644,
)
self.assertResourceCalled('PropertiesFile', '/usr/hdp/current/livy2-server/conf/livy.conf',
owner = 'livy',
key_value_delimiter = ' ',
group = 'livy',
properties = self.getConfig()['configurations']['livy2-conf'],
)
self.assertResourceCalled('File', '/usr/hdp/current/livy2-server/conf/log4j.properties',
content = '\n # Set everything to be logged to the console\n log4j.rootCategory=INFO, console\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n log4j.logger.org.eclipse.jetty=WARN',
owner = 'livy',
group = 'livy',
mode = 0644,
)
self.assertResourceCalled('File', '/usr/hdp/current/livy2-server/conf/spark-blacklist.conf',
content = self.getConfig()['configurations']['livy2-spark-blacklist']['content'],
owner = 'livy',
group = 'livy',
mode = 0644,
)
self.assertResourceCalled('Directory', '/usr/hdp/current/livy2-server/logs',
owner = 'livy',
group = 'livy',
mode = 0755,
)
self.assertResourceCalled('Execute', '/usr/hdp/current/livy2-server/bin/livy-server start',
environment = {'JAVA_HOME': '/usr/jdk64/jdk1.7.0_45'},
not_if = 'ls /var/run/livy2/livy-livy-server.pid >/dev/null 2>&1 && ps -p `cat /var/run/livy2/livy-livy-server.pid` >/dev/null 2>&1',
user = 'livy'
)
| en | 0.671418 | #!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Set everything to be logged to the console\n log4j.rootCategory=INFO, console\n log4j.appender.console=org.apache.log4j.ConsoleAppender\n log4j.appender.console.target=System.err\n log4j.appender.console.layout=org.apache.log4j.PatternLayout\n log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n\n\n log4j.logger.org.eclipse.jetty=WARN', | 1.644628 | 2 |
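The class-level patch above pins get_stack_version for every test in the class by swapping in a MagicMock with a fixed return value; the same decorator pattern in isolation, applied to a standard-library function purely for illustration, is:
from unittest.mock import MagicMock, patch
import time

@patch('time.time', new=MagicMock(return_value=1234567890.0))
def frozen_now():
    # inside the decorated function, time.time() returns the mocked value
    return time.time()

print(frozen_now())   # 1234567890.0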
SentDex/Chapter05.py | harimaruthachalam/SentDexChapters | 0 | 9226 | import quandl
import math
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import pickle
import datetime
from matplotlib import style
import matplotlib.pyplot as plot
# Config
isLoadFromLocal = True
quandl.ApiConfig.api_key = '<KEY>'
style.use('ggplot')
# Loading data
if isLoadFromLocal:
df = pickle.load(open("DataFromQuandl_Stock_Chap2.pickle", "rb"))
else:
df = quandl.get('WIKI/GOOGL')
pickle.dump(df, open("DataFromQuandl_Stock_Chap2.pickle", "wb+"))
# Data pre-processing
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close']
df['PCT_Change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open']
df = df[['Adj. Close', 'HL_PCT', 'PCT_Change', 'Adj. Volume']]
forecastCol = 'Adj. Close'
df.fillna('-99999', inplace = True)
forecastOut = int(math.ceil(0.01*len(df)))
df['label'] = df[forecastCol].shift(-forecastOut)
# df['label'].plot()
# df[forecastCol].plot()
# plot.legend(loc = 4)
# plot.show()
x = np.array(df.drop(['label'], 1))
print(x)
x = preprocessing.scale(x)
print(x)
xLately = x[-forecastOut:]
x = x[:-forecastOut]
df.dropna(inplace = True)
y = np.array(df['label'])
# Regression
x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.1)
# classifier = svm.SVR(kernel='linear') # SVM SVR
classifier = LinearRegression(n_jobs=3) # Linear Regression
classifier.fit(x_train, y_train)
accuracy = classifier.score(x_test, y_test)
forecastSet = classifier.predict(xLately)
print('Accuracy is ', accuracy, '\nForecasted values are ', forecastSet, '\nNumber of values is ', forecastOut)
df['Forecast'] = np.nan
lastDate = df.iloc[-1].name
print(lastDate)
lastTime = lastDate.timestamp()
print(lastTime)
oneDay = 24 * 60 * 60 # seconds in a day
nextTime = lastTime + oneDay
for iter in forecastSet:
nextDate = datetime.datetime.fromtimestamp(nextTime)
nextTime += oneDay
df.loc[nextDate] = [np.nan for _ in range(len(df.columns) - 1)] + [iter]
df['Adj. Close'].plot()
df['Forecast'].plot()
plot.legend(loc = 4)
plot.xlabel('Date')
plot.ylabel('Price')
plot.show() | import quandl
import math
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import pickle
import datetime
from matplotlib import style
import matplotlib.pyplot as plot
# Config
isLoadFromLocal = True
quandl.ApiConfig.api_key = '<KEY>'
style.use('ggplot')
# Loading data
if isLoadFromLocal:
df = pickle.load(open("DataFromQuandl_Stock_Chap2.pickle", "rb"))
else:
df = quandl.get('WIKI/GOOGL')
pickle.dump(df, open("DataFromQuandl_Stock_Chap2.pickle", "wb+"))
# Data pre-processing
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close']
df['PCT_Change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open']
df = df[['Adj. Close', 'HL_PCT', 'PCT_Change', 'Adj. Volume']]
forecastCol = 'Adj. Close'
df.fillna(-99999, inplace = True)  # numeric sentinel; the quoted '-99999' would coerce the column to object dtype
forecastOut = int(math.ceil(0.01*len(df)))
df['label'] = df[forecastCol].shift(-forecastOut)
# df['label'].plot()
# df[forecastCol].plot()
# plot.legend(loc = 4)
# plot.show()
x = np.array(df.drop(['label'], 1))
print(x)
x = preprocessing.scale(x)
print(x)
xLately = x[-forecastOut:]
x = x[:-forecastOut]
df.dropna(inplace = True)
y = np.array(df['label'])
# Regression
x_train, x_test, y_train, y_test = cross_validation.train_test_split(x, y, test_size=0.1)
# classifier = svm.SVR(kernel='linear') # SVM SVR
classifier = LinearRegression(n_jobs=3) # Linear Regression
classifier.fit(x_train, y_train)
accuracy = classifier.score(x_test, y_test)
forecastSet = classifier.predict(xLately)
print('Accuracy is ', accuracy, '\nForecasted values are ', forecastSet, '\nNumber of values is ', forecastOut)
df['Forecast'] = np.nan
lastDate = df.iloc[-1].name
print(lastDate)
lastTime = lastDate.timestamp()
print(lastTime)
oneDay = 24 * 60 * 60 # seconds in a day
nextTime = lastTime + oneDay
for iter in forecastSet:
nextDate = datetime.datetime.fromtimestamp(nextTime)
nextTime += oneDay
df.loc[nextDate] = [np.nan for _ in range(len(df.columns) - 1)] + [iter]
df['Adj. Close'].plot()
df['Forecast'].plot()
plot.legend(loc = 4)
plot.xlabel('Date')
plot.ylabel('Price')
plot.show() | en | 0.331353 | # Config # Loading data # Data pre-processing # df['label'].plot() # df[forecastCol].plot() # plot.legend(loc = 4) # plot.show() # Regression # classifier = svm.SVR(kernel='linear') # SVM SVR # Linear Regression # seconds in a day | 2.489586 | 2 |
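# The forecasting script above imports `sklearn.cross_validation`, which was
# deprecated in scikit-learn 0.18 and removed in 0.20; the same split now lives
# in `sklearn.model_selection`. A sketch of the equivalent call against the
# current API; the toy arrays below only stand in for the scaled features and
# the shifted 'label' column built by the script.
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split

x = np.random.rand(200, 4)
y = np.random.rand(200)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1)
classifier = LinearRegression(n_jobs=3)
classifier.fit(x_train, y_train)
print(classifier.score(x_test, y_test))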
tifinity/actions/icc_parser.py | pmay/tifinity | 1 | 9227 | <reponame>pmay/tifinity
class IccProfile():
"""Parses an ICC Colour Profile.
According to spec: all Profile data shall be encoded as big-endian"""
def __init__(self, bytes):
self.header = {}
self.parse_icc(bytes)
def get_colour_space(self):
"""Returns the data colour space type, or None if not defined"""
return self.header.get('data_colour_space')
def tostring(self, limit_value=False):
out = "\nHEADER\n"
for k, v in self.header.items():
out += " [{0:27}]\t{1:31}\n".format(k, v)
out += "\nTAGS ({0})\n".format(self.tag_count)
for tag, (offset, size, value) in self.tags.items():
if len(value)>100 and limit_value:
out += " [{0}]\t{1}\t{2}\t{3}...\n".format(tag, offset, size, value[:100])
else:
out += " [{0}]\t{1}\t{2}\t{3}\n".format(tag, offset, size, value)
return out
    def parse_icc(self, bytes):
        """Parses the specified bytes representing an ICC Profile"""
# ICC profile consists of:
# - 128-byte profile header
# - profile tag table:
# - profile tagged element data (referenced from tag table)
if bytes is not None:
self.read_header(bytes)
self.read_tags(bytes)
def read_header(self, bytes):
self.header['profile_size'] = IccProfile.read_int(bytes, 0)
self.header['preferred_cmm_type'] = IccProfile.read_string(bytes, 4, 4)
self.header['profile_version_number'] = IccProfile.read_binary_coded_decimal(bytes, 8)
self.header['profile_device_class'] = IccProfile.read_string(bytes, 12, 4)
self.header['data_colour_space'] = IccProfile.read_string(bytes, 16, 4)
self.header['pcs'] = IccProfile.read_string(bytes, 20, 4)
self.header['creation_datetime'] = IccProfile.read_datetime(bytes, 24) # YY-mm-dd HH:mm:ss
self.header['acsp'] = IccProfile.read_string(bytes, 36, 4) # Must = acsp
self.header['primary_platform_sig'] = IccProfile.read_string(bytes, 40, 4) # APPL, MSFT, SGI, SUNW, 0
self.header['profile_flags'] = IccProfile.read_int(bytes, 44) # todo: flags
self.header['device_manufacturer'] = IccProfile.read_string(bytes, 48, 4)
self.header['device_model'] = IccProfile.read_int(bytes, 52)
self.header['device_attributes'] = IccProfile.read_int(bytes, 56) # todo: flags
self.header['rendering_intent'] = IccProfile.read_int(bytes, 64)
self.header['nciexyz_values'] = IccProfile.read_xyznumber(bytes, 68)
self.header['profile_creator_signature'] = IccProfile.read_string(bytes, 80, 4)
self.header['profile_id'] = str(bytes[84:99])
self.header['reserved'] = str(bytes[100:128])
def read_tags(self, bytes):
# 4 bytes tag count
# n x 12 byte tags (4 bytes sig, 4 bytes offset (relative to profile start), 4 bytes size of data element)
self.tag_count = IccProfile.read_int(bytes, 128)
self.tags = {}
for t in range(self.tag_count):
type = IccProfile.read_string(bytes, 132+(t*12), 4)
offset = IccProfile.read_int(bytes, 136+(t*12))
size = IccProfile.read_int(bytes, 140+(t*12))
read_func = tagtypes.get(type)
if read_func is not None:
#read_func = getattr(IccProfile, tag_tuple[0])
value = read_func(bytes, offset, size)
else:
value = bytes[offset: offset+size]
self.tags[type] = (offset, size, value)
@staticmethod
def read_int(bytes, offset, count=1, size=4, byteorder='big'):
return int.from_bytes(bytes[offset:offset+size], byteorder=byteorder)
@staticmethod
def read_string(bytes, offset, count, byteorder='big'):
return ''.join(map(chr, bytes[offset:offset+count]))
@staticmethod
def read_binary_coded_decimal(bytes, start):
out = "{0}.{1}.{2}".format(bytes[start],
bytes[start+1]>>4,
bytes[start+1]&0x0F)
return out
@staticmethod
def read_datetime(bytes, offset, byteorder='big'):
out = "{0}-{1}-{2} {3}:{4}:{5}".format(str(int.from_bytes(bytes[offset:offset + 2], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 2:offset + 4], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 4:offset + 6], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 6:offset + 8], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 8:offset + 10], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 10:offset + 12], byteorder=byteorder)))
return out
@staticmethod
def read_signature_type(bytes, offset, count):
assert (IccProfile.read_string(bytes, offset, 4) == 'sig ')
assert (IccProfile.read_int(bytes, offset + 4) == 0)
return IccProfile.read_string(bytes, offset+8, 4)
@staticmethod
def read_xyztype(bytes, offset, count):
sig = IccProfile.read_string(bytes, offset, 4)
assert(IccProfile.read_int(bytes, offset+4) == 0)
# todo: repeat xyz for remainder of xyztype bytes
xyz = IccProfile.read_xyznumber(bytes, offset+8)
return "{0}: {1}".format(sig, xyz)
@staticmethod
def read_xyznumber(bytes, offset, byteorder='big'):
x_i = IccProfile.read_s15Fixed16Number(bytes, offset)
y_i = IccProfile.read_s15Fixed16Number(bytes, offset+4)
z_i = IccProfile.read_s15Fixed16Number(bytes, offset+8)
return "X={0}, Y={1}, Z={2}".format(x_i, y_i, z_i)
@staticmethod
def read_trctype(bytes, offset, count):
# check first 4 bytes, either 'curv' or 'para'
sig = IccProfile.read_string(bytes, offset, 4)
if sig=='curv':
# next 4 bytes 0
assert (IccProfile.read_int(bytes, offset + 4) == 0)
n = IccProfile.read_int(bytes, offset+8)
vals = [IccProfile.read_int(bytes, offset+12+(2*i), size=2) for i in range(n)]
# todo: para
return "{0} : count {1} : {2}".format(sig, n, vals)
@staticmethod
def read_s15Fixed16Number(bytes, offset):
conv = lambda x: ((x & 0xffff0000) >> 16) + ((x & 0x0000ffff) / 65536)
return conv(int.from_bytes(bytes[offset:offset + 4], byteorder='big'))
@staticmethod
def read_s15Fixed16ArrayType(bytes, offset, count):
assert(IccProfile.read_string(bytes, offset, 4) == 'sf32')
assert(IccProfile.read_int(bytes, offset+4) == 0)
n = int((count-8)/4)
return [IccProfile.read_s15Fixed16Number(bytes, offset+8+(i*4)) for i in range(n)]
tagtypes = {
'chad': (IccProfile.read_s15Fixed16ArrayType),
'cprt': (IccProfile.read_string),
'desc': (IccProfile.read_string),
'dmdd': (IccProfile.read_string),
'tech': (IccProfile.read_signature_type),
'vued': (IccProfile.read_string),
'wtpt': (IccProfile.read_xyztype),
'bkpt': (IccProfile.read_xyztype), # private type?
'rTRC': (IccProfile.read_trctype),
'gTRC': (IccProfile.read_trctype),
'bTRC': (IccProfile.read_trctype),
'rXYZ': (IccProfile.read_xyztype),
'gXYZ': (IccProfile.read_xyztype),
'bXYZ': (IccProfile.read_xyztype),
}
if __name__=='__main__':
import numpy as np
import sys
with open(sys.argv[1], 'rb') as file:
data = np.fromfile(file, dtype="uint8")
profile = IccProfile(data)
print(profile.tostring()) | class IccProfile():
"""Parses an ICC Colour Profile.
According to spec: all Profile data shall be encoded as big-endian"""
def __init__(self, bytes):
self.header = {}
self.parse_icc(bytes)
def get_colour_space(self):
"""Returns the data colour space type, or None if not defined"""
return self.header.get('data_colour_space')
def tostring(self, limit_value=False):
out = "\nHEADER\n"
for k, v in self.header.items():
out += " [{0:27}]\t{1:31}\n".format(k, v)
out += "\nTAGS ({0})\n".format(self.tag_count)
for tag, (offset, size, value) in self.tags.items():
if len(value)>100 and limit_value:
out += " [{0}]\t{1}\t{2}\t{3}...\n".format(tag, offset, size, value[:100])
else:
out += " [{0}]\t{1}\t{2}\t{3}\n".format(tag, offset, size, value)
return out
    def parse_icc(self, bytes):
        """Parses the specified bytes representing an ICC Profile"""
# ICC profile consists of:
# - 128-byte profile header
# - profile tag table:
# - profile tagged element data (referenced from tag table)
if bytes is not None:
self.read_header(bytes)
self.read_tags(bytes)
def read_header(self, bytes):
self.header['profile_size'] = IccProfile.read_int(bytes, 0)
self.header['preferred_cmm_type'] = IccProfile.read_string(bytes, 4, 4)
self.header['profile_version_number'] = IccProfile.read_binary_coded_decimal(bytes, 8)
self.header['profile_device_class'] = IccProfile.read_string(bytes, 12, 4)
self.header['data_colour_space'] = IccProfile.read_string(bytes, 16, 4)
self.header['pcs'] = IccProfile.read_string(bytes, 20, 4)
self.header['creation_datetime'] = IccProfile.read_datetime(bytes, 24) # YY-mm-dd HH:mm:ss
self.header['acsp'] = IccProfile.read_string(bytes, 36, 4) # Must = acsp
self.header['primary_platform_sig'] = IccProfile.read_string(bytes, 40, 4) # APPL, MSFT, SGI, SUNW, 0
self.header['profile_flags'] = IccProfile.read_int(bytes, 44) # todo: flags
self.header['device_manufacturer'] = IccProfile.read_string(bytes, 48, 4)
self.header['device_model'] = IccProfile.read_int(bytes, 52)
self.header['device_attributes'] = IccProfile.read_int(bytes, 56) # todo: flags
self.header['rendering_intent'] = IccProfile.read_int(bytes, 64)
self.header['nciexyz_values'] = IccProfile.read_xyznumber(bytes, 68)
self.header['profile_creator_signature'] = IccProfile.read_string(bytes, 80, 4)
self.header['profile_id'] = str(bytes[84:99])
self.header['reserved'] = str(bytes[100:128])
def read_tags(self, bytes):
# 4 bytes tag count
# n x 12 byte tags (4 bytes sig, 4 bytes offset (relative to profile start), 4 bytes size of data element)
self.tag_count = IccProfile.read_int(bytes, 128)
self.tags = {}
for t in range(self.tag_count):
type = IccProfile.read_string(bytes, 132+(t*12), 4)
offset = IccProfile.read_int(bytes, 136+(t*12))
size = IccProfile.read_int(bytes, 140+(t*12))
read_func = tagtypes.get(type)
if read_func is not None:
#read_func = getattr(IccProfile, tag_tuple[0])
value = read_func(bytes, offset, size)
else:
value = bytes[offset: offset+size]
self.tags[type] = (offset, size, value)
@staticmethod
def read_int(bytes, offset, count=1, size=4, byteorder='big'):
return int.from_bytes(bytes[offset:offset+size], byteorder=byteorder)
@staticmethod
def read_string(bytes, offset, count, byteorder='big'):
return ''.join(map(chr, bytes[offset:offset+count]))
@staticmethod
def read_binary_coded_decimal(bytes, start):
out = "{0}.{1}.{2}".format(bytes[start],
bytes[start+1]>>4,
bytes[start+1]&0x0F)
return out
@staticmethod
def read_datetime(bytes, offset, byteorder='big'):
out = "{0}-{1}-{2} {3}:{4}:{5}".format(str(int.from_bytes(bytes[offset:offset + 2], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 2:offset + 4], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 4:offset + 6], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 6:offset + 8], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 8:offset + 10], byteorder=byteorder)),
str(int.from_bytes(bytes[offset + 10:offset + 12], byteorder=byteorder)))
return out
@staticmethod
def read_signature_type(bytes, offset, count):
assert (IccProfile.read_string(bytes, offset, 4) == 'sig ')
assert (IccProfile.read_int(bytes, offset + 4) == 0)
return IccProfile.read_string(bytes, offset+8, 4)
@staticmethod
def read_xyztype(bytes, offset, count):
sig = IccProfile.read_string(bytes, offset, 4)
assert(IccProfile.read_int(bytes, offset+4) == 0)
# todo: repeat xyz for remainder of xyztype bytes
xyz = IccProfile.read_xyznumber(bytes, offset+8)
return "{0}: {1}".format(sig, xyz)
@staticmethod
def read_xyznumber(bytes, offset, byteorder='big'):
x_i = IccProfile.read_s15Fixed16Number(bytes, offset)
y_i = IccProfile.read_s15Fixed16Number(bytes, offset+4)
z_i = IccProfile.read_s15Fixed16Number(bytes, offset+8)
return "X={0}, Y={1}, Z={2}".format(x_i, y_i, z_i)
@staticmethod
def read_trctype(bytes, offset, count):
# check first 4 bytes, either 'curv' or 'para'
sig = IccProfile.read_string(bytes, offset, 4)
if sig=='curv':
# next 4 bytes 0
assert (IccProfile.read_int(bytes, offset + 4) == 0)
n = IccProfile.read_int(bytes, offset+8)
vals = [IccProfile.read_int(bytes, offset+12+(2*i), size=2) for i in range(n)]
# todo: para
return "{0} : count {1} : {2}".format(sig, n, vals)
@staticmethod
def read_s15Fixed16Number(bytes, offset):
conv = lambda x: ((x & 0xffff0000) >> 16) + ((x & 0x0000ffff) / 65536)
return conv(int.from_bytes(bytes[offset:offset + 4], byteorder='big'))
@staticmethod
def read_s15Fixed16ArrayType(bytes, offset, count):
assert(IccProfile.read_string(bytes, offset, 4) == 'sf32')
assert(IccProfile.read_int(bytes, offset+4) == 0)
n = int((count-8)/4)
return [IccProfile.read_s15Fixed16Number(bytes, offset+8+(i*4)) for i in range(n)]
tagtypes = {
'chad': (IccProfile.read_s15Fixed16ArrayType),
'cprt': (IccProfile.read_string),
'desc': (IccProfile.read_string),
'dmdd': (IccProfile.read_string),
'tech': (IccProfile.read_signature_type),
'vued': (IccProfile.read_string),
'wtpt': (IccProfile.read_xyztype),
'bkpt': (IccProfile.read_xyztype), # private type?
'rTRC': (IccProfile.read_trctype),
'gTRC': (IccProfile.read_trctype),
'bTRC': (IccProfile.read_trctype),
'rXYZ': (IccProfile.read_xyztype),
'gXYZ': (IccProfile.read_xyztype),
'bXYZ': (IccProfile.read_xyztype),
}
if __name__=='__main__':
import numpy as np
import sys
with open(sys.argv[1], 'rb') as file:
data = np.fromfile(file, dtype="uint8")
profile = IccProfile(data)
print(profile.tostring()) | en | 0.561022 | Parses an ICC Colour Profile. According to spec: all Profile data shall be encoded as big-endian Returns the data colour space type, or None if not defined Parsers the specified bytes representing an ICC Profile # ICC profile consists of: # - 128-byte profile header # - profile tag table: # - profile tagged element data (referenced from tag table) # YY-mm-dd HH:mm:ss # Must = acsp # APPL, MSFT, SGI, SUNW, 0 # todo: flags # todo: flags # 4 bytes tag count # n x 12 byte tags (4 bytes sig, 4 bytes offset (relative to profile start), 4 bytes size of data element) #read_func = getattr(IccProfile, tag_tuple[0]) # todo: repeat xyz for remainder of xyztype bytes # check first 4 bytes, either 'curv' or 'para' # next 4 bytes 0 # todo: para # private type? | 2.951652 | 3 |
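# A short usage sketch for the parser above. The profile path is hypothetical;
# the fixed-point check relies on the layout read_s15Fixed16Number implements
# (upper 16 bits = integer part, lower 16 bits = fraction / 65536), so the
# big-endian bytes 00 01 00 00 decode to 1.0.
import numpy as np

data = np.fromfile("profile.icc", dtype="uint8")   # any ICC profile dump
profile = IccProfile(data)
print(profile.get_colour_space())                  # 4-character signature, e.g. 'RGB '
print(profile.tostring(limit_value=True))

assert IccProfile.read_s15Fixed16Number(bytes([0x00, 0x01, 0x00, 0x00]), 0) == 1.0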
challenges/binary_search/test_binary_search.py | asakatida/data-structures-and-algorithms.py | 0 | 9228 | from .binary_search import binary_search
def test_binary_search_empty_array():
assert binary_search([], 0) == -1
def test_binary_search_find_single_array():
assert binary_search([3], 3) == 0
def test_binary_search_not_found_single_array():
assert binary_search([1], 0) == -1
def test_binary_search_not_found_in_short_array():
assert binary_search([1, 2, 3], 0) == -1
def test_binary_search_found_at_begining():
assert binary_search([0, 1, 2, 3, 4, 5], 0) == 0
def test_binary_search_found_at_end():
assert binary_search([0, 1, 3, 4, 5], 5) == 4
def test_binary_search_found_at_middle_even():
assert binary_search([0, 1, 3, 5], 3) == 2
def test_binary_search_found_at_middle_odd():
assert binary_search([1, 3, 5], 3) == 1
def test_binary_search_high_value():
assert binary_search([1, 3, 5], 3) == 1
def test_binary_search_large_array_low():
assert binary_search(list(range(0xFFFFFF)), 0xFF) == 0xFF
def test_binary_search_large_array_high():
assert binary_search(list(range(0xFFFFFF)), 0xFFFFF) == 0xFFFFF
def test_binary_search_large_array_not_found():
assert binary_search(list(range(0xFFFFFF)), -4) == -1
| from .binary_search import binary_search
def test_binary_search_empty_array():
assert binary_search([], 0) == -1
def test_binary_search_find_single_array():
assert binary_search([3], 3) == 0
def test_binary_search_not_found_single_array():
assert binary_search([1], 0) == -1
def test_binary_search_not_found_in_short_array():
assert binary_search([1, 2, 3], 0) == -1
def test_binary_search_found_at_begining():
assert binary_search([0, 1, 2, 3, 4, 5], 0) == 0
def test_binary_search_found_at_end():
assert binary_search([0, 1, 3, 4, 5], 5) == 4
def test_binary_search_found_at_middle_even():
assert binary_search([0, 1, 3, 5], 3) == 2
def test_binary_search_found_at_middle_odd():
assert binary_search([1, 3, 5], 3) == 1
def test_binary_search_high_value():
assert binary_search([1, 3, 5], 3) == 1
def test_binary_search_large_array_low():
assert binary_search(list(range(0xFFFFFF)), 0xFF) == 0xFF
def test_binary_search_large_array_high():
assert binary_search(list(range(0xFFFFFF)), 0xFFFFF) == 0xFFFFF
def test_binary_search_large_array_not_found():
assert binary_search(list(range(0xFFFFFF)), -4) == -1
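# The module under test (binary_search.py) is not part of this record. A
# minimal implementation consistent with the assertions above: return the
# index of `target` in a sorted sequence, or -1 when it is absent.
def binary_search(values, target):
    low, high = 0, len(values) - 1
    while low <= high:
        mid = (low + high) // 2
        if values[mid] == target:
            return mid
        if values[mid] < target:
            low = mid + 1
        else:
            high = mid - 1
    return -1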
| none | 1 | 3.565735 | 4 |
|
Module2.py | Cipalex/session3 | 0 | 9229 | def f():
print('f from module 2')
if __name__ == '__main__':
print('Module 2') | def f():
print('f from module 2')
if __name__ == '__main__':
print('Module 2') | none | 1 | 1.750569 | 2 |
|
analisis_de_variables.py | scmarquez/Hause-Price-Kaggle-Competition | 0 | 9230 | <filename>analisis_de_variables.py
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 16:40:53 2017
@author: Sergio
"""
#Analisis de variables
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
import warnings
#Ignorar los warnings
warnings.filterwarnings('ignore')
#Lectura de los datos
#En train se guandan los datos con los que se entrenará al modelo
train = pd.read_csv('train.csv')
#En test se guarda el conjunto de datos para el test
test = pd.read_csv('test.csv')
#Primero hay que eliminar las varibles que tengan un número alto de valores perdidos
#El número de valores perdidos de cada conjunto en cada variable
NAs = pd.concat([train.isnull().sum()/1460, test.isnull().sum()/1459], axis=1, keys=['Train', 'Test'])
#print(NAs)
#Eliminar todas las variables que tengan más de un 0.2 de valores perdidos
eliminar = []
nvars = 0
for index, row in NAs.iterrows():
print(index)
print(row['Test'])
if (row['Test'] > 0.2) or (row ['Train'] > 0.2):
eliminar.append(index)
#En la variable eliminar estan los nombres de las variables que deben ser directamente eliminadas
#Dentro de las variables a eliminar encontramos que la variable de Alley NA no indica desconocido, es un posible valor más de los posibles a tomar
#Esa variable debe seguir estando en nuestro conjunto
print(eliminar)
eliminar.remove('Alley')
eliminar.remove('FireplaceQu')#Sucede lo mismo que con Alley
train.drop(eliminar,axis=1, inplace=True)
test.drop(eliminar,axis=1, inplace=True)
"""
Ahora es necesario un análisis más profundo de las variables.
En primer lugar encontramos algunas variables que parecen tener una representación
numérica, como por ejemplo 'MSSubClass' o 'OverallCond'.
Al leer la documentación sobre que información aportan las variables
encontramos que OverallCond aunque sea una variable aparentemente nominal
expresa cosas que son medibles como la calidad, es decir muestra una puntuación entre 1 y 10
"""
#Variables numéricas que deben ser transformadas a string
test['MSSubClass'] = test['MSSubClass'].astype(str)
train['MSSubClass'] = train['MSSubClass'].astype(str)
test['YrSold'] = test['YrSold'].astype(str)
train['YrSold'] = train['YrSold'].astype(str)
#Variables categóricas que deben ser numéricas, ya que expresan puntuación
#El lógico pensar que aumentar la puntuación en algo hace efecto directo en el precio final
ExterQualvalues = {'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
ExterCondvalues = {'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmQualvalues = {'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmCondvalues = {'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,}}
HeatingQCvalues = {'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
KitchenQualvalues = {'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
FireplaceQuvalues = {'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageCondvalues = {'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageQualvalues = {'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
PoolQCvalues = {'PoolQC':{'Ex':4,'Gd':3,'TA':2,'Fa':1}}
#Reemplazar los valores en las tablas
train.replace(ExterQualvalues,inplace=True)
train.replace(ExterCondvalues,inplace=True)
train.replace(BsmQualvalues,inplace=True)
train.replace(BsmCondvalues,inplace=True)
train.replace(HeatingQCvalues,inplace=True)
train.replace(KitchenQualvalues,inplace=True)
train.replace(FireplaceQuvalues,inplace=True)
train.replace(GarageCondvalues,inplace=True)
train.replace(GarageQualvalues,inplace=True)
train.replace(PoolQCvalues,inplace=True)
test.replace(ExterQualvalues,inplace=True)
test.replace(ExterCondvalues,inplace=True)
test.replace(BsmQualvalues,inplace=True)
test.replace(BsmCondvalues,inplace=True)
test.replace(HeatingQCvalues,inplace=True)
test.replace(KitchenQualvalues,inplace=True)
test.replace(FireplaceQuvalues,inplace=True)
test.replace(GarageCondvalues,inplace=True)
test.replace(GarageQualvalues,inplace=True)
test.replace(PoolQCvalues,inplace=True)
#Ahora tenemos todas las variables con un tipo de dato 'correcto'
#Cuantas variables de cada tipo tenemos
train_labels = train.pop('SalePrice')
features = pd.concat([train, test], keys=['train', 'test'])
enteras = features.dtypes[features.dtypes == 'int64'].index
flotantes = features.dtypes[features.dtypes == 'float64'].index
nominales = features.dtypes[features.dtypes == 'object'].index
#Se pasa a formato lista para su uso
ent = []
for var in enteras:
ent.append(var)
flot = []
for var in flotantes:
flot.append(var)
nom = []
for var in nominales:
nom.append(var)
numericas = ent+flot
#Ahora es necesario rellenar los valores perdidos de cada variable.
"""En algunas de las variables que han sido transformadas a numéricas
NAN no expresa que el dato no exista, sino que expresa puntuación 0"""
features['BsmtQual'] = features['BsmtQual'].fillna(0)
features['BsmtCond'] = features['BsmtCond'].fillna(0)
features['FireplaceQu'] = features['FireplaceQu'].fillna(0)
features['GarageQual'] = features['GarageQual'].fillna(0)
features['GarageCond'] = features['GarageCond'].fillna(0)
#El resto de variables pueden rellenarse con la media
for var in numericas:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mean())
#El resto ce variables nomnales se rellenan con el valor más frecuente
for var in nominales:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mode()[0])
"""Una vez que la tabla de datos está en el formato correcto vamos a estudiar la correlación
de las variables con el precio. Las variables que presenten una correlación baja se descartarán
ya que lo único que van a hacer es hacer que nuestro modelo se impreciso.
Si se imputan demasiadas variables perderemos información valiosa y el modelo volverá a ser impreciso.
Sacando un Heatmap se puede ver la correlación de las variables"""
#train_labels = np.log(train_labels)#La transformación logarítmica de los datos los aproxima a una distribución normal
complete = features.loc['train']#Solo se usan las entradas de entrenamiento
complete = pd.concat([complete,train_labels],axis=1)#Se adjunta la columna de precios de nuevo
correlationPlot = complete.corr()#Mantiene la matriz de correlación en un DataFrame
f,ax = plt.subplots(figsize=(12,9))#Configuración del tamaño de la imagen
sns.heatmap(correlationPlot,vmax=.8,square=True)#Crea el heatmap con los valores de correlación
plt.yticks(rotation=0)#cambia el eje de las etiquetas del gráfico para que se vean bien
plt.xticks(rotation=90)#cambia el eje de las etiquetas del gráfico para que se vean bien
plt.show()#Muestra el gráfico
f.savefig('Heatmap.png')#Guarda el gráfico en un archivo
"""La matriz de correlación muestra la correlación entre dos variables de forma que los valores
más claros muestran que dos variables tienen una correlación alta
El siguiente paso del análisis es buscar que variables muestran una correlación alta entre sí y eliminar
una de esas variables, ya que es información redundante y puede eliminarse. Otra manera de enfocar el problema
es que usar dos variables correlacionadas puede ayudar a sofocar el efecto del ruido en una variable.
En primer lugar es necesario descubrir que variables son las que determinan el precio de la vivienda usando la correlación.
"""
#Crear la lista de variables con correlación alta con el precio de la vivienda
"""Inciso:
calcular la correlación antes de aplicar la escala logaritmica a los datos
tiene sentido, pues el coeficiente de correlación de Pearson no varía con
la escala y el origen. Además solo nos sirve para hacer una aproximación
hacia que variables usar o no en el algoritmo. Después si será necesario
hacer que las variables tengan una distribución normalizada.
"""
HighCorrelation = []
for index, row in correlationPlot.iterrows():
if (row['SalePrice'] >= 0.5) or (row ['SalePrice'] <= -0.5):
HighCorrelation.append(index)
print(row['SalePrice'])
print("total de variables: "+str(len(HighCorrelation)))
print(HighCorrelation)
"""Ahora hay que examniar las variables nominales que se tendrán en cuenta
Para hacer este análisis se va a usar una gráfica que exprese la relación entre
el precio y el valor de la vivienda."""
complete = features.loc['train']
complete = pd.concat([complete,train_labels],axis=1)
malas = [#'MSSubClass',
'LandContour',
'LandSlope',
#'RoofStyle',
#'RoofMatl',
'Exterior2nd',
#'Exterior1st',
'MasVnrType',
'BsmtExposure',
'Functional',
'YrSold']
##################################
#malas = ['Utilities', 'RoofMatl','Heating','Functional']
for var in malas:
data = pd.concat([complete[var],complete['SalePrice']],axis=1)
f,ax = plt.subplots(figsize=(12,9))
fig = sns.boxplot(x=var,y="SalePrice",data=data)
fig.axis(ymin=0,ymax=800000)
plt.xticks(rotation=90)
f.savefig(str(var)+'_Price.png')
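# The loop above writes one box plot per candidate nominal variable. A sketch
# of the follow-up step the script implies: drop the candidates judged
# uninformative and one-hot encode the remaining nominal columns. The exact
# drop list is whatever survives the visual inspection; `malas` is used here
# only as the starting point.
features_reduced = features.drop(columns=[c for c in malas if c in features.columns])
nominales_restantes = [c for c in nom if c not in malas]
features_encoded = pd.get_dummies(features_reduced, columns=nominales_restantes)
print(features_encoded.shape)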
"""
aparentemente malas variables:
LandContour
LandScope
RoofStyle
RoofMatl
Exterior2nd
Exterior1st
MasVnrType
BsmtExposure
Functional
YrSold
"""
"""Analisis con PCA"""
| <filename>analisis_de_variables.py
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 29 16:40:53 2017
@author: Sergio
"""
#Analisis de variables
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import ensemble, tree, linear_model
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.utils import shuffle
import warnings
#Ignorar los warnings
warnings.filterwarnings('ignore')
#Lectura de los datos
#En train se guandan los datos con los que se entrenará al modelo
train = pd.read_csv('train.csv')
#En test se guarda el conjunto de datos para el test
test = pd.read_csv('test.csv')
#Primero hay que eliminar las varibles que tengan un número alto de valores perdidos
#El número de valores perdidos de cada conjunto en cada variable
NAs = pd.concat([train.isnull().sum()/1460, test.isnull().sum()/1459], axis=1, keys=['Train', 'Test'])
#print(NAs)
#Eliminar todas las variables que tengan más de un 0.2 de valores perdidos
eliminar = []
nvars = 0
for index, row in NAs.iterrows():
print(index)
print(row['Test'])
if (row['Test'] > 0.2) or (row ['Train'] > 0.2):
eliminar.append(index)
#En la variable eliminar estan los nombres de las variables que deben ser directamente eliminadas
#Dentro de las variables a eliminar encontramos que la variable de Alley NA no indica desconocido, es un posible valor más de los posibles a tomar
#Esa variable debe seguir estando en nuestro conjunto
print(eliminar)
eliminar.remove('Alley')
eliminar.remove('FireplaceQu')#Sucede lo mismo que con Alley
train.drop(eliminar,axis=1, inplace=True)
test.drop(eliminar,axis=1, inplace=True)
"""
Ahora es necesario un análisis más profundo de las variables.
En primer lugar encontramos algunas variables que parecen tener una representación
numérica, como por ejemplo 'MSSubClass' o 'OverallCond'.
Al leer la documentación sobre que información aportan las variables
encontramos que OverallCond aunque sea una variable aparentemente nominal
expresa cosas que son medibles como la calidad, es decir muestra una puntuación entre 1 y 10
"""
#Variables numéricas que deben ser transformadas a string
test['MSSubClass'] = test['MSSubClass'].astype(str)
train['MSSubClass'] = train['MSSubClass'].astype(str)
test['YrSold'] = test['YrSold'].astype(str)
train['YrSold'] = train['YrSold'].astype(str)
#Variables categóricas que deben ser numéricas, ya que expresan puntuación
#El lógico pensar que aumentar la puntuación en algo hace efecto directo en el precio final
ExterQualvalues = {'ExterQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
ExterCondvalues = {'ExterCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmQualvalues = {'BsmtQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
BsmCondvalues = {'BsmtCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,}}
HeatingQCvalues = {'HeatingQC':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
KitchenQualvalues = {'KitchenQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
FireplaceQuvalues = {'FireplaceQu':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageCondvalues = {'GarageCond':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
GarageQualvalues = {'GarageQual':{'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1}}
PoolQCvalues = {'PoolQC':{'Ex':4,'Gd':3,'TA':2,'Fa':1}}
#Reemplazar los valores en las tablas
train.replace(ExterQualvalues,inplace=True)
train.replace(ExterCondvalues,inplace=True)
train.replace(BsmQualvalues,inplace=True)
train.replace(BsmCondvalues,inplace=True)
train.replace(HeatingQCvalues,inplace=True)
train.replace(KitchenQualvalues,inplace=True)
train.replace(FireplaceQuvalues,inplace=True)
train.replace(GarageCondvalues,inplace=True)
train.replace(GarageQualvalues,inplace=True)
train.replace(PoolQCvalues,inplace=True)
test.replace(ExterQualvalues,inplace=True)
test.replace(ExterCondvalues,inplace=True)
test.replace(BsmQualvalues,inplace=True)
test.replace(BsmCondvalues,inplace=True)
test.replace(HeatingQCvalues,inplace=True)
test.replace(KitchenQualvalues,inplace=True)
test.replace(FireplaceQuvalues,inplace=True)
test.replace(GarageCondvalues,inplace=True)
test.replace(GarageQualvalues,inplace=True)
test.replace(PoolQCvalues,inplace=True)
#Ahora tenemos todas las variables con un tipo de dato 'correcto'
#Cuantas variables de cada tipo tenemos
train_labels = train.pop('SalePrice')
features = pd.concat([train, test], keys=['train', 'test'])
enteras = features.dtypes[features.dtypes == 'int64'].index
flotantes = features.dtypes[features.dtypes == 'float64'].index
nominales = features.dtypes[features.dtypes == 'object'].index
#Se pasa a formato lista para su uso
ent = []
for var in enteras:
ent.append(var)
flot = []
for var in flotantes:
flot.append(var)
nom = []
for var in nominales:
nom.append(var)
numericas = ent+flot
#Ahora es necesario rellenar los valores perdidos de cada variable.
"""En algunas de las variables que han sido transformadas a numéricas
NAN no expresa que el dato no exista, sino que expresa puntuación 0"""
features['BsmtQual'] = features['BsmtQual'].fillna(0)
features['BsmtCond'] = features['BsmtCond'].fillna(0)
features['FireplaceQu'] = features['FireplaceQu'].fillna(0)
features['GarageQual'] = features['GarageQual'].fillna(0)
features['GarageCond'] = features['GarageCond'].fillna(0)
#El resto de variables pueden rellenarse con la media
for var in numericas:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mean())
#El resto ce variables nomnales se rellenan con el valor más frecuente
for var in nominales:
if features[var].isnull().sum() > 0:
features[var] = features[var].fillna(features[var].mode()[0])
"""Una vez que la tabla de datos está en el formato correcto vamos a estudiar la correlación
de las variables con el precio. Las variables que presenten una correlación baja se descartarán
ya que lo único que van a hacer es hacer que nuestro modelo se impreciso.
Si se imputan demasiadas variables perderemos información valiosa y el modelo volverá a ser impreciso.
Sacando un Heatmap se puede ver la correlación de las variables"""
#train_labels = np.log(train_labels)#La transformación logarítmica de los datos los aproxima a una distribución normal
complete = features.loc['train']#Solo se usan las entradas de entrenamiento
complete = pd.concat([complete,train_labels],axis=1)#Se adjunta la columna de precios de nuevo
correlationPlot = complete.corr()#Mantiene la matriz de correlación en un DataFrame
f,ax = plt.subplots(figsize=(12,9))#Configuración del tamaño de la imagen
sns.heatmap(correlationPlot,vmax=.8,square=True)#Crea el heatmap con los valores de correlación
plt.yticks(rotation=0)#cambia el eje de las etiquetas del gráfico para que se vean bien
plt.xticks(rotation=90)#cambia el eje de las etiquetas del gráfico para que se vean bien
plt.show()#Muestra el gráfico
f.savefig('Heatmap.png')#Guarda el gráfico en un archivo
"""La matriz de correlación muestra la correlación entre dos variables de forma que los valores
más claros muestran que dos variables tienen una correlación alta
El siguiente paso del análisis es buscar que variables muestran una correlación alta entre sí y eliminar
una de esas variables, ya que es información redundante y puede eliminarse. Otra manera de enfocar el problema
es que usar dos variables correlacionadas puede ayudar a sofocar el efecto del ruido en una variable.
En primer lugar es necesario descubrir que variables son las que determinan el precio de la vivienda usando la correlación.
"""
#Crear la lista de variables con correlación alta con el precio de la vivienda
"""Inciso:
calcular la correlación antes de aplicar la escala logaritmica a los datos
tiene sentido, pues el coeficiente de correlación de Pearson no varía con
la escala y el origen. Además solo nos sirve para hacer una aproximación
hacia que variables usar o no en el algoritmo. Después si será necesario
hacer que las variables tengan una distribución normalizada.
"""
HighCorrelation = []
for index, row in correlationPlot.iterrows():
if (row['SalePrice'] >= 0.5) or (row ['SalePrice'] <= -0.5):
HighCorrelation.append(index)
print(row['SalePrice'])
print("total de variables: "+str(len(HighCorrelation)))
print(HighCorrelation)
"""Ahora hay que examniar las variables nominales que se tendrán en cuenta
Para hacer este análisis se va a usar una gráfica que exprese la relación entre
el precio y el valor de la vivienda."""
complete = features.loc['train']
complete = pd.concat([complete,train_labels],axis=1)
malas = [#'MSSubClass',
'LandContour',
'LandSlope',
#'RoofStyle',
#'RoofMatl',
'Exterior2nd',
#'Exterior1st',
'MasVnrType',
'BsmtExposure',
'Functional',
'YrSold']
##################################
#malas = ['Utilities', 'RoofMatl','Heating','Functional']
for var in malas:
data = pd.concat([complete[var],complete['SalePrice']],axis=1)
f,ax = plt.subplots(figsize=(12,9))
fig = sns.boxplot(x=var,y="SalePrice",data=data)
fig.axis(ymin=0,ymax=800000)
plt.xticks(rotation=90)
f.savefig(str(var)+'_Price.png')
"""
aparentemente malas variables:
LandContour
LandScope
RoofStyle
RoofMatl
Exterior2nd
Exterior1st
MasVnrType
BsmtExposure
Functional
YrSold
"""
"""Analisis con PCA"""
| es | 0.956916 | # -*- coding: utf-8 -*- Created on Fri Dec 29 16:40:53 2017
@author: Sergio #Analisis de variables #Ignorar los warnings #Lectura de los datos #En train se guandan los datos con los que se entrenará al modelo #En test se guarda el conjunto de datos para el test #Primero hay que eliminar las varibles que tengan un número alto de valores perdidos #El número de valores perdidos de cada conjunto en cada variable #print(NAs) #Eliminar todas las variables que tengan más de un 0.2 de valores perdidos #En la variable eliminar estan los nombres de las variables que deben ser directamente eliminadas #Dentro de las variables a eliminar encontramos que la variable de Alley NA no indica desconocido, es un posible valor más de los posibles a tomar #Esa variable debe seguir estando en nuestro conjunto #Sucede lo mismo que con Alley Ahora es necesario un análisis más profundo de las variables.
En primer lugar encontramos algunas variables que parecen tener una representación
numérica, como por ejemplo 'MSSubClass' o 'OverallCond'.
Al leer la documentación sobre que información aportan las variables
encontramos que OverallCond aunque sea una variable aparentemente nominal
expresa cosas que son medibles como la calidad, es decir muestra una puntuación entre 1 y 10 #Variables numéricas que deben ser transformadas a string #Variables categóricas que deben ser numéricas, ya que expresan puntuación #El lógico pensar que aumentar la puntuación en algo hace efecto directo en el precio final #Reemplazar los valores en las tablas #Ahora tenemos todas las variables con un tipo de dato 'correcto' #Cuantas variables de cada tipo tenemos #Se pasa a formato lista para su uso #Ahora es necesario rellenar los valores perdidos de cada variable. En algunas de las variables que han sido transformadas a numéricas
NAN no expresa que el dato no exista, sino que expresa puntuación 0 #El resto de variables pueden rellenarse con la media #El resto ce variables nomnales se rellenan con el valor más frecuente Una vez que la tabla de datos está en el formato correcto vamos a estudiar la correlación
de las variables con el precio. Las variables que presenten una correlación baja se descartarán
ya que lo único que van a hacer es hacer que nuestro modelo se impreciso.
Si se imputan demasiadas variables perderemos información valiosa y el modelo volverá a ser impreciso.
Sacando un Heatmap se puede ver la correlación de las variables #train_labels = np.log(train_labels)#La transformación logarítmica de los datos los aproxima a una distribución normal #Solo se usan las entradas de entrenamiento #Se adjunta la columna de precios de nuevo #Mantiene la matriz de correlación en un DataFrame #Configuración del tamaño de la imagen #Crea el heatmap con los valores de correlación #cambia el eje de las etiquetas del gráfico para que se vean bien #cambia el eje de las etiquetas del gráfico para que se vean bien #Muestra el gráfico #Guarda el gráfico en un archivo La matriz de correlación muestra la correlación entre dos variables de forma que los valores
más claros muestran que dos variables tienen una correlación alta
El siguiente paso del análisis es buscar que variables muestran una correlación alta entre sí y eliminar
una de esas variables, ya que es información redundante y puede eliminarse. Otra manera de enfocar el problema
es que usar dos variables correlacionadas puede ayudar a sofocar el efecto del ruido en una variable.
En primer lugar es necesario descubrir que variables son las que determinan el precio de la vivienda usando la correlación. #Crear la lista de variables con correlación alta con el precio de la vivienda Inciso:
calcular la correlación antes de aplicar la escala logaritmica a los datos
tiene sentido, pues el coeficiente de correlación de Pearson no varía con
la escala y el origen. Además solo nos sirve para hacer una aproximación
hacia que variables usar o no en el algoritmo. Después si será necesario
hacer que las variables tengan una distribución normalizada. Ahora hay que examniar las variables nominales que se tendrán en cuenta
Para hacer este análisis se va a usar una gráfica que exprese la relación entre
el precio y el valor de la vivienda. #'MSSubClass', #'RoofStyle', #'RoofMatl', #'Exterior1st', ################################## #malas = ['Utilities', 'RoofMatl','Heating','Functional'] aparentemente malas variables:
LandContour
LandScope
RoofStyle
RoofMatl
Exterior2nd
Exterior1st
MasVnrType
BsmtExposure
Functional
YrSold Analisis con PCA | 3.003461 | 3 |
query-gen.py | mdatsev/prostgres | 0 | 9231 | import random
import sys
ntables = 100
ncols = 100
nrows = 10000
def printstderr(s):
sys.stderr.write(s + '\n')
sys.stderr.flush()
def get_value():
return random.randint(-99999999, 99999999)
for t in range(ntables):
printstderr(f'{t}/{ntables}')
print(f"create table x ({','.join(['x int'] * ncols)});")
for r in range(nrows):
print(f"insert into _last ({','.join(['x'] * ncols)}) values (", end='')
for c in range(ncols):
print(get_value(), end=('' if c==ncols-1 else ','))
print(');')
# 10 min to generate
# 3 min to process | import random
import sys
ntables = 100
ncols = 100
nrows = 10000
def printstderr(s):
sys.stderr.write(s + '\n')
sys.stderr.flush()
def get_value():
return random.randint(-99999999, 99999999)
for t in range(ntables):
printstderr(f'{t}/{ntables}')
print(f"create table x ({','.join(['x int'] * ncols)});")
for r in range(nrows):
print(f"insert into _last ({','.join(['x'] * ncols)}) values (", end='')
for c in range(ncols):
print(get_value(), end=('' if c==ncols-1 else ','))
print(');')
# 10 min to generate
# 3 min to process | en | 0.709244 | # 10 min to generate # 3 min to process | 2.656802 | 3 |
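# The generator above prints one INSERT per row. A variant that batches rows
# into multi-row VALUES lists cuts the statement count by roughly `batch`;
# whether the target engine accepts multi-row INSERTs is an assumption, not
# something this script states.
import random

def insert_batches(table, ncols, nrows, batch=100):
    for start in range(0, nrows, batch):
        rows = []
        for _ in range(min(batch, nrows - start)):
            values = ",".join(str(random.randint(-99999999, 99999999)) for _ in range(ncols))
            rows.append(f"({values})")
        yield f"insert into {table} ({','.join(['x'] * ncols)}) values {','.join(rows)};"

for stmt in insert_batches("_last", ncols=3, nrows=5, batch=2):
    print(stmt)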
molecule/default/tests/test_default.py | joshbenner/sensu-ansible-role | 0 | 9232 | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_packages(host):
package = host.package('sensu')
assert package.is_installed
assert '1.7.0' in package.version
def test_dir_ownership(host):
assert host.file('/opt/sensu').group == 'sensu'
def test_main_config(host):
f = host.file('/etc/sensu/config.json')
assert f.exists
assert f.is_file
assert f.user == 'sensu'
assert f.group == 'sensu'
assert f.mode == 0o600
assert f.contains('rabbitmq')
assert f.contains('check-cpu.rb')
assert f.contains('"foo": "bar"')
assert f.contains('example_subscription')
assert f.contains('"zip": "zap"')
assert not f.contains('subscription_to_be_overridden')
def test_server_running(host):
server = host.service('sensu-server')
assert server.is_running
assert server.is_enabled
def test_api_running(host):
api = host.service('sensu-api')
assert api.is_running
assert api.is_enabled
def test_client_running(host):
client = host.service('sensu-client')
assert client.is_running
assert client.is_enabled
def test_api_listening(host):
assert host.socket('tcp://0.0.0.0:4567').is_listening
def test_plugin_installed(host):
assert host.file('/opt/sensu/embedded/bin/check-memory.rb').exists
# Tests extension install/enable
def test_snmp_listening(host):
assert host.socket('udp://0.0.0.0:1062').is_listening
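# The three service checks above repeat one pattern; the same coverage can be
# expressed once with pytest parametrization. This is illustrative only, the
# role's test file keeps the explicit functions.
import pytest

@pytest.mark.parametrize('service_name', ['sensu-server', 'sensu-api', 'sensu-client'])
def test_sensu_service_parametrized(host, service_name):
    service = host.service(service_name)
    assert service.is_running
    assert service.is_enabled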
| import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_packages(host):
package = host.package('sensu')
assert package.is_installed
assert '1.7.0' in package.version
def test_dir_ownership(host):
assert host.file('/opt/sensu').group == 'sensu'
def test_main_config(host):
f = host.file('/etc/sensu/config.json')
assert f.exists
assert f.is_file
assert f.user == 'sensu'
assert f.group == 'sensu'
assert f.mode == 0o600
assert f.contains('rabbitmq')
assert f.contains('check-cpu.rb')
assert f.contains('"foo": "bar"')
assert f.contains('example_subscription')
assert f.contains('"zip": "zap"')
assert not f.contains('subscription_to_be_overridden')
def test_server_running(host):
server = host.service('sensu-server')
assert server.is_running
assert server.is_enabled
def test_api_running(host):
api = host.service('sensu-api')
assert api.is_running
assert api.is_enabled
def test_client_running(host):
client = host.service('sensu-client')
assert client.is_running
assert client.is_enabled
def test_api_listening(host):
assert host.socket('tcp://0.0.0.0:4567').is_listening
def test_plugin_installed(host):
assert host.file('/opt/sensu/embedded/bin/check-memory.rb').exists
# Tests extension install/enable
def test_snmp_listening(host):
assert host.socket('udp://0.0.0.0:1062').is_listening
| en | 0.825089 | # Tests extension install/enable | 1.981148 | 2 |
wgskex/worker/netlink.py | moepman/wgskex | 2 | 9233 | <filename>wgskex/worker/netlink.py
import hashlib
import logging
import re
from dataclasses import dataclass
from datetime import datetime, timedelta
from textwrap import wrap
from typing import Dict, List
from pyroute2 import IPRoute, NDB, WireGuard
from wgskex.common.utils import mac2eui64
logger = logging.getLogger(__name__)
# TODO make loglevel configurable
logger.setLevel("DEBUG")
@dataclass
class WireGuardClient:
public_key: str
domain: str
remove: bool
@property
def lladdr(self) -> str:
m = hashlib.md5()
m.update(self.public_key.encode("ascii") + b"\n")
hashed_key = m.hexdigest()
hash_as_list = wrap(hashed_key, 2)
temp_mac = ":".join(["02"] + hash_as_list[:5])
lladdr = re.sub(r"/\d+$", "/128", mac2eui64(mac=temp_mac, prefix="fe80::/10"))
return lladdr
@property
def vx_interface(self) -> str:
return f"vx-{self.domain}"
@property
def wg_interface(self) -> str:
return f"wg-{self.domain}"
"""WireGuardClient describes complete configuration for a specific WireGuard client
Attributes:
public_key: WireGuard Public key
domain: Domain Name of the WireGuard peer
lladdr: IPv6 lladdr of the WireGuard peer
wg_interface: Name of the WireGuard interface this peer will use
vx_interface: Name of the VXLAN interface we set a route for the lladdr to
remove: Are we removing this peer or not?
"""
def wg_flush_stale_peers(domain: str) -> List[Dict]:
stale_clients = find_stale_wireguard_clients("wg-" + domain)
result = []
for stale_client in stale_clients:
stale_wireguard_client = WireGuardClient(
public_key=stale_client,
domain=domain,
remove=True,
)
result.append(link_handler(stale_wireguard_client))
return result
# pyroute2 stuff
def link_handler(client: WireGuardClient) -> Dict[str, Dict]:
results = {}
results.update({"Wireguard": wireguard_handler(client)})
try:
results.update({"Route": route_handler(client)})
except Exception as e:
results.update({"Route": {"Exception": e}})
results.update({"Bridge FDB": bridge_fdb_handler(client)})
return results
def bridge_fdb_handler(client: WireGuardClient) -> Dict:
with IPRoute() as ip:
return ip.fdb(
"del" if client.remove else "append",
# FIXME this list may be empty if the interface is not existing
ifindex=ip.link_lookup(ifname=client.vx_interface)[0],
lladdr="00:00:00:00:00:00",
dst=re.sub(r"/\d+$", "", client.lladdr),
nda_ifindex=ip.link_lookup(ifname=client.wg_interface)[0],
)
def wireguard_handler(client: WireGuardClient) -> Dict:
with WireGuard() as wg:
wg_peer = {
"public_key": client.public_key,
"persistent_keepalive": 15,
"allowed_ips": [client.lladdr],
"remove": client.remove,
}
return wg.set(client.wg_interface, peer=wg_peer)
def route_handler(client: WireGuardClient) -> Dict:
with IPRoute() as ip:
return ip.route(
"del" if client.remove else "replace",
dst=client.lladdr,
oif=ip.link_lookup(ifname=client.wg_interface)[0],
)
def find_wireguard_domains() -> List[str]:
with NDB() as ndb:
# ndb.interfaces[{"kind": "wireguard"}]] seems to trigger https://github.com/svinota/pyroute2/issues/737
iface_values = ndb.interfaces.values()
interfaces = [iface.get("ifname", "") for iface in iface_values if iface.get("kind", "") == "wireguard"]
result = [iface.removeprefix("wg-") for iface in interfaces if iface.startswith("wg-")]
return result
def find_stale_wireguard_clients(wg_interface: str) -> List[str]:
with WireGuard() as wg:
all_clients = []
infos = wg.info(wg_interface)
for info in infos:
clients = info.get_attr("WGDEVICE_A_PEERS")
if clients is not None:
all_clients.extend(clients)
three_minutes_ago = (datetime.now() - timedelta(minutes=3)).timestamp()
stale_clients = [
client.get_attr("WGPEER_A_PUBLIC_KEY").decode("utf-8")
for client in all_clients
# TODO add never connected peers to a list and remove them on next call
if 0 < (client.get_attr("WGPEER_A_LAST_HANDSHAKE_TIME") or {}).get("tv_sec", int()) < three_minutes_ago
]
return stale_clients
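# A self-contained sketch of the `lladdr` derivation above, assuming that
# wgskex.common.utils.mac2eui64 performs the standard EUI-64 expansion
# (split the MAC, insert ff:fe in the middle, flip the universal/local bit,
# prepend the prefix). The key string is a placeholder, not a real peer key.
import hashlib
from textwrap import wrap

def lladdr_sketch(public_key: str) -> str:
    digest = hashlib.md5(public_key.encode("ascii") + b"\n").hexdigest()
    mac = ["02"] + wrap(digest, 2)[:5]              # locally administered MAC
    eui = mac[:3] + ["ff", "fe"] + mac[3:]          # EUI-64 expansion
    eui[0] = "%02x" % (int(eui[0], 16) ^ 0x02)      # flip the universal/local bit
    groups = ["".join(eui[i:i + 2]) for i in range(0, 8, 2)]
    return "fe80::" + ":".join(groups) + "/128"

print(lladdr_sketch("EXAMPLE_PUBLIC_KEY"))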
| <filename>wgskex/worker/netlink.py
import hashlib
import logging
import re
from dataclasses import dataclass
from datetime import datetime, timedelta
from textwrap import wrap
from typing import Dict, List
from pyroute2 import IPRoute, NDB, WireGuard
from wgskex.common.utils import mac2eui64
logger = logging.getLogger(__name__)
# TODO make loglevel configurable
logger.setLevel("DEBUG")
@dataclass
class WireGuardClient:
public_key: str
domain: str
remove: bool
@property
def lladdr(self) -> str:
m = hashlib.md5()
m.update(self.public_key.encode("ascii") + b"\n")
hashed_key = m.hexdigest()
hash_as_list = wrap(hashed_key, 2)
temp_mac = ":".join(["02"] + hash_as_list[:5])
lladdr = re.sub(r"/\d+$", "/128", mac2eui64(mac=temp_mac, prefix="fe80::/10"))
return lladdr
@property
def vx_interface(self) -> str:
return f"vx-{self.domain}"
@property
def wg_interface(self) -> str:
return f"wg-{self.domain}"
"""WireGuardClient describes complete configuration for a specific WireGuard client
Attributes:
public_key: WireGuard Public key
domain: Domain Name of the WireGuard peer
lladdr: IPv6 lladdr of the WireGuard peer
wg_interface: Name of the WireGuard interface this peer will use
vx_interface: Name of the VXLAN interface we set a route for the lladdr to
remove: Are we removing this peer or not?
"""
def wg_flush_stale_peers(domain: str) -> List[Dict]:
stale_clients = find_stale_wireguard_clients("wg-" + domain)
result = []
for stale_client in stale_clients:
stale_wireguard_client = WireGuardClient(
public_key=stale_client,
domain=domain,
remove=True,
)
result.append(link_handler(stale_wireguard_client))
return result
# pyroute2 stuff
def link_handler(client: WireGuardClient) -> Dict[str, Dict]:
results = {}
results.update({"Wireguard": wireguard_handler(client)})
try:
results.update({"Route": route_handler(client)})
except Exception as e:
results.update({"Route": {"Exception": e}})
results.update({"Bridge FDB": bridge_fdb_handler(client)})
return results
def bridge_fdb_handler(client: WireGuardClient) -> Dict:
with IPRoute() as ip:
return ip.fdb(
"del" if client.remove else "append",
# FIXME this list may be empty if the interface is not existing
ifindex=ip.link_lookup(ifname=client.vx_interface)[0],
lladdr="00:00:00:00:00:00",
dst=re.sub(r"/\d+$", "", client.lladdr),
nda_ifindex=ip.link_lookup(ifname=client.wg_interface)[0],
)
def wireguard_handler(client: WireGuardClient) -> Dict:
with WireGuard() as wg:
wg_peer = {
"public_key": client.public_key,
"persistent_keepalive": 15,
"allowed_ips": [client.lladdr],
"remove": client.remove,
}
return wg.set(client.wg_interface, peer=wg_peer)
def route_handler(client: WireGuardClient) -> Dict:
with IPRoute() as ip:
return ip.route(
"del" if client.remove else "replace",
dst=client.lladdr,
oif=ip.link_lookup(ifname=client.wg_interface)[0],
)
def find_wireguard_domains() -> List[str]:
with NDB() as ndb:
# ndb.interfaces[{"kind": "wireguard"}]] seems to trigger https://github.com/svinota/pyroute2/issues/737
iface_values = ndb.interfaces.values()
interfaces = [iface.get("ifname", "") for iface in iface_values if iface.get("kind", "") == "wireguard"]
result = [iface.removeprefix("wg-") for iface in interfaces if iface.startswith("wg-")]
return result
def find_stale_wireguard_clients(wg_interface: str) -> List[str]:
with WireGuard() as wg:
all_clients = []
infos = wg.info(wg_interface)
for info in infos:
clients = info.get_attr("WGDEVICE_A_PEERS")
if clients is not None:
all_clients.extend(clients)
three_minutes_ago = (datetime.now() - timedelta(minutes=3)).timestamp()
stale_clients = [
client.get_attr("WGPEER_A_PUBLIC_KEY").decode("utf-8")
for client in all_clients
# TODO add never connected peers to a list and remove them on next call
if 0 < (client.get_attr("WGPEER_A_LAST_HANDSHAKE_TIME") or {}).get("tv_sec", int()) < three_minutes_ago
]
return stale_clients
| en | 0.618362 | # TODO make loglevel configurable WireGuardClient describes complete configuration for a specific WireGuard client Attributes: public_key: WireGuard Public key domain: Domain Name of the WireGuard peer lladdr: IPv6 lladdr of the WireGuard peer wg_interface: Name of the WireGuard interface this peer will use vx_interface: Name of the VXLAN interface we set a route for the lladdr to remove: Are we removing this peer or not? # pyroute2 stuff # FIXME this list may be empty if the interface is not existing # ndb.interfaces[{"kind": "wireguard"}]] seems to trigger https://github.com/svinota/pyroute2/issues/737 # TODO add never connected peers to a list and remove them on next call | 2.233167 | 2 |
api/main.py | Ju99ernaut/super-fastapi | 0 | 9234 | import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routes import items
import config
from constants import *
config.parse_args()
app = FastAPI(
title="API",
description="API boilerplate",
version="1.0.0",
openapi_tags=API_TAGS_METADATA,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(items.router)
@app.get("/")
async def root():
return {
"docs": "api documentation at /docs or /redoc",
}
if __name__ == "__main__":
uvicorn.run("main:app", host=config.CONFIG.host, port=int(config.CONFIG.port))
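# The `routes.items`, `config`, and `constants` modules are not included in
# this record. A hypothetical minimal `routes/items.py` that would satisfy
# `app.include_router(items.router)` above; the endpoints and fields are
# invented for illustration.
from fastapi import APIRouter

router = APIRouter(prefix="/items", tags=["items"])

_items = {1: {"name": "example"}}

@router.get("/")
async def list_items():
    return list(_items.values())

@router.get("/{item_id}")
async def get_item(item_id: int):
    return _items.get(item_id, {"error": "not found"})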
| import uvicorn
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routes import items
import config
from constants import *
config.parse_args()
app = FastAPI(
title="API",
description="API boilerplate",
version="1.0.0",
openapi_tags=API_TAGS_METADATA,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(items.router)
@app.get("/")
async def root():
return {
"docs": "api documentation at /docs or /redoc",
}
if __name__ == "__main__":
uvicorn.run("main:app", host=config.CONFIG.host, port=int(config.CONFIG.port))
| none | 1 | 2.355385 | 2 |
|
gellifinsta/models.py | vallka/djellifique | 0 | 9235 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.html import mark_safe
# Create your models here.
class Gellifinsta(models.Model):
class Meta:
ordering = ['-taken_at_datetime']
shortcode = models.CharField(_("Shortcode"), max_length=20)
taken_at_datetime = models.DateTimeField(_("taken at"))
username = models.CharField(_("Username"), max_length=100)
is_active = models.BooleanField(_("Active"),default=True)
is_video = models.BooleanField(_("Video"),default=False)
file_path = models.CharField(_("File Path"), max_length=500)
url = models.CharField(_("URL"), max_length=500)
created_dt = models.DateTimeField(_("Created Date/Time"), auto_now_add=True, null=True)
updated_dt = models.DateTimeField(_("Updated Date/Time"), auto_now=True, null=True)
caption = models.TextField(_("Caption"), blank=True, null=True)
tags = models.TextField(_("Tags"), blank=True, null=True)
def __str__(self):
return self.shortcode + ':' + str(self.taken_at_datetime)
def image_tag(self):
return mark_safe('<img src="%s" width="250" />' % (self.url))
image_tag.short_description = 'Image'
def tags_spaced(self):
return self.tags.replace(',',' ')
tags_spaced.short_description = 'Tags'
class Products(models.Model):
class Meta:
ordering = ['name']
name = models.CharField(_("Name"), max_length=100, unique=True)
is_active = models.BooleanField(_("Active"),default=True)
def __str__(self):
return self.name
| from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.html import mark_safe
# Create your models here.
class Gellifinsta(models.Model):
class Meta:
ordering = ['-taken_at_datetime']
shortcode = models.CharField(_("Shortcode"), max_length=20)
taken_at_datetime = models.DateTimeField(_("taken at"))
username = models.CharField(_("Username"), max_length=100)
is_active = models.BooleanField(_("Active"),default=True)
is_video = models.BooleanField(_("Video"),default=False)
file_path = models.CharField(_("File Path"), max_length=500)
url = models.CharField(_("URL"), max_length=500)
created_dt = models.DateTimeField(_("Created Date/Time"), auto_now_add=True, null=True)
updated_dt = models.DateTimeField(_("Updated Date/Time"), auto_now=True, null=True)
caption = models.TextField(_("Caption"), blank=True, null=True)
tags = models.TextField(_("Tags"), blank=True, null=True)
def __str__(self):
return self.shortcode + ':' + str(self.taken_at_datetime)
def image_tag(self):
return mark_safe('<img src="%s" width="250" />' % (self.url))
image_tag.short_description = 'Image'
def tags_spaced(self):
return self.tags.replace(',',' ')
tags_spaced.short_description = 'Tags'
class Products(models.Model):
class Meta:
ordering = ['name']
name = models.CharField(_("Name"), max_length=100, unique=True)
is_active = models.BooleanField(_("Active"),default=True)
def __str__(self):
return self.name
| en | 0.963489 | # Create your models here. | 2.129928 | 2 |
scanBase/migrations/0003_ipsection.py | wsqy/sacn_server | 0 | 9236 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-16 13:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scanBase', '0002_auto_20180116_1321'),
]
operations = [
migrations.CreateModel(
name='IPSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_section', models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='ip段')),
('ip_start', models.GenericIPAddressField(blank=True, null=True, verbose_name='开始ip')),
('ip_end', models.GenericIPAddressField(blank=True, null=True, verbose_name='结束ip')),
('total', models.IntegerField(blank=True, null=True, verbose_name='总量')),
('deal_time', models.DateTimeField(blank=True, null=True, verbose_name='处理时间')),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scanBase.CountryInfo', verbose_name='所属国家')),
],
options={
'verbose_name_plural': 'ip段信息',
'verbose_name': 'ip段信息',
},
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-16 13:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scanBase', '0002_auto_20180116_1321'),
]
operations = [
migrations.CreateModel(
name='IPSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_section', models.CharField(blank=True, max_length=30, null=True, unique=True, verbose_name='ip段')),
('ip_start', models.GenericIPAddressField(blank=True, null=True, verbose_name='开始ip')),
('ip_end', models.GenericIPAddressField(blank=True, null=True, verbose_name='结束ip')),
('total', models.IntegerField(blank=True, null=True, verbose_name='总量')),
('deal_time', models.DateTimeField(blank=True, null=True, verbose_name='处理时间')),
('country', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scanBase.CountryInfo', verbose_name='所属国家')),
],
options={
'verbose_name_plural': 'ip段信息',
'verbose_name': 'ip段信息',
},
),
]
| en | 0.749519 | # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-01-16 13:35 | 1.635085 | 2 |
sts/train.py | LostCow/KLUE | 18 | 9237 | <filename>sts/train.py
import argparse
import numpy as np
import os
import torch
from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments
from model import RobertaForStsRegression
from dataset import KlueStsWithSentenceMaskDataset
from utils import read_json, seed_everything
from metric import compute_metrics
def main(args):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
config = AutoConfig.from_pretrained(args.model_name_or_path)
config.num_labels = args.num_labels
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
train_file_path = os.path.join(args.data_dir, args.train_filename)
valid_file_path = os.path.join(args.data_dir, args.valid_filename)
train_json = read_json(train_file_path)
valid_json = read_json(valid_file_path)
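    # A max length of 510 leaves room for the two special tokens in a 512-token
    # input window (assumption: klue/roberta-large uses the usual 512-token limit).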
train_dataset = KlueStsWithSentenceMaskDataset(train_json, tokenizer, 510)
    valid_dataset = KlueStsWithSentenceMaskDataset(valid_json, tokenizer, 510)
model = RobertaForStsRegression.from_pretrained(
args.model_name_or_path, config=config
)
model.to(device)
training_args = TrainingArguments(
output_dir=args.model_dir,
save_total_limit=args.save_total_limit,
save_steps=args.save_steps,
num_train_epochs=args.num_train_epochs,
learning_rate=args.learning_rate,
per_device_train_batch_size=args.batch_size,
per_device_eval_batch_size=64,
gradient_accumulation_steps=args.gradient_accumulation_steps,
weight_decay=args.weight_decay,
logging_dir="./logs",
logging_steps=args.save_steps,
evaluation_strategy=args.evaluation_strategy,
metric_for_best_model="pearsonr",
fp16=True,
fp16_opt_level="O1",
eval_steps=args.save_steps,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
)
trainer.train()
model.save_pretrained(args.model_dir)
tokenizer.save_pretrained(args.model_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# data_arg
parser.add_argument("--data_dir", type=str, default="./data")
parser.add_argument("--model_dir", type=str, default="./model")
parser.add_argument("--output_dir", type=str, default="./output")
parser.add_argument("--model_name_or_path", type=str, default="klue/roberta-large")
parser.add_argument(
"--train_filename", type=str, default="klue-sts-v1.1_train.json"
)
parser.add_argument("--valid_filename", type=str, default="klue-sts-v1.1_dev.json")
# train_arg
parser.add_argument("--num_labels", type=int, default=1)
parser.add_argument("--seed", type=int, default=15)
parser.add_argument("--num_train_epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--weight_decay", type=float, default=0.01)
# eval_arg
parser.add_argument("--evaluation_strategy", type=str, default="steps")
parser.add_argument("--save_steps", type=int, default=250)
parser.add_argument("--eval_steps", type=int, default=250)
parser.add_argument("--save_total_limit", type=int, default=2)
args = parser.parse_args()
main(args)
| <filename>sts/train.py
import argparse
import numpy as np
import os
import torch
from transformers import AutoTokenizer, AutoConfig, Trainer, TrainingArguments
from model import RobertaForStsRegression
from dataset import KlueStsWithSentenceMaskDataset
from utils import read_json, seed_everything
from metric import compute_metrics
def main(args):
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
config = AutoConfig.from_pretrained(args.model_name_or_path)
config.num_labels = args.num_labels
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
train_file_path = os.path.join(args.data_dir, args.train_filename)
valid_file_path = os.path.join(args.data_dir, args.valid_filename)
train_json = read_json(train_file_path)
valid_json = read_json(valid_file_path)
train_dataset = KlueStsWithSentenceMaskDataset(train_json, tokenizer, 510)
    valid_dataset = KlueStsWithSentenceMaskDataset(valid_json, tokenizer, 510)
model = RobertaForStsRegression.from_pretrained(
args.model_name_or_path, config=config
)
model.to(device)
training_args = TrainingArguments(
output_dir=args.model_dir,
save_total_limit=args.save_total_limit,
save_steps=args.save_steps,
num_train_epochs=args.num_train_epochs,
learning_rate=args.learning_rate,
per_device_train_batch_size=args.batch_size,
per_device_eval_batch_size=64,
gradient_accumulation_steps=args.gradient_accumulation_steps,
weight_decay=args.weight_decay,
logging_dir="./logs",
logging_steps=args.save_steps,
evaluation_strategy=args.evaluation_strategy,
metric_for_best_model="pearsonr",
fp16=True,
fp16_opt_level="O1",
eval_steps=args.save_steps,
load_best_model_at_end=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=valid_dataset,
compute_metrics=compute_metrics,
)
trainer.train()
model.save_pretrained(args.model_dir)
tokenizer.save_pretrained(args.model_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# data_arg
parser.add_argument("--data_dir", type=str, default="./data")
parser.add_argument("--model_dir", type=str, default="./model")
parser.add_argument("--output_dir", type=str, default="./output")
parser.add_argument("--model_name_or_path", type=str, default="klue/roberta-large")
parser.add_argument(
"--train_filename", type=str, default="klue-sts-v1.1_train.json"
)
parser.add_argument("--valid_filename", type=str, default="klue-sts-v1.1_dev.json")
# train_arg
parser.add_argument("--num_labels", type=int, default=1)
parser.add_argument("--seed", type=int, default=15)
parser.add_argument("--num_train_epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--learning_rate", type=float, default=5e-5)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--weight_decay", type=float, default=0.01)
# eval_arg
parser.add_argument("--evaluation_strategy", type=str, default="steps")
parser.add_argument("--save_steps", type=int, default=250)
parser.add_argument("--eval_steps", type=int, default=250)
parser.add_argument("--save_total_limit", type=int, default=2)
args = parser.parse_args()
main(args)
| en | 0.241452 | # data_arg # train_arg # eval_arg | 2.140695 | 2 |
test/test_base_client.py | walkr/nanoservice | 28 | 9238 | import unittest
from nanoservice import Responder
from nanoservice import Requester
class BaseTestCase(unittest.TestCase):
def setUp(self):
addr = 'inproc://test'
self.client = Requester(addr)
self.service = Responder(addr)
self.service.register('divide', lambda x, y: x / y)
self.service.register('echo', lambda x: x)
def tearDown(self):
self.client.socket.close()
self.service.socket.close()
class TestClient(BaseTestCase):
def test_build_payload(self):
payload = self.client.build_payload('echo', 'My Name')
method, args, ref = payload
self.assertTrue(method == 'echo')
self.assertTrue(len(payload) == 3)
def test_encoder(self):
data = {'name': '<NAME>'}
encoded = self.client.encode(data)
decoded = self.client.decode(encoded)
self.assertEqual(data, decoded)
def test_call_wo_receive(self):
# Requester side ops
method, args = 'echo', 'hello world'
payload = self.client.build_payload(method, args)
self.client.socket.send(self.client.encode(payload))
# Responder side ops
method, args, ref = self.service.receive()
self.assertEqual(method, 'echo')
self.assertEqual(args, 'hello world')
self.assertEqual(ref, payload[2])
def test_basic_socket_operation(self):
msg = 'abc'
self.client.socket.send(msg)
res = self.service.socket.recv().decode('utf-8')
self.assertEqual(msg, res)
def test_timeout(self):
c = Requester('inproc://timeout', timeouts=(1, 1))
c.socket.send('hello')
self.assertRaises(Exception, c.socket.recv)
if __name__ == '__main__':
unittest.main()
| import unittest
from nanoservice import Responder
from nanoservice import Requester
class BaseTestCase(unittest.TestCase):
def setUp(self):
addr = 'inproc://test'
self.client = Requester(addr)
self.service = Responder(addr)
self.service.register('divide', lambda x, y: x / y)
self.service.register('echo', lambda x: x)
def tearDown(self):
self.client.socket.close()
self.service.socket.close()
class TestClient(BaseTestCase):
def test_build_payload(self):
payload = self.client.build_payload('echo', 'My Name')
method, args, ref = payload
self.assertTrue(method == 'echo')
self.assertTrue(len(payload) == 3)
def test_encoder(self):
data = {'name': '<NAME>'}
encoded = self.client.encode(data)
decoded = self.client.decode(encoded)
self.assertEqual(data, decoded)
def test_call_wo_receive(self):
# Requester side ops
method, args = 'echo', 'hello world'
payload = self.client.build_payload(method, args)
self.client.socket.send(self.client.encode(payload))
# Responder side ops
method, args, ref = self.service.receive()
self.assertEqual(method, 'echo')
self.assertEqual(args, 'hello world')
self.assertEqual(ref, payload[2])
def test_basic_socket_operation(self):
msg = 'abc'
self.client.socket.send(msg)
res = self.service.socket.recv().decode('utf-8')
self.assertEqual(msg, res)
def test_timeout(self):
c = Requester('inproc://timeout', timeouts=(1, 1))
c.socket.send('hello')
self.assertRaises(Exception, c.socket.recv)
if __name__ == '__main__':
unittest.main()
| en | 0.412248 | # Requester side ops # Responder side ops | 2.779106 | 3 |
airtech_api/flight/models.py | chidioguejiofor/airtech-api | 1 | 9239 | <reponame>chidioguejiofor/airtech-api<filename>airtech_api/flight/models.py<gh_stars>1-10
from airtech_api.utils.auditable_model import AuditableBaseModel
from django.db import models
# Create your models here.
class Flight(AuditableBaseModel):
class Meta:
db_table = 'Flight'
capacity = models.IntegerField(null=False)
location = models.TextField(null=False)
destination = models.TextField(null=False)
schedule = models.DateTimeField(null=False)
current_price = models.IntegerField()
type = models.CharField(
choices=(('local', 'local'), ('international', 'international')),
max_length=13,
)
| from airtech_api.utils.auditable_model import AuditableBaseModel
from django.db import models
# Create your models here.
class Flight(AuditableBaseModel):
class Meta:
db_table = 'Flight'
capacity = models.IntegerField(null=False)
location = models.TextField(null=False)
destination = models.TextField(null=False)
schedule = models.DateTimeField(null=False)
current_price = models.IntegerField()
type = models.CharField(
choices=(('local', 'local'), ('international', 'international')),
max_length=13,
) | en | 0.963489 | # Create your models here. | 2.464812 | 2 |
Sensor Fusion and Tracking/Kalman Filters/Gaussian/gaussian.py | kaka-lin/autonomous-driving-notes | 0 | 9240 | <reponame>kaka-lin/autonomous-driving-notes
import numpy as np
import matplotlib.pyplot as plt
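# Probability density of a normal distribution with the given mean and standard
# deviation, evaluated at x.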
def gaussian(x, mean, std):
std2 = np.power(std, 2)
return (1 / np.sqrt(2* np.pi * std2)) * np.exp(-.5 * (x - mean)**2 / std2)
if __name__ == "__main__":
gauss_1 = gaussian(10, 8, 2) # 0.12098536225957168
gauss_2 = gaussian(10, 10, 2) # 0.19947114020071635
print("Gauss(10, 8, 2): {}".format(gauss_1))
print("Gauss(10, 10, 2): {}".format(gauss_2))
    # Standard Gaussian distribution
mean = 0
variance = 1
std = np.sqrt(variance)
    # Plot between -5 and 5 with .001 steps.
x = np.arange(-5, 5, 0.001)
gauss = []
for i in x:
gauss.append(gaussian(i, mean, std))
gauss = np.array(gauss)
plt.plot(x, gauss)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
def gaussian(x, mean, std):
std2 = np.power(std, 2)
return (1 / np.sqrt(2* np.pi * std2)) * np.exp(-.5 * (x - mean)**2 / std2)
if __name__ == "__main__":
gauss_1 = gaussian(10, 8, 2) # 0.12098536225957168
gauss_2 = gaussian(10, 10, 2) # 0.19947114020071635
print("Gauss(10, 8, 2): {}".format(gauss_1))
print("Gauss(10, 10, 2): {}".format(gauss_2))
    # Standard Gaussian distribution
mean = 0
variance = 1
std = np.sqrt(variance)
    # Plot between -5 and 5 with .001 steps.
x = np.arange(-5, 5, 0.001)
gauss = []
for i in x:
gauss.append(gaussian(i, mean, std))
gauss = np.array(gauss)
plt.plot(x, gauss)
    plt.show() | en | 0.698393 | # 0.12098536225957168 # 0.19947114020071635 # Standard Gaussian distribution # Plot between -5 and 5 with .001 steps. | 3.926603 | 4 |
part19/test_interpreter.py | fazillatheef/lsbasi | 1,682 | 9241 | import unittest
class LexerTestCase(unittest.TestCase):
def makeLexer(self, text):
from spi import Lexer
lexer = Lexer(text)
return lexer
def test_tokens(self):
from spi import TokenType
records = (
('234', TokenType.INTEGER_CONST, 234),
('3.14', TokenType.REAL_CONST, 3.14),
('*', TokenType.MUL, '*'),
('DIV', TokenType.INTEGER_DIV, 'DIV'),
('/', TokenType.FLOAT_DIV, '/'),
('+', TokenType.PLUS, '+'),
('-', TokenType.MINUS, '-'),
('(', TokenType.LPAREN, '('),
(')', TokenType.RPAREN, ')'),
(':=', TokenType.ASSIGN, ':='),
('.', TokenType.DOT, '.'),
('number', TokenType.ID, 'number'),
(';', TokenType.SEMI, ';'),
('BEGIN', TokenType.BEGIN, 'BEGIN'),
('END', TokenType.END, 'END'),
('PROCEDURE', TokenType.PROCEDURE, 'PROCEDURE'),
)
for text, tok_type, tok_val in records:
lexer = self.makeLexer(text)
token = lexer.get_next_token()
self.assertEqual(token.type, tok_type)
self.assertEqual(token.value, tok_val)
def test_lexer_exception(self):
from spi import LexerError
lexer = self.makeLexer('<')
with self.assertRaises(LexerError):
lexer.get_next_token()
class ParserTestCase(unittest.TestCase):
def makeParser(self, text):
from spi import Lexer, Parser
lexer = Lexer(text)
parser = Parser(lexer)
return parser
def test_expression_invalid_syntax_01(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 10 * ; {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, ';')
self.assertEqual(the_exception.token.lineno, 6)
def test_expression_invalid_syntax_02(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 1 (1 + 2); {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, '(')
self.assertEqual(the_exception.token.lineno, 6)
def test_maximum_one_VAR_block_is_allowed(self):
from spi import ParserError, ErrorCode
# zero VARs
parser = self.makeParser(
"""
PROGRAM Test;
BEGIN
END.
"""
)
parser.parse()
# one VAR
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
END.
"""
)
parser.parse()
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
VAR
b : INTEGER;
BEGIN
a := 5;
b := a + 10;
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, 'VAR')
self.assertEqual(the_exception.token.lineno, 5) # second VAR
class SemanticAnalyzerTestCase(unittest.TestCase):
def runSemanticAnalyzer(self, text):
from spi import Lexer, Parser, SemanticAnalyzer
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
return semantic_analyzer
def test_semantic_duplicate_id_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
a : REAL; {Duplicate identifier}
BEGIN
a := 5;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.DUPLICATE_ID)
self.assertEqual(the_exception.token.value, 'a')
self.assertEqual(the_exception.token.lineno, 5)
def test_semantic_id_not_found_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 5 + b;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.ID_NOT_FOUND)
self.assertEqual(the_exception.token.value, 'b')
class TestCallStack:
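    # Test double for the interpreter's call stack: pop() is a no-op, so the
    # activation record left by the interpreted program can still be inspected
    # through peek() once interpret() has finished.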
def __init__(self):
self._records = []
def push(self, ar):
self._records.append(ar)
def pop(self):
# do nothing
pass
def peek(self):
return self._records[-1]
class InterpreterTestCase(unittest.TestCase):
def makeInterpreter(self, text):
from spi import Lexer, Parser, SemanticAnalyzer, Interpreter
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
interpreter = Interpreter(tree)
interpreter.call_stack = TestCallStack()
return interpreter
def test_integer_arithmetic_expressions(self):
for expr, result in (
('3', 3),
('2 + 7 * 4', 30),
('7 - 8 DIV 4', 5),
('14 + 2 * 3 - 6 DIV 2', 17),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))', 22),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)', 10),
('7 + (((3 + 2)))', 12),
('- 3', -3),
('+ 3', 3),
('5 - - - + - 3', 8),
('5 - - - + - (3 + 4) - +2', 10),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_float_arithmetic_expressions(self):
for expr, result in (
('3.14', 3.14),
('2.14 + 7 * 4', 30.14),
('7.14 - 8 / 4', 5.14),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : REAL;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_procedure_call(self):
text = """\
program Main;
procedure Alpha(a : integer; b : integer);
var x : integer;
begin
x := (a + b ) * 2;
end;
begin { Main }
Alpha(3 + 5, 7);
end. { Main }
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], 8)
self.assertEqual(ar['b'], 7)
self.assertEqual(ar['x'], 30)
self.assertEqual(ar.nesting_level, 2)
def test_program(self):
text = """\
PROGRAM Part12;
VAR
number : INTEGER;
a, b : INTEGER;
y : REAL;
PROCEDURE P1;
VAR
a : REAL;
k : INTEGER;
PROCEDURE P2;
VAR
a, z : INTEGER;
BEGIN {P2}
z := 777;
END; {P2}
BEGIN {P1}
END; {P1}
BEGIN {Part12}
number := 2;
a := number ;
b := 10 * a + 10 * number DIV 4;
y := 20 / 7 + 3.14
END. {Part12}
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(len(ar.members.keys()), 4)
self.assertEqual(ar['number'], 2)
self.assertEqual(ar['a'], 2)
self.assertEqual(ar['b'], 25)
self.assertAlmostEqual(ar['y'], float(20) / 7 + 3.14) # 5.9971...
if __name__ == '__main__':
unittest.main()
| import unittest
class LexerTestCase(unittest.TestCase):
def makeLexer(self, text):
from spi import Lexer
lexer = Lexer(text)
return lexer
def test_tokens(self):
from spi import TokenType
records = (
('234', TokenType.INTEGER_CONST, 234),
('3.14', TokenType.REAL_CONST, 3.14),
('*', TokenType.MUL, '*'),
('DIV', TokenType.INTEGER_DIV, 'DIV'),
('/', TokenType.FLOAT_DIV, '/'),
('+', TokenType.PLUS, '+'),
('-', TokenType.MINUS, '-'),
('(', TokenType.LPAREN, '('),
(')', TokenType.RPAREN, ')'),
(':=', TokenType.ASSIGN, ':='),
('.', TokenType.DOT, '.'),
('number', TokenType.ID, 'number'),
(';', TokenType.SEMI, ';'),
('BEGIN', TokenType.BEGIN, 'BEGIN'),
('END', TokenType.END, 'END'),
('PROCEDURE', TokenType.PROCEDURE, 'PROCEDURE'),
)
for text, tok_type, tok_val in records:
lexer = self.makeLexer(text)
token = lexer.get_next_token()
self.assertEqual(token.type, tok_type)
self.assertEqual(token.value, tok_val)
def test_lexer_exception(self):
from spi import LexerError
lexer = self.makeLexer('<')
with self.assertRaises(LexerError):
lexer.get_next_token()
class ParserTestCase(unittest.TestCase):
def makeParser(self, text):
from spi import Lexer, Parser
lexer = Lexer(text)
parser = Parser(lexer)
return parser
def test_expression_invalid_syntax_01(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 10 * ; {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, ';')
self.assertEqual(the_exception.token.lineno, 6)
def test_expression_invalid_syntax_02(self):
from spi import ParserError, ErrorCode
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 1 (1 + 2); {Invalid syntax}
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, '(')
self.assertEqual(the_exception.token.lineno, 6)
def test_maximum_one_VAR_block_is_allowed(self):
from spi import ParserError, ErrorCode
# zero VARs
parser = self.makeParser(
"""
PROGRAM Test;
BEGIN
END.
"""
)
parser.parse()
# one VAR
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
END.
"""
)
parser.parse()
parser = self.makeParser(
"""
PROGRAM Test;
VAR
a : INTEGER;
VAR
b : INTEGER;
BEGIN
a := 5;
b := a + 10;
END.
"""
)
with self.assertRaises(ParserError) as cm:
parser.parse()
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.UNEXPECTED_TOKEN)
self.assertEqual(the_exception.token.value, 'VAR')
self.assertEqual(the_exception.token.lineno, 5) # second VAR
class SemanticAnalyzerTestCase(unittest.TestCase):
def runSemanticAnalyzer(self, text):
from spi import Lexer, Parser, SemanticAnalyzer
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
return semantic_analyzer
def test_semantic_duplicate_id_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
a : REAL; {Duplicate identifier}
BEGIN
a := 5;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.DUPLICATE_ID)
self.assertEqual(the_exception.token.value, 'a')
self.assertEqual(the_exception.token.lineno, 5)
def test_semantic_id_not_found_error(self):
from spi import SemanticError, ErrorCode
with self.assertRaises(SemanticError) as cm:
self.runSemanticAnalyzer(
"""
PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := 5 + b;
END.
"""
)
the_exception = cm.exception
self.assertEqual(the_exception.error_code, ErrorCode.ID_NOT_FOUND)
self.assertEqual(the_exception.token.value, 'b')
class TestCallStack:
def __init__(self):
self._records = []
def push(self, ar):
self._records.append(ar)
def pop(self):
# do nothing
pass
def peek(self):
return self._records[-1]
class InterpreterTestCase(unittest.TestCase):
def makeInterpreter(self, text):
from spi import Lexer, Parser, SemanticAnalyzer, Interpreter
lexer = Lexer(text)
parser = Parser(lexer)
tree = parser.parse()
semantic_analyzer = SemanticAnalyzer()
semantic_analyzer.visit(tree)
interpreter = Interpreter(tree)
interpreter.call_stack = TestCallStack()
return interpreter
def test_integer_arithmetic_expressions(self):
for expr, result in (
('3', 3),
('2 + 7 * 4', 30),
('7 - 8 DIV 4', 5),
('14 + 2 * 3 - 6 DIV 2', 17),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1))', 22),
('7 + 3 * (10 DIV (12 DIV (3 + 1) - 1)) DIV (2 + 3) - 5 - 3 + (8)', 10),
('7 + (((3 + 2)))', 12),
('- 3', -3),
('+ 3', 3),
('5 - - - + - 3', 8),
('5 - - - + - (3 + 4) - +2', 10),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : INTEGER;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_float_arithmetic_expressions(self):
for expr, result in (
('3.14', 3.14),
('2.14 + 7 * 4', 30.14),
('7.14 - 8 / 4', 5.14),
):
interpreter = self.makeInterpreter(
"""PROGRAM Test;
VAR
a : REAL;
BEGIN
a := %s
END.
""" % expr
)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], result)
def test_procedure_call(self):
text = """\
program Main;
procedure Alpha(a : integer; b : integer);
var x : integer;
begin
x := (a + b ) * 2;
end;
begin { Main }
Alpha(3 + 5, 7);
end. { Main }
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(ar['a'], 8)
self.assertEqual(ar['b'], 7)
self.assertEqual(ar['x'], 30)
self.assertEqual(ar.nesting_level, 2)
def test_program(self):
text = """\
PROGRAM Part12;
VAR
number : INTEGER;
a, b : INTEGER;
y : REAL;
PROCEDURE P1;
VAR
a : REAL;
k : INTEGER;
PROCEDURE P2;
VAR
a, z : INTEGER;
BEGIN {P2}
z := 777;
END; {P2}
BEGIN {P1}
END; {P1}
BEGIN {Part12}
number := 2;
a := number ;
b := 10 * a + 10 * number DIV 4;
y := 20 / 7 + 3.14
END. {Part12}
"""
interpreter = self.makeInterpreter(text)
interpreter.interpret()
ar = interpreter.call_stack.peek()
self.assertEqual(len(ar.members.keys()), 4)
self.assertEqual(ar['number'], 2)
self.assertEqual(ar['a'], 2)
self.assertEqual(ar['b'], 25)
self.assertAlmostEqual(ar['y'], float(20) / 7 + 3.14) # 5.9971...
if __name__ == '__main__':
unittest.main()
| en | 0.177074 | PROGRAM Test; VAR a : INTEGER; BEGIN a := 10 * ; {Invalid syntax} END. PROGRAM Test; VAR a : INTEGER; BEGIN a := 1 (1 + 2); {Invalid syntax} END. # zero VARs PROGRAM Test; BEGIN END. # one VAR PROGRAM Test; VAR a : INTEGER; BEGIN END. PROGRAM Test; VAR a : INTEGER; VAR b : INTEGER; BEGIN a := 5; b := a + 10; END. # second VAR PROGRAM Test; VAR a : INTEGER; a : REAL; {Duplicate identifier} BEGIN a := 5; END. PROGRAM Test; VAR a : INTEGER; BEGIN a := 5 + b; END. # do nothing PROGRAM Test; VAR a : INTEGER; BEGIN a := %s END. PROGRAM Test; VAR a : REAL; BEGIN a := %s END. \ program Main; procedure Alpha(a : integer; b : integer); var x : integer; begin x := (a + b ) * 2; end; begin { Main } Alpha(3 + 5, 7); end. { Main } \ PROGRAM Part12; VAR number : INTEGER; a, b : INTEGER; y : REAL; PROCEDURE P1; VAR a : REAL; k : INTEGER; PROCEDURE P2; VAR a, z : INTEGER; BEGIN {P2} z := 777; END; {P2} BEGIN {P1} END; {P1} BEGIN {Part12} number := 2; a := number ; b := 10 * a + 10 * number DIV 4; y := 20 / 7 + 3.14 END. {Part12} # 5.9971... | 2.794416 | 3 |
bot_components/configurator.py | Ferlern/Arctic-Tundra | 3 | 9242 | <reponame>Ferlern/Arctic-Tundra
import json
from typing import TypedDict
from .bot_emoji import AdditionalEmoji
class Warn(TypedDict):
text: str
mute_time: int
ban: bool
class PersonalVoice(TypedDict):
categoty: int
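    # NOTE: "categoty" looks like a typo for "category"; the key is left unchanged
    # because existing config.json files and callers may rely on this exact name.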
price: int
slot_price: int
bitrate_price: int
class System(TypedDict):
token: str
initial_extensions: list[str]
class ExperienceSystem(TypedDict):
experience_channel: int
cooldown: int
minimal_message_length: int
experience_per_message: list[int]
roles: dict[str, int]
coins_per_level_up: int
class AutoTranslation(TypedDict):
channels: list
lang: str
class Config(TypedDict):
guild: int
token: str
prefixes: list[str]
commands_channels: list[int]
mute_role: int
suggestions_channel: int
moderators_roles: list[int]
warns_system: list[Warn]
coin: str
daily: int
marry_price: int
personal_voice: PersonalVoice
experience_system: ExperienceSystem
auto_translation: AutoTranslation
additional_emoji: AdditionalEmoji
class Configurator:
def __init__(self) -> None:
self.system: System
self.config: Config
def dump(self):
with open("./bot_components/config.json", "w") as write_file:
to_dump = [self.system, self.config]
json.dump(to_dump, write_file, indent=4)
def load(self):
with open("./bot_components/config.json", "r") as write_file:
data = json.load(write_file)
self.system = System(data[0])
self.config = Config(data[1])
def reload(self):
self.dump()
self.load()
configurator = Configurator()
configurator.load()
| import json
from typing import TypedDict
from .bot_emoji import AdditionalEmoji
class Warn(TypedDict):
text: str
mute_time: int
ban: bool
class PersonalVoice(TypedDict):
categoty: int
price: int
slot_price: int
bitrate_price: int
class System(TypedDict):
token: str
initial_extensions: list[str]
class ExperienceSystem(TypedDict):
experience_channel: int
cooldown: int
minimal_message_length: int
experience_per_message: list[int]
roles: dict[str, int]
coins_per_level_up: int
class AutoTranslation(TypedDict):
channels: list
lang: str
class Config(TypedDict):
guild: int
token: str
prefixes: list[str]
commands_channels: list[int]
mute_role: int
suggestions_channel: int
moderators_roles: list[int]
warns_system: list[Warn]
coin: str
daily: int
marry_price: int
personal_voice: PersonalVoice
experience_system: ExperienceSystem
auto_translation: AutoTranslation
additional_emoji: AdditionalEmoji
class Configurator:
def __init__(self) -> None:
self.system: System
self.config: Config
def dump(self):
with open("./bot_components/config.json", "w") as write_file:
to_dump = [self.system, self.config]
json.dump(to_dump, write_file, indent=4)
def load(self):
with open("./bot_components/config.json", "r") as write_file:
data = json.load(write_file)
self.system = System(data[0])
self.config = Config(data[1])
def reload(self):
self.dump()
self.load()
configurator = Configurator()
configurator.load() | none | 1 | 2.456306 | 2 |
|
recnn/utils/plot.py | ihash5/reinforcement-learning | 1 | 9243 | <filename>recnn/utils/plot.py
from scipy.spatial import distance
from scipy import ndimage
import matplotlib.pyplot as plt
import torch
from scipy import stats
import numpy as np
def pairwise_distances_fig(embs):
embs = embs.detach().cpu().numpy()
similarity_matrix_cos = distance.cdist(embs, embs, 'cosine')
similarity_matrix_euc = distance.cdist(embs, embs, 'euclidean')
fig = plt.figure(figsize=(16,10))
ax = fig.add_subplot(121)
cax = ax.matshow(similarity_matrix_cos)
fig.colorbar(cax)
ax.set_title('Cosine')
ax.axis('off')
ax = fig.add_subplot(122)
cax = ax.matshow(similarity_matrix_euc)
fig.colorbar(cax)
ax.set_title('Euclidian')
ax.axis('off')
fig.suptitle('Action pairwise distances')
plt.close()
return fig
def pairwise_distances(embs):
fig = pairwise_distances_fig(embs)
fig.show()
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
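# `smooth` above is a TensorBoard-style exponential moving average; `smooth_gauss`
# below instead convolves the series with a 1-D Gaussian kernel whose sigma is `var`.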
def smooth_gauss(arr, var):
return ndimage.gaussian_filter1d(arr, var)
class Plotter:
def __init__(self, loss, style):
self.loss = loss
self.style = style
self.smoothing = lambda x: smooth_gauss(x, 4)
def set_smoothing_func(self, f):
self.smoothing = f
def plot_loss(self):
for row in self.style:
fig, axes = plt.subplots(1, len(row), figsize=(16, 6))
if len(row) == 1: axes = [axes]
for col in range(len(row)):
key = row[col]
axes[col].set_title(key)
axes[col].plot(self.loss['train']['step'],
self.smoothing(self.loss['train'][key]), 'b-',
label='train')
axes[col].plot(self.loss['test']['step'],
self.loss['test'][key], 'r-.',
label='test')
plt.legend()
plt.show()
def log_loss(self, key, item, test=False):
kind = 'train'
if test:
kind = 'test'
self.loss[kind][key].append(item)
def log_losses(self, losses, test=False):
for key, val in losses.items():
self.log_loss(key, val, test)
@staticmethod
def kde_reconstruction_error(ad, gen_actions, true_actions, device=torch.device('cpu')):
def rec_score(actions):
return ad.rec_error(torch.tensor(actions).to(device).float()).detach().cpu().numpy()
true_scores = rec_score(true_actions)
gen_scores = rec_score(gen_actions)
true_kernel = stats.gaussian_kde(true_scores)
gen_kernel = stats.gaussian_kde(gen_scores)
x = np.linspace(0, 1000, 100)
probs_true = true_kernel(x)
probs_gen = gen_kernel(x)
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(111)
ax.plot(x, probs_true, '-b', label='true dist')
ax.plot(x, probs_gen, '-r', label='generated dist')
ax.legend()
return fig
@staticmethod
def plot_kde_reconstruction_error(*args, **kwargs):
fig = Plotter.kde_reconstruction_error(*args, **kwargs)
fig.show()
| <filename>recnn/utils/plot.py
from scipy.spatial import distance
from scipy import ndimage
import matplotlib.pyplot as plt
import torch
from scipy import stats
import numpy as np
def pairwise_distances_fig(embs):
embs = embs.detach().cpu().numpy()
similarity_matrix_cos = distance.cdist(embs, embs, 'cosine')
similarity_matrix_euc = distance.cdist(embs, embs, 'euclidean')
fig = plt.figure(figsize=(16,10))
ax = fig.add_subplot(121)
cax = ax.matshow(similarity_matrix_cos)
fig.colorbar(cax)
ax.set_title('Cosine')
ax.axis('off')
ax = fig.add_subplot(122)
cax = ax.matshow(similarity_matrix_euc)
fig.colorbar(cax)
ax.set_title('Euclidian')
ax.axis('off')
fig.suptitle('Action pairwise distances')
plt.close()
return fig
def pairwise_distances(embs):
fig = pairwise_distances_fig(embs)
fig.show()
def smooth(scalars, weight): # Weight between 0 and 1
last = scalars[0] # First value in the plot (first timestep)
smoothed = list()
for point in scalars:
smoothed_val = last * weight + (1 - weight) * point # Calculate smoothed value
smoothed.append(smoothed_val) # Save it
last = smoothed_val # Anchor the last smoothed value
return smoothed
def smooth_gauss(arr, var):
return ndimage.gaussian_filter1d(arr, var)
class Plotter:
def __init__(self, loss, style):
self.loss = loss
self.style = style
self.smoothing = lambda x: smooth_gauss(x, 4)
def set_smoothing_func(self, f):
self.smoothing = f
def plot_loss(self):
for row in self.style:
fig, axes = plt.subplots(1, len(row), figsize=(16, 6))
if len(row) == 1: axes = [axes]
for col in range(len(row)):
key = row[col]
axes[col].set_title(key)
axes[col].plot(self.loss['train']['step'],
self.smoothing(self.loss['train'][key]), 'b-',
label='train')
axes[col].plot(self.loss['test']['step'],
self.loss['test'][key], 'r-.',
label='test')
plt.legend()
plt.show()
def log_loss(self, key, item, test=False):
kind = 'train'
if test:
kind = 'test'
self.loss[kind][key].append(item)
def log_losses(self, losses, test=False):
for key, val in losses.items():
self.log_loss(key, val, test)
@staticmethod
def kde_reconstruction_error(ad, gen_actions, true_actions, device=torch.device('cpu')):
def rec_score(actions):
return ad.rec_error(torch.tensor(actions).to(device).float()).detach().cpu().numpy()
true_scores = rec_score(true_actions)
gen_scores = rec_score(gen_actions)
true_kernel = stats.gaussian_kde(true_scores)
gen_kernel = stats.gaussian_kde(gen_scores)
x = np.linspace(0, 1000, 100)
probs_true = true_kernel(x)
probs_gen = gen_kernel(x)
fig = plt.figure(figsize=(16, 10))
ax = fig.add_subplot(111)
ax.plot(x, probs_true, '-b', label='true dist')
ax.plot(x, probs_gen, '-r', label='generated dist')
ax.legend()
return fig
@staticmethod
def plot_kde_reconstruction_error(*args, **kwargs):
fig = Plotter.kde_reconstruction_error(*args, **kwargs)
fig.show()
| en | 0.815321 | # Weight between 0 and 1 # First value in the plot (first timestep) # Calculate smoothed value # Save it # Anchor the last smoothed value | 2.36012 | 2 |
tests/integration/insights/v1/call/test_metric.py | pazzy-stack/twilio | 0 | 9244 | <gh_stars>0
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MetricTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.holodeck.assert_has_request(Request(
'get',
'https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Metrics',
))
def test_read_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0",
"previous_page_url": null,
"next_page_url": null,
"key": "metrics",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0"
},
"metrics": [
{
"timestamp": "2019-10-07T22:32:06Z",
"call_sid": "CA7569efe0253644fa4a88aa97beca3310",
"account_sid": "<KEY>",
"edge": "sdk_edge",
"direction": "both",
"sdk_edge": {
"interval": {
"packets_received": 50,
"packets_lost": 0,
"audio_in": {
"value": 81.0
},
"audio_out": {
"value": 5237.0
},
"jitter": {
"value": 9
},
"mos": {
"value": 4.39
},
"rtt": {
"value": 81
}
},
"cumulative": {
"bytes_received": 547788,
"bytes_sent": 329425,
"packets_received": 3900,
"packets_lost": 0,
"packets_sent": 3934
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null,
"gateway": null,
"client": null
}
]
}
'''
))
actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 10,
"page_size": 5,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0",
"previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10",
"next_page_url": null,
"key": "metrics",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10"
},
"metrics": [
{
"timestamp": "2019-10-07T22:32:06Z",
"call_sid": "CA7569efe0253644fa4a88aa97beca3310",
"account_sid": "<KEY>",
"edge": "sdk_edge",
"direction": "both",
"sdk_edge": {
"interval": {
"packets_received": 50,
"packets_lost": 0,
"audio_in": {
"value": 81.0
},
"audio_out": {
"value": 5237.0
},
"jitter": {
"value": 9
},
"mos": {
"value": 4.39
},
"rtt": {
"value": 81
}
},
"cumulative": {
"bytes_received": 547788,
"bytes_sent": 329425,
"packets_received": 3900,
"packets_lost": 0,
"packets_sent": 3934
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null,
"gateway": null,
"client": null
}
]
}
'''
))
actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.assertIsNotNone(actual)
| # coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class MetricTestCase(IntegrationTestCase):
def test_list_request(self):
self.holodeck.mock(Response(500, ''))
with self.assertRaises(TwilioException):
self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.holodeck.assert_has_request(Request(
'get',
'https://insights.twilio.com/v1/Voice/CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Metrics',
))
def test_read_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 0,
"page_size": 50,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0",
"previous_page_url": null,
"next_page_url": null,
"key": "metrics",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0"
},
"metrics": [
{
"timestamp": "2019-10-07T22:32:06Z",
"call_sid": "CA7569efe0253644fa4a88aa97beca3310",
"account_sid": "<KEY>",
"edge": "sdk_edge",
"direction": "both",
"sdk_edge": {
"interval": {
"packets_received": 50,
"packets_lost": 0,
"audio_in": {
"value": 81.0
},
"audio_out": {
"value": 5237.0
},
"jitter": {
"value": 9
},
"mos": {
"value": 4.39
},
"rtt": {
"value": 81
}
},
"cumulative": {
"bytes_received": 547788,
"bytes_sent": 329425,
"packets_received": 3900,
"packets_lost": 0,
"packets_sent": 3934
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null,
"gateway": null,
"client": null
}
]
}
'''
))
actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.assertIsNotNone(actual)
def test_read_full_response(self):
self.holodeck.mock(Response(
200,
'''
{
"meta": {
"page": 10,
"page_size": 5,
"first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0",
"previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10",
"next_page_url": null,
"key": "metrics",
"url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10"
},
"metrics": [
{
"timestamp": "2019-10-07T22:32:06Z",
"call_sid": "CA7569efe0253644fa4a88aa97beca3310",
"account_sid": "<KEY>",
"edge": "sdk_edge",
"direction": "both",
"sdk_edge": {
"interval": {
"packets_received": 50,
"packets_lost": 0,
"audio_in": {
"value": 81.0
},
"audio_out": {
"value": 5237.0
},
"jitter": {
"value": 9
},
"mos": {
"value": 4.39
},
"rtt": {
"value": 81
}
},
"cumulative": {
"bytes_received": 547788,
"bytes_sent": 329425,
"packets_received": 3900,
"packets_lost": 0,
"packets_sent": 3934
}
},
"client_edge": null,
"carrier_edge": null,
"sip_edge": null,
"gateway": null,
"client": null
}
]
}
'''
))
actual = self.client.insights.v1.calls(sid="CAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
.metrics.list()
self.assertIsNotNone(actual) | en | 0.571334 | # coding=utf-8 This code was generated by \ / _ _ _| _ _ | (_)\/(_)(_|\/| |(/_ v1.0.0 / / { "meta": { "page": 0, "page_size": 50, "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0", "previous_page_url": null, "next_page_url": null, "key": "metrics", "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?PageSize=50&Page=0" }, "metrics": [ { "timestamp": "2019-10-07T22:32:06Z", "call_sid": "CA7569efe0253644fa4a88aa97beca3310", "account_sid": "<KEY>", "edge": "sdk_edge", "direction": "both", "sdk_edge": { "interval": { "packets_received": 50, "packets_lost": 0, "audio_in": { "value": 81.0 }, "audio_out": { "value": 5237.0 }, "jitter": { "value": 9 }, "mos": { "value": 4.39 }, "rtt": { "value": 81 } }, "cumulative": { "bytes_received": 547788, "bytes_sent": 329425, "packets_received": 3900, "packets_lost": 0, "packets_sent": 3934 } }, "client_edge": null, "carrier_edge": null, "sip_edge": null, "gateway": null, "client": null } ] } { "meta": { "page": 10, "page_size": 5, "first_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=0", "previous_page_url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=9&PageToken=DP10", "next_page_url": null, "key": "metrics", "url": "https://insights.twilio.com/v1/Voice/CAaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/Metrics?Direction=both&Edge=sdk_edge&PageSize=5&Page=10" }, "metrics": [ { "timestamp": "2019-10-07T22:32:06Z", "call_sid": "CA7569efe0253644fa4a88aa97beca3310", "account_sid": "<KEY>", "edge": "sdk_edge", "direction": "both", "sdk_edge": { "interval": { "packets_received": 50, "packets_lost": 0, "audio_in": { "value": 81.0 }, "audio_out": { "value": 5237.0 }, "jitter": { "value": 9 }, "mos": { "value": 4.39 }, "rtt": { "value": 81 } }, "cumulative": { "bytes_received": 547788, "bytes_sent": 329425, "packets_received": 3900, "packets_lost": 0, "packets_sent": 3934 } }, "client_edge": null, "carrier_edge": null, "sip_edge": null, "gateway": null, "client": null } ] } | 2.276583 | 2 |
2017-2018/lecture-notes/python/02-algorithms_listing_8_contains_word.py | essepuntato/comp-think | 19 | 9245 | def contains_word(first_word, second_word, bibliographic_entry):
contains_first_word = first_word in bibliographic_entry
contains_second_word = second_word in bibliographic_entry
if contains_first_word and contains_second_word:
return 2
elif contains_first_word or contains_second_word:
return 1
else:
return 0
if __name__ == "__main__":
bibliographic_entry = "<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., " \
"<NAME>. (2017). Research Articles in Simplified HTML: a Web-first format for " \
"HTML-based scholarly articles. PeerJ Computer Science 3: e132. e2513. " \
"DOI: https://doi.org/10.7717/peerj-cs.132"
print(contains_word("Peroni", "Osborne", bibliographic_entry))
print(contains_word("Peroni", "Asprino", bibliographic_entry))
print(contains_word("Reforgiato", "Osborne", bibliographic_entry))
print(contains_word("Reforgiato", "Asprino", bibliographic_entry))
| def contains_word(first_word, second_word, bibliographic_entry):
contains_first_word = first_word in bibliographic_entry
contains_second_word = second_word in bibliographic_entry
if contains_first_word and contains_second_word:
return 2
elif contains_first_word or contains_second_word:
return 1
else:
return 0
if __name__ == "__main__":
bibliographic_entry = "<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., " \
"<NAME>. (2017). Research Articles in Simplified HTML: a Web-first format for " \
"HTML-based scholarly articles. PeerJ Computer Science 3: e132. e2513. " \
"DOI: https://doi.org/10.7717/peerj-cs.132"
print(contains_word("Peroni", "Osborne", bibliographic_entry))
print(contains_word("Peroni", "Asprino", bibliographic_entry))
print(contains_word("Reforgiato", "Osborne", bibliographic_entry))
print(contains_word("Reforgiato", "Asprino", bibliographic_entry))
| none | 1 | 4.123122 | 4 |
|
backend/user/scripter.py | ivaivalous/ivodb | 0 | 9246 | <filename>backend/user/scripter.py<gh_stars>0
#!/usr/bin/env python
import responses
from selenium import webdriver
# This file contains/references the default JS
# used to provide functions dealing with input/output
SCRIPT_RUNNER = "runner.html"
ENCODING = 'utf-8'
PAGE_LOAD_TIMEOUT = 5
PAGE_LOAD_TIMEOUT_MS = PAGE_LOAD_TIMEOUT * 1000
capabilities = webdriver.DesiredCapabilities.PHANTOMJS
capabilities["phantomjs.page.settings.resourceTimeout"] = PAGE_LOAD_TIMEOUT_MS
capabilities["phantomjs.page.settings.loadImages"] = False
SCRIPT_TEMPLATE = """
window.requestData = {{method:"{0}", headers:{1}, data:"{2}", params:{3}}};
window.method = requestData.method;
window.headers = requestData.headers;
window.data = requestData.data;
window.params = requestData.params;
window.logs = [];
window.log = function(message) {{
window.logs.push({{
"time": (new Date).getTime(),
"message": message
}})
}};
"""
GET_LOGS_SCRIPT = 'return window.logs;'
class Scripter:
def __init__(self):
self.driver = webdriver.PhantomJS(desired_capabilities=capabilities)
self.driver.implicitly_wait(PAGE_LOAD_TIMEOUT)
self.driver.set_page_load_timeout(PAGE_LOAD_TIMEOUT)
def run(self, request, script_body, input_params):
self.driver.get(SCRIPT_RUNNER)
self.driver.execute_script(
Scripter.build_runner_script(request, input_params))
try:
response = self.execute_user_script(script_body)
logs = self.driver.execute_script(GET_LOGS_SCRIPT)
return response.encode(ENCODING), logs
except:
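            # Any failure in the user-supplied script (syntax error, timeout,
            # runtime error) is collapsed into a generic invalid-request response
            # with an empty log list.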
return responses.get_invalid_request(), []
def execute_user_script(self, script_body):
"""Execute a user-contributed script."""
return self.driver.execute_script(script_body)
@staticmethod
def build_runner_script(request, input_params):
# Build JS related to having access to input
# and request data.
return SCRIPT_TEMPLATE.format(
request.method,
Scripter.build_headers_map(request.headers),
request.get_data().encode(ENCODING),
Scripter.build_params_map(input_params.encode(ENCODING)))
@staticmethod
def build_params_map(input_params):
# input_params looks like "test=aaa&test2=jjj"
couples = input_params.split("&")
params_map = {}
for couple in couples:
c = couple.split("=")
key = c[0]
value = c[1] if len(c) > 1 else ""
params_map[key] = value
return params_map
@staticmethod
def build_headers_map(headers):
headers_map = {}
for key, value in headers:
if 'jwt=' in value:
continue
headers_map[key] = value.encode(ENCODING)
return headers_map
| <filename>backend/user/scripter.py<gh_stars>0
#!/usr/bin/env python
import responses
from selenium import webdriver
# This file contains/references the default JS
# used to provide functions dealing with input/output
SCRIPT_RUNNER = "runner.html"
ENCODING = 'utf-8'
PAGE_LOAD_TIMEOUT = 5
PAGE_LOAD_TIMEOUT_MS = PAGE_LOAD_TIMEOUT * 1000
capabilities = webdriver.DesiredCapabilities.PHANTOMJS
capabilities["phantomjs.page.settings.resourceTimeout"] = PAGE_LOAD_TIMEOUT_MS
capabilities["phantomjs.page.settings.loadImages"] = False
SCRIPT_TEMPLATE = """
window.requestData = {{method:"{0}", headers:{1}, data:"{2}", params:{3}}};
window.method = requestData.method;
window.headers = requestData.headers;
window.data = requestData.data;
window.params = requestData.params;
window.logs = [];
window.log = function(message) {{
window.logs.push({{
"time": (new Date).getTime(),
"message": message
}})
}};
"""
GET_LOGS_SCRIPT = 'return window.logs;'
class Scripter:
def __init__(self):
self.driver = webdriver.PhantomJS(desired_capabilities=capabilities)
self.driver.implicitly_wait(PAGE_LOAD_TIMEOUT)
self.driver.set_page_load_timeout(PAGE_LOAD_TIMEOUT)
def run(self, request, script_body, input_params):
self.driver.get(SCRIPT_RUNNER)
self.driver.execute_script(
Scripter.build_runner_script(request, input_params))
try:
response = self.execute_user_script(script_body)
logs = self.driver.execute_script(GET_LOGS_SCRIPT)
return response.encode(ENCODING), logs
except:
return responses.get_invalid_request(), []
def execute_user_script(self, script_body):
"""Execute a user-contributed script."""
return self.driver.execute_script(script_body)
@staticmethod
def build_runner_script(request, input_params):
# Build JS related to having access to input
# and request data.
return SCRIPT_TEMPLATE.format(
request.method,
Scripter.build_headers_map(request.headers),
request.get_data().encode(ENCODING),
Scripter.build_params_map(input_params.encode(ENCODING)))
@staticmethod
def build_params_map(input_params):
# input_params looks like "test=aaa&test2=jjj"
couples = input_params.split("&")
params_map = {}
for couple in couples:
c = couple.split("=")
key = c[0]
value = c[1] if len(c) > 1 else ""
params_map[key] = value
return params_map
@staticmethod
def build_headers_map(headers):
headers_map = {}
for key, value in headers:
if 'jwt=' in value:
continue
headers_map[key] = value.encode(ENCODING)
return headers_map
| en | 0.437611 | #!/usr/bin/env python # This file contains/references the default JS # used to provide functions dealing with input/output window.requestData = {{method:"{0}", headers:{1}, data:"{2}", params:{3}}}; window.method = requestData.method; window.headers = requestData.headers; window.data = requestData.data; window.params = requestData.params; window.logs = []; window.log = function(message) {{ window.logs.push({{ "time": (new Date).getTime(), "message": message }}) }}; Execute a user-contributed script. # Build JS related to having access to input # and request data. # input_params looks like "test=aaa&test2=jjj" | 2.673718 | 3 |
bwtougu/api/names.py | luhouxiang/byrobot | 0 | 9247 | <reponame>luhouxiang/byrobot
#!/usr/bin/env python
# -*- coding: utf-8 -*-
VALID_HISTORY_FIELDS = [
'datetime', 'open', 'close', 'high', 'low', 'total_turnover', 'volume',
'acc_net_value', 'discount_rate', 'unit_net_value',
'limit_up', 'limit_down', 'open_interest', 'basis_spread', 'settlement', 'prev_settlement'
]
VALID_GET_PRICE_FIELDS = [
'OpeningPx', 'ClosingPx', 'HighPx', 'LowPx', 'TotalTurnover', 'TotalVolumeTraded',
'AccNetValue', 'UnitNetValue', 'DiscountRate',
'SettlPx', 'PrevSettlPx', 'OpenInterest', 'BasisSpread', 'HighLimitPx', 'LowLimitPx'
]
VALID_TENORS = [
'0S', '1M', '2M', '3M', '6M', '9M', '1Y', '2Y', '3Y', '4Y',
'5Y', '6Y', '7Y', '8Y', '9Y', '10Y', '15Y', '20Y', '30Y',
'40Y', '50Y'
]
VALID_INSTRUMENT_TYPES = [
'CS', 'Future', 'INDX', 'ETF', 'LOF', 'SF', 'FenjiA', 'FenjiB', 'FenjiMu',
'Stock', 'Fund', 'Index'
]
VALID_XUEQIU_FIELDS = [
'new_comments', 'total_comments',
'new_followers', 'total_followers',
'sell_actions', 'buy_actions',
]
VALID_MARGIN_FIELDS = [
'margin_balance',
'buy_on_margin_value',
'short_sell_quantity',
'margin_repayment',
'short_balance_quantity',
'short_repayment_quantity',
'short_balance',
'total_balance'
]
VALID_SHARE_FIELDS = [
'total', 'circulation_a', 'management_circulation', 'non_circulation_a', 'total_a'
]
VALID_TURNOVER_FIELDS = (
'today',
'week',
'month',
'three_month',
'six_month',
'year',
'current_year',
'total',
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
VALID_HISTORY_FIELDS = [
'datetime', 'open', 'close', 'high', 'low', 'total_turnover', 'volume',
'acc_net_value', 'discount_rate', 'unit_net_value',
'limit_up', 'limit_down', 'open_interest', 'basis_spread', 'settlement', 'prev_settlement'
]
VALID_GET_PRICE_FIELDS = [
'OpeningPx', 'ClosingPx', 'HighPx', 'LowPx', 'TotalTurnover', 'TotalVolumeTraded',
'AccNetValue', 'UnitNetValue', 'DiscountRate',
'SettlPx', 'PrevSettlPx', 'OpenInterest', 'BasisSpread', 'HighLimitPx', 'LowLimitPx'
]
VALID_TENORS = [
'0S', '1M', '2M', '3M', '6M', '9M', '1Y', '2Y', '3Y', '4Y',
'5Y', '6Y', '7Y', '8Y', '9Y', '10Y', '15Y', '20Y', '30Y',
'40Y', '50Y'
]
VALID_INSTRUMENT_TYPES = [
'CS', 'Future', 'INDX', 'ETF', 'LOF', 'SF', 'FenjiA', 'FenjiB', 'FenjiMu',
'Stock', 'Fund', 'Index'
]
VALID_XUEQIU_FIELDS = [
'new_comments', 'total_comments',
'new_followers', 'total_followers',
'sell_actions', 'buy_actions',
]
VALID_MARGIN_FIELDS = [
'margin_balance',
'buy_on_margin_value',
'short_sell_quantity',
'margin_repayment',
'short_balance_quantity',
'short_repayment_quantity',
'short_balance',
'total_balance'
]
VALID_SHARE_FIELDS = [
'total', 'circulation_a', 'management_circulation', 'non_circulation_a', 'total_a'
]
VALID_TURNOVER_FIELDS = (
'today',
'week',
'month',
'three_month',
'six_month',
'year',
'current_year',
'total',
) | en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.291581 | 1 |
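The module above only defines whitelists of field names; a typical (hypothetical, not from the original repository) use is validating requested fields before running a data query, as in this sketch.

VALID_FIELDS = ['datetime', 'open', 'close', 'high', 'low', 'volume']  # abbreviated copy for the sketch

def check_fields(fields, valid_fields=VALID_FIELDS):
    # fail early on unknown field names instead of deep inside the data layer
    unknown = [f for f in fields if f not in valid_fields]
    if unknown:
        raise ValueError("invalid fields: {}".format(unknown))
    return fields

check_fields(['open', 'close'])        # passes
# check_fields(['open', 'turnover'])   # would raise ValueError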
src/PeerRead/data_cleaning/process_PeerRead_abstracts.py | dveni/causal-text-embeddings | 114 | 9248 | <filename>src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
"""
Simple pre-processing for PeerRead papers.
Takes in JSON formatted data from ScienceParse and outputs a tfrecord
Reference example:
https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py
"""
import argparse
import glob
import os
import random
import io
import json
from dateutil.parser import parse as parse_date
import tensorflow as tf
import bert.tokenization as tokenization
from PeerRead.ScienceParse.Paper import Paper
from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader
from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features
rng = random.Random(0)
def process_json_paper(paper_json_filename, scienceparse_dir, tokenizer):
paper = Paper.from_json(paper_json_filename)
paper.SCIENCEPARSE = ScienceParseReader.read_science_parse(paper.ID, paper.TITLE, paper.ABSTRACT,
scienceparse_dir)
# tokenize PeerRead features
try:
title_tokens = tokenizer.tokenize(paper.TITLE)
except ValueError: # missing titles are quite common in ScienceParse output
print("Missing title for " + paper_json_filename)
title_tokens = None
abstract_tokens = tokenizer.tokenize(paper.ABSTRACT)
text_features = {'title': title_tokens,
'abstract': abstract_tokens}
context_features = {'authors': paper.AUTHORS,
'accepted': paper.ACCEPTED,
'name': paper.ID}
# add hand crafted features from PeerRead
pr_hand_features = get_PeerRead_hand_features(paper)
context_features.update(pr_hand_features)
return text_features, context_features
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
"""
Tokenization and pre-processing of text as expected by Bert
Parameters
----------
example_tokens
max_seq_length
tokenizer
Returns
-------
"""
# Account for [CLS] and [SEP] with "- 2"
if len(example_tokens) > max_seq_length - 2:
example_tokens = example_tokens[0:(max_seq_length - 2)]
# The convention in BERT for single sequences is:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. (vv: Not relevant for us)
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
# vv: segment_ids seem to be the same as type_ids
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in example_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
def paper_to_bert_Example(text_features, context_features, max_seq_length, tokenizer):
"""
Parses the input paper into a tf.Example as expected by Bert
Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯
"""
abstract_features = {}
abstract_tokens, abstract_padding_mask, _ = \
bert_process_sentence(text_features['abstract'], max_seq_length, tokenizer)
abstract_features["token_ids"] = _int64_feature(abstract_tokens)
abstract_features["token_mask"] = _int64_feature(abstract_padding_mask)
# abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: omission may cause bugs
# abstract_features["label_ids"] = _int64_feature([feature.label_id])
# non-sequential features
tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features)
features = {**tf_context_features, **abstract_features}
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
else:
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
"""Wrapper for inserting a float Feature into a SequenceExample proto,
e.g., a float value.
"""
if isinstance(value, list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
else:
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
e.g., an image in bytes
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _dict_of_nonlist_numerical_to_tf_features(my_dict):
"""
Strip out non-numerical features
Returns tf_features_dict: a dictionary suitable for passing to tf.train.example
tf_types_dict: a dictionary of the tf types of previous dict
"""
tf_types_dict = {}
tf_features_dict = {}
for k, v in my_dict.items():
if isinstance(v, int) or isinstance(v, bool):
tf_features_dict[k] = _int64_feature(v)
tf_types_dict[k] = tf.int64
elif isinstance(v, float):
tf_features_dict[k] = _float_feature(v)
tf_types_dict[k] = tf.float32
else:
pass
return tf_features_dict, tf_types_dict
venues = {'acl': 1,
'conll': 2,
'iclr': 3,
'nips': 4,
'icml': 5,
'emnlp': 6,
'aaai': 7,
'hlt-naacl': 8,
'arxiv': 0}
def _venues(venue_name):
if venue_name.lower() in venues:
return venues[venue_name.lower()]
else:
return -1
def _arxiv_subject(subjects):
subject = subjects[0]
if 'lg' in subject.lower():
return 0
elif 'cl' in subject.lower():
return 1
elif 'ai' in subject.lower():
return 2
else:
raise Exception("arxiv subject not recognized")
def clean_PeerRead_dataset(review_json_dir, parsedpdf_json_dir,
venue, year,
out_dir, out_file,
max_abs_len, tokenizer,
default_accept=1,
is_arxiv = False):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print('Reading reviews from...', review_json_dir)
paper_json_filenames = sorted(glob.glob('{}/*.json'.format(review_json_dir)))
with tf.python_io.TFRecordWriter(out_dir + "/" + out_file) as writer:
for idx, paper_json_filename in enumerate(paper_json_filenames):
text_features, context_features = process_json_paper(paper_json_filename, parsedpdf_json_dir, tokenizer)
if context_features['accepted'] is None: # missing for conferences other than ICLR (we only see accepts)
context_features['accepted'] = default_accept
many_split = rng.randint(0, 100) # useful for easy data splitting later
# other context features
arxiv = -1
if is_arxiv:
with io.open(paper_json_filename) as json_file:
loaded = json.load(json_file)
year = parse_date(loaded['DATE_OF_SUBMISSION']).year
venue = _venues(loaded['conference'])
arxiv = _arxiv_subject([loaded['SUBJECTS']])
extra_context = {'id': idx, 'venue': venue, 'year': year, 'many_split': many_split,
'arxiv': arxiv}
context_features.update(extra_context)
# turn it into a tf.data example
paper_ex = paper_to_bert_Example(text_features, context_features,
max_seq_length=max_abs_len, tokenizer=tokenizer)
writer.write(paper_ex.SerializeToString())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--review-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/reviews')
parser.add_argument('--parsedpdf-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/parsed_pdfs')
parser.add_argument('--out-dir', type=str, default='../dat/PeerRead/proc')
parser.add_argument('--out-file', type=str, default='arxiv-all.tf_record')
parser.add_argument('--vocab-file', type=str, default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt')
parser.add_argument('--max-abs-len', type=int, default=250)
parser.add_argument('--venue', type=int, default=0)
parser.add_argument('--year', type=int, default=2017)
args = parser.parse_args()
tokenizer = tokenization.FullTokenizer(
vocab_file=args.vocab_file, do_lower_case=True)
clean_PeerRead_dataset(args.review_json_dir, args.parsedpdf_json_dir,
args.venue, args.year,
args.out_dir, args.out_file,
args.max_abs_len, tokenizer, is_arxiv=True)
if __name__ == "__main__":
main()
| <filename>src/PeerRead/data_cleaning/process_PeerRead_abstracts.py
"""
Simple pre-processing for PeerRead papers.
Takes in JSON formatted data from ScienceParse and outputs a tfrecord
Reference example:
https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py
"""
import argparse
import glob
import os
import random
import io
import json
from dateutil.parser import parse as parse_date
import tensorflow as tf
import bert.tokenization as tokenization
from PeerRead.ScienceParse.Paper import Paper
from PeerRead.ScienceParse.ScienceParseReader import ScienceParseReader
from PeerRead.data_cleaning.PeerRead_hand_features import get_PeerRead_hand_features
rng = random.Random(0)
def process_json_paper(paper_json_filename, scienceparse_dir, tokenizer):
paper = Paper.from_json(paper_json_filename)
paper.SCIENCEPARSE = ScienceParseReader.read_science_parse(paper.ID, paper.TITLE, paper.ABSTRACT,
scienceparse_dir)
# tokenize PeerRead features
try:
title_tokens = tokenizer.tokenize(paper.TITLE)
except ValueError: # missing titles are quite common in ScienceParse output
print("Missing title for " + paper_json_filename)
title_tokens = None
abstract_tokens = tokenizer.tokenize(paper.ABSTRACT)
text_features = {'title': title_tokens,
'abstract': abstract_tokens}
context_features = {'authors': paper.AUTHORS,
'accepted': paper.ACCEPTED,
'name': paper.ID}
# add hand crafted features from PeerRead
pr_hand_features = get_PeerRead_hand_features(paper)
context_features.update(pr_hand_features)
return text_features, context_features
def bert_process_sentence(example_tokens, max_seq_length, tokenizer):
"""
Tokenization and pre-processing of text as expected by Bert
Parameters
----------
example_tokens
max_seq_length
tokenizer
Returns
-------
"""
# Account for [CLS] and [SEP] with "- 2"
if len(example_tokens) > max_seq_length - 2:
example_tokens = example_tokens[0:(max_seq_length - 2)]
# The convention in BERT for single sequences is:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. (vv: Not relevant for us)
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
# vv: segment_ids seem to be the same as type_ids
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in example_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids
def paper_to_bert_Example(text_features, context_features, max_seq_length, tokenizer):
"""
Parses the input paper into a tf.Example as expected by Bert
Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯
"""
abstract_features = {}
abstract_tokens, abstract_padding_mask, _ = \
bert_process_sentence(text_features['abstract'], max_seq_length, tokenizer)
abstract_features["token_ids"] = _int64_feature(abstract_tokens)
abstract_features["token_mask"] = _int64_feature(abstract_padding_mask)
# abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: omission may cause bugs
# abstract_features["label_ids"] = _int64_feature([feature.label_id])
# non-sequential features
tf_context_features, tf_context_features_types = _dict_of_nonlist_numerical_to_tf_features(context_features)
features = {**tf_context_features, **abstract_features}
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
return tf_example
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
e.g, An integer label.
"""
if isinstance(value, list):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
else:
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
"""Wrapper for inserting a float Feature into a SequenceExample proto,
e.g., a float value.
"""
if isinstance(value, list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
else:
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
e.g., an image in bytes
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _dict_of_nonlist_numerical_to_tf_features(my_dict):
"""
Strip out non-numerical features
Returns tf_features_dict: a dictionary suitable for passing to tf.train.example
tf_types_dict: a dictionary of the tf types of previous dict
"""
tf_types_dict = {}
tf_features_dict = {}
for k, v in my_dict.items():
if isinstance(v, int) or isinstance(v, bool):
tf_features_dict[k] = _int64_feature(v)
tf_types_dict[k] = tf.int64
elif isinstance(v, float):
tf_features_dict[k] = _float_feature(v)
tf_types_dict[k] = tf.float32
else:
pass
return tf_features_dict, tf_types_dict
venues = {'acl': 1,
'conll': 2,
'iclr': 3,
'nips': 4,
'icml': 5,
'emnlp': 6,
'aaai': 7,
'hlt-naacl': 8,
'arxiv': 0}
def _venues(venue_name):
if venue_name.lower() in venues:
return venues[venue_name.lower()]
else:
return -1
def _arxiv_subject(subjects):
subject = subjects[0]
if 'lg' in subject.lower():
return 0
elif 'cl' in subject.lower():
return 1
elif 'ai' in subject.lower():
return 2
else:
raise Exception("arxiv subject not recognized")
def clean_PeerRead_dataset(review_json_dir, parsedpdf_json_dir,
venue, year,
out_dir, out_file,
max_abs_len, tokenizer,
default_accept=1,
is_arxiv = False):
if not os.path.exists(out_dir):
os.makedirs(out_dir)
print('Reading reviews from...', review_json_dir)
paper_json_filenames = sorted(glob.glob('{}/*.json'.format(review_json_dir)))
with tf.python_io.TFRecordWriter(out_dir + "/" + out_file) as writer:
for idx, paper_json_filename in enumerate(paper_json_filenames):
text_features, context_features = process_json_paper(paper_json_filename, parsedpdf_json_dir, tokenizer)
if context_features['accepted'] is None: # missing for conferences other than ICLR (we only see accepts)
context_features['accepted'] = default_accept
many_split = rng.randint(0, 100) # useful for easy data splitting later
# other context features
arxiv = -1
if is_arxiv:
with io.open(paper_json_filename) as json_file:
loaded = json.load(json_file)
year = parse_date(loaded['DATE_OF_SUBMISSION']).year
venue = _venues(loaded['conference'])
arxiv = _arxiv_subject([loaded['SUBJECTS']])
extra_context = {'id': idx, 'venue': venue, 'year': year, 'many_split': many_split,
'arxiv': arxiv}
context_features.update(extra_context)
# turn it into a tf.data example
paper_ex = paper_to_bert_Example(text_features, context_features,
max_seq_length=max_abs_len, tokenizer=tokenizer)
writer.write(paper_ex.SerializeToString())
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--review-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/reviews')
parser.add_argument('--parsedpdf-json-dir', type=str, default='../dat/PeerRead/arxiv.all/all/parsed_pdfs')
parser.add_argument('--out-dir', type=str, default='../dat/PeerRead/proc')
parser.add_argument('--out-file', type=str, default='arxiv-all.tf_record')
parser.add_argument('--vocab-file', type=str, default='../../bert/pre-trained/uncased_L-12_H-768_A-12/vocab.txt')
parser.add_argument('--max-abs-len', type=int, default=250)
parser.add_argument('--venue', type=int, default=0)
parser.add_argument('--year', type=int, default=2017)
args = parser.parse_args()
tokenizer = tokenization.FullTokenizer(
vocab_file=args.vocab_file, do_lower_case=True)
clean_PeerRead_dataset(args.review_json_dir, args.parsedpdf_json_dir,
args.venue, args.year,
args.out_dir, args.out_file,
args.max_abs_len, tokenizer, is_arxiv=True)
if __name__ == "__main__":
main()
| en | 0.795163 | Simple pre-processing for PeerRead papers. Takes in JSON formatted data from ScienceParse and outputs a tfrecord Reference example: https://github.com/tensorlayer/tensorlayer/blob/9528da50dfcaf9f0f81fba9453e488a1e6c8ee8f/examples/data_process/tutorial_tfrecord3.py # tokenize PeerRead features # missing titles are quite common sciparse # add hand crafted features from PeerRead Tokenization and pre-processing of text as expected by Bert Parameters ---------- example_tokens max_seq_length tokenizer Returns ------- # Account for [CLS] and [SEP] with "- 2" # The convention in BERT for single sequences is: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. (vv: Not relevant for us) # For classification tasks, the first vector (corresponding to [CLS]) is # used as as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. # vv: segment_ids seem to be the same as type_ids # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. # Zero-pad up to the sequence length. Parses the input paper into a tf.Example as expected by Bert Note: the docs for tensorflow Example are awful ¯\_(ツ)_/¯ # abstract_features["segment_ids"] = create_int_feature(feature.segment_ids) TODO: ommission may cause bugs # abstract_features["label_ids"] = _int64_feature([feature.label_id]) # non-sequential features Wrapper for inserting an int64 Feature into a SequenceExample proto, e.g, An integer label. Wrapper for inserting a float Feature into a SequenceExample proto, e.g, An integer label. Wrapper for inserting a bytes Feature into a SequenceExample proto, e.g, an image in byte # return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)])) Strip out non-numerical features Returns tf_features_dict: a dictionary suitable for passing to tf.train.example tf_types_dict: a dictionary of the tf types of previous dict # missing for conferences other than ICLR (we only see accepts) # useful for easy data splitting later # other context features # turn it into a tf.data example | 2.82543 | 3 |
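A hedged sketch of how the records written by the script above could be read back. The feature names (token_ids, token_mask, accepted) come from paper_to_bert_Example; the sequence length and the TF 2.x tf.io API are assumptions, not part of the original file.

import tensorflow as tf

MAX_ABS_LEN = 250  # matches the script's --max-abs-len default

feature_spec = {
    "token_ids": tf.io.FixedLenFeature([MAX_ABS_LEN], tf.int64),
    "token_mask": tf.io.FixedLenFeature([MAX_ABS_LEN], tf.int64),
    "accepted": tf.io.FixedLenFeature([], tf.int64),
}

def parse_example(serialized):
    # inverse of paper_to_bert_Example for the features used here
    return tf.io.parse_single_example(serialized, feature_spec)

dataset = tf.data.TFRecordDataset("arxiv-all.tf_record").map(parse_example)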
app/packageB/__init__.py | An7ar35/python-app-skeleton-structure | 0 | 9249 | <reponame>An7ar35/python-app-skeleton-structure<gh_stars>0
__all__=['module1'] | __all__=['module1'] | none | 1 | 1.050038 | 1 |
|
lib/shop.py | ZakDoesGaming/OregonTrail | 6 | 9250 | from pygame import Surface, font
from copy import copy
from random import randint, choice
import string
from lib.transactionButton import TransactionButton
SHOP_PREFIX = ["archer", "baker", "fisher", "miller", "rancher", "robber"]
SHOP_SUFFIX = ["cave", "creek", "desert", "farm", "field", "forest", "hill", "lake", "mountain", "pass", "valley", "woods"]
class Shop():
def __init__(self, name, inventory, priceModifier, groupInventory, groupMoney, itemPrices, position, blitPosition, money, resourcePath):
self.yValue = 40
self.groupInventory = groupInventory
self.groupMoney = groupMoney
self.priceModifier = priceModifier
self.itemPrices = itemPrices
self.inventory = inventory
self.position = position
self.blitPosition = blitPosition
self.resourcePath = resourcePath
self.buyButtonList = []
self.sellButtonList = []
self.xPos = (-self.position * 40) + 1280
self.shopSurface = Surface((500, 300)).convert()
self.sepLine = Surface((self.shopSurface.get_width(), 10)).convert()
self.sepLine.fill((0, 0, 0))
self.invContainer = Surface((self.shopSurface.get_width() - 20,
self.shopSurface.get_height() / 2 - 35)).convert()
self.invContainer.fill((255, 255, 255))
self.titleFont = font.Font("res/fonts/west.ttf", 17)
self.textFont = font.Font("res/fonts/west.ttf", 15)
if (name == ""):
self.name = (choice(SHOP_PREFIX) + "'s " + choice(SHOP_SUFFIX)).capitalize()
else:
self.name = name
if (self.inventory == {}):
inventoryRandom = copy(self.groupInventory)
for key in list(inventoryRandom.keys()):
inventoryRandom[key] = randint(0, 10)
inventoryRandom["Food"] *= 20
self.inventory = inventoryRandom
if (money is None):
self.money = randint(200, 500)
else:
self.money = money
self.render()
def get_surface(self):
self.render()
return self.shopSurface
def update(self, groupInv, groupMoney):
self.groupInventory = groupInv
self.groupMoney = groupMoney
self.render()
def move(self, moveValue):
self.xPos += (2 * moveValue)
self.render()
def render(self):
self.yValue = 40
self.shopSurface.fill((133, 94, 66))
self.shopSurface.blit(self.titleFont.render(self.name + " - $" + str(self.money), 1, (0, 0, 255)), (10, 5))
self.shopSurface.blit(self.invContainer, (10, 25))
self.shopSurface.blit(self.invContainer, (10, self.shopSurface.get_height() / 2 + 30))
self.shopSurface.blit(self.textFont.render("Inventory", 1, (255, 0, 0)), (10, 25))
self.shopSurface.blit(self.textFont.render("Amount", 1, (255, 0, 0)), (130, 25))
self.shopSurface.blit(self.textFont.render("Price", 1, (255, 0, 0)), (200, 25))
for key in list(self.inventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.inventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$"+str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.buyButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.buyButtonList.append(TransactionButton(transaction = "buy",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.buyButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
self.shopSurface.blit(self.sepLine, (0, float(self.shopSurface.get_height()) / 2))
self.shopSurface.blit(self.titleFont.render("You - $" + str(self.groupMoney), 1, (0, 0, 255)),
(10, float(self.shopSurface.get_height()) / 2 + 10))
self.shopSurface.blit(self.titleFont.render("Inventory", 1, (255, 0, 0)),
(10, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Amount", 1, (255, 0, 0)),
(130, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Price", 1, (255, 0, 0)),
(200, float(self.shopSurface.get_height()) / 2 + 30))
self.yValue = (float(self.shopSurface.get_height()) / 2) + 45
for key in list(self.groupInventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.groupInventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$" + str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.sellButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.sellButtonList.append(TransactionButton(transaction = "sell",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.sellButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
| from pygame import Surface, font
from copy import copy
from random import randint, choice
import string
from lib.transactionButton import TransactionButton
SHOP_PREFIX = ["archer", "baker", "fisher", "miller", "rancher", "robber"]
SHOP_SUFFIX = ["cave", "creek", "desert", "farm", "field", "forest", "hill", "lake", "mountain", "pass", "valley", "woods"]
class Shop():
def __init__(self, name, inventory, priceModifier, groupInventory, groupMoney, itemPrices, position, blitPosition, money, resourcePath):
self.yValue = 40
self.groupInventory = groupInventory
self.groupMoney = groupMoney
self.priceModifier = priceModifier
self.itemPrices = itemPrices
self.inventory = inventory
self.position = position
self.blitPosition = blitPosition
self.resourcePath = resourcePath
self.buyButtonList = []
self.sellButtonList = []
self.xPos = (-self.position * 40) + 1280
self.shopSurface = Surface((500, 300)).convert()
self.sepLine = Surface((self.shopSurface.get_width(), 10)).convert()
self.sepLine.fill((0, 0, 0))
self.invContainer = Surface((self.shopSurface.get_width() - 20,
self.shopSurface.get_height() / 2 - 35)).convert()
self.invContainer.fill((255, 255, 255))
self.titleFont = font.Font("res/fonts/west.ttf", 17)
self.textFont = font.Font("res/fonts/west.ttf", 15)
if (name == ""):
self.name = (choice(SHOP_PREFIX) + "'s " + choice(SHOP_SUFFIX)).capitalize()
else:
self.name = name
if (self.inventory == {}):
inventoryRandom = copy(self.groupInventory)
for key in list(inventoryRandom.keys()):
inventoryRandom[key] = randint(0, 10)
inventoryRandom["Food"] *= 20
self.inventory = inventoryRandom
if (money is None):
self.money = randint(200, 500)
else:
self.money = money
self.render()
def get_surface(self):
self.render()
return self.shopSurface
def update(self, groupInv, groupMoney):
self.groupInventory = groupInv
self.groupMoney = groupMoney
self.render()
def move(self, moveValue):
self.xPos += (2 * moveValue)
self.render()
def render(self):
self.yValue = 40
self.shopSurface.fill((133, 94, 66))
self.shopSurface.blit(self.titleFont.render(self.name + " - $" + str(self.money), 1, (0, 0, 255)), (10, 5))
self.shopSurface.blit(self.invContainer, (10, 25))
self.shopSurface.blit(self.invContainer, (10, self.shopSurface.get_height() / 2 + 30))
self.shopSurface.blit(self.textFont.render("Inventory", 1, (255, 0, 0)), (10, 25))
self.shopSurface.blit(self.textFont.render("Amount", 1, (255, 0, 0)), (130, 25))
self.shopSurface.blit(self.textFont.render("Price", 1, (255, 0, 0)), (200, 25))
for key in list(self.inventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.inventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$"+str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.buyButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.buyButtonList.append(TransactionButton(transaction = "buy",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.buyButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
self.shopSurface.blit(self.sepLine, (0, float(self.shopSurface.get_height()) / 2))
self.shopSurface.blit(self.titleFont.render("You - $" + str(self.groupMoney), 1, (0, 0, 255)),
(10, float(self.shopSurface.get_height()) / 2 + 10))
self.shopSurface.blit(self.titleFont.render("Inventory", 1, (255, 0, 0)),
(10, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Amount", 1, (255, 0, 0)),
(130, float(self.shopSurface.get_height()) / 2 + 30))
self.shopSurface.blit(self.titleFont.render("Price", 1, (255, 0, 0)),
(200, float(self.shopSurface.get_height()) / 2 + 30))
self.yValue = (float(self.shopSurface.get_height()) / 2) + 45
for key in list(self.groupInventory.keys()):
self.shopSurface.blit(self.textFont.render(key + ":", 1, (0, 0, 0)), (10, self.yValue))
self.shopSurface.blit(self.textFont.render(str(self.groupInventory[key]), 1,
(0, 0, 0)), (150, self.yValue))
self.shopSurface.blit(self.textFont.render("$" + str(self.itemPrices[key] * self.priceModifier), 1,
(0, 0, 0)), (200, self.yValue))
if (len(self.sellButtonList) < len(self.inventory.keys())):
buttonPos = tuple(map(sum, zip(self.blitPosition, (250, self.yValue))))
self.sellButtonList.append(TransactionButton(transaction = "sell",
item = key,
imagePosition = (250, self.yValue),
rectPosition = buttonPos,
resourcePath = self.resourcePath))
self.yValue += 30
for button in self.sellButtonList:
self.shopSurface.blit(button.image, button.imagePosition)
| none | 1 | 3.112693 | 3 |
|
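For context on the record above, the inventory-randomization step in Shop.__init__ can be reproduced without pygame. This is an illustrative sketch only; the starting inventory keys below are hypothetical.

from copy import copy
from random import randint

group_inventory = {"Food": 0, "Oxen": 0, "Clothing": 0, "Ammunition": 0}  # hypothetical keys

shop_inventory = copy(group_inventory)
for key in list(shop_inventory.keys()):
    shop_inventory[key] = randint(0, 10)
shop_inventory["Food"] *= 20  # shops stock food in bulk, as in Shop.__init__

print(shop_inventory)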
core/dataflow/test/test_runners.py | ajmal017/amp | 0 | 9251 | <gh_stars>0
import logging
import numpy as np
import core.dataflow as dtf
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestRollingFitPredictDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
dag_builder.get_dag(config)
#
dag_runner = dtf.RollingFitPredictDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 09:30",
end="2010-01-04 15:30",
retraining_freq="H",
retraining_lookback=4,
)
result_bundles = list(dag_runner.fit_predict())
np.testing.assert_equal(len(result_bundles), 2)
class TestIncrementalDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
# Create DAG and generate fit state.
dag = dag_builder.get_dag(config)
dag.run_leq_node("rets/clip", "fit")
fit_state = dtf.get_fit_state(dag)
#
dag_runner = dtf.IncrementalDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 15:30",
end="2010-01-04 15:45",
freq="5T",
fit_state=fit_state,
)
result_bundles = list(dag_runner.predict())
self.assertEqual(len(result_bundles), 4)
# Check that dataframe results of `col` do not retroactively change
# over successive prediction steps (which would suggest future
# peeking).
col = "vwap_ret_0_vol_2_hat"
for rb_i, rb_i_next in zip(result_bundles[:-1], result_bundles[1:]):
srs_i = rb_i.result_df[col]
srs_i_next = rb_i_next.result_df[col]
self.assertTrue(srs_i.compare(srs_i_next[:-1]).empty)
| import logging
import numpy as np
import core.dataflow as dtf
import helpers.unit_test as hut
_LOG = logging.getLogger(__name__)
class TestRollingFitPredictDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
dag_builder.get_dag(config)
#
dag_runner = dtf.RollingFitPredictDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 09:30",
end="2010-01-04 15:30",
retraining_freq="H",
retraining_lookback=4,
)
result_bundles = list(dag_runner.fit_predict())
np.testing.assert_equal(len(result_bundles), 2)
class TestIncrementalDagRunner(hut.TestCase):
def test1(self) -> None:
"""
Test the DagRunner using `ArmaReturnsBuilder`
"""
dag_builder = dtf.ArmaReturnsBuilder()
config = dag_builder.get_config_template()
# Create DAG and generate fit state.
dag = dag_builder.get_dag(config)
dag.run_leq_node("rets/clip", "fit")
fit_state = dtf.get_fit_state(dag)
#
dag_runner = dtf.IncrementalDagRunner(
config=config,
dag_builder=dag_builder,
start="2010-01-04 15:30",
end="2010-01-04 15:45",
freq="5T",
fit_state=fit_state,
)
result_bundles = list(dag_runner.predict())
self.assertEqual(len(result_bundles), 4)
# Check that dataframe results of `col` do not retroactively change
# over successive prediction steps (which would suggest future
# peeking).
col = "vwap_ret_0_vol_2_hat"
for rb_i, rb_i_next in zip(result_bundles[:-1], result_bundles[1:]):
srs_i = rb_i.result_df[col]
srs_i_next = rb_i_next.result_df[col]
self.assertTrue(srs_i.compare(srs_i_next[:-1]).empty) | en | 0.736492 | Test the DagRunner using `ArmaReturnsBuilder` # Test the DagRunner using `ArmaReturnsBuilder` # Create DAG and generate fit state. # # Check that dataframe results of `col` do not retroactively change # over successive prediction steps (which would suggest future # peeking). | 2.193833 | 2 |
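The assertion at the end of the test above relies on pandas' Series.compare to detect retroactive changes in predictions (future peeking). A minimal standalone illustration, assuming pandas >= 1.1; the numbers are made up.

import pandas as pd

srs_i = pd.Series([0.1, 0.2, 0.3])            # predictions after step i
srs_i_next = pd.Series([0.1, 0.2, 0.3, 0.4])  # one more step, earlier history unchanged

# an empty comparison over the overlapping index means nothing changed retroactively
assert srs_i.compare(srs_i_next[:-1]).empty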
Main Project/Main_Program.py | hmnk-1967/OCR-Python-Project-CS-BUIC | 0 | 9252 | import tkinter.messagebox
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import numpy
import pytesseract #Python wrapper for Google-owned OCR engine known by the name of Tesseract.
import cv2
from PIL import Image, ImageTk
import os
root = tk.Tk()
root.title("Object Character Recognizer")
root.geometry("1280x720")
test_image = None
def browse_image():
fin = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select Image File", filetypes=(("PNG Files", "*.png"), ("JPG Files", "*.jpg"), ("All Files", "*.*")))
global test_image
image = Image.open(fin)
test_image = image
img = ImageTk.PhotoImage(image.resize((650, 400)))
lb = tk.Label(image=img)
lb.place(x=25, y=50)
root.mainloop()
def use_ocr_default():
try:
global test_image
messge = None
#OEM stands for OCR Engine Mode and PSM stands for Page Segmentation Mode.
#OEM defines what kind of OCR engine is to be used (this defines the dataset that would be used to cross-match
#the available data with the testing data).
#PSM defines how Tesseract will treat the image that supposedly contains characters and how it will extract the
#data from the image.
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except: # Print an error message when the user inputs an incompatible image.
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_handwriting():
try:
global test_image
opencv_img = numpy.array(test_image)
opencv_img = opencv_img[:, :, ::-1].copy() #This line is used to convert RGB PIL image file to BGR cv2 image file.
blurred_img = cv2.medianBlur(opencv_img, 5)
gray_img = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(gray_img, 122, 255, cv2.THRESH_BINARY)
messge = None
tess = pytesseract.image_to_string(binary, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_singletext():
try:
global test_image
messge = None
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 7')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
w = tk.LabelFrame(root, text="Image:", width=768, height=600)
w.place(x=20, y=10)
w.pack_propagate(0)
w1 = tk.LabelFrame(root, text="Extracted Text:", width=500, height=310)
w1.place(x=800, y=300)
w2 = tk.LabelFrame(root, text="Operations:", width=350, height=280)
w2.place(x=800, y=10)
btn1 = tk.Button(w2, text="Load Image", padx=40, pady=10, command=browse_image)
btn1.place(x=22, y=20)
btn1 = tk.Button(w2, text="Run Handwritten OCR", padx=40, pady=10, command=use_ocr_handwriting)
btn1.place(x=22, y=80)
btn1 = tk.Button(w2, text="Run Default OCR", padx=40, pady=10, command=use_ocr_default)
btn1.place(x=22, y=140)
btn1 = tk.Button(w2, text="Run Single Text OCR", padx=40, pady=10, command=use_ocr_singletext)
btn1.place(x=22, y=200)
root.mainloop()
| import tkinter.messagebox
from tkinter import *
import tkinter as tk
from tkinter import filedialog
import numpy
import pytesseract #Python wrapper for Google-owned OCR engine known by the name of Tesseract.
import cv2
from PIL import Image, ImageTk
import os
root = tk.Tk()
root.title("Object Character Recognizer")
root.geometry("1280x720")
test_image = None
def browse_image():
fin = filedialog.askopenfilename(initialdir=os.getcwd(), title="Select Image File", filetypes=(("PNG Files", "*.png"), ("JPG Files", "*.jpg"), ("All Files", "*.*")))
global test_image
image = Image.open(fin)
test_image = image
img = ImageTk.PhotoImage(image.resize((650, 400)))
lb = tk.Label(image=img)
lb.place(x=25, y=50)
root.mainloop()
def use_ocr_default():
try:
global test_image
messge = None
#OEM stands for OCR Engine Mode and PSM stands for Page Segmentation Mode.
#OEM defines what kind of OCR engine is to be used (this defines the dataset that would be used to cross-match
#the available data with the testing data).
#PSM defines how Tesseract will treat the image that supposedly contains characters and how it will extract the
#data from the image.
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except: # Print an error message when the user inputs an incompatible image.
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_handwriting():
try:
global test_image
opencv_img = numpy.array(test_image)
opencv_img = opencv_img[:, :, ::-1].copy() #This line is used to convert RGB PIL image file to BGR cv2 image file.
blurred_img = cv2.medianBlur(opencv_img, 5)
gray_img = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(gray_img, 122, 255, cv2.THRESH_BINARY)
messge = None
tess = pytesseract.image_to_string(binary, config='-l eng --oem 1 --psm 3')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
def use_ocr_singletext():
try:
global test_image
messge = None
tess = pytesseract.image_to_string(test_image, config='-l eng --oem 1 --psm 7')
label = Label(messge, text='Result:')
label.place(x=850, y=320)
display_message = Text(messge, width=46, height=15)
display_message.insert(END, str(tess))
display_message.config(state=DISABLED)
display_message.delete(0, END)
display_message.place(x=890, y=330)
except:
tkinter.messagebox.showinfo('Something\'s Wrong!', 'Your picture may not contain English characters or you may have not selected a picture. Please select a picture with detectable English characters.')
w = tk.LabelFrame(root, text="Image:", width=768, height=600)
w.place(x=20, y=10)
w.pack_propagate(0)
w1 = tk.LabelFrame(root, text="Extracted Text:", width=500, height=310)
w1.place(x=800, y=300)
w2 = tk.LabelFrame(root, text="Operations:", width=350, height=280)
w2.place(x=800, y=10)
btn1 = tk.Button(w2, text="Load Image", padx=40, pady=10, command=browse_image)
btn1.place(x=22, y=20)
btn1 = tk.Button(w2, text="Run Handwritten OCR", padx=40, pady=10, command=use_ocr_handwriting)
btn1.place(x=22, y=80)
btn1 = tk.Button(w2, text="Run Default OCR", padx=40, pady=10, command=use_ocr_default)
btn1.place(x=22, y=140)
btn1 = tk.Button(w2, text="Run Single Text OCR", padx=40, pady=10, command=use_ocr_singletext)
btn1.place(x=22, y=200)
root.mainloop()
| en | 0.865062 | #Python wrapper for Google-owned OCR engine known by the name of Tesseract. #OEM stands for OCR Engine Mode and PSM stands for Page Segmentation Mode. #OEM defines what kind of OCR engine is to be used (this defines the dataset that would be used to cross-match #the available data with the testing data). #PSM defines how Tesseract will treat the image that supposedly contains characters and how it will extract the #data from the image. #Print a error message when the user inputs an incompatible image. #This line is used to convert RGB PIL image file to BGR cv2 image file. | 3.072035 | 3 |
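As a reference for the OEM/PSM comments in the record above, here is a hedged sketch of the same pytesseract call with a few common page segmentation modes. The image path is hypothetical, and a local Tesseract installation is assumed.

import pytesseract
from PIL import Image

img = Image.open("sample.png")  # hypothetical input image

# --oem 1 selects the LSTM engine; --psm controls how the page is segmented
full_page = pytesseract.image_to_string(img, config="-l eng --oem 1 --psm 3")    # automatic page segmentation
single_line = pytesseract.image_to_string(img, config="-l eng --oem 1 --psm 7")  # treat image as one text line
single_word = pytesseract.image_to_string(img, config="-l eng --oem 1 --psm 8")  # treat image as one word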
third_party/nasm/workspace.bzl | wainshine/tensorflow | 54 | 9253 | """loads the nasm library, used by TF."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
tf_http_archive(
name = "nasm",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
"http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.13.03.tar.bz2/sha512/d7a6b4cee8dfd603d8d4c976e5287b5cc542fa0b466ff989b743276a6e28114e64289bf02a7819eca63142a5278aa6eed57773007e5f589e15768e6456a8919d/nasm-2.13.03.tar.bz2",
"http://www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
],
sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011",
strip_prefix = "nasm-2.13.03",
build_file = "//third_party/nasm:nasm.BUILD",
system_build_file = "//third_party/nasm:BUILD.system",
)
| """loads the nasm library, used by TF."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
tf_http_archive(
name = "nasm",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
"http://pkgs.fedoraproject.org/repo/pkgs/nasm/nasm-2.13.03.tar.bz2/sha512/d7a6b4cee8dfd603d8d4c976e5287b5cc542fa0b466ff989b743276a6e28114e64289bf02a7819eca63142a5278aa6eed57773007e5f589e15768e6456a8919d/nasm-2.13.03.tar.bz2",
"http://www.nasm.us/pub/nasm/releasebuilds/2.13.03/nasm-2.13.03.tar.bz2",
],
sha256 = "63ec86477ad3f0f6292325fd89e1d93aea2e2fd490070863f17d48f7cd387011",
strip_prefix = "nasm-2.13.03",
build_file = "//third_party/nasm:nasm.BUILD",
system_build_file = "//third_party/nasm:BUILD.system",
)
| en | 0.929398 | loads the nasm library, used by TF. | 1.540133 | 2 |
python/tests/test-1-vector.py | wence-/libCEED | 0 | 9254 | # Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Vector functionality
import os
import libceed
import numpy as np
import check
TOL = libceed.EPSILON * 256
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
def check_values(ceed, x, value):
with x.array_read() as b:
for i in range(len(b)):
assert b[i] == value
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector
# -------------------------------------------------------------------------------
def test_100(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test setValue
# -------------------------------------------------------------------------------
def test_101(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
value = 1
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
for i in range(len(b)):
assert b[i] == 10 + i
x.set_value(3.0)
check_values(ceed, x, 3.0)
del x
x = ceed.Vector(n)
# Set value before setting or getting the array
x.set_value(5.0)
check_values(ceed, x, 5.0)
# -------------------------------------------------------------------------------
# Test getArrayRead state counter
# -------------------------------------------------------------------------------
def test_102(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
x.set_value(0)
# Two read accesses should not generate an error
a = x.get_array_read()
b = x.get_array_read()
x.restore_array_read()
x.restore_array_read()
# -------------------------------------------------------------------------------
# Test setting one vector from array of another vector
# -------------------------------------------------------------------------------
def test_103(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as x_array:
y.set_array(x_array, cmode=libceed.USE_POINTER)
with y.array_read() as y_array:
for i in range(n):
assert y_array[i] == 10 + i
# -------------------------------------------------------------------------------
# Test getArray to modify array
# -------------------------------------------------------------------------------
def test_104(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.zeros(n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
b[3] = -3.14
if libceed.lib.CEED_SCALAR_TYPE == libceed.SCALAR_FP32:
assert a[3] == np.float32(-3.14)
else:
assert a[3] == -3.14
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector using
# CEED_MEM_DEVICE
# -------------------------------------------------------------------------------
def test_105(ceed_resource):
# Skip test for non-GPU backend
if 'gpu' in ceed_resource:
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
arr = x.get_array_read(memtype=libceed.MEM_DEVICE)
y.set_array(arr, memtype=libceed.MEM_DEVICE)
x.restore_array_read()
with y.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test view
# -------------------------------------------------------------------------------
def test_107(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
print(x)
stdout, stderr, ref_stdout = check.output(capsys)
assert not stderr
assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test norms
# -------------------------------------------------------------------------------
def test_108(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
for i in range(n):
if (i % 2 == 0):
a[i] *= -1
x.set_array(a, cmode=libceed.USE_POINTER)
norm = x.norm(normtype=libceed.NORM_1)
assert abs(norm - 45.) < TOL
norm = x.norm()
assert abs(norm - np.sqrt(285.)) < TOL
norm = x.norm(normtype=libceed.NORM_MAX)
assert abs(norm - 9.) < TOL
# -------------------------------------------------------------------------------
# Test taking the reciprocal of a vector
# -------------------------------------------------------------------------------
def test_119(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
x.reciprocal()
with x.array_read() as b:
for i in range(n):
assert abs(b[i] - 1. / (10 + i)) < TOL
# -------------------------------------------------------------------------------
# Test AXPY
# -------------------------------------------------------------------------------
def test_121(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
y.axpy(-0.5, x)
with y.array() as b:
assert np.allclose(.5 * a, b)
# -------------------------------------------------------------------------------
# Test pointwise multiplication
# -------------------------------------------------------------------------------
def test_122(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
w = ceed.Vector(n)
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
w.set_array(a, cmode=libceed.COPY_VALUES)
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
w.pointwise_mult(x, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
w.pointwise_mult(w, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i) < 1e-14
w.pointwise_mult(x, w)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i * i) < 1e-14
y.pointwise_mult(y, y)
with y.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
# -------------------------------------------------------------------------------
# Test Scale
# -------------------------------------------------------------------------------
def test_123(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
x.scale(-0.5)
with x.array() as b:
assert np.allclose(-.5 * a, b)
# -------------------------------------------------------------------------------
# Test getArrayWrite to modify array
# -------------------------------------------------------------------------------
def test_124(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
with x.array_write() as a:
for i in range(len(a)):
a[i] = 3 * i
with x.array_read() as a:
for i in range(len(a)):
assert a[i] == 3 * i
# -------------------------------------------------------------------------------
# Test modification of reshaped array
# -------------------------------------------------------------------------------
def test_199(ceed_resource):
"""Modification of reshaped array"""
ceed = libceed.Ceed(ceed_resource)
vec = ceed.Vector(12)
vec.set_value(0.0)
with vec.array(4, 3) as x:
x[...] = np.eye(4, 3)
with vec.array_read(3, 4) as x:
assert np.all(x == np.eye(4, 3).reshape(3, 4))
# -------------------------------------------------------------------------------
| # Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at
# the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights
# reserved. See files LICENSE and NOTICE for details.
#
# This file is part of CEED, a collection of benchmarks, miniapps, software
# libraries and APIs for efficient high-order finite element and spectral
# element discretizations for exascale applications. For more information and
# source code availability see http://github.com/ceed.
#
# The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
# a collaborative effort of two U.S. Department of Energy organizations (Office
# of Science and the National Nuclear Security Administration) responsible for
# the planning and preparation of a capable exascale ecosystem, including
# software, applications, hardware, advanced system engineering and early
# testbed platforms, in support of the nation's exascale computing imperative.
# @file
# Test Ceed Vector functionality
import os
import libceed
import numpy as np
import check
TOL = libceed.EPSILON * 256
# -------------------------------------------------------------------------------
# Utility
# -------------------------------------------------------------------------------
def check_values(ceed, x, value):
with x.array_read() as b:
for i in range(len(b)):
assert b[i] == value
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector
# -------------------------------------------------------------------------------
def test_100(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test setValue
# -------------------------------------------------------------------------------
def test_101(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
value = 1
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
for i in range(len(b)):
assert b[i] == 10 + i
x.set_value(3.0)
check_values(ceed, x, 3.0)
del x
x = ceed.Vector(n)
# Set value before setting or getting the array
x.set_value(5.0)
check_values(ceed, x, 5.0)
# -------------------------------------------------------------------------------
# Test getArrayRead state counter
# -------------------------------------------------------------------------------
def test_102(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
x.set_value(0)
# Two read accesses should not generate an error
a = x.get_array_read()
b = x.get_array_read()
x.restore_array_read()
x.restore_array_read()
# -------------------------------------------------------------------------------
# Test setting one vector from array of another vector
# -------------------------------------------------------------------------------
def test_103(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as x_array:
y.set_array(x_array, cmode=libceed.USE_POINTER)
with y.array_read() as y_array:
for i in range(n):
assert y_array[i] == 10 + i
# -------------------------------------------------------------------------------
# Test getArray to modify array
# -------------------------------------------------------------------------------
def test_104(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.zeros(n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
with x.array() as b:
b[3] = -3.14
if libceed.lib.CEED_SCALAR_TYPE == libceed.SCALAR_FP32:
assert a[3] == np.float32(-3.14)
else:
assert a[3] == -3.14
# -------------------------------------------------------------------------------
# Test creation, setting, reading, restoring, and destroying of a vector using
# CEED_MEM_DEVICE
# -------------------------------------------------------------------------------
def test_105(ceed_resource):
# Skip test for non-GPU backend
if 'gpu' in ceed_resource:
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
arr = x.get_array_read(memtype=libceed.MEM_DEVICE)
y.set_array(arr, memtype=libceed.MEM_DEVICE)
x.restore_array_read()
with y.array_read() as b:
for i in range(n):
assert b[i] == 10 + i
# -------------------------------------------------------------------------------
# Test view
# -------------------------------------------------------------------------------
def test_107(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
print(x)
stdout, stderr, ref_stdout = check.output(capsys)
assert not stderr
assert stdout == ref_stdout
# -------------------------------------------------------------------------------
# Test norms
# -------------------------------------------------------------------------------
def test_108(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
for i in range(n):
if (i % 2 == 0):
a[i] *= -1
x.set_array(a, cmode=libceed.USE_POINTER)
norm = x.norm(normtype=libceed.NORM_1)
assert abs(norm - 45.) < TOL
norm = x.norm()
assert abs(norm - np.sqrt(285.)) < TOL
norm = x.norm(normtype=libceed.NORM_MAX)
assert abs(norm - 9.) < TOL
# -------------------------------------------------------------------------------
# Test taking the reciprocal of a vector
# -------------------------------------------------------------------------------
def test_119(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.USE_POINTER)
x.reciprocal()
with x.array_read() as b:
for i in range(n):
assert abs(b[i] - 1. / (10 + i)) < TOL
# -------------------------------------------------------------------------------
# Test AXPY
# -------------------------------------------------------------------------------
def test_121(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
y.axpy(-0.5, x)
with y.array() as b:
assert np.allclose(.5 * a, b)
# -------------------------------------------------------------------------------
# Test pointwise multiplication
# -------------------------------------------------------------------------------
def test_122(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
w = ceed.Vector(n)
x = ceed.Vector(n)
y = ceed.Vector(n)
a = np.arange(0, n, dtype=ceed.scalar_type())
w.set_array(a, cmode=libceed.COPY_VALUES)
x.set_array(a, cmode=libceed.COPY_VALUES)
y.set_array(a, cmode=libceed.COPY_VALUES)
w.pointwise_mult(x, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
w.pointwise_mult(w, y)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i) < 1e-14
w.pointwise_mult(x, w)
with w.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i * i * i) < 1e-14
y.pointwise_mult(y, y)
with y.array() as b:
for i in range(len(b)):
assert abs(b[i] - i * i) < 1e-14
# -------------------------------------------------------------------------------
# Test Scale
# -------------------------------------------------------------------------------
def test_123(ceed_resource, capsys):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
a = np.arange(10, 10 + n, dtype=ceed.scalar_type())
x.set_array(a, cmode=libceed.COPY_VALUES)
x.scale(-0.5)
with x.array() as b:
assert np.allclose(-.5 * a, b)
# -------------------------------------------------------------------------------
# Test getArrayWrite to modify array
# -------------------------------------------------------------------------------
def test_124(ceed_resource):
ceed = libceed.Ceed(ceed_resource)
n = 10
x = ceed.Vector(n)
with x.array_write() as a:
for i in range(len(a)):
a[i] = 3 * i
with x.array_read() as a:
for i in range(len(a)):
assert a[i] == 3 * i
# -------------------------------------------------------------------------------
# Test modification of reshaped array
# -------------------------------------------------------------------------------
def test_199(ceed_resource):
"""Modification of reshaped array"""
ceed = libceed.Ceed(ceed_resource)
vec = ceed.Vector(12)
vec.set_value(0.0)
with vec.array(4, 3) as x:
x[...] = np.eye(4, 3)
with vec.array_read(3, 4) as x:
assert np.all(x == np.eye(4, 3).reshape(3, 4))
# -------------------------------------------------------------------------------
| en | 0.377439 | # Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at # the Lawrence Livermore National Laboratory. LLNL-CODE-734707. All Rights # reserved. See files LICENSE and NOTICE for details. # # This file is part of CEED, a collection of benchmarks, miniapps, software # libraries and APIs for efficient high-order finite element and spectral # element discretizations for exascale applications. For more information and # source code availability see http://github.com/ceed. # # The CEED research is supported by the Exascale Computing Project 17-SC-20-SC, # a collaborative effort of two U.S. Department of Energy organizations (Office # of Science and the National Nuclear Security Administration) responsible for # the planning and preparation of a capable exascale ecosystem, including # software, applications, hardware, advanced system engineering and early # testbed platforms, in support of the nation's exascale computing imperative. # @file # Test Ceed Vector functionality # ------------------------------------------------------------------------------- # Utility # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test creation, setting, reading, restoring, and destroying of a vector # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test setValue # ------------------------------------------------------------------------------- # Set value before setting or getting the array # ------------------------------------------------------------------------------- # Test getArrayRead state counter # ------------------------------------------------------------------------------- # Two read accesses should not generate an error # ------------------------------------------------------------------------------- # Test setting one vector from array of another vector # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test getArray to modify array # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test creation, setting, reading, restoring, and destroying of a vector using # CEED_MEM_DEVICE # ------------------------------------------------------------------------------- # Skip test for non-GPU backend # ------------------------------------------------------------------------------- # Test view # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test norms # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test taking the reciprocal of a vector # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test AXPY # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test pointwise multiplication # ------------------------------------------------------------------------------- # 
------------------------------------------------------------------------------- # Test Scale # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test getArrayWrite to modify array # ------------------------------------------------------------------------------- # ------------------------------------------------------------------------------- # Test modification of reshaped array # ------------------------------------------------------------------------------- Modification of reshaped array # ------------------------------------------------------------------------------- | 1.738299 | 2 |
esmvalcore/cmor/_fixes/cmip6/cesm2.py | aperezpredictia/ESMValCore | 1 | 9255 |
"""Fixes for CESM2 model."""
from ..fix import Fix
from ..shared import (add_scalar_depth_coord, add_scalar_height_coord,
add_scalar_typeland_coord, add_scalar_typesea_coord)
class Fgco2(Fix):
"""Fixes for fgco2."""
def fix_metadata(self, cubes):
"""Add depth (0m) coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_depth_coord(cube)
return cubes
class Tas(Fix):
"""Fixes for tas."""
def fix_metadata(self, cubes):
"""Add height (2m) coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_height_coord(cube)
return cubes
class Sftlf(Fix):
"""Fixes for sftlf."""
def fix_metadata(self, cubes):
"""Add typeland coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_typeland_coord(cube)
return cubes
class Sftof(Fix):
"""Fixes for sftof."""
def fix_metadata(self, cubes):
"""Add typesea coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_typesea_coord(cube)
return cubes
| """Fixes for CESM2 model."""
from ..fix import Fix
from ..shared import (add_scalar_depth_coord, add_scalar_height_coord,
add_scalar_typeland_coord, add_scalar_typesea_coord)
class Fgco2(Fix):
"""Fixes for fgco2."""
def fix_metadata(self, cubes):
"""Add depth (0m) coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_depth_coord(cube)
return cubes
class Tas(Fix):
"""Fixes for tas."""
def fix_metadata(self, cubes):
"""Add height (2m) coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_height_coord(cube)
return cubes
class Sftlf(Fix):
"""Fixes for sftlf."""
def fix_metadata(self, cubes):
"""Add typeland coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_typeland_coord(cube)
return cubes
class Sftof(Fix):
"""Fixes for sftof."""
def fix_metadata(self, cubes):
"""Add typesea coordinate.
Parameters
----------
        cubes : iris.cube.CubeList
        Returns
        -------
        iris.cube.CubeList
"""
cube = self.get_cube_from_list(cubes)
add_scalar_typesea_coord(cube)
return cubes | es | 0.09968 | Fixes for CESM2 model. Fixes for fgco2. Add depth (0m) coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube Fixes for tas. Add height (2m) coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube Fixes for sftlf. Add typeland coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube Fixes for sftof. Add typesea coordinate. Parameters ---------- cube : iris.cube.CubeList Returns ------- iris.cube.Cube | 2.292166 | 2 |
examples/GenerateSubset.py | vitay/YouTubeFacesDB | 11 | 9256 | from YouTubeFacesDB import generate_ytf_database
###############################################################################
# Create the dataset
###############################################################################
generate_ytf_database(
directory= '../data',#'/scratch/vitay/Datasets/YouTubeFaces', # Location of the YTF dataset
filename='ytfdb.h5', # Name of the HDF5 file to write to
labels=10, # Number of labels to randomly select
max_number=-1, # Maximum number of images to use
size=(100, 100), # Size of the images
color=False, # Black and white
bw_first=True, # Final shape is (1, w, h)
cropped=True # The original images are cropped to the faces
) | from YouTubeFacesDB import generate_ytf_database
###############################################################################
# Create the dataset
###############################################################################
generate_ytf_database(
directory= '../data',#'/scratch/vitay/Datasets/YouTubeFaces', # Location of the YTF dataset
filename='ytfdb.h5', # Name of the HDF5 file to write to
labels=10, # Number of labels to randomly select
max_number=-1, # Maximum number of images to use
size=(100, 100), # Size of the images
color=False, # Black and white
bw_first=True, # Final shape is (1, w, h)
cropped=True # The original images are cropped to the faces
) | de | 0.311403 | ############################################################################### # Create the dataset ############################################################################### #'/scratch/vitay/Datasets/YouTubeFaces', # Location of the YTF dataset # Name of the HDF5 file to write to # Number of labels to randomly select # Maximum number of images to use # Size of the images # Black and white # Final shape is (1, w, h) # The original images are cropped to the faces | 2.471962 | 2 |
src/waldur_mastermind/billing/tests/test_price_current.py | opennode/nodeconductor-assembly-waldur | 2 | 9257 | from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.billing.tests.utils import get_financial_report_url
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices.tests import factories as invoice_factories
from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures
@freeze_time('2017-01-10')
class PriceCurrentTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = invoice_fixtures.InvoiceFixture()
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_MONTH,
unit_price=100,
quantity=1,
)
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_DAY,
unit_price=3,
quantity=31,
)
def test_current_price(self):
self.client.force_authenticate(self.fixture.staff)
url = get_financial_report_url(self.fixture.project.customer)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['billing_price_estimate']['current'], 100 + 9 * 3)
diff = (
data['billing_price_estimate']['total']
- data['billing_price_estimate']['current']
)
self.assertEqual(diff, 22 * 3)
| from freezegun import freeze_time
from rest_framework import test
from waldur_mastermind.billing.tests.utils import get_financial_report_url
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices.tests import factories as invoice_factories
from waldur_mastermind.invoices.tests import fixtures as invoice_fixtures
@freeze_time('2017-01-10')
class PriceCurrentTest(test.APITransactionTestCase):
def setUp(self):
self.fixture = invoice_fixtures.InvoiceFixture()
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_MONTH,
unit_price=100,
quantity=1,
)
invoice_factories.InvoiceItemFactory(
invoice=self.fixture.invoice,
project=self.fixture.project,
unit=invoice_models.InvoiceItem.Units.PER_DAY,
unit_price=3,
quantity=31,
)
def test_current_price(self):
self.client.force_authenticate(self.fixture.staff)
url = get_financial_report_url(self.fixture.project.customer)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = response.json()
self.assertEqual(data['billing_price_estimate']['current'], 100 + 9 * 3)
diff = (
data['billing_price_estimate']['total']
- data['billing_price_estimate']['current']
)
self.assertEqual(diff, 22 * 3)
| none | 1 | 2.136931 | 2 |
|
tests/test_cli/test_utils/test_utils.py | ejfitzgerald/agents-aea | 0 | 9258 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for aea.cli.utils module."""
from builtins import FileNotFoundError
from typing import cast
from unittest import TestCase, mock
from click import BadParameter, ClickException
from jsonschema import ValidationError
from yaml import YAMLError
from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter
from aea.cli.utils.config import (
_init_cli_config,
get_or_create_cli_config,
update_cli_config,
)
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import _validate_config_consistency, clean_after
from aea.cli.utils.formatting import format_items
from aea.cli.utils.generic import is_readme_present
from aea.cli.utils.package_utils import (
find_item_in_distribution,
find_item_locally,
is_fingerprint_correct,
try_get_balance,
try_get_item_source_path,
try_get_item_target_path,
validate_author_name,
validate_package_name,
)
from tests.conftest import FETCHAI
from tests.test_cli.tools_for_testing import (
ConfigLoaderMock,
ContextMock,
PublicIdMock,
StopTest,
raise_stoptest,
)
AUTHOR = "author"
class FormatItemsTestCase(TestCase):
"""Test case for format_items method."""
def testformat_items_positive(self):
"""Test format_items positive result."""
items = [
{
"public_id": "author/name:version",
"name": "obj-name",
"description": "Some description",
"author": "author",
"version": "1.0",
}
]
result = format_items(items)
expected_result = (
"------------------------------\n"
"Public ID: author/name:version\n"
"Name: obj-name\n"
"Description: Some description\n"
"Author: author\n"
"Version: 1.0\n"
"------------------------------\n"
)
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemSourcePathTestCase(TestCase):
"""Test case for try_get_item_source_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_source_path_positive(self, exists_mock, join_mock):
"""Test for get_item_source_path positive result."""
result = try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("cwd", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
result = try_get_item_source_path("cwd", None, "skills", "skill-name")
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
    def test_get_item_source_path_not_exists(self, exists_mock, join_mock):
        """Test for get_item_source_path when the item does not exist."""
with self.assertRaises(ClickException):
try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemTargetPathTestCase(TestCase):
"""Test case for try_get_item_target_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
    def test_get_item_target_path_positive(self, exists_mock, join_mock):
        """Test for get_item_target_path positive result."""
result = try_get_item_target_path("packages", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("packages", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_target_path_already_exists(self, exists_mock, join_mock):
"""Test for get_item_target_path item already exists."""
with self.assertRaises(ClickException):
try_get_item_target_path("skills", AUTHOR, "skill-name", "packages_path")
class PublicIdParameterTestCase(TestCase):
"""Test case for PublicIdParameter class."""
def test_get_metavar_positive(self):
"""Test for get_metavar positive result."""
result = PublicIdParameter.get_metavar("obj", "param")
expected_result = "PUBLIC_ID"
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.config.os.path.dirname", return_value="dir-name")
@mock.patch("aea.cli.utils.config.os.path.exists", return_value=False)
@mock.patch("aea.cli.utils.config.os.makedirs")
@mock.patch("builtins.open")
class InitConfigFolderTestCase(TestCase):
"""Test case for _init_cli_config method."""
def test_init_cli_config_positive(
self, open_mock, makedirs_mock, exists_mock, dirname_mock
):
"""Test for _init_cli_config method positive result."""
_init_cli_config()
dirname_mock.assert_called_once()
exists_mock.assert_called_once_with("dir-name")
makedirs_mock.assert_called_once_with("dir-name")
@mock.patch("aea.cli.utils.config.get_or_create_cli_config")
@mock.patch("aea.cli.utils.generic.yaml.dump")
@mock.patch("builtins.open", mock.mock_open())
class UpdateCLIConfigTestCase(TestCase):
"""Test case for update_cli_config method."""
def testupdate_cli_config_positive(self, dump_mock, icf_mock):
"""Test for update_cli_config method positive result."""
update_cli_config({"some": "config"})
icf_mock.assert_called_once()
dump_mock.assert_called_once()
def _raise_yamlerror(*args):
raise YAMLError()
def _raise_file_not_found_error(*args):
raise FileNotFoundError()
@mock.patch("builtins.open", mock.mock_open())
class GetOrCreateCLIConfigTestCase(TestCase):
"""Test case for read_cli_config method."""
@mock.patch(
"aea.cli.utils.generic.yaml.safe_load", return_value={"correct": "output"}
)
def testget_or_create_cli_config_positive(self, safe_load_mock):
"""Test for get_or_create_cli_config method positive result."""
result = get_or_create_cli_config()
expected_result = {"correct": "output"}
self.assertEqual(result, expected_result)
safe_load_mock.assert_called_once()
@mock.patch("aea.cli.utils.generic.yaml.safe_load", _raise_yamlerror)
    def testget_or_create_cli_config_bad_yaml(self):
        """Test for get_or_create_cli_config method bad yaml behavior."""
with self.assertRaises(ClickException):
get_or_create_cli_config()
class CleanAfterTestCase(TestCase):
"""Test case for clean_after decorator method."""
@mock.patch("aea.cli.utils.decorators.os.path.exists", return_value=True)
@mock.patch("aea.cli.utils.decorators._cast_ctx", lambda x: x)
@mock.patch("aea.cli.utils.decorators.shutil.rmtree")
def test_clean_after_positive(self, rmtree_mock, *mocks):
"""Test clean_after decorator method for positive result."""
@clean_after
def func(click_context):
ctx = cast(Context, click_context.obj)
ctx.clean_paths.append("clean/path")
raise ClickException("Message")
with self.assertRaises(ClickException):
func(ContextMock())
rmtree_mock.assert_called_once_with("clean/path")
@mock.patch("aea.cli.utils.package_utils.click.echo", raise_stoptest)
class ValidateAuthorNameTestCase(TestCase):
"""Test case for validate_author_name method."""
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="correct_author"
)
def test_validate_author_name_positive(self, prompt_mock):
"""Test validate_author_name for positive result."""
author = "valid_author"
result = validate_author_name(author=author)
self.assertEqual(result, author)
result = validate_author_name()
self.assertEqual(result, "correct_author")
prompt_mock.assert_called_once()
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="inv@l1d_@uth&r"
)
def test_validate_author_name_negative(self, prompt_mock):
"""Test validate_author_name for negative result."""
with self.assertRaises(StopTest):
validate_author_name()
prompt_mock.return_value = "skills"
with self.assertRaises(StopTest):
validate_author_name()
class ValidatePackageNameTestCase(TestCase):
"""Test case for validate_package_name method."""
def test_validate_package_name_positive(self):
"""Test validate_package_name for positive result."""
validate_package_name("correct_name")
def test_validate_package_name_negative(self):
"""Test validate_package_name for negative result."""
with self.assertRaises(BadParameter):
validate_package_name("incorrect-name")
def _raise_validation_error(*args, **kwargs):
raise ValidationError("Message.")
class FindItemLocallyTestCase(TestCase):
"""Test case for find_item_locally method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def test_find_item_locally_bad_config(self, *mocks):
"""Test find_item_locally for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def test_find_item_locally_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class FindItemInDistributionTestCase(TestCase):
"""Test case for find_item_in_distribution method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def testfind_item_in_distribution_bad_config(self, *mocks):
"""Test find_item_in_distribution for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=False)
def testfind_item_in_distribution_not_found(self, *mocks):
"""Test find_item_in_distribution for not found result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("Cannot find skill", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def testfind_item_in_distribution_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class ValidateConfigConsistencyTestCase(TestCase):
"""Test case for _validate_config_consistency method."""
@mock.patch("aea.cli.utils.config.Path.exists", _raise_validation_error)
def test__validate_config_consistency_cant_find(self, *mocks):
"""Test _validate_config_consistency can't find result"""
with self.assertRaises(ValueError) as cm:
_validate_config_consistency(ContextMock(protocols=["some"]))
self.assertIn("Cannot find", str(cm.exception))
@mock.patch(
"aea.cli.utils.package_utils._compute_fingerprint",
return_value={"correct": "fingerprint"},
)
class IsFingerprintCorrectTestCase(TestCase):
"""Test case for adding skill with invalid fingerprint."""
def test_is_fingerprint_correct_positive(self, *mocks):
"""Test is_fingerprint_correct method for positive result."""
item_config = mock.Mock()
item_config.fingerprint = {"correct": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
result = is_fingerprint_correct("package_path", item_config)
self.assertTrue(result)
def test_is_fingerprint_correct_negative(self, *mocks):
"""Test is_fingerprint_correct method for negative result."""
item_config = mock.Mock()
item_config.fingerprint = {"incorrect": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
package_path = "package_dir"
result = is_fingerprint_correct(package_path, item_config)
self.assertFalse(result)
@mock.patch("aea.cli.config.click.ParamType")
class AEAJsonPathTypeTestCase(TestCase):
"""Test case for AEAJsonPathType class."""
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=True)
def test_convert_root_vendor_positive(self, *mocks):
"""Test for convert method with root "vendor" positive result."""
value = "vendor.author.protocols.package_name.attribute_name"
ctx_mock = ContextMock()
ctx_mock.obj = mock.Mock()
ctx_mock.obj.set_config = mock.Mock()
obj = AEAJsonPathType()
obj.convert(value, "param", ctx_mock)
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=False)
def test_convert_root_vendor_path_not_exists(self, *mocks):
"""Test for convert method with root "vendor" path not exists."""
value = "vendor.author.protocols.package_name.attribute_name"
obj = AEAJsonPathType()
with self.assertRaises(BadParameter):
obj.convert(value, "param", "ctx")
@mock.patch("aea.cli.utils.package_utils.LedgerApis", mock.MagicMock())
class TryGetBalanceTestCase(TestCase):
"""Test case for try_get_balance method."""
def test_try_get_balance_positive(self):
"""Test for try_get_balance method positive result."""
agent_config = mock.Mock()
agent_config.default_ledger_config = FETCHAI
wallet_mock = mock.Mock()
        wallet_mock.addresses = {FETCHAI: "some-address"}
try_get_balance(agent_config, wallet_mock, FETCHAI)
@mock.patch("aea.cli.utils.generic.os.path.exists", return_value=True)
class IsReadmePresentTestCase(TestCase):
"""Test case for is_readme_present method."""
def test_is_readme_present_positive(self, *mocks):
"""Test is_readme_present for positive result."""
self.assertTrue(is_readme_present("readme/path"))
| # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This test module contains the tests for aea.cli.utils module."""
from builtins import FileNotFoundError
from typing import cast
from unittest import TestCase, mock
from click import BadParameter, ClickException
from jsonschema import ValidationError
from yaml import YAMLError
from aea.cli.utils.click_utils import AEAJsonPathType, PublicIdParameter
from aea.cli.utils.config import (
_init_cli_config,
get_or_create_cli_config,
update_cli_config,
)
from aea.cli.utils.context import Context
from aea.cli.utils.decorators import _validate_config_consistency, clean_after
from aea.cli.utils.formatting import format_items
from aea.cli.utils.generic import is_readme_present
from aea.cli.utils.package_utils import (
find_item_in_distribution,
find_item_locally,
is_fingerprint_correct,
try_get_balance,
try_get_item_source_path,
try_get_item_target_path,
validate_author_name,
validate_package_name,
)
from tests.conftest import FETCHAI
from tests.test_cli.tools_for_testing import (
ConfigLoaderMock,
ContextMock,
PublicIdMock,
StopTest,
raise_stoptest,
)
AUTHOR = "author"
class FormatItemsTestCase(TestCase):
"""Test case for format_items method."""
def testformat_items_positive(self):
"""Test format_items positive result."""
items = [
{
"public_id": "author/name:version",
"name": "obj-name",
"description": "Some description",
"author": "author",
"version": "1.0",
}
]
result = format_items(items)
expected_result = (
"------------------------------\n"
"Public ID: author/name:version\n"
"Name: obj-name\n"
"Description: Some description\n"
"Author: author\n"
"Version: 1.0\n"
"------------------------------\n"
)
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemSourcePathTestCase(TestCase):
"""Test case for try_get_item_source_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_source_path_positive(self, exists_mock, join_mock):
"""Test for get_item_source_path positive result."""
result = try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("cwd", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
result = try_get_item_source_path("cwd", None, "skills", "skill-name")
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
    def test_get_item_source_path_not_exists(self, exists_mock, join_mock):
        """Test for get_item_source_path when the item does not exist."""
with self.assertRaises(ClickException):
try_get_item_source_path("cwd", AUTHOR, "skills", "skill-name")
@mock.patch("aea.cli.utils.package_utils.os.path.join", return_value="some-path")
class TryGetItemTargetPathTestCase(TestCase):
"""Test case for try_get_item_target_path method."""
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=False)
    def test_get_item_target_path_positive(self, exists_mock, join_mock):
        """Test for get_item_target_path positive result."""
result = try_get_item_target_path("packages", AUTHOR, "skills", "skill-name")
expected_result = "some-path"
self.assertEqual(result, expected_result)
join_mock.assert_called_once_with("packages", AUTHOR, "skills", "skill-name")
exists_mock.assert_called_once_with("some-path")
@mock.patch("aea.cli.utils.package_utils.os.path.exists", return_value=True)
def test_get_item_target_path_already_exists(self, exists_mock, join_mock):
"""Test for get_item_target_path item already exists."""
with self.assertRaises(ClickException):
try_get_item_target_path("skills", AUTHOR, "skill-name", "packages_path")
class PublicIdParameterTestCase(TestCase):
"""Test case for PublicIdParameter class."""
def test_get_metavar_positive(self):
"""Test for get_metavar positive result."""
result = PublicIdParameter.get_metavar("obj", "param")
expected_result = "PUBLIC_ID"
self.assertEqual(result, expected_result)
@mock.patch("aea.cli.utils.config.os.path.dirname", return_value="dir-name")
@mock.patch("aea.cli.utils.config.os.path.exists", return_value=False)
@mock.patch("aea.cli.utils.config.os.makedirs")
@mock.patch("builtins.open")
class InitConfigFolderTestCase(TestCase):
"""Test case for _init_cli_config method."""
def test_init_cli_config_positive(
self, open_mock, makedirs_mock, exists_mock, dirname_mock
):
"""Test for _init_cli_config method positive result."""
_init_cli_config()
dirname_mock.assert_called_once()
exists_mock.assert_called_once_with("dir-name")
makedirs_mock.assert_called_once_with("dir-name")
@mock.patch("aea.cli.utils.config.get_or_create_cli_config")
@mock.patch("aea.cli.utils.generic.yaml.dump")
@mock.patch("builtins.open", mock.mock_open())
class UpdateCLIConfigTestCase(TestCase):
"""Test case for update_cli_config method."""
def testupdate_cli_config_positive(self, dump_mock, icf_mock):
"""Test for update_cli_config method positive result."""
update_cli_config({"some": "config"})
icf_mock.assert_called_once()
dump_mock.assert_called_once()
def _raise_yamlerror(*args):
raise YAMLError()
def _raise_file_not_found_error(*args):
raise FileNotFoundError()
@mock.patch("builtins.open", mock.mock_open())
class GetOrCreateCLIConfigTestCase(TestCase):
"""Test case for read_cli_config method."""
@mock.patch(
"aea.cli.utils.generic.yaml.safe_load", return_value={"correct": "output"}
)
def testget_or_create_cli_config_positive(self, safe_load_mock):
"""Test for get_or_create_cli_config method positive result."""
result = get_or_create_cli_config()
expected_result = {"correct": "output"}
self.assertEqual(result, expected_result)
safe_load_mock.assert_called_once()
@mock.patch("aea.cli.utils.generic.yaml.safe_load", _raise_yamlerror)
    def testget_or_create_cli_config_bad_yaml(self):
        """Test for get_or_create_cli_config method bad yaml behavior."""
with self.assertRaises(ClickException):
get_or_create_cli_config()
class CleanAfterTestCase(TestCase):
"""Test case for clean_after decorator method."""
@mock.patch("aea.cli.utils.decorators.os.path.exists", return_value=True)
@mock.patch("aea.cli.utils.decorators._cast_ctx", lambda x: x)
@mock.patch("aea.cli.utils.decorators.shutil.rmtree")
def test_clean_after_positive(self, rmtree_mock, *mocks):
"""Test clean_after decorator method for positive result."""
@clean_after
def func(click_context):
ctx = cast(Context, click_context.obj)
ctx.clean_paths.append("clean/path")
raise ClickException("Message")
with self.assertRaises(ClickException):
func(ContextMock())
rmtree_mock.assert_called_once_with("clean/path")
@mock.patch("aea.cli.utils.package_utils.click.echo", raise_stoptest)
class ValidateAuthorNameTestCase(TestCase):
"""Test case for validate_author_name method."""
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="correct_author"
)
def test_validate_author_name_positive(self, prompt_mock):
"""Test validate_author_name for positive result."""
author = "valid_author"
result = validate_author_name(author=author)
self.assertEqual(result, author)
result = validate_author_name()
self.assertEqual(result, "correct_author")
prompt_mock.assert_called_once()
@mock.patch(
"aea.cli.utils.package_utils.click.prompt", return_value="inv@l1d_@uth&r"
)
def test_validate_author_name_negative(self, prompt_mock):
"""Test validate_author_name for negative result."""
with self.assertRaises(StopTest):
validate_author_name()
prompt_mock.return_value = "skills"
with self.assertRaises(StopTest):
validate_author_name()
class ValidatePackageNameTestCase(TestCase):
"""Test case for validate_package_name method."""
def test_validate_package_name_positive(self):
"""Test validate_package_name for positive result."""
validate_package_name("correct_name")
def test_validate_package_name_negative(self):
"""Test validate_package_name for negative result."""
with self.assertRaises(BadParameter):
validate_package_name("incorrect-name")
def _raise_validation_error(*args, **kwargs):
raise ValidationError("Message.")
class FindItemLocallyTestCase(TestCase):
"""Test case for find_item_locally method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def test_find_item_locally_bad_config(self, *mocks):
"""Test find_item_locally for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def test_find_item_locally_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_locally(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class FindItemInDistributionTestCase(TestCase):
"""Test case for find_item_in_distribution method."""
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
_raise_validation_error,
)
def testfind_item_in_distribution_bad_config(self, *mocks):
"""Test find_item_in_distribution for bad config result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("configuration file not valid", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=False)
def testfind_item_in_distribution_not_found(self, *mocks):
"""Test find_item_in_distribution for not found result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertIn("Cannot find skill", cm.exception.message)
@mock.patch("aea.cli.utils.package_utils.Path.exists", return_value=True)
@mock.patch("aea.cli.utils.package_utils.Path.open", mock.mock_open())
@mock.patch(
"aea.cli.utils.package_utils.ConfigLoader.from_configuration_type",
return_value=ConfigLoaderMock(),
)
def testfind_item_in_distribution_cant_find(self, from_conftype_mock, *mocks):
"""Test find_item_locally for can't find result."""
public_id = PublicIdMock.from_str("fetchai/echo:0.5.0")
with self.assertRaises(ClickException) as cm:
find_item_in_distribution(ContextMock(), "skill", public_id)
self.assertEqual(
cm.exception.message, "Cannot find skill with author and version specified."
)
class ValidateConfigConsistencyTestCase(TestCase):
"""Test case for _validate_config_consistency method."""
@mock.patch("aea.cli.utils.config.Path.exists", _raise_validation_error)
def test__validate_config_consistency_cant_find(self, *mocks):
"""Test _validate_config_consistency can't find result"""
with self.assertRaises(ValueError) as cm:
_validate_config_consistency(ContextMock(protocols=["some"]))
self.assertIn("Cannot find", str(cm.exception))
@mock.patch(
"aea.cli.utils.package_utils._compute_fingerprint",
return_value={"correct": "fingerprint"},
)
class IsFingerprintCorrectTestCase(TestCase):
"""Test case for adding skill with invalid fingerprint."""
def test_is_fingerprint_correct_positive(self, *mocks):
"""Test is_fingerprint_correct method for positive result."""
item_config = mock.Mock()
item_config.fingerprint = {"correct": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
result = is_fingerprint_correct("package_path", item_config)
self.assertTrue(result)
def test_is_fingerprint_correct_negative(self, *mocks):
"""Test is_fingerprint_correct method for negative result."""
item_config = mock.Mock()
item_config.fingerprint = {"incorrect": "fingerprint"}
item_config.fingerprint_ignore_patterns = []
package_path = "package_dir"
result = is_fingerprint_correct(package_path, item_config)
self.assertFalse(result)
@mock.patch("aea.cli.config.click.ParamType")
class AEAJsonPathTypeTestCase(TestCase):
"""Test case for AEAJsonPathType class."""
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=True)
def test_convert_root_vendor_positive(self, *mocks):
"""Test for convert method with root "vendor" positive result."""
value = "vendor.author.protocols.package_name.attribute_name"
ctx_mock = ContextMock()
ctx_mock.obj = mock.Mock()
ctx_mock.obj.set_config = mock.Mock()
obj = AEAJsonPathType()
obj.convert(value, "param", ctx_mock)
@mock.patch("aea.cli.utils.click_utils.Path.exists", return_value=False)
def test_convert_root_vendor_path_not_exists(self, *mocks):
"""Test for convert method with root "vendor" path not exists."""
value = "vendor.author.protocols.package_name.attribute_name"
obj = AEAJsonPathType()
with self.assertRaises(BadParameter):
obj.convert(value, "param", "ctx")
@mock.patch("aea.cli.utils.package_utils.LedgerApis", mock.MagicMock())
class TryGetBalanceTestCase(TestCase):
"""Test case for try_get_balance method."""
def test_try_get_balance_positive(self):
"""Test for try_get_balance method positive result."""
agent_config = mock.Mock()
agent_config.default_ledger_config = FETCHAI
wallet_mock = mock.Mock()
        wallet_mock.addresses = {FETCHAI: "some-address"}
try_get_balance(agent_config, wallet_mock, FETCHAI)
@mock.patch("aea.cli.utils.generic.os.path.exists", return_value=True)
class IsReadmePresentTestCase(TestCase):
"""Test case for is_readme_present method."""
def test_is_readme_present_positive(self, *mocks):
"""Test is_readme_present for positive result."""
self.assertTrue(is_readme_present("readme/path"))
| en | 0.599205 | # -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # # Copyright 2018-2019 Fetch.AI Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ------------------------------------------------------------------------------ This test module contains the tests for aea.cli.utils module. Test case for format_items method. Test format_items positive result. Test case for try_get_item_source_path method. Test for get_item_source_path positive result. Test for get_item_source_path item already exists. Test case for try_get_item_target_path method. Test for get_item_source_path positive result. Test for get_item_target_path item already exists. Test case for PublicIdParameter class. Test for get_metavar positive result. Test case for _init_cli_config method. Test for _init_cli_config method positive result. Test case for update_cli_config method. Test for update_cli_config method positive result. Test case for read_cli_config method. Test for get_or_create_cli_config method positive result. Test for rget_or_create_cli_config method bad yaml behavior. Test case for clean_after decorator method. Test clean_after decorator method for positive result. Test case for validate_author_name method. Test validate_author_name for positive result. Test validate_author_name for negative result. Test case for validate_package_name method. Test validate_package_name for positive result. Test validate_package_name for negative result. Test case for find_item_locally method. Test find_item_locally for bad config result. Test find_item_locally for can't find result. Test case for find_item_in_distribution method. Test find_item_in_distribution for bad config result. Test find_item_in_distribution for not found result. Test find_item_locally for can't find result. Test case for _validate_config_consistency method. Test _validate_config_consistency can't find result Test case for adding skill with invalid fingerprint. Test is_fingerprint_correct method for positive result. Test is_fingerprint_correct method for negative result. Test case for AEAJsonPathType class. Test for convert method with root "vendor" positive result. Test for convert method with root "vendor" path not exists. Test case for try_get_balance method. Test for try_get_balance method positive result. Test case for is_readme_present method. Test is_readme_present for positive result. | 1.573534 | 2 |
api/flat/urls.py | SanjarbekSaminjonov/musofirlar.backend | 1 | 9259 | from django.urls import path
from . import views
urlpatterns = [
path('', views.FlatListAPIView.as_view()),
path('create/', views.FlatCreateAPIView.as_view()),
path('<int:pk>/', views.FlatDetailAPIView.as_view()),
path('<int:pk>/update/', views.FlatUpdateAPIView.as_view()),
path('<int:pk>/delete/', views.FlatDeleteAPIView.as_view()),
]
| from django.urls import path
from . import views
urlpatterns = [
path('', views.FlatListAPIView.as_view()),
path('create/', views.FlatCreateAPIView.as_view()),
path('<int:pk>/', views.FlatDetailAPIView.as_view()),
path('<int:pk>/update/', views.FlatUpdateAPIView.as_view()),
path('<int:pk>/delete/', views.FlatDeleteAPIView.as_view()),
]
| none | 1 | 1.620273 | 2 |
|
hyssop_aiohttp/component/__init__.py | hsky77/hyssop | 0 | 9260 | # Copyright (C) 2020-Present the hyssop authors and contributors.
#
# This module is part of hyssop and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
'''
File created: January 1st 2021
Modified By: hsky77
Last Updated: January 7th 2021 15:30:08 pm
'''
from hyssop.project.component import ComponentTypes
from .aio_client import AioClientComponent
class AioHttpComponentTypes(ComponentTypes):
AioClient = ('aioclient', 'aio_client', 'AioClientComponent')
| # Copyright (C) 2020-Present the hyssop authors and contributors.
#
# This module is part of hyssop and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
'''
File created: January 1st 2021
Modified By: hsky77
Last Updated: January 7th 2021 15:30:08 pm
'''
from hyssop.project.component import ComponentTypes
from .aio_client import AioClientComponent
class AioHttpComponentTypes(ComponentTypes):
AioClient = ('aioclient', 'aio_client', 'AioClientComponent')
| en | 0.913848 | # Copyright (C) 2020-Present the hyssop authors and contributors. # # This module is part of hyssop and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php File created: January 1st 2021 Modified By: hsky77 Last Updated: January 7th 2021 15:30:08 pm | 1.809774 | 2 |
run_clone.py | tGhattas/IMP-seamless-cloning | 0 | 9261 |
import cv2
import getopt
import sys
from gui import MaskPainter, MaskMover
from clone import seamless_cloning, shepards_seamless_cloning
from utils import read_image, plt
from os import path
def usage():
print(
"Usage: python run_clone.py [options] \n\n\
Options: \n\
\t-h\t Flag to specify a brief help message and exits..\n\
\t-s\t(Required) Specify a source image.\n\
\t-t\t(Required) Specify a target image.\n\
\t-m\t(Optional) Specify a mask image with the object in white and other part in black, ignore this option if you plan to draw it later.\n\
    \t-x\t(Optional) Flag to use Shepard's interpolation instead of the default Poisson solver.\n\
    \t-v\t(Optional) Flag to use the gradient field of both source and target instead of the source only when the Poisson solver is used. Default is source only.")
if __name__ == '__main__':
# parse command line arguments
args = {}
try:
opts, _ = getopt.getopt(sys.argv[1:], "vxhs:t:m:p:")
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
print("See help: run_clone.py -h")
exit(2)
for o, a in opts:
if o in ("-h"):
usage()
exit()
elif o in ("-s"):
args["source"] = a
elif o in ("-t"):
args["target"] = a
elif o in ("-m"):
args["mask"] = a
elif o in ("-x"):
args["mode"] = a.lower()
elif o in ("-v"):
args["gradient_field_source_only"] = a
else:
continue
#
if ("source" not in args) or ("target" not in args):
usage()
exit()
#
    # set default mode to Poisson solver
mode = "poisson" if ("mode" not in args) else args["mode"]
gradient_field_source_only = ("gradient_field_source_only" not in args)
source = read_image(args["source"], 2)
target = read_image(args["target"], 2)
if source is None or target is None:
print('Source or target image not exist.')
exit()
if source.shape[0] > target.shape[0] or source.shape[1] > target.shape[1]:
print('Source image cannot be larger than target image.')
exit()
# draw the mask
mask_path = ""
if "mask" not in args:
print('Please highlight the object to disapparate.\n')
mp = MaskPainter(args["source"])
mask_path = mp.paint_mask()
else:
mask_path = args["mask"]
# adjust mask position for target image
print('Please move the object to desired location to apparate.\n')
mm = MaskMover(args["target"], mask_path)
offset_x, offset_y, target_mask_path = mm.move_mask()
# blend
print('Blending ...')
target_mask = read_image(target_mask_path, 1)
offset = offset_x, offset_y
cloning_tool = seamless_cloning if mode == "poisson" else shepards_seamless_cloning
kwargs = {"gradient_field_source_only": gradient_field_source_only} if mode == "poisson" else {}
blend_result = cloning_tool(source, target, target_mask, offset, **kwargs)
cv2.imwrite(path.join(path.dirname(args["source"]), 'target_result.png'),
blend_result)
plt.figure("Result"), plt.imshow(blend_result), plt.show()
print('Done.\n')
'''
running example:
        - Poisson based solver:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg
python run_clone.py -s external/source3.jpg -t external/target3.jpg -v
- Shepard's interpolation:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x
python run_clone.py -s external/source3.jpg -t external/target3.jpg -x
''' | import cv2
import getopt
import sys
from gui import MaskPainter, MaskMover
from clone import seamless_cloning, shepards_seamless_cloning
from utils import read_image, plt
from os import path
def usage():
print(
"Usage: python run_clone.py [options] \n\n\
Options: \n\
\t-h\t Flag to specify a brief help message and exits..\n\
\t-s\t(Required) Specify a source image.\n\
\t-t\t(Required) Specify a target image.\n\
\t-m\t(Optional) Specify a mask image with the object in white and other part in black, ignore this option if you plan to draw it later.\n\
    \t-x\t(Optional) Flag to use Shepard's interpolation instead of the default Poisson solver.\n\
    \t-v\t(Optional) Flag to use the gradient field of both source and target instead of the source only when the Poisson solver is used. Default is source only.")
if __name__ == '__main__':
# parse command line arguments
args = {}
try:
opts, _ = getopt.getopt(sys.argv[1:], "vxhs:t:m:p:")
except getopt.GetoptError as err:
# print help information and exit:
print(err) # will print something like "option -a not recognized"
print("See help: run_clone.py -h")
exit(2)
for o, a in opts:
if o in ("-h"):
usage()
exit()
elif o in ("-s"):
args["source"] = a
elif o in ("-t"):
args["target"] = a
elif o in ("-m"):
args["mask"] = a
elif o in ("-x"):
args["mode"] = a.lower()
elif o in ("-v"):
args["gradient_field_source_only"] = a
else:
continue
#
if ("source" not in args) or ("target" not in args):
usage()
exit()
#
# set default mode to Poisson solver
mode = "poisson" if ("mode" not in args) else args["mode"]
gradient_field_source_only = ("gradient_field_source_only" not in args)
source = read_image(args["source"], 2)
target = read_image(args["target"], 2)
if source is None or target is None:
print('Source or target image does not exist.')
exit()
if source.shape[0] > target.shape[0] or source.shape[1] > target.shape[1]:
print('Source image cannot be larger than target image.')
exit()
# draw the mask
mask_path = ""
if "mask" not in args:
print('Please highlight the object to disapparate.\n')
mp = MaskPainter(args["source"])
mask_path = mp.paint_mask()
else:
mask_path = args["mask"]
# adjust mask position for target image
print('Please move the object to desired location to apparate.\n')
mm = MaskMover(args["target"], mask_path)
offset_x, offset_y, target_mask_path = mm.move_mask()
# blend
print('Blending ...')
target_mask = read_image(target_mask_path, 1)
offset = offset_x, offset_y
cloning_tool = seamless_cloning if mode == "poisson" else shepards_seamless_cloning
kwargs = {"gradient_field_source_only": gradient_field_source_only} if mode == "poisson" else {}
blend_result = cloning_tool(source, target, target_mask, offset, **kwargs)
cv2.imwrite(path.join(path.dirname(args["source"]), 'target_result.png'),
blend_result)
plt.figure("Result"), plt.imshow(blend_result), plt.show()
print('Done.\n')
'''
running example:
- Poisson based solver:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg
python run_clone.py -s external/source3.jpg -t external/target3.jpg -v
- Shepard's interpolation:
python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x
python run_clone.py -s external/source3.jpg -t external/target3.jpg -x
''' | en | 0.344571 | # parse command line arguments # print help information and exit: # will print something like "option -a not recognized" # # # set default mode to Possion solver # draw the mask # adjust mask position for target image # blend running example: - Possion based solver: python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg python run_clone.py -s external/source3.jpg -t external/target3.jpg -v - Shepard's interpolation: python run_clone.py -s external/blend-1.jpg -t external/main-1.jpg -x python run_clone.py -s external/source3.jpg -t external/target3.jpg -x | 2.404754 | 2 |
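A minimal sketch of driving the same blending helpers programmatically instead of through the CLI above; the mask path is hypothetical, and read_image/seamless_cloning are assumed to behave exactly as they do in the script:
from clone import seamless_cloning
from utils import read_image
import cv2
source = read_image("external/blend-1.jpg", 2)   # read with flag 2, as the script does for source/target
target = read_image("external/main-1.jpg", 2)
mask = read_image("external/mask.png", 1)        # hypothetical mask file, white = region to clone
offset = (40, 60)                                # (offset_x, offset_y) of the mask inside the target
result = seamless_cloning(source, target, mask, offset, gradient_field_source_only=True)
cv2.imwrite("result.png", result)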
punkweb_boards/rest/serializers.py | Punkweb/punkweb-boards | 20 | 9262 | from rest_framework import serializers
from punkweb_boards.conf.settings import SHOUTBOX_DISABLED_TAGS
from punkweb_boards.models import (
BoardProfile,
Category,
Subcategory,
Thread,
Post,
Conversation,
Message,
Report,
Shout,
)
class BoardProfileSerializer(serializers.ModelSerializer):
post_count = serializers.ReadOnlyField()
can_shout = serializers.ReadOnlyField()
rendered_username = serializers.ReadOnlyField()
rendered_rank = serializers.ReadOnlyField()
class Meta:
model = BoardProfile
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
exclude = ("auth_req",)
class SubcategorySerializer(serializers.ModelSerializer):
last_thread = serializers.ReadOnlyField(source="last_thread.id")
last_thread_title = serializers.ReadOnlyField(source="last_thread.title")
last_thread_created = serializers.ReadOnlyField(
source="last_thread.created"
)
last_thread_user = serializers.ReadOnlyField(
source="last_thread.user.profile.rendered_username"
)
parent_name = serializers.ReadOnlyField(source="parent.name")
thread_count = serializers.ReadOnlyField()
post_count = serializers.ReadOnlyField()
can_post = serializers.SerializerMethodField()
def get_can_post(self, obj):
return obj.can_post(self.context.get("request").user)
class Meta:
model = Subcategory
exclude = ("auth_req",)
class ThreadSerializer(serializers.ModelSerializer):
last_post = serializers.ReadOnlyField(source="last_post.id")
last_post_created = serializers.ReadOnlyField(source="last_post.created")
last_post_username = serializers.ReadOnlyField(
source="last_post.user.username"
)
last_post_rendered_username = serializers.ReadOnlyField(
source="last_post.user.profile.rendered_username"
)
user_username = serializers.ReadOnlyField(source="user.username")
user_rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
user_image = serializers.ReadOnlyField(source="user.profile.avatar")
user_post_count = serializers.ReadOnlyField(
source="user.profile.post_count"
)
user_join_date = serializers.ReadOnlyField(source="user.created")
flagged = serializers.ReadOnlyField(source="reported")
posts_count = serializers.ReadOnlyField()
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Thread
fields = "__all__"
read_only_fields = (
"pinned",
"closed",
"user",
"upvoted_by",
"downvoted_by",
)
class PostSerializer(serializers.ModelSerializer):
flagged = serializers.ReadOnlyField(source="reported")
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Post
fields = "__all__"
read_only_fields = ("user", "upvoted_by", "downvoted_by")
class ConversationSerializer(serializers.ModelSerializer):
last_message = serializers.ReadOnlyField(source="last_message.id")
last_message_title = serializers.ReadOnlyField(source="last_message.title")
last_message_created = serializers.ReadOnlyField(
source="last_message.created"
)
last_message_user = serializers.ReadOnlyField(
source="last_message.user.profile.rendered_username"
)
message_count = serializers.ReadOnlyField()
class Meta:
model = Conversation
fields = "__all__"
read_only_fields = ("unread_by",)
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = "__all__"
read_only_fields = ("user",)
class ShoutSerializer(serializers.ModelSerializer):
username = serializers.ReadOnlyField(source="user.username")
rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
class Meta:
model = Shout
fields = (
"id",
"user",
"username",
"rendered_username",
"content",
"_content_rendered",
"created",
"modified",
)
read_only_fields = ("user",)
def create(self, validated_data):
for key in SHOUTBOX_DISABLED_TAGS:
key_tag = "[{}]".format(key).lower()
if (
key_tag[: len(key_tag) - 1]
in validated_data.get("content").lower()
):
raise serializers.ValidationError(
{
"notAllowed": "{} is not allowed in the shoutbox".format(
key_tag
)
}
)
return Shout.objects.create(**validated_data)
| from rest_framework import serializers
from punkweb_boards.conf.settings import SHOUTBOX_DISABLED_TAGS
from punkweb_boards.models import (
BoardProfile,
Category,
Subcategory,
Thread,
Post,
Conversation,
Message,
Report,
Shout,
)
class BoardProfileSerializer(serializers.ModelSerializer):
post_count = serializers.ReadOnlyField()
can_shout = serializers.ReadOnlyField()
rendered_username = serializers.ReadOnlyField()
rendered_rank = serializers.ReadOnlyField()
class Meta:
model = BoardProfile
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
exclude = ("auth_req",)
class SubcategorySerializer(serializers.ModelSerializer):
last_thread = serializers.ReadOnlyField(source="last_thread.id")
last_thread_title = serializers.ReadOnlyField(source="last_thread.title")
last_thread_created = serializers.ReadOnlyField(
source="last_thread.created"
)
last_thread_user = serializers.ReadOnlyField(
source="last_thread.user.profile.rendered_username"
)
parent_name = serializers.ReadOnlyField(source="parent.name")
thread_count = serializers.ReadOnlyField()
post_count = serializers.ReadOnlyField()
can_post = serializers.SerializerMethodField()
def get_can_post(self, obj):
return obj.can_post(self.context.get("request").user)
class Meta:
model = Subcategory
exclude = ("auth_req",)
class ThreadSerializer(serializers.ModelSerializer):
last_post = serializers.ReadOnlyField(source="last_post.id")
last_post_created = serializers.ReadOnlyField(source="last_post.created")
last_post_username = serializers.ReadOnlyField(
source="last_post.user.username"
)
last_post_rendered_username = serializers.ReadOnlyField(
source="last_post.user.profile.rendered_username"
)
user_username = serializers.ReadOnlyField(source="user.username")
user_rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
user_image = serializers.ReadOnlyField(source="user.profile.avatar")
user_post_count = serializers.ReadOnlyField(
source="user.profile.post_count"
)
user_join_date = serializers.ReadOnlyField(source="user.created")
flagged = serializers.ReadOnlyField(source="reported")
posts_count = serializers.ReadOnlyField()
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Thread
fields = "__all__"
read_only_fields = (
"pinned",
"closed",
"user",
"upvoted_by",
"downvoted_by",
)
class PostSerializer(serializers.ModelSerializer):
flagged = serializers.ReadOnlyField(source="reported")
can_edit = serializers.SerializerMethodField()
def get_can_edit(self, obj):
return obj.can_edit(self.context.get("request").user)
class Meta:
model = Post
fields = "__all__"
read_only_fields = ("user", "upvoted_by", "downvoted_by")
class ConversationSerializer(serializers.ModelSerializer):
last_message = serializers.ReadOnlyField(source="last_message.id")
last_message_title = serializers.ReadOnlyField(source="last_message.title")
last_message_created = serializers.ReadOnlyField(
source="last_message.created"
)
last_message_user = serializers.ReadOnlyField(
source="last_message.user.profile.rendered_username"
)
message_count = serializers.ReadOnlyField()
class Meta:
model = Conversation
fields = "__all__"
read_only_fields = ("unread_by",)
class MessageSerializer(serializers.ModelSerializer):
class Meta:
model = Message
fields = "__all__"
read_only_fields = ("user",)
class ShoutSerializer(serializers.ModelSerializer):
username = serializers.ReadOnlyField(source="user.username")
rendered_username = serializers.ReadOnlyField(
source="user.profile.rendered_username"
)
class Meta:
model = Shout
fields = (
"id",
"user",
"username",
"rendered_username",
"content",
"_content_rendered",
"created",
"modified",
)
read_only_fields = ("user",)
def create(self, validated_data):
for key in SHOUTBOX_DISABLED_TAGS:
key_tag = "[{}]".format(key).lower()
if (
key_tag[: len(key_tag) - 1]
in validated_data.get("content").lower()
):
raise serializers.ValidationError(
{
"notAllowed": "{} is not allowed in the shoutbox".format(
key_tag
)
}
)
return Shout.objects.create(**validated_data)
| none | 1 | 1.99812 | 2 |
|
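The tag filter in ShoutSerializer.create above drops the closing bracket on purpose, so partially typed or attribute-carrying tags are rejected too; a standalone sketch of that check, with SHOUTBOX_DISABLED_TAGS assumed to be a plain list of tag names:
SHOUTBOX_DISABLED_TAGS = ["img", "url"]   # assumed example value
def is_allowed(content: str) -> bool:
    for key in SHOUTBOX_DISABLED_TAGS:
        key_tag = "[{}]".format(key).lower()
        # "[img]" becomes "[img", so "[IMG]" and "[img src=...]" are both caught
        if key_tag[: len(key_tag) - 1] in content.lower():
            return False
    return True
print(is_allowed("hello [IMG]x[/img]"))   # False
print(is_allowed("plain text shout"))     # True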
runtime/components/Statistic/moving_minimum_time.py | ulise/hetida-designer | 41 | 9263 | <reponame>ulise/hetida-designer<gh_stars>10-100
from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
import numpy as np
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(
inputs={"data": DataType.Any, "t": DataType.String},
outputs={"movmin": DataType.Any},
)
def main(*, data, t):
"""entrypoint function for this component
Usage example:
>>> main(
... data = pd.Series(
... {
... "2019-08-01T15:20:00": 4.0,
... "2019-08-01T15:20:01": 5.0,
... "2019-08-01T15:20:05": 1.0,
... "2019-08-01T15:20:09": 9.0,
... }
... ),
... t = "4s"
... )["movmin"]
2019-08-01 15:20:00 4.0
2019-08-01 15:20:01 4.0
2019-08-01 15:20:05 1.0
2019-08-01 15:20:09 9.0
dtype: float64
"""
# ***** DO NOT EDIT LINES ABOVE *****
# write your code here.
try:
data.index = pd.to_datetime(data.index)
except (ValueError, TypeError):
raise TypeError("indices of data must be datetime")
data_sort = data.sort_index().dropna()
try:
return {"movmin": data_sort.rolling(t).min()}
except ValueError:
raise ValueError(f"t could not be parsed as frequency: {t}")
| from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
import numpy as np
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(
inputs={"data": DataType.Any, "t": DataType.String},
outputs={"movmin": DataType.Any},
)
def main(*, data, t):
"""entrypoint function for this component
Usage example:
>>> main(
... data = pd.Series(
... {
... "2019-08-01T15:20:00": 4.0,
... "2019-08-01T15:20:01": 5.0,
... "2019-08-01T15:20:05": 1.0,
... "2019-08-01T15:20:09": 9.0,
... }
... ),
... t = "4s"
... )["movmin"]
2019-08-01 15:20:00 4.0
2019-08-01 15:20:01 4.0
2019-08-01 15:20:05 1.0
2019-08-01 15:20:09 9.0
dtype: float64
"""
# ***** DO NOT EDIT LINES ABOVE *****
# write your code here.
try:
data.index = pd.to_datetime(data.index)
except (ValueError, TypeError):
raise TypeError("indices of data must be datetime")
data_sort = data.sort_index().dropna()
try:
return {"movmin": data_sort.rolling(t).min()}
except ValueError:
raise ValueError(f"t could not be parsed as frequency: {t}") | en | 0.387882 | # ***** DO NOT EDIT LINES BELOW ***** # These lines may be overwritten if input/output changes. entrypoint function for this component Usage example: >>> main( ... data = pd.Series( ... { ... "2019-08-01T15:20:00": 4.0, ... "2019-08-01T15:20:01": 5.0, ... "2019-08-01T15:20:05": 1.0, ... "2019-08-01T15:20:09": 9.0, ... } ... ), ... t = "4s" ... )["movmin"] 2019-08-01 15:20:00 4.0 2019-08-01 15:20:01 4.0 2019-08-01 15:20:05 1.0 2019-08-01 15:20:09 9.0 dtype: float64 # ***** DO NOT EDIT LINES ABOVE ***** # write your code here. | 2.664498 | 3 |
painter.py | MikhailNakhatovich/rooms_painting | 0 | 9264 | import cv2
import ezdxf
import numpy as np
def draw_hatch(img, entity, color, mask):
for poly_path in entity.paths.paths:
# print(poly_path.path_type_flags)
polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int)
if poly_path.path_type_flags & 1 == 1:
cv2.fillPoly(img, [polygon], color)
cv2.fillPoly(mask, [polygon], (255, 255, 255))
else:
cv2.fillPoly(img, [polygon], (255, 255, 255))
return color
def draw_line(img, entity, color, mask):
p1 = entity.dxf.start[:-1]
p2 = entity.dxf.end[:-1]
cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1)
cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2)
return color
def draw_lwpolyline(img, entity, color, mask):
polyline = []
a = np.array(entity.lwpoints.values).astype(int)
while len(a) > 0:
polyline.append((a[0], a[1]))
a = a[5:]
cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1)
cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2)
return color
def draw_arc(img, entity, color, mask):
s = entity.dxf.start_angle * np.pi / 180
e = entity.dxf.end_angle * np.pi / 180
if s > e:
s -= 2 * np.pi
d = (e - s) / (int((e - s) * 180 / np.pi) + 1)
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
angles = np.arange(s, e + d / 2, d)
x = cx + r * np.cos(angles)
y = cy + r * np.sin(angles)
points = np.column_stack((x, y)).astype(int)
cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1)
cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2)
return color
def draw_circle(img, entity, color, mask):
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), int(r), color, 1)
cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1)
return color
def draw_ellipse(img, entity, color, mask):
cx, cy = entity.dxf.center.xyz[:-1]
ma = entity.dxf.major_axis.magnitude
angle = entity.dxf.major_axis.angle_deg
mi = ma * entity.dxf.ratio
s = entity.dxf.start_param * 180 / np.pi
e = entity.dxf.end_param * 180 / np.pi
if entity.dxf.extrusion.z == -1:
s = 360 - s
e = 360 - e
cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1)
cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1)
return color
def draw_point(img, entity, color, mask):
cx, cy = entity.dxf.location.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), 0, color, 1)
cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1)
return color
draw_map = {
'HATCH': draw_hatch,
'LINE': draw_line,
'LWPOLYLINE': draw_lwpolyline,
'ARC': draw_arc,
'CIRCLE': draw_circle,
'ELLIPSE': draw_ellipse,
'POINT': draw_point,
}
def paint(in_path, out_path, config):
doc = ezdxf.readfile(in_path)
extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN']
xmin, ymin = np.floor(extmin[:-1]).astype(int)
xmax, ymax = np.ceil(extmax[:-1]).astype(int)
img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255
mask = np.zeros_like(img)
msp = doc.modelspace()
layers = config.get('layers', {})
colors = config.get('colors', {})
# print(doc.layers.entries.keys())
for layer_name, names in layers.items():
color = tuple(colors.get(layer_name, [0, 0, 0]))
for name in names:
if name not in doc.layers:
continue
entities = msp.query('*[layer=="%s"]' % name)
tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8)
for entity in entities:
if entity.DXFTYPE in draw_map:
draw_map[entity.DXFTYPE](img, entity, color, tmp)
else:
print("%s: %s" % (name, entity.DXFTYPE))
contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(mask, contours, -1, color, -1)
res, img_png = cv2.imencode('.png', cv2.flip(img, 0))
res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0))
with open(out_path, 'wb') as f:
f.write(img_png.tobytes())
with open(out_path[:-4] + "_mask.png", 'wb') as f:
f.write(mask_png.tobytes())
| import cv2
import ezdxf
import numpy as np
def draw_hatch(img, entity, color, mask):
for poly_path in entity.paths.paths:
# print(poly_path.path_type_flags)
polygon = np.array([vertex[:-1] for vertex in poly_path.vertices]).astype(int)
if poly_path.path_type_flags & 1 == 1:
cv2.fillPoly(img, [polygon], color)
cv2.fillPoly(mask, [polygon], (255, 255, 255))
else:
cv2.fillPoly(img, [polygon], (255, 255, 255))
return color
def draw_line(img, entity, color, mask):
p1 = entity.dxf.start[:-1]
p2 = entity.dxf.end[:-1]
cv2.line(img, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), color, 1)
cv2.line(mask, (int(p1[0]), int(p1[1])), (int(p2[0]), int(p2[1])), (255, 255, 255), 2)
return color
def draw_lwpolyline(img, entity, color, mask):
polyline = []
a = np.array(entity.lwpoints.values).astype(int)
while len(a) > 0:
polyline.append((a[0], a[1]))
a = a[5:]
cv2.polylines(img, [np.array(polyline)], entity.closed, color, 1)
cv2.polylines(mask, [np.array(polyline)], entity.closed, (255, 255, 255), 2)
return color
def draw_arc(img, entity, color, mask):
s = entity.dxf.start_angle * np.pi / 180
e = entity.dxf.end_angle * np.pi / 180
if s > e:
s -= 2 * np.pi
d = (e - s) / (int((e - s) * 180 / np.pi) + 1)
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
angles = np.arange(s, e + d / 2, d)
x = cx + r * np.cos(angles)
y = cy + r * np.sin(angles)
points = np.column_stack((x, y)).astype(int)
cv2.polylines(img, [points], abs(s - e) < 1e-9, color, 1)
cv2.polylines(mask, [points], abs(s - e) < 1e-9, (255, 255, 255), 2)
return color
def draw_circle(img, entity, color, mask):
r = entity.dxf.radius
cx, cy = entity.dxf.center.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), int(r), color, 1)
cv2.circle(mask, (int(cx), int(cy)), int(r), (255, 255, 255), -1)
return color
def draw_ellipse(img, entity, color, mask):
cx, cy = entity.dxf.center.xyz[:-1]
ma = entity.dxf.major_axis.magnitude
angle = entity.dxf.major_axis.angle_deg
mi = ma * entity.dxf.ratio
s = entity.dxf.start_param * 180 / np.pi
e = entity.dxf.end_param * 180 / np.pi
if entity.dxf.extrusion.z == -1:
s = 360 - s
e = 360 - e
cv2.ellipse(img, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, color, 1)
cv2.ellipse(mask, (int(cx), int(cy)), (int(ma), int(mi)), angle, s, e, (255, 255, 255), 1)
return color
def draw_point(img, entity, color, mask):
cx, cy = entity.dxf.location.xyz[:-1]
cv2.circle(img, (int(cx), int(cy)), 0, color, 1)
cv2.circle(mask, (int(cx), int(cy)), 0, (255, 255, 255), -1)
return color
draw_map = {
'HATCH': draw_hatch,
'LINE': draw_line,
'LWPOLYLINE': draw_lwpolyline,
'ARC': draw_arc,
'CIRCLE': draw_circle,
'ELLIPSE': draw_ellipse,
'POINT': draw_point,
}
def paint(in_path, out_path, config):
doc = ezdxf.readfile(in_path)
extmax, extmin = doc.header['$EXTMAX'], doc.header['$EXTMIN']
xmin, ymin = np.floor(extmin[:-1]).astype(int)
xmax, ymax = np.ceil(extmax[:-1]).astype(int)
img = np.ones((ymax + ymin, xmax + xmin, 3), np.uint8) * 255
mask = np.zeros_like(img)
msp = doc.modelspace()
layers = config.get('layers', {})
colors = config.get('colors', {})
# print(doc.layers.entries.keys())
for layer_name, names in layers.items():
color = tuple(colors.get(layer_name, [0, 0, 0]))
for name in names:
if name not in doc.layers:
continue
entities = msp.query('*[layer=="%s"]' % name)
tmp = np.zeros((ymax + ymin, xmax + xmin), np.uint8)
for entity in entities:
if entity.DXFTYPE in draw_map:
draw_map[entity.DXFTYPE](img, entity, color, tmp)
else:
print("%s: %s" % (name, entity.DXFTYPE))
contours, hierarchy = cv2.findContours(tmp, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(mask, contours, -1, color, -1)
res, img_png = cv2.imencode('.png', cv2.flip(img, 0))
res, mask_png = cv2.imencode('.png', cv2.flip(mask, 0))
with open(out_path, 'wb') as f:
f.write(img_png.tobytes())
with open(out_path[:-4] + "_mask.png", 'wb') as f:
f.write(mask_png.tobytes())
| en | 0.424168 | # print(poly_path.path_type_flags) # print(doc.layers.entries.keys()) | 2.640296 | 3 |
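The structure of the config argument of paint() is only implicit in the code above; a hypothetical example, where the group names, DXF layer names and BGR colors are all made up:
config = {
    "layers": {
        "walls": ["A-WALL", "WALL"],   # group name -> DXF layer names
        "rooms": ["A-AREA"],
    },
    "colors": {
        "walls": [0, 0, 255],          # BGR color used for the "walls" group
        "rooms": [0, 255, 0],
    },
}
paint("floorplan.dxf", "floorplan.png", config)   # also writes floorplan_mask.png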
misago/misago/users/serializers/auth.py | vascoalramos/misago-deployment | 2 | 9265 | <reponame>vascoalramos/misago-deployment
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import serializers
from ...acl.useracl import serialize_user_acl
from .user import UserSerializer
User = get_user_model()
__all__ = ["AuthenticatedUserSerializer", "AnonymousUserSerializer"]
class AuthFlags:
def get_is_authenticated(self, obj):
return bool(obj.is_authenticated)
def get_is_anonymous(self, obj):
return bool(obj.is_anonymous)
class AuthenticatedUserSerializer(UserSerializer, AuthFlags):
email = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
class Meta:
model = User
fields = UserSerializer.Meta.fields + [
"has_usable_password",
"is_hiding_presence",
"limits_private_thread_invites_to",
"unread_private_threads",
"subscribe_to_started_threads",
"subscribe_to_replied_threads",
"is_authenticated",
"is_anonymous",
]
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {}
def get_email(self, obj):
return obj.email
def get_api(self, obj):
return {
"avatar": reverse("misago:api:user-avatar", kwargs={"pk": obj.pk}),
"data_downloads": reverse(
"misago:api:user-data-downloads", kwargs={"pk": obj.pk}
),
"details": reverse("misago:api:user-details", kwargs={"pk": obj.pk}),
"change_email": reverse(
"misago:api:user-change-email", kwargs={"pk": obj.pk}
),
"change_password": reverse(
"misago:api:user-change-password", kwargs={"pk": obj.pk}
),
"edit_details": reverse(
"misago:api:user-edit-details", kwargs={"pk": obj.pk}
),
"options": reverse("misago:api:user-forum-options", kwargs={"pk": obj.pk}),
"request_data_download": reverse(
"misago:api:user-request-data-download", kwargs={"pk": obj.pk}
),
"username": reverse("misago:api:user-username", kwargs={"pk": obj.pk}),
"delete": reverse(
"misago:api:user-delete-own-account", kwargs={"pk": obj.pk}
),
}
AuthenticatedUserSerializer = AuthenticatedUserSerializer.exclude_fields(
"is_avatar_locked",
"is_blocked",
"is_followed",
"is_signature_locked",
"meta",
"signature",
"status",
)
class AnonymousUserSerializer(serializers.Serializer, AuthFlags):
id = serializers.ReadOnlyField()
acl = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {}
| from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import serializers
from ...acl.useracl import serialize_user_acl
from .user import UserSerializer
User = get_user_model()
__all__ = ["AuthenticatedUserSerializer", "AnonymousUserSerializer"]
class AuthFlags:
def get_is_authenticated(self, obj):
return bool(obj.is_authenticated)
def get_is_anonymous(self, obj):
return bool(obj.is_anonymous)
class AuthenticatedUserSerializer(UserSerializer, AuthFlags):
email = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
class Meta:
model = User
fields = UserSerializer.Meta.fields + [
"has_usable_password",
"is_hiding_presence",
"limits_private_thread_invites_to",
"unread_private_threads",
"subscribe_to_started_threads",
"subscribe_to_replied_threads",
"is_authenticated",
"is_anonymous",
]
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {}
def get_email(self, obj):
return obj.email
def get_api(self, obj):
return {
"avatar": reverse("misago:api:user-avatar", kwargs={"pk": obj.pk}),
"data_downloads": reverse(
"misago:api:user-data-downloads", kwargs={"pk": obj.pk}
),
"details": reverse("misago:api:user-details", kwargs={"pk": obj.pk}),
"change_email": reverse(
"misago:api:user-change-email", kwargs={"pk": obj.pk}
),
"change_password": reverse(
"misago:api:user-change-password", kwargs={"pk": obj.pk}
),
"edit_details": reverse(
"misago:api:user-edit-details", kwargs={"pk": obj.pk}
),
"options": reverse("misago:api:user-forum-options", kwargs={"pk": obj.pk}),
"request_data_download": reverse(
"misago:api:user-request-data-download", kwargs={"pk": obj.pk}
),
"username": reverse("misago:api:user-username", kwargs={"pk": obj.pk}),
"delete": reverse(
"misago:api:user-delete-own-account", kwargs={"pk": obj.pk}
),
}
AuthenticatedUserSerializer = AuthenticatedUserSerializer.exclude_fields(
"is_avatar_locked",
"is_blocked",
"is_followed",
"is_signature_locked",
"meta",
"signature",
"status",
)
class AnonymousUserSerializer(serializers.Serializer, AuthFlags):
id = serializers.ReadOnlyField()
acl = serializers.SerializerMethodField()
is_authenticated = serializers.SerializerMethodField()
is_anonymous = serializers.SerializerMethodField()
def get_acl(self, obj):
acl = self.context.get("acl")
if acl:
return serialize_user_acl(acl)
return {} | none | 1 | 2.260031 | 2 |
|
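A hedged sketch of using the serializer above inside a DRF view; user_acl stands for whatever ACL dict the view has resolved for request.user, and only the "acl" context key is actually read by get_acl:
serializer = AuthenticatedUserSerializer(
    request.user,
    context={"acl": user_acl, "request": request},
)
payload = serializer.data   # includes the email and is_authenticated / is_anonymous flags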
shop/models.py | mohammadanarul/Ecommerce-Django-YT | 0 | 9266 | <reponame>mohammadanarul/Ecommerce-Django-YT<gh_stars>0
from django.db import models
from ckeditor.fields import RichTextField
from taggit.managers import TaggableManager
# Create your models here.
from mptt.models import MPTTModel, TreeForeignKey
class Category(MPTTModel):
name = models.CharField(max_length=50, unique=True)
parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class MPTTMeta:
order_insertion_by = ['name']
class Brand(models.Model):
name = models.CharField(max_length=50)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Product(models.Model):
STATUS_CHOICES = (
('NONE', 'NONE'),
('NEW', 'NEW'),
('SALE', 'SALE'),
('HOT', 'HOT'),
)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=5, decimal_places=2)
short_description = RichTextField()
tags = TaggableManager()
description = RichTextField()
specification = RichTextField()
image = models.ImageField(upload_to='product/')
category = models.ForeignKey(Category, on_delete=models.CASCADE)
brand = models.ForeignKey(Brand, on_delete=models.CASCADE)
stack = models.IntegerField(default=5)
status = models.CharField(max_length=5, choices=STATUS_CHOICES, default='NONE')
is_fetured = models.BooleanField(default=False)
is_special = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class ProductImages(models.Model):
category = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images')
image = models.ImageField(upload_to='products/') | from django.db import models
from ckeditor.fields import RichTextField
from taggit.managers import TaggableManager
# Create your models here.
from mptt.models import MPTTModel, TreeForeignKey
class Category(MPTTModel):
name = models.CharField(max_length=50, unique=True)
parent = TreeForeignKey('self', on_delete=models.CASCADE, null=True, blank=True, related_name='children')
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class MPTTMeta:
order_insertion_by = ['name']
class Brand(models.Model):
name = models.CharField(max_length=50)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Product(models.Model):
STATUS_CHOICES = (
('NONE', 'NONE'),
('NEW', 'NEW'),
('SALE', 'SALE'),
('HOT', 'HOT'),
)
title = models.CharField(max_length=50)
price = models.DecimalField(max_digits=5, decimal_places=2)
short_description = RichTextField()
tags = TaggableManager()
description = RichTextField()
specification = RichTextField()
image = models.ImageField(upload_to='product/')
category = models.ForeignKey(Category, on_delete=models.CASCADE)
brand = models.ForeignKey(Brand, on_delete=models.CASCADE)
stack = models.IntegerField(default=5)
status = models.CharField(max_length=5, choices=STATUS_CHOICES, default='NONE')
is_fetured = models.BooleanField(default=False)
is_special = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class ProductImages(models.Model):
category = models.ForeignKey(Product, on_delete=models.CASCADE, related_name='images')
image = models.ImageField(upload_to='products/') | en | 0.963489 | # Create your models here. | 2.100363 | 2 |
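A small usage sketch for the models above, e.g. from a Django shell; the names are made up and the image path is assumed to point at an existing file under MEDIA_ROOT:
electronics = Category.objects.create(name="Electronics")
phones = Category.objects.create(name="Phones", parent=electronics)
acme = Brand.objects.create(name="Acme")
product = Product.objects.create(
    title="Acme One",
    price="199.99",                      # fits max_digits=5, decimal_places=2
    short_description="<p>Compact phone</p>",
    description="<p>Full description</p>",
    specification="<p>Specs</p>",
    image="product/acme-one.jpg",        # assumed existing media file
    category=phones,
    brand=acme,
    status="NEW",
)
product.tags.add("phone", "acme")        # django-taggit manager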
supriya/patterns/NoteEvent.py | deeuu/supriya | 0 | 9267 | <gh_stars>0
import uuid
import supriya.commands
import supriya.realtime
from supriya.patterns.Event import Event
class NoteEvent(Event):
### CLASS VARIABLES ###
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
add_action=None,
delta=None,
duration=None,
is_stop=True,
synthdef=None,
target_node=None,
uuid=None,
**settings,
):
if add_action is not None:
add_action = supriya.AddAction.from_expr(add_action)
Event.__init__(
self,
add_action=add_action,
delta=delta,
duration=duration,
is_stop=bool(is_stop),
synthdef=synthdef,
target_node=target_node,
uuid=uuid,
**settings,
)
### PRIVATE METHODS ###
def _perform_nonrealtime(self, session, uuids, offset, maximum_offset=None):
import supriya.assets.synthdefs
settings = self.settings.copy() # Do not mutate in place.
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
synth_uuid = self.get("uuid", uuid.uuid4())
is_stop = self.get("is_stop")
duration = self.get("duration")
if duration is None:
duration = 1
if "duration" in settings:
duration = settings.pop("duration")
dictionaries = self._expand(
settings, synthdef, uuids, realtime=False, synth_parameters_only=True
)
if synth_uuid not in uuids:
# Begin a Pbind or Pmono synth
target_node = self["target_node"]
if isinstance(target_node, uuid.UUID) and target_node in uuids:
target_node = uuids[target_node]
prototype = (supriya.nonrealtime.Session, supriya.nonrealtime.Node)
if not isinstance(target_node, prototype):
target_node = session
synths = []
with session.at(offset):
for dictionary in dictionaries:
synth = target_node.add_synth(
add_action=self["add_action"],
duration=duration,
synthdef=synthdef,
**dictionary,
)
synths.append(synth)
if not is_stop:
uuids[synth_uuid] = tuple(synths)
else:
# Extend and make settings on a Pmono synth
synths = uuids[synth_uuid]
stop_offset = offset + duration
for synth, dictionary in zip(synths, dictionaries):
duration = stop_offset - synth.start_offset
synth.set_duration(duration)
with session.at(offset):
for key, value in dictionary.items():
synth[key] = value
return offset + max(self.delta, self.get("duration", 0))
def _perform_realtime(self, index=0, server=None, timestamp=0, uuids=None):
import supriya.assets.synthdefs
import supriya.patterns
synth_uuid = self.get("uuid") or uuid.uuid4()
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
is_stop = self.get("is_stop")
duration = self["duration"]
if duration is None:
duration = 1
dictionaries = self._expand(self.settings, synthdef, uuids)
first_visit = False
if synth_uuid not in uuids:
first_visit = True
node_ids = {
server.node_id_allocator.allocate_node_id(): None
for _ in range(len(dictionaries))
}
uuids[synth_uuid] = node_ids
start_product = self._build_start_bundle(
dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
)
if self.get("duration"):
if is_stop:
stop_product = self._build_stop_bundle(
index, synth_uuid, synthdef, timestamp, uuids
)
else:
stop_product = supriya.patterns.EventProduct(
event=None,
index=index,
is_stop=True,
requests=(),
timestamp=timestamp + duration,
uuid=None,
)
return [start_product, stop_product]
else:
uuids.pop(synth_uuid)
return [start_product]
def _build_start_bundle(
self, dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
):
import supriya.patterns
requests = []
node_ids = uuids[synth_uuid]
if first_visit:
for node_id, dictionary in zip(node_ids, dictionaries):
add_action = dictionary.pop("add_action")
target_node = dictionary.pop("target_node")
if target_node is None:
target_node = 1
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.SynthNewRequest(
add_action=add_action,
node_id=node_id,
synthdef=synthdef,
target_node_id=target_node,
**synth_kwargs,
)
requests.append(request)
synth = supriya.realtime.Synth(synthdef)
node_ids[node_id] = synth
else:
for node_id, dictionary in zip(node_ids, dictionaries):
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.NodeSetRequest(
node_id=node_id, **synth_kwargs
)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=False,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product
def _build_stop_bundle(self, index, synth_uuid, synthdef, timestamp, uuids):
import supriya.patterns
import supriya.synthdefs
duration = self["duration"]
if duration is None:
duration = 1
requests = []
timestamp = timestamp + duration
node_ids = sorted(uuids[synth_uuid])
if synthdef.has_gate:
for node_id in node_ids:
request = supriya.commands.NodeSetRequest(node_id=node_id, gate=0)
requests.append(request)
elif any(x >= supriya.DoneAction.FREE_SYNTH for x in synthdef.done_actions):
pass
else:
request = supriya.commands.NodeFreeRequest(node_ids=node_ids)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=True,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product
| import uuid
import supriya.commands
import supriya.realtime
from supriya.patterns.Event import Event
class NoteEvent(Event):
### CLASS VARIABLES ###
__slots__ = ()
### INITIALIZER ###
def __init__(
self,
add_action=None,
delta=None,
duration=None,
is_stop=True,
synthdef=None,
target_node=None,
uuid=None,
**settings,
):
if add_action is not None:
add_action = supriya.AddAction.from_expr(add_action)
Event.__init__(
self,
add_action=add_action,
delta=delta,
duration=duration,
is_stop=bool(is_stop),
synthdef=synthdef,
target_node=target_node,
uuid=uuid,
**settings,
)
### PRIVATE METHODS ###
def _perform_nonrealtime(self, session, uuids, offset, maximum_offset=None):
import supriya.assets.synthdefs
settings = self.settings.copy() # Do not mutate in place.
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
synth_uuid = self.get("uuid", uuid.uuid4())
is_stop = self.get("is_stop")
duration = self.get("duration")
if duration is None:
duration = 1
if "duration" in settings:
duration = settings.pop("duration")
dictionaries = self._expand(
settings, synthdef, uuids, realtime=False, synth_parameters_only=True
)
if synth_uuid not in uuids:
# Begin a Pbind or Pmono synth
target_node = self["target_node"]
if isinstance(target_node, uuid.UUID) and target_node in uuids:
target_node = uuids[target_node]
prototype = (supriya.nonrealtime.Session, supriya.nonrealtime.Node)
if not isinstance(target_node, prototype):
target_node = session
synths = []
with session.at(offset):
for dictionary in dictionaries:
synth = target_node.add_synth(
add_action=self["add_action"],
duration=duration,
synthdef=synthdef,
**dictionary,
)
synths.append(synth)
if not is_stop:
uuids[synth_uuid] = tuple(synths)
else:
# Extend and make settings on a Pmono synth
synths = uuids[synth_uuid]
stop_offset = offset + duration
for synth, dictionary in zip(synths, dictionaries):
duration = stop_offset - synth.start_offset
synth.set_duration(duration)
with session.at(offset):
for key, value in dictionary.items():
synth[key] = value
return offset + max(self.delta, self.get("duration", 0))
def _perform_realtime(self, index=0, server=None, timestamp=0, uuids=None):
import supriya.assets.synthdefs
import supriya.patterns
synth_uuid = self.get("uuid") or uuid.uuid4()
synthdef = self.get("synthdef", supriya.assets.synthdefs.default)
synthdef = synthdef or supriya.assets.synthdefs.default
is_stop = self.get("is_stop")
duration = self["duration"]
if duration is None:
duration = 1
dictionaries = self._expand(self.settings, synthdef, uuids)
first_visit = False
if synth_uuid not in uuids:
first_visit = True
node_ids = {
server.node_id_allocator.allocate_node_id(): None
for _ in range(len(dictionaries))
}
uuids[synth_uuid] = node_ids
start_product = self._build_start_bundle(
dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
)
if self.get("duration"):
if is_stop:
stop_product = self._build_stop_bundle(
index, synth_uuid, synthdef, timestamp, uuids
)
else:
stop_product = supriya.patterns.EventProduct(
event=None,
index=index,
is_stop=True,
requests=(),
timestamp=timestamp + duration,
uuid=None,
)
return [start_product, stop_product]
else:
uuids.pop(synth_uuid)
return [start_product]
def _build_start_bundle(
self, dictionaries, first_visit, index, synth_uuid, synthdef, timestamp, uuids
):
import supriya.patterns
requests = []
node_ids = uuids[synth_uuid]
if first_visit:
for node_id, dictionary in zip(node_ids, dictionaries):
add_action = dictionary.pop("add_action")
target_node = dictionary.pop("target_node")
if target_node is None:
target_node = 1
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.SynthNewRequest(
add_action=add_action,
node_id=node_id,
synthdef=synthdef,
target_node_id=target_node,
**synth_kwargs,
)
requests.append(request)
synth = supriya.realtime.Synth(synthdef)
node_ids[node_id] = synth
else:
for node_id, dictionary in zip(node_ids, dictionaries):
synth_kwargs = {
key: value
for key, value in dictionary.items()
if key in synthdef.parameter_names
}
request = supriya.commands.NodeSetRequest(
node_id=node_id, **synth_kwargs
)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=False,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product
def _build_stop_bundle(self, index, synth_uuid, synthdef, timestamp, uuids):
import supriya.patterns
import supriya.synthdefs
duration = self["duration"]
if duration is None:
duration = 1
requests = []
timestamp = timestamp + duration
node_ids = sorted(uuids[synth_uuid])
if synthdef.has_gate:
for node_id in node_ids:
request = supriya.commands.NodeSetRequest(node_id=node_id, gate=0)
requests.append(request)
elif any(x >= supriya.DoneAction.FREE_SYNTH for x in synthdef.done_actions):
pass
else:
request = supriya.commands.NodeFreeRequest(node_ids=node_ids)
requests.append(request)
event_product = supriya.patterns.EventProduct(
event=self,
index=index,
is_stop=True,
requests=requests,
timestamp=timestamp,
uuid=synth_uuid,
)
return event_product | en | 0.528668 | ### CLASS VARIABLES ### ### INITIALIZER ### ### PRIVATE METHODS ### # Do not mutate in place. # Begin a Pbind or Pmono synth # Extend and make settings on a Pmono synth | 2.207112 | 2 |
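A minimal construction sketch for the event class above; frequency and amplitude are arbitrary **settings and are assumed to map onto parameters of whichever SynthDef ends up being used (the default SynthDef when none is given):
event = NoteEvent(
    delta=0.5,        # time until the next event in the pattern
    duration=1.0,     # how long the synth sounds before the stop bundle
    frequency=440,    # forwarded through **settings
    amplitude=0.1,
)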
emoji_utils.py | ApacheAA/LastSeen | 0 | 9268 | <filename>emoji_utils.py
# unicode digit emojis
# digits from '0' to '9'
zero_digit_code = zd = 48
# excluded digits
excl_digits = [2, 4, 5, 7]
# unicode digit keycap
udkc = '\U0000fe0f\U000020e3'
hours_0_9 = [chr(i) + udkc for i in range(zd, zd + 10)
if i - zd not in excl_digits]
# number '10' emoji
hours_0_9.append('\U0001f51f')
# custom emojis from '11' to '23'
hours_11_23 = [str(i) for i in range(11, 24)]
vote = ('PLUS', 'MINUS')
edit = '\U0001F4DD' | <filename>emoji_utils.py
# unicode digit emojis
# digits from '0' to '9'
zero_digit_code = zd = 48
# excluded digits
excl_digits = [2, 4, 5, 7]
# unicode digit keycap
udkc = '\U0000fe0f\U000020e3'
hours_0_9 = [chr(i) + udkc for i in range(zd, zd + 10)
if i - zd not in excl_digits]
# number '10' emoji
hours_0_9.append('\U0001f51f')
# custom emojis from '11' to '23'
hours_11_23 = [str(i) for i in range(11, 24)]
vote = ('PLUS', 'MINUS')
edit = '\U0001F4DD' | en | 0.376239 | # unicode digit emojis # digits from '0' to '9' # excluded digits # unicode digit keycap # number '10' emoji # custom emojis from '11' to '23' | 2.834555 | 3 |
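What the module above evaluates to, as a short sanity check; digits 2, 4, 5 and 7 are excluded, so hours_0_9 holds six keycap emojis plus the '10' emoji:
print(len(hours_0_9), hours_0_9[0])   # 7 0️⃣
print(hours_11_23[:3])                # ['11', '12', '13']
print(vote, edit)                     # ('PLUS', 'MINUS') 📝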
TFBertForMaskedLM/main.py | Sniper970119/ExampleForTransformers | 3 | 9269 | <filename>TFBertForMaskedLM/main.py
# -*- coding:utf-8 -*-
"""
┏┛ ┻━━━━━┛ ┻┓
┃ ┃
┃ ━ ┃
┃ ┳┛ ┗┳ ┃
┃ ┃
┃ ┻ ┃
┃ ┃
┗━┓ ┏━━━┛
┃ ┃ 神兽保佑
┃ ┃ 代码无BUG!
┃ ┗━━━━━━━━━┓
┃CREATE BY SNIPER┣┓
┃ ┏┛
┗━┓ ┓ ┏━━━┳ ┓ ┏━┛
┃ ┫ ┫ ┃ ┫ ┫
┗━┻━┛ ┗━┻━┛
"""
import tensorflow as tf
import numpy as np
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
from transformers import BertTokenizer, TFBertForMaskedLM
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForMaskedLM.from_pretrained('bert-base-cased', return_dict=True)
inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][6])
o1 = tokenizer.decode(int(output))
inputs = tokenizer("The capital of [MASK] is BeiJing.", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][4])
o2 = tokenizer.decode(int(output))
print(o1, o2)  # show the predicted tokens for both masked sentences
| <filename>TFBertForMaskedLM/main.py
# -*- coding:utf-8 -*-
"""
┏┛ ┻━━━━━┛ ┻┓
┃ ┃
┃ ━ ┃
┃ ┳┛ ┗┳ ┃
┃ ┃
┃ ┻ ┃
┃ ┃
┗━┓ ┏━━━┛
┃ ┃ 神兽保佑
┃ ┃ 代码无BUG!
┃ ┗━━━━━━━━━┓
┃CREATE BY SNIPER┣┓
┃ ┏┛
┗━┓ ┓ ┏━━━┳ ┓ ┏━┛
┃ ┫ ┫ ┃ ┫ ┫
┗━┻━┛ ┗━┻━┛
"""
import tensorflow as tf
import numpy as np
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
from transformers import BertTokenizer, TFBertForMaskedLM
tokenizer = BertTokenizer.from_pretrained('bert-base-cased')
model = TFBertForMaskedLM.from_pretrained('bert-base-cased', return_dict=True)
inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][6])
o1 = tokenizer.decode(int(output))
inputs = tokenizer("The capital of [MASK] is BeiJing.", return_tensors="tf")
outputs = model(inputs)
logits = outputs.logits
output = np.argmax(logits[0][4])
o2 = tokenizer.decode(int(output))
print(o1, o2)  # show the predicted tokens for both masked sentences
| ja | 0.492424 | # -*- coding:utf-8 -*- ┏┛ ┻━━━━━┛ ┻┓ ┃ ┃ ┃ ━ ┃ ┃ ┳┛ ┗┳ ┃ ┃ ┃ ┃ ┻ ┃ ┃ ┃ ┗━┓ ┏━━━┛ ┃ ┃ 神兽保佑 ┃ ┃ 代码无BUG! ┃ ┗━━━━━━━━━┓ ┃CREATE BY SNIPER┣┓ ┃ ┏┛ ┗━┓ ┓ ┏━━━┳ ┓ ┏━┛ ┃ ┫ ┫ ┃ ┫ ┫ ┗━┻━┛ ┗━┻━┛ | 2.655953 | 3 |
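The script above only decodes the argmax token; a small extension sketch that inspects the top-5 candidates for the first [MASK], reusing the tokenizer and model defined above (mask position 6 matches the script's tokenization):
import tensorflow as tf
inputs = tokenizer("The capital of France is [MASK].", return_tensors="tf")
logits = model(inputs).logits
top5 = tf.math.top_k(logits[0][6], k=5).indices.numpy()
print([tokenizer.decode(int(i)) for i in top5])   # expected to contain "Paris"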
mirari/TCS/migrations/0042_auto_20190726_0145.py | gcastellan0s/mirariapp | 0 | 9270 | # Generated by Django 2.0.5 on 2019-07-26 06:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('TCS', '0041_auto_20190726_0030'),
]
operations = [
migrations.AlterModelOptions(
name='modelo',
options={'default_permissions': [], 'ordering': ['-id'], 'permissions': [('Can_View__Modelo', 'Ve modelos'), ('Can_Create__Modelo', 'Crea modelos'), ('Can_Update__Modelo', 'Modifica modelos'), ('Can_Delete__Modelo', 'Elimina modelos'), ('Can_Change__ModelTCS', 'Modifica modelos de equipo')], 'verbose_name': 'Modelo', 'verbose_name_plural': 'Modelos'},
),
]
| # Generated by Django 2.0.5 on 2019-07-26 06:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('TCS', '0041_auto_20190726_0030'),
]
operations = [
migrations.AlterModelOptions(
name='modelo',
options={'default_permissions': [], 'ordering': ['-id'], 'permissions': [('Can_View__Modelo', 'Ve modelos'), ('Can_Create__Modelo', 'Crea modelos'), ('Can_Update__Modelo', 'Modifica modelos'), ('Can_Delete__Modelo', 'Elimina modelos'), ('Can_Change__ModelTCS', 'Modifica modelos de equipo')], 'verbose_name': 'Modelo', 'verbose_name_plural': 'Modelos'},
),
]
| en | 0.696614 | # Generated by Django 2.0.5 on 2019-07-26 06:45 | 1.588936 | 2 |
kornia/geometry/calibration/undistort.py | belltailjp/kornia | 1 | 9271 | import torch
from kornia.geometry.linalg import transform_points
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid
from .distort import distort_points, tilt_projection
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate for lens distortion a set of 2D image points.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
Example:
>>> _ = torch.manual_seed(0)
>>> x = torch.rand(1, 4, 2)
>>> K = torch.eye(3)[None]
>>> dist = torch.rand(1, 4)
>>> undistort_points(x, K, dist)
tensor([[[-0.1513, -0.1165],
[ 0.0711, 0.1100],
[-0.0697, 0.0228],
[-0.1843, -0.1606]]])
"""
if points.dim() < 2 and points.shape[-1] != 2:
raise ValueError(f'points shape is invalid. Got {points.shape}.')
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")
# Adding zeros to obtain vector with 14 coeffs.
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)
# Iteratively undistort points
x0, y0 = x, y
for _ in range(5):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
# Convert points from normalized camera coordinates to pixel coordinates
x = fx * x + cx
y = fy * y + cy
return torch.stack([x, y], -1)
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
Example:
>>> img = torch.rand(1, 3, 5, 5)
>>> K = torch.eye(3)[None]
>>> dist_coeff = torch.rand(4)
>>> out = undistort_image(img, K, dist_coeff)
>>> out.shape
torch.Size([1, 3, 5, 5])
"""
if len(image.shape) < 2:
raise ValueError(f"Image shape is invalid. Got: {image.shape}.")
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.')
if not image.is_floating_point():
raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.')
B, _, rows, cols = image.shape
# Create point coordinates for each pixel of the image
xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype)
pts = xy_grid.reshape(-1, 2) # (rows*cols)x2 matrix of pixel coordinates
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
return out
| import torch
from kornia.geometry.linalg import transform_points
from kornia.geometry.transform import remap
from kornia.utils import create_meshgrid
from .distort import distort_points, tilt_projection
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384
def undistort_points(points: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate for lens distortion a set of 2D image points.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
points: Input image points with shape :math:`(*, N, 2)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted 2D points with shape :math:`(*, N, 2)`.
Example:
>>> _ = torch.manual_seed(0)
>>> x = torch.rand(1, 4, 2)
>>> K = torch.eye(3)[None]
>>> dist = torch.rand(1, 4)
>>> undistort_points(x, K, dist)
tensor([[[-0.1513, -0.1165],
[ 0.0711, 0.1100],
[-0.0697, 0.0228],
[-0.1843, -0.1606]]])
"""
if points.dim() < 2 and points.shape[-1] != 2:
raise ValueError(f'points shape is invalid. Got {points.shape}.')
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f"Invalid number of distortion coefficients. Got {dist.shape[-1]}")
# Adding zeros to obtain vector with 14 coeffs.
if dist.shape[-1] < 14:
dist = torch.nn.functional.pad(dist, [0, 14 - dist.shape[-1]])
# Convert 2D points from pixels to normalized camera coordinates
cx: torch.Tensor = K[..., 0:1, 2] # principal point in x (Bx1)
cy: torch.Tensor = K[..., 1:2, 2] # principal point in y (Bx1)
fx: torch.Tensor = K[..., 0:1, 0] # focal in x (Bx1)
fy: torch.Tensor = K[..., 1:2, 1] # focal in y (Bx1)
# This is equivalent to K^-1 [u,v,1]^T
x: torch.Tensor = (points[..., 0] - cx) / fx # (BxN - Bx1)/Bx1 -> BxN
y: torch.Tensor = (points[..., 1] - cy) / fy # (BxN - Bx1)/Bx1 -> BxN
# Compensate for tilt distortion
if torch.any(dist[..., 12] != 0) or torch.any(dist[..., 13] != 0):
inv_tilt = tilt_projection(dist[..., 12], dist[..., 13], True)
# Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1])
x, y = transform_points(inv_tilt, torch.stack([x, y], dim=-1)).unbind(-1)
# Iteratively undistort points
x0, y0 = x, y
for _ in range(5):
r2 = x * x + y * y
inv_rad_poly = (1 + dist[..., 5:6] * r2 + dist[..., 6:7] * r2 * r2 + dist[..., 7:8] * r2 ** 3) / (
1 + dist[..., 0:1] * r2 + dist[..., 1:2] * r2 * r2 + dist[..., 4:5] * r2 ** 3
)
deltaX = (
2 * dist[..., 2:3] * x * y
+ dist[..., 3:4] * (r2 + 2 * x * x)
+ dist[..., 8:9] * r2
+ dist[..., 9:10] * r2 * r2
)
deltaY = (
dist[..., 2:3] * (r2 + 2 * y * y)
+ 2 * dist[..., 3:4] * x * y
+ dist[..., 10:11] * r2
+ dist[..., 11:12] * r2 * r2
)
x = (x0 - deltaX) * inv_rad_poly
y = (y0 - deltaY) * inv_rad_poly
# Convert points from normalized camera coordinates to pixel coordinates
x = fx * x + cx
y = fy * y + cy
return torch.stack([x, y], -1)
# Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287
def undistort_image(image: torch.Tensor, K: torch.Tensor, dist: torch.Tensor) -> torch.Tensor:
r"""Compensate an image for lens distortion.
Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`,
tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)`
distortion models are considered in this function.
Args:
image: Input image with shape :math:`(*, C, H, W)`.
K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`.
dist: Distortion coefficients
:math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is
a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`.
Returns:
Undistorted image with shape :math:`(*, C, H, W)`.
Example:
>>> img = torch.rand(1, 3, 5, 5)
>>> K = torch.eye(3)[None]
>>> dist_coeff = torch.rand(4)
>>> out = undistort_image(img, K, dist_coeff)
>>> out.shape
torch.Size([1, 3, 5, 5])
"""
if len(image.shape) < 2:
raise ValueError(f"Image shape is invalid. Got: {image.shape}.")
if K.shape[-2:] != (3, 3):
raise ValueError(f'K matrix shape is invalid. Got {K.shape}.')
if dist.shape[-1] not in [4, 5, 8, 12, 14]:
raise ValueError(f'Invalid number of distortion coefficients. Got {dist.shape[-1]}.')
if not image.is_floating_point():
raise ValueError(f'Invalid input image data type. Input should be float. Got {image.dtype}.')
B, _, rows, cols = image.shape
# Create point coordinates for each pixel of the image
xy_grid: torch.Tensor = create_meshgrid(rows, cols, False, image.device, image.dtype)
pts = xy_grid.reshape(-1, 2) # (rows*cols)x2 matrix of pixel coordinates
# Distort points and define maps
ptsd: torch.Tensor = distort_points(pts, K, dist) # Bx(rows*cols)x2
mapx: torch.Tensor = ptsd[..., 0].reshape(B, rows, cols) # B x rows x cols, float
mapy: torch.Tensor = ptsd[..., 1].reshape(B, rows, cols) # B x rows x cols, float
# Remap image to undistort
out = remap(image, mapx, mapy, align_corners=True)
return out
| en | 0.686688 | # Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L384 Compensate for lens distortion a set of 2D image points. Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`, tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function. Args: points: Input image points with shape :math:`(*, N, 2)`. K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`. dist: Distortion coefficients :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`. Returns: Undistorted 2D points with shape :math:`(*, N, 2)`. Example: >>> _ = torch.manual_seed(0) >>> x = torch.rand(1, 4, 2) >>> K = torch.eye(3)[None] >>> dist = torch.rand(1, 4) >>> undistort_points(x, K, dist) tensor([[[-0.1513, -0.1165], [ 0.0711, 0.1100], [-0.0697, 0.0228], [-0.1843, -0.1606]]]) # Adding zeros to obtain vector with 14 coeffs. # Convert 2D points from pixels to normalized camera coordinates # princial point in x (Bx1) # princial point in y (Bx1) # focal in x (Bx1) # focal in y (Bx1) # This is equivalent to K^-1 [u,v,1]^T # (BxN - Bx1)/Bx1 -> BxN # (BxN - Bx1)/Bx1 -> BxN # Compensate for tilt distortion # Transposed untilt points (instead of [x,y,1]^T, we obtain [x,y,1]) # Iteratively undistort points # Convert points from normalized camera coordinates to pixel coordinates # Based on https://github.com/opencv/opencv/blob/master/modules/calib3d/src/undistort.dispatch.cpp#L287 Compensate an image for lens distortion. Radial :math:`(k_1, k_2, k_3, k_4, k_4, k_6)`, tangential :math:`(p_1, p_2)`, thin prism :math:`(s_1, s_2, s_3, s_4)`, and tilt :math:`(\tau_x, \tau_y)` distortion models are considered in this function. Args: image: Input image with shape :math:`(*, C, H, W)`. K: Intrinsic camera matrix with shape :math:`(*, 3, 3)`. dist: Distortion coefficients :math:`(k_1,k_2,p_1,p_2[,k_3[,k_4,k_5,k_6[,s_1,s_2,s_3,s_4[,\tau_x,\tau_y]]]])`. This is a vector with 4, 5, 8, 12 or 14 elements with shape :math:`(*, n)`. Returns: Undistorted image with shape :math:`(*, C, H, W)`. Example: >>> img = torch.rand(1, 3, 5, 5) >>> K = torch.eye(3)[None] >>> dist_coeff = torch.rand(4) >>> out = undistort_image(img, K, dist_coeff) >>> out.shape torch.Size([1, 3, 5, 5]) # Create point coordinates for each pixel of the image # (rows*cols)x2 matrix of pixel coordinates # Distort points and define maps # Bx(rows*cols)x2 # B x rows x cols, float # B x rows x cols, float # Remap image to undistort | 2.492273 | 2 |
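Both routines above invert lens distortion by fixed-point iteration in normalized camera coordinates: distortion is cheap to apply in the forward direction, so the undistorted point is refined a few times against the distorted observation. Below is a minimal NumPy sketch of that inner loop, assuming only the common (k1, k2, p1, p2, k3) coefficients; the function name and the 5-coefficient restriction are ours, not part of the code above.

import numpy as np

def undistort_points_sketch(points, K, dist, iters=5):
    # points: (N, 2) pixel coordinates; K: 3x3 intrinsics; dist: (k1, k2, p1, p2, k3)
    k1, k2, p1, p2, k3 = dist
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    # Normalize, i.e. apply K^-1 to [u, v, 1]^T
    x = (points[:, 0] - cx) / fx
    y = (points[:, 1] - cy) / fy
    x0, y0 = x.copy(), y.copy()
    for _ in range(iters):
        r2 = x * x + y * y
        radial = 1 + k1 * r2 + k2 * r2 ** 2 + k3 * r2 ** 3
        dx = 2 * p1 * x * y + p2 * (r2 + 2 * x * x)
        dy = p1 * (r2 + 2 * y * y) + 2 * p2 * x * y
        # with k4 = k5 = k6 = 0 the rational radial factor reduces to 1/radial
        x = (x0 - dx) / radial
        y = (y0 - dy) / radial
    # back to pixel coordinates
    return np.stack([fx * x + cx, fy * y + cy], axis=-1)

K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
pts = np.array([[100.0, 80.0], [400.0, 300.0]])
print(undistort_points_sketch(pts, K, dist=(0.1, 0.01, 0.001, 0.001, 0.0)))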
Tests/Aula_7a.py | o-Ian/Practice-Python | 4 | 9272 | n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
print('A soma é: {}!' .format(n1+n2))
print('A subtração entre {} e {} é {}!' .format(n1, n2, n1-n2))
print('A multiplicação desses valores é {}!' .format(n1 * n2))
print('A divisão entre {} e {} é {:.3}' .format(n1, n2, n1/n2))
print('A divisão sem restos é {}!' .format(n1//n2), end = ' ')
print('O resto dessa divisão é {}' .format(n1 % n2))
| n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
print('A soma é: {}!' .format(n1+n2))
print('A subtração entre {} e {} é {}!' .format(n1, n2, n1-n2))
print('A multiplicação desses valores é {}!' .format(n1 * n2))
print('A divisão entre {} e {} é {:.3}' .format(n1, n2, n1/n2))
print('A divisão sem restos é {}!' .format(n1//n2), end = ' ')
print('O resto dessa divisão é {}' .format(n1 % n2))
| none | 1 | 4.077381 | 4 |
|
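The exercise above prints sum, difference, product, division, floor division and remainder with str.format. A slightly more compact variant uses f-strings and divmod, which returns quotient and remainder in one call; this is a sketch of the same drill, not part of the file above.

n1 = int(input('Digite um valor: '))
n2 = int(input('Digite outro valor: '))
quociente, resto = divmod(n1, n2)
print(f'A soma é {n1 + n2} e a subtração é {n1 - n2}!')
print(f'A multiplicação é {n1 * n2} e a divisão é {n1 / n2:.3f}!')
print(f'Divisão inteira: {quociente}, resto: {resto}!')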
manubot/cite/tests/test_citekey_api.py | shuvro-zz/manubot | 1 | 9273 | <filename>manubot/cite/tests/test_citekey_api.py
"""Tests API-level functions in manubot.cite. Both functions are found in citekey.py"""
import pytest
from manubot.cite import citekey_to_csl_item, standardize_citekey
@pytest.mark.parametrize(
"citekey,expected",
[
("doi:10.5061/DRYAD.q447c/1", "doi:10.5061/dryad.q447c/1"),
("doi:10.5061/dryad.q447c/1", "doi:10.5061/dryad.q447c/1"),
("doi:10/b6vnmd", "doi:10.1016/s0933-3657(96)00367-3"),
("doi:10/B6VNMD", "doi:10.1016/s0933-3657(96)00367-3"),
(
"doi:10/xxxxxxxxxxxxxYY",
"doi:10/xxxxxxxxxxxxxyy",
), # passthrough non-existent shortDOI
("pmid:24159271", "pmid:24159271"),
("isbn:1339919885", "isbn:9781339919881"),
("isbn:1-339-91988-5", "isbn:9781339919881"),
("isbn:978-0-387-95069-3", "isbn:9780387950693"),
("isbn:9780387950938", "isbn:9780387950938"),
("isbn:1-55860-510-X", "isbn:9781558605107"),
("isbn:1-55860-510-x", "isbn:9781558605107"),
],
)
def test_standardize_citekey(citekey, expected):
"""
Standardize identifiers based on their source
"""
output = standardize_citekey(citekey)
assert output == expected
@pytest.mark.xfail(reason="https://twitter.com/dhimmel/status/950443969313419264")
def test_citekey_to_csl_item_doi_datacite():
citekey = "doi:10.7287/peerj.preprints.3100v1"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "11cb5HXoY"
assert csl_item["URL"] == "https://doi.org/10.7287/peerj.preprints.3100v1"
assert csl_item["DOI"] == "10.7287/peerj.preprints.3100v1"
assert csl_item["type"] == "report"
assert (
csl_item["title"]
== "Sci-Hub provides access to nearly all scholarly literature"
)
authors = csl_item["author"]
assert authors[0]["family"] == "Himmelstein"
assert authors[-1]["family"] == "Greene"
def test_citekey_to_csl_item_arxiv():
citekey = "arxiv:cond-mat/0703470v2"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "ES92tcdg"
assert csl_item["URL"] == "https://arxiv.org/abs/cond-mat/0703470v2"
assert csl_item["number"] == "cond-mat/0703470v2"
assert csl_item["version"] == "2"
assert csl_item["type"] == "report"
assert csl_item["container-title"] == "arXiv"
assert csl_item["title"] == "Portraits of Complex Networks"
authors = csl_item["author"]
assert authors[0]["literal"] == "<NAME>"
assert csl_item["DOI"] == "10.1209/0295-5075/81/68004"
def test_citekey_to_csl_item_pmc():
"""
https://api.ncbi.nlm.nih.gov/lit/ctxp/v1/pmc/?format=csl&id=3041534
"""
citekey = f"pmcid:PMC3041534"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "RoOhUFKU"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041534/"
assert csl_item["container-title-short"] == "Summit Transl Bioinform"
assert (
csl_item["title"]
== "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities"
)
authors = csl_item["author"]
assert authors[0]["family"] == "Botsis"
assert csl_item["PMID"] == "21347133"
assert csl_item["PMCID"] == "PMC3041534"
assert "generated by Manubot" in csl_item["note"]
assert "standard_id: pmcid:PMC3041534" in csl_item["note"]
def test_citekey_to_csl_item_pubmed_1():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=21347133&rettype=full
"""
citekey = "pmid:21347133"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "y9ONtSZ9"
assert csl_item["type"] == "article-journal"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/21347133"
assert csl_item["container-title"] == "Summit on translational bioinformatics"
assert (
csl_item["title"]
== "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities."
)
assert csl_item["issued"]["date-parts"] == [[2010, 3, 1]]
authors = csl_item["author"]
assert authors[0]["given"] == "Taxiarchis"
assert authors[0]["family"] == "Botsis"
assert csl_item["PMID"] == "21347133"
assert csl_item["PMCID"] == "PMC3041534"
def test_citekey_to_csl_item_pubmed_2():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=27094199&rettype=full
"""
citekey = "pmid:27094199"
csl_item = citekey_to_csl_item(citekey)
print(csl_item)
assert csl_item["id"] == "alaFV9OY"
assert csl_item["type"] == "article-journal"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/27094199"
assert csl_item["container-title"] == "Circulation. Cardiovascular genetics"
assert csl_item["container-title-short"] == "Circ Cardiovasc Genet"
assert csl_item["page"] == "179-84"
assert (
csl_item["title"]
== "Genetic Association-Guided Analysis of Gene Networks for the Study of Complex Traits."
)
assert csl_item["issued"]["date-parts"] == [[2016, 4]]
authors = csl_item["author"]
assert authors[0]["given"] == "<NAME>"
assert authors[0]["family"] == "Greene"
assert csl_item["PMID"] == "27094199"
assert csl_item["DOI"] == "10.1161/circgenetics.115.001181"
def test_citekey_to_csl_item_pubmed_with_numeric_month():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29028984&rettype=full
See https://github.com/manubot/manubot/issues/69
"""
citekey = "pmid:29028984"
csl_item = citekey_to_csl_item(citekey)
print(csl_item)
assert csl_item["issued"]["date-parts"] == [[2018, 3, 15]]
def test_citekey_to_csl_item_pubmed_book():
"""
Extracting CSL metadata from books in PubMed is not supported.
Logic not implemented to parse XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29227604&rettype=full
"""
with pytest.raises(NotImplementedError):
citekey_to_csl_item("pmid:29227604")
def test_citekey_to_csl_item_isbn():
csl_item = citekey_to_csl_item("isbn:9780387950693")
assert csl_item["type"] == "book"
assert csl_item["title"] == "Complex analysis"
| <filename>manubot/cite/tests/test_citekey_api.py
"""Tests API-level functions in manubot.cite. Both functions are found in citekey.py"""
import pytest
from manubot.cite import citekey_to_csl_item, standardize_citekey
@pytest.mark.parametrize(
"citekey,expected",
[
("doi:10.5061/DRYAD.q447c/1", "doi:10.5061/dryad.q447c/1"),
("doi:10.5061/dryad.q447c/1", "doi:10.5061/dryad.q447c/1"),
("doi:10/b6vnmd", "doi:10.1016/s0933-3657(96)00367-3"),
("doi:10/B6VNMD", "doi:10.1016/s0933-3657(96)00367-3"),
(
"doi:10/xxxxxxxxxxxxxYY",
"doi:10/xxxxxxxxxxxxxyy",
), # passthrough non-existent shortDOI
("pmid:24159271", "pmid:24159271"),
("isbn:1339919885", "isbn:9781339919881"),
("isbn:1-339-91988-5", "isbn:9781339919881"),
("isbn:978-0-387-95069-3", "isbn:9780387950693"),
("isbn:9780387950938", "isbn:9780387950938"),
("isbn:1-55860-510-X", "isbn:9781558605107"),
("isbn:1-55860-510-x", "isbn:9781558605107"),
],
)
def test_standardize_citekey(citekey, expected):
"""
Standardize identifiers based on their source
"""
output = standardize_citekey(citekey)
assert output == expected
@pytest.mark.xfail(reason="https://twitter.com/dhimmel/status/950443969313419264")
def test_citekey_to_csl_item_doi_datacite():
citekey = "doi:10.7287/peerj.preprints.3100v1"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "11cb5HXoY"
assert csl_item["URL"] == "https://doi.org/10.7287/peerj.preprints.3100v1"
assert csl_item["DOI"] == "10.7287/peerj.preprints.3100v1"
assert csl_item["type"] == "report"
assert (
csl_item["title"]
== "Sci-Hub provides access to nearly all scholarly literature"
)
authors = csl_item["author"]
assert authors[0]["family"] == "Himmelstein"
assert authors[-1]["family"] == "Greene"
def test_citekey_to_csl_item_arxiv():
citekey = "arxiv:cond-mat/0703470v2"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "ES92tcdg"
assert csl_item["URL"] == "https://arxiv.org/abs/cond-mat/0703470v2"
assert csl_item["number"] == "cond-mat/0703470v2"
assert csl_item["version"] == "2"
assert csl_item["type"] == "report"
assert csl_item["container-title"] == "arXiv"
assert csl_item["title"] == "Portraits of Complex Networks"
authors = csl_item["author"]
assert authors[0]["literal"] == "<NAME>"
assert csl_item["DOI"] == "10.1209/0295-5075/81/68004"
def test_citekey_to_csl_item_pmc():
"""
https://api.ncbi.nlm.nih.gov/lit/ctxp/v1/pmc/?format=csl&id=3041534
"""
citekey = f"pmcid:PMC3041534"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "RoOhUFKU"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041534/"
assert csl_item["container-title-short"] == "Summit Transl Bioinform"
assert (
csl_item["title"]
== "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities"
)
authors = csl_item["author"]
assert authors[0]["family"] == "Botsis"
assert csl_item["PMID"] == "21347133"
assert csl_item["PMCID"] == "PMC3041534"
assert "generated by Manubot" in csl_item["note"]
assert "standard_id: pmcid:PMC3041534" in csl_item["note"]
def test_citekey_to_csl_item_pubmed_1():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=21347133&rettype=full
"""
citekey = "pmid:21347133"
csl_item = citekey_to_csl_item(citekey)
assert csl_item["id"] == "y9ONtSZ9"
assert csl_item["type"] == "article-journal"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/21347133"
assert csl_item["container-title"] == "Summit on translational bioinformatics"
assert (
csl_item["title"]
== "Secondary Use of EHR: Data Quality Issues and Informatics Opportunities."
)
assert csl_item["issued"]["date-parts"] == [[2010, 3, 1]]
authors = csl_item["author"]
assert authors[0]["given"] == "Taxiarchis"
assert authors[0]["family"] == "Botsis"
assert csl_item["PMID"] == "21347133"
assert csl_item["PMCID"] == "PMC3041534"
def test_citekey_to_csl_item_pubmed_2():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=27094199&rettype=full
"""
citekey = "pmid:27094199"
csl_item = citekey_to_csl_item(citekey)
print(csl_item)
assert csl_item["id"] == "alaFV9OY"
assert csl_item["type"] == "article-journal"
assert csl_item["URL"] == "https://www.ncbi.nlm.nih.gov/pubmed/27094199"
assert csl_item["container-title"] == "Circulation. Cardiovascular genetics"
assert csl_item["container-title-short"] == "Circ Cardiovasc Genet"
assert csl_item["page"] == "179-84"
assert (
csl_item["title"]
== "Genetic Association-Guided Analysis of Gene Networks for the Study of Complex Traits."
)
assert csl_item["issued"]["date-parts"] == [[2016, 4]]
authors = csl_item["author"]
assert authors[0]["given"] == "<NAME>"
assert authors[0]["family"] == "Greene"
assert csl_item["PMID"] == "27094199"
assert csl_item["DOI"] == "10.1161/circgenetics.115.001181"
def test_citekey_to_csl_item_pubmed_with_numeric_month():
"""
Generated from XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29028984&rettype=full
See https://github.com/manubot/manubot/issues/69
"""
citekey = "pmid:29028984"
csl_item = citekey_to_csl_item(citekey)
print(csl_item)
assert csl_item["issued"]["date-parts"] == [[2018, 3, 15]]
def test_citekey_to_csl_item_pubmed_book():
"""
Extracting CSL metadata from books in PubMed is not supported.
Logic not implemented to parse XML returned by
https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29227604&rettype=full
"""
with pytest.raises(NotImplementedError):
citekey_to_csl_item("pmid:29227604")
def test_citekey_to_csl_item_isbn():
csl_item = citekey_to_csl_item("isbn:9780387950693")
assert csl_item["type"] == "book"
assert csl_item["title"] == "Complex analysis"
| en | 0.70936 | Tests API-level functions in manubot.cite. Both functions are found in citekey.py # passthrough non-existent shortDOI Standardize identifiers based on their source https://api.ncbi.nlm.nih.gov/lit/ctxp/v1/pmc/?format=csl&id=3041534 Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=21347133&rettype=full Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=27094199&rettype=full Generated from XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29028984&rettype=full See https://github.com/manubot/manubot/issues/69 Extracting CSL metadata from books in PubMed is not supported. Logic not implemented to parse XML returned by https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id=29227604&rettype=full | 2.249024 | 2 |
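The ISBN rows in the parametrized test above all normalize ISBN-10 spellings to ISBN-13. That conversion is mechanical: strip separators, drop the old check digit, prepend the 978 prefix, and recompute the check digit with alternating 1/3 weights. A self-contained sketch follows; the helper name is ours and manubot's own implementation may differ, but it reproduces two of the expected values.

def isbn10_to_isbn13(isbn10: str) -> str:
    # Handles ISBN-10 inputs only (with or without hyphens/spaces).
    digits = isbn10.replace('-', '').replace(' ', '')
    core = '978' + digits[:9]            # drop the ISBN-10 check digit, add the EAN prefix
    weights = [1, 3] * 6                 # alternating weights over the 12 digits
    total = sum(int(d) * w for d, w in zip(core, weights))
    check = (10 - total % 10) % 10
    return core + str(check)

assert isbn10_to_isbn13('1-339-91988-5') == '9781339919881'
assert isbn10_to_isbn13('1-55860-510-X') == '9781558605107'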
vispy/io/datasets.py | hmaarrfk/vispy | 2,617 | 9274 | # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
"""Load the iris dataset
Returns
-------
iris : NpzFile
data['data'] : a (150, 4) NumPy array with the iris' features
data['group'] : a (150,) NumPy array with the iris' group
"""
return np.load(load_data_file('iris/iris.npz',
force_download='2014-09-04'))
def load_crate():
"""Load an image of a crate
Returns
-------
crate : array
256x256x3 crate image.
"""
return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
"""Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
for i in range(4):
value, pack[..., i] = np.modf(value * 256.)
return pack
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
return np.frombuffer(value.tobytes(),  # frombuffer replaces the deprecated np.fromstring
np.ubyte).reshape((value.shape + (4,)))
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
"""
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names
| # -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
import numpy as np
from os import path as op
from ..util import load_data_file
# This is the package data dir, not the dir for config, etc.
DATA_DIR = op.join(op.dirname(__file__), '_data')
def load_iris():
"""Load the iris dataset
Returns
-------
iris : NpzFile
data['data'] : a (150, 4) NumPy array with the iris' features
data['group'] : a (150,) NumPy array with the iris' group
"""
return np.load(load_data_file('iris/iris.npz',
force_download='2014-09-04'))
def load_crate():
"""Load an image of a crate
Returns
-------
crate : array
256x256x3 crate image.
"""
return np.load(load_data_file('orig/crate.npz'))['crate']
def pack_unit(value):
"""Packs float values between [0,1] into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
for i in range(4):
value, pack[..., i] = np.modf(value * 256.)
return pack
def pack_ieee(value):
"""Packs float ieee binary representation into 4 unsigned int8
Returns
-------
pack: array
packed interpolation kernel
"""
return np.frombuffer(value.tobytes(),  # frombuffer replaces the deprecated np.fromstring
np.ubyte).reshape((value.shape + (4,)))
def load_spatial_filters(packed=True):
"""Load spatial-filters kernel
Parameters
----------
packed : bool
Whether or not the data should be in "packed" representation
for use in GLSL code.
Returns
-------
kernel : array
16x1024x4 (packed float in rgba) or
16x1024 (unpacked float)
16 interpolation kernel with length 1024 each.
names : tuple of strings
Respective interpolation names, plus "Nearest" which does
not require a filter but can still be used
"""
names = ("Bilinear", "Hanning", "Hamming", "Hermite",
"Kaiser", "Quadric", "Bicubic", "CatRom",
"Mitchell", "Spline16", "Spline36", "Gaussian",
"Bessel", "Sinc", "Lanczos", "Blackman", "Nearest")
kernel = np.load(op.join(DATA_DIR, 'spatial-filters.npy'))
if packed:
# convert the kernel to a packed representation
kernel = pack_unit(kernel)
return kernel, names
| en | 0.643633 | # -*- coding: utf-8 -*- # Copyright (c) Vispy Development Team. All Rights Reserved. # Distributed under the (new) BSD License. See LICENSE.txt for more info. # This is the package data dir, not the dir for config, etc. Load the iris dataset Returns ------- iris : NpzFile data['data'] : a (150, 4) NumPy array with the iris' features data['group'] : a (150,) NumPy array with the iris' group Load an image of a crate Returns ------- crate : array 256x256x3 crate image. Packs float values between [0,1] into 4 unsigned int8 Returns ------- pack: array packed interpolation kernel Packs float ieee binary representation into 4 unsigned int8 Returns ------- pack: array packed interpolation kernel Load spatial-filters kernel Parameters ---------- packed : bool Whether or not the data should be in "packed" representation for use in GLSL code. Returns ------- kernel : array 16x1024x4 (packed float in rgba) or 16x1024 (unpacked float) 16 interpolation kernel with length 1024 each. names : tuple of strings Respective interpolation names, plus "Nearest" which does not require a filter but can still be used # convert the kernel to a packed representation | 2.2424 | 2 |
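pack_unit above encodes a float in [0, 1) as four base-256 digits so it can travel in an RGBA texture; decoding is a dot product with descending powers of 1/256. The round-trip check below repeats pack_unit verbatim so the snippet runs standalone; unpack_unit is our addition, not part of the module above.

import numpy as np

def pack_unit(value):
    pack = np.zeros(value.shape + (4,), dtype=np.ubyte)
    for i in range(4):
        value, pack[..., i] = np.modf(value * 256.)
    return pack

def unpack_unit(pack):
    scales = 1.0 / 256.0 ** np.arange(1, 5)   # 1/256, 1/256**2, 1/256**3, 1/256**4
    return (pack.astype(np.float64) * scales).sum(axis=-1)

v = np.random.rand(8)
assert np.allclose(unpack_unit(pack_unit(v)), v, atol=1e-7)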
universal_portfolio/knapsack.py | jehung/universal_portfolio | 14 | 9275 | <reponame>jehung/universal_portfolio
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: <NAME>
Created: 30/03/2016
Copyright: (c) <NAME> 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put in the same folder
/plt create a subfolder in the same directory where plot files will be saved
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the asset
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1: # the last epoch, use test data set
state, xdata, price_data = all_init_data()
else:
state, xdata, price_data = all_init_data(test=True)
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(list(filter(lambda v: v == v, signal.values)), return_counts=True)  # list() needed: filter returns an iterator in Python 3
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show()
| # -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
np.random.seed(1335) # for reproducibility
np.set_printoptions(precision=5, suppress=True, linewidth=150)
import os
import pandas as pd
import backtest as twp
from matplotlib import pyplot as plt
from sklearn import metrics, preprocessing
from talib.abstract import *
from sklearn.externals import joblib
import quandl
import random, timeit
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.optimizers import RMSprop, Adam
'''
Name: The Self Learning Quant, Example 3
Author: <NAME>
Created: 30/03/2016
Copyright: (c) <NAME> 2016
Licence: BSD
Requirements:
Numpy
Pandas
MatplotLib
scikit-learn
TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html
Keras, https://keras.io/
Quandl, https://www.quandl.com/tools/python
backtest.py from the TWP library. Download backtest.py and put in the same folder
/plt create a subfolder in the same directory where plot files will be saved
'''
def get_ticker(x):
return x.split('/')[-1].split('.')[0]
def read_file(file, test=None):
scaler = preprocessing.MinMaxScaler()
d = pd.read_csv(file).set_index('Date')
d.fillna(0, inplace=True)
ticker = get_ticker(file)
d['ticker'] = ticker
d.rename(columns={'Open': 'open', 'High': 'high', 'Low': 'low', 'Close': 'close', 'Adj Close': 'adj_close',
'Volume (BTC)': 'volume'},
inplace=True)
x_train = d.iloc[:-100, ]
x_test = d.iloc[-100:, ]
if test:
return x_test, ticker
else:
return x_train, ticker
# Initialize first state, all items are placed deterministically
def init_state(file, test):
d, ticker = read_file(file, test=test)
xdata = pd.DataFrame()
scaler = preprocessing.StandardScaler()
xdata['adj_close'] = d['adj_close'] # .values
xdata['diff'] = xdata['adj_close'].diff(periods=1)
xdata['diff'].fillna(0, inplace=True)
xdata['sma15'] = SMA(d, timeperiod=15)
xdata['sma60'] = SMA(d, timeperiod=60)
xdata['rsi'] = RSI(d, timeperiod=14)
xdata['atr'] = ATR(d, timeperiod=14)
xdata.fillna(0, inplace=True)
# --- Preprocess data
# xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr))
xdata = pd.DataFrame(scaler.fit_transform(xdata), columns=xdata.columns)
xdata['ticker'] = ticker
pivot_columns = xdata.columns[0:-1]
pivot = xdata.pivot_table(index=d.index, columns='ticker', values=pivot_columns) # Make a pivot table from the data
pivot.columns = [s1 + '-' + s2 for (s1, s2) in pivot.columns.tolist()]
return pivot
def all_init_data(test=False):
filepath = 'util/stock_dfs/'
all = []
scaler = preprocessing.StandardScaler()
for f in os.listdir(filepath):
datapath = os.path.join(filepath, f)
if datapath.endswith('.csv'):
# print(datapath)
Res = init_state(datapath, test=test)
all.append(Res)
all = pd.concat(all, axis=1)
all.fillna(0, inplace=True)
closecol = [col for col in all.columns if 'adj_close' in col]
close = all[closecol].values
# xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr))
xdata = np.vstack(all.values)
xdata = np.nan_to_num(xdata)
if test == False:
scaler = preprocessing.StandardScaler()
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
joblib.dump(scaler, 'data/scaler.pkl')
else:
scaler = joblib.load('data/scaler.pkl')
xdata = np.expand_dims(scaler.fit_transform(xdata), axis=1)
state = xdata[0:1, 0:1, :]
return state, xdata, close
# Take Action
def take_action(state, xdata, action, signal, time_step):
# this should generate a list of trade signals that at evaluation time are fed to the backtester
# the backtester should get a list of trade signals and a list of price data for the asset
# make necessary adjustments to state and then return it
time_step += 1
# if the current iteration is the last state ("terminal state") then set terminal_state to 1
if time_step + 1 == xdata.shape[0]:
state = xdata[time_step - 1:time_step, 0:1, :]
terminal_state = 1
signal.loc[time_step] = 0
return state, time_step, signal, terminal_state
# move the market data window one step forward
state = xdata[time_step - 1:time_step, 0:1, :]
# take action
if action == 1:
signal.loc[time_step] = 100
elif action == 2:
signal.loc[time_step] = -100
else:
signal.loc[time_step] = 0
# print(state)
terminal_state = 0
# print(signal)
return state, time_step, signal, terminal_state
# Get Reward, the reward is returned at the end of an episode
def get_reward(new_state, time_step, action, xdata, signal, terminal_state, eval=False, epoch=0):
reward = 0
signal.fillna(value=0, inplace=True)
if eval == False:
try:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata[time_step - 2:time_step]], index=signal[time_step - 2:time_step].index.values),
signal[time_step - 2:time_step], signalType='shares')
reward = np.max((bt.data['price'].iloc[-1] - bt.data['price'].iloc[-2]) * bt.data['shares'].iloc[-1])
except:
pass
if terminal_state == 1 and eval == True:
bt = twp.Backtest(pd.Series(data=[x[0] for x in xdata], index=signal.index.values), signal, signalType='shares')
reward = bt.pnl.iloc[-1]
plt.figure(figsize=(9, 16))
bt.plotTrades()
plt.axvline(x=400, color='black', linestyle='--')
plt.text(250, 400, 'training data')
plt.text(450, 400, 'test data')
plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png')
plt.close('all')
'''
# save a figure of the test set
plt.figure(figsize=(10, 25))
for i in range(xdata.T.shape[0]):
#frame = pd.concat(btFrame, axis=1)
bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares')
reward += np.max(bt.pnl.iloc[-1])
bt.plotTrades()
#plt.axvline(x=400, color='black', linestyle='--')
#plt.text(250, 400, 'training data')
#plt.text(450, 400, 'test data')
#plt.suptitle(str(epoch))
plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.close('all')
'''
# print(time_step, terminal_state, eval, reward)
return reward
def evaluate_Q(eval_data, eval_model, epoch=0):
# This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions
signal = pd.Series(index=np.arange(len(eval_data)))
state, xdata, price_data = all_init_data()
status = 1
terminal_state = 0
time_step = 1
while (status == 1):
# We start in state S
qval = eval_model.predict(state, batch_size=batch_size)
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
eval_reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state, eval=True,
epoch=epoch)
state = new_state
if terminal_state == 1: # terminal state
status = 0
return eval_reward
if __name__ == "__main__":
# This neural network is the Q-function, run it like this:
# model.predict(state.reshape(1,64), batch_size=1)
batch_size = 7
num_features = 2544
epochs = 3
gamma = 0.95 # since the reward can be several time steps away, make gamma high
epsilon = 1
batchSize = 100
buffer = 200
replay = []
learning_progress = []
model = Sequential()
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=True,
stateful=False))
model.add(Dropout(0.5))
model.add(LSTM(64,
input_shape=(1, num_features),
return_sequences=False,
stateful=False))
model.add(Dropout(0.5))
model.add(Dense(4, init='lecun_uniform'))
model.add(Activation('linear')) # linear output so we can have range of real-valued outputs
rms = RMSprop()
adam = Adam()
model.compile(loss='mse', optimizer=adam)
start_time = timeit.default_timer()
# read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle
astate, xdata, aprice_data = all_init_data()
bstate, test_data, test_price_data = all_init_data(test=True)
'''
bstate, test_data, test_price_data = all_init_data(test=True)
print(astate.shape)
print(bstate.shape)
print(xdata.shape)
print(test_data.shape)
print(price_data.shape)
print(test_price_data.shape)
'''
# stores tuples of (S, A, R, S')
h = 0
# signal = pd.Series(index=market_data.index)
signal = pd.Series(index=np.arange(len(xdata)))
for i in range(epochs):
if i == epochs - 1: # the last epoch, use test data set
state, xdata, price_data = all_init_data()
else:
state, xdata, price_data = all_init_data(test=True)
status = 1
terminal_state = 0
time_step = 5
# while game still in progress
while (status == 1):
# We are in state S
# Let's run our Q function on S to get Q values for all possible actions
print('epoch ' + str(i))
qval = model.predict(state, batch_size=batch_size)
if (random.random() < epsilon): # choose random action
action = np.random.randint(0, 4) # assumes 4 different actions
else: # choose best action from Q(s,a) values
action = (np.argmax(qval))
# Take action, observe new state S'
new_state, time_step, signal, terminal_state = take_action(state, xdata, action, signal, time_step)
# Observe reward
reward = get_reward(new_state, time_step, action, price_data, signal, terminal_state)
print('new_state', new_state)
print('reward', reward)
# Experience replay storage
if (len(replay) < buffer): # if buffer not filled, add to it
replay.append((state, action, reward, new_state))
# print(time_step, reward, terminal_state)
else: # if buffer full, overwrite old values
if (h < (buffer - 1)):
h += 1
else:
h = 0
replay[h] = (state, action, reward, new_state)
# randomly sample our experience replay memory
minibatch = random.sample(replay, batchSize)
X_train = []
y_train = []
for memory in minibatch:
# Get max_Q(S',a)
old_state, action, reward, new_state = memory
old_qval = model.predict(old_state, batch_size=batch_size)
newQ = model.predict(new_state, batch_size=batch_size)
maxQ = np.max(newQ)
y = np.zeros((1, 4))
y[:] = old_qval[:]
if terminal_state == 0: # non-terminal state
update = (reward + (gamma * maxQ))
else: # terminal state
update = reward
# print('rewardbase', reward)
# print('update', update)
y[0][action] = update
# print(time_step, reward, terminal_state)
X_train.append(old_state)
y_train.append(y.reshape(4, ))
X_train = np.squeeze(np.array(X_train), axis=(1))
y_train = np.array(y_train)
model.fit(X_train, y_train, batch_size=batchSize, epochs=100, verbose=0)
state = new_state
if terminal_state == 1: # if reached terminal state, update epoch status
status = 0
eval_reward = evaluate_Q(test_data, model, i)
# eval_reward = value_iter(test_data, epsilon, epochs)
learning_progress.append(eval_reward)
print("Epoch #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon))
# learning_progress.append((reward))
if epsilon > 0.1: # decrement epsilon over time
epsilon -= (1.0 / epochs)
elapsed = np.round(timeit.default_timer() - start_time, decimals=2)
print("Completed in %f" % (elapsed,))
bt = twp.Backtest(pd.Series(data=[x[0] for x in test_price_data]), signal, signalType='shares')
bt.data['delta'] = bt.data['shares'].diff().fillna(0)
print(bt.data)
bt.data.to_csv('plt/knapsack_data.csv')
unique, counts = np.unique(list(filter(lambda v: v == v, signal.values)), return_counts=True)  # list() needed: filter returns an iterator in Python 3
print(np.asarray((unique, counts)).T)
plt.figure()
plt.subplot(3, 1, 1)
bt.plotTrades()
plt.subplot(3, 1, 2)
bt.pnl.plot(style='x-')
plt.subplot(3, 1, 3)
plt.plot(learning_progress)
print('to plot', learning_progress)
plt.savefig('plt/knapsack_summary' + '.png', bbox_inches='tight', pad_inches=1, dpi=72)
plt.show() | en | 0.612079 | # -*- coding: utf-8 -*- # for reproducibility Name: The Self Learning Quant, Example 3 Author: <NAME> Created: 30/03/2016 Copyright: (c) <NAME> 2016 Licence: BSD Requirements: Numpy Pandas MatplotLib scikit-learn TA-Lib, instructions at https://mrjbq7.github.io/ta-lib/install.html Keras, https://keras.io/ Quandl, https://www.quandl.com/tools/python backtest.py from the TWP library. Download backtest.py and put in the same folder /plt create a subfolder in the same directory where plot files will be saved # Initialize first state, all items are placed deterministically # .values # --- Preprocess data # xdata = np.column_stack((close, diff, sma15, close - sma15, sma15 - sma60, rsi, atr)) # Make a pivot table from the data # print(datapath) # xdata = np.column_stack((close, diff, sma15, close-sma15, sma15-sma60, rsi, atr)) # Take Action # this should generate a list of trade signals that at evaluation time are fed to the backtester # the backtester should get a list of trade signals and a list of price data for the assett # make necessary adjustments to state and then return it # if the current iteration is the last state ("terminal state") then set terminal_state to 1 # move the market data window one step forward # take action # print(state) # print(signal) # Get Reward, the reward is returned at the end of an episode # save a figure of the test set plt.figure(figsize=(10, 25)) for i in range(xdata.T.shape[0]): #frame = pd.concat(btFrame, axis=1) bt = twp.Backtest(pd.Series(data=[x for x in xdata.T[i]], index=signal.index.values), signal, signalType='shares') reward += np.max(bt.pnl.iloc[-1]) bt.plotTrades() #plt.axvline(x=400, color='black', linestyle='--') #plt.text(250, 400, 'training data') #plt.text(450, 400, 'test data') #plt.suptitle(str(epoch)) plt.savefig('plt/' + 'knapsack_' + str(epoch) + '.png', bbox_inches='tight', pad_inches=1, dpi=72) plt.close('all') # print(time_step, terminal_state, eval, reward) # This function is used to evaluate the performance of the system each epoch, without the influence of epsilon and random actions # We start in state S # Take action, observe new state S' # Observe reward # terminal state # This neural network is the the Q-function, run it like this: # model.predict(state.reshape(1,64), batch_size=1) # since the reward can be several time steps away, make gamma high # linear output so we can have range of real-valued outputs # read_convert_data(symbol='XBTEUR') #run once to read indata, resample and convert to pickle bstate, test_data, test_price_data = all_init_data(test=True) print(astate.shape) print(bstate.shape) print(xdata.shape) print(test_data.shape) print(price_data.shape) print(test_price_data.shape) # stores tuples of (S, A, R, S') # signal = pd.Series(index=market_data.index) # the last epoch, use test data set # while game still in progress # We are in state S # Let's run our Q function on S to get Q values for all possible actions # choose random action # assumes 4 different actions # choose best action from Q(s,a) values # Take action, observe new state S' # Observe reward # Experience replay storage # if buffer not filled, add to it # print(time_step, reward, terminal_state) # if buffer full, overwrite old values # randomly sample our experience replay memory # Get max_Q(S',a) # non-terminal state # terminal state # print('rewardbase', reward) # print('update', update) # print(time_step, reward, terminal_state) # if reached terminal state, update epoch status # eval_reward = value_iter(test_data, 
epsilon, epochs) #: %s Reward: %f Epsilon: %f" % (i, eval_reward, epsilon)) # learning_progress.append((reward)) # decrement epsilon over time | 2.358647 | 2 |
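The training loop in knapsack.py above is a standard experience-replay update: sample stored (state, action, reward, next_state) tuples and regress Q(state, action) toward reward + gamma * max Q(next_state, ·). Below is a minimal sketch of that target construction with an arbitrary predict callable standing in for the Keras LSTM; all names and shapes here are illustrative, not taken from the file.

import random
import numpy as np

def q_targets(minibatch, predict, gamma=0.95):
    # minibatch: iterable of (state, action, reward, next_state, terminal)
    # predict: maps a state vector to a vector of Q-values, one per action
    X, y = [], []
    for state, action, reward, next_state, terminal in minibatch:
        target = predict(state).copy()
        if terminal:
            target[action] = reward
        else:
            target[action] = reward + gamma * np.max(predict(next_state))
        X.append(state)
        y.append(target)
    return np.array(X), np.array(y)

# toy check with a fake linear Q-function over 3-dim states and 4 actions
rng = np.random.default_rng(0)
W = rng.normal(size=(3, 4))
predict = lambda s: s @ W
replay = [(rng.normal(size=3), int(rng.integers(4)), float(rng.normal()), rng.normal(size=3), False)
          for _ in range(32)]
X, y = q_targets(random.sample(replay, 8), predict)
print(X.shape, y.shape)   # (8, 3) (8, 4)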
experiments/experiment_01.py | bask0/q10hybrid | 2 | 9276 | <reponame>bask0/q10hybrid
import pytorch_lightning as pl
import optuna
import xarray as xr
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
import os
import shutil
from argparse import ArgumentParser
from datetime import datetime
from project.fluxdata import FluxData
from models.hybrid import Q10Model
# Hardcoded `Trainer` args. Note that these cannot be passed via cli.
TRAINER_ARGS = dict(
max_epochs=100,
log_every_n_steps=1,
weights_summary=None
)
class Objective(object):
def __init__(self, args):
self.args = args
def __call__(self, trial: optuna.trial.Trial) -> float:
q10_init = trial.suggest_float('q10_init', 0.0001, 1000.)
seed = trial.suggest_int('seed', 0, 999999999999)
use_ta = trial.suggest_categorical('use_ta', [True, False])
dropout = trial.suggest_float('dropout', 0.0, 1.0)
if use_ta:
features = ['sw_pot', 'dsw_pot', 'ta']
else:
features = ['sw_pot', 'dsw_pot']
pl.seed_everything(seed)
# Further variables used in the hybrid model.
physical = ['ta']
# Target (multiple targets not possible currently).
targets = ['reco']
# Find variables that are only needed in physical model but not in NN.
physical_exclusive = [v for v in physical if v not in features]
# ------------
# data
# ------------
ds = xr.open_dataset(self.args.data_path)
fluxdata = FluxData(
ds,
features=features + physical_exclusive,
targets=targets,
context_size=1,
train_time=slice('2003-01-01', '2006-12-31'),
valid_time=slice('2007-01-01', '2007-12-31'),
test_time=slice('2008-01-01', '2008-12-31'),
batch_size=self.args.batch_size,
data_loader_kwargs={'num_workers': 4})
train_loader = fluxdata.train_dataloader()
val_loader = fluxdata.val_dataloader()
test_loader = fluxdata.test_dataloader()
# Create empty xr.Dataset, will be used by the model to save predictions every epoch.
max_epochs = TRAINER_ARGS['max_epochs']
ds_pred = fluxdata.target_xr('valid', varnames=['reco', 'rb'], num_epochs=max_epochs)
# ------------
# model
# ------------
model = Q10Model(
features=features,
targets=targets,
norm=fluxdata._norm,
ds=ds_pred,
q10_init=q10_init,
hidden_dim=self.args.hidden_dim,
num_layers=self.args.num_layers,
learning_rate=self.args.learning_rate,
dropout=dropout,
weight_decay=self.args.weight_decay,
num_steps=len(train_loader) * max_epochs)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(
self.args,
default_root_dir=self.args.log_dir,
**TRAINER_ARGS,
callbacks=[
EarlyStopping(
monitor='valid_loss',
patience=10,
min_delta=0.00001),
ModelCheckpoint(
filename='{epoch}-{val_loss:.2f}',
save_top_k=1,
verbose=False,
monitor='valid_loss',
mode='min',
prefix=model.__class__.__name__)
])
trainer.fit(model, train_loader, val_loader)
# ------------
# testing
# ------------
# trainer.test(test_dataloaders=test_loader)
# ------------
# save results
# ------------
# Store predictions.
ds = fluxdata.add_scalar_record(model.ds, varname='q10', x=model.q10_history)
trial.set_user_attr('q10', ds.q10[-1].item())
# Add some attributes that are required for analysis.
ds.attrs = {
'created': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'author': '<EMAIL>',
'q10_init': q10_init,
'dropout': dropout,
'use_ta': int(use_ta),
'loss': trainer.callback_metrics['valid_loss'].item()
}
ds = ds.isel(epoch=slice(0, trainer.current_epoch + 1))
# Save data.
save_dir = os.path.join(model.logger.log_dir, 'predictions.nc')
print(f'Saving predictions to: {save_dir}')
ds.to_netcdf(save_dir)
return trainer.callback_metrics['valid_loss'].item()
@staticmethod
def add_project_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument(
'--batch_size', default=240, type=int)
parser.add_argument(
'--data_path', default='./data/Synthetic4BookChap.nc', type=str)
parser.add_argument(
'--log_dir', default='./logs/experiment_01/', type=str)
return parser
def main(parser: ArgumentParser = None, **kwargs):
"""Use kwargs to overload argparse args."""
# ------------
# args
# ------------
if parser is None:
parser = ArgumentParser()
parser = Objective.add_project_specific_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
parser = Q10Model.add_model_specific_args(parser)
parser.add_argument('--create_study', action='store_true', help='create new study (deletes old) and exits')
parser.add_argument('--single_seed', action='store_true', help='use only one seed instead of (1, ..., 10).')
args = parser.parse_args()
globargs = TRAINER_ARGS.copy()
globargs.update(kwargs)
for k, v in globargs.items():
setattr(args, k, v)
# ------------
# study setup
# ------------
search_space = {
'q10_init': [0.5, 1.5, 2.5],
'seed': [0] if args.single_seed else [i for i in range(10)],
'dropout': [0.0, 0.2, 0.4, 0.6],
'use_ta': [True, False]
}
sql_file = os.path.abspath(os.path.join(args.log_dir, "optuna.db"))
sql_path = f'sqlite:///{sql_file}'
if args.create_study or (not os.path.isfile(sql_file)):
if os.path.isdir(args.log_dir):
shutil.rmtree(args.log_dir)
os.makedirs(args.log_dir, exist_ok=True)
study = optuna.create_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space),
direction='minimize',
load_if_exists=False)
if args.create_study:
return None
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
# ------------
# run study
# ------------
n_trials = 1
for _, v in search_space.items():
n_trials *= len(v)
study = optuna.load_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space))
study.optimize(Objective(args), n_trials=n_trials)
if __name__ == '__main__':
main()
| import pytorch_lightning as pl
import optuna
import xarray as xr
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
import os
import shutil
from argparse import ArgumentParser
from datetime import datetime
from project.fluxdata import FluxData
from models.hybrid import Q10Model
# Hardcoded `Trainer` args. Note that these cannot be passed via cli.
TRAINER_ARGS = dict(
max_epochs=100,
log_every_n_steps=1,
weights_summary=None
)
class Objective(object):
def __init__(self, args):
self.args = args
def __call__(self, trial: optuna.trial.Trial) -> float:
q10_init = trial.suggest_float('q10_init', 0.0001, 1000.)
seed = trial.suggest_int('seed', 0, 999999999999)
use_ta = trial.suggest_categorical('use_ta', [True, False])
dropout = trial.suggest_float('dropout', 0.0, 1.0)
if use_ta:
features = ['sw_pot', 'dsw_pot', 'ta']
else:
features = ['sw_pot', 'dsw_pot']
pl.seed_everything(seed)
# Further variables used in the hybrid model.
physical = ['ta']
# Target (multiple targets not possible currently).
targets = ['reco']
# Find variables that are only needed in physical model but not in NN.
physical_exclusive = [v for v in physical if v not in features]
# ------------
# data
# ------------
ds = xr.open_dataset(self.args.data_path)
fluxdata = FluxData(
ds,
features=features + physical_exclusive,
targets=targets,
context_size=1,
train_time=slice('2003-01-01', '2006-12-31'),
valid_time=slice('2007-01-01', '2007-12-31'),
test_time=slice('2008-01-01', '2008-12-31'),
batch_size=self.args.batch_size,
data_loader_kwargs={'num_workers': 4})
train_loader = fluxdata.train_dataloader()
val_loader = fluxdata.val_dataloader()
test_loader = fluxdata.test_dataloader()
# Create empty xr.Dataset, will be used by the model to save predictions every epoch.
max_epochs = TRAINER_ARGS['max_epochs']
ds_pred = fluxdata.target_xr('valid', varnames=['reco', 'rb'], num_epochs=max_epochs)
# ------------
# model
# ------------
model = Q10Model(
features=features,
targets=targets,
norm=fluxdata._norm,
ds=ds_pred,
q10_init=q10_init,
hidden_dim=self.args.hidden_dim,
num_layers=self.args.num_layers,
learning_rate=self.args.learning_rate,
dropout=dropout,
weight_decay=self.args.weight_decay,
num_steps=len(train_loader) * max_epochs)
# ------------
# training
# ------------
trainer = pl.Trainer.from_argparse_args(
self.args,
default_root_dir=self.args.log_dir,
**TRAINER_ARGS,
callbacks=[
EarlyStopping(
monitor='valid_loss',
patience=10,
min_delta=0.00001),
ModelCheckpoint(
filename='{epoch}-{val_loss:.2f}',
save_top_k=1,
verbose=False,
monitor='valid_loss',
mode='min',
prefix=model.__class__.__name__)
])
trainer.fit(model, train_loader, val_loader)
# ------------
# testing
# ------------
# trainer.test(test_dataloaders=test_loader)
# ------------
# save results
# ------------
# Store predictions.
ds = fluxdata.add_scalar_record(model.ds, varname='q10', x=model.q10_history)
trial.set_user_attr('q10', ds.q10[-1].item())
# Add some attributes that are required for analysis.
ds.attrs = {
'created': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
'author': '<EMAIL>',
'q10_init': q10_init,
'dropout': dropout,
'use_ta': int(use_ta),
'loss': trainer.callback_metrics['valid_loss'].item()
}
ds = ds.isel(epoch=slice(0, trainer.current_epoch + 1))
# Save data.
save_dir = os.path.join(model.logger.log_dir, 'predictions.nc')
print(f'Saving predictions to: {save_dir}')
ds.to_netcdf(save_dir)
return trainer.callback_metrics['valid_loss'].item()
@staticmethod
def add_project_specific_args(parent_parser: ArgumentParser) -> ArgumentParser:
parser = ArgumentParser(parents=[parent_parser], add_help=False)
parser.add_argument(
'--batch_size', default=240, type=int)
parser.add_argument(
'--data_path', default='./data/Synthetic4BookChap.nc', type=str)
parser.add_argument(
'--log_dir', default='./logs/experiment_01/', type=str)
return parser
def main(parser: ArgumentParser = None, **kwargs):
"""Use kwargs to overload argparse args."""
# ------------
# args
# ------------
if parser is None:
parser = ArgumentParser()
parser = Objective.add_project_specific_args(parser)
parser = pl.Trainer.add_argparse_args(parser)
parser = Q10Model.add_model_specific_args(parser)
parser.add_argument('--create_study', action='store_true', help='create new study (deletes old) and exits')
parser.add_argument('--single_seed', action='store_true', help='use only one seed instead of (1, ..., 10).')
args = parser.parse_args()
globargs = TRAINER_ARGS.copy()
globargs.update(kwargs)
for k, v in globargs.items():
setattr(args, k, v)
# ------------
# study setup
# ------------
search_space = {
'q10_init': [0.5, 1.5, 2.5],
'seed': [0] if args.single_seed else [i for i in range(10)],
'dropout': [0.0, 0.2, 0.4, 0.6],
'use_ta': [True, False]
}
sql_file = os.path.abspath(os.path.join(args.log_dir, "optuna.db"))
sql_path = f'sqlite:///{sql_file}'
if args.create_study | (not os.path.isfile(sql_file)):
if os.path.isdir(args.log_dir):
shutil.rmtree(args.log_dir)
os.makedirs(args.log_dir, exist_ok=True)
study = optuna.create_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space),
direction='minimize',
load_if_exists=False)
if args.create_study:
return None
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
# ------------
# run study
# ------------
n_trials = 1
for _, v in search_space.items():
n_trials *= len(v)
study = optuna.load_study(
study_name="q10hybrid",
storage=sql_path,
sampler=optuna.samplers.GridSampler(search_space))
study.optimize(Objective(args), n_trials=n_trials)
if __name__ == '__main__':
main() | en | 0.555881 | # Hardcoded `Trainer` args. Note that these cannot be passed via cli. # Further variables used in the hybrid model. # Target (multiple targets not possible currently). # Find variables that are only needed in physical model but not in NN. # ------------ # data # ------------ # Create empty xr.Dataset, will be used by the model to save predictions every epoch. # ------------ # model # ------------ # ------------ # training # ------------ # ------------ # testing # ------------ # trainer.test(test_dataloaders=test_loader) # ------------ # save results # ------------ # Store predictions. # Add some attributes that are required for analysis. # Save data. Use kwargs to overload argparse args. # ------------ # args # ------------ # ------------ # study setup # ------------ # ------------ # run study # ------------ | 2.040674 | 2 |
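A quick sanity check on the grid size implied by the search space in the record above: the figures below simply write out the product computed by the `n_trials` loop in `main()`.

# option counts taken from search_space in main()
q10_init_options = 3      # [0.5, 1.5, 2.5]
seed_options = 10         # range(10); 1 when --single_seed is passed
dropout_options = 4       # [0.0, 0.2, 0.4, 0.6]
use_ta_options = 2        # [True, False]
n_trials = q10_init_options * seed_options * dropout_options * use_ta_options
print(n_trials)           # 240 grid trials (24 with --single_seed)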
main.py | warifp/InstagramPostAndDelete | 4 | 9277 | #! @@Author : <NAME>
#! @@Create : 18 January 2019
#! @@Modify : 19 January 2019
#! Images are taken from reddit.
#! Use a VPN, because reddit's DNS has been blocked in Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
while True:
if (InstagramAPI.login()):
break
else:
for x in range(300):
os.system('cls')
print(300-x)
sleep(1)
global useable
useable = []
os.system('pause')
def get_image():
print("Memulai mendapatkan gambar ..")
json_raw = requests.get('https://www.reddit.com/r/me_irl/new/.json', headers = {'User-agent': 'Image_Testing_V3'}).json()
json_data = json_raw['data']
json_children = json_data['children']
for x in range(len(json_children)):
json_current = json_children[x]
json_current_data = json_current['data']
json_current_url = json_current_data['url']
if "https://i.redd.it/" not in json_current_url:
pass
else:
if json_current_url not in useable:
useable.append(json_current_url)
download()
else:
pass
def download():
print("Memulai download ..")
global filename
new_filename = ""
filename = useable[-1]
filename = filename.replace("https://i.redd.it/", "")
print(filename)
f = open(filename, 'wb')
f.write(requests.get(useable[-1]).content)
f.close()
if (filename[-3] + filename[-2] + filename[-1]) != 'jpg':
im = Image.open(filename)
for x in range(len(filename)-3):
new_filename = new_filename + filename[x]
im = im.convert("RGB")
im.save("edit" + new_filename + 'jpg')
new_filename = "edit" + new_filename + "jpg"
print(new_filename)
else:
new_filename = filename
upload(new_filename)
def delete_image(bad_file):
print("Memulai menghapus gambar ..")
if (bad_file[0] + bad_file[1] + bad_file[2] + bad_file[3]) == "edit":
png_bad_file = ''
for x in range(len(bad_file)-3):
png_bad_file = png_bad_file + bad_file[x]
png_bad_file = png_bad_file + "png"
try:
os.remove(png_bad_file)
except Exception as e:
pass
os.remove(bad_file)
delete_png()
print("Selesai.")
wait()
def upload(file):
print("Memulai upload ..")
caption = ""
InstagramAPI.uploadPhoto(file, caption=caption)
delete_image(file)
def wait():
for i in progressbar.progressbar(range(1800)):
sleep(1)
while True:
get_image()
print("Gambar sukses di upload.")
sleep(5)
os.system('pause')
| #! @@Author : <NAME>
#! @@Create : 18 January 2019
#! @@Modify : 19 January 2019
#! Images are taken from reddit.
#! Use a VPN, because reddit's DNS has been blocked in Indonesia.
import os
import json
import requests
import progressbar
from PIL import Image
from lxml import html
from time import sleep
from ImageDeleter import delete_png
from InstagramAPI import InstagramAPI
InstagramAPI = InstagramAPI(input("Username: "), input("Password: "))
while True:
if (InstagramAPI.login()):
break
else:
for x in range(300):
os.system('cls')
print(300-x)
sleep(1)
global useable
useable = []
os.system('pause')
def get_image():
print("Memulai mendapatkan gambar ..")
json_raw = requests.get('https://www.reddit.com/r/me_irl/new/.json', headers = {'User-agent': 'Image_Testing_V3'}).json()
json_data = json_raw['data']
json_children = json_data['children']
for x in range(len(json_children)):
json_current = json_children[x]
json_current_data = json_current['data']
json_current_url = json_current_data['url']
if "https://i.redd.it/" not in json_current_url:
pass
else:
if json_current_url not in useable:
useable.append(json_current_url)
download()
else:
pass
def download():
print("Memulai download ..")
global filename
new_filename = ""
filename = useable[-1]
filename = filename.replace("https://i.redd.it/", "")
print(filename)
f = open(filename, 'wb')
f.write(requests.get(useable[-1]).content)
f.close()
if (filename[-3] + filename[-2] + filename[-1]) != 'jpg':
im = Image.open(filename)
for x in range(len(filename)-3):
new_filename = new_filename + filename[x]
im = im.convert("RGB")
im.save("edit" + new_filename + 'jpg')
new_filename = "edit" + new_filename + "jpg"
print(new_filename)
else:
new_filename = filename
upload(new_filename)
def delete_image(bad_file):
print("Memulai menghapus gambar ..")
if (bad_file[0] + bad_file[1] + bad_file[2] + bad_file[3]) == "edit":
png_bad_file = ''
for x in range(len(bad_file)-3):
png_bad_file = png_bad_file + bad_file[x]
png_bad_file = png_bad_file + "png"
try:
os.remove(png_bad_file)
except Exception as e:
pass
os.remove(bad_file)
delete_png()
print("Selesai.")
wait()
def upload(file):
print("Memulai upload ..")
caption = ""
InstagramAPI.uploadPhoto(file, caption=caption)
delete_image(file)
def wait():
for i in progressbar.progressbar(range(1800)):
sleep(1)
while True:
get_image()
print("Gambar sukses di upload.")
sleep(5)
os.system('pause')
| id | 0.689184 | #! @@Author : <NAME> #! @@Create : 18 Januari 2019 #! @@Modify : 19 Januari 2019 #! Gambar dari reddit. #! Gunakan VPN karena DNS situs reddit sudah di blokir dari negara Indonesia. | 3.10056 | 3 |
pyspectator/collection.py | maximilionus/pyspectator-x | 39 | 9278 | from collections.abc import MutableMapping, Container
from datetime import datetime, timedelta
from pyvalid import accepts
class LimitedTimeTable(MutableMapping, Container):
def __init__(self, time_span):
self.__storage = dict()
self.__time_span = None
self.time_span = time_span
@property
def time_span(self):
return self.__time_span
@time_span.setter
@accepts(object, timedelta)
def time_span(self, value):
self.__time_span = value
@property
def oldest(self):
value = None
if self.__len__() > 0:
value = min(self.__storage.keys())
return value
@property
def newest(self):
value = None
if self.__len__() > 0:
value = max(self.__storage.keys())
return value
def oldest_keys(self, size):
for key in self.__get_slice(0, size):
yield key
def oldest_values(self, size):
for key in self.oldest_keys(size):
yield self.__storage.get(key)
def oldest_items(self, size):
for key in self.oldest_keys(size):
yield (key, self.__storage.get(key))
def newest_keys(self, size):
for key in self.__get_slice(-size, None):
yield key
def newest_values(self, size):
for key in self.newest_keys(size):
yield self.__storage.get(key)
def newest_items(self, size):
for key in self.newest_keys(size):
yield (key, self.__storage.get(key))
def __get_slice(self, start, end):
keys = sorted(self.keys())
return keys[start:end]
def __getitem__(self, item):
return self.__storage.__getitem__(item)
@accepts(object, datetime, object)
def __setitem__(self, key, value):
now = datetime.now()
if key > now:
raise ValueError('Can\'t set item from future!')
oldest = self.oldest
if (oldest is not None) and (oldest != key):
longest_time_span = now - oldest
# Item is too old for current timetable
if longest_time_span >= self.time_span:
self.__delitem__(oldest)
return self.__storage.__setitem__(key, value)
def __delitem__(self, key):
return self.__storage.__delitem__(key)
def __len__(self):
return self.__storage.__len__()
def __iter__(self):
return self.__storage.__iter__()
def __contains__(self, item):
return self.__storage.__contains__(item)
__all__ = ['LimitedTimeTable']
| from collections.abc import MutableMapping, Container
from datetime import datetime, timedelta
from pyvalid import accepts
class LimitedTimeTable(MutableMapping, Container):
def __init__(self, time_span):
self.__storage = dict()
self.__time_span = None
self.time_span = time_span
@property
def time_span(self):
return self.__time_span
@time_span.setter
@accepts(object, timedelta)
def time_span(self, value):
self.__time_span = value
@property
def oldest(self):
value = None
if self.__len__() > 0:
value = min(self.__storage.keys())
return value
@property
def newest(self):
value = None
if self.__len__() > 0:
value = max(self.__storage.keys())
return value
def oldest_keys(self, size):
for key in self.__get_slice(0, size):
yield key
def oldest_values(self, size):
for key in self.oldest_keys(size):
yield self.__storage.get(key)
def oldest_items(self, size):
for key in self.oldest_keys(size):
yield (key, self.__storage.get(key))
def newest_keys(self, size):
for key in self.__get_slice(-size, None):
yield key
def newest_values(self, size):
for key in self.newest_keys(size):
yield self.__storage.get(key)
def newest_items(self, size):
for key in self.newest_keys(size):
yield (key, self.__storage.get(key))
def __get_slice(self, start, end):
keys = sorted(self.keys())
return keys[start:end]
def __getitem__(self, item):
return self.__storage.__getitem__(item)
@accepts(object, datetime, object)
def __setitem__(self, key, value):
now = datetime.now()
if key > now:
raise ValueError('Can\'t set item from future!')
oldest = self.oldest
if (oldest is not None) and (oldest != key):
longest_time_span = now - oldest
# Item is too old for current timetable
if longest_time_span >= self.time_span:
self.__delitem__(oldest)
return self.__storage.__setitem__(key, value)
def __delitem__(self, key):
return self.__storage.__delitem__(key)
def __len__(self):
return self.__storage.__len__()
def __iter__(self):
return self.__storage.__iter__()
def __contains__(self, item):
return self.__storage.__contains__(item)
__all__ = ['LimitedTimeTable']
| en | 0.943502 | # Item is too old for current timetable | 2.491096 | 2 |
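A minimal usage sketch for the LimitedTimeTable defined in the record above; it assumes the class (and its pyvalid dependency) is importable, and the value strings are illustrative.

from datetime import datetime, timedelta

table = LimitedTimeTable(time_span=timedelta(minutes=10))
table[datetime.now()] = 'cpu=42%'      # keys must be datetimes no later than now
# on each insert, the oldest entry is evicted once it falls outside time_span
print(len(table), table.oldest, table.newest)
print(list(table.newest_values(1)))    # most recent stored value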
keyboardrow.py | AndySamoil/Elite_Code | 0 | 9279 | def findWords(self, words: List[str]) -> List[str]:
''' sets and iterate through sets
'''
every = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
ans = []
for word in words:
l = len(word)
for sett in every:
count = 0
for let in word:
if let.lower() in sett:
count += 1
if count == l:
ans.append(word)
return ans | def findWords(self, words: List[str]) -> List[str]:
''' sets and iterate through sets
'''
every = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
ans = []
for word in words:
l = len(word)
for sett in every:
count = 0
for let in word:
if let.lower() in sett:
count += 1
if count == l:
ans.append(word)
return ans | en | 0.966702 | sets and iterate through sets | 3.723266 | 4 |
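The record above stores a bare method: there is no class wrapper and List is never imported, so it does not run as-is. A self-contained version with the original logic kept intact might look like this; the Solution class name and the sample words are assumptions for illustration.

from typing import List

class Solution:
    def findWords(self, words: List[str]) -> List[str]:
        '''sets and iterate through sets'''
        every = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]
        ans = []
        for word in words:
            l = len(word)
            for sett in every:
                count = 0
                for let in word:
                    if let.lower() in sett:
                        count += 1
                if count == l:
                    ans.append(word)
        return ans

print(Solution().findWords(["Hello", "Alaska", "Dad", "Peace"]))  # ['Alaska', 'Dad']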
DFS_Backtracking/31. Next Permutation.py | xli1110/LC | 2 | 9280 | <gh_stars>1-10
class Solution:
def __init__(self):
self.res = []
self.path = []
def arr_to_num(self, arr):
s = ""
for x in arr:
s += str(x)
return int(s)
def find_position(self, nums):
for i in range(len(self.res)):
if self.res[i] == nums:
if i == len(self.res) - 1:
return 0
# we need the check below for duplicate elements in nums
# run nums = [1, 5, 1] and see the case
next_num = self.arr_to_num(self.res[i + 1])
if next_num > self.arr_to_num(nums):
return i + 1
raise Exception("The permutation function has something wrong, please debug it.")
def DFS(self, arr):
if not arr:
self.res.append(self.path[:])
return
for i in range(len(arr)):
self.path.append(arr[i])
self.DFS(arr[:i] + arr[i + 1:])
self.path.pop()
def nextPermutation(self, nums: [int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if not nums:
raise Exception("Empty Array")
# all permutations
# note that we need to SORT the array at first
arr = nums[:]
arr.sort()
self.DFS(arr)
# find position
position = self.find_position(nums)
# in-place replacement
for i in range(len(nums)):
nums[i] = self.res[position][i]
if __name__ == "__main__":
sol = Solution()
# nums = [2, 1, 3]
nums = [1, 5, 1]
sol.nextPermutation(nums)
print(sol.res)
| class Solution:
def __init__(self):
self.res = []
self.path = []
def arr_to_num(self, arr):
s = ""
for x in arr:
s += str(x)
return int(s)
def find_position(self, nums):
for i in range(len(self.res)):
if self.res[i] == nums:
if i == len(self.res) - 1:
return 0
# we need the check below for duplicate elements in nums
# run nums = [1, 5, 1] and see the case
next_num = self.arr_to_num(self.res[i + 1])
if next_num > self.arr_to_num(nums):
return i + 1
raise Exception("The permutation function has something wrong, please debug it.")
def DFS(self, arr):
if not arr:
self.res.append(self.path[:])
return
for i in range(len(arr)):
self.path.append(arr[i])
self.DFS(arr[:i] + arr[i + 1:])
self.path.pop()
def nextPermutation(self, nums: [int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
if not nums:
raise Exception("Empty Array")
# all permutations
# note that we need to SORT the array at first
arr = nums[:]
arr.sort()
self.DFS(arr)
# find position
position = self.find_position(nums)
# in-place replacement
for i in range(len(nums)):
nums[i] = self.res[position][i]
if __name__ == "__main__":
sol = Solution()
# nums = [2, 1, 3]
nums = [1, 5, 1]
sol.nextPermutation(nums)
print(sol.res) | en | 0.723942 | # we need the check below for duplicate elements in nums # run nums = [1, 5, 1] and see the case Do not return anything, modify nums in-place instead. # all permutations # note that we need to SORT the array at first # find position # in-place replacement # nums = [2, 1, 3] | 3.848518 | 4 |
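Tracing the `__main__` block of the record above: the distinct permutations of [1, 5, 1] in sorted order are [1, 1, 5], [1, 5, 1], [5, 1, 1], so the in-place result is the permutation that follows [1, 5, 1].

sol = Solution()
nums = [1, 5, 1]
sol.nextPermutation(nums)
print(nums)   # [5, 1, 1], since nums is modified in place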
plugin/DataExport/extend.py | konradotto/TS | 125 | 9281 | #!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved
import subprocess
import re
pluginName = 'DataExport'
pluginDir = ""
networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)
def test(bucket):
return bucket
def runProcess(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return iter(p.stdout.readline, b'')
def runProcessAndReturnLastLine(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout.readlines()[-1]
def backupDevices(bucket):
devices = ""
cmd = "mount -l -t " + supportedFS
for line in runProcess(cmd.split()):
line_arr = line.split()
folder = line_arr[2]
fstype = line_arr[4]
perms = line_arr[5]
if perms.find('w') != -1:
use = True
if fstype in localFS:
m = re.match('^(/media|/mnt)', folder)
if not m:
use = False
if use:
cmd2 = "df -h %s " % folder
df = runProcessAndReturnLastLine(cmd2.split())
avail = df.split()[3]  # 'Avail' column of the df -h data row
devices = devices + "<OPTION VALUE=\"" + folder + "\">" + folder + " (" + avail + " free, " + fstype + ")</option>"
return devices
| #!/usr/bin/python
# Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved
import subprocess
import re
pluginName = 'DataExport'
pluginDir = ""
networkFS = ["nfs", "cifs"]
localFS = ["ext4", "ext3", "xfs", "ntfs", "exfat", "vboxsf"]
supportedFS = ",".join(localFS + networkFS)
def test(bucket):
return bucket
def runProcess(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return iter(p.stdout.readline, b'')
def runProcessAndReturnLastLine(exe):
p = subprocess.Popen(exe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p.stdout.readlines()[-1]
def backupDevices(bucket):
devices = ""
cmd = "mount -l -t " + supportedFS
for line in runProcess(cmd.split()):
line_arr = line.split()
folder = line_arr[2]
fstype = line_arr[4]
perms = line_arr[5]
if perms.find('w') != -1:
use = True
if fstype in localFS:
m = re.match('^(/media|/mnt)', folder)
if not m:
use = False
if use:
cmd2 = "df -h %s " % folder
df = runProcessAndReturnLastLine(cmd2.split())
avail = df.split()[3]  # 'Avail' column of the df -h data row
devices = devices + "<OPTION VALUE=\"" + folder + "\">" + folder + " (" + avail + " free, " + fstype + ")</option>"
return devices
| en | 0.523136 | #!/usr/bin/python # Copyright (C) 2015 Ion Torrent Systems, Inc. All Rights Reserved | 2.054894 | 2 |
boids/biods_object.py | PaulAustin/sb7-pgz | 1 | 9282 | # Ported from JavaScript version to Python and Pygame Zero
# Designed to work well with mu-editor environment.
#
# The original JavaScript version was done by <NAME>
# at https://github.com/beneater/boids (MIT License)
# No endorsement implied.
#
# Complex numbers are used as vectors to integrate x and y positions and velocities
# MIT license (details in parent directory)
import random
import time
HEIGHT = 500 # window height
WIDTH = 900 # window width
MARGIN = 150 # distance from the edge at which avoidance starts
NUM_BOIDS = 75
VISUAL_RANGE = 70 # radius of influence for most algorithms
SPEED_LIMIT_UPPER = 13 # boids can only fly so fast.
SPEED_LIMIT_LOWER = 3 # boid will fall if flying too slow
SPEED_INIT = 20 # range for random velocity
MIN_DISTANCE = 10 # the distance to stay away from other boids
AVOID_FACTOR = 0.05 # % location change if too close
CENTERING_FACTOR = 0.050 # % location change to pull to center
MATCHING_FACTOR = 0.015 # % velocity change if close
MARGIN_FACTOR = 0.25+0.0j # rate of turning away from edge
HISTORY_LENGTH = 30
BACK_COLOR = (0, 0, 90)
BOID_COLOR = (255, 128, 128)
BOID_SIZE = 8
TRAIL_COLOR = (255, 255, 64)
g_boids = []
class Boid:
def __init__(boid) :
boid.loc = complex(
(random.randint(0, WIDTH)),
(random.randint(0, HEIGHT)))
boid.vel = complex(
(random.randint(-SPEED_INIT, SPEED_INIT)),
(random.randint(-SPEED_INIT, SPEED_INIT)))
boid.history = []
def keep_within_bounds(boid) :
# Constrain a boid to within the window. If it gets too close to an edge,
# nudge it back in and reverse its direction.
if (boid.loc.real < MARGIN):
boid.vel += MARGIN_FACTOR * 1.0
if (boid.loc.real > WIDTH - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0
if (boid.loc.imag < MARGIN) :
boid.vel += MARGIN_FACTOR * 1.0j
if (boid.loc.imag > HEIGHT - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0j
def fly_towards_center(boid):
# Find the center of mass of the other boids and
# adjust velocity slightly to point towards the
# center of mass.
center = 0+0j
num_neighbors = 0
for other_boid in g_boids :
if abs(boid.loc - other_boid.loc) < VISUAL_RANGE :
center += other_boid.loc
num_neighbors += 1
if num_neighbors > 0 :
center = center / num_neighbors
boid.loc += (center - boid.loc) * CENTERING_FACTOR
def avoid_others(boid):
# Move away from other boids that are too close to avoid colliding
move = 0+0j
for other_boid in g_boids :
if not (other_boid is boid) :
if abs(boid.loc - other_boid.loc) < MIN_DISTANCE :
move += boid.loc - other_boid.loc
boid.vel += move * AVOID_FACTOR
def match_velocity(boid):
# Find the average velocity (speed and direction)
# of the other boids and adjust velocity slightly to match.
avg_vel = 0+0j
num_neighbors = 0
for otherBoid in g_boids:
if abs(boid.loc - otherBoid.loc) < VISUAL_RANGE :
avg_vel += otherBoid.vel
num_neighbors += 1
if num_neighbors > 0:
avg_vel /= num_neighbors
boid.vel += (avg_vel - boid.vel) * MATCHING_FACTOR
def limit_speed(boid):
# Speed will naturally vary in flocking behavior,
# but real animals can't go arbitrarily fast (or slow)
speed = abs(boid.vel)
if (speed > SPEED_LIMIT_UPPER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_UPPER
if (speed < SPEED_LIMIT_LOWER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_LOWER
return
def draw(boid):
screen.draw.filled_circle((boid.loc.real, boid.loc.imag), BOID_SIZE, BOID_COLOR)
tail = boid.loc + boid.vel * -1.8
screen.draw.line(
(boid.loc.real, boid.loc.imag),
(tail.real, tail.imag),
BOID_COLOR)
def draw_trail(boid):
pt_from = (boid.loc.real, boid.loc.imag)
for p in boid.history:
pt_to = (p.real, p.imag)
screen.draw.line(pt_from, pt_to, TRAIL_COLOR)
pt_from = pt_to
def draw():
screen.fill(BACK_COLOR)
if keyboard.space:
for boid in g_boids:
boid.draw_trail()
for boid in g_boids:
boid.draw()
screen.draw.text("space:tails r:restart", (20, 20))
def update():
for boid in g_boids:
# Apply rules
boid.fly_towards_center()
boid.avoid_others()
boid.match_velocity()
boid.limit_speed()
boid.keep_within_bounds()
# Update the position based on the current velocity
boid.loc += boid.vel
boid.history.insert(0, boid.loc)
boid.history = boid.history[:HISTORY_LENGTH]
def init():
global g_boids
g_boids = [Boid() for _ in range(NUM_BOIDS)]
def on_key_down(key, mod, unicode):
if (key == keys.R):
init()
init()
# Ported from JavaScript version to Python and Pygame Zero
# Designed to work well with mu-editor environment.
#
# The original JavaScript version was done by <NAME>
# at https://github.com/beneater/boids (MIT License)
# No endorsement implied.
#
# Complex numbers are used as vectors to integrate x and y positions and velocities
# MIT license (details in parent directory)
import random
import time
HEIGHT = 500 # window height
WIDTH = 900 # window width
MARGIN = 150 # distance from the edge at which avoidance starts
NUM_BOIDS = 75
VISUAL_RANGE = 70 # radius of influence for most algorithms
SPEED_LIMIT_UPPER = 13 # boids can only fly so fast.
SPEED_LIMIT_LOWER = 3 # boid will fall if flying too slow
SPEED_INIT = 20 # range for random velocity
MIN_DISTANCE = 10 # the distance to stay away from other boids
AVOID_FACTOR = 0.05 # % location change if too close
CENTERING_FACTOR = 0.050 # % location change to pull to center
MATCHING_FACTOR = 0.015 # % velocity change if close
MARGIN_FACTOR = 0.25+0.0j # rate of turning away from edge
HISTORY_LENGTH = 30
BACK_COLOR = (0, 0, 90)
BOID_COLOR = (255, 128, 128)
BOID_SIZE = 8
TRAIL_COLOR = (255, 255, 64)
g_boids = []
class Boid:
def __init__(boid) :
boid.loc = complex(
(random.randint(0, WIDTH)),
(random.randint(0, HEIGHT)))
boid.vel = complex(
(random.randint(-SPEED_INIT, SPEED_INIT)),
(random.randint(-SPEED_INIT, SPEED_INIT)))
boid.history = []
def keep_within_bounds(boid) :
# Constrain a boid to within the window. If it gets too close to an edge,
# nudge it back in and reverse its direction.
if (boid.loc.real < MARGIN):
boid.vel += MARGIN_FACTOR * 1.0
if (boid.loc.real > WIDTH - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0
if (boid.loc.imag < MARGIN) :
boid.vel += MARGIN_FACTOR * 1.0j
if (boid.loc.imag > HEIGHT - MARGIN) :
boid.vel += MARGIN_FACTOR * -1.0j
def fly_towards_center(boid):
# Find the center of mass of the other boids and
# adjust velocity slightly to point towards the
# center of mass.
center = 0+0j
num_neighbors = 0
for other_boid in g_boids :
if abs(boid.loc - other_boid.loc) < VISUAL_RANGE :
center += other_boid.loc
num_neighbors += 1
if num_neighbors > 0 :
center = center / num_neighbors
boid.loc += (center - boid.loc) * CENTERING_FACTOR
def avoid_others(boid):
# Move away from other boids that are too close to avoid colliding
move = 0+0j
for other_boid in g_boids :
if not (other_boid is boid) :
if abs(boid.loc - other_boid.loc) < MIN_DISTANCE :
move += boid.loc - other_boid.loc
boid.vel += move * AVOID_FACTOR
def match_velocity(boid):
# Find the average velocity (speed and direction)
# of the other boids and adjust velocity slightly to match.
avg_vel = 0+0j
num_neighbors = 0
for otherBoid in g_boids:
if abs(boid.loc - otherBoid.loc) < VISUAL_RANGE :
avg_vel += otherBoid.vel
num_neighbors += 1
if num_neighbors > 0:
avg_vel /= num_neighbors
boid.vel += (avg_vel - boid.vel) * MATCHING_FACTOR
def limit_speed(boid):
# Speed will naturally vary in flocking behavior,
# but real animals can't go arbitrarily fast (or slow)
speed = abs(boid.vel)
if (speed > SPEED_LIMIT_UPPER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_UPPER
if (speed < SPEED_LIMIT_LOWER) :
boid.vel = boid.vel / speed * SPEED_LIMIT_LOWER
return
def draw(boid):
screen.draw.filled_circle((boid.loc.real, boid.loc.imag), BOID_SIZE, BOID_COLOR)
tail = boid.loc + boid.vel * -1.8
screen.draw.line(
(boid.loc.real, boid.loc.imag),
(tail.real, tail.imag),
BOID_COLOR)
def draw_trail(boid):
pt_from = (boid.loc.real, boid.loc.imag)
for p in boid.history:
pt_to = (p.real, p.imag)
screen.draw.line(pt_from, pt_to, TRAIL_COLOR)
pt_from = pt_to
def draw():
screen.fill(BACK_COLOR)
if keyboard.space:
for boid in g_boids:
boid.draw_trail()
for boid in g_boids:
boid.draw()
screen.draw.text("space:tails r:restart", (20, 20))
def update():
for boid in g_boids:
# Apply rules
boid.fly_towards_center()
boid.avoid_others()
boid.match_velocity()
boid.limit_speed()
boid.keep_within_bounds()
# Update the position based on the current velocity
boid.loc += boid.vel
boid.history.insert(0, boid.loc)
boid.history = boid.history[:HISTORY_LENGTH]
def init():
global g_boids
g_boids = [Boid() for _ in range(NUM_BOIDS)]
def on_key_down(key, mod, unicode):
if (key == keys.R):
init()
init()
| en | 0.873752 | # Ported from JavaSript version to Python and Pygame Zero # Designed to work well with mu-editor environment. # # The original Javascript version wasdonw by <NAME> # at https://github.com/beneater/boids (MIT License) # No endorsement implied. # # Complex numbers are are used as vectors to integrate x and y positions and velocities # MIT licesense (details in parent directory) # window height # window width # disstance to start avoid edge # radius of influence for most algoriths # boids canonly fly so fast. # boid will fall if flying too slow # range for random velocity # the distance to stay away from other boids # % location change if too close # % location change to pull to center # % velocity change if close # rate of turning away from edge # Constrain a boid to within the window. If it gets too close to an edge, # nudge it back in and reverse its direction. # Find the center of mass of the other boids and # adjust velocity slightly to point towards the # center of mass. # Move away from other boids that are too close to avoid colliding # Find the average velocity (speed and direction) # of the other boids and adjust velocity slightly to match. # Speed will naturally vary in flocking behavior, # but real animals can't go arbitrarily fast (or slow) # Apply rules # Update the position based on the current velocity | 2.770236 | 3 |
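A short aside on the complex-number trick used throughout the record above: the real part plays the role of x and the imaginary part of y, so a single addition integrates both axes. Note that the script itself depends on Pygame Zero's implicit screen and keyboard globals, so it is normally launched with pgzrun rather than plain python.

loc = complex(100, 50)     # x=100, y=50
vel = complex(3, -2)       # dx=3, dy=-2
loc += vel                 # one operation updates both coordinates
print(loc.real, loc.imag)  # 103.0 48.0
print(abs(vel))            # vector magnitude, as used in limit_speed()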
upoutdf/types/recurring/yearly.py | UpOut/UpOutDF | 0 | 9283 | <filename>upoutdf/types/recurring/yearly.py<gh_stars>0
# coding: utf-8
import pytz
from dateutil.relativedelta import relativedelta
from .base import BaseRecurring
from upoutdf.occurences import OccurenceBlock, OccurenceGroup
from upoutdf.constants import YEARLY_TYPE
class YearlyType(BaseRecurring):
year_day = None
required_attributes = [
'every',
'timezone',
'starting_time',
'lasting_seconds',
'type',
'starting_date'
]
def increment_by(self):
return relativedelta(years=+self.every)
def _snap_datetime(self,datetime,yearday):
if datetime is None:
return None
snapper = self.snapping_class(self.timezone)
return snapper.snap_to_year_day(datetime,yearday)
def _canonicalize_date(self,date):
if not date.tzinfo:
date = date.replace(tzinfo=pytz.utc)
if date.tzinfo != self.timezone:
date = self.timezone.normalize(date.astimezone(self.timezone))
return date
def canonicalize(self):
canonical = "every %s year" % self.every
if self.year_day is not None:
canonical = "%s day %s" % (
canonical,
self.year_day
)
#(starting <datetimestring>) (ending <datetimestring>)
if not self.starting_date_infinite:
starting_date = self._canonicalize_date(self.starting_date)
canonical = "%s starting %s" % (
canonical,
starting_date.strftime("_%m/%d/%Y")
)
if not self.ending_date_infinite:
ending_date = self._canonicalize_date(self.ending_date)
canonical = "%s ending %s" % (
canonical,
ending_date.strftime("_%m/%d/%Y")
)
if self.repeating_count is not None:
canonical = "%s repeating %s times" % (
canonical,
self.repeating_count
)
starting_time = self._canonicalize_date(self.starting_time)
canonical = "%s at %s" % (
canonical,
starting_time.strftime("%-I:%M%p")
)
canonical = "%s lasting %s seconds in %s" % (
canonical,
self.lasting_seconds,
str(self.timezone)
)
return canonical
def occurences(self):
if not self.verify_parsed():
raise RuntimeError("Please call parse before calling occurences")
ending = self.ending_date
repeating_count = self.repeating_count
ending_date_infinite = self.ending_date_infinite
if repeating_count is not None:
ending_date_infinite = False
if ending is not None:
ending = self._set_start_time(ending)
ending = self._strip_microseconds(ending)
occurence_start = self.starting_date
if self.year_day is not None:
try:
occurence_start = self._snap_datetime(self.starting_date,self.year_day)
except ValueError:
#If we had a problem, try the next year
occurence_start = self._snap_datetime(
self.starting_date+relativedelta(years=+1),
self.year_day
)
occurence_start = self._set_start_time(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
occurence_block = OccurenceBlock(
starting_date=occurence_start,
ending_date=None,
starting_date_infinite=self.starting_date_infinite,
ending_date_infinite=ending_date_infinite,
typeobj=self
)
repeated = 1
occurence_end = None
#While we're before the end date (if we have it)
#And we're before the max repetitions (if we have it)
while ((ending is None or occurence_start <= ending)
and (repeating_count is None or repeated <= repeating_count)):
occurence_end = self._get_end_datetime(occurence_start)
occurence_end = self._strip_microseconds(occurence_end)
occurence_block.add_occurence(occurence_start,occurence_end)
occurence_start = self._increment_occurence(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
repeated+=1
occurence_block.ending_date = occurence_end
#We always return a OccurenceGroup, even if just 1
return OccurenceGroup(blocks=[occurence_block])
def _parse_type(self,tokens):
if tokens[0] == 'day':
tokens = self._step_tokens(tokens)
try:
self.year_day = int(tokens[0])
except ValueError:
raise ValueError("Invalid year day")
tokens = self._step_tokens(tokens)
self.type = YEARLY_TYPE
return tokens
| <filename>upoutdf/types/recurring/yearly.py<gh_stars>0
# coding: utf-8
import pytz
from dateutil.relativedelta import relativedelta
from .base import BaseRecurring
from upoutdf.occurences import OccurenceBlock, OccurenceGroup
from upoutdf.constants import YEARLY_TYPE
class YearlyType(BaseRecurring):
year_day = None
required_attributes = [
'every',
'timezone',
'starting_time',
'lasting_seconds',
'type',
'starting_date'
]
def increment_by(self):
return relativedelta(years=+self.every)
def _snap_datetime(self,datetime,yearday):
if datetime is None:
return None
snapper = self.snapping_class(self.timezone)
return snapper.snap_to_year_day(datetime,yearday)
def _canonicalize_date(self,date):
if not date.tzinfo:
date = date.replace(tzinfo=pytz.utc)
if date.tzinfo != self.timezone:
date = self.timezone.normalize(date.astimezone(self.timezone))
return date
def canonicalize(self):
canonical = "every %s year" % self.every
if self.year_day is not None:
canonical = "%s day %s" % (
canonical,
self.year_day
)
#(starting <datetimestring>) (ending <datetimestring>)
if not self.starting_date_infinite:
starting_date = self._canonicalize_date(self.starting_date)
canonical = "%s starting %s" % (
canonical,
starting_date.strftime("_%m/%d/%Y")
)
if not self.ending_date_infinite:
ending_date = self._canonicalize_date(self.ending_date)
canonical = "%s ending %s" % (
canonical,
ending_date.strftime("_%m/%d/%Y")
)
if self.repeating_count is not None:
canonical = "%s repeating %s times" % (
canonical,
self.repeating_count
)
starting_time = self._canonicalize_date(self.starting_time)
canonical = "%s at %s" % (
canonical,
starting_time.strftime("%-I:%M%p")
)
canonical = "%s lasting %s seconds in %s" % (
canonical,
self.lasting_seconds,
str(self.timezone)
)
return canonical
def occurences(self):
if not self.verify_parsed():
raise RuntimeError("Please call parse before calling occurences")
ending = self.ending_date
repeating_count = self.repeating_count
ending_date_infinite = self.ending_date_infinite
if repeating_count is not None:
ending_date_infinite = False
if ending is not None:
ending = self._set_start_time(ending)
ending = self._strip_microseconds(ending)
occurence_start = self.starting_date
if self.year_day is not None:
try:
occurence_start = self._snap_datetime(self.starting_date,self.year_day)
except ValueError:
#If we had a problem, try the next year
occurence_start = self._snap_datetime(
self.starting_date+relativedelta(years=+1),
self.year_day
)
occurence_start = self._set_start_time(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
occurence_block = OccurenceBlock(
starting_date=occurence_start,
ending_date=None,
starting_date_infinite=self.starting_date_infinite,
ending_date_infinite=ending_date_infinite,
typeobj=self
)
repeated = 1
occurence_end = None
#While we're before the end date (if we have it)
#And we're before the max repetitions (if we have it)
while ((ending is None or occurence_start <= ending)
and (repeating_count is None or repeated <= repeating_count)):
occurence_end = self._get_end_datetime(occurence_start)
occurence_end = self._strip_microseconds(occurence_end)
occurence_block.add_occurence(occurence_start,occurence_end)
occurence_start = self._increment_occurence(occurence_start)
occurence_start = self._strip_microseconds(occurence_start)
repeated+=1
occurence_block.ending_date = occurence_end
#We always return a OccurenceGroup, even if just 1
return OccurenceGroup(blocks=[occurence_block])
def _parse_type(self,tokens):
if tokens[0] == 'day':
tokens = self._step_tokens(tokens)
try:
self.year_day = int(tokens[0])
except ValueError:
raise ValueError("Invalid year day")
tokens = self._step_tokens(tokens)
self.type = YEARLY_TYPE
return tokens
| en | 0.800906 | # coding: utf-8 #(starting <datetimestring>) (ending <datetimestring>) #If we had a problem, try the next year #While we're before the end date (if we have it) #And we're before the max repetetions (if we have it) #We always return a OccurenceGroup, even if just 1 | 2.4085 | 2 |
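To make the canonicalize() format above concrete, a fully specified YearlyType would serialize to a single line like the one below. The values are illustrative, pieced together from the format strings in the method; the leading underscore before each date comes from the `_%m/%d/%Y` format.

# hypothetical output of YearlyType.canonicalize()
canonical = ("every 1 year day 45 starting _02/14/2017 ending _02/14/2020 "
             "repeating 3 times at 9:30AM lasting 3600 seconds in UTC")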
project/urls.py | dbinetti/captable | 18 | 9284 | <reponame>dbinetti/captable
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='about.html'), name='about'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('apps.captable.urls',)),
)
urlpatterns += staticfiles_urlpatterns()
| from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^about/$', TemplateView.as_view(template_name='about.html'), name='about'),
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('apps.captable.urls',)),
)
urlpatterns += staticfiles_urlpatterns() | none | 1 | 2.02493 | 2 |
common/evaluators/bert_emotion_evaluator.py | marjanhs/procon20 | 5 | 9285 | import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \
convert_examples_to_hierarchical_features
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
from utils.emotion import Emotion
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
class BertEvaluator(object):
def __init__(self, model, processor, args, split='dev'):
self.args = args
self.model = model
self.processor = processor
self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters)
if split == 'test':
self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name)
elif split == 'dev':
self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name)
else:
self.eval_examples = self.processor.get_any_examples(args.data_dir, split)
def get_scores(self, silent=False, return_indices=False):
all_indices = []
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
unpadded_emotion_scores = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_scores, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
total_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
predicted_labels, target_labels = list(), list()
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
if return_indices:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, return_indices=return_indices)
else:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids)
if isinstance(outs, tuple):
outs, _ = outs
if return_indices:
logits, indices = outs
all_indices.extend(indices.cpu().detach().numpy())
else:
logits = outs
if self.args.is_multilabel:
predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy())
target_labels.extend(label_ids.cpu().detach().numpy())
loss = F.binary_cross_entropy_with_logits(logits, label_ids.float(), size_average=False)
average, average_mac = 'micro', 'macro'
else:
predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy())
target_labels.extend(torch.argmax(label_ids, dim=1).cpu().detach().numpy())
loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1))
average, average_mac = 'binary', 'binary'
if self.args.n_gpu > 1:
loss = loss.mean()
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
total_loss += loss.item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels)
accuracy = metrics.accuracy_score(target_labels, predicted_labels)
precision = metrics.precision_score(target_labels, predicted_labels, average=average)
recall = metrics.recall_score(target_labels, predicted_labels, average=average)
avg_loss = total_loss / nb_eval_steps
hamming_loss = metrics.hamming_loss(target_labels, predicted_labels)
jaccard_score = metrics.jaccard_score(target_labels, predicted_labels, average=average)
f1_micro = metrics.f1_score(target_labels, predicted_labels, average=average)
f1_macro = metrics.f1_score(target_labels, predicted_labels, average=average_mac)
if return_indices:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels, all_indices],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels', 'all_indices']
else:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels']
def get_bert_layers(self, silent=False, last_bert_layers=-1):
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
unpadded_emotion_ids = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_ids, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
bert_layers_l, label_ids_l = [], []
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
bert_layers = self.model.get_bert_embedding(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, last_bert_layers=last_bert_layers)
label_ids = torch.argmax(label_ids, dim=1).cpu().detach().numpy()
bert_layers_l.extend(bert_layers)
label_ids_l.extend(label_ids)
bert_layers_l = torch.stack(bert_layers_l, dim=0)
return bert_layers_l, label_ids_l
| import warnings
import numpy as np
import torch
import torch.nn.functional as F
from sklearn import metrics
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from tqdm import tqdm
from datasets.bert_processors.abstract_processor import convert_examples_to_features_with_emotion, \
convert_examples_to_hierarchical_features
from utils.preprocessing import pad_input_matrix
from utils.tokenization import BertTokenizer
from utils.emotion import Emotion
# Suppress warnings from sklearn.metrics
warnings.filterwarnings('ignore')
class BertEvaluator(object):
def __init__(self, model, processor, args, split='dev'):
self.args = args
self.model = model
self.processor = processor
self.tokenizer = BertTokenizer.from_pretrained(args.model, is_lowercase=args.is_lowercase)
self.emotioner = Emotion(args.nrc_path, args.max_em_len, args.emotion_filters)
if split == 'test':
self.eval_examples = self.processor.get_test_examples(args.data_dir, args.test_name)
elif split == 'dev':
self.eval_examples = self.processor.get_dev_examples(args.data_dir, args.dev_name)
else:
self.eval_examples = self.processor.get_any_examples(args.data_dir, split)
def get_scores(self, silent=False, return_indices=False):
all_indices = []
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
unpadded_emotion_scores = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_scores, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
total_loss = 0
nb_eval_steps, nb_eval_examples = 0, 0
predicted_labels, target_labels = list(), list()
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
if return_indices:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, return_indices=return_indices)
else:
outs = self.model(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids)
if isinstance(outs, tuple):
outs, _ = outs
if return_indices:
logits, indices = outs
all_indices.extend(indices.cpu().detach().numpy())
else:
logits = outs
if self.args.is_multilabel:
predicted_labels.extend(F.sigmoid(logits).round().long().cpu().detach().numpy())
target_labels.extend(label_ids.cpu().detach().numpy())
loss = F.binary_cross_entropy_with_logits(logits, label_ids.float(), size_average=False)
average, average_mac = 'micro', 'macro'
else:
predicted_labels.extend(torch.argmax(logits, dim=1).cpu().detach().numpy())
target_labels.extend(torch.argmax(label_ids, dim=1).cpu().detach().numpy())
loss = F.cross_entropy(logits, torch.argmax(label_ids, dim=1))
average, average_mac = 'binary', 'binary'
if self.args.n_gpu > 1:
loss = loss.mean()
if self.args.gradient_accumulation_steps > 1:
loss = loss / self.args.gradient_accumulation_steps
total_loss += loss.item()
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
predicted_labels, target_labels = np.array(predicted_labels), np.array(target_labels)
accuracy = metrics.accuracy_score(target_labels, predicted_labels)
precision = metrics.precision_score(target_labels, predicted_labels, average=average)
recall = metrics.recall_score(target_labels, predicted_labels, average=average)
avg_loss = total_loss / nb_eval_steps
hamming_loss = metrics.hamming_loss(target_labels, predicted_labels)
jaccard_score = metrics.jaccard_score(target_labels, predicted_labels, average=average)
f1_micro = metrics.f1_score(target_labels, predicted_labels, average=average)
f1_macro = metrics.f1_score(target_labels, predicted_labels, average=average_mac)
if return_indices:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels, all_indices],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels', 'all_indices']
else:
return [accuracy, precision, recall, f1_micro, avg_loss, f1_macro, hamming_loss, jaccard_score, predicted_labels, target_labels],\
['accuracy', 'precision', 'recall', 'f1_micro', 'avg_loss', 'f1_macro', 'hamming_loss', 'jaccard', 'predicted_labels', 'target_labels']
def get_bert_layers(self, silent=False, last_bert_layers=-1):
if self.args.is_hierarchical:
eval_features = convert_examples_to_hierarchical_features(
self.eval_examples, self.args.max_seq_length, self.tokenizer)
else:
eval_features = convert_examples_to_features_with_emotion(
self.eval_examples, self.args.max_seq_length, self.tokenizer, self.emotioner)
unpadded_input_ids = [f.input_ids for f in eval_features]
unpadded_input_mask = [f.input_mask for f in eval_features]
unpadded_segment_ids = [f.segment_ids for f in eval_features]
unpadded_emotion_ids = [f.sentiment_scores for f in eval_features]
if self.args.is_hierarchical:
pad_input_matrix(unpadded_input_ids, self.args.max_doc_length)
pad_input_matrix(unpadded_input_mask, self.args.max_doc_length)
pad_input_matrix(unpadded_segment_ids, self.args.max_doc_length)
padded_input_ids = torch.tensor(unpadded_input_ids, dtype=torch.long)
padded_input_mask = torch.tensor(unpadded_input_mask, dtype=torch.long)
padded_segment_ids = torch.tensor(unpadded_segment_ids, dtype=torch.long)
padded_emotion_ids = torch.tensor(unpadded_emotion_ids, dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
eval_data = TensorDataset(padded_input_ids, padded_input_mask, padded_segment_ids, padded_emotion_ids, label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=self.args.batch_size)
self.model.eval()
bert_layers_l, label_ids_l = [], []
for input_ids, input_mask, segment_ids, emotion_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating", disable=silent):
input_ids = input_ids.to(self.args.device)
input_mask = input_mask.to(self.args.device)
segment_ids = segment_ids.to(self.args.device)
emotion_ids = emotion_ids.to(self.args.device)
label_ids = label_ids.to(self.args.device)
with torch.no_grad():
bert_layers = self.model.get_bert_embedding(input_ids, segment_ids, input_mask, emotion_ids=emotion_ids, last_bert_layers=last_bert_layers)
label_ids = torch.argmax(label_ids, dim=1).cpu().detach().numpy()
bert_layers_l.extend(bert_layers)
label_ids_l.extend(label_ids)
bert_layers_l = torch.stack(bert_layers_l, dim=0)
return bert_layers_l, label_ids_l
| en | 0.580672 | # Suppress warnings from sklearn.metrics | 2.21914 | 2 |
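A hedged sketch of how the evaluator above is typically driven. The model, processor and args objects come from the surrounding training code, which is not part of this record, so every name here is illustrative.

# model, processor and args are assumed to be built elsewhere in the project
evaluator = BertEvaluator(model, processor, args, split='dev')
scores, names = evaluator.get_scores(silent=True)
print(dict(zip(names, scores)))                              # accuracy, precision, recall, f1_micro, ...
embeddings, labels = evaluator.get_bert_layers(silent=True)  # per-example BERT-layer features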
model/mlp1.py | andrearosasco/DistilledReplay | 7 | 9286 | import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, config):
super(Model, self).__init__()
self.drop = nn.Dropout(config['dropout'])
self.fc1 = nn.Linear(784, 2000)
self.fc2 = nn.Linear(2000, 2000)
self.fc3 = nn.Linear(2000, 2000)
self.fc4 = nn.Linear(2000, 2000)
self.fc5 = nn.Linear(2000, 10)
def forward(self, x):
# 784 -> 2000
x = F.relu(self.drop(self.fc1(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc2(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc3(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc4(x)))
# 2000 -> 10
x = self.fc5(x)
return x | import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self, config):
super(Model, self).__init__()
self.drop = nn.Dropout(config['dropout'])
self.fc1 = nn.Linear(784, 2000)
self.fc2 = nn.Linear(2000, 2000)
self.fc3 = nn.Linear(2000, 2000)
self.fc4 = nn.Linear(2000, 2000)
self.fc5 = nn.Linear(2000, 10)
def forward(self, x):
# 784 -> 2000
x = F.relu(self.drop(self.fc1(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc2(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc3(x)))
# 2000 -> 2000
x = F.relu(self.drop(self.fc4(x)))
# 2000 -> 10
x = self.fc5(x)
return x | en | 0.232787 | # 784 -> 2000 # 2000 -> 2000 # 2000 -> 2000 # 2000 -> 2000 # 2000 -> 100 | 2.857979 | 3 |
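A minimal forward-pass sketch for the MLP above. The constructor only reads the 'dropout' key of its config dict; the batch size and dropout value below are arbitrary.

import torch

model = Model({'dropout': 0.5})
x = torch.randn(32, 784)      # a batch of flattened 28x28 inputs
logits = model(x)             # shape (32, 10)
print(logits.shape)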
netbox/ipam/managers.py | aslafy-z/netbox | 1 | 9287 | <filename>netbox/ipam/managers.py
from django.db import models
from ipam.lookups import Host, Inet
class IPAddressManager(models.Manager):
def get_queryset(self):
"""
By default, PostgreSQL will order INETs with shorter (larger) prefix lengths ahead of those with longer
(smaller) masks. This makes no sense when ordering IPs, which should be ordered solely by family and host
address. We can use HOST() to extract just the host portion of the address (ignoring its mask), but we must
then re-cast this value to INET() so that records will be ordered properly. We are essentially re-casting each
IP address as a /32 or /128.
"""
qs = super().get_queryset()
return qs.order_by(Inet(Host('address')))
| <filename>netbox/ipam/managers.py
from django.db import models
from ipam.lookups import Host, Inet
class IPAddressManager(models.Manager):
def get_queryset(self):
"""
By default, PostgreSQL will order INETs with shorter (larger) prefix lengths ahead of those with longer
(smaller) masks. This makes no sense when ordering IPs, which should be ordered solely by family and host
address. We can use HOST() to extract just the host portion of the address (ignoring its mask), but we must
then re-cast this value to INET() so that records will be ordered properly. We are essentially re-casting each
IP address as a /32 or /128.
"""
qs = super().get_queryset()
return qs.order_by(Inet(Host('address')))
| en | 0.942583 | By default, PostgreSQL will order INETs with shorter (larger) prefix lengths ahead of those with longer (smaller) masks. This makes no sense when ordering IPs, which should be ordered solely by family and host address. We can use HOST() to extract just the host portion of the address (ignoring its mask), but we must then re-cast this value to INET() so that records will be ordered properly. We are essentially re-casting each IP address as a /32 or /128. | 2.47511 | 2 |
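For context on how a manager like the one above gets used: a model assigns it to `objects`, and default querysets then come back ordered by host address. The model below is an illustrative stand-in, not NetBox's actual IPAddress definition.

from django.db import models
from ipam.managers import IPAddressManager

class IPAddress(models.Model):                # illustrative stand-in
    address = models.GenericIPAddressField()
    objects = IPAddressManager()              # querysets ordered by INET(HOST(address))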
train.py | VArdulov/learning-kis | 0 | 9288 | #!/usr/bin/env python
# coding: utf-8
""" Learning Koopman Invariant Subspace
(c) <NAME>, 2017.
<EMAIL>
"""
import numpy as np
np.random.seed(1234567890)
from argparse import ArgumentParser
from os import path
import time
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss
from torch import device, save, manual_seed
from torch.optim import SGD
import matplotlib.pyplot as plt
import seaborn as sns
# -- Parse arguments
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batchs for break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or no")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting")
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
parser.add_argument("--validation-data-path", "-v", type=str, default="")
#ToDo: Implement
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")
if __name__ == "__main__":
# grab the command line arguments
cli_args = parser.parse_args()
manual_seed(216)
# find and load the training data
data_path = cli_args.data_path
print(f"Loading training data from {data_path}")
data_train = np.load(data_path)
if len(data_train.shape) == 1:
data_train = data_train.reshape(-1, 1)
print(f"Loaded a dataset with dimension: {data_train.shape}")
validate = cli_args.validation_data_path != ""
data_val = None
if validate:
data_path = cli_args.validation_data_path
print(f"Loading validation data from {data_path}")
data_val = np.load(data_path)
# process the delay either set by the user or is set to one 10th of the data
delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)
# based on the number of batches, delay, and size of the data compute the samples per batch
samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches
# construct the data preparer
batch_iterator = TimeSeriesBatchMaker(
y=data_train,
batch_size=samples_per_batch,
max_lag=delay
)
if validate:
val_batch_iterator = TimeSeriesBatchMaker(
y=data_val,
max_lag=delay
)
# construct the end-to-end model
lkis = KoopmanInvariantSubspaceLearner(
observable_dim=data_train.shape[1],
latent_dim=cli_args.state_space,
intermediate_observable=cli_args.intermediate_observable,
delay=delay
)
if cli_args.gpu:
device = device("cuda")
# initialize the optimizer
optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)
losses = []
val_losses = []
for epoch in range(cli_args.epochs):
loss = 0
for b in range(cli_args.num_batches):
optimizer.zero_grad()
time_delayed_ys, y_true = next(batch_iterator)
if cli_args.gpu:
time_delayed_ys.to(device)
y_true.to(device)
g_pred, y_pred = lkis(time_delayed_ys)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
batch_loss.backward()
optimizer.step()
loss += batch_loss.item()
# display the epoch training loss
print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
losses.append(loss)
if validate:
y_time_delayed_val, y_true = next(val_batch_iterator)
if cli_args.gpu:
y_time_delayed_val.to(device)
y_true.to(device)
g_pred, y_pred = lkis(y_time_delayed_val)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
val_loss = batch_loss.item()
print(f"\tval-loss = {val_loss:.6f}")
val_losses.append(val_loss)
if cli_args.save_model:
save(lkis, f"{cli_args.name}.torch.mdl")
if cli_args.save_training_plot:
sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
if validate:
sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
plt.xlabel("Epochs")
plt.ylabel("Combined Reconstruction and DMD Loss")
plt.title(f"Training Loss for {cli_args.name}")
plt.savefig(f"{cli_args.name}-training-loss.png")
| #!/usr/bin/env python
# coding: utf-8
""" Learning Koopman Invariant Subspace
(c) <NAME>, 2017.
<EMAIL>
"""
import numpy as np
np.random.seed(1234567890)
from argparse import ArgumentParser
from os import path
import time
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner
from losses import combined_loss
from torch import device, save, manual_seed
from torch.optim import SGD
import matplotlib.pyplot as plt
import seaborn as sns
# -- Parse arguments
t = time.time()
parser = ArgumentParser(description='Learning Koopman Invariant Subspace (Now with PyTorch!)')
parser.add_argument("--name", "-n", type=str, default=f"lkis-{int(time.time())}", help="name of experiment")
parser.add_argument("--data-path", type=str, default="./train.npy", help="time-series data to model")
parser.add_argument("--epochs", "-e", type=int, default=1000, help="number of epochs to train for")
parser.add_argument("--num-batches", "-b", type=int, default=1, help="how many batchs for break the data up into")
parser.add_argument("--gpu", action="store_true", default=False, help="use a GPU or no")
parser.add_argument("--intermediate-observable", "-i", type=int, default=-1, help="intermediate dimensional observation space")
parser.add_argument("--save-model", "-m", action="store_true", default=False, help="whether or not you want the model saved to $name$.torch.mdl")
parser.add_argument("--save-training-plot", "-p", action="store_true", default=False, help="where to save plotting")
parser.add_argument("--max-lag", "-l", type=int, default=-1, help="maximum_lag")
parser.add_argument("--state-space", "-s", type=int, default=1, help="dimensionality of the underlying state space")
parser.add_argument("--alpha", "-a", type=float, default=1.0, help="value to score the reconstruction loss by")
parser.add_argument("--learning-rate", "-r", type=float, default=0.001, help="Optimizer learning rate")
parser.add_argument("--validation-data-path", "-v", type=str, default="")
#ToDo: Implement
parser.add_argument("--dmd", action="store_true", default=False, help="Execute and save the DMD on the training set")
if __name__ == "__main__":
# grab the command line arguments
cli_args = parser.parse_args()
manual_seed(216)
# find and load the training data
data_path = cli_args.data_path
print(f"Loading training data from {data_path}")
data_train = np.load(data_path)
if len(data_train.shape) == 1:
data_train = data_train.reshape(-1, 1)
print(f"Loaded a dataset with dimension: {data_train.shape}")
validate = cli_args.validation_data_path != ""
data_val = None
if validate:
data_path = cli_args.validation_data_path
print(f"Loading validation data from {data_path}")
data_val = np.load(data_path)
# process the delay either set by the user or is set to one 10th of the data
delay = cli_args.max_lag if cli_args.max_lag > 0 else (data_train.shape[0] // 10)
# based on the number of batches, delay, and size of the data compute the samples per batch
samples_per_batch = (data_train.shape[0] - delay) // cli_args.num_batches
# construct the data preparer
batch_iterator = TimeSeriesBatchMaker(
y=data_train,
batch_size=samples_per_batch,
max_lag=delay
)
if validate:
val_batch_iterator = TimeSeriesBatchMaker(
y=data_val,
max_lag=delay
)
# construct the end-to-end model
lkis = KoopmanInvariantSubspaceLearner(
observable_dim=data_train.shape[1],
latent_dim=cli_args.state_space,
intermediate_observable=cli_args.intermediate_observable,
delay=delay
)
if cli_args.gpu:
device = device("cuda")
# initialize the optimizer
optimizer = SGD(lkis.parameters(), lr=cli_args.learning_rate)
losses = []
val_losses = []
for epoch in range(cli_args.epochs):
loss = 0
for b in range(cli_args.num_batches):
optimizer.zero_grad()
time_delayed_ys, y_true = next(batch_iterator)
if cli_args.gpu:
time_delayed_ys.to(device)
y_true.to(device)
g_pred, y_pred = lkis(time_delayed_ys)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
batch_loss.backward()
optimizer.step()
loss += batch_loss.item()
# display the epoch training loss
print(f"epoch : {epoch + 1}/{cli_args.epochs}, loss = {loss:.6f}")
losses.append(loss)
if validate:
y_time_delayed_val, y_true = next(val_batch_iterator)
if cli_args.gpu:
y_time_delayed_val.to(device)
y_true.to(device)
g_pred, y_pred = lkis(y_time_delayed_val)
g_0 = g_pred[:-1]
g_1 = g_pred[1:]
batch_loss = combined_loss(y_pred=y_pred, y_true=y_true, g_0=g_0, g_1=g_1)
val_loss = batch_loss.item()
print(f"\tval-loss = {val_loss:.6f}")
val_losses.append(val_loss)
if cli_args.save_model:
save(lkis, f"{cli_args.name}.torch.mdl")
if cli_args.save_training_plot:
sns.lineplot(x=list(range(cli_args.epochs)), y=losses, label="training loss")
if validate:
sns.lineplot(x=list(range(cli_args.epochs)), y=val_losses, label="validation loss")
plt.xlabel("Epochs")
plt.ylabel("Combined Reconstruction and DMD Loss")
plt.title(f"Training Loss for {cli_args.name}")
plt.savefig(f"{cli_args.name}-training-loss.png")
| en | 0.745454 | #!/usr/bin/env python # coding: utf-8 Learning Koopman Invariant Subspace (c) <NAME>, 2017. <EMAIL> # -- Parse arguments #ToDo: Implement # grab the command line arguments # find and load the training data # process the delay either set by the user or is set to one 10th of the data # based on the number of batches, delay, and size of the data compute the samples per batch # construct the data preparer # construct the end-to-end model # initialize the optimizer # display the epoch training loss | 2.494588 | 2 |
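A programmatic sketch of the two lkis constructors the script above drives from the CLI; the constructor arguments mirror the script's calls, while the synthetic sine-wave series and the chosen dimensions are assumptions for illustration.

import numpy as np
from lkis import TimeSeriesBatchMaker, KoopmanInvariantSubspaceLearner  # as imported in the script

y = np.sin(np.linspace(0, 20 * np.pi, 1000)).reshape(-1, 1)   # toy 1-D time series
delay = y.shape[0] // 10                                      # same default as the script

batches = TimeSeriesBatchMaker(y=y, batch_size=y.shape[0] - delay, max_lag=delay)
model = KoopmanInvariantSubspaceLearner(
    observable_dim=y.shape[1],
    latent_dim=1,
    intermediate_observable=-1,
    delay=delay,
)
time_delayed_ys, y_true = next(batches)
g_pred, y_pred = model(time_delayed_ys)   # latent observables and reconstruction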
Algorithms/Easy/1200. Minimum Absolute Difference/answer.py | KenWoo/Algorithm | 0 | 9289 | <reponame>KenWoo/Algorithm<gh_stars>0
from typing import List
class Solution:
def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
arr.sort()
res = []
min_diff = arr[1] - arr[0]
res.append([arr[0], arr[1]])
for i in range(1, len(arr)-1):
diff = arr[i+1]-arr[i]
if diff < min_diff:
min_diff = diff
res.clear()
res.append([arr[i], arr[i+1]])
elif diff == min_diff:
res.append([arr[i], arr[i+1]])
return res
if __name__ == "__main__":
s = Solution()
result = s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
print(result)
| from typing import List
class Solution:
def minimumAbsDifference(self, arr: List[int]) -> List[List[int]]:
arr.sort()
res = []
min_diff = arr[1] - arr[0]
res.append([arr[0], arr[1]])
for i in range(1, len(arr)-1):
diff = arr[i+1]-arr[i]
if diff < min_diff:
min_diff = diff
res.clear()
res.append([arr[i], arr[i+1]])
elif diff == min_diff:
res.append([arr[i], arr[i+1]])
return res
if __name__ == "__main__":
s = Solution()
result = s.minimumAbsDifference([3, 8, -10, 23, 19, -4, -14, 27])
print(result) | none | 1 | 3.609747 | 4 |
|
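A compact variant of the solution above, using the same sort-then-scan idea in plain Python; it is verified against the example input from the record.

from typing import List

def minimum_abs_difference(arr: List[int]) -> List[List[int]]:
    arr = sorted(arr)
    min_diff = min(b - a for a, b in zip(arr, arr[1:]))          # smallest adjacent gap
    return [[a, b] for a, b in zip(arr, arr[1:]) if b - a == min_diff]

assert minimum_abs_difference([3, 8, -10, 23, 19, -4, -14, 27]) == [[-14, -10], [19, 23], [23, 27]]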
resources/physequations.py | VijayStroup/Physics_Problem_Solver_Basic | 0 | 9290 | <filename>resources/physequations.py
import math
def close(expected, actual, maxerror):
'''checks to see if the actual number is within expected +- maxerror.'''
low = expected - maxerror
high = expected + maxerror
if actual >= low and actual <= high:
return True
else:
return False
def grav_potential_energy(mass, height, gravity=9.81):
'''calculate potential energy given mass and height. Mass in
kilograms and height in meters.'''
gp_energy = mass * height * gravity
return gp_energy
def kin_energy(mass, velocity):
'''calculate kinetic energy given mass and velocity. Mass in
kilograms and velocity in meters per second.'''
k_energy = .5 * mass * velocity ** 2
return k_energy
def work_energy(force, displacement, angle):
    '''calculate work energy given force, displacement,
and angle. Force in newtons, displacement in meters, angle in degrees.'''
anglerad = math.radians(angle)
cos = math.cos(anglerad)
w_energy = force * displacement * cos
return w_energy
'''=============================================================================
Tests
============================================================================='''
if __name__ == '__main__':
def check(funcname, args, expected, ans, maxerror):
if not close(expected, ans, maxerror):
print(f'{funcname}({args}) = {ans} should = {expected}')
print(close(10, 11.1, 1))
print(close(100, 100.001, .01))
print(close(-10, -11.01, 1))
print(close(84756, 84300.2, 500.5))
    #gravitational potential energy tests
ans = grav_potential_energy(3.00, 7.00)
check('grav_potential_energy', '3.00, 7.00', 206.01, ans, 0.00000000000000000000000001)
ans = grav_potential_energy(2.00, 5.00)
check('grav_potential_energy', '2.00, 5.00', 98.1, ans, 0.01)
#kinetic energy tests
ans = kin_energy(2, 6.55)
check('kin_energy', '2, 6.55', 42.90, ans, 0.01)
ans = kin_energy(5.65, 10)
check('kin_energy', '5.65, 10', 282.5, ans, 0.1)
#work energy tests
ans = work_energy(500, 10, 0)
check('work_energy', '500, 10, 0', 5000.0, ans, 0.1)
ans = work_energy(150, 50, 45)
check('work_energy', '150, 50, 45', 5303.30, ans, 0.01)
| <filename>resources/physequations.py
import math
def close(expected, actual, maxerror):
'''checks to see if the actual number is within expected +- maxerror.'''
low = expected - maxerror
high = expected + maxerror
if actual >= low and actual <= high:
return True
else:
return False
def grav_potential_energy(mass, height, gravity=9.81):
'''calculate potential energy given mass and height. Mass in
kilograms and height in meters.'''
gp_energy = mass * height * gravity
return gp_energy
def kin_energy(mass, velocity):
'''calculate kinetic energy given mass and velocity. Mass in
kilograms and velocity in meters per second.'''
k_energy = .5 * mass * velocity ** 2
return k_energy
def work_energy(force, displacement, angle):
    '''calculate work energy given force, displacement,
and angle. Force in newtons, displacement in meters, angle in degrees.'''
anglerad = math.radians(angle)
cos = math.cos(anglerad)
w_energy = force * displacement * cos
return w_energy
'''=============================================================================
Tests
============================================================================='''
if __name__ == '__main__':
def check(funcname, args, expected, ans, maxerror):
if not close(expected, ans, maxerror):
print(f'{funcname}({args}) = {ans} should = {expected}')
print(close(10, 11.1, 1))
print(close(100, 100.001, .01))
print(close(-10, -11.01, 1))
print(close(84756, 84300.2, 500.5))
    #gravitational potential energy tests
ans = grav_potential_energy(3.00, 7.00)
check('grav_potential_energy', '3.00, 7.00', 206.01, ans, 0.00000000000000000000000001)
ans = grav_potential_energy(2.00, 5.00)
check('grav_potential_energy', '2.00, 5.00', 98.1, ans, 0.01)
#kinetic energy tests
ans = kin_energy(2, 6.55)
check('kin_energy', '2, 6.55', 42.90, ans, 0.01)
ans = kin_energy(5.65, 10)
check('kin_energy', '5.65, 10', 282.5, ans, 0.1)
#work energy tests
ans = work_energy(500, 10, 0)
check('work_energy', '500, 10, 0', 5000.0, ans, 0.1)
ans = work_energy(150, 50, 45)
check('work_energy', '150, 50, 45', 5303.30, ans, 0.01)
| en | 0.696741 | checks to see if the actual number is within expected +- maxerror. calculate potential energy given mass and height. Mass in kilograms and height in meters. calculate kinetic energy given mass and velocity. Mass in kilograms and velocity in meters per second. calculate work energy given force, displancement, and angle. Force in newtons, displacement in meters, angle in degrees. ============================================================================= Tests ============================================================================= #gravitional potential energy tests #kinetic energy tests #work energy tests | 3.710374 | 4 |
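A short usage sketch for the three energy helpers above; the import path assumes the record's file location, and the expected values match the module's own self-tests.

from resources.physequations import grav_potential_energy, kin_energy, work_energy

print(grav_potential_energy(3.00, 7.00))   # ~206.01 J: 3 kg raised 7 m
print(kin_energy(2.0, 6.55))               # ~42.90 J: 2 kg moving at 6.55 m/s
print(work_energy(150, 50, 45))            # ~5303.30 J: 150 N over 50 m at 45 degrees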
mvpa2/tests/test_erdataset.py | andycon/PyMVPA | 0 | 9291 | <reponame>andycon/PyMVPA
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for the event-related dataset'''
from mvpa2.testing import *
from mvpa2.datasets import dataset_wizard
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.mappers.fx import FxMapper
from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, \
extract_boxcar_event_samples
from mvpa2.datasets.sources import load_example_fmri_dataset
from mvpa2.mappers.zscore import zscore
def test_erdataset():
# 3 chunks, 5 targets, blocks of 5 samples each
nchunks = 3
ntargets = 5
blocklength = 5
nfeatures = 10
targets = np.tile(np.repeat(range(ntargets), blocklength), nchunks)
chunks = np.repeat(np.arange(nchunks), ntargets * blocklength)
samples = np.repeat(
np.arange(nchunks * ntargets * blocklength),
nfeatures).reshape(-1, nfeatures)
ds = dataset_wizard(samples, targets=targets, chunks=chunks)
# check if events are determined properly
evs = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
for ev in evs:
assert_equal(ev['duration'], blocklength)
assert_equal(ntargets * nchunks, len(evs))
for t in range(ntargets):
assert_equal(len([ev for ev in evs if ev['targets'] == t]),
nchunks)
    # now turn `ds` into an event-related dataset
erds = eventrelated_dataset(ds, evs)
# the only unprefixed sample attributes are
assert_equal(sorted([a for a in ds.sa if not a.startswith('event')]),
['chunks', 'targets'])
# samples as expected?
assert_array_equal(erds.samples[0],
np.repeat(np.arange(blocklength), nfeatures))
# that should also be the temporal feature offset
assert_array_equal(erds.samples[0], erds.fa.event_offsetidx)
assert_array_equal(erds.sa.event_onsetidx, np.arange(0,71,5))
# finally we should see two mappers
assert_equal(len(erds.a.mapper), 2)
assert_true(isinstance(erds.a.mapper[0], BoxcarMapper))
assert_true(isinstance(erds.a.mapper[1], FlattenMapper))
# check alternative event mapper
# this one does temporal compression by averaging
erds_compress = eventrelated_dataset(
ds, evs, event_mapper=FxMapper('features', np.mean))
assert_equal(len(erds), len(erds_compress))
assert_array_equal(erds_compress.samples[:,0], np.arange(2,73,5))
#
    # now check the same dataset with event discretization
tr = 2.5
ds.sa['time'] = np.arange(nchunks * ntargets * blocklength) * tr
evs = [{'onset': 4.9, 'duration': 6.2}]
# doesn't work without conversion
assert_raises(ValueError, eventrelated_dataset, ds, evs)
erds = eventrelated_dataset(ds, evs, time_attr='time')
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0], np.repeat(np.arange(1,5), nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [2.4])
assert_array_equal(erds.sa.time, [np.arange(2.5, 11, 2.5)])
# now with closest match
erds = eventrelated_dataset(ds, evs, time_attr='time', match='closest')
expected_nsamples = 3
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0],
np.repeat(np.arange(2,2+expected_nsamples),
nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [-0.1])
assert_array_equal(erds.sa.time, [np.arange(5.0, 11, 2.5)])
# now test the way back
results = np.arange(erds.nfeatures)
assert_array_equal(erds.a.mapper.reverse1(results),
results.reshape(expected_nsamples, nfeatures))
# what about multiple results?
nresults = 5
results = dataset_wizard([results] * nresults)
# and let's have an attribute to make it more difficult
results.sa['myattr'] = np.arange(5)
rds = erds.a.mapper.reverse(results)
assert_array_equal(rds,
results.samples.reshape(nresults * expected_nsamples,
nfeatures))
assert_array_equal(rds.sa.myattr, np.repeat(results.sa.myattr,
expected_nsamples))
evs = [dict(onset=12, duration=2), dict(onset=70, duration=3)]
evds = extract_boxcar_event_samples(ds, evs)
# it goes for the max of all durations
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
    # override duration
evds = extract_boxcar_event_samples(ds, evs, event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 70)
    # override onset
evds = extract_boxcar_event_samples(ds, evs, event_offset=2)
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1,:10]), 72)
    # override both
evds = extract_boxcar_event_samples(ds, evs, event_offset=-2,
event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 68)
def test_hrf_modeling():
skip_if_no_external('nibabel')
skip_if_no_external('nipy') # ATM relies on NiPy's GLM implementation
ds = load_example_fmri_dataset('25mm', literal=True)
# TODO: simulate short dataset with known properties and use it
# for testing
events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
tr = ds.a.imghdr['pixdim'][4]
for ev in events:
for a in ('onset', 'duration'):
ev[a] = ev[a] * tr
evds = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# same voxels
assert_equal(ds.nfeatures, evds.nfeatures)
assert_array_equal(ds.fa.voxel_indices, evds.fa.voxel_indices)
# one sample for each condition, plus constant
assert_equal(sorted(ds.sa['targets'].unique), sorted(evds.sa.targets))
assert_equal(evds.a.add_regs.sa.regressor_names[0], 'constant')
# with centered data
zscore(ds)
evds_demean = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# after demeaning the constant should consume a lot less
assert(evds.a.add_regs[0].samples.mean()
> evds_demean.a.add_regs[0].samples.mean())
# from eyeballing the sensitivity example -- would be better to test this on
# the tutorial data
assert(evds_demean[evds.sa.targets == 'shoe'].samples.max() \
> evds_demean[evds.sa.targets == 'bottle'].samples.max())
# HRF models
assert('regressors' in evds.sa)
assert('regressors' in evds.a.add_regs.sa)
assert_equal(evds.sa.regressors.shape[1], len(ds))
# custom regressors
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# verify that nothing screwed up time_coords
assert_equal(ds.sa.time_coords[0], 0)
assert_equal(len(evds_regrs), len(evds))
# one more output sample in .a.add_regs
assert_equal(len(evds_regrs.a.add_regs) - 1, len(evds.a.add_regs))
# comes last before constant
assert_equal('time_indices', evds_regrs.a.add_regs.sa.regressor_names[-2])
# order of main regressors is unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
# custom regressors from external sources
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_coords'],
design_kwargs=dict(drift_model='blank',
add_regs=np.linspace(1, -1, len(ds))[None].T,
add_reg_names=['negative_trend']),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_equal(len(evds_regrs), len(evds))
# But we got one more in additional regressors
assert_equal(len(evds_regrs.a.add_regs) - 2, len(evds.a.add_regs))
# comes last before constant
assert_array_equal(['negative_trend', 'time_coords', 'constant'],
evds_regrs.a.add_regs.sa.regressor_names)
# order is otherwise unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
# HRF models with estimating per each chunk
assert_equal(ds.sa.time_coords[0], 0)
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr=['targets', 'chunks'],
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_true('add_regs' in evds_regrs.a)
assert_true('time_indices' in evds_regrs.a.add_regs.sa.regressor_names)
assert_equal(len(ds.UC) * len(ds.UT), len(evds_regrs))
assert_equal(len(evds_regrs.UC) * len(evds_regrs.UT), len(evds_regrs))
from mvpa2.mappers.fx import mean_group_sample
evds_regrs_meaned = mean_group_sample(['targets'])(evds_regrs)
assert_array_equal(evds_regrs_meaned.T, evds.T) # targets should be the same
#corr = np.corrcoef(np.vstack((evds.samples, evds_regrs_meaned)))
#import pydb; pydb.debugger()
#pass
#i = 1
| # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
'''Tests for the event-related dataset'''
from mvpa2.testing import *
from mvpa2.datasets import dataset_wizard
from mvpa2.mappers.flatten import FlattenMapper
from mvpa2.mappers.boxcar import BoxcarMapper
from mvpa2.mappers.fx import FxMapper
from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset, \
extract_boxcar_event_samples
from mvpa2.datasets.sources import load_example_fmri_dataset
from mvpa2.mappers.zscore import zscore
def test_erdataset():
# 3 chunks, 5 targets, blocks of 5 samples each
nchunks = 3
ntargets = 5
blocklength = 5
nfeatures = 10
targets = np.tile(np.repeat(range(ntargets), blocklength), nchunks)
chunks = np.repeat(np.arange(nchunks), ntargets * blocklength)
samples = np.repeat(
np.arange(nchunks * ntargets * blocklength),
nfeatures).reshape(-1, nfeatures)
ds = dataset_wizard(samples, targets=targets, chunks=chunks)
# check if events are determined properly
evs = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
for ev in evs:
assert_equal(ev['duration'], blocklength)
assert_equal(ntargets * nchunks, len(evs))
for t in range(ntargets):
assert_equal(len([ev for ev in evs if ev['targets'] == t]),
nchunks)
    # now turn `ds` into an event-related dataset
erds = eventrelated_dataset(ds, evs)
# the only unprefixed sample attributes are
assert_equal(sorted([a for a in ds.sa if not a.startswith('event')]),
['chunks', 'targets'])
# samples as expected?
assert_array_equal(erds.samples[0],
np.repeat(np.arange(blocklength), nfeatures))
# that should also be the temporal feature offset
assert_array_equal(erds.samples[0], erds.fa.event_offsetidx)
assert_array_equal(erds.sa.event_onsetidx, np.arange(0,71,5))
# finally we should see two mappers
assert_equal(len(erds.a.mapper), 2)
assert_true(isinstance(erds.a.mapper[0], BoxcarMapper))
assert_true(isinstance(erds.a.mapper[1], FlattenMapper))
# check alternative event mapper
# this one does temporal compression by averaging
erds_compress = eventrelated_dataset(
ds, evs, event_mapper=FxMapper('features', np.mean))
assert_equal(len(erds), len(erds_compress))
assert_array_equal(erds_compress.samples[:,0], np.arange(2,73,5))
#
    # now check the same dataset with event discretization
tr = 2.5
ds.sa['time'] = np.arange(nchunks * ntargets * blocklength) * tr
evs = [{'onset': 4.9, 'duration': 6.2}]
# doesn't work without conversion
assert_raises(ValueError, eventrelated_dataset, ds, evs)
erds = eventrelated_dataset(ds, evs, time_attr='time')
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0], np.repeat(np.arange(1,5), nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [2.4])
assert_array_equal(erds.sa.time, [np.arange(2.5, 11, 2.5)])
# now with closest match
erds = eventrelated_dataset(ds, evs, time_attr='time', match='closest')
expected_nsamples = 3
assert_equal(len(erds), 1)
assert_array_equal(erds.samples[0],
np.repeat(np.arange(2,2+expected_nsamples),
nfeatures))
assert_array_equal(erds.sa.orig_onset, [evs[0]['onset']])
assert_array_equal(erds.sa.orig_duration, [evs[0]['duration']])
assert_array_almost_equal(erds.sa.orig_offset, [-0.1])
assert_array_equal(erds.sa.time, [np.arange(5.0, 11, 2.5)])
# now test the way back
results = np.arange(erds.nfeatures)
assert_array_equal(erds.a.mapper.reverse1(results),
results.reshape(expected_nsamples, nfeatures))
# what about multiple results?
nresults = 5
results = dataset_wizard([results] * nresults)
# and let's have an attribute to make it more difficult
results.sa['myattr'] = np.arange(5)
rds = erds.a.mapper.reverse(results)
assert_array_equal(rds,
results.samples.reshape(nresults * expected_nsamples,
nfeatures))
assert_array_equal(rds.sa.myattr, np.repeat(results.sa.myattr,
expected_nsamples))
evs = [dict(onset=12, duration=2), dict(onset=70, duration=3)]
evds = extract_boxcar_event_samples(ds, evs)
# it goes for the max of all durations
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
    # override duration
evds = extract_boxcar_event_samples(ds, evs, event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 70)
    # override onset
evds = extract_boxcar_event_samples(ds, evs, event_offset=2)
assert_equal(evds.shape, (len(evs), 3 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1,:10]), 72)
    # override both
evds = extract_boxcar_event_samples(ds, evs, event_offset=-2,
event_duration=1)
assert_equal(evds.shape, (len(evs), 1 * ds.nfeatures))
assert_equal(np.unique(evds.samples[1]), 68)
def test_hrf_modeling():
skip_if_no_external('nibabel')
skip_if_no_external('nipy') # ATM relies on NiPy's GLM implementation
ds = load_example_fmri_dataset('25mm', literal=True)
# TODO: simulate short dataset with known properties and use it
# for testing
events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
tr = ds.a.imghdr['pixdim'][4]
for ev in events:
for a in ('onset', 'duration'):
ev[a] = ev[a] * tr
evds = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# same voxels
assert_equal(ds.nfeatures, evds.nfeatures)
assert_array_equal(ds.fa.voxel_indices, evds.fa.voxel_indices)
# one sample for each condition, plus constant
assert_equal(sorted(ds.sa['targets'].unique), sorted(evds.sa.targets))
assert_equal(evds.a.add_regs.sa.regressor_names[0], 'constant')
# with centered data
zscore(ds)
evds_demean = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# after demeaning the constant should consume a lot less
assert(evds.a.add_regs[0].samples.mean()
> evds_demean.a.add_regs[0].samples.mean())
# from eyeballing the sensitivity example -- would be better to test this on
# the tutorial data
assert(evds_demean[evds.sa.targets == 'shoe'].samples.max() \
> evds_demean[evds.sa.targets == 'bottle'].samples.max())
# HRF models
assert('regressors' in evds.sa)
assert('regressors' in evds.a.add_regs.sa)
assert_equal(evds.sa.regressors.shape[1], len(ds))
# custom regressors
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
# verify that nothing screwed up time_coords
assert_equal(ds.sa.time_coords[0], 0)
assert_equal(len(evds_regrs), len(evds))
# one more output sample in .a.add_regs
assert_equal(len(evds_regrs.a.add_regs) - 1, len(evds.a.add_regs))
# comes last before constant
assert_equal('time_indices', evds_regrs.a.add_regs.sa.regressor_names[-2])
# order of main regressors is unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
# custom regressors from external sources
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr='targets',
regr_attrs=['time_coords'],
design_kwargs=dict(drift_model='blank',
add_regs=np.linspace(1, -1, len(ds))[None].T,
add_reg_names=['negative_trend']),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_equal(len(evds_regrs), len(evds))
# But we got one more in additional regressors
assert_equal(len(evds_regrs.a.add_regs) - 2, len(evds.a.add_regs))
# comes last before constant
assert_array_equal(['negative_trend', 'time_coords', 'constant'],
evds_regrs.a.add_regs.sa.regressor_names)
# order is otherwise unchanged
assert_array_equal(evds.sa.targets, evds_regrs.sa.targets)
# HRF models with estimating per each chunk
assert_equal(ds.sa.time_coords[0], 0)
evds_regrs = eventrelated_dataset(ds, events, time_attr='time_coords',
condition_attr=['targets', 'chunks'],
regr_attrs=['time_indices'],
design_kwargs=dict(drift_model='blank'),
glmfit_kwargs=dict(model='ols'),
model='hrf')
assert_true('add_regs' in evds_regrs.a)
assert_true('time_indices' in evds_regrs.a.add_regs.sa.regressor_names)
assert_equal(len(ds.UC) * len(ds.UT), len(evds_regrs))
assert_equal(len(evds_regrs.UC) * len(evds_regrs.UT), len(evds_regrs))
from mvpa2.mappers.fx import mean_group_sample
evds_regrs_meaned = mean_group_sample(['targets'])(evds_regrs)
assert_array_equal(evds_regrs_meaned.T, evds.T) # targets should be the same
#corr = np.corrcoef(np.vstack((evds.samples, evds_regrs_meaned)))
#import pydb; pydb.debugger()
#pass
#i = 1 | en | 0.785098 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## # # See COPYING file distributed along with the PyMVPA package for the # copyright and license terms. # ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## Tests for the event-related dataset # 3 chunks, 5 targets, blocks of 5 samples each # check if events are determined properly # now turn `ds` into an eventreleated dataset # the only unprefixed sample attributes are # samples as expected? # that should also be the temporal feature offset # finally we should see two mappers # check alternative event mapper # this one does temporal compression by averaging # # now check the same dataset with event descretization # doesn't work without conversion # now with closest match # now test the way back # what about multiple results? # and let's have an attribute to make it more difficult # it goes for the max of all durations # overide duration # overide onset # overide both # ATM relies on NiPy's GLM implementation # TODO: simulate short dataset with known properties and use it # for testing # same voxels # one sample for each condition, plus constant # with centered data # after demeaning the constant should consume a lot less # from eyeballing the sensitivity example -- would be better to test this on # the tutorial data # HRF models # custom regressors # verify that nothing screwed up time_coords # one more output sample in .a.add_regs # comes last before constant # order of main regressors is unchanged # custom regressors from external sources # But we got one more in additional regressors # comes last before constant # order is otherwise unchanged # HRF models with estimating per each chunk # targets should be the same #corr = np.corrcoef(np.vstack((evds.samples, evds_regrs_meaned))) #import pydb; pydb.debugger() #pass #i = 1 | 1.921974 | 2 |
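A condensed sketch of the flow the tests above exercise — build a block-design dataset, detect events, then segment with the boxcar mapper; every call mirrors one made in the record, only the toy shapes are assumptions.

import numpy as np
from mvpa2.datasets import dataset_wizard
from mvpa2.datasets.eventrelated import find_events, eventrelated_dataset

targets = np.tile(np.repeat(range(3), 4), 2)      # 2 chunks x 3 targets x 4-sample blocks
chunks = np.repeat(np.arange(2), 12)
samples = np.random.randn(24, 5)                  # 5 features per sample

ds = dataset_wizard(samples, targets=targets, chunks=chunks)
events = find_events(targets=ds.sa.targets, chunks=ds.sa.chunks)
erds = eventrelated_dataset(ds, events)           # one flattened boxcar sample per block
print(len(events), erds.shape)                    # 6 events -> (6, 20)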
userbot/plugins/delfp.py | aksr-aashish/FIREXUSERBOT | 0 | 9292 | from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto
from userbot.cmdhelp import CmdHelp
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
CmdHelp("delfp").add_command("delpfp", None, "delete ur currnt profile picture").add()
@borg.on(admin_cmd(pattern="delpfp ?(.*)"))
@borg.on(sudo_cmd(pattern="delpfp ?(.*)", allow_sudo=True))
async def remove_profilepic(delpfp):
"""For .delpfp command, delete your current profile picture in Telegram."""
group = delpfp.text[8:]
if group == "all":
lim = 0
elif group.isdigit():
lim = int(group)
else:
lim = 1
pfplist = await delpfp.client(
GetUserPhotosRequest(user_id=delpfp.from_id, offset=0, max_id=0, limit=lim)
)
input_photos = [InputPhoto(
id=sep.id,
access_hash=sep.access_hash,
file_reference=sep.file_reference,
) for sep in pfplist.photos]
await delpfp.client(DeletePhotosRequest(id=input_photos))
await edit_or_reply(
delpfp, f"`Successfully deleted {len(input_photos)} profile picture(s).`"
)
| from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto
from userbot.cmdhelp import CmdHelp
from userbot.utils import admin_cmd, edit_or_reply, sudo_cmd
CmdHelp("delfp").add_command("delpfp", None, "delete ur currnt profile picture").add()
@borg.on(admin_cmd(pattern="delpfp ?(.*)"))
@borg.on(sudo_cmd(pattern="delpfp ?(.*)", allow_sudo=True))
async def remove_profilepic(delpfp):
"""For .delpfp command, delete your current profile picture in Telegram."""
group = delpfp.text[8:]
if group == "all":
lim = 0
elif group.isdigit():
lim = int(group)
else:
lim = 1
pfplist = await delpfp.client(
GetUserPhotosRequest(user_id=delpfp.from_id, offset=0, max_id=0, limit=lim)
)
input_photos = [InputPhoto(
id=sep.id,
access_hash=sep.access_hash,
file_reference=sep.file_reference,
) for sep in pfplist.photos]
await delpfp.client(DeletePhotosRequest(id=input_photos))
await edit_or_reply(
delpfp, f"`Successfully deleted {len(input_photos)} profile picture(s).`"
)
| en | 0.89697 | For .delpfp command, delete your current profile picture in Telegram. | 2.514316 | 3 |
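Stripped of the userbot decorators, the Telethon calls in the record reduce to the sketch below; client is assumed to be an already-connected TelegramClient, and limit=1 keeps only the newest photo, matching the command's default branch.

from telethon.tl.functions.photos import DeletePhotosRequest, GetUserPhotosRequest
from telethon.tl.types import InputPhoto

async def delete_profile_photos(client, user_id, limit=1):
    photos = await client(GetUserPhotosRequest(user_id=user_id, offset=0, max_id=0, limit=limit))
    refs = [InputPhoto(id=p.id, access_hash=p.access_hash, file_reference=p.file_reference)
            for p in photos.photos]
    await client(DeletePhotosRequest(id=refs))
    return len(refs)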
amlb/benchmarks/file.py | pplonski/automlbenchmark | 282 | 9293 | <filename>amlb/benchmarks/file.py<gh_stars>100-1000
import logging
import os
from typing import List, Tuple, Optional
from amlb.utils import config_load, Namespace
log = logging.getLogger(__name__)
def _find_local_benchmark_definition(name: str, benchmark_definition_dirs: List[str]) -> str:
# 'name' should be either a full path to the benchmark,
# or a filename (without extension) in the benchmark directory.
if os.path.exists(name):
return name
for bd in benchmark_definition_dirs:
bf = os.path.join(bd, f"{name}.yaml")
if os.path.exists(bf):
# We don't account for duplicate definitions (yet).
return bf
# should we support s3 and check for s3 path before raising error?
raise ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")
def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
""" Loads benchmark from a local file. """
benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
log.info("Loading benchmark definitions from %s.", benchmark_file)
tasks = config_load(benchmark_file)
benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file))
return benchmark_name, benchmark_file, tasks
| <filename>amlb/benchmarks/file.py<gh_stars>100-1000
import logging
import os
from typing import List, Tuple, Optional
from amlb.utils import config_load, Namespace
log = logging.getLogger(__name__)
def _find_local_benchmark_definition(name: str, benchmark_definition_dirs: List[str]) -> str:
# 'name' should be either a full path to the benchmark,
# or a filename (without extension) in the benchmark directory.
if os.path.exists(name):
return name
for bd in benchmark_definition_dirs:
bf = os.path.join(bd, f"{name}.yaml")
if os.path.exists(bf):
# We don't account for duplicate definitions (yet).
return bf
# should we support s3 and check for s3 path before raising error?
raise ValueError(f"Incorrect benchmark name or path `{name}`, name not available in {benchmark_definition_dirs}.")
def load_file_benchmark(name: str, benchmark_definition_dirs: List[str]) -> Tuple[str, Optional[str], List[Namespace]]:
""" Loads benchmark from a local file. """
benchmark_file = _find_local_benchmark_definition(name, benchmark_definition_dirs)
log.info("Loading benchmark definitions from %s.", benchmark_file)
tasks = config_load(benchmark_file)
benchmark_name, _ = os.path.splitext(os.path.basename(benchmark_file))
return benchmark_name, benchmark_file, tasks
| en | 0.847454 | # 'name' should be either a full path to the benchmark, # or a filename (without extension) in the benchmark directory. # We don't account for duplicate definitions (yet). # should we support s3 and check for s3 path before raising error? Loads benchmark from a local file. | 2.392496 | 2 |
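A small, self-contained illustration of the lookup order implemented above: an existing path wins, otherwise <name>.yaml is searched for in the definition directories. It only exercises the private helper and assumes the module is importable as amlb.benchmarks.file.

import os, tempfile
from amlb.benchmarks.file import _find_local_benchmark_definition

with tempfile.TemporaryDirectory() as d:
    bf = os.path.join(d, "small.yaml")
    open(bf, "w").close()                                         # an empty definition file is enough here
    assert _find_local_benchmark_definition("small", [d]) == bf   # bare name -> resolved in the directory
    assert _find_local_benchmark_definition(bf, [d]) == bf        # existing path -> returned unchanged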
pybuspro/devices/control.py | eyesoft/pybuspro | 2 | 9294 | from ..core.telegram import Telegram
from ..helpers.enums import OperateCode
class _Control:
def __init__(self, buspro):
self._buspro = buspro
self.subnet_id = None
self.device_id = None
@staticmethod
def build_telegram_from_control(control):
if control is None:
return None
if type(control) == _SingleChannelControl:
operate_code = OperateCode.SingleChannelControl
payload = [control.channel_number, control.channel_level, control.running_time_minutes,
control.running_time_seconds]
elif type(control) == _SceneControl:
operate_code = OperateCode.SceneControl
payload = [control.area_number, control.scene_number]
elif type(control) == _ReadStatusOfChannels:
operate_code = OperateCode.ReadStatusOfChannels
payload = []
elif type(control) == _GenericControl:
operate_code = control.operate_code
payload = control.payload
elif type(control) == _UniversalSwitch:
operate_code = OperateCode.UniversalSwitchControl
payload = [control.switch_number, control.switch_status.value]
elif type(control) == _ReadStatusOfUniversalSwitch:
operate_code = OperateCode.ReadStatusOfUniversalSwitch
payload = [control.switch_number]
elif type(control) == _ReadSensorStatus:
operate_code = OperateCode.ReadSensorStatus
payload = []
elif type(control) == _ReadSensorsInOneStatus:
operate_code = OperateCode.ReadSensorsInOneStatus
payload = []
elif type(control) == _ReadFloorHeatingStatus:
operate_code = OperateCode.ReadFloorHeatingStatus
payload = []
elif type(control) == _ReadDryContactStatus:
operate_code = OperateCode.ReadDryContactStatus
payload = [1, control.switch_number]
elif type(control) == _ControlFloorHeatingStatus:
operate_code = OperateCode.ControlFloorHeatingStatus
payload = [control.temperature_type, control.status, control.mode, control.normal_temperature,
control.day_temperature, control.night_temperature, control.away_temperature]
else:
return None
telegram = Telegram()
telegram.target_address = (control.subnet_id, control.device_id)
telegram.operate_code = operate_code
telegram.payload = payload
return telegram
@property
def telegram(self):
return self.build_telegram_from_control(self)
async def send(self):
telegram = self.telegram
# if telegram.target_address[1] == 100:
# print("==== {}".format(str(telegram)))
await self._buspro.network_interface.send_telegram(telegram)
class _GenericControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.payload = None
self.operate_code = None
class _SingleChannelControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.channel_number = None
self.channel_level = None
self.running_time_minutes = None
self.running_time_seconds = None
class _SceneControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.area_number = None
self.scene_number = None
class _ReadStatusOfChannels(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _UniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
self.switch_status = None
class _ReadStatusOfUniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
class _ReadSensorStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadSensorsInOneStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ControlFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.temperature_type = None
self.status = None
self.mode = None
self.normal_temperature = None
self.day_temperature = None
self.night_temperature = None
self.away_temperature = None
class _ReadDryContactStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
| from ..core.telegram import Telegram
from ..helpers.enums import OperateCode
class _Control:
def __init__(self, buspro):
self._buspro = buspro
self.subnet_id = None
self.device_id = None
@staticmethod
def build_telegram_from_control(control):
if control is None:
return None
if type(control) == _SingleChannelControl:
operate_code = OperateCode.SingleChannelControl
payload = [control.channel_number, control.channel_level, control.running_time_minutes,
control.running_time_seconds]
elif type(control) == _SceneControl:
operate_code = OperateCode.SceneControl
payload = [control.area_number, control.scene_number]
elif type(control) == _ReadStatusOfChannels:
operate_code = OperateCode.ReadStatusOfChannels
payload = []
elif type(control) == _GenericControl:
operate_code = control.operate_code
payload = control.payload
elif type(control) == _UniversalSwitch:
operate_code = OperateCode.UniversalSwitchControl
payload = [control.switch_number, control.switch_status.value]
elif type(control) == _ReadStatusOfUniversalSwitch:
operate_code = OperateCode.ReadStatusOfUniversalSwitch
payload = [control.switch_number]
elif type(control) == _ReadSensorStatus:
operate_code = OperateCode.ReadSensorStatus
payload = []
elif type(control) == _ReadSensorsInOneStatus:
operate_code = OperateCode.ReadSensorsInOneStatus
payload = []
elif type(control) == _ReadFloorHeatingStatus:
operate_code = OperateCode.ReadFloorHeatingStatus
payload = []
elif type(control) == _ReadDryContactStatus:
operate_code = OperateCode.ReadDryContactStatus
payload = [1, control.switch_number]
elif type(control) == _ControlFloorHeatingStatus:
operate_code = OperateCode.ControlFloorHeatingStatus
payload = [control.temperature_type, control.status, control.mode, control.normal_temperature,
control.day_temperature, control.night_temperature, control.away_temperature]
else:
return None
telegram = Telegram()
telegram.target_address = (control.subnet_id, control.device_id)
telegram.operate_code = operate_code
telegram.payload = payload
return telegram
@property
def telegram(self):
return self.build_telegram_from_control(self)
async def send(self):
telegram = self.telegram
# if telegram.target_address[1] == 100:
# print("==== {}".format(str(telegram)))
await self._buspro.network_interface.send_telegram(telegram)
class _GenericControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.payload = None
self.operate_code = None
class _SingleChannelControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.channel_number = None
self.channel_level = None
self.running_time_minutes = None
self.running_time_seconds = None
class _SceneControl(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.area_number = None
self.scene_number = None
class _ReadStatusOfChannels(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _UniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
self.switch_status = None
class _ReadStatusOfUniversalSwitch(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
class _ReadSensorStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadSensorsInOneStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ReadFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
# no more properties
class _ControlFloorHeatingStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.temperature_type = None
self.status = None
self.mode = None
self.normal_temperature = None
self.day_temperature = None
self.night_temperature = None
self.away_temperature = None
class _ReadDryContactStatus(_Control):
def __init__(self, buspro):
super().__init__(buspro)
self.switch_number = None
| en | 0.285575 | # if telegram.target_address[1] == 100: # print("==== {}".format(str(telegram))) # no more properties # no more properties # no more properties # no more properties | 2.299809 | 2 |
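A sketch of how a frame is assembled from one of the control classes above; build_telegram_from_control is a staticmethod, so no bus connection is needed for this step. Passing None for the buspro handle is an illustration only — send() would still require a real network interface.

ctrl = _SingleChannelControl(None)          # no bus needed just to build the telegram
ctrl.subnet_id, ctrl.device_id = 1, 42
ctrl.channel_number = 2
ctrl.channel_level = 75                     # percent
ctrl.running_time_minutes = 0
ctrl.running_time_seconds = 3

telegram = _Control.build_telegram_from_control(ctrl)
# telegram.target_address == (1, 42)
# telegram.operate_code == OperateCode.SingleChannelControl
# telegram.payload == [2, 75, 0, 3]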
appengine/chrome_infra_console_loadtest/main.py | eunchong/infra | 0 | 9295 | <filename>appengine/chrome_infra_console_loadtest/main.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import endpoints
import random
import webapp2
from apiclient import discovery
from google.appengine.ext import ndb
from oauth2client.client import GoogleCredentials
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from components import auth
CONFIG_DATASTORE_KEY = "CONFIG_DATASTORE_KEY"
API_NAME = 'consoleapp'
API_VERSION = 'v1'
DISCOVERY_URL = '%s/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest'
class FieldParamsModel(ndb.Model):
field_key = ndb.StringProperty()
values = ndb.StringProperty(repeated=True)
class MetricModel(ndb.Model):
name = ndb.StringProperty(default="")
minimum = ndb.FloatProperty(default=0)
maximum = ndb.FloatProperty(default=100)
class ParamsModel(ndb.Model):
time = ndb.FloatProperty(default=10)
freq = ndb.FloatProperty(default=1)
url = ndb.StringProperty()
params = ndb.LocalStructuredProperty(FieldParamsModel, repeated=True)
metrics = ndb.LocalStructuredProperty(MetricModel, repeated=True)
class Field(messages.Message):
key = messages.StringField(1)
value = messages.StringField(2)
class Point(messages.Message):
time = messages.FloatField(1)
value = messages.FloatField(2)
class FieldParams(messages.Message):
field_key = messages.StringField(1)
values = messages.StringField(2, repeated=True)
class Metric(messages.Message):
name = messages.StringField(1)
minimum = messages.FloatField(2)
maximum = messages.FloatField(3)
class Params(messages.Message):
time = messages.FloatField(1)
freq = messages.FloatField(2)
url = messages.StringField(3)
params = messages.MessageField(FieldParams, 4, repeated=True)
metrics = messages.MessageField(Metric, 5, repeated=True)
class TimeSeries(messages.Message):
points = messages.MessageField(Point, 1, repeated=True)
fields = messages.MessageField(Field, 2, repeated=True)
metric = messages.StringField(3)
class DataPacket(messages.Message):
timeseries = messages.MessageField(TimeSeries, 1, repeated=True)
@auth.endpoints_api(name='consoleapp', version='v1')
class LoadTestApi(remote.Service):
"""A testing endpoint that receives timeseries data."""
@auth.endpoints_method(DataPacket, message_types.VoidMessage,
name='timeseries.update')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def timeseries_update(self, request):
logging.debug('Datapacket length is %d', len(request.timeseries))
return message_types.VoidMessage()
@auth.endpoints_api(name='ui', version='v1')
class UIApi(remote.Service):
"""API for the loadtest configuration UI."""
@auth.endpoints_method(message_types.VoidMessage, Params,
name='ui.get')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_get(self, _request):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
params = [FieldParams(field_key=field.field_key, values=field.values)
for field in data.params]
metrics = [Metric(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in data.metrics]
return Params(time=data.time, freq=data.freq, url=data.url, params=params,
metrics=metrics)
@auth.endpoints_method(Params, message_types.VoidMessage,
name='ui.set')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_set(self, request):
logging.debug('Got %s', request)
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
data.time = request.time
data.freq = request.freq
data.url = request.url
data.params = [FieldParamsModel(field_key=field.field_key,
values=field.values)
for field in request.params]
data.metrics = [MetricModel(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in request.metrics]
data.put()
return message_types.VoidMessage()
def field_generator(dataparams, index, fields):
if index == len(dataparams):
return [fields]
else:
key = dataparams[index].field_key
return sum((field_generator(
dataparams, index+1, fields+[{'key': key, 'value': value}])
for value in dataparams[index].values), [])
class CronHandler(webapp2.RequestHandler):
def get(self):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
metric_ranges = {}
for metric in data.metrics:
metric_ranges[metric.name] = (metric.minimum,metric.maximum)
datapacket = {'timeseries': []}
logging.debug('There are %d metrics', len(metric_ranges))
fieldlist = field_generator(data.params, 0, [])
for metric in metric_ranges:
for fields in fieldlist:
points = []
for x in xrange(0, int(data.time), int(data.freq)):
points.append({'time': x,
'value': random.uniform(*metric_ranges[metric])})
timeseries = {'points': points,
'fields': fields,
'metric': metric}
datapacket['timeseries'].append(timeseries)
logging.info('Send data to %s', data.url)
discovery_url = DISCOVERY_URL % data.url
credentials = GoogleCredentials.get_application_default()
service = discovery.build(API_NAME, API_VERSION,
discoveryServiceUrl=discovery_url,
credentials=credentials)
_response = service.timeseries().update(body=datapacket).execute()
backend_handlers = [
('/cron', CronHandler)
]
WEBAPP = webapp2.WSGIApplication(backend_handlers, debug=True)
APPLICATION = endpoints.api_server([LoadTestApi, UIApi])
| <filename>appengine/chrome_infra_console_loadtest/main.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import endpoints
import random
import webapp2
from apiclient import discovery
from google.appengine.ext import ndb
from oauth2client.client import GoogleCredentials
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from components import auth
CONFIG_DATASTORE_KEY = "CONFIG_DATASTORE_KEY"
API_NAME = 'consoleapp'
API_VERSION = 'v1'
DISCOVERY_URL = '%s/_ah/api/discovery/v1/apis/{api}/{apiVersion}/rest'
class FieldParamsModel(ndb.Model):
field_key = ndb.StringProperty()
values = ndb.StringProperty(repeated=True)
class MetricModel(ndb.Model):
name = ndb.StringProperty(default="")
minimum = ndb.FloatProperty(default=0)
maximum = ndb.FloatProperty(default=100)
class ParamsModel(ndb.Model):
time = ndb.FloatProperty(default=10)
freq = ndb.FloatProperty(default=1)
url = ndb.StringProperty()
params = ndb.LocalStructuredProperty(FieldParamsModel, repeated=True)
metrics = ndb.LocalStructuredProperty(MetricModel, repeated=True)
class Field(messages.Message):
key = messages.StringField(1)
value = messages.StringField(2)
class Point(messages.Message):
time = messages.FloatField(1)
value = messages.FloatField(2)
class FieldParams(messages.Message):
field_key = messages.StringField(1)
values = messages.StringField(2, repeated=True)
class Metric(messages.Message):
name = messages.StringField(1)
minimum = messages.FloatField(2)
maximum = messages.FloatField(3)
class Params(messages.Message):
time = messages.FloatField(1)
freq = messages.FloatField(2)
url = messages.StringField(3)
params = messages.MessageField(FieldParams, 4, repeated=True)
metrics = messages.MessageField(Metric, 5, repeated=True)
class TimeSeries(messages.Message):
points = messages.MessageField(Point, 1, repeated=True)
fields = messages.MessageField(Field, 2, repeated=True)
metric = messages.StringField(3)
class DataPacket(messages.Message):
timeseries = messages.MessageField(TimeSeries, 1, repeated=True)
@auth.endpoints_api(name='consoleapp', version='v1')
class LoadTestApi(remote.Service):
"""A testing endpoint that receives timeseries data."""
@auth.endpoints_method(DataPacket, message_types.VoidMessage,
name='timeseries.update')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def timeseries_update(self, request):
logging.debug('Datapacket length is %d', len(request.timeseries))
return message_types.VoidMessage()
@auth.endpoints_api(name='ui', version='v1')
class UIApi(remote.Service):
"""API for the loadtest configuration UI."""
@auth.endpoints_method(message_types.VoidMessage, Params,
name='ui.get')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_get(self, _request):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
params = [FieldParams(field_key=field.field_key, values=field.values)
for field in data.params]
metrics = [Metric(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in data.metrics]
return Params(time=data.time, freq=data.freq, url=data.url, params=params,
metrics=metrics)
@auth.endpoints_method(Params, message_types.VoidMessage,
name='ui.set')
@auth.require(lambda: auth.is_group_member('metric-generators'))
def UI_set(self, request):
logging.debug('Got %s', request)
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
data.time = request.time
data.freq = request.freq
data.url = request.url
data.params = [FieldParamsModel(field_key=field.field_key,
values=field.values)
for field in request.params]
data.metrics = [MetricModel(name=metric.name,
minimum=metric.minimum,
maximum=metric.maximum)
for metric in request.metrics]
data.put()
return message_types.VoidMessage()
def field_generator(dataparams, index, fields):
if index == len(dataparams):
return [fields]
else:
key = dataparams[index].field_key
return sum((field_generator(
dataparams, index+1, fields+[{'key': key, 'value': value}])
for value in dataparams[index].values), [])
class CronHandler(webapp2.RequestHandler):
def get(self):
data = ParamsModel.get_or_insert(CONFIG_DATASTORE_KEY)
metric_ranges = {}
for metric in data.metrics:
metric_ranges[metric.name] = (metric.minimum,metric.maximum)
datapacket = {'timeseries': []}
logging.debug('There are %d metrics', len(metric_ranges))
fieldlist = field_generator(data.params, 0, [])
for metric in metric_ranges:
for fields in fieldlist:
points = []
for x in xrange(0, int(data.time), int(data.freq)):
points.append({'time': x,
'value': random.uniform(*metric_ranges[metric])})
timeseries = {'points': points,
'fields': fields,
'metric': metric}
datapacket['timeseries'].append(timeseries)
logging.info('Send data to %s', data.url)
discovery_url = DISCOVERY_URL % data.url
credentials = GoogleCredentials.get_application_default()
service = discovery.build(API_NAME, API_VERSION,
discoveryServiceUrl=discovery_url,
credentials=credentials)
_response = service.timeseries().update(body=datapacket).execute()
backend_handlers = [
('/cron', CronHandler)
]
WEBAPP = webapp2.WSGIApplication(backend_handlers, debug=True)
APPLICATION = endpoints.api_server([LoadTestApi, UIApi])
| en | 0.881827 | # Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. A testing endpoint that receives timeseries data. API for the loadtest configuration UI. | 2.200837 | 2 |
src/mitre/securingai/restapi/task_plugin/controller.py | usnistgov/dioptra | 14 | 9296 | <reponame>usnistgov/dioptra
# This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
"""The module defining the task plugin endpoints."""
import uuid
from typing import List, Optional
import structlog
from flask import current_app, jsonify
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from injector import inject
from structlog.stdlib import BoundLogger
from mitre.securingai.restapi.utils import as_api_parser
from .errors import TaskPluginDoesNotExistError, TaskPluginUploadError
from .model import TaskPlugin, TaskPluginUploadForm, TaskPluginUploadFormData
from .schema import TaskPluginSchema, TaskPluginUploadSchema
from .service import TaskPluginService
LOGGER: BoundLogger = structlog.stdlib.get_logger()
api: Namespace = Namespace(
"TaskPlugin",
description="Task plugin registry operations",
)
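# Editor's summary of the routes registered on this namespace by the
# @api.route decorators below (the external URL prefix depends on how the
# namespace is mounted and is not defined in this module):
#   GET    /                                            list all registered task plugins
#   POST   /                                            upload/register a new task plugin
#   GET    /securingai_builtins                         list builtin task plugins
#   GET    /securingai_builtins/<taskPluginName>        get one builtin task plugin
#   GET    /securingai_custom                           list custom task plugins
#   GET    /securingai_custom/<taskPluginName>          get one custom task plugin
#   DELETE /securingai_custom/<taskPluginName>          delete one custom task plugin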
@api.route("/")
class TaskPluginResource(Resource):
"""Shows a list of all task plugins, and lets you POST to upload new ones."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="GET"
)
log.info("Request received")
return self._task_plugin_service.get_all(
bucket=current_app.config["AI_PLUGINS_BUCKET"], log=log
)
@api.expect(as_api_parser(api, TaskPluginUploadSchema))
@accepts(TaskPluginUploadSchema, api=api)
@responds(schema=TaskPluginSchema, api=api)
def post(self) -> TaskPlugin:
"""Registers a new task plugin uploaded via the task plugin upload form."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="POST"
)
task_plugin_upload_form: TaskPluginUploadForm = TaskPluginUploadForm()
log.info("Request received")
if not task_plugin_upload_form.validate_on_submit():
log.error("Form validation failed")
raise TaskPluginUploadError
log.info("Form validation successful")
task_plugin_upload_form_data: TaskPluginUploadFormData = (
self._task_plugin_service.extract_data_from_form(
task_plugin_upload_form=task_plugin_upload_form, log=log
)
)
return self._task_plugin_service.create(
task_plugin_upload_form_data=task_plugin_upload_form_data,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins")
class TaskPluginBuiltinsCollectionResource(Resource):
"""Shows a list of all builtin task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all available builtin task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_builtins",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_builtins "
"collection.",
)
class TaskPluginBuiltinCollectionNameResource(Resource):
"""Shows a single builtin task plugin package."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a builtin task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_builtins",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_builtins",
)
raise TaskPluginDoesNotExistError
return task_plugin
@api.route("/securingai_custom")
class TaskPluginCustomCollectionResource(Resource):
"""Shows a list of all custom task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered custom task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_custom",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_custom/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_custom "
"collection.",
)
class TaskPluginCustomCollectionNameResource(Resource):
"""Shows a single custom task plugin package and lets you delete it."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_custom",
)
raise TaskPluginDoesNotExistError
return task_plugin
def delete(self, taskPluginName: str) -> Response:
"""Deletes a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
task_plugin_name=taskPluginName,
request_type="DELETE",
)
log.info("Request received")
task_plugins: List[TaskPlugin] = self._task_plugin_service.delete(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
name: List[str] = [x.task_plugin_name for x in task_plugins]
return jsonify( # type: ignore
dict(status="Success", collection="securingai_custom", taskPluginName=name)
)
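# Editor's note: a minimal, untested client sketch for the endpoints above.
# The base URL, port and any auth headers are assumptions -- they depend on how
# this API is deployed and are not defined in this module.
#
#   import requests
#
#   BASE = "http://localhost:5000/api/v1/taskPlugin"  # assumed mount point
#   print(requests.get(BASE + "/securingai_custom").json())
#   requests.delete(BASE + "/securingai_custom/my_plugin")  # hypothetical name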
| # This Software (Dioptra) is being made available as a public service by the
# National Institute of Standards and Technology (NIST), an Agency of the United
# States Department of Commerce. This software was developed in part by employees of
# NIST and in part by NIST contractors. Copyright in portions of this software that
# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
# to Title 17 United States Code Section 105, works of NIST employees are not
# subject to copyright protection in the United States. However, NIST may hold
# international copyright in software created by its employees and domestic
# copyright (or licensing rights) in portions of software that were assigned or
# licensed to NIST. To the extent that NIST holds copyright in this software, it is
# being made available under the Creative Commons Attribution 4.0 International
# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
# of the software developed or licensed by NIST.
#
# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
# https://creativecommons.org/licenses/by/4.0/legalcode
"""The module defining the task plugin endpoints."""
import uuid
from typing import List, Optional
import structlog
from flask import current_app, jsonify
from flask.wrappers import Response
from flask_accepts import accepts, responds
from flask_restx import Namespace, Resource
from injector import inject
from structlog.stdlib import BoundLogger
from mitre.securingai.restapi.utils import as_api_parser
from .errors import TaskPluginDoesNotExistError, TaskPluginUploadError
from .model import TaskPlugin, TaskPluginUploadForm, TaskPluginUploadFormData
from .schema import TaskPluginSchema, TaskPluginUploadSchema
from .service import TaskPluginService
LOGGER: BoundLogger = structlog.stdlib.get_logger()
api: Namespace = Namespace(
"TaskPlugin",
description="Task plugin registry operations",
)
@api.route("/")
class TaskPluginResource(Resource):
"""Shows a list of all task plugins, and lets you POST to upload new ones."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="GET"
)
log.info("Request received")
return self._task_plugin_service.get_all(
bucket=current_app.config["AI_PLUGINS_BUCKET"], log=log
)
@api.expect(as_api_parser(api, TaskPluginUploadSchema))
@accepts(TaskPluginUploadSchema, api=api)
@responds(schema=TaskPluginSchema, api=api)
def post(self) -> TaskPlugin:
"""Registers a new task plugin uploaded via the task plugin upload form."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()), resource="taskPlugin", request_type="POST"
)
task_plugin_upload_form: TaskPluginUploadForm = TaskPluginUploadForm()
log.info("Request received")
if not task_plugin_upload_form.validate_on_submit():
log.error("Form validation failed")
raise TaskPluginUploadError
log.info("Form validation successful")
task_plugin_upload_form_data: TaskPluginUploadFormData = (
self._task_plugin_service.extract_data_from_form(
task_plugin_upload_form=task_plugin_upload_form, log=log
)
)
return self._task_plugin_service.create(
task_plugin_upload_form_data=task_plugin_upload_form_data,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins")
class TaskPluginBuiltinsCollectionResource(Resource):
"""Shows a list of all builtin task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all available builtin task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_builtins",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_builtins/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_builtins "
"collection.",
)
class TaskPluginBuiltinCollectionNameResource(Resource):
"""Shows a single builtin task plugin package."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a builtin task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginBuiltinCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_builtins",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_builtins",
)
raise TaskPluginDoesNotExistError
return task_plugin
@api.route("/securingai_custom")
class TaskPluginCustomCollectionResource(Resource):
"""Shows a list of all custom task plugins."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema(many=True), api=api)
def get(self) -> List[TaskPlugin]:
"""Gets a list of all registered custom task plugins."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollection",
request_type="GET",
)
log.info("Request received")
return self._task_plugin_service.get_all_in_collection(
collection="securingai_custom",
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
@api.route("/securingai_custom/<string:taskPluginName>")
@api.param(
"taskPluginName",
"A unique string identifying a task plugin package within securingai_custom "
"collection.",
)
class TaskPluginCustomCollectionNameResource(Resource):
"""Shows a single custom task plugin package and lets you delete it."""
@inject
def __init__(self, *args, task_plugin_service: TaskPluginService, **kwargs) -> None:
self._task_plugin_service = task_plugin_service
super().__init__(*args, **kwargs)
@responds(schema=TaskPluginSchema, api=api)
def get(self, taskPluginName: str) -> TaskPlugin:
"""Gets a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
request_type="GET",
)
log.info("Request received")
task_plugin: Optional[
TaskPlugin
] = self._task_plugin_service.get_by_name_in_collection(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
if task_plugin is None:
log.error(
"TaskPlugin not found",
task_plugin_name=taskPluginName,
collection="securingai_custom",
)
raise TaskPluginDoesNotExistError
return task_plugin
def delete(self, taskPluginName: str) -> Response:
"""Deletes a custom task plugin by its unique name."""
log: BoundLogger = LOGGER.new(
request_id=str(uuid.uuid4()),
resource="taskPluginCustomCollectionName",
task_plugin_name=taskPluginName,
request_type="DELETE",
)
log.info("Request received")
task_plugins: List[TaskPlugin] = self._task_plugin_service.delete(
collection="securingai_custom",
task_plugin_name=taskPluginName,
bucket=current_app.config["AI_PLUGINS_BUCKET"],
log=log,
)
name: List[str] = [x.task_plugin_name for x in task_plugins]
return jsonify( # type: ignore
dict(status="Success", collection="securingai_custom", taskPluginName=name)
) | en | 0.933608 | # This Software (Dioptra) is being made available as a public service by the # National Institute of Standards and Technology (NIST), an Agency of the United # States Department of Commerce. This software was developed in part by employees of # NIST and in part by NIST contractors. Copyright in portions of this software that # were developed by NIST contractors has been licensed or assigned to NIST. Pursuant # to Title 17 United States Code Section 105, works of NIST employees are not # subject to copyright protection in the United States. However, NIST may hold # international copyright in software created by its employees and domestic # copyright (or licensing rights) in portions of software that were assigned or # licensed to NIST. To the extent that NIST holds copyright in this software, it is # being made available under the Creative Commons Attribution 4.0 International # license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts # of the software developed or licensed by NIST. # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode The module defining the task plugin endpoints. Shows a list of all task plugins, and lets you POST to upload new ones. Gets a list of all registered task plugins. Registers a new task plugin uploaded via the task plugin upload form. Shows a list of all builtin task plugins. Gets a list of all available builtin task plugins. Shows a single builtin task plugin package. Gets a builtin task plugin by its unique name. Shows a list of all custom task plugins. Gets a list of all registered custom task plugins. Shows a single custom task plugin package and lets you delete it. Gets a custom task plugin by its unique name. Deletes a custom task plugin by its unique name. # type: ignore | 1.398845 | 1 |
dulwich/tests/test_lru_cache.py | mjmaenpaa/dulwich | 0 | 9297 | # Copyright (C) 2006, 2008 Canonical Ltd
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for the lru_cache module."""
from dulwich import (
lru_cache,
)
from dulwich.tests import (
TestCase,
)
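# Editor's sketch of the behaviour exercised below (not part of dulwich docs):
# LRUCache evicts by entry count, LRUSizeCache by total value size.
#
#   cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2)
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['a']        # touch 'a' so 'b' becomes the least recently used entry
#   cache['c'] = 3    # exceeds max_cache, so 'b' is evicted
#   assert 'b' not in cache and cache.get('a') == 1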
class TestLRUCache(TestCase):
"""Test that LRU cache properly keeps track of entries."""
def test_cache_size(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertEqual(10, cache.cache_size())
cache = lru_cache.LRUCache(max_cache=256)
self.assertEqual(256, cache.cache_size())
cache.resize(512)
self.assertEqual(512, cache.cache_size())
def test_missing(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse('foo' in cache)
self.assertRaises(KeyError, cache.__getitem__, 'foo')
cache['foo'] = 'bar'
self.assertEqual('bar', cache['foo'])
self.assertTrue('foo' in cache)
self.assertFalse('bar' in cache)
def test_map_None(self):
# Make sure that we can properly map None as a key.
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse(None in cache)
cache[None] = 1
self.assertEqual(1, cache[None])
cache[None] = 2
self.assertEqual(2, cache[None])
# Test the various code paths of __getitem__, to make sure that we can
# handle when None is the key for the LRU and the MRU
cache[1] = 3
cache[None] = 1
cache[None]
cache[1]
cache[None]
self.assertEqual([None, 1], [n.key for n in cache._walk_lru()])
def test_add__null_key(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_overflow(self):
"""Adding extra entries will pop out old ones."""
cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1)
cache['foo'] = 'bar'
# With a max cache of 1, adding 'baz' should pop out 'foo'
cache['baz'] = 'biz'
self.assertFalse('foo' in cache)
self.assertTrue('baz' in cache)
self.assertEqual('biz', cache['baz'])
def test_by_usage(self):
"""Accessing entries bumps them up in priority."""
cache = lru_cache.LRUCache(max_cache=2)
cache['baz'] = 'biz'
cache['foo'] = 'bar'
self.assertEqual('biz', cache['baz'])
# This must kick out 'foo' because it was the last accessed
cache['nub'] = 'in'
self.assertFalse('foo' in cache)
def test_cleanup(self):
"""Test that we can use a cleanup function."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2)
cache.add('baz', '1', cleanup=cleanup_func)
cache.add('foo', '2', cleanup=cleanup_func)
cache.add('biz', '3', cleanup=cleanup_func)
self.assertEqual([('baz', '1')], cleanup_called)
# 'foo' is now most recent, so final cleanup will call it last
cache['foo']
cache.clear()
self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')],
cleanup_called)
def test_cleanup_on_replace(self):
"""Replacing an object should cleanup the old value."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2)
cache.add(1, 10, cleanup=cleanup_func)
cache.add(2, 20, cleanup=cleanup_func)
cache.add(2, 25, cleanup=cleanup_func)
self.assertEqual([(2, 20)], cleanup_called)
self.assertEqual(25, cache[2])
# Even __setitem__ should make sure cleanup() is called
cache[2] = 26
self.assertEqual([(2, 20), (2, 25)], cleanup_called)
def test_len(self):
cache = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10)
cache[1] = 10
cache[2] = 20
cache[3] = 30
cache[4] = 40
self.assertEqual(4, len(cache))
cache[5] = 50
cache[6] = 60
cache[7] = 70
cache[8] = 80
self.assertEqual(8, len(cache))
cache[1] = 15 # replacement
self.assertEqual(8, len(cache))
cache[9] = 90
cache[10] = 100
cache[11] = 110
# We hit the max
self.assertEqual(10, len(cache))
self.assertEqual([11, 10, 9, 1, 8, 7, 6, 5, 4, 3],
[n.key for n in cache._walk_lru()])
def test_cleanup_shrinks_to_after_clean_count(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3)
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# This will bump us over the max, which causes us to shrink down to
# after_cleanup_cache size
cache.add(6, 40)
self.assertEqual(3, len(cache))
def test_after_cleanup_larger_than_max(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10)
self.assertEqual(5, cache._after_cleanup_count)
def test_after_cleanup_none(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None)
# By default _after_cleanup_size is 80% of the normal size
self.assertEqual(4, cache._after_cleanup_count)
def test_cleanup_2(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# Force a compaction
cache.cleanup()
self.assertEqual(2, len(cache))
def test_preserve_last_access_order(self):
cache = lru_cache.LRUCache(max_cache=5)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual([5, 4, 3, 2, 1], [n.key for n in cache._walk_lru()])
# Now access some randomly
cache[2]
cache[5]
cache[3]
cache[2]
self.assertEqual([2, 3, 5, 4, 1], [n.key for n in cache._walk_lru()])
def test_get(self):
cache = lru_cache.LRUCache(max_cache=5)
cache.add(1, 10)
cache.add(2, 20)
self.assertEqual(20, cache.get(2))
self.assertEqual(None, cache.get(3))
obj = object()
self.assertTrue(obj is cache.get(3, obj))
self.assertEqual([2, 1], [n.key for n in cache._walk_lru()])
self.assertEqual(10, cache.get(1))
self.assertEqual([1, 2], [n.key for n in cache._walk_lru()])
def test_keys(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5)
cache[1] = 2
cache[2] = 3
cache[3] = 4
self.assertEqual([1, 2, 3], sorted(cache.keys()))
cache[4] = 5
cache[5] = 6
cache[6] = 7
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
# Now resize to something smaller, which triggers a cleanup
cache.resize(max_cache=3, after_cleanup_count=2)
self.assertEqual([5, 6], sorted(cache.keys()))
# Adding something will use the new size
cache[7] = 8
self.assertEqual([5, 6, 7], sorted(cache.keys()))
cache[8] = 9
self.assertEqual([7, 8], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache.resize(max_cache=8, after_cleanup_count=6)
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 8
cache[8] = 9
cache[9] = 10
cache[10] = 11
self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys()))
cache[11] = 12 # triggers cleanup back to new after_cleanup_count
self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys()))
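# LRUSizeCache (tested below) evicts on the total *size* of the stored values
# rather than the entry count: sizes come from compute_size (len() by default),
# a single value whose size reaches after_cleanup_size is not cached at all,
# and once the total passes max_size the least-recently-used entries are
# dropped until the total falls back under after_cleanup_size.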
class TestLRUSizeCache(TestCase):
def test_basic_init(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(2048, cache._max_cache)
self.assertEqual(int(cache._max_size*0.8), cache._after_cleanup_size)
self.assertEqual(0, cache._value_size)
def test_add__null_key(self):
cache = lru_cache.LRUSizeCache()
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_add_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
def test_remove_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
node = cache._cache['my key']
cache._remove_node(node)
self.assertEqual(0, cache._value_size)
def test_no_add_over_size(self):
"""Adding a large value may not be cached at all."""
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test': 'key'}, cache.items())
cache.add('test2', 'key that is too big')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
# If we would add a key, only to cleanup and remove all cached entries,
# then obviously that value should not be stored
cache.add('test3', 'bigkey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
cache.add('test4', 'bikey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
def test_no_add_over_size_cleanup(self):
"""If a large value is not cached, we will call cleanup right away."""
cleanup_calls = []
def cleanup(key, value):
cleanup_calls.append((key, value))
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key that is too big', cleanup=cleanup)
# key was not added
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
# and cleanup was called
self.assertEqual([('test', 'key that is too big')], cleanup_calls)
def test_adding_clears_cache_based_on_size(self):
"""The cache is cleared in LRU order until small enough"""
cache = lru_cache.LRUSizeCache(max_size=20)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 2 keys to get back under limit
self.assertEqual(6+8, cache._value_size)
self.assertEqual({'key2':'value2', 'key4':'value234'},
cache.items())
def test_adding_clears_to_after_cleanup_size(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':'value234'}, cache.items())
def test_custom_sizes(self):
def size_of_list(lst):
return sum(len(x) for x in lst)
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10,
compute_size=size_of_list)
cache.add('key1', ['val', 'ue']) # 5 chars
cache.add('key2', ['val', 'ue2']) # 6 chars
cache.add('key3', ['val', 'ue23']) # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', ['value', '234']) # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':['value', '234']}, cache.items())
def test_cleanup(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
# Add these in order
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache.cleanup()
# Only the most recent fits after cleaning up
self.assertEqual(7, cache._value_size)
def test_keys(self):
cache = lru_cache.LRUSizeCache(max_size=10)
cache[1] = 'a'
cache[2] = 'b'
cache[3] = 'cdef'
self.assertEqual([1, 2, 3], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
# Resize should also cleanup again
cache.resize(max_size=6, after_cleanup_size=4)
self.assertEqual([4], sorted(cache.keys()))
# Adding should use the new max size
cache[5] = 'mno'
self.assertEqual([4, 5], sorted(cache.keys()))
cache[6] = 'pqr'
self.assertEqual([6], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache.resize(max_size=15, after_cleanup_size=12)
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache[5] = 'mno'
cache[6] = 'pqr'
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 'stu'
self.assertEqual([4, 5, 6, 7], sorted(cache.keys()))
| # Copyright (C) 2006, 2008 Canonical Ltd
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as public by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
"""Tests for the lru_cache module."""
from dulwich import (
lru_cache,
)
from dulwich.tests import (
TestCase,
)
class TestLRUCache(TestCase):
"""Test that LRU cache properly keeps track of entries."""
def test_cache_size(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertEqual(10, cache.cache_size())
cache = lru_cache.LRUCache(max_cache=256)
self.assertEqual(256, cache.cache_size())
cache.resize(512)
self.assertEqual(512, cache.cache_size())
def test_missing(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse('foo' in cache)
self.assertRaises(KeyError, cache.__getitem__, 'foo')
cache['foo'] = 'bar'
self.assertEqual('bar', cache['foo'])
self.assertTrue('foo' in cache)
self.assertFalse('bar' in cache)
def test_map_None(self):
# Make sure that we can properly map None as a key.
cache = lru_cache.LRUCache(max_cache=10)
self.assertFalse(None in cache)
cache[None] = 1
self.assertEqual(1, cache[None])
cache[None] = 2
self.assertEqual(2, cache[None])
# Test the various code paths of __getitem__, to make sure that we can
# handle when None is the key for the LRU and the MRU
cache[1] = 3
cache[None] = 1
cache[None]
cache[1]
cache[None]
self.assertEqual([None, 1], [n.key for n in cache._walk_lru()])
def test_add__null_key(self):
cache = lru_cache.LRUCache(max_cache=10)
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_overflow(self):
"""Adding extra entries will pop out old ones."""
cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1)
cache['foo'] = 'bar'
# With a max cache of 1, adding 'baz' should pop out 'foo'
cache['baz'] = 'biz'
self.assertFalse('foo' in cache)
self.assertTrue('baz' in cache)
self.assertEqual('biz', cache['baz'])
def test_by_usage(self):
"""Accessing entries bumps them up in priority."""
cache = lru_cache.LRUCache(max_cache=2)
cache['baz'] = 'biz'
cache['foo'] = 'bar'
self.assertEqual('biz', cache['baz'])
# This must kick out 'foo' because it was the last accessed
cache['nub'] = 'in'
self.assertFalse('foo' in cache)
def test_cleanup(self):
"""Test that we can use a cleanup function."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2)
cache.add('baz', '1', cleanup=cleanup_func)
cache.add('foo', '2', cleanup=cleanup_func)
cache.add('biz', '3', cleanup=cleanup_func)
self.assertEqual([('baz', '1')], cleanup_called)
# 'foo' is now most recent, so final cleanup will call it last
cache['foo']
cache.clear()
self.assertEqual([('baz', '1'), ('biz', '3'), ('foo', '2')],
cleanup_called)
def test_cleanup_on_replace(self):
"""Replacing an object should cleanup the old value."""
cleanup_called = []
def cleanup_func(key, val):
cleanup_called.append((key, val))
cache = lru_cache.LRUCache(max_cache=2)
cache.add(1, 10, cleanup=cleanup_func)
cache.add(2, 20, cleanup=cleanup_func)
cache.add(2, 25, cleanup=cleanup_func)
self.assertEqual([(2, 20)], cleanup_called)
self.assertEqual(25, cache[2])
# Even __setitem__ should make sure cleanup() is called
cache[2] = 26
self.assertEqual([(2, 20), (2, 25)], cleanup_called)
def test_len(self):
cache = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10)
cache[1] = 10
cache[2] = 20
cache[3] = 30
cache[4] = 40
self.assertEqual(4, len(cache))
cache[5] = 50
cache[6] = 60
cache[7] = 70
cache[8] = 80
self.assertEqual(8, len(cache))
cache[1] = 15 # replacement
self.assertEqual(8, len(cache))
cache[9] = 90
cache[10] = 100
cache[11] = 110
# We hit the max
self.assertEqual(10, len(cache))
self.assertEqual([11, 10, 9, 1, 8, 7, 6, 5, 4, 3],
[n.key for n in cache._walk_lru()])
def test_cleanup_shrinks_to_after_clean_count(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3)
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# This will bump us over the max, which causes us to shrink down to
# after_cleanup_cache size
cache.add(6, 40)
self.assertEqual(3, len(cache))
def test_after_cleanup_larger_than_max(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10)
self.assertEqual(5, cache._after_cleanup_count)
def test_after_cleanup_none(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None)
# By default _after_cleanup_size is 80% of the normal size
self.assertEqual(4, cache._after_cleanup_count)
def test_cleanup_2(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual(5, len(cache))
# Force a compaction
cache.cleanup()
self.assertEqual(2, len(cache))
def test_preserve_last_access_order(self):
cache = lru_cache.LRUCache(max_cache=5)
# Add these in order
cache.add(1, 10)
cache.add(2, 20)
cache.add(3, 25)
cache.add(4, 30)
cache.add(5, 35)
self.assertEqual([5, 4, 3, 2, 1], [n.key for n in cache._walk_lru()])
# Now access some randomly
cache[2]
cache[5]
cache[3]
cache[2]
self.assertEqual([2, 3, 5, 4, 1], [n.key for n in cache._walk_lru()])
def test_get(self):
cache = lru_cache.LRUCache(max_cache=5)
cache.add(1, 10)
cache.add(2, 20)
self.assertEqual(20, cache.get(2))
self.assertEqual(None, cache.get(3))
obj = object()
self.assertTrue(obj is cache.get(3, obj))
self.assertEqual([2, 1], [n.key for n in cache._walk_lru()])
self.assertEqual(10, cache.get(1))
self.assertEqual([1, 2], [n.key for n in cache._walk_lru()])
def test_keys(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5)
cache[1] = 2
cache[2] = 3
cache[3] = 4
self.assertEqual([1, 2, 3], sorted(cache.keys()))
cache[4] = 5
cache[5] = 6
cache[6] = 7
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
# Now resize to something smaller, which triggers a cleanup
cache.resize(max_cache=3, after_cleanup_count=2)
self.assertEqual([5, 6], sorted(cache.keys()))
# Adding something will use the new size
cache[7] = 8
self.assertEqual([5, 6, 7], sorted(cache.keys()))
cache[8] = 9
self.assertEqual([7, 8], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)
cache[1] = 2
cache[2] = 3
cache[3] = 4
cache[4] = 5
cache[5] = 6
self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))
cache[6] = 7
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache.resize(max_cache=8, after_cleanup_count=6)
self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 8
cache[8] = 9
cache[9] = 10
cache[10] = 11
self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys()))
cache[11] = 12 # triggers cleanup back to new after_cleanup_count
self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys()))
class TestLRUSizeCache(TestCase):
def test_basic_init(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(2048, cache._max_cache)
self.assertEqual(int(cache._max_size*0.8), cache._after_cleanup_size)
self.assertEqual(0, cache._value_size)
def test_add__null_key(self):
cache = lru_cache.LRUSizeCache()
self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)
def test_add_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
def test_remove_tracks_size(self):
cache = lru_cache.LRUSizeCache()
self.assertEqual(0, cache._value_size)
cache.add('my key', 'my value text')
self.assertEqual(13, cache._value_size)
node = cache._cache['my key']
cache._remove_node(node)
self.assertEqual(0, cache._value_size)
def test_no_add_over_size(self):
"""Adding a large value may not be cached at all."""
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test': 'key'}, cache.items())
cache.add('test2', 'key that is too big')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
# If we would add a key, only to cleanup and remove all cached entries,
# then obviously that value should not be stored
cache.add('test3', 'bigkey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
cache.add('test4', 'bikey')
self.assertEqual(3, cache._value_size)
self.assertEqual({'test':'key'}, cache.items())
def test_no_add_over_size_cleanup(self):
"""If a large value is not cached, we will call cleanup right away."""
cleanup_calls = []
def cleanup(key, value):
cleanup_calls.append((key, value))
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
cache.add('test', 'key that is too big', cleanup=cleanup)
# key was not added
self.assertEqual(0, cache._value_size)
self.assertEqual({}, cache.items())
# and cleanup was called
self.assertEqual([('test', 'key that is too big')], cleanup_calls)
def test_adding_clears_cache_based_on_size(self):
"""The cache is cleared in LRU order until small enough"""
cache = lru_cache.LRUSizeCache(max_size=20)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 2 keys to get back under limit
self.assertEqual(6+8, cache._value_size)
self.assertEqual({'key2':'value2', 'key4':'value234'},
cache.items())
def test_adding_clears_to_after_cleanup_size(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', 'value234') # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':'value234'}, cache.items())
def test_custom_sizes(self):
def size_of_list(lst):
return sum(len(x) for x in lst)
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10,
compute_size=size_of_list)
cache.add('key1', ['val', 'ue']) # 5 chars
cache.add('key2', ['val', 'ue2']) # 6 chars
cache.add('key3', ['val', 'ue23']) # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache['key2'] # reference key2 so it gets a newer reference time
cache.add('key4', ['value', '234']) # 8 chars, over limit
# We have to remove 3 keys to get back under limit
self.assertEqual(8, cache._value_size)
self.assertEqual({'key4':['value', '234']}, cache.items())
def test_cleanup(self):
cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)
# Add these in order
cache.add('key1', 'value') # 5 chars
cache.add('key2', 'value2') # 6 chars
cache.add('key3', 'value23') # 7 chars
self.assertEqual(5+6+7, cache._value_size)
cache.cleanup()
# Only the most recent fits after cleaning up
self.assertEqual(7, cache._value_size)
def test_keys(self):
cache = lru_cache.LRUSizeCache(max_size=10)
cache[1] = 'a'
cache[2] = 'b'
cache[3] = 'cdef'
self.assertEqual([1, 2, 3], sorted(cache.keys()))
def test_resize_smaller(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
# Resize should also cleanup again
cache.resize(max_size=6, after_cleanup_size=4)
self.assertEqual([4], sorted(cache.keys()))
# Adding should use the new max size
cache[5] = 'mno'
self.assertEqual([4, 5], sorted(cache.keys()))
cache[6] = 'pqr'
self.assertEqual([6], sorted(cache.keys()))
def test_resize_larger(self):
cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)
cache[1] = 'abc'
cache[2] = 'def'
cache[3] = 'ghi'
cache[4] = 'jkl'
# Triggers a cleanup
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache.resize(max_size=15, after_cleanup_size=12)
self.assertEqual([2, 3, 4], sorted(cache.keys()))
cache[5] = 'mno'
cache[6] = 'pqr'
self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))
cache[7] = 'stu'
self.assertEqual([4, 5, 6, 7], sorted(cache.keys()))
| en | 0.881848 | # Copyright (C) 2006, 2008 Canonical Ltd # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # <http://www.gnu.org/licenses/> for a copy of the GNU General Public License # and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache # License, Version 2.0. # Tests for the lru_cache module. Test that LRU cache properly keeps track of entries. # Make sure that we can properly map None as a key. # Test the various code paths of __getitem__, to make sure that we can # handle when None is the key for the LRU and the MRU Adding extra entries will pop out old ones. # With a max cache of 1, adding 'baz' should pop out 'foo' Accessing entries bumps them up in priority. # This must kick out 'foo' because it was the last accessed Test that we can use a cleanup function. # 'foo' is now most recent, so final cleanup will call it last Replacing an object should cleanup the old value. # Even __setitem__ should make sure cleanup() is called # replacement # We hit the max # This will bump us over the max, which causes us to shrink down to # after_cleanup_cache size # By default _after_cleanup_size is 80% of the normal size # Add these in order # Force a compaction # Add these in order # Now access some randomly # Now resize to something smaller, which triggers a cleanup # Adding something will use the new size # triggers cleanup back to new after_cleanup_count Adding a large value may not be cached at all. # If we would add a key, only to cleanup and remove all cached entries, # then obviously that value should not be stored If a large value is not cached, we will call cleanup right away. # key was not added # and cleanup was called The cache is cleared in LRU order until small enough # 5 chars # 6 chars # 7 chars # reference key2 so it gets a newer reference time # 8 chars, over limit # We have to remove 2 keys to get back under limit # 5 chars # 6 chars # 7 chars # reference key2 so it gets a newer reference time # 8 chars, over limit # We have to remove 3 keys to get back under limit # 5 chars # 6 chars # 7 chars # reference key2 so it gets a newer reference time # 8 chars, over limit # We have to remove 3 keys to get back under limit # Add these in order # 5 chars # 6 chars # 7 chars # Only the most recent fits after cleaning up # Triggers a cleanup # Resize should also cleanup again # Adding should use the new max size # Triggers a cleanup | 2.131507 | 2 |
py/2016/5B.py | pedrotari7/advent_of_code | 0 | 9298 | import md5
(i,count) = (0,0)
password = ['']*8
while 1:
key = 'reyedfim' + str(i)
md = md5.new(key).hexdigest()
if md[:5] == '00000':
index = int(md[5],16)
if index < len(password) and password[index]=='':
password[index] = md[6]
count += 1
if count == 8:
break
i+=1
print ''.join(password) | import md5
(i,count) = (0,0)
password = ['']*8
while 1:
key = 'reyedfim' + str(i)
md = md5.new(key).hexdigest()
if md[:5] == '00000':
index = int(md[5],16)
if index < len(password) and password[index]=='':
password[index] = md[6]
count += 1
if count == 8:
break
i+=1
print ''.join(password) | none | 1 | 3.050654 | 3 |
|
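Editor's note: the 5B.py record above is Python 2 code (the long-removed md5
module and print statements). A rough Python 3 equivalent of the same
brute-force idea -- hash 'reyedfim' plus a counter, and for digests starting
with five zeros use hex position 5 as the password slot and position 6 as the
character -- might look like the untested sketch below; only the 'reyedfim'
input comes from the record, everything else is illustration.

import hashlib

door_id = 'reyedfim'  # puzzle input copied from the record above
password = [''] * 8
index = 0
while '' in password:  # stop once all eight slots are filled
    digest = hashlib.md5((door_id + str(index)).encode()).hexdigest()
    if digest.startswith('00000'):
        pos = int(digest[5], 16)
        if pos < 8 and password[pos] == '':
            password[pos] = digest[6]
    index += 1
print(''.join(password))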
release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py | lsica-scopely/mgear4 | 0 | 9299 | <filename>release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py<gh_stars>0
import pymel.core as pm
import ast
from pymel.core import datatypes
from mgear.shifter import component
from mgear.core import node, applyop, vector
from mgear.core import attribute, transform, primitive
class Component(component.Main):
"""Shifter component Class"""
# =====================================================
# OBJECTS
# =====================================================
def addObjects(self):
"""Add all the objects needed to create the component."""
# joint Description Names
jd_names = ast.literal_eval(
self.settings["jointNamesDescription_custom"]
)
jdn_ball = jd_names[0]
self.up_axis = pm.upAxis(q=True, axis=True)
self.div_count = len(self.guide.apos) - 5
plane = [self.guide.apos[0], self.guide.apos[-4], self.guide.apos[-3]]
self.normal = self.getNormalFromPos(plane)
self.binormal = self.getBiNormalFromPos(plane)
# Heel ---------------------------------------------
# bank pivot
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["inpivot"])
self.in_npo = primitive.addTransform(
self.root, self.getName("in_npo"), t
)
self.in_piv = primitive.addTransform(
self.in_npo, self.getName("in_piv"), t
)
t = transform.setMatrixPosition(t, self.guide.pos["outpivot"])
self.out_piv = primitive.addTransform(
self.in_piv, self.getName("out_piv"), t
)
# heel
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
self.heel_loc = primitive.addTransform(
self.out_piv, self.getName("heel_loc"), t
)
attribute.setRotOrder(self.heel_loc, "YZX")
self.heel_ctl = self.addCtl(
self.heel_loc,
"heel_ctl",
t,
self.color_ik,
"sphere",
w=self.size * 0.1,
tp=self.parentCtlTag,
)
attribute.setKeyableAttributes(self.heel_ctl, self.r_params)
# Tip ----------------------------------------------
if self.up_axis == "y":
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.pos["heel"].y,
self.guide.apos[-5].z,
)
else:
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.apos[-5].y,
self.guide.pos["heel"].z,
)
t = transform.setMatrixPosition(t, v)
self.tip_ctl = self.addCtl(
self.heel_ctl,
"tip_ctl",
t,
self.color_ik,
"circle",
w=self.size,
tp=self.heel_ctl,
)
attribute.setKeyableAttributes(self.tip_ctl, self.r_params)
# Roll ---------------------------------------------
if self.settings["useRollCtl"]:
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["root"])
self.roll_np = primitive.addTransform(
self.root, self.getName("roll_npo"), t
)
self.roll_ctl = self.addCtl(
self.roll_np,
"roll_ctl",
t,
self.color_ik,
"cylinder",
w=self.size * 0.5,
h=self.size * 0.5,
ro=datatypes.Vector(3.1415 * 0.5, 0, 0),
tp=self.tip_ctl,
)
attribute.setKeyableAttributes(self.roll_ctl, ["rx", "rz"])
        # Backward Controllers -----------------------------
bk_pos = self.guide.apos[1:-3]
bk_pos.reverse()
parent = self.tip_ctl
self.bk_ctl = []
self.bk_loc = []
self.previousTag = self.tip_ctl
for i, pos in enumerate(bk_pos):
if i == 0:
t = transform.getTransform(self.heel_ctl)
t = transform.setMatrixPosition(t, pos)
else:
direction = bk_pos[i - 1]
t = transform.getTransformLookingAt(
pos, direction, self.normal, "xz", self.negate
)
bk_loc = primitive.addTransform(
parent, self.getName("bk%s_loc" % i), t
)
bk_ctl = self.addCtl(
bk_loc,
"bk%s_ctl" % i,
t,
self.color_ik,
"sphere",
w=self.size * 0.15,
tp=self.previousTag,
)
attribute.setKeyableAttributes(bk_ctl, self.r_params)
self.previousTag = bk_ctl
self.bk_loc.append(bk_loc)
self.bk_ctl.append(bk_ctl)
parent = bk_ctl
# FK Reference ------------------------------------
self.fk_ref = primitive.addTransformFromPos(
self.bk_ctl[-1], self.getName("fk_ref"), self.guide.apos[0]
)
self.fk_npo = primitive.addTransform(
self.fk_ref,
self.getName("fk0_npo"),
transform.getTransform(self.bk_ctl[-1]),
)
        # Forward Controllers ------------------------------
self.fk_ctl = []
self.fk_loc = []
parent = self.fk_npo
self.previousTag = self.tip_ctl
for i, bk_ctl in enumerate(reversed(self.bk_ctl[1:])):
if i == len(self.bk_ctl) - 2:
t = transform.getTransform(self.tip_ctl)
v = transform.getTranslation(bk_ctl)
t = transform.setMatrixPosition(t, v)
else:
t = transform.getTransform(bk_ctl)
dist = vector.getDistance(
self.guide.apos[i + 1], self.guide.apos[i + 2]
)
fk_loc = primitive.addTransform(
parent, self.getName("fk%s_loc" % i), t
)
po_vec = datatypes.Vector(dist * 0.5 * self.n_factor, 0, 0)
fk_ctl = self.addCtl(
fk_loc,
"fk%s_ctl" % i,
t,
self.color_fk,
"cube",
w=dist,
h=self.size * 0.5,
d=self.size * 0.5,
po=po_vec,
tp=self.previousTag,
)
self.previousTag = fk_ctl
attribute.setKeyableAttributes(fk_ctl)
if i:
name = jdn_ball + str(i)
else:
name = jdn_ball
self.jnt_pos.append([fk_ctl, name])
parent = fk_ctl
self.fk_ctl.append(fk_ctl)
self.fk_loc.append(fk_loc)
# =====================================================
# ATTRIBUTES
# =====================================================
def addAttributes(self):
"""Create the anim and setupr rig attributes for the component"""
# Anim -------------------------------------------
# Roll Angles
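        # When the roll control is disabled, Roll and Bank are exposed as
        # animatable parameters instead of being read from the control's
        # rotation in addOperators.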
if not self.settings["useRollCtl"]:
self.roll_att = self.addAnimParam(
"roll", "Roll", "double", 0, -180, 180
)
self.bank_att = self.addAnimParam(
"bank", "Bank", "double", 0, -180, 180
)
self.angles_att = [
self.addAnimParam("angle_%s" % i, "Angle %s" % i, "double", -20)
for i in range(self.div_count)
]
# Setup ------------------------------------------
self.blend_att = self.addSetupParam(
"blend", "Fk/Ik Blend", "double", 1, 0, 1
)
# =====================================================
# OPERATORS
# =====================================================
def addOperators(self):
"""Create operators and set the relations for the component rig
Apply operators, constraints, expressions to the hierarchy.
In order to keep the code clean and easier to debug,
we shouldn't create any new object in this method.
"""
# Visibilities -------------------------------------
try:
# ik
if self.settings["useRollCtl"]:
for shp in self.roll_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for bk_ctl in self.bk_ctl:
for shp in bk_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.heel_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.tip_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
except RuntimeError:
pm.displayInfo("Visibility already connect")
# Roll / Bank --------------------------------------
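        # The clamp splits the inputs by sign: positive roll drives the heel,
        # negative bank drives the out pivot and positive bank the in pivot.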
if self.settings["useRollCtl"]: # Using the controler
self.roll_att = self.roll_ctl.attr("rz")
self.bank_att = self.roll_ctl.attr("rx")
clamp_node = node.createClampNode(
[self.roll_att, self.bank_att, self.bank_att],
[0, -180, 0],
[180, 0, 180],
)
inAdd_nod = node.createAddNode(
clamp_node.outputB,
pm.getAttr(self.in_piv.attr("rx")) * self.n_factor,
)
pm.connectAttr(clamp_node.outputR, self.heel_loc.attr("rz"))
pm.connectAttr(clamp_node.outputG, self.out_piv.attr("rx"))
pm.connectAttr(inAdd_nod.output, self.in_piv.attr("rx"))
        # Reverse Controller offset ------------------------
angle_outputs = node.createAddNodeMulti(self.angles_att)
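        # Distribute negative roll along the reverse chain: each backward
        # locator bends until it hits its Angle limit, and the remaining roll
        # carries over to the next one (the last locator's -360 limit is
        # effectively unlimited).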
for i, bk_loc in enumerate(reversed(self.bk_loc)):
if i == 0: # First
inpu = self.roll_att
min_input = self.angles_att[i]
elif i == len(self.angles_att): # Last
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = -360
else: # Others
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = self.angles_att[i]
clamp_node = node.createClampNode(inpu, min_input, 0)
add_node = node.createAddNode(
clamp_node.outputR, bk_loc.getAttr("rz")
)
pm.connectAttr(add_node.output, bk_loc.attr("rz"))
# Reverse compensation -----------------------------
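        # With the rig in IK, each fk_loc receives the negated rotation of its
        # matching backward control/locator so the FK chain compensates the
        # reverse-foot roll; the blend attribute switches back to the static
        # FK offsets.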
for i, fk_loc in enumerate(self.fk_loc):
bk_ctl = self.bk_ctl[-i - 1]
bk_loc = self.bk_loc[-i - 1]
fk_ctl = self.fk_ctl[i]
# Inverse Rotorder
o_node = applyop.gear_inverseRotorder_op(bk_ctl, fk_ctl)
pm.connectAttr(o_node.output, bk_loc.attr("ro"))
pm.connectAttr(fk_ctl.attr("ro"), fk_loc.attr("ro"))
attribute.lockAttribute(bk_ctl, "ro")
# Compensate the backward rotation
# ik
addx_node = node.createAddNode(
bk_ctl.attr("rx"), bk_loc.attr("rx")
)
addy_node = node.createAddNode(
bk_ctl.attr("ry"), bk_loc.attr("ry")
)
addz_node = node.createAddNode(
bk_ctl.attr("rz"), bk_loc.attr("rz")
)
addz_node = node.createAddNode(
addz_node.output, -bk_loc.getAttr("rz") - fk_loc.getAttr("rz")
)
neg_node = node.createMulNode(
[addx_node.output, addy_node.output, addz_node.output],
[-1, -1, -1],
)
add_node = node.createAddNode(
neg_node.outputY.get() * -1, neg_node.outputY
)
ik_outputs = [neg_node.outputX, add_node.output, neg_node.outputZ]
# fk
fk_outputs = [0, 0, fk_loc.getAttr("rz")]
# blend
blend_node = node.createBlendNode(
ik_outputs, fk_outputs, self.blend_att
)
pm.connectAttr(blend_node.output, fk_loc.attr("rotate"))
return
# =====================================================
# CONNECTOR
# =====================================================
def setRelation(self):
"""Set the relation beetween object from guide to rig"""
self.relatives["root"] = self.fk_ctl[0]
self.relatives["heel"] = self.fk_ctl[0]
self.relatives["inpivot"] = self.fk_ctl[0]
self.relatives["outpivot"] = self.fk_ctl[0]
self.controlRelatives["root"] = self.fk_ctl[0]
self.controlRelatives["heel"] = self.fk_ctl[0]
self.controlRelatives["inpivot"] = self.fk_ctl[0]
self.controlRelatives["outpivot"] = self.fk_ctl[0]
self.jointRelatives["root"] = 0
self.jointRelatives["heel"] = 0
self.jointRelatives["inpivot"] = 0
self.jointRelatives["outpivot"] = 0
for i in range(self.div_count):
self.relatives["%s_loc" % i] = self.fk_ctl[i]
self.jointRelatives["%s_loc" % i] = i
if self.div_count > 0:
self.relatives["%s_loc" % self.div_count] = self.fk_ctl[-1]
self.jointRelatives["%s_loc" % self.div_count] = self.div_count - 1
def addConnection(self):
"""Add more connection definition to the set"""
self.connections["EPIC_leg_01"] = self.connect_leg_2jnt_01
self.connections["leg_2jnt_01"] = self.connect_leg_2jnt_01
self.connections["leg_ms_2jnt_01"] = self.connect_leg_ms_2jnt_01
self.connections["leg_3jnt_01"] = self.connect_leg_3jnt_01
def connect_leg_2jnt_01(self):
"""Connector for leg 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws2_rot, self.fk_ref, maintainOffset=True
)
return
def connect_leg_ms_2jnt_01(self):
"""Connector for leg ms 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
cns = pm.scaleConstraint(
self.parent_comp.fk_ref,
self.parent_comp.ik_ref,
self.fk_ref,
wal=True,
)
bc_node = pm.createNode("blendColors")
pm.connectAttr(
bc_node.outputB, cns + ".%sW0" % self.parent_comp.fk_ref
)
pm.connectAttr(
bc_node.outputR, cns + ".%sW1" % self.parent_comp.ik_ref
)
pm.connectAttr(self.parent_comp.blend_att, bc_node.blender)
return
def connect_leg_3jnt_01(self):
"""Connector for leg 3jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parent(self.parent_comp.ik2b_ikCtl_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
return
| <filename>release/scripts/mgear/shifter_epic_components/EPIC_foot_01/__init__.py<gh_stars>0
import pymel.core as pm
import ast
from pymel.core import datatypes
from mgear.shifter import component
from mgear.core import node, applyop, vector
from mgear.core import attribute, transform, primitive
class Component(component.Main):
"""Shifter component Class"""
# =====================================================
# OBJECTS
# =====================================================
def addObjects(self):
"""Add all the objects needed to create the component."""
# joint Description Names
jd_names = ast.literal_eval(
self.settings["jointNamesDescription_custom"]
)
jdn_ball = jd_names[0]
self.up_axis = pm.upAxis(q=True, axis=True)
self.div_count = len(self.guide.apos) - 5
plane = [self.guide.apos[0], self.guide.apos[-4], self.guide.apos[-3]]
self.normal = self.getNormalFromPos(plane)
self.binormal = self.getBiNormalFromPos(plane)
# Heel ---------------------------------------------
# bank pivot
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["inpivot"])
self.in_npo = primitive.addTransform(
self.root, self.getName("in_npo"), t
)
self.in_piv = primitive.addTransform(
self.in_npo, self.getName("in_piv"), t
)
t = transform.setMatrixPosition(t, self.guide.pos["outpivot"])
self.out_piv = primitive.addTransform(
self.in_piv, self.getName("out_piv"), t
)
# heel
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
self.heel_loc = primitive.addTransform(
self.out_piv, self.getName("heel_loc"), t
)
attribute.setRotOrder(self.heel_loc, "YZX")
self.heel_ctl = self.addCtl(
self.heel_loc,
"heel_ctl",
t,
self.color_ik,
"sphere",
w=self.size * 0.1,
tp=self.parentCtlTag,
)
attribute.setKeyableAttributes(self.heel_ctl, self.r_params)
# Tip ----------------------------------------------
if self.up_axis == "y":
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.pos["heel"].y,
self.guide.apos[-5].z,
)
else:
v = datatypes.Vector(
self.guide.apos[-5].x,
self.guide.apos[-5].y,
self.guide.pos["heel"].z,
)
t = transform.setMatrixPosition(t, v)
self.tip_ctl = self.addCtl(
self.heel_ctl,
"tip_ctl",
t,
self.color_ik,
"circle",
w=self.size,
tp=self.heel_ctl,
)
attribute.setKeyableAttributes(self.tip_ctl, self.r_params)
# Roll ---------------------------------------------
if self.settings["useRollCtl"]:
t = transform.getTransformLookingAt(
self.guide.pos["heel"],
self.guide.apos[-4],
self.normal,
"xz",
self.negate,
)
t = transform.setMatrixPosition(t, self.guide.pos["root"])
self.roll_np = primitive.addTransform(
self.root, self.getName("roll_npo"), t
)
self.roll_ctl = self.addCtl(
self.roll_np,
"roll_ctl",
t,
self.color_ik,
"cylinder",
w=self.size * 0.5,
h=self.size * 0.5,
ro=datatypes.Vector(3.1415 * 0.5, 0, 0),
tp=self.tip_ctl,
)
attribute.setKeyableAttributes(self.roll_ctl, ["rx", "rz"])
        # Backward Controllers -----------------------------
bk_pos = self.guide.apos[1:-3]
bk_pos.reverse()
parent = self.tip_ctl
self.bk_ctl = []
self.bk_loc = []
self.previousTag = self.tip_ctl
for i, pos in enumerate(bk_pos):
if i == 0:
t = transform.getTransform(self.heel_ctl)
t = transform.setMatrixPosition(t, pos)
else:
direction = bk_pos[i - 1]
t = transform.getTransformLookingAt(
pos, direction, self.normal, "xz", self.negate
)
bk_loc = primitive.addTransform(
parent, self.getName("bk%s_loc" % i), t
)
bk_ctl = self.addCtl(
bk_loc,
"bk%s_ctl" % i,
t,
self.color_ik,
"sphere",
w=self.size * 0.15,
tp=self.previousTag,
)
attribute.setKeyableAttributes(bk_ctl, self.r_params)
self.previousTag = bk_ctl
self.bk_loc.append(bk_loc)
self.bk_ctl.append(bk_ctl)
parent = bk_ctl
# FK Reference ------------------------------------
self.fk_ref = primitive.addTransformFromPos(
self.bk_ctl[-1], self.getName("fk_ref"), self.guide.apos[0]
)
self.fk_npo = primitive.addTransform(
self.fk_ref,
self.getName("fk0_npo"),
transform.getTransform(self.bk_ctl[-1]),
)
        # Forward Controllers -----------------------------
self.fk_ctl = []
self.fk_loc = []
parent = self.fk_npo
self.previousTag = self.tip_ctl
for i, bk_ctl in enumerate(reversed(self.bk_ctl[1:])):
if i == len(self.bk_ctl) - 2:
t = transform.getTransform(self.tip_ctl)
v = transform.getTranslation(bk_ctl)
t = transform.setMatrixPosition(t, v)
else:
t = transform.getTransform(bk_ctl)
dist = vector.getDistance(
self.guide.apos[i + 1], self.guide.apos[i + 2]
)
fk_loc = primitive.addTransform(
parent, self.getName("fk%s_loc" % i), t
)
po_vec = datatypes.Vector(dist * 0.5 * self.n_factor, 0, 0)
fk_ctl = self.addCtl(
fk_loc,
"fk%s_ctl" % i,
t,
self.color_fk,
"cube",
w=dist,
h=self.size * 0.5,
d=self.size * 0.5,
po=po_vec,
tp=self.previousTag,
)
self.previousTag = fk_ctl
attribute.setKeyableAttributes(fk_ctl)
if i:
name = jdn_ball + str(i)
else:
name = jdn_ball
self.jnt_pos.append([fk_ctl, name])
parent = fk_ctl
self.fk_ctl.append(fk_ctl)
self.fk_loc.append(fk_loc)
# =====================================================
# ATTRIBUTES
# =====================================================
def addAttributes(self):
"""Create the anim and setupr rig attributes for the component"""
# Anim -------------------------------------------
# Roll Angles
if not self.settings["useRollCtl"]:
self.roll_att = self.addAnimParam(
"roll", "Roll", "double", 0, -180, 180
)
self.bank_att = self.addAnimParam(
"bank", "Bank", "double", 0, -180, 180
)
self.angles_att = [
self.addAnimParam("angle_%s" % i, "Angle %s" % i, "double", -20)
for i in range(self.div_count)
]
# Setup ------------------------------------------
self.blend_att = self.addSetupParam(
"blend", "Fk/Ik Blend", "double", 1, 0, 1
)
# =====================================================
# OPERATORS
# =====================================================
def addOperators(self):
"""Create operators and set the relations for the component rig
Apply operators, constraints, expressions to the hierarchy.
In order to keep the code clean and easier to debug,
we shouldn't create any new object in this method.
"""
# Visibilities -------------------------------------
try:
# ik
if self.settings["useRollCtl"]:
for shp in self.roll_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for bk_ctl in self.bk_ctl:
for shp in bk_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.heel_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
for shp in self.tip_ctl.getShapes():
pm.connectAttr(self.blend_att, shp.attr("visibility"))
except RuntimeError:
pm.displayInfo("Visibility already connect")
# Roll / Bank --------------------------------------
if self.settings["useRollCtl"]: # Using the controler
self.roll_att = self.roll_ctl.attr("rz")
self.bank_att = self.roll_ctl.attr("rx")
clamp_node = node.createClampNode(
[self.roll_att, self.bank_att, self.bank_att],
[0, -180, 0],
[180, 0, 180],
)
inAdd_nod = node.createAddNode(
clamp_node.outputB,
pm.getAttr(self.in_piv.attr("rx")) * self.n_factor,
)
pm.connectAttr(clamp_node.outputR, self.heel_loc.attr("rz"))
pm.connectAttr(clamp_node.outputG, self.out_piv.attr("rx"))
pm.connectAttr(inAdd_nod.output, self.in_piv.attr("rx"))
        # Reverse Controller offset ------------------------
angle_outputs = node.createAddNodeMulti(self.angles_att)
for i, bk_loc in enumerate(reversed(self.bk_loc)):
if i == 0: # First
inpu = self.roll_att
min_input = self.angles_att[i]
elif i == len(self.angles_att): # Last
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = -360
else: # Others
sub_nod = node.createSubNode(
self.roll_att, angle_outputs[i - 1]
)
inpu = sub_nod.output
min_input = self.angles_att[i]
clamp_node = node.createClampNode(inpu, min_input, 0)
add_node = node.createAddNode(
clamp_node.outputR, bk_loc.getAttr("rz")
)
pm.connectAttr(add_node.output, bk_loc.attr("rz"))
# Reverse compensation -----------------------------
for i, fk_loc in enumerate(self.fk_loc):
bk_ctl = self.bk_ctl[-i - 1]
bk_loc = self.bk_loc[-i - 1]
fk_ctl = self.fk_ctl[i]
# Inverse Rotorder
o_node = applyop.gear_inverseRotorder_op(bk_ctl, fk_ctl)
pm.connectAttr(o_node.output, bk_loc.attr("ro"))
pm.connectAttr(fk_ctl.attr("ro"), fk_loc.attr("ro"))
attribute.lockAttribute(bk_ctl, "ro")
# Compensate the backward rotation
# ik
addx_node = node.createAddNode(
bk_ctl.attr("rx"), bk_loc.attr("rx")
)
addy_node = node.createAddNode(
bk_ctl.attr("ry"), bk_loc.attr("ry")
)
addz_node = node.createAddNode(
bk_ctl.attr("rz"), bk_loc.attr("rz")
)
addz_node = node.createAddNode(
addz_node.output, -bk_loc.getAttr("rz") - fk_loc.getAttr("rz")
)
neg_node = node.createMulNode(
[addx_node.output, addy_node.output, addz_node.output],
[-1, -1, -1],
)
add_node = node.createAddNode(
neg_node.outputY.get() * -1, neg_node.outputY
)
ik_outputs = [neg_node.outputX, add_node.output, neg_node.outputZ]
# fk
fk_outputs = [0, 0, fk_loc.getAttr("rz")]
# blend
blend_node = node.createBlendNode(
ik_outputs, fk_outputs, self.blend_att
)
pm.connectAttr(blend_node.output, fk_loc.attr("rotate"))
return
# =====================================================
# CONNECTOR
# =====================================================
def setRelation(self):
"""Set the relation beetween object from guide to rig"""
self.relatives["root"] = self.fk_ctl[0]
self.relatives["heel"] = self.fk_ctl[0]
self.relatives["inpivot"] = self.fk_ctl[0]
self.relatives["outpivot"] = self.fk_ctl[0]
self.controlRelatives["root"] = self.fk_ctl[0]
self.controlRelatives["heel"] = self.fk_ctl[0]
self.controlRelatives["inpivot"] = self.fk_ctl[0]
self.controlRelatives["outpivot"] = self.fk_ctl[0]
self.jointRelatives["root"] = 0
self.jointRelatives["heel"] = 0
self.jointRelatives["inpivot"] = 0
self.jointRelatives["outpivot"] = 0
for i in range(self.div_count):
self.relatives["%s_loc" % i] = self.fk_ctl[i]
self.jointRelatives["%s_loc" % i] = i
if self.div_count > 0:
self.relatives["%s_loc" % self.div_count] = self.fk_ctl[-1]
self.jointRelatives["%s_loc" % self.div_count] = self.div_count - 1
def addConnection(self):
"""Add more connection definition to the set"""
self.connections["EPIC_leg_01"] = self.connect_leg_2jnt_01
self.connections["leg_2jnt_01"] = self.connect_leg_2jnt_01
self.connections["leg_ms_2jnt_01"] = self.connect_leg_ms_2jnt_01
self.connections["leg_3jnt_01"] = self.connect_leg_3jnt_01
def connect_leg_2jnt_01(self):
"""Connector for leg 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws2_rot, self.fk_ref, maintainOffset=True
)
return
def connect_leg_ms_2jnt_01(self):
"""Connector for leg ms 2jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
cns = pm.scaleConstraint(
self.parent_comp.fk_ref,
self.parent_comp.ik_ref,
self.fk_ref,
wal=True,
)
bc_node = pm.createNode("blendColors")
pm.connectAttr(
bc_node.outputB, cns + ".%sW0" % self.parent_comp.fk_ref
)
pm.connectAttr(
bc_node.outputR, cns + ".%sW1" % self.parent_comp.ik_ref
)
pm.connectAttr(self.parent_comp.blend_att, bc_node.blender)
return
def connect_leg_3jnt_01(self):
"""Connector for leg 3jnt"""
# If the parent component hasn't been generated we skip the connection
if self.parent_comp is None:
return
pm.connectAttr(self.parent_comp.blend_att, self.blend_att)
pm.parent(self.root, self.parent_comp.ik_ctl)
pm.parent(self.parent_comp.ik_ref, self.bk_ctl[-1])
pm.parent(self.parent_comp.ik2b_ikCtl_ref, self.bk_ctl[-1])
pm.parentConstraint(
self.parent_comp.tws3_rot, self.fk_ref, maintainOffset=True
)
return
| en | 0.506139 | Shifter component Class # ===================================================== # OBJECTS # ===================================================== Add all the objects needed to create the component. # joint Description Names # Heel --------------------------------------------- # bank pivot # heel # Tip ---------------------------------------------- # Roll --------------------------------------------- # Backward Controlers ------------------------------ # FK Reference ------------------------------------ # Forward Controlers ------------------------------ # ===================================================== # ATTRIBUTES # ===================================================== Create the anim and setupr rig attributes for the component # Anim ------------------------------------------- # Roll Angles # Setup ------------------------------------------ # ===================================================== # OPERATORS # ===================================================== Create operators and set the relations for the component rig Apply operators, constraints, expressions to the hierarchy. In order to keep the code clean and easier to debug, we shouldn't create any new object in this method. # Visibilities ------------------------------------- # ik # Roll / Bank -------------------------------------- # Using the controler # Reverse Controler offset ------------------------- # First # Last # Others # Reverse compensation ----------------------------- # Inverse Rotorder # Compensate the backward rotation # ik # fk # blend # ===================================================== # CONNECTOR # ===================================================== Set the relation beetween object from guide to rig Add more connection definition to the set Connector for leg 2jnt # If the parent component hasn't been generated we skip the connection Connector for leg ms 2jnt # If the parent component hasn't been generated we skip the connection Connector for leg 3jnt # If the parent component hasn't been generated we skip the connection | 2.183324 | 2 |