code (string, 20 to 1.05M chars) | apis (sequence) | extract_api (string, 75 to 5.24M chars)
---|---|---
import os
import re
import cv2
import numpy as np
path = 'C:/Courses/Computer Vision/project/Datasets/Triesch'
extension = 'pgm'
imgDict={}
#for eg -> bfritza1.pgm
#name - 'bfritz'
#letter - 'a'
#sample - '1'
#access elements using imgDict[name][letter][sample]
for file in os.listdir(path):
if re.match('.*\.'+extension,file):
name = file[0:6]
letter = file[6]
sample = file[7]
if imgDict.has_key(name):
letterDict = imgDict[name]
if letterDict.has_key(letter):
sampleDict = letterDict[letter]
sampleDict[sample] = cv2.imread(path+'/'+file)
else:
letterDict[letter] = {sample:cv2.imread(path+'/'+file)}
else:
imgDict[name] = {letter:{sample:cv2.imread(path+'/'+file)}}
np.save('triesch.npy',imgDict) | [
"os.listdir",
"re.match",
"numpy.save",
"cv2.imread"
] | [((275, 291), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (285, 291), False, 'import os\n'), ((715, 746), 'numpy.save', 'np.save', (['"""triesch.npy"""', 'imgDict'], {}), "('triesch.npy', imgDict)\n", (722, 746), True, 'import numpy as np\n'), ((298, 333), 're.match', 're.match', (["('.*\\\\.' + extension)", 'file'], {}), "('.*\\\\.' + extension, file)\n", (306, 333), False, 'import re\n'), ((541, 570), 'cv2.imread', 'cv2.imread', (["(path + '/' + file)"], {}), "(path + '/' + file)\n", (551, 570), False, 'import cv2\n'), ((609, 638), 'cv2.imread', 'cv2.imread', (["(path + '/' + file)"], {}), "(path + '/' + file)\n", (619, 638), False, 'import cv2\n'), ((679, 708), 'cv2.imread', 'cv2.imread', (["(path + '/' + file)"], {}), "(path + '/' + file)\n", (689, 708), False, 'import cv2\n')] |
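A minimal companion sketch for the script above: reloading the pickled dictionary written by np.save and indexing it as imgDict[name][letter][sample]. The key values follow the bfritza1.pgm example from the comments and are only illustrative.

import numpy as np

imgDict = np.load('triesch.npy', allow_pickle=True).item()  # 0-d object array -> dict
img = imgDict['bfritz']['a']['1']  # BGR image array as read by cv2.imread
print(img.shape)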
#!/usr/bin/env python
import glob
import os
import sys
import anndata as ad
import pandas as pd
outdir = sys.argv[1]
adata = ad.AnnData()
files = glob.glob(outdir + "/*.transcript.bam")
kinds = {"expected_count": "count", "TPM": "tpm", "FPKM": "fpkm"}
for kind in kinds.keys():
i = 0
expr = pd.DataFrame()
for file in files:
file = file[: -len(".transcript.bam")] + ".genes.results"
df = pd.read_csv(file, sep="\t")
name = os.path.basename(file)[: -len(".genes.results")]
df = df[["gene_id", "expected_count", "TPM", "FPKM"]]
df = df.set_index(["gene_id"])
if i == 0:
expr = pd.DataFrame(index=df.index)
expr[name] = df[kind]
i += 1
expr = expr.transpose()
expr = expr.sort_index(ascending=True)
if kind == "expected_count":
adata = ad.AnnData(expr)
else:
adata.layers[kind] = expr.values
adata.write(outdir + "/_expression.h5ad.gz", compression="gzip")
| [
"pandas.read_csv",
"os.path.basename",
"pandas.DataFrame",
"anndata.AnnData",
"glob.glob"
] | [((127, 139), 'anndata.AnnData', 'ad.AnnData', ([], {}), '()\n', (137, 139), True, 'import anndata as ad\n'), ((148, 187), 'glob.glob', 'glob.glob', (["(outdir + '/*.transcript.bam')"], {}), "(outdir + '/*.transcript.bam')\n", (157, 187), False, 'import glob\n'), ((301, 315), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (313, 315), True, 'import pandas as pd\n'), ((418, 445), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '"""\t"""'}), "(file, sep='\\t')\n", (429, 445), True, 'import pandas as pd\n'), ((843, 859), 'anndata.AnnData', 'ad.AnnData', (['expr'], {}), '(expr)\n', (853, 859), True, 'import anndata as ad\n'), ((461, 483), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (477, 483), False, 'import os\n'), ((649, 677), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (661, 677), True, 'import pandas as pd\n')] |
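A short read-back sketch for the file produced above. The output directory is hypothetical, and the layer keys ("TPM", "FPKM") are taken from the loop as written.

import anndata as ad

adata = ad.read_h5ad("out/_expression.h5ad.gz")  # same `outdir` as passed on the command line
counts = adata.to_df()                           # expected_count matrix, samples x genes
tpm = adata.layers["TPM"]                        # per-sample TPM values, same shape as X
fpkm = adata.layers["FPKM"]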
from unittest import TestCase
from elastic_dql.field import ElasticDjangoQlField
from ..utils import generate_random_string
class FieldTestCase(TestCase):
def setUp(self) -> None:
pass
def test_get_lookup_name(self):
field_name = generate_random_string()
field = ElasticDjangoQlField(field_name)
self.assertEqual(field.get_lookup_name(), field_name)
def test_format_value(self):
field_name = generate_random_string()
field_type = int
field = ElasticDjangoQlField(field_name, field_type=field_type)
value = field.format_value(2.6)
self.assertIsInstance(value, field_type)
def test_format_value_raise_exception(self):
field_name = generate_random_string()
field_type = int
field = ElasticDjangoQlField(field_name, field_type=field_type)
self.assertRaises(ValueError, field.format_value, "asdasdasd")
| [
"elastic_dql.field.ElasticDjangoQlField"
] | [((300, 332), 'elastic_dql.field.ElasticDjangoQlField', 'ElasticDjangoQlField', (['field_name'], {}), '(field_name)\n', (320, 332), False, 'from elastic_dql.field import ElasticDjangoQlField\n'), ((516, 571), 'elastic_dql.field.ElasticDjangoQlField', 'ElasticDjangoQlField', (['field_name'], {'field_type': 'field_type'}), '(field_name, field_type=field_type)\n', (536, 571), False, 'from elastic_dql.field import ElasticDjangoQlField\n'), ((798, 853), 'elastic_dql.field.ElasticDjangoQlField', 'ElasticDjangoQlField', (['field_name'], {'field_type': 'field_type'}), '(field_name, field_type=field_type)\n', (818, 853), False, 'from elastic_dql.field import ElasticDjangoQlField\n')] |
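An illustrative snippet restricted to the calls exercised by these tests; the field name is made up.

from elastic_dql.field import ElasticDjangoQlField

age = ElasticDjangoQlField("age", field_type=int)
age.get_lookup_name()     # -> "age"
age.format_value(2.6)     # -> an int, per test_format_value
age.format_value("oops")  # raises ValueError, per test_format_value_raise_exception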
# -*- coding: utf-8 -*-
# © 2017-2019, ETH Zurich, Institut für Theoretische Physik
# Author: <NAME> <<EMAIL>>
"""
Defines a workflow which optimizes the energy windows.
"""
import copy
from aiida import orm
from aiida.engine import WorkChain, ToContext
from aiida_tools import check_workchain_step, get_outputs_dict
from aiida_optimize import OptimizationWorkChain
from aiida_optimize.engines import NelderMead
from .run_window import RunWindow
__all__ = ('WindowSearch', )
class WindowSearch(WorkChain):
"""
This workchain runs a series of possible energy windows and selects the best-matching tight-binding model.
"""
@classmethod
def define(cls, spec):
super().define(spec)
spec.expose_inputs(RunWindow, exclude=['window', 'wannier.kpoints'])
# Workaround for plumpy issue #135 (https://github.com/aiidateam/plumpy/issues/135)
spec.inputs['model_evaluation'].dynamic = True
spec.input(
'initial_window',
valid_type=orm.List,
help=
'Initial value for the disentanglement energy windows, given as a list ``[dis_win_min, dis_froz_min, dis_froz_max, dis_win_max]``.'
)
spec.input(
'window_tol',
valid_type=orm.Float,
default=lambda: orm.Float(0.5),
help='Tolerance in energy windows for the window optimization.'
)
spec.input(
'cost_tol',
valid_type=orm.Float,
default=lambda: orm.Float(0.02),
help="Tolerance in the 'cost_value' for the window optimization."
)
spec.output('window', valid_type=orm.List)
spec.outputs.dynamic = True
spec.outline(cls.create_optimization, cls.finalize)
@check_workchain_step
def create_optimization(self):
"""
Run the optimization workchain.
"""
self.report('Launching Window optimization.')
initial_window_list = self.inputs.initial_window.get_list()
window_simplex = [initial_window_list]
simplex_dist = 0.5
for i in range(len(initial_window_list)):
window = copy.deepcopy(initial_window_list)
window[i] += simplex_dist
window_simplex.append(window)
runwindow_inputs = self.exposed_inputs(RunWindow)
runwindow_inputs['wannier']['kpoints'] = self.inputs.wannier_bands
return ToContext(
optimization=self.submit(
OptimizationWorkChain,
engine=NelderMead,
engine_kwargs=orm.Dict(
dict=dict(
result_key='cost_value',
xtol=self.inputs.window_tol.value,
ftol=None,
input_key='window',
simplex=window_simplex
)
),
evaluate_process=RunWindow,
evaluate=runwindow_inputs
)
)
@check_workchain_step
def finalize(self):
"""
Add the optimization results to the outputs.
"""
self.report('Add optimization results to outputs.')
optimal_calc = orm.load_node(
self.ctx.optimization.outputs.optimal_process_uuid.value
)
self.report('Adding optimal window to outputs.')
self.out('window', optimal_calc.inputs.window)
self.report("Adding outputs of the optimal calculation.")
self.out_many(get_outputs_dict(optimal_calc))
self.report('Finished!')
| [
"aiida.orm.load_node",
"aiida_tools.get_outputs_dict",
"aiida.orm.Float",
"copy.deepcopy"
] | [((3210, 3281), 'aiida.orm.load_node', 'orm.load_node', (['self.ctx.optimization.outputs.optimal_process_uuid.value'], {}), '(self.ctx.optimization.outputs.optimal_process_uuid.value)\n', (3223, 3281), False, 'from aiida import orm\n'), ((2156, 2190), 'copy.deepcopy', 'copy.deepcopy', (['initial_window_list'], {}), '(initial_window_list)\n', (2169, 2190), False, 'import copy\n'), ((3504, 3534), 'aiida_tools.get_outputs_dict', 'get_outputs_dict', (['optimal_calc'], {}), '(optimal_calc)\n', (3520, 3534), False, 'from aiida_tools import check_workchain_step, get_outputs_dict\n'), ((1302, 1316), 'aiida.orm.Float', 'orm.Float', (['(0.5)'], {}), '(0.5)\n', (1311, 1316), False, 'from aiida import orm\n'), ((1510, 1525), 'aiida.orm.Float', 'orm.Float', (['(0.02)'], {}), '(0.02)\n', (1519, 1525), False, 'from aiida import orm\n')] |
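A standalone sketch of the simplex construction performed in create_optimization: the initial window plus one extra vertex per dimension, each offset by simplex_dist. The numeric window values below are hypothetical.

import copy

initial_window = [-4.5, -1.0, 6.5, 16.0]  # [dis_win_min, dis_froz_min, dis_froz_max, dis_win_max]
simplex_dist = 0.5
window_simplex = [initial_window]
for i in range(len(initial_window)):
    window = copy.deepcopy(initial_window)
    window[i] += simplex_dist
    window_simplex.append(window)
# -> 5 vertices spanning the 4-dimensional window space, as required by Nelder-Mead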
# Generated by Django 2.2.15 on 2020-09-21 22:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('poc', '0006_auto_20200921_1301'),
]
operations = [
migrations.AddField(
model_name='document',
name='order',
field=models.IntegerField(default=1),
),
migrations.AddField(
model_name='document',
name='rotation',
field=models.IntegerField(default=0),
),
]
| [
"django.db.models.IntegerField"
] | [((332, 362), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1)'}), '(default=1)\n', (351, 362), False, 'from django.db import migrations, models\n'), ((486, 516), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (505, 516), False, 'from django.db import migrations, models\n')] |
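For orientation, a sketch of the two fields the poc.Document model gains from this migration; the model's other fields live in earlier migrations and are omitted here.

from django.db import models

class Document(models.Model):
    # ... fields added by previous poc migrations ...
    order = models.IntegerField(default=1)
    rotation = models.IntegerField(default=0)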
from django.db import models
class Ebook(models.Model):
title = models.CharField(max_length=140)
author = models.CharField(max_length=60)
description = models.TextField()
publication_date = models.DateField()
def __str__(self):
return self.title
class Review(models.Model):
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
review_author = models.CharField(max_length=8, blank=True, null=True) | [
"django.db.models.DateTimeField",
"django.db.models.TextField",
"django.db.models.DateField",
"django.db.models.CharField"
] | [((70, 102), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(140)'}), '(max_length=140)\n', (86, 102), False, 'from django.db import models\n'), ((116, 147), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (132, 147), False, 'from django.db import models\n'), ((166, 184), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (182, 184), False, 'from django.db import models\n'), ((208, 226), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (224, 226), False, 'from django.db import models\n'), ((324, 363), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (344, 363), False, 'from django.db import models\n'), ((381, 416), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (401, 416), False, 'from django.db import models\n'), ((437, 490), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)', 'blank': '(True)', 'null': '(True)'}), '(max_length=8, blank=True, null=True)\n', (453, 490), False, 'from django.db import models\n')] |
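A quick ORM usage sketch for the Ebook model above; the values are made up and the snippet assumes it runs inside a configured Django project with these models installed.

import datetime

ebook = Ebook.objects.create(
    title="An Example Ebook",
    author="A. Author",
    description="Short description.",
    publication_date=datetime.date(2020, 9, 21),
)
str(ebook)  # -> "An Example Ebook", via Ebook.__str__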
import contextlib
import dataclasses
import datetime
import decimal
import functools
import logging
import textwrap
from typing import Any, cast, Dict, Generator, Hashable, Optional, Tuple, TYPE_CHECKING
from tzlocal import get_localzone
import arrow
import click
import pandas as pd
from tabulate import tabulate
from jira_offline.exceptions import DeserializeError, FieldNotOnModelClass, ProjectNotConfigured
from jira_offline.utils.serializer import deserialize_value, get_enum, get_base_type, istype
if TYPE_CHECKING:
from jira_offline.models import ProjectMeta, Issue # pylint: disable=cyclic-import
from jira_offline.jira import Jira # pylint: disable=cyclic-import
@functools.lru_cache()
def get_field_by_name(cls: type, field_name: str) -> dataclasses.Field:
'''
Lookup a field by name from the passed dataclass
Params:
cls: The dataclass type on which to search
field_name: Filter for fields by this name
Returns:
Dataclass field
'''
for f in dataclasses.fields(cls):
if f.name == field_name:
return f
raise FieldNotOnModelClass(f'{cls}.{field_name}')
@functools.lru_cache()
def get_dataclass_defaults_for_pandas(cls: type) -> Dict[str, str]:
'''
Return a mapping of Issue.field_name->default, where the default is compatible with pandas
'''
attrs = dict()
for f in dataclasses.fields(cls):
if f.default != dataclasses.MISSING:
# Cast for mypy as get_base_type uses @functools.lru_cache
typ_ = get_base_type(cast(Hashable, f.type))
if istype(typ_, datetime.datetime):
attrs[f.name] = pd.to_datetime(0).tz_localize('utc')
elif istype(typ_, (list, decimal.Decimal)):
attrs[f.name] = ''
else:
attrs[f.name] = typ_()
return attrs
def find_project(jira: 'Jira', project_key: str) -> 'ProjectMeta':
'''
Extract the project configuration object for the specified project key
Params:
jira: Dependency-injected jira.Jira object
project_key: Short Jira project key
'''
try:
return next(p for p in jira.config.projects.values() if p.key == project_key)
except StopIteration:
raise ProjectNotConfigured(project_key)
@functools.lru_cache()
def friendly_title(cls: type, field_name: str) -> str:
'''
Util function to convert a dataclass field name into a friendly title. If `field_name` does not
exist as a field on the dataclass, return a capitalised string.
Params:
cls: The class which has `field_name` as an attrib
field_name: Dataclass field to create a title for
Returns:
Pretty field title
'''
try:
f = get_field_by_name(cls, field_name)
title = f.metadata.get('friendly', field_name)
except FieldNotOnModelClass:
if field_name.startswith('extended.'):
# Trim the 'extended.' prefix from Issue class extended customfields
title = field_name[9:]
else:
title = field_name
return str(title.replace('_', ' ').title())
def render_dataclass_field(cls: type, field_name: str, value: Any) -> Tuple[str, str]:
'''
A simple single-field pretty formatting function supporting various types.
Params:
cls: The class which has `field_name` as an attrib
field_name: Dataclass attribute name to render
value: Value to be rendered according to dataclass.field type
Returns:
Tuple of field title, formatted value
'''
title = friendly_title(cls, field_name)
try:
f = get_field_by_name(cls, field_name)
# Determine the origin type for this field (thus handling Optional[type])
type_ = get_base_type(cast(Hashable, f.type))
# Format value as type specified by dataclass.field
value = render_value(value, type_)
except FieldNotOnModelClass:
# Assume string type if `field_name` does not exist on the dataclass - likely it's an
# extended field
value = render_value(value, str)
return title, value
def render_issue_field(
issue: 'Issue', field_name: str, value: Any, title_prefix: str=None, value_prefix: str=None,
color: str=None
) -> Tuple[str, str]:
'''
A slightly more complicated single-field pretty formatting function, specifically for fields on an
instance of the Issue dataclass.
Params:
issue: Instance of Issue class with the field to render
field_name: Issue dataclass attribute name to render
value: Value to be rendered, the type of the dataclass.field
title_prefix: Arbitrary string to be prepended to the title
value_prefix: Arbitrary string to be prepended to the field value
color: Render all output in this colour
Returns:
Pretty field title, formatted value
'''
try:
f = get_field_by_name(cast(Hashable, type(issue)), field_name)
# Execute a pre-render util function on the field value, if one is defined
prerender_func = f.metadata.get('prerender_func')
if callable(prerender_func):
value = prerender_func(value)
except FieldNotOnModelClass:
# Extended fields do not have an attribute on the Issue dataclass; see `render_dataclass_field`
pass
title, value = render_dataclass_field(type(issue), field_name, value)
if title_prefix:
title = f'{title_prefix}{title}'
if value_prefix:
value = f'{value_prefix}{value}'
if color:
title = click.style(f'{title}', fg=color)
value = click.style(f'{value}', fg=color)
return title, value
def render_value(value: Any, type_: Optional[type]=None) -> str:
'''
Params:
value: Value to be rendered according to type_
type_: Optionally supplied type used to render the value; defaults to type(value)
Returns:
Formatted value
'''
if not type_:
type_ = type(value)
if value is None:
return ''
elif type_ in (set, list):
return tabulate([('-', v) for v in value], tablefmt='plain')
elif type_ is dict:
return tabulate(value.items(), tablefmt='plain')
elif type_ is datetime.datetime:
dt = arrow.get(value)
return f'{dt.humanize()} [{dt.format()}]'
elif get_enum(type_):
return str(value.value)
elif value and type_ is str and len(value) > 100:
return '\n'.join(textwrap.wrap(value, width=100))
else:
return str(value)
def deserialize_single_issue_field(field_name: str, value: Optional[Any],
tz: Optional[datetime.tzinfo]=None) -> Any:
'''
Use DataclassSerializer.deserialize_value to convert from string to the correct type.
Params:
field_name: Name of the field Issue dataclass
value: Value to deserialize to field_name's type
tz: Timezone to apply to dates/datetimes in `value`
'''
if value is None:
return
if tz is None:
tz = get_localzone()
try:
# late import to avoid circular dependency
from jira_offline.models import Issue # pylint: disable=import-outside-toplevel
# look up the type of Issue.field_name and deserialize string value to this type
return deserialize_value(get_field_by_name(Issue, field_name).type, value, tz)
except DeserializeError as e:
raise DeserializeError(f'Failed parsing "{field_name}" with value "{value}" ({e})')
@contextlib.contextmanager
def critical_logger(logger_):
'''
Context manager which sets a logger to CRITICAL.
with critical_logger(logger):
...
'''
log_level = logger_.level
logger_.setLevel(logging.CRITICAL)
yield logger_
logger_.setLevel(log_level)
| [
"tabulate.tabulate",
"jira_offline.exceptions.DeserializeError",
"dataclasses.fields",
"tzlocal.get_localzone",
"jira_offline.exceptions.ProjectNotConfigured",
"click.style",
"pandas.to_datetime",
"jira_offline.utils.serializer.istype",
"jira_offline.utils.serializer.get_enum",
"arrow.get",
"textwrap.wrap",
"functools.lru_cache",
"typing.cast",
"jira_offline.exceptions.FieldNotOnModelClass"
] | [((688, 709), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (707, 709), False, 'import functools\n'), ((1161, 1182), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (1180, 1182), False, 'import functools\n'), ((2324, 2345), 'functools.lru_cache', 'functools.lru_cache', ([], {}), '()\n', (2343, 2345), False, 'import functools\n'), ((1025, 1048), 'dataclasses.fields', 'dataclasses.fields', (['cls'], {}), '(cls)\n', (1043, 1048), False, 'import dataclasses\n'), ((1114, 1157), 'jira_offline.exceptions.FieldNotOnModelClass', 'FieldNotOnModelClass', (['f"""{cls}.{field_name}"""'], {}), "(f'{cls}.{field_name}')\n", (1134, 1157), False, 'from jira_offline.exceptions import DeserializeError, FieldNotOnModelClass, ProjectNotConfigured\n'), ((1394, 1417), 'dataclasses.fields', 'dataclasses.fields', (['cls'], {}), '(cls)\n', (1412, 1417), False, 'import dataclasses\n'), ((5687, 5720), 'click.style', 'click.style', (['f"""{title}"""'], {'fg': 'color'}), "(f'{title}', fg=color)\n", (5698, 5720), False, 'import click\n'), ((5737, 5770), 'click.style', 'click.style', (['f"""{value}"""'], {'fg': 'color'}), "(f'{value}', fg=color)\n", (5748, 5770), False, 'import click\n'), ((7140, 7155), 'tzlocal.get_localzone', 'get_localzone', ([], {}), '()\n', (7153, 7155), False, 'from tzlocal import get_localzone\n'), ((1608, 1639), 'jira_offline.utils.serializer.istype', 'istype', (['typ_', 'datetime.datetime'], {}), '(typ_, datetime.datetime)\n', (1614, 1639), False, 'from jira_offline.utils.serializer import deserialize_value, get_enum, get_base_type, istype\n'), ((2287, 2320), 'jira_offline.exceptions.ProjectNotConfigured', 'ProjectNotConfigured', (['project_key'], {}), '(project_key)\n', (2307, 2320), False, 'from jira_offline.exceptions import DeserializeError, FieldNotOnModelClass, ProjectNotConfigured\n'), ((3841, 3863), 'typing.cast', 'cast', (['Hashable', 'f.type'], {}), '(Hashable, f.type)\n', (3845, 3863), False, 'from typing import Any, cast, Dict, Generator, Hashable, Optional, Tuple, TYPE_CHECKING\n'), ((6151, 6204), 'tabulate.tabulate', 'tabulate', (["[('-', v) for v in value]"], {'tablefmt': '"""plain"""'}), "([('-', v) for v in value], tablefmt='plain')\n", (6159, 6204), False, 'from tabulate import tabulate\n'), ((7532, 7609), 'jira_offline.exceptions.DeserializeError', 'DeserializeError', (['f"""Failed parsing "{field_name}" with value "{value}" ({e})"""'], {}), '(f\'Failed parsing "{field_name}" with value "{value}" ({e})\')\n', (7548, 7609), False, 'from jira_offline.exceptions import DeserializeError, FieldNotOnModelClass, ProjectNotConfigured\n'), ((1568, 1590), 'typing.cast', 'cast', (['Hashable', 'f.type'], {}), '(Hashable, f.type)\n', (1572, 1590), False, 'from typing import Any, cast, Dict, Generator, Hashable, Optional, Tuple, TYPE_CHECKING\n'), ((1727, 1764), 'jira_offline.utils.serializer.istype', 'istype', (['typ_', '(list, decimal.Decimal)'], {}), '(typ_, (list, decimal.Decimal))\n', (1733, 1764), False, 'from jira_offline.utils.serializer import deserialize_value, get_enum, get_base_type, istype\n'), ((6336, 6352), 'arrow.get', 'arrow.get', (['value'], {}), '(value)\n', (6345, 6352), False, 'import arrow\n'), ((6412, 6427), 'jira_offline.utils.serializer.get_enum', 'get_enum', (['type_'], {}), '(type_)\n', (6420, 6427), False, 'from jira_offline.utils.serializer import deserialize_value, get_enum, get_base_type, istype\n'), ((1673, 1690), 'pandas.to_datetime', 'pd.to_datetime', (['(0)'], {}), '(0)\n', (1687, 1690), True, 'import pandas as pd\n'), 
((6540, 6571), 'textwrap.wrap', 'textwrap.wrap', (['value'], {'width': '(100)'}), '(value, width=100)\n', (6553, 6571), False, 'import textwrap\n')] |
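Behaviour sketch for render_value, based only on the branches above and assuming the function is imported from this utils module; exact strings depend on tabulate/arrow formatting.

import datetime

render_value(None)                           # -> ''
render_value(['linux', 'macos'])             # plain-format tabulate listing, one '-' per item
render_value({'priority': 'High'})           # plain-format tabulate of the dict items
render_value(datetime.datetime(2021, 1, 1))  # humanized arrow date, e.g. '4 years ago [...]'
render_value('x' * 150)                      # long strings are wrapped to 100-character lines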
'''
Created on 13/01/2014
@author: Dani
This class is used to download the entire database of UNDP. The constructor
receives a format to download it (xml, csv, json). To start the process, the
"run" method should be called.
'''
import sys
import logging
import urllib2
from ConfigParser import ConfigParser
from es.weso.unpd_entities.DataTable import DataTable
from es.weso.util.file_writer import FileWriter
class UNDPExtractor(object):
'''
classdocs
'''
def __init__(self, config, log, extension):
'''
Constructor
'''
self.config = config
self.log = log
self.extension = extension
self.tables = self.parse_urls()
def parse_urls(self):
'''
There is a file containing the names and URLs of all the databases to download. This
method returns a list of DataTable objects containing that information.
'''
result = []
file_urls = open(self.config.get("UNDP", "file_tables"))
lines = file_urls.readlines()
file_urls.close()
for line in lines:
if not line.startswith("#"):
line = line.replace("\r", "")
line = line.replace("\n", "")
arr = line.split("\t")
result.append(DataTable(arr[0], arr[1] + "." + self.extension))
return result
def run(self):
'''
Tracks all the data from UNDP
'''
self.log.info("Initializing data extraction from UNDP...")
table_counter = 0
for table in self.tables:
table_counter += 1
file_name = '../../../downloaded_data/Table' + str(table_counter) + "." + self.extension
self.extract_data(table, file_name)
self.log.info("Data extraction from UNDP done.")
def extract_data(self, table, file_name):
"""
Tracks data from a single UNDP table
"""
try:
self.log.info('Extracting data from {0}, with URL {1}...'.format(table.name, table.url))
response = urllib2.urlopen(table.url)
xml_content = response.read()
FileWriter.write_text_to_file(xml_content, file_name)
self.log.info('data from {0} extracted.'.format(table.name))
except BaseException as e: # catch all
self.log.warning("Error during the extraction from {0}. Cause: {1}. Data from that table ignored.".format(table.name, e.message))
| [
"es.weso.unpd_entities.DataTable.DataTable",
"es.weso.util.file_writer.FileWriter.write_text_to_file",
"urllib2.urlopen"
] | [((2094, 2120), 'urllib2.urlopen', 'urllib2.urlopen', (['table.url'], {}), '(table.url)\n', (2109, 2120), False, 'import urllib2\n'), ((2175, 2228), 'es.weso.util.file_writer.FileWriter.write_text_to_file', 'FileWriter.write_text_to_file', (['xml_content', 'file_name'], {}), '(xml_content, file_name)\n', (2204, 2228), False, 'from es.weso.util.file_writer import FileWriter\n'), ((1312, 1360), 'es.weso.unpd_entities.DataTable.DataTable', 'DataTable', (['arr[0]', "(arr[1] + '.' + self.extension)"], {}), "(arr[0], arr[1] + '.' + self.extension)\n", (1321, 1360), False, 'from es.weso.unpd_entities.DataTable import DataTable\n')] |
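Hypothetical wiring of the extractor: the section and option names mirror the config lookups in the code, while the .ini path and logger name are made up. Python 2 is assumed, matching the urllib2/ConfigParser imports above.

import logging
from ConfigParser import ConfigParser

config = ConfigParser()
config.read('undp.ini')  # must provide [UNDP] file_tables = <path to the tab-separated name/URL list>
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('undp_extractor')

extractor = UNDPExtractor(config, log, 'xml')
extractor.run()  # downloads each table to ../../../downloaded_data/Table<N>.xml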
# pylint: disable=C0114,C0115,C0116
import unittest
import numpy as np
from scipy import constants as const
from nonrad.scaling import (charged_supercell_scaling,
charged_supercell_scaling_VASP, distance_PBC,
find_charge_center, radial_distribution,
sommerfeld_parameter, thermal_velocity)
from nonrad.tests import TEST_FILES, FakeFig
class SommerfeldTest(unittest.TestCase):
def setUp(self):
self.args = {
'T': 300,
'Z': 0,
'm_eff': 1.,
'eps0': 1.,
'method': 'Integrate'
}
def test_neutral(self):
self.assertAlmostEqual(sommerfeld_parameter(**self.args), 1.)
self.args['method'] = 'Analytic'
self.assertAlmostEqual(sommerfeld_parameter(**self.args), 1.)
def test_attractive(self):
self.args['Z'] = -1
self.assertGreater(sommerfeld_parameter(**self.args), 1.)
self.args['method'] = 'Analytic'
self.assertGreater(sommerfeld_parameter(**self.args), 1.)
def test_repulsive(self):
self.args['Z'] = 1
self.assertLess(sommerfeld_parameter(**self.args), 1.)
self.args['method'] = 'Analytic'
self.assertLess(sommerfeld_parameter(**self.args), 1.)
def test_list(self):
self.args['T'] = np.linspace(0.1, 1000, 100)
self.assertEqual(sommerfeld_parameter(**self.args), 1.)
self.args['Z'] = -1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) > 1.))
self.args['Z'] = 1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) < 1.))
self.args['Z'] = 0
self.args['method'] = 'Analytic'
self.assertEqual(sommerfeld_parameter(**self.args), 1.)
self.args['Z'] = -1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) > 1.))
self.args['Z'] = 1
self.assertTrue(np.all(sommerfeld_parameter(**self.args) < 1.))
def test_compare_methods(self):
self.args = {
'T': 150,
'Z': -1,
'm_eff': 0.2,
'eps0': 8.9,
'method': 'Integrate'
}
f0 = sommerfeld_parameter(**self.args)
self.args['method'] = 'Analytic'
f1 = sommerfeld_parameter(**self.args)
self.assertAlmostEqual(f0, f1, places=2)
self.args['Z'] = 1
self.args['T'] = 900
f0 = sommerfeld_parameter(**self.args)
self.args['method'] = 'Integrate'
f1 = sommerfeld_parameter(**self.args)
self.assertGreater(np.abs(f0-f1)/f1, 0.1)
class ChargedSupercellScalingTest(unittest.TestCase):
def test_find_charge_center(self):
lattice = np.eye(3)
density = np.ones((50, 50, 50))
self.assertTrue(
np.allclose(find_charge_center(density, lattice), [0.49]*3)
)
density = np.zeros((50, 50, 50))
density[0, 0, 0] = 1.
self.assertTrue(
np.allclose(find_charge_center(density, lattice), [0.]*3)
)
def test_distance_PBC(self):
a = np.array([0.25]*3)
b = np.array([0.5]*3)
lattice = np.eye(3)
self.assertEqual(distance_PBC(a, b, lattice), np.sqrt(3)*0.25)
b = np.array([0.9]*3)
self.assertEqual(distance_PBC(a, b, lattice), np.sqrt(3)*0.35)
def test_radial_distribution(self):
lattice = np.eye(3)
density = np.zeros((50, 50, 50))
density[0, 0, 0] = 1.
point = np.array([0.]*3)
dist = distance_PBC(np.zeros(3), point, lattice)
r, n = radial_distribution(density, point, lattice)
self.assertAlmostEqual(r[np.where(n == 1.)[0][0]], dist)
point = np.array([0.25]*3)
dist = distance_PBC(np.zeros(3), point, lattice)
r, n = radial_distribution(density, point, lattice)
self.assertAlmostEqual(r[np.where(n == 1.)[0][0]], dist)
point = np.array([0.29, 0.73, 0.44])
dist = distance_PBC(np.zeros(3), point, lattice)
r, n = radial_distribution(density, point, lattice)
self.assertAlmostEqual(r[np.where(n == 1.)[0][0]], dist)
@unittest.skip('WAVECARs too large to share')
def test_charged_supercell_scaling_VASP(self):
f = charged_supercell_scaling_VASP(
str(TEST_FILES / 'WAVECAR.C-'),
189,
def_index=192
)
self.assertAlmostEqual(f, 1.08)
def test_charged_supercell_scaling(self):
# test that numbers work out for homogeneous case
wf = np.ones((20, 20, 20))
f = charged_supercell_scaling(wf, 10*np.eye(3), np.array([0.]*3))
self.assertAlmostEqual(f, 1.00)
# test the plotting stuff
wf = np.ones((1, 1, 1))
f = charged_supercell_scaling(wf, 10*np.eye(3), np.array([0.]*3),
fig=FakeFig())
self.assertAlmostEqual(f, 1.00)
f = charged_supercell_scaling(wf, 10*np.eye(3), np.array([0.]*3),
fig=FakeFig(), full_range=True)
self.assertAlmostEqual(f, 1.00)
class ThermalVelocityTest(unittest.TestCase):
def test_thermal_velocity(self):
f = thermal_velocity(1., 1.)
self.assertAlmostEqual(f, np.sqrt(3 * const.k / const.m_e) * 1e2)
f = thermal_velocity(np.array([1.]), 1.)
self.assertEqual(type(f), np.ndarray)
| [
"numpy.abs",
"numpy.eye",
"numpy.sqrt",
"numpy.ones",
"numpy.where",
"nonrad.tests.FakeFig",
"nonrad.scaling.distance_PBC",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"nonrad.scaling.radial_distribution",
"nonrad.scaling.find_charge_center",
"unittest.skip",
"nonrad.scaling.sommerfeld_parameter",
"nonrad.scaling.thermal_velocity"
] | [((4155, 4199), 'unittest.skip', 'unittest.skip', (['"""WAVECARs too large to share"""'], {}), "('WAVECARs too large to share')\n", (4168, 4199), False, 'import unittest\n'), ((1362, 1389), 'numpy.linspace', 'np.linspace', (['(0.1)', '(1000)', '(100)'], {}), '(0.1, 1000, 100)\n', (1373, 1389), True, 'import numpy as np\n'), ((2195, 2228), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (2215, 2228), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((2283, 2316), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (2303, 2316), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((2436, 2469), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (2456, 2469), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((2525, 2558), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (2545, 2558), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((2722, 2731), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (2728, 2731), True, 'import numpy as np\n'), ((2750, 2771), 'numpy.ones', 'np.ones', (['(50, 50, 50)'], {}), '((50, 50, 50))\n', (2757, 2771), True, 'import numpy as np\n'), ((2897, 2919), 'numpy.zeros', 'np.zeros', (['(50, 50, 50)'], {}), '((50, 50, 50))\n', (2905, 2919), True, 'import numpy as np\n'), ((3101, 3121), 'numpy.array', 'np.array', (['([0.25] * 3)'], {}), '([0.25] * 3)\n', (3109, 3121), True, 'import numpy as np\n'), ((3132, 3151), 'numpy.array', 'np.array', (['([0.5] * 3)'], {}), '([0.5] * 3)\n', (3140, 3151), True, 'import numpy as np\n'), ((3168, 3177), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3174, 3177), True, 'import numpy as np\n'), ((3261, 3280), 'numpy.array', 'np.array', (['([0.9] * 3)'], {}), '([0.9] * 3)\n', (3269, 3280), True, 'import numpy as np\n'), ((3409, 3418), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (3415, 3418), True, 'import numpy as np\n'), ((3437, 3459), 'numpy.zeros', 'np.zeros', (['(50, 50, 50)'], {}), '((50, 50, 50))\n', (3445, 3459), True, 'import numpy as np\n'), ((3506, 3525), 'numpy.array', 'np.array', (['([0.0] * 3)'], {}), '([0.0] * 3)\n', (3514, 3525), True, 'import numpy as np\n'), ((3595, 3639), 'nonrad.scaling.radial_distribution', 'radial_distribution', (['density', 'point', 'lattice'], {}), '(density, point, lattice)\n', (3614, 3639), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((3721, 3741), 'numpy.array', 'np.array', (['([0.25] * 3)'], {}), '([0.25] * 3)\n', (3729, 3741), True, 'import numpy as np\n'), ((3812, 3856), 'nonrad.scaling.radial_distribution', 'radial_distribution', (['density', 'point', 'lattice'], {}), '(density, point, lattice)\n', (3831, 3856), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, 
distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((3938, 3966), 'numpy.array', 'np.array', (['[0.29, 0.73, 0.44]'], {}), '([0.29, 0.73, 0.44])\n', (3946, 3966), True, 'import numpy as np\n'), ((4039, 4083), 'nonrad.scaling.radial_distribution', 'radial_distribution', (['density', 'point', 'lattice'], {}), '(density, point, lattice)\n', (4058, 4083), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((4550, 4571), 'numpy.ones', 'np.ones', (['(20, 20, 20)'], {}), '((20, 20, 20))\n', (4557, 4571), True, 'import numpy as np\n'), ((4734, 4752), 'numpy.ones', 'np.ones', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (4741, 4752), True, 'import numpy as np\n'), ((5201, 5227), 'nonrad.scaling.thermal_velocity', 'thermal_velocity', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (5217, 5227), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((703, 736), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (723, 736), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((814, 847), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (834, 847), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((940, 973), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (960, 973), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1047, 1080), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1067, 1080), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1168, 1201), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1188, 1201), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1272, 1305), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1292, 1305), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1415, 1448), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1435, 1448), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1746, 1779), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1766, 1779), False, 'from nonrad.scaling import charged_supercell_scaling, 
charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((3203, 3230), 'nonrad.scaling.distance_PBC', 'distance_PBC', (['a', 'b', 'lattice'], {}), '(a, b, lattice)\n', (3215, 3230), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((3304, 3331), 'nonrad.scaling.distance_PBC', 'distance_PBC', (['a', 'b', 'lattice'], {}), '(a, b, lattice)\n', (3316, 3331), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((3551, 3562), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3559, 3562), True, 'import numpy as np\n'), ((3768, 3779), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3776, 3779), True, 'import numpy as np\n'), ((3995, 4006), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4003, 4006), True, 'import numpy as np\n'), ((4628, 4647), 'numpy.array', 'np.array', (['([0.0] * 3)'], {}), '([0.0] * 3)\n', (4636, 4647), True, 'import numpy as np\n'), ((4809, 4828), 'numpy.array', 'np.array', (['([0.0] * 3)'], {}), '([0.0] * 3)\n', (4817, 4828), True, 'import numpy as np\n'), ((4976, 4995), 'numpy.array', 'np.array', (['([0.0] * 3)'], {}), '([0.0] * 3)\n', (4984, 4995), True, 'import numpy as np\n'), ((5329, 5344), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (5337, 5344), True, 'import numpy as np\n'), ((2586, 2601), 'numpy.abs', 'np.abs', (['(f0 - f1)'], {}), '(f0 - f1)\n', (2592, 2601), True, 'import numpy as np\n'), ((2821, 2857), 'nonrad.scaling.find_charge_center', 'find_charge_center', (['density', 'lattice'], {}), '(density, lattice)\n', (2839, 2857), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((2999, 3035), 'nonrad.scaling.find_charge_center', 'find_charge_center', (['density', 'lattice'], {}), '(density, lattice)\n', (3017, 3035), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((3232, 3242), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3239, 3242), True, 'import numpy as np\n'), ((3333, 3343), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3340, 3343), True, 'import numpy as np\n'), ((4617, 4626), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4623, 4626), True, 'import numpy as np\n'), ((4798, 4807), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4804, 4807), True, 'import numpy as np\n'), ((4869, 4878), 'nonrad.tests.FakeFig', 'FakeFig', ([], {}), '()\n', (4876, 4878), False, 'from nonrad.tests import TEST_FILES, FakeFig\n'), ((4965, 4974), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4971, 4974), True, 'import numpy as np\n'), ((5036, 5045), 'nonrad.tests.FakeFig', 'FakeFig', ([], {}), '()\n', (5043, 5045), False, 'from nonrad.tests import TEST_FILES, FakeFig\n'), ((5260, 5292), 'numpy.sqrt', 'np.sqrt', (['(3 * const.k / const.m_e)'], {}), '(3 * const.k / const.m_e)\n', (5267, 5292), True, 'import numpy as np\n'), ((1513, 1546), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1533, 1546), False, 'from nonrad.scaling import charged_supercell_scaling, 
charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1612, 1645), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1632, 1645), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1844, 1877), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1864, 1877), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((1943, 1976), 'nonrad.scaling.sommerfeld_parameter', 'sommerfeld_parameter', ([], {}), '(**self.args)\n', (1963, 1976), False, 'from nonrad.scaling import charged_supercell_scaling, charged_supercell_scaling_VASP, distance_PBC, find_charge_center, radial_distribution, sommerfeld_parameter, thermal_velocity\n'), ((3673, 3691), 'numpy.where', 'np.where', (['(n == 1.0)'], {}), '(n == 1.0)\n', (3681, 3691), True, 'import numpy as np\n'), ((3890, 3908), 'numpy.where', 'np.where', (['(n == 1.0)'], {}), '(n == 1.0)\n', (3898, 3908), True, 'import numpy as np\n'), ((4117, 4135), 'numpy.where', 'np.where', (['(n == 1.0)'], {}), '(n == 1.0)\n', (4125, 4135), True, 'import numpy as np\n')] |
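A direct-call sketch of the function under test, reusing the physical parameters from the comparison test above; the temperature is chosen arbitrarily.

from nonrad.scaling import sommerfeld_parameter

f = sommerfeld_parameter(T=300, Z=-1, m_eff=0.2, eps0=8.9, method='Integrate')
# f > 1 for an attractive centre (Z = -1); Z = +1 gives f < 1 and Z = 0 gives f ~= 1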
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "<NAME>" at 20:22, 12/06/2020 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
import numpy as np
from mealpy.optimizer import Optimizer
class BaseSMA(Optimizer):
"""
My modified version of: Slime Mould Algorithm (SMA)
(Slime Mould Algorithm: A New Method for Stochastic Optimization)
Link:
https://doi.org/10.1016/j.future.2020.03.055
https://www.researchgate.net/publication/340431861_Slime_mould_algorithm_A_new_method_for_stochastic_optimization
Notes:
+ Two unique, randomly selected solutions are used to create the whole new solution (rather than one per variable), which removes the third loop of the original version
+ Bounds are checked and fitness is updated after each individual moves, instead of after the whole population has moved as in the original version
+ This modified version is not only faster but also produces better results
"""
ID_WEI = 2
def __init__(self, problem, epoch=10000, pop_size=100, pr=0.03, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
pr (float): probability threshold (z in the paper), default = 0.03
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.pr = pr
def create_solution(self):
"""
Returns:
A new agent with 3 elements: position, fitness wrapper (target and objective list), and weight
The general format: [position, [target, [obj1, obj2, ...]], weight]
## To get the position, fitness wrapper, target and obj list
## A[self.ID_POS] --> Return: position
## A[self.ID_FIT] --> Return: [target, [obj1, obj2, ...]]
## A[self.ID_FIT][self.ID_TAR] --> Return: target
## A[self.ID_FIT][self.ID_OBJ] --> Return: [obj1, obj2, ...]
"""
position = np.random.uniform(self.problem.lb, self.problem.ub)
fitness = self.get_fitness_position(position=position)
weight = np.zeros(self.problem.n_dims)
return [position, fitness, weight]
def create_child(self, idx, pop_copy, g_best, a, b):
# Update the Position of search agent
if np.random.uniform() < self.pr: # Eq.(2.7)
pos_new = np.random.uniform(self.problem.lb, self.problem.ub)
else:
p = np.tanh(np.abs(pop_copy[idx][self.ID_FIT][self.ID_TAR] - g_best[self.ID_FIT][self.ID_TAR])) # Eq.(2.2)
vb = np.random.uniform(-a, a, self.problem.n_dims) # Eq.(2.3)
vc = np.random.uniform(-b, b, self.problem.n_dims)
# two positions randomly selected from population, apply for the whole problem size instead of 1 variable
id_a, id_b = np.random.choice(list(set(range(0, self.pop_size)) - {idx}), 2, replace=False)
pos_1 = g_best[self.ID_POS] + vb * (pop_copy[idx][self.ID_WEI] * pop_copy[id_a][self.ID_POS] - pop_copy[id_b][self.ID_POS])
pos_2 = vc * pop_copy[idx][self.ID_POS]
pos_new = np.where(np.random.uniform(0, 1, self.problem.n_dims) < p, pos_1, pos_2)
# Check bound and re-calculate fitness after each individual move
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
current_agent = pop_copy[idx].copy()
current_agent[self.ID_POS] = pos_new
current_agent[self.ID_FIT] = fit_new
# # Sorted population and update the global best
# ## batch-size idea
# if self.problem.batch_idea:
# if (idx + 1) % self.problem.batch_size == 0:
# pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
# else:
# if (i + 1) % self.pop_size == 0:
# pop, g_best = self.update_sorted_population_and_global_best_solution(pop, self.ID_MIN_PROB, g_best)
return current_agent
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
[position, fitness value]
"""
s = g_best[self.ID_FIT][self.ID_TAR] - pop[-1][self.ID_FIT][self.ID_TAR] + self.EPSILON # plus eps to avoid denominator zero
# calculate the fitness weight of each slime mold
for i in range(0, self.pop_size):
# Eq.(2.5)
if i <= int(self.pop_size / 2):
pop[i][self.ID_WEI] = 1 + np.random.uniform(0, 1, self.problem.n_dims) * \
np.log10((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
else:
pop[i][self.ID_WEI] = 1 - np.random.uniform(0, 1, self.problem.n_dims) * \
np.log10((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
a = np.arctanh(-((epoch + 1) / self.epoch) + 1) # Eq.(2.4)
b = 1 - (epoch + 1) / self.epoch
pop_copy = pop.copy()
pop_idx = np.array(range(0, self.pop_size))
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b), pop_idx)
pop = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b), pop_idx)
pop = [x for x in pop_child]
else:
pop = [self.create_child(idx, pop_copy, g_best, a, b) for idx in pop_idx]
return pop
class OriginalSMA(BaseSMA):
"""
This version was developed by one of my students: Slime Mould Algorithm (SMA)
(Slime Mould Algorithm: A New Method for Stochastic Optimization)
Link:
https://doi.org/10.1016/j.future.2020.03.055
"""
ID_WEI = 2
def __init__(self, problem, epoch=10000, pop_size=100, pr=0.03, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
pr (float): probability threshold (z in the paper), default = 0.03
"""
super().__init__(problem, epoch, pop_size, pr, **kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.pr = pr
def create_child(self, idx, pop_copy, g_best, a, b):
# Update the Position of search agent
current_agent = pop_copy[idx].copy()
if np.random.uniform() < self.pr: # Eq.(2.7)
current_agent[self.ID_POS] = np.random.uniform(self.problem.lb, self.problem.ub)
else:
p = np.tanh(np.abs(current_agent[self.ID_FIT][self.ID_TAR] - g_best[self.ID_FIT][self.ID_TAR])) # Eq.(2.2)
vb = np.random.uniform(-a, a, self.problem.n_dims) # Eq.(2.3)
vc = np.random.uniform(-b, b, self.problem.n_dims)
for j in range(0, self.problem.n_dims):
# two positions randomly selected from population
id_a, id_b = np.random.choice(list(set(range(0, self.pop_size)) - {idx}), 2, replace=False)
if np.random.uniform() < p: # Eq.(2.1)
current_agent[self.ID_POS][j] = g_best[self.ID_POS][j] + vb[j] * (
current_agent[self.ID_WEI][j] * pop_copy[id_a][self.ID_POS][j] - pop_copy[id_b][self.ID_POS][j])
else:
current_agent[self.ID_POS][j] = vc[j] * current_agent[self.ID_POS][j]
return current_agent
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
[position, fitness value]
"""
s = g_best[self.ID_FIT][self.ID_TAR] - pop[-1][self.ID_FIT][self.ID_TAR] + self.EPSILON # plus eps to avoid denominator zero
# calculate the fitness weight of each slime mold
for i in range(0, self.pop_size):
# Eq.(2.5)
if i <= int(self.pop_size / 2):
pop[i][self.ID_WEI] = 1 + np.random.uniform(0, 1, self.problem.n_dims) * \
np.log10((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
else:
pop[i][self.ID_WEI] = 1 - np.random.uniform(0, 1, self.problem.n_dims) * \
np.log10((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)
a = np.arctanh(-((epoch + 1) / self.epoch) + 1) # Eq.(2.4)
b = 1 - (epoch + 1) / self.epoch
pop_copy = pop.copy()
pop_idx = np.array(range(0, self.pop_size))
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b), pop_idx)
pop = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b), pop_idx)
pop = [x for x in pop_child]
else:
pop = [self.create_child(idx, pop_copy, g_best, a, b) for idx in pop_idx]
# Check bound and re-calculate fitness after the whole population move
for i in range(0, self.pop_size):
pos_new = self.amend_position_faster(pop[i][self.ID_POS])
fit_new = self.get_fitness_position(pos_new)
pop[i][self.ID_POS] = pos_new
pop[i][self.ID_FIT] = fit_new
return pop | [
"numpy.abs",
"numpy.log10",
"concurrent.futures.ThreadPoolExecutor",
"numpy.zeros",
"functools.partial",
"concurrent.futures.ProcessPoolExecutor",
"numpy.random.uniform",
"numpy.arctanh"
] | [((2796, 2847), 'numpy.random.uniform', 'np.random.uniform', (['self.problem.lb', 'self.problem.ub'], {}), '(self.problem.lb, self.problem.ub)\n', (2813, 2847), True, 'import numpy as np\n'), ((2928, 2957), 'numpy.zeros', 'np.zeros', (['self.problem.n_dims'], {}), '(self.problem.n_dims)\n', (2936, 2957), True, 'import numpy as np\n'), ((6216, 6259), 'numpy.arctanh', 'np.arctanh', (['(-((epoch + 1) / self.epoch) + 1)'], {}), '(-((epoch + 1) / self.epoch) + 1)\n', (6226, 6259), True, 'import numpy as np\n'), ((10438, 10481), 'numpy.arctanh', 'np.arctanh', (['(-((epoch + 1) / self.epoch) + 1)'], {}), '(-((epoch + 1) / self.epoch) + 1)\n', (10448, 10481), True, 'import numpy as np\n'), ((3116, 3135), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (3133, 3135), True, 'import numpy as np\n'), ((3194, 3245), 'numpy.random.uniform', 'np.random.uniform', (['self.problem.lb', 'self.problem.ub'], {}), '(self.problem.lb, self.problem.ub)\n', (3211, 3245), True, 'import numpy as np\n'), ((3397, 3442), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', 'self.problem.n_dims'], {}), '(-a, a, self.problem.n_dims)\n', (3414, 3442), True, 'import numpy as np\n'), ((3472, 3517), 'numpy.random.uniform', 'np.random.uniform', (['(-b)', 'b', 'self.problem.n_dims'], {}), '(-b, b, self.problem.n_dims)\n', (3489, 3517), True, 'import numpy as np\n'), ((8033, 8052), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8050, 8052), True, 'import numpy as np\n'), ((8117, 8168), 'numpy.random.uniform', 'np.random.uniform', (['self.problem.lb', 'self.problem.ub'], {}), '(self.problem.lb, self.problem.ub)\n', (8134, 8168), True, 'import numpy as np\n'), ((8320, 8365), 'numpy.random.uniform', 'np.random.uniform', (['(-a)', 'a', 'self.problem.n_dims'], {}), '(-a, a, self.problem.n_dims)\n', (8337, 8365), True, 'import numpy as np\n'), ((8395, 8440), 'numpy.random.uniform', 'np.random.uniform', (['(-b)', 'b', 'self.problem.n_dims'], {}), '(-b, b, self.problem.n_dims)\n', (8412, 8440), True, 'import numpy as np\n'), ((3284, 3371), 'numpy.abs', 'np.abs', (['(pop_copy[idx][self.ID_FIT][self.ID_TAR] - g_best[self.ID_FIT][self.ID_TAR])'], {}), '(pop_copy[idx][self.ID_FIT][self.ID_TAR] - g_best[self.ID_FIT][self.\n ID_TAR])\n', (3290, 3371), True, 'import numpy as np\n'), ((6449, 6478), 'concurrent.futures.ThreadPoolExecutor', 'parallel.ThreadPoolExecutor', ([], {}), '()\n', (6476, 6478), True, 'import concurrent.futures as parallel\n'), ((8207, 8294), 'numpy.abs', 'np.abs', (['(current_agent[self.ID_FIT][self.ID_TAR] - g_best[self.ID_FIT][self.ID_TAR])'], {}), '(current_agent[self.ID_FIT][self.ID_TAR] - g_best[self.ID_FIT][self.\n ID_TAR])\n', (8213, 8294), True, 'import numpy as np\n'), ((10664, 10693), 'concurrent.futures.ThreadPoolExecutor', 'parallel.ThreadPoolExecutor', ([], {}), '()\n', (10691, 10693), True, 'import concurrent.futures as parallel\n'), ((3961, 4005), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (3978, 4005), True, 'import numpy as np\n'), ((6533, 6603), 'functools.partial', 'partial', (['self.create_child'], {'pop_copy': 'pop_copy', 'g_best': 'g_best', 'a': 'a', 'b': 'b'}), '(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b)\n', (6540, 6603), False, 'from functools import partial\n'), ((6704, 6734), 'concurrent.futures.ProcessPoolExecutor', 'parallel.ProcessPoolExecutor', ([], {}), '()\n', (6732, 6734), True, 'import concurrent.futures as parallel\n'), ((8686, 8705), 
'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8703, 8705), True, 'import numpy as np\n'), ((10748, 10818), 'functools.partial', 'partial', (['self.create_child'], {'pop_copy': 'pop_copy', 'g_best': 'g_best', 'a': 'a', 'b': 'b'}), '(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b)\n', (10755, 10818), False, 'from functools import partial\n'), ((10919, 10949), 'concurrent.futures.ProcessPoolExecutor', 'parallel.ProcessPoolExecutor', ([], {}), '()\n', (10947, 10949), True, 'import concurrent.futures as parallel\n'), ((5793, 5837), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (5810, 5837), True, 'import numpy as np\n'), ((5880, 5972), 'numpy.log10', 'np.log10', (['((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)'], {}), '((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.\n ID_TAR]) / s + 1)\n', (5888, 5972), True, 'import numpy as np\n'), ((6028, 6072), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (6045, 6072), True, 'import numpy as np\n'), ((6115, 6207), 'numpy.log10', 'np.log10', (['((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)'], {}), '((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.\n ID_TAR]) / s + 1)\n', (6123, 6207), True, 'import numpy as np\n'), ((6789, 6859), 'functools.partial', 'partial', (['self.create_child'], {'pop_copy': 'pop_copy', 'g_best': 'g_best', 'a': 'a', 'b': 'b'}), '(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b)\n', (6796, 6859), False, 'from functools import partial\n'), ((10015, 10059), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (10032, 10059), True, 'import numpy as np\n'), ((10102, 10194), 'numpy.log10', 'np.log10', (['((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)'], {}), '((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.\n ID_TAR]) / s + 1)\n', (10110, 10194), True, 'import numpy as np\n'), ((10250, 10294), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'self.problem.n_dims'], {}), '(0, 1, self.problem.n_dims)\n', (10267, 10294), True, 'import numpy as np\n'), ((10337, 10429), 'numpy.log10', 'np.log10', (['((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.ID_TAR]) / s + 1)'], {}), '((g_best[self.ID_FIT][self.ID_TAR] - pop[i][self.ID_FIT][self.\n ID_TAR]) / s + 1)\n', (10345, 10429), True, 'import numpy as np\n'), ((11004, 11074), 'functools.partial', 'partial', (['self.create_child'], {'pop_copy': 'pop_copy', 'g_best': 'g_best', 'a': 'a', 'b': 'b'}), '(self.create_child, pop_copy=pop_copy, g_best=g_best, a=a, b=b)\n', (11011, 11074), False, 'from functools import partial\n')] |
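A standalone numpy sketch of the fitness-weight update, Eq.(2.5), as coded in both evolve() methods above; the function name and epsilon value are illustrative.

import numpy as np

def slime_weights(fitnesses, n_dims, eps=1e-10):
    # `fitnesses` sorted ascending (minimisation): best agent first, worst last
    best, worst = fitnesses[0], fitnesses[-1]
    s = best - worst + eps
    weights = []
    for i, fit in enumerate(fitnesses):
        term = np.random.uniform(0, 1, n_dims) * np.log10((best - fit) / s + 1)
        # the first half of the (sorted) population gets 1 + term, the rest 1 - term
        weights.append(1 + term if i <= len(fitnesses) // 2 else 1 - term)
    return weights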
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (C) 2020 Telecom Infra Project and GNPy contributors
# see LICENSE.md for a list of contributors
#
import pytest
from gnpy.core.parameters import SimParams, NLIParams, RamanParams
@pytest.fixture
def set_sim_params(monkeypatch):
monkeypatch.setattr(SimParams, '_shared_dict', {'nli_params': NLIParams(), 'raman_params': RamanParams()})
| [
"gnpy.core.parameters.NLIParams",
"gnpy.core.parameters.RamanParams"
] | [((352, 363), 'gnpy.core.parameters.NLIParams', 'NLIParams', ([], {}), '()\n', (361, 363), False, 'from gnpy.core.parameters import SimParams, NLIParams, RamanParams\n'), ((381, 394), 'gnpy.core.parameters.RamanParams', 'RamanParams', ([], {}), '()\n', (392, 394), False, 'from gnpy.core.parameters import SimParams, NLIParams, RamanParams\n')] |
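A note on consumption: pytest injects this fixture by name, so a test that needs clean simulation parameters simply lists it as an argument. The sketch below is illustrative only — the two dictionary keys come from the fixture above, but the test name and assertion are not part of gnpy, and the fixture must be in scope (same module or a conftest):

def test_shared_dict_is_reset(set_sim_params):
    # The fixture monkeypatched SimParams._shared_dict, so exactly these two
    # parameter groups are present and freshly constructed.
    from gnpy.core.parameters import SimParams
    assert set(SimParams._shared_dict) == {'nli_params', 'raman_params'}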
import tensorflow as tf
import tensorflow.contrib.layers as layers
def layer_norm_fn(x, relu=True):
x = layers.layer_norm(x, scale=True, center=True)
if relu:
x = tf.nn.relu(x)
return x
def model(img_in, num_actions, scope, nlayers = 3, hidden_units = 512, channel_factor = 1, reuse=False, layer_norm=False, freeze_cnn = False):
"""As described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf"""
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32//channel_factor, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
if nlayers >= 2:
out = layers.convolution2d(out, num_outputs=64//channel_factor, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
if nlayers >= 3:
out = layers.convolution2d(out, num_outputs=64//channel_factor, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
conv_out = layers.flatten(out)
if freeze_cnn:
conv_out = tf.stop_gradient(conv_out)
with tf.variable_scope("action_value"):
value_out = layers.fully_connected(conv_out, num_outputs=hidden_units, activation_fn=None)
if layer_norm:
value_out = layer_norm_fn(value_out, relu=True)
else:
value_out = tf.nn.relu(value_out)
value_out = layers.fully_connected(value_out, num_outputs=num_actions, activation_fn=None)
return value_out
def dueling_model(img_in, num_actions, scope, nlayers = 3, hidden_units = 512, channel_factor = 1, reuse=False, layer_norm=False, freeze_cnn = False):
return dueling_test_model(img_in, num_actions, scope, nlayers, hidden_units, channel_factor, reuse, layer_norm, freeze_cnn)['q']
def dueling_test_model(img_in, num_actions, scope, nlayers = 3, hidden_units = 512, channel_factor = 1, reuse=False, layer_norm=False, freeze_cnn = False):
"""As described in https://arxiv.org/abs/1511.06581"""
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32//channel_factor, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
if nlayers >= 2:
out = layers.convolution2d(out, num_outputs=64//channel_factor, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
if nlayers >= 3:
out = layers.convolution2d(out, num_outputs=64//channel_factor, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
conv_out = layers.flatten(out)
if freeze_cnn:
conv_out = tf.stop_gradient(conv_out)
with tf.variable_scope("state_value"):
state_hidden = layers.fully_connected(conv_out, num_outputs=hidden_units, activation_fn=None)
if layer_norm:
state_hidden = layer_norm_fn(state_hidden, relu=True)
else:
state_hidden = tf.nn.relu(state_hidden)
state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
with tf.variable_scope("action_value"):
actions_hidden = layers.fully_connected(conv_out, num_outputs=hidden_units, activation_fn=None)
if layer_norm:
actions_hidden = layer_norm_fn(actions_hidden, relu=True)
else:
actions_hidden = tf.nn.relu(actions_hidden)
action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
return {'q': state_score + action_scores, 's': state_score, 'a': action_scores}
def dueling_test_model_activations(img_in, num_actions, scope, nlayers = 3, hidden_units = 512, channel_factor = 1, reuse=False, layer_norm=False, freeze_cnn = False):
"""As described in https://arxiv.org/abs/1511.06581"""
with tf.variable_scope(scope, reuse=reuse):
out = img_in
with tf.variable_scope("convnet"):
# original architecture
out = layers.convolution2d(out, num_outputs=32//channel_factor, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
if nlayers >= 2:
out = layers.convolution2d(out, num_outputs=64//channel_factor, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
if nlayers >= 3:
out = layers.convolution2d(out, num_outputs=64//channel_factor, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
conv_out = layers.flatten(out)
if freeze_cnn:
conv_out = tf.stop_gradient(conv_out)
with tf.variable_scope("state_value"):
state_hidden = layers.fully_connected(conv_out, num_outputs=hidden_units, activation_fn=None)
if layer_norm:
state_hidden = layer_norm_fn(state_hidden, relu=True)
else:
state_hidden = tf.nn.relu(state_hidden)
state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
with tf.variable_scope("action_value"):
actions_hidden = layers.fully_connected(conv_out, num_outputs=hidden_units, activation_fn=None)
if layer_norm:
actions_hidden = layer_norm_fn(actions_hidden, relu=True)
else:
actions_hidden = tf.nn.relu(actions_hidden)
action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
action_scores_mean = tf.reduce_mean(action_scores, 1)
action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
return {'q': tf.concat([state_hidden, actions_hidden], 1), 's': state_score, 'a': action_scores} #only difference from dueling_test_model()
| [
"tensorflow.expand_dims",
"tensorflow.contrib.layers.flatten",
"tensorflow.variable_scope",
"tensorflow.nn.relu",
"tensorflow.contrib.layers.layer_norm",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.concat",
"tensorflow.stop_gradient",
"tensorflow.reduce_mean",
"tensorflow.contrib.layers.convolution2d"
] | [((110, 155), 'tensorflow.contrib.layers.layer_norm', 'layers.layer_norm', (['x'], {'scale': '(True)', 'center': '(True)'}), '(x, scale=True, center=True)\n', (127, 155), True, 'import tensorflow.contrib.layers as layers\n'), ((181, 194), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (191, 194), True, 'import tensorflow as tf\n'), ((476, 513), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (493, 513), True, 'import tensorflow as tf\n'), ((1081, 1100), 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['out'], {}), '(out)\n', (1095, 1100), True, 'import tensorflow.contrib.layers as layers\n'), ((2124, 2161), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (2141, 2161), True, 'import tensorflow as tf\n'), ((2729, 2748), 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['out'], {}), '(out)\n', (2743, 2748), True, 'import tensorflow.contrib.layers as layers\n'), ((4165, 4202), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (4182, 4202), True, 'import tensorflow as tf\n'), ((4770, 4789), 'tensorflow.contrib.layers.flatten', 'layers.flatten', (['out'], {}), '(out)\n', (4784, 4789), True, 'import tensorflow.contrib.layers as layers\n'), ((549, 577), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""convnet"""'], {}), "('convnet')\n", (566, 577), True, 'import tensorflow as tf\n'), ((633, 747), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(32 // channel_factor)', 'kernel_size': '(8)', 'stride': '(4)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=32 // channel_factor, kernel_size=8,\n stride=4, activation_fn=tf.nn.relu)\n', (653, 747), True, 'import tensorflow.contrib.layers as layers\n'), ((1147, 1173), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['conv_out'], {}), '(conv_out)\n', (1163, 1173), True, 'import tensorflow as tf\n'), ((1188, 1221), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_value"""'], {}), "('action_value')\n", (1205, 1221), True, 'import tensorflow as tf\n'), ((1247, 1325), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['conv_out'], {'num_outputs': 'hidden_units', 'activation_fn': 'None'}), '(conv_out, num_outputs=hidden_units, activation_fn=None)\n', (1269, 1325), True, 'import tensorflow.contrib.layers as layers\n'), ((1509, 1587), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['value_out'], {'num_outputs': 'num_actions', 'activation_fn': 'None'}), '(value_out, num_outputs=num_actions, activation_fn=None)\n', (1531, 1587), True, 'import tensorflow.contrib.layers as layers\n'), ((2197, 2225), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""convnet"""'], {}), "('convnet')\n", (2214, 2225), True, 'import tensorflow as tf\n'), ((2281, 2395), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(32 // channel_factor)', 'kernel_size': '(8)', 'stride': '(4)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=32 // channel_factor, kernel_size=8,\n stride=4, activation_fn=tf.nn.relu)\n', (2301, 2395), True, 'import tensorflow.contrib.layers as layers\n'), ((2795, 2821), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['conv_out'], {}), '(conv_out)\n', (2811, 2821), True, 'import tensorflow as tf\n'), ((2836, 2868), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""state_value"""'], {}), 
"('state_value')\n", (2853, 2868), True, 'import tensorflow as tf\n'), ((2897, 2975), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['conv_out'], {'num_outputs': 'hidden_units', 'activation_fn': 'None'}), '(conv_out, num_outputs=hidden_units, activation_fn=None)\n', (2919, 2975), True, 'import tensorflow.contrib.layers as layers\n'), ((3173, 3244), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['state_hidden'], {'num_outputs': '(1)', 'activation_fn': 'None'}), '(state_hidden, num_outputs=1, activation_fn=None)\n', (3195, 3244), True, 'import tensorflow.contrib.layers as layers\n'), ((3258, 3291), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_value"""'], {}), "('action_value')\n", (3275, 3291), True, 'import tensorflow as tf\n'), ((3322, 3400), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['conv_out'], {'num_outputs': 'hidden_units', 'activation_fn': 'None'}), '(conv_out, num_outputs=hidden_units, activation_fn=None)\n', (3344, 3400), True, 'import tensorflow.contrib.layers as layers\n'), ((3608, 3695), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['actions_hidden'], {'num_outputs': 'num_actions', 'activation_fn': 'None'}), '(actions_hidden, num_outputs=num_actions,\n activation_fn=None)\n', (3630, 3695), True, 'import tensorflow.contrib.layers as layers\n'), ((3725, 3757), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['action_scores', '(1)'], {}), '(action_scores, 1)\n', (3739, 3757), True, 'import tensorflow as tf\n'), ((4238, 4266), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""convnet"""'], {}), "('convnet')\n", (4255, 4266), True, 'import tensorflow as tf\n'), ((4322, 4436), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(32 // channel_factor)', 'kernel_size': '(8)', 'stride': '(4)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=32 // channel_factor, kernel_size=8,\n stride=4, activation_fn=tf.nn.relu)\n', (4342, 4436), True, 'import tensorflow.contrib.layers as layers\n'), ((4836, 4862), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['conv_out'], {}), '(conv_out)\n', (4852, 4862), True, 'import tensorflow as tf\n'), ((4877, 4909), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""state_value"""'], {}), "('state_value')\n", (4894, 4909), True, 'import tensorflow as tf\n'), ((4938, 5016), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['conv_out'], {'num_outputs': 'hidden_units', 'activation_fn': 'None'}), '(conv_out, num_outputs=hidden_units, activation_fn=None)\n', (4960, 5016), True, 'import tensorflow.contrib.layers as layers\n'), ((5214, 5285), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['state_hidden'], {'num_outputs': '(1)', 'activation_fn': 'None'}), '(state_hidden, num_outputs=1, activation_fn=None)\n', (5236, 5285), True, 'import tensorflow.contrib.layers as layers\n'), ((5299, 5332), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""action_value"""'], {}), "('action_value')\n", (5316, 5332), True, 'import tensorflow as tf\n'), ((5363, 5441), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['conv_out'], {'num_outputs': 'hidden_units', 'activation_fn': 'None'}), '(conv_out, num_outputs=hidden_units, activation_fn=None)\n', (5385, 5441), True, 'import tensorflow.contrib.layers as layers\n'), ((5649, 5736), 'tensorflow.contrib.layers.fully_connected', 'layers.fully_connected', (['actions_hidden'], 
{'num_outputs': 'num_actions', 'activation_fn': 'None'}), '(actions_hidden, num_outputs=num_actions,\n activation_fn=None)\n', (5671, 5736), True, 'import tensorflow.contrib.layers as layers\n'), ((5766, 5798), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['action_scores', '(1)'], {}), '(action_scores, 1)\n', (5780, 5798), True, 'import tensorflow as tf\n'), ((5902, 5946), 'tensorflow.concat', 'tf.concat', (['[state_hidden, actions_hidden]', '(1)'], {}), '([state_hidden, actions_hidden], 1)\n', (5911, 5946), True, 'import tensorflow as tf\n'), ((793, 907), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(64 // channel_factor)', 'kernel_size': '(4)', 'stride': '(2)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=64 // channel_factor, kernel_size=4,\n stride=2, activation_fn=tf.nn.relu)\n', (813, 907), True, 'import tensorflow.contrib.layers as layers\n'), ((953, 1067), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(64 // channel_factor)', 'kernel_size': '(3)', 'stride': '(1)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=64 // channel_factor, kernel_size=3,\n stride=1, activation_fn=tf.nn.relu)\n', (973, 1067), True, 'import tensorflow.contrib.layers as layers\n'), ((1463, 1484), 'tensorflow.nn.relu', 'tf.nn.relu', (['value_out'], {}), '(value_out)\n', (1473, 1484), True, 'import tensorflow as tf\n'), ((2441, 2555), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(64 // channel_factor)', 'kernel_size': '(4)', 'stride': '(2)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=64 // channel_factor, kernel_size=4,\n stride=2, activation_fn=tf.nn.relu)\n', (2461, 2555), True, 'import tensorflow.contrib.layers as layers\n'), ((2601, 2715), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(64 // channel_factor)', 'kernel_size': '(3)', 'stride': '(1)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=64 // channel_factor, kernel_size=3,\n stride=1, activation_fn=tf.nn.relu)\n', (2621, 2715), True, 'import tensorflow.contrib.layers as layers\n'), ((3122, 3146), 'tensorflow.nn.relu', 'tf.nn.relu', (['state_hidden'], {}), '(state_hidden)\n', (3132, 3146), True, 'import tensorflow as tf\n'), ((3553, 3579), 'tensorflow.nn.relu', 'tf.nn.relu', (['actions_hidden'], {}), '(actions_hidden)\n', (3563, 3579), True, 'import tensorflow as tf\n'), ((3802, 3839), 'tensorflow.expand_dims', 'tf.expand_dims', (['action_scores_mean', '(1)'], {}), '(action_scores_mean, 1)\n', (3816, 3839), True, 'import tensorflow as tf\n'), ((4482, 4596), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(64 // channel_factor)', 'kernel_size': '(4)', 'stride': '(2)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=64 // channel_factor, kernel_size=4,\n stride=2, activation_fn=tf.nn.relu)\n', (4502, 4596), True, 'import tensorflow.contrib.layers as layers\n'), ((4642, 4756), 'tensorflow.contrib.layers.convolution2d', 'layers.convolution2d', (['out'], {'num_outputs': '(64 // channel_factor)', 'kernel_size': '(3)', 'stride': '(1)', 'activation_fn': 'tf.nn.relu'}), '(out, num_outputs=64 // channel_factor, kernel_size=3,\n stride=1, activation_fn=tf.nn.relu)\n', (4662, 4756), True, 'import tensorflow.contrib.layers as layers\n'), ((5163, 5187), 'tensorflow.nn.relu', 'tf.nn.relu', (['state_hidden'], {}), '(state_hidden)\n', (5173, 5187), True, 'import tensorflow as tf\n'), ((5594, 5620), 
'tensorflow.nn.relu', 'tf.nn.relu', (['actions_hidden'], {}), '(actions_hidden)\n', (5604, 5620), True, 'import tensorflow as tf\n'), ((5843, 5880), 'tensorflow.expand_dims', 'tf.expand_dims', (['action_scores_mean', '(1)'], {}), '(action_scores_mean, 1)\n', (5857, 5880), True, 'import tensorflow as tf\n')] |
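For orientation, the builders above map an image tensor straight to Q-values, so they are wired up TF1-style with a placeholder and a session. A minimal sketch assuming the definitions above are importable; the 84x84x4 input shape and 6 actions are arbitrary illustrative choices, not taken from the original training code:

import numpy as np
import tensorflow as tf  # TF 1.x, consistent with tf.contrib usage above

obs_ph = tf.placeholder(tf.float32, [None, 84, 84, 4], name="obs")
q_t = dueling_model(obs_ph, num_actions=6, scope="q_func")  # [batch, num_actions] Q-values

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    q = sess.run(q_t, {obs_ph: np.zeros((1, 84, 84, 4), np.float32)})
    greedy_action = int(np.argmax(q, axis=1)[0])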
import numpy as np
from Model import FR_Model
from keras.preprocessing import image
def Font_Recognition(all_detected_segments_direction):
FR_model = FR_Model(img_shape=(32, 32, 3))
FR_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
FR_model.load_weights('model.h5')
segments_dir = 'segments'
results = []
#print(all_detected_segments_direction)
for line_direction in all_detected_segments_direction:
for segments_direction in line_direction:
#print('-------------------------')
#print(segments_direction)
img = image.load_img(segments_dir + '/' + segments_direction[0], target_size= (32,32))
test_img = image.img_to_array(img)
test_img = np.expand_dims(test_img , axis=0)
result = FR_model.predict(test_img)
results.append([result,segments_direction[1:]])
return results
import math
def Describing_Result(results):
result = [0,0,0]
for r in results:
result[list(r[0][0]).index(max(list(r[0][0])))] += 1
#print(result)
sum_segments = sum(result)
print(sum_segments)
print('Elham: '+ str(round((100*result[0])/sum_segments , 2)) + '% \n' +
'Farisi: '+ str(round((100*result[1])/sum_segments , 2)) + '% \n' +
'Vazir: '+ str(round((100*result[2])/sum_segments , 2)) + '% \n')
return
# -*- coding: utf-8 -*-
| [
"keras.preprocessing.image.img_to_array",
"Model.FR_Model",
"numpy.expand_dims",
"keras.preprocessing.image.load_img"
] | [((157, 188), 'Model.FR_Model', 'FR_Model', ([], {'img_shape': '(32, 32, 3)'}), '(img_shape=(32, 32, 3))\n', (165, 188), False, 'from Model import FR_Model\n'), ((642, 727), 'keras.preprocessing.image.load_img', 'image.load_img', (["(segments_dir + '/' + segments_direction[0])"], {'target_size': '(32, 32)'}), "(segments_dir + '/' + segments_direction[0], target_size=(32, 32)\n )\n", (656, 727), False, 'from keras.preprocessing import image\n'), ((746, 769), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['img'], {}), '(img)\n', (764, 769), False, 'from keras.preprocessing import image\n'), ((793, 825), 'numpy.expand_dims', 'np.expand_dims', (['test_img'], {'axis': '(0)'}), '(test_img, axis=0)\n', (807, 825), True, 'import numpy as np\n')] |
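The recogniser expects all_detected_segments_direction to be a list of text lines, each line a list of segment entries whose first element is an image filename under the segments/ directory (the remaining elements are passed through as positional metadata). A minimal calling sketch; the filenames and coordinates are invented, and model.h5 plus the crop images must already exist on disk:

segments = [
    [['line1_seg1.png', 10, 20, 42, 52]],   # line 1: one 32x32 crop
    [['line2_seg1.png', 10, 60, 42, 92]],   # line 2: one 32x32 crop
]
results = Font_Recognition(segments)   # per-segment class scores plus positions
Describing_Result(results)             # prints Elham / Farisi / Vazir percentages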
import inspect
import json
import os
import platform
import sys
for name, value in inspect.getmembers(platform):
if name[0] != '_' and callable(value):
try:
value = value()
except (IndexError, TypeError):
continue
if str(value).strip("( ,')"):
print('{:>21}() = {}'.format(name, value))
print(sys.platform, sys.version)
print(json.dumps(dict(os.environ), indent=2))
| [
"inspect.getmembers"
] | [((84, 112), 'inspect.getmembers', 'inspect.getmembers', (['platform'], {}), '(platform)\n', (102, 112), False, 'import inspect\n')] |
import io
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from langdetect import detect
def pdf2string(path):
"""
From a given pdf path, it creates a string of the pdf.
:param path: Path to the pdf file
:return: string of the pdf file
"""
file_in = open(path, 'rb')
# Create a PDF interpreter object. (pdfminer)
retstr = io.StringIO()
rsrcmgr = PDFResourceManager()
device = TextConverter(rsrcmgr, retstr, codec='utf-8', laparams=LAParams())
interpreter = PDFPageInterpreter(rsrcmgr, device)
# Process each page contained in the document.
for page in PDFPage.get_pages(file_in):
interpreter.process_page(page)
data = retstr.getvalue()
return data
def string2txt(string, path):
"""
From a given string, creates a .txt file on the given path.
:param string: The string to be converted to .txt
:param path: The path of the .txt file
:return: File created
"""
# Writes the string with the encoding wanted
with open(path, 'w', encoding='utf-8') as file_out:
file_out.write(string)
file_out.close()
def detect_language(string):
"""
For a given string, returns the language it is writen in.
:param string: the string to be analysed
:return: the language detected (string)
"""
return detect(string) | [
"pdfminer.pdfinterp.PDFPageInterpreter",
"langdetect.detect",
"pdfminer.layout.LAParams",
"io.StringIO",
"pdfminer.pdfpage.PDFPage.get_pages",
"pdfminer.pdfinterp.PDFResourceManager"
] | [((499, 512), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (510, 512), False, 'import io\n'), ((527, 547), 'pdfminer.pdfinterp.PDFResourceManager', 'PDFResourceManager', ([], {}), '()\n', (545, 547), False, 'from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\n'), ((646, 681), 'pdfminer.pdfinterp.PDFPageInterpreter', 'PDFPageInterpreter', (['rsrcmgr', 'device'], {}), '(rsrcmgr, device)\n', (664, 681), False, 'from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\n'), ((750, 776), 'pdfminer.pdfpage.PDFPage.get_pages', 'PDFPage.get_pages', (['file_in'], {}), '(file_in)\n', (767, 776), False, 'from pdfminer.pdfpage import PDFPage\n'), ((1470, 1484), 'langdetect.detect', 'detect', (['string'], {}), '(string)\n', (1476, 1484), False, 'from langdetect import detect\n'), ((616, 626), 'pdfminer.layout.LAParams', 'LAParams', ([], {}), '()\n', (624, 626), False, 'from pdfminer.layout import LAParams\n')] |
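The three helpers chain naturally: extract the text, optionally persist it, then detect the language. A minimal usage sketch with placeholder paths:

text = pdf2string('docs/report.pdf')    # raw text of the PDF
string2txt(text, 'docs/report.txt')      # written out as UTF-8
print(detect_language(text))             # ISO code such as 'en' or 'fr'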
""".. Ignore pydocstyle D400.
=================================
Generate Differential Expressions
=================================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import csv
import gzip
import json
import logging
import os
import random
import shutil
import string
import datetime
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils import timezone
from resolwe.flow.models import Data, Storage
from resolwe.utils import BraceMessage as __
from resolwe_bio.models import Sample
from .utils import get_descriptorschema, get_process, get_superuser, generate_sample_desciptor
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Command(BaseCommand):
"""Generate test differential expression data."""
help = "Generate differential expressions"
def __init__(self, *args, **kwargs):
"""Set command defaults."""
super(Command, self).__init__(*args, **kwargs)
self.data_dir = settings.FLOW_EXECUTOR['DATA_DIR']
self.test_files_path = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'tests', 'files'))
def add_arguments(self, parser):
"""Define command arguments."""
parser.add_argument('-n', '--n-diffexps', type=int, default=4,
help="Number of differential expressions to generate (default: %(default)s)")
parser.add_argument('-g', '--group-size', type=int, default=5,
help="Number of samples in case/control group per DE (default: %(default)s)")
parser.add_argument('--rseed', action='store_true', help="Use fixed random seed")
@staticmethod
def get_random_word(length):
"""Generate a random word."""
return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def get_name(self, de_name):
"""Generate a random name."""
return 'DE_{}_{}'.format(self.get_random_word(4), de_name)
def create_expressions(self, num):
"""Generate expressions."""
expressions = []
sample_name = 'Cuffdiff_{}'.format(self.get_random_word(4))
for i in range(num):
cuffquant_file = 'cuffquant_{}.cxb'.format(random.choice([1, 2]))
# Create expressios
exp = Data.objects.create(
name='Smpl_Ex_{}_rep{}'.format(sample_name, i + 1),
process=get_process('upload-cxb'),
contributor=get_superuser(),
status=Data.STATUS_PROCESSING,
input={'src': {'file': cuffquant_file}, 'source': 'UCSC'})
os.mkdir(os.path.join(self.data_dir, str(exp.id)))
shutil.copy(os.path.join(self.test_files_path, cuffquant_file), os.path.join(self.data_dir, str(exp.id)))
exp.output = {
'cxb': {'file': cuffquant_file},
'source': 'UCSC'
}
exp.status = Data.STATUS_DONE
exp.save()
sample = Sample.objects.filter(data=exp)[0]
sample.presample = False
sample.descriptor = generate_sample_desciptor('Hs_')
sample.save()
with open(os.path.join(self.data_dir, str(exp.id), 'stdout.txt'), 'w') as stdout:
stdout.write('Upload gene expressions. Sample was created '
                             'with the generate_diffexpr_cuffdiff django-admin command.')
logger.info(__('Created sample: {} (id={})', sample.name, sample.id))
logger.info(__('\tData object: (id={})', exp.id))
expressions.append(exp)
return expressions
def create_genome_annotation(self, filename):
"""Create a genome annotation."""
ann = Data.objects.create(
name='Annotation_{}'.format(filename.split('.')[0]),
process=get_process('upload-gtf'),
contributor=get_superuser(),
status=Data.STATUS_PROCESSING,
input={'src': {'file': filename}, 'source': 'UCSC'})
os.mkdir(os.path.join(self.data_dir, str(ann.id)))
with gzip.open(os.path.join(self.test_files_path, filename), 'rb') as gzfile:
with open(os.path.join(self.data_dir, str(ann.id), filename[:-3]), 'wb') as outfile:
shutil.copyfileobj(gzfile, outfile)
ann.output = {
'gtf': {'file': filename[:-3]},
'source': 'UCSC'
}
ann.status = Data.STATUS_DONE
ann.save()
with open(os.path.join(self.data_dir, str(ann.id), 'stdout.txt'), 'w') as stdout:
stdout.write('Upload genome annotation with the '
'generate_diffexpr_cuffdiff django-admin command.')
logger.info(__('Genome annotation created: {} (id={})', filename, ann.id))
return ann
@staticmethod
def generate_raw_data(gene_ids, path):
"""Generate random DE data."""
de_data = {}
header = ['test_id', 'gene_id', 'gene', 'locus', 'sample_1',
'sample_2', 'status', 'value_1', 'value_2',
'log2(fold_change)', 'test_stat', 'p_value', 'q_value',
'significant']
with gzip.open(gene_ids, mode='rt') as gene_ids:
all_genes = [line.strip() for line in gene_ids]
n_of_genes = len(all_genes)
de_data['test_id'] = all_genes
de_data['gene_id'] = all_genes
de_data['gene'] = all_genes
de_data['locus'] = ['chr20:463337-524482'] * n_of_genes
de_data['sample_1'] = ['control'] * n_of_genes
de_data['sample_2'] = ['case'] * n_of_genes
de_data['status'] = ['OK'] * n_of_genes
de_data['value_1'] = [random.gammavariate(1, 100) for _ in all_genes]
de_data['value_2'] = [random.gammavariate(1, 100) for _ in all_genes]
de_data['log2(fold_change)'] = [random.uniform(-10, 10) for _ in all_genes]
de_data['test_stat'] = [random.uniform(-3, 3) for _ in all_genes]
de_data['p_value'] = [random.uniform(0, 1) for _ in all_genes]
de_data['q_value'] = [random.uniform(0, 1) for _ in all_genes]
de_data['significant'] = [random.choice(['yes', 'no']) for _ in all_genes]
rows = zip(de_data['test_id'], de_data['gene_id'], de_data['gene'],
de_data['locus'], de_data['sample_1'], de_data['sample_2'],
de_data['status'], de_data['value_1'], de_data['value_2'],
de_data['log2(fold_change)'], de_data['test_stat'],
de_data['p_value'], de_data['q_value'], de_data['significant'])
with gzip.open(os.path.join(path, 'de_raw.tab.gz'), 'wt') as raw_df:
writer = csv.writer(raw_df, delimiter=str('\t'), lineterminator='\n')
writer.writerow(header)
for row in rows:
writer.writerow(row)
with open(os.path.join(path, 'de_json.json'), 'w') as json_file:
de_data_std = {
'stat': de_data['test_stat'],
'logfc': de_data['log2(fold_change)'],
'pvalue': de_data['p_value'],
'fdr': de_data['q_value'],
'gene_id': de_data['gene_id']
}
json.dump(de_data_std, json_file, indent=4, sort_keys=True)
rows = zip(de_data_std['gene_id'], de_data_std['logfc'], de_data_std['fdr'],
de_data_std['pvalue'], de_data_std['stat'])
with gzip.open(os.path.join(path, 'de_file.tab.gz'), 'wt') as de_file:
writer = csv.writer(de_file, delimiter=str('\t'), lineterminator='\n')
writer.writerow(['gene_id', 'logfc', 'fdr', 'pvalue', 'stat'])
for row in rows:
writer.writerow(row)
def generate_diffexp_data(self, group_size):
"""Generate differential expression data."""
de_name = 'cuffdiff'
human_genes = os.path.join(self.test_files_path, 'human_genes.tab.gz')
logger.info('---- case samples ----')
case_samples = self.create_expressions(group_size)
logger.info('---- control samples ----')
control_samples = self.create_expressions(group_size)
logger.info('---- upload annotation ----')
case_input = [sample.id for sample in case_samples]
control_input = [sample.id for sample in control_samples]
genome_annotation_input = self.create_genome_annotation('hg19_chr20_small.gtf.gz')
de_descriptor = {
'thresholds': {
'prob_field': 'fdr',
'logfc': 2,
'prob': 0.05},
'case_label': 'Case group',
'control_label': 'Control group'
}
de_inputs = {
'case': case_input,
'control': control_input,
'annotation': genome_annotation_input.id
}
# Create DE data object
started = timezone.now()
de_obj = Data.objects.create(
name=self.get_name(de_name),
started=started,
finished=started + datetime.timedelta(minutes=20),
status=Data.STATUS_PROCESSING,
descriptor_schema=get_descriptorschema('diff-exp'),
descriptor=de_descriptor,
process=get_process(de_name),
contributor=get_superuser(),
input=de_inputs)
# Create data directory
os.mkdir(os.path.join(self.data_dir, str(de_obj.id)))
self.generate_raw_data(human_genes, os.path.join(self.data_dir, str(de_obj.id)))
with open(os.path.join(self.data_dir, str(de_obj.id), 'test.txt'), 'w') as _:
pass
json_object = Storage.objects.create(
json=json.load(open(os.path.join(self.data_dir, str(de_obj.id), 'de_json.json'))),
contributor=get_superuser(),
name='{}_storage'.format(de_obj.name),
data=de_obj)
os.remove(os.path.join(self.data_dir, str(de_obj.id), 'de_json.json'))
# TODO: reference on existing true files
de_obj.output = {
'raw': {'file': 'de_raw.tab.gz'},
'de_json': json_object.id,
'de_file': {'file': 'de_file.tab.gz'},
'transcript_diff_exp': {'file': 'test.txt'},
'cds_diff_exp': {'file': 'test.txt'},
'tss_group_diff_exp': {'file': 'test.txt'},
'cuffdiff_output': {'file': 'test.txt'},
'source': 'UCSC'
}
de_obj.status = Data.STATUS_DONE
de_obj.save()
logger.info('---- new differential expression ----')
logger.info(__('DE created with id: {}', de_obj.id))
# Create stdout file
with open(os.path.join(self.data_dir, str(de_obj.id), 'stdout.txt'), 'w') as stdout:
stdout.write('Differential expression was '
'created with the generate_diffexpr_cuffdiff django-admin command.')
def handle(self, *args, **options):
"""Command handle."""
if options['rseed']:
random.seed(42)
for _ in range(options['n_diffexps']):
self.generate_diffexp_data(options['group_size'])
| [
"logging.getLogger",
"resolwe_bio.models.Sample.objects.filter",
"random.uniform",
"random.choice",
"shutil.copyfileobj",
"random.gammavariate",
"gzip.open",
"os.path.join",
"random.seed",
"django.utils.timezone.now",
"os.path.dirname",
"resolwe.utils.BraceMessage",
"datetime.timedelta",
"json.dump"
] | [((696, 723), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (713, 723), False, 'import logging\n'), ((8087, 8143), 'os.path.join', 'os.path.join', (['self.test_files_path', '"""human_genes.tab.gz"""'], {}), "(self.test_files_path, 'human_genes.tab.gz')\n", (8099, 8143), False, 'import os\n'), ((9085, 9099), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (9097, 9099), False, 'from django.utils import timezone\n'), ((4808, 4869), 'resolwe.utils.BraceMessage', '__', (['"""Genome annotation created: {} (id={})"""', 'filename', 'ann.id'], {}), "('Genome annotation created: {} (id={})', filename, ann.id)\n", (4810, 4869), True, 'from resolwe.utils import BraceMessage as __\n'), ((5265, 5295), 'gzip.open', 'gzip.open', (['gene_ids'], {'mode': '"""rt"""'}), "(gene_ids, mode='rt')\n", (5274, 5295), False, 'import gzip\n'), ((7391, 7450), 'json.dump', 'json.dump', (['de_data_std', 'json_file'], {'indent': '(4)', 'sort_keys': '(True)'}), '(de_data_std, json_file, indent=4, sort_keys=True)\n', (7400, 7450), False, 'import json\n'), ((10769, 10808), 'resolwe.utils.BraceMessage', '__', (['"""DE created with id: {}"""', 'de_obj.id'], {}), "('DE created with id: {}', de_obj.id)\n", (10771, 10808), True, 'from resolwe.utils import BraceMessage as __\n'), ((11195, 11210), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (11206, 11210), False, 'import random\n'), ((1154, 1179), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1169, 1179), False, 'import os\n'), ((1847, 1884), 'random.choice', 'random.choice', (['string.ascii_lowercase'], {}), '(string.ascii_lowercase)\n', (1860, 1884), False, 'import random\n'), ((2302, 2323), 'random.choice', 'random.choice', (['[1, 2]'], {}), '([1, 2])\n', (2315, 2323), False, 'import random\n'), ((2771, 2821), 'os.path.join', 'os.path.join', (['self.test_files_path', 'cuffquant_file'], {}), '(self.test_files_path, cuffquant_file)\n', (2783, 2821), False, 'import os\n'), ((3076, 3107), 'resolwe_bio.models.Sample.objects.filter', 'Sample.objects.filter', ([], {'data': 'exp'}), '(data=exp)\n', (3097, 3107), False, 'from resolwe_bio.models import Sample\n'), ((3524, 3580), 'resolwe.utils.BraceMessage', '__', (['"""Created sample: {} (id={})"""', 'sample.name', 'sample.id'], {}), "('Created sample: {} (id={})', sample.name, sample.id)\n", (3526, 3580), True, 'from resolwe.utils import BraceMessage as __\n'), ((3606, 3642), 'resolwe.utils.BraceMessage', '__', (['"""\tData object: (id={})"""', 'exp.id'], {}), "('\\tData object: (id={})', exp.id)\n", (3608, 3642), True, 'from resolwe.utils import BraceMessage as __\n'), ((4181, 4225), 'os.path.join', 'os.path.join', (['self.test_files_path', 'filename'], {}), '(self.test_files_path, filename)\n', (4193, 4225), False, 'import os\n'), ((4357, 4392), 'shutil.copyfileobj', 'shutil.copyfileobj', (['gzfile', 'outfile'], {}), '(gzfile, outfile)\n', (4375, 4392), False, 'import shutil\n'), ((5804, 5831), 'random.gammavariate', 'random.gammavariate', (['(1)', '(100)'], {}), '(1, 100)\n', (5823, 5831), False, 'import random\n'), ((5886, 5913), 'random.gammavariate', 'random.gammavariate', (['(1)', '(100)'], {}), '(1, 100)\n', (5905, 5913), False, 'import random\n'), ((5978, 6001), 'random.uniform', 'random.uniform', (['(-10)', '(10)'], {}), '(-10, 10)\n', (5992, 6001), False, 'import random\n'), ((6058, 6079), 'random.uniform', 'random.uniform', (['(-3)', '(3)'], {}), '(-3, 3)\n', (6072, 6079), False, 'import random\n'), ((6134, 6154), 'random.uniform', 
'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (6148, 6154), False, 'import random\n'), ((6209, 6229), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (6223, 6229), False, 'import random\n'), ((6288, 6316), 'random.choice', 'random.choice', (["['yes', 'no']"], {}), "(['yes', 'no'])\n", (6301, 6316), False, 'import random\n'), ((7046, 7080), 'os.path.join', 'os.path.join', (['path', '"""de_json.json"""'], {}), "(path, 'de_json.json')\n", (7058, 7080), False, 'import os\n'), ((6773, 6808), 'os.path.join', 'os.path.join', (['path', '"""de_raw.tab.gz"""'], {}), "(path, 'de_raw.tab.gz')\n", (6785, 6808), False, 'import os\n'), ((7636, 7672), 'os.path.join', 'os.path.join', (['path', '"""de_file.tab.gz"""'], {}), "(path, 'de_file.tab.gz')\n", (7648, 7672), False, 'import os\n'), ((9240, 9270), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': '(20)'}), '(minutes=20)\n', (9258, 9270), False, 'import datetime\n')] |
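Since this is a Django management command, it is normally driven through manage.py/django-admin rather than imported. Below is a sketch of the programmatic equivalent via call_command; the command name is inferred from the strings written to stdout.txt above, and a configured Resolwe project (database, FLOW_EXECUTOR data directory, superuser) is assumed:

from django.core.management import call_command

# Equivalent to: django-admin generate_diffexpr_cuffdiff -n 4 -g 5 --rseed
call_command('generate_diffexpr_cuffdiff', n_diffexps=4, group_size=5, rseed=True)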
from pysat.solvers import Solver
from gen_factor_sat import utils
def assign(variables, values):
for variable, value in zip(variables, values):
yield variable if value in ('1', True) else -variable
def run_cnf(input_assignment, output_variables, clauses):
with Solver(name="cadical", bootstrap_with=clauses) as solver:
if solver.solve(assumptions=input_assignment):
return list(assignment_to_bin(output_variables, solver.get_model()))
else:
return None
def assignment_to_int(variables, assignment):
return utils.to_int(list(assignment_to_bin(variables, assignment)))
def assignment_to_bin(variables, assignments):
for variable in variables:
if isinstance(variable, str):
yield utils.to_bin_string(variable == '1')
elif isinstance(variable, int):
assignment = assignments[abs(variable) - 1]
assignment = assignment if variable >= 0 else -assignment
yield utils.to_bin_string(assignment >= 0)
else:
raise ValueError("Invalid output: " + variable)
| [
"pysat.solvers.Solver",
"gen_factor_sat.utils.to_bin_string"
] | [((282, 328), 'pysat.solvers.Solver', 'Solver', ([], {'name': '"""cadical"""', 'bootstrap_with': 'clauses'}), "(name='cadical', bootstrap_with=clauses)\n", (288, 328), False, 'from pysat.solvers import Solver\n'), ((770, 806), 'gen_factor_sat.utils.to_bin_string', 'utils.to_bin_string', (["(variable == '1')"], {}), "(variable == '1')\n", (789, 806), False, 'from gen_factor_sat import utils\n'), ((991, 1027), 'gen_factor_sat.utils.to_bin_string', 'utils.to_bin_string', (['(assignment >= 0)'], {}), '(assignment >= 0)\n', (1010, 1027), False, 'from gen_factor_sat import utils\n')] |
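run_cnf solves the clause set under the assumptions produced by assign and reads the requested variables back via assignment_to_bin. A tiny worked example on a made-up two-variable formula; the expected output assumes gen_factor_sat's utils.to_bin_string maps True to '1':

clauses = [[1, 2], [-1, 2]]                    # (x1 or x2) and (not x1 or x2)
assumptions = list(assign([1], '1'))           # assume x1 = 1, which forces x2 = 1
print(run_cnf(assumptions, [1, 2], clauses))   # expected: ['1', '1']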
"""Tests various time series functions which are used extensively in tcapy
"""
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas as pd
import numpy as np
from datetime import timedelta
from pandas.testing import assert_frame_equal
from tcapy.util.timeseries import TimeSeriesOps
from tcapy.util.customexceptions import *
from test.config import *
ticker = 'EURUSD'
start_date = '20 Apr 2017'
finish_date = '07 Jun 2017'
def test_vlookup():
"""Runs a test for the VLOOKUP function which is used extensively in a lot of the metric construction
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
rand_data = np.random.random(len(dt))
df_before = pd.DataFrame(index=dt, columns=['rand'], data=rand_data)
millseconds_tests = [100, 500]
# Try perturbing by nothing, then 100 and 500 milliseconds
for millseconds in millseconds_tests:
df_perturb = pd.DataFrame(index=dt - timedelta(milliseconds=millseconds), columns=['rand'],
data=rand_data)
# Do a VLOOKUP (which should give us all the previous ones) - take off the last point (which would be AFTER
# our perturbation)
search, dt_search = TimeSeriesOps().vlookup_style_data_frame(dt[0:-1], df_perturb, 'rand')
df_after = pd.DataFrame(index=dt_search + timedelta(milliseconds=millseconds), data=search.values,
columns=['rand'])
# check the search dataframes are equal
assert_frame_equal(df_before[0:-1], df_after, check_dtype=False)
# in this case, our lookup series doesn't overlap at all with our range, so we should get back and exception
dt_lookup = pd.date_range(start='30 Dec 2017', end='31 Dec 2018', freq='1min')
df_perturb = pd.DataFrame(index=dt + timedelta(milliseconds=millseconds), columns=['rand'],
data=rand_data)
exception_has_been_triggered = False
try:
search, dt_search = TimeSeriesOps().vlookup_style_data_frame(dt_lookup, df_perturb, 'rand')
except ValidationException:
exception_has_been_triggered = True
assert (exception_has_been_triggered)
def test_filter_between_days_times():
"""Runs a test for the filter by time of day and day of the week, on synthetically constructed data and then checks
that no data is outside those time windows
"""
from tcapy.analysis.tradeorderfilter import TradeOrderFilterTimeOfDayWeekMonth
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
df = pd.DataFrame(index=dt, columns=['Rand'], data=np.random.random(len(dt)))
df = df.tz_localize('utc')
trade_order_filter = TradeOrderFilterTimeOfDayWeekMonth(time_of_day={'start_time': '07:00:00',
'finish_time': '17:00:00'},
day_of_week='mon')
df = trade_order_filter.filter_trade_order(trade_order_df=df)
assert (df.index[0].hour >= 7 and df.index[-1].hour <= 17 and df.index[0].dayofweek == 0)
def test_remove_consecutive_duplicates():
"""Tests that consecutive duplicates are removed correctly in time series
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='30s')
df = pd.DataFrame(index=dt, columns=['bid', 'mid', 'ask'])
df['mid'] = np.random.random(len(dt))
df['bid'] = np.random.random(len(dt))
df['ask'] = np.random.random(len(dt))
# Filter by 'mid'
df2 = df.copy()
df2.index = df2.index + timedelta(seconds=10)
df_new = df.append(df2)
df_new = df_new.sort_index()
df_new = TimeSeriesOps().drop_consecutive_duplicates(df_new, 'mid')
assert_frame_equal(df_new, df)
# For 'bid' and 'ask'
df2 = df.copy()
df2.index = df2.index + timedelta(seconds=10)
df_new = df.append(df2)
df_new = df_new.sort_index()
df_new = TimeSeriesOps().drop_consecutive_duplicates(df_new, ['bid', 'ask'])
assert_frame_equal(df_new, df)
def test_ohlc():
"""Tests the open/high/low/close resampling works on time series
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1s')
df = pd.DataFrame(index=dt, columns=['bid', 'mid', 'ask'])
df['mid'] = np.random.random(len(dt))
df_ohlc = TimeSeriesOps().resample_time_series(df, resample_amount=1, how='ohlc', unit='minutes', field='mid')
assert all(df_ohlc['high'] >= df_ohlc['low'])
def test_time_delta():
"""Tests time delta function works for a number of different times"""
td = TimeSeriesOps().get_time_delta("12:30")
assert (td.seconds == 45000)
td = TimeSeriesOps().get_time_delta("12:30:35")
assert (td.seconds == 45035)
print(td)
def test_overwrite_time_in_datetimeindex():
"""Tests that overwriting the time with a specific time of day works
"""
# Clocks went forward in London on 00:00 31 Mar 2020
datetimeindex = pd.date_range('28 Mar 2020', '05 Apr 2020', freq='h')
datetimeindex = datetimeindex.tz_localize("utc")
datetimeindex = TimeSeriesOps().overwrite_time_of_day_in_datetimeindex(datetimeindex, "16:00", overwrite_timezone="Europe/London")
# Back in UTC time 16:00 LDN is 15:00 UTC after DST changes (and is 16:00 UTC beforehand)
assert datetimeindex[0].hour == 16 and datetimeindex[-1].hour == 15
def test_chunk():
"""Tests the chunking of dataframes works
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
df = pd.DataFrame(index=dt, columns=['bid', 'mid', 'ask'])
df['mid'] = np.random.random(len(dt))
df_chunk = TimeSeriesOps().split_array_chunks(df, chunks=None, chunk_size=100)
df_chunk = pd.concat(df_chunk)
assert_frame_equal(df_chunk, df)
def test_cache_handle():
"""Tests the storing of DataFrames in the CacheHandle
"""
from tcapy.data.volatilecache import VolatileRedis as VolatileCache
volatile_cache = VolatileCache()
dt = pd.date_range(start='01 Jan 2017', end='05 Jan 2019', freq='1m')
df = pd.DataFrame(index=dt, columns=['bid', 'mid', 'ask'])
df['mid'] = np.ones(len(dt))
ch = volatile_cache.put_dataframe_handle(df, use_cache_handles=True)
df_1 = volatile_cache.get_dataframe_handle(ch, burn_after_reading=True)
assert_frame_equal(df, df_1)
def test_data_frame_holder():
"""Tests the storing of DataFrameHolder object which is like an enhanced dict specifically for storing DataFrames,
alongside using the VolatileCache
"""
from tcapy.analysis.dataframeholder import DataFrameHolder
from tcapy.data.volatilecache import VolatileRedis as VolatileCache
volatile_cache = VolatileCache()
# Create a very large DataFrame, which needs to be chunked in storage
dt = pd.date_range(start='01 Jan 2000', end='05 Jan 2020', freq='10s')
df = pd.DataFrame(index=dt, columns=['bid', 'mid', 'ask'])
df['bid'] = np.ones(len(dt))
df['mid'] = np.ones(len(dt))
df['ask'] = np.ones(len(dt))
df_list = TimeSeriesOps().split_array_chunks(df, chunks=2)
df_lower = df_list[0]
df_higher = df_list[1]
for i in ['_comp', '']:
df_holder = DataFrameHolder()
df_holder.add_dataframe(volatile_cache.put_dataframe_handle(df_lower, use_cache_handles=True), 'EURUSD_df' + i)
df_holder.add_dataframe(volatile_cache.put_dataframe_handle(df_higher, use_cache_handles=True), 'EURUSD_df' + i)
df_dict = df_holder.get_combined_dataframe_dict()
df_final = df_dict['EURUSD_df' + i]
assert_frame_equal(df, df_final)
| [
"tcapy.data.volatilecache.VolatileRedis",
"tcapy.util.timeseries.TimeSeriesOps",
"tcapy.analysis.dataframeholder.DataFrameHolder",
"pandas.concat",
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"datetime.timedelta",
"tcapy.analysis.tradeorderfilter.TradeOrderFilterTimeOfDayWeekMonth",
"pandas.date_range"
] | [((756, 822), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""01 Jan 2018"""', 'end': '"""05 Jan 2018"""', 'freq': '"""1min"""'}), "(start='01 Jan 2018', end='05 Jan 2018', freq='1min')\n", (769, 822), True, 'import pandas as pd\n'), ((883, 939), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dt', 'columns': "['rand']", 'data': 'rand_data'}), "(index=dt, columns=['rand'], data=rand_data)\n", (895, 939), True, 'import pandas as pd\n'), ((1894, 1960), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""30 Dec 2017"""', 'end': '"""31 Dec 2018"""', 'freq': '"""1min"""'}), "(start='30 Dec 2017', end='31 Dec 2018', freq='1min')\n", (1907, 1960), True, 'import pandas as pd\n'), ((2686, 2752), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""01 Jan 2018"""', 'end': '"""05 Jan 2018"""', 'freq': '"""1min"""'}), "(start='01 Jan 2018', end='05 Jan 2018', freq='1min')\n", (2699, 2752), True, 'import pandas as pd\n'), ((2893, 3017), 'tcapy.analysis.tradeorderfilter.TradeOrderFilterTimeOfDayWeekMonth', 'TradeOrderFilterTimeOfDayWeekMonth', ([], {'time_of_day': "{'start_time': '07:00:00', 'finish_time': '17:00:00'}", 'day_of_week': '"""mon"""'}), "(time_of_day={'start_time': '07:00:00',\n 'finish_time': '17:00:00'}, day_of_week='mon')\n", (2927, 3017), False, 'from tcapy.analysis.tradeorderfilter import TradeOrderFilterTimeOfDayWeekMonth\n'), ((3446, 3511), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""01 Jan 2018"""', 'end': '"""05 Jan 2018"""', 'freq': '"""30s"""'}), "(start='01 Jan 2018', end='05 Jan 2018', freq='30s')\n", (3459, 3511), True, 'import pandas as pd\n'), ((3522, 3575), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dt', 'columns': "['bid', 'mid', 'ask']"}), "(index=dt, columns=['bid', 'mid', 'ask'])\n", (3534, 3575), True, 'import pandas as pd\n'), ((3937, 3967), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_new', 'df'], {}), '(df_new, df)\n', (3955, 3967), False, 'from pandas.testing import assert_frame_equal\n'), ((4215, 4245), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_new', 'df'], {}), '(df_new, df)\n', (4233, 4245), False, 'from pandas.testing import assert_frame_equal\n'), ((4350, 4414), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""01 Jan 2018"""', 'end': '"""05 Jan 2018"""', 'freq': '"""1s"""'}), "(start='01 Jan 2018', end='05 Jan 2018', freq='1s')\n", (4363, 4414), True, 'import pandas as pd\n'), ((4425, 4478), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dt', 'columns': "['bid', 'mid', 'ask']"}), "(index=dt, columns=['bid', 'mid', 'ask'])\n", (4437, 4478), True, 'import pandas as pd\n'), ((5176, 5229), 'pandas.date_range', 'pd.date_range', (['"""28 Mar 2020"""', '"""05 Apr 2020"""'], {'freq': '"""h"""'}), "('28 Mar 2020', '05 Apr 2020', freq='h')\n", (5189, 5229), True, 'import pandas as pd\n'), ((5668, 5734), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""01 Jan 2018"""', 'end': '"""05 Jan 2018"""', 'freq': '"""1min"""'}), "(start='01 Jan 2018', end='05 Jan 2018', freq='1min')\n", (5681, 5734), True, 'import pandas as pd\n'), ((5745, 5798), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dt', 'columns': "['bid', 'mid', 'ask']"}), "(index=dt, columns=['bid', 'mid', 'ask'])\n", (5757, 5798), True, 'import pandas as pd\n'), ((5941, 5960), 'pandas.concat', 'pd.concat', (['df_chunk'], {}), '(df_chunk)\n', (5950, 5960), True, 'import pandas as pd\n'), ((5966, 5998), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_chunk', 'df'], {}), '(df_chunk, 
df)\n', (5984, 5998), False, 'from pandas.testing import assert_frame_equal\n'), ((6184, 6199), 'tcapy.data.volatilecache.VolatileRedis', 'VolatileCache', ([], {}), '()\n', (6197, 6199), True, 'from tcapy.data.volatilecache import VolatileRedis as VolatileCache\n'), ((6210, 6274), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""01 Jan 2017"""', 'end': '"""05 Jan 2019"""', 'freq': '"""1m"""'}), "(start='01 Jan 2017', end='05 Jan 2019', freq='1m')\n", (6223, 6274), True, 'import pandas as pd\n'), ((6284, 6337), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dt', 'columns': "['bid', 'mid', 'ask']"}), "(index=dt, columns=['bid', 'mid', 'ask'])\n", (6296, 6337), True, 'import pandas as pd\n'), ((6527, 6555), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'df_1'], {}), '(df, df_1)\n', (6545, 6555), False, 'from pandas.testing import assert_frame_equal\n'), ((6908, 6923), 'tcapy.data.volatilecache.VolatileRedis', 'VolatileCache', ([], {}), '()\n', (6921, 6923), True, 'from tcapy.data.volatilecache import VolatileRedis as VolatileCache\n'), ((7008, 7073), 'pandas.date_range', 'pd.date_range', ([], {'start': '"""01 Jan 2000"""', 'end': '"""05 Jan 2020"""', 'freq': '"""10s"""'}), "(start='01 Jan 2000', end='05 Jan 2020', freq='10s')\n", (7021, 7073), True, 'import pandas as pd\n'), ((7083, 7136), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'dt', 'columns': "['bid', 'mid', 'ask']"}), "(index=dt, columns=['bid', 'mid', 'ask'])\n", (7095, 7136), True, 'import pandas as pd\n'), ((7772, 7804), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df', 'df_final'], {}), '(df, df_final)\n', (7790, 7804), False, 'from pandas.testing import assert_frame_equal\n'), ((1699, 1763), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['df_before[0:-1]', 'df_after'], {'check_dtype': '(False)'}), '(df_before[0:-1], df_after, check_dtype=False)\n', (1717, 1763), False, 'from pandas.testing import assert_frame_equal\n'), ((3775, 3796), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (3784, 3796), False, 'from datetime import timedelta\n'), ((4044, 4065), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (4053, 4065), False, 'from datetime import timedelta\n'), ((7403, 7420), 'tcapy.analysis.dataframeholder.DataFrameHolder', 'DataFrameHolder', ([], {}), '()\n', (7418, 7420), False, 'from tcapy.analysis.dataframeholder import DataFrameHolder\n'), ((3873, 3888), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (3886, 3888), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((4142, 4157), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (4155, 4157), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((4537, 4552), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (4550, 4552), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((4797, 4812), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (4810, 4812), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((4881, 4896), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (4894, 4896), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((5304, 5319), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (5317, 5319), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((5858, 5873), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', 
(5871, 5873), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((7252, 7267), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (7265, 7267), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((1409, 1424), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (1422, 1424), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((2003, 2038), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': 'millseconds'}), '(milliseconds=millseconds)\n', (2012, 2038), False, 'from datetime import timedelta\n'), ((2188, 2203), 'tcapy.util.timeseries.TimeSeriesOps', 'TimeSeriesOps', ([], {}), '()\n', (2201, 2203), False, 'from tcapy.util.timeseries import TimeSeriesOps\n'), ((1127, 1162), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': 'millseconds'}), '(milliseconds=millseconds)\n', (1136, 1162), False, 'from datetime import timedelta\n'), ((1531, 1566), 'datetime.timedelta', 'timedelta', ([], {'milliseconds': 'millseconds'}), '(milliseconds=millseconds)\n', (1540, 1566), False, 'from datetime import timedelta\n')] |
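For readers without tcapy installed: the vlookup_style_data_frame calls exercised above are essentially an as-of lookup (for each requested timestamp, take the most recent earlier observation), which plain pandas expresses with merge_asof. A minimal illustration that is independent of tcapy:

import pandas as pd

ticks = pd.DataFrame({'time': pd.date_range('01 Jan 2018', periods=5, freq='1min'),
                      'mid': [1.0, 1.1, 1.2, 1.3, 1.4]})
trades = pd.DataFrame({'time': [pd.Timestamp('01 Jan 2018 00:02:30')]})

# Each trade picks up the latest tick at or before its timestamp (here the 00:02 tick, mid=1.2)
print(pd.merge_asof(trades, ticks, on='time', direction='backward'))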
# -*- coding:utf-8 -*-
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs
import importlib
def evaluation_imports():
"""
evaluation_imports: Dictionary ( key = module name , value = alias ) with
python modules used in the evaluation.
"""
return {
'math': 'math',
'numpy': 'np'
}
def default_evaluation_params():
"""
default_evaluation_params: Default parameters to use for the validation and
evaluation.
"""
return {
'AREA_RECALL_CONSTRAINT': 0.8,
'AREA_PRECISION_CONSTRAINT': 0.4,
'EV_PARAM_IND_CENTER_DIFF_THR': 1,
'MTYPE_OO_O': 1.,
'MTYPE_OM_O': 0.8,
'MTYPE_OM_M': 1.,
'GT_SAMPLE_NAME_2_ID': 'gt_img_([0-9]+).txt',
'DET_SAMPLE_NAME_2_ID': 'res_img_([0-9]+).txt',
'CRLF': False # Lines are delimited by Windows CRLF format
}
def validate_data(gt_file_path, sub_file_path, evaluation_params):
"""
    Method validate_data: validates that all files in the results folder are
                          correct (have the correct name and contents) and
                          that there are no missing files in the folder.
                          If an error is detected, the method raises it.
"""
gt = rrc_evaluation_funcs.load_zip_file(
gt_file_path, evaluation_params['GT_SAMPLE_NAME_2_ID'])
sub = rrc_evaluation_funcs.load_zip_file(
sub_file_path, evaluation_params['DET_SAMPLE_NAME_2_ID'], True)
# Validate format of GroundTruth
for k in gt:
rrc_evaluation_funcs.validate_lines_in_file(
k, gt[k], evaluation_params['CRLF'], True, True)
# Validate format of results
for k in sub:
if not (k in gt):
raise Exception("The sample %s not present in GT" % k)
rrc_evaluation_funcs.validate_lines_in_file(
k, sub[k], evaluation_params['CRLF'], True, False)
def evaluate_method(gt_file_path, sub_file_path, evaluation_param):
"""
    Method evaluate_method: evaluates the method and returns the results
Results. Dictionary with the following values:
- method (required) Global method metrics. Ex:
{ 'Precision':0.8,'Recall':0.9 }
- samples (optional) Per sample metrics. Ex:
{'sample1' : { 'Precision':0.8,'Recall':0.9 },
'sample2' : { 'Precision':0.8,'Recall':0.9 }
"""
for module, alias in evaluation_imports().items():
globals()[alias] = importlib.import_module(module)
def one_to_one_match(row, col):
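        # One-to-one: GT `row` meets both area thresholds with exactly one
        # detection, detection `col` with exactly one GT, and the pair
        # (row, col) itself meets both thresholds.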
cont = 0
for j in range(len(recall_mat[0])):
if recall_mat[row, j] >= evaluation_param['AREA_RECALL_CONSTRAINT']\
and precision_mat[row, j] >= \
evaluation_param['AREA_PRECISION_CONSTRAINT']:
cont = cont + 1
if cont != 1:
return False
cont = 0
for i in range(len(recall_mat)):
if recall_mat[i, col] >= evaluation_param['AREA_RECALL_CONSTRAINT']\
and precision_mat[i, col] >= \
evaluation_param['AREA_PRECISION_CONSTRAINT']:
cont = cont + 1
if cont != 1:
return False
if recall_mat[row, col] >= evaluation_param['AREA_RECALL_CONSTRAINT']\
and precision_mat[row, col] >= \
evaluation_param['AREA_PRECISION_CONSTRAINT']:
return True
return False
def num_overlaps_gt(gt_num_):
cont = 0
for det_num_ in range(len(det_rects)):
if det_num_ not in det_dont_care_rects_num:
if recall_mat[gt_num_, det_num_] > 0:
cont = cont + 1
return cont
def num_overlaps_det(det_num_):
cont = 0
for gt_num_ in range(len(recall_mat)):
if gt_num_ not in gt_dont_care_rects_num:
if recall_mat[gt_num_, det_num_] > 0:
cont = cont + 1
return cont
def is_single_overlap(row, col):
if num_overlaps_gt(row) == 1 and num_overlaps_det(col) == 1:
return True
else:
return False
def one_to_many_match(gt_num_):
many_sum = 0
det_rects_ = []
for det_num_ in range(len(recall_mat[0])):
if gt_rect_mat[gt_num_] == 0 and det_rect_mat[det_num_] == 0 \
and det_num_ not in det_dont_care_rects_num:
if precision_mat[gt_num_, det_num_] >= \
evaluation_param['AREA_PRECISION_CONSTRAINT']:
many_sum += recall_mat[gt_num_, det_num_]
det_rects_.append(det_num_)
if round(many_sum, 4) >= evaluation_param['AREA_RECALL_CONSTRAINT']:
return True, det_rects_
else:
return False, []
def many_to_one_match(det_num_):
many_sum = 0
gt_rect_list = []
for gt_num_ in range(len(recall_mat)):
if gt_rect_mat[gt_num_] == 0 and det_rect_mat[det_num_] == 0 \
and gt_num_ not in gt_dont_care_rects_num:
if recall_mat[gt_num_, det_num_] >= \
evaluation_param['AREA_RECALL_CONSTRAINT']:
many_sum += precision_mat[gt_num_, det_num_]
gt_rect_list.append(gt_num_)
if round(many_sum, 4) >= evaluation_param['AREA_PRECISION_CONSTRAINT']:
return True, gt_rect_list
else:
return False, []
def area(a, b):
dx = min(a.xmax, b.xmax) - max(a.xmin, b.xmin) + 1
dy = min(a.ymax, b.ymax) - max(a.ymin, b.ymin) + 1
if (dx >= 0) and (dy >= 0):
return dx * dy
else:
return 0.
def center(r):
x = float(r.xmin) + float(r.xmax - r.xmin + 1) / 2.
y = float(r.ymin) + float(r.ymax - r.ymin + 1) / 2.
return Point(x, y)
def point_distance(r1, r2):
distx = math.fabs(r1.x - r2.x)
disty = math.fabs(r1.y - r2.y)
return math.sqrt(distx * distx + disty * disty)
def center_distance(r1, r2):
return point_distance(center(r1), center(r2))
def diag(r):
w = (r.xmax - r.xmin + 1)
h = (r.ymax - r.ymin + 1)
return math.sqrt(h * h + w * w)
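    # Quick sanity check for the geometry helpers above (numbers are made up):
    # with the Rectangle namedtuple defined just below, r1 = Rectangle(0, 0, 9, 9)
    # and r2 = Rectangle(5, 0, 14, 9) give area(r1, r2) = 5 * 10 = 50, each
    # rectangle covers 10 * 10 = 100 pixels, and diag(r1) = sqrt(200) ~ 14.14;
    # the recall/precision matrices below are these intersection areas divided by
    # the GT and DET rectangle areas respectively.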
per_sample_metrics = {}
method_recall_sum = 0
method_precision_sum = 0
Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')
Point = namedtuple('Point', 'x y')
gt = rrc_evaluation_funcs.load_zip_file(
gt_file_path, evaluation_param['GT_SAMPLE_NAME_2_ID'])
subm = rrc_evaluation_funcs.load_zip_file(
sub_file_path, evaluation_param['DET_SAMPLE_NAME_2_ID'], True)
num_gt = 0
num_det = 0
for resFile in gt:
gt_file = rrc_evaluation_funcs.decode_utf8(gt[resFile])
recall = 0
precision = 0
hmean = 0
recall_accum = 0.
precision_accum = 0.
gt_rects = []
det_rects = []
gt_pol_points = []
det_pol_points = []
# Array of Ground Truth Rectangles' keys marked as don't Care
gt_dont_care_rects_num = []
# Array of Detected Rectangles' matched with a don't Care GT
det_dont_care_rects_num = []
pairs = []
evaluation_log = ""
recall_mat = np.empty([1, 1])
precision_mat = np.empty([1, 1])
points_list, _, transcriptions_list = \
rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
gt_file, evaluation_param['CRLF'], True, True, False)
for n in range(len(points_list)):
points = points_list[n]
transcription = transcriptions_list[n]
dont_care = transcription == "###"
gt_rect = Rectangle(*points)
gt_rects.append(gt_rect)
gt_pol_points.append(points)
if dont_care:
gt_dont_care_rects_num.append(len(gt_rects) - 1)
evaluation_log += "GT rectangles: " + str(len(gt_rects)) \
+ (" (" + str(len(gt_dont_care_rects_num))
+ " don't care)\n" if
len(gt_dont_care_rects_num) > 0 else "\n")
if resFile in subm:
det_file = rrc_evaluation_funcs.decode_utf8(subm[resFile])
points_list, _, _ = \
rrc_evaluation_funcs.get_tl_line_values_from_file_contents(
det_file, evaluation_param['CRLF'], True, False, False)
for n in range(len(points_list)):
points = points_list[n]
det_rect = Rectangle(*points)
det_rects.append(det_rect)
det_pol_points.append(points)
if len(gt_dont_care_rects_num) > 0:
for dontCareRectNum in gt_dont_care_rects_num:
dont_care_rect = gt_rects[dontCareRectNum]
intersected_area = area(dont_care_rect, det_rect)
rd_dimensions = ((det_rect.xmax - det_rect.xmin + 1) *
(det_rect.ymax - det_rect.ymin + 1))
if rd_dimensions == 0:
precision = 0
else:
precision = intersected_area / rd_dimensions
if (precision >
evaluation_param['AREA_PRECISION_CONSTRAINT']):
det_dont_care_rects_num.append(len(det_rects) - 1)
break
evaluation_log += "DET rectangles: " + str(len(det_rects)) + \
(" (" + str(len(det_dont_care_rects_num)) +
" don't care)\n" if
len(det_dont_care_rects_num) > 0 else "\n")
if len(gt_rects) == 0:
recall = 1
precision = 0 if len(det_rects) > 0 else 1
if len(det_rects) > 0:
# Calculate recall and precision matrixs
output_shape = [len(gt_rects), len(det_rects)]
recall_mat = np.empty(output_shape)
precision_mat = np.empty(output_shape)
gt_rect_mat = np.zeros(len(gt_rects), np.int8)
det_rect_mat = np.zeros(len(det_rects), np.int8)
for gt_num in range(len(gt_rects)):
for det_num in range(len(det_rects)):
r_g = gt_rects[gt_num]
r_d = det_rects[det_num]
intersected_area = area(r_g, r_d)
rg_dimensions = ((r_g.xmax - r_g.xmin + 1) *
(r_g.ymax - r_g.ymin + 1))
rd_dimensions = ((r_d.xmax - r_d.xmin + 1) *
(r_d.ymax - r_d.ymin + 1))
recall_mat[gt_num, det_num] = 0 if rg_dimensions == 0 \
else intersected_area / rg_dimensions
precision_mat[gt_num, det_num] = 0 \
if rd_dimensions == 0 \
else intersected_area / rd_dimensions
# Find one-to-one matches
evaluation_log += "Find one-to-one matches\n"
for gt_num in range(len(gt_rects)):
for det_num in range(len(det_rects)):
if gt_rect_mat[gt_num] == 0 and \
det_rect_mat[det_num] == 0 and \
gt_num not in gt_dont_care_rects_num and \
det_num not in det_dont_care_rects_num:
match = one_to_one_match(gt_num, det_num)
if match is True:
# in deteval we have to make other validation
# before mark as one-to-one
if is_single_overlap(gt_num, det_num) is True:
r_g = gt_rects[gt_num]
r_d = det_rects[det_num]
normDist = center_distance(r_g, r_d)
normDist /= diag(r_g) + diag(r_d)
normDist *= 2.0
if normDist < evaluation_param['EV_PARAM_IND_CENTER_DIFF_THR']:
gt_rect_mat[gt_num] = 1
det_rect_mat[det_num] = 1
recall_accum += evaluation_param['MTYPE_OO_O']
precision_accum += evaluation_param['MTYPE_OO_O']
pairs.append(
{'gt': gt_num, 'det': det_num, 'type': 'OO'})
evaluation_log += "Match GT #" + \
str(gt_num) + " with Det #" + str(det_num) + "\n"
else:
evaluation_log += "Match Discarded GT #" + \
str(gt_num) + " with Det #" + str(det_num) + " normDist: " + str(normDist) + " \n"
else:
evaluation_log += "Match Discarded GT #" + \
str(gt_num) + " with Det #" + str(det_num) + " not single overlap\n"
# Find one-to-many matches
evaluation_log += "Find one-to-many matches\n"
for gt_num in range(len(gt_rects)):
if gt_num not in gt_dont_care_rects_num:
match, matchesDet = one_to_many_match(gt_num)
if match is True:
evaluation_log += "num_overlaps_gt=" + \
str(num_overlaps_gt(gt_num))
# in deteval we have to make other validation
# before mark as one-to-one
if num_overlaps_gt(gt_num) >= 2:
gt_rect_mat[gt_num] = 1
recall_accum += (evaluation_param['MTYPE_OO_O'] if len(
matchesDet) == 1 else evaluation_param['MTYPE_OM_O'])
precision_accum += (evaluation_param['MTYPE_OO_O'] if len(
matchesDet) == 1 else evaluation_param['MTYPE_OM_O'] * len(matchesDet))
pairs.append({'gt': gt_num, 'det': matchesDet, 'type': 'OO' if len(
matchesDet) == 1 else 'OM'})
for det_num in matchesDet:
det_rect_mat[det_num] = 1
evaluation_log += "Match GT #" + \
str(gt_num) + " with Det #" + str(matchesDet) + "\n"
else:
evaluation_log += "Match Discarded GT #" + \
str(gt_num) + " with Det #" + str(matchesDet) + " not single overlap\n"
# Find many-to-one matches
evaluation_log += "Find many-to-one matches\n"
for det_num in range(len(det_rects)):
if det_num not in det_dont_care_rects_num:
match, matchesGt = many_to_one_match(det_num)
if match is True:
# in deteval we have to make other validation
# before mark as one-to-one
if num_overlaps_det(det_num) >= 2:
det_rect_mat[det_num] = 1
recall_accum += (evaluation_param['MTYPE_OO_O'] if len(
matchesGt) == 1 else evaluation_param['MTYPE_OM_M'] * len(matchesGt))
precision_accum += (evaluation_param['MTYPE_OO_O'] if len(
matchesGt) == 1 else evaluation_param['MTYPE_OM_M'])
pairs.append(
{'gt': matchesGt, 'det': det_num, 'type': 'OO' if len(matchesGt) == 1 else 'MO'})
for gt_num in matchesGt:
gt_rect_mat[gt_num] = 1
evaluation_log += "Match GT #" + \
str(matchesGt) + " with Det #" + str(det_num) + "\n"
else:
evaluation_log += "Match Discarded GT #" + \
str(matchesGt) + " with Det #" + str(det_num) + " not single overlap\n"
num_gt_care = (len(gt_rects) - len(gt_dont_care_rects_num))
if num_gt_care == 0:
recall = float(1)
precision = float(0) if len(det_rects) > 0 else float(1)
else:
recall = float(recall_accum) / num_gt_care
precision = float(0) if (
len(det_rects) - len(det_dont_care_rects_num)) == 0 \
else float(precision_accum) / (
len(det_rects) - len(det_dont_care_rects_num))
hmean = 0 if (precision + recall) == 0 else 2.0 * \
precision * recall / (precision + recall)
method_recall_sum += recall_accum
method_precision_sum += precision_accum
num_gt += len(gt_rects) - len(gt_dont_care_rects_num)
num_det += len(det_rects) - len(det_dont_care_rects_num)
per_sample_metrics[resFile] = {
'precision': precision,
'recall': recall,
'hmean': hmean,
'pairs': pairs,
'recallMat': [] if len(det_rects) > 100 else recall_mat.tolist(),
'precision_mat': [] if len(det_rects) > 100 else precision_mat.tolist(),
'gt_pol_points': gt_pol_points,
'det_pol_points': det_pol_points,
'gtDontCare': gt_dont_care_rects_num,
'detDontCare': det_dont_care_rects_num,
'evaluation_params': evaluation_param,
'evaluation_log': evaluation_log}
method_recall = 0 if num_gt == 0 else method_recall_sum / num_gt
method_precision = 0 if num_det == 0 else method_precision_sum / num_det
method_hmean = 0 if method_recall + method_precision == 0 else 2 * \
method_recall * method_precision / (method_recall + method_precision)
method_metrics = {
'precision': method_precision,
'recall': method_recall,
'hmean': method_hmean}
res_dict = {
'calculated': True,
'Message': '',
'method': method_metrics,
'per_sample': per_sample_metrics}
return res_dict
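# The evaluation can also be driven programmatically, bypassing the CLI wrapper
# below (zip paths here are hypothetical):
#
#   params = default_evaluation_params()
#   validate_data('gt.zip', 'submit.zip', params)
#   results = evaluate_method('gt.zip', 'submit.zip', params)
#   print(results['method'])   # {'precision': ..., 'recall': ..., 'hmean': ...}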
if __name__ == '__main__':
rrc_evaluation_funcs.main_evaluation(
None,
default_evaluation_params,
validate_data,
evaluate_method)
| [
"collections.namedtuple",
"importlib.import_module",
"rrc_evaluation_funcs_1_1.decode_utf8",
"rrc_evaluation_funcs_1_1.validate_lines_in_file",
"rrc_evaluation_funcs_1_1.main_evaluation",
"rrc_evaluation_funcs_1_1.load_zip_file",
"rrc_evaluation_funcs_1_1.get_tl_line_values_from_file_contents"
] | [((1917, 2012), 'rrc_evaluation_funcs_1_1.load_zip_file', 'rrc_evaluation_funcs.load_zip_file', (['gt_file_path', "evaluation_params['GT_SAMPLE_NAME_2_ID']"], {}), "(gt_file_path, evaluation_params[\n 'GT_SAMPLE_NAME_2_ID'])\n", (1951, 2012), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((2028, 2131), 'rrc_evaluation_funcs_1_1.load_zip_file', 'rrc_evaluation_funcs.load_zip_file', (['sub_file_path', "evaluation_params['DET_SAMPLE_NAME_2_ID']", '(True)'], {}), "(sub_file_path, evaluation_params[\n 'DET_SAMPLE_NAME_2_ID'], True)\n", (2062, 2131), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((7020, 7066), 'collections.namedtuple', 'namedtuple', (['"""Rectangle"""', '"""xmin ymin xmax ymax"""'], {}), "('Rectangle', 'xmin ymin xmax ymax')\n", (7030, 7066), False, 'from collections import namedtuple\n'), ((7079, 7105), 'collections.namedtuple', 'namedtuple', (['"""Point"""', '"""x y"""'], {}), "('Point', 'x y')\n", (7089, 7105), False, 'from collections import namedtuple\n'), ((7116, 7210), 'rrc_evaluation_funcs_1_1.load_zip_file', 'rrc_evaluation_funcs.load_zip_file', (['gt_file_path', "evaluation_param['GT_SAMPLE_NAME_2_ID']"], {}), "(gt_file_path, evaluation_param[\n 'GT_SAMPLE_NAME_2_ID'])\n", (7150, 7210), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((7226, 7328), 'rrc_evaluation_funcs_1_1.load_zip_file', 'rrc_evaluation_funcs.load_zip_file', (['sub_file_path', "evaluation_param['DET_SAMPLE_NAME_2_ID']", '(True)'], {}), "(sub_file_path, evaluation_param[\n 'DET_SAMPLE_NAME_2_ID'], True)\n", (7260, 7328), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((19700, 19805), 'rrc_evaluation_funcs_1_1.main_evaluation', 'rrc_evaluation_funcs.main_evaluation', (['None', 'default_evaluation_params', 'validate_data', 'evaluate_method'], {}), '(None, default_evaluation_params,\n validate_data, evaluate_method)\n', (19736, 19805), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((2199, 2296), 'rrc_evaluation_funcs_1_1.validate_lines_in_file', 'rrc_evaluation_funcs.validate_lines_in_file', (['k', 'gt[k]', "evaluation_params['CRLF']", '(True)', '(True)'], {}), "(k, gt[k], evaluation_params[\n 'CRLF'], True, True)\n", (2242, 2296), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((2459, 2558), 'rrc_evaluation_funcs_1_1.validate_lines_in_file', 'rrc_evaluation_funcs.validate_lines_in_file', (['k', 'sub[k]', "evaluation_params['CRLF']", '(True)', '(False)'], {}), "(k, sub[k], evaluation_params[\n 'CRLF'], True, False)\n", (2502, 2558), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((3117, 3148), 'importlib.import_module', 'importlib.import_module', (['module'], {}), '(module)\n', (3140, 3148), False, 'import importlib\n'), ((7408, 7453), 'rrc_evaluation_funcs_1_1.decode_utf8', 'rrc_evaluation_funcs.decode_utf8', (['gt[resFile]'], {}), '(gt[resFile])\n', (7440, 7453), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((8068, 8184), 'rrc_evaluation_funcs_1_1.get_tl_line_values_from_file_contents', 'rrc_evaluation_funcs.get_tl_line_values_from_file_contents', (['gt_file', "evaluation_param['CRLF']", '(True)', '(True)', '(False)'], {}), "(gt_file,\n evaluation_param['CRLF'], True, True, False)\n", (8126, 8184), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((8896, 8943), 'rrc_evaluation_funcs_1_1.decode_utf8', 'rrc_evaluation_funcs.decode_utf8', (['subm[resFile]'], {}), '(subm[resFile])\n', (8928, 8943), True, 'import 
rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n'), ((8994, 9112), 'rrc_evaluation_funcs_1_1.get_tl_line_values_from_file_contents', 'rrc_evaluation_funcs.get_tl_line_values_from_file_contents', (['det_file', "evaluation_param['CRLF']", '(True)', '(False)', '(False)'], {}), "(det_file,\n evaluation_param['CRLF'], True, False, False)\n", (9052, 9112), True, 'import rrc_evaluation_funcs_1_1 as rrc_evaluation_funcs\n')] |
from flask_marshmallow import Marshmallow
from website.database.models import RandomPair, UsersPerson
marshmallow = Marshmallow()
class ResultSchema(marshmallow.SQLAlchemyAutoSchema):
class Meta:
model = RandomPair
include_fk = True
class RandomPersonSchema(marshmallow.SQLAlchemyAutoSchema):
class Meta:
model = UsersPerson
include_fk = True
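# Minimal usage sketch (assumes `pair` is a RandomPair instance fetched from the
# database elsewhere in the app); Schema.dump() returns a plain dict that can be
# passed to flask.jsonify:
#
#   result_schema = ResultSchema()
#   payload = result_schema.dump(pair)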
| [
"flask_marshmallow.Marshmallow"
] | [((118, 131), 'flask_marshmallow.Marshmallow', 'Marshmallow', ([], {}), '()\n', (129, 131), False, 'from flask_marshmallow import Marshmallow\n')] |
from errorLogger import BaseErrorLogger
from datetime import datetime
import sqlite3
import mysql.connector
class DatabaseLoggers(BaseErrorLogger):
@staticmethod
def databaseLogger(exception):
        errorTime = datetime.now()
        connection = None  # ensure the name exists for the finally block
        try:
            connection = sqlite3.connect("database.db")
cursor=connection.cursor()
cursor.execute(f"INSERT INTO Errors ('ErrorMessage','ErrorTime') VALUES ('{exception}','{errorTime}')")
connection.commit()
#mysqlConnection = mysql.connector.connect(host="localhost", user="root", password="<PASSWORD>",database="sicekmepeti")
#mysqlCursor = mysqlConnection.cursor()
#mysqlCursor.execute(f"INSERT INTO Errors (ErrorMessage,ErrorTime) VALUES ('{exception}','{errorTime}')")
#mysqlConnection.commit()
except:
pass
        finally:
            if connection is not None:
                connection.close()
            #mysqlConnection.close()
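# Usage sketch (the failing call is hypothetical): any caught exception can be
# handed to the logger, which appends it to the local SQLite `Errors` table.
#
#   try:
#       do_something_risky()
#   except Exception as exc:
#       DatabaseLoggers.databaseLogger(exc)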
| [
"datetime.datetime.now",
"sqlite3.connect"
] | [((220, 234), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (232, 234), False, 'from datetime import datetime\n'), ((271, 301), 'sqlite3.connect', 'sqlite3.connect', (['"""database.db"""'], {}), "('database.db')\n", (286, 301), False, 'import sqlite3\n')] |
"""
*************************************************************************
*************************************************************************
Python implementation of DRMRR algorithm introduced in the paper:
"Distributionally Robust Multi-Output Regression Ranking"
*************************************************************************
*************************************************************************
"""
import numpy as np
from scipy.stats import rankdata
from LTR_Functions import NDCG_K,MRR,AP_K
import pandas as pd
import xgboost as xgb
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
import tensorflow as tf
def Evaluation_LamdaMART_MAP_NDCG(Validation_Unique_QID,Learned_Model_i,Unified_All_Y_QID_X):
Validation_Num_Queries = len(Validation_Unique_QID)
    # 13 metric slots: NDCG@{1,5,10,25,50,100}, MRR (333333 is a placeholder), AP@{1,5,10,25,50,100}; only the length is used below
    Predicted_NDCGatk = np.array([1, 5, 10, 25, 50, 100, 333333, 1, 5, 10, 25, 50, 100])
NDCG_all = [0]*len(Predicted_NDCGatk)
for tt in range(Validation_Num_Queries):
Index_ID=np.where(Unified_All_Y_QID_X[:,1] == Validation_Unique_QID[tt])[0]
dValid_CoMPlex = xgb.DMatrix( Unified_All_Y_QID_X[Index_ID,2:] , label= np.expand_dims(Unified_All_Y_QID_X[Index_ID,0], axis=1) )
Pred_Q = pd.DataFrame(Learned_Model_i.predict(dValid_CoMPlex))
Ground_Q= Unified_All_Y_QID_X[Index_ID,0]
Pred_Ranks=(rankdata(-Pred_Q, method='ordinal')) # Decreasing order
Concat = np.hstack([ np.expand_dims(Ground_Q, axis=1) , np.expand_dims(Pred_Ranks, axis=1) ])
sorted_array = Concat[np.argsort(Concat[:, 1])]
RGT= sorted_array[:,0]
# Performance Metrics
Set_NDCG = [NDCG_K(RGT, i) for i in [1, 5, min(10,len(RGT)), min(25,len(RGT)), min(50,len(RGT)), min(100,len(RGT))]]
Set_Mean_Reciprocal_Rank = [MRR(RGT)]
Set_Average_Precision = [AP_K(RGT, i) for i in [1, 5, min(10,len(RGT)), min(25,len(RGT)), min(50,len(RGT)), min(100,len(RGT))]]
All_Metrics = Set_NDCG + Set_Mean_Reciprocal_Rank + Set_Average_Precision
NDCG_all = np.add(NDCG_all, All_Metrics)
Predicted_NDCG=NDCG_all/Validation_Num_Queries
return Predicted_NDCG
def Evaluation_LamdaMART_XE(Validation_Unique_QID,Learned_Model_i,Unified_All_Y_QID_X):
Validation_Num_Queries = len(Validation_Unique_QID)
    Predicted_NDCGatk = np.array([1, 5, 10, 25, 50, 100, 333333, 1, 5, 10, 25, 50, 100])  # same 13 metric slots as above
NDCG_all = [0]*len(Predicted_NDCGatk)
for tt in range(Validation_Num_Queries):
Index_ID=np.where(Unified_All_Y_QID_X[:,1] == Validation_Unique_QID[tt])[0]
Pred_Q = pd.DataFrame(Learned_Model_i.predict(Unified_All_Y_QID_X[Index_ID,2:]))
Ground_Q= Unified_All_Y_QID_X[Index_ID,0]
Pred_Ranks=(rankdata(-Pred_Q, method='ordinal')) # Decreasing order
Concat = np.hstack([ np.expand_dims(Ground_Q, axis=1) , np.expand_dims(Pred_Ranks, axis=1) ])
sorted_array = Concat[np.argsort(Concat[:, 1])]
RGT= sorted_array[:,0]
# Performance Metrics
Set_NDCG = [NDCG_K(RGT, i) for i in [1, 5, min(10,len(RGT)), min(25,len(RGT)), min(50,len(RGT)), min(100,len(RGT))]]
Set_Mean_Reciprocal_Rank = [MRR(RGT)]
Set_Average_Precision = [AP_K(RGT, i) for i in [1, 5, min(10,len(RGT)), min(25,len(RGT)), min(50,len(RGT)), min(100,len(RGT))]]
All_Metrics = Set_NDCG + Set_Mean_Reciprocal_Rank + Set_Average_Precision
NDCG_all = np.add(NDCG_all, All_Metrics)
Predicted_NDCG=NDCG_all/Validation_Num_Queries
return Predicted_NDCG
def FGSM_Func_SimpReg(THETA,Single_Doc_feat,Single_Doc_Rel,EPSILON_FGSM):
A1 = 2*np.matmul( THETA, np.transpose(THETA))*Single_Doc_feat
A2 = -2*Single_Doc_Rel*THETA
A3= A1+A2
FGSM_Delta = EPSILON_FGSM * np.sign(A3)
return FGSM_Delta
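# Reference derivation for FGSM_Func_SimpReg (the correspondence assumes THETA
# and the feature vector are stored so that the products above broadcast to a
# vector of the input's shape): for a linear model f(x) = theta^T x with squared
# loss L = (theta^T x - y)^2, the input gradient is
#   dL/dx = 2 * (theta^T x - y) * theta = 2 * theta theta^T x - 2 * y * theta,
# i.e. A1 + A2 above, and the FGSM step is delta = epsilon * sign(dL/dx).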
def get_NN_model(n_inputs, n_outputs,params):
model = Sequential()
model.add(Dense(128, input_dim=n_inputs, kernel_initializer='he_uniform', activation=params['activation']))
model.add(Dropout(0.2))
model.add(Dense(128, activation=params['activation']))
model.add(Dropout(0.2))
model.add(Dense(64, input_dim=n_inputs, kernel_initializer='he_uniform', activation=params['activation']))
model.add(Dropout(0.2))
model.add(Dense(32, activation=params['activation']))
model.add(Dense(n_outputs, kernel_initializer='he_uniform'))
model.compile(loss=params['losses'], optimizer='Adam')
return model
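# Hedged usage sketch of get_NN_model; the `params` keys mirror the ones read in
# this function and in Create_NN_BlackBox_Model's fit() call, while the concrete
# values and dimensions are illustrative only:
#
#   params = {'activation': 'relu', 'losses': 'mse',
#             'batch_size': 64, 'epochs': 50}
#   model = get_NN_model(n_inputs=136, n_outputs=6, params=params)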
def Create_NN_BlackBox_Model(Train_Unique_QID,BEST_Model,Final_X,Final_Relevance,Model,params):
if Model == 'MRR_or_DRMRR':
C=0
for tt in range(len(Train_Unique_QID)):
Index_ID=np.where(Final_X[:,0] == Train_Unique_QID[tt])[0]
Test_X_features = Final_X[Index_ID,1:]
if tt == 0:
X_SM = Test_X_features
else:
X_SM = np.vstack([X_SM, Test_X_features ])
for k in range(len(Test_X_features)):
P1 = np.matmul(np.transpose(BEST_Model) , Test_X_features[k,:])
if C == 0:
Y_SM = P1
else:
Y_SM = np.vstack([Y_SM, P1 ]) # each row: NDCG scores for Xi
C += 1
elif Model == 'LamdaMART_MAP_NDCG':
for tt in range(len(Train_Unique_QID)):
Index_ID=np.where(Final_X[:,0] == Train_Unique_QID[tt])[0]
dValid_CoMPlex = xgb.DMatrix( Final_X[Index_ID,1:] , label= np.expand_dims(Final_Relevance[Index_ID,:], axis=1) )
dValid_CoMPlex.set_group(group=np.array([len(Index_ID)]).flatten()) # Grouping
Pred_Q = pd.DataFrame(BEST_Model.predict(dValid_CoMPlex))
if tt == 0:
X_SM = Final_X[Index_ID,1:]
Y_SM = Pred_Q
else:
X_SM = np.vstack([X_SM, Final_X[Index_ID,1:] ])
Y_SM = np.vstack([Y_SM, Pred_Q ])
elif Model == 'XE_NDCG_MART':
for tt in range(len(Train_Unique_QID)):
Index_ID=np.where(Final_X[:,0] == Train_Unique_QID[tt])[0]
Pred_Q = pd.DataFrame(BEST_Model.predict(Final_X[Index_ID,1:]))
if tt == 0:
X_SM = Final_X[Index_ID,1:]
Y_SM = Pred_Q
else:
X_SM = np.vstack([X_SM, Final_X[Index_ID,1:] ])
Y_SM = np.vstack([Y_SM, Pred_Q ])
# Training NN Model
NN_Model = get_NN_model(X_SM.shape[1], Y_SM.shape[1], params)
NN_Model.fit(X_SM, Y_SM,batch_size= params['batch_size'] ,epochs=params['epochs'],verbose=0)
return NN_Model
def Generate_adversary_NN_Samples(model, image,label , eps):
image = np.expand_dims(image, axis=0)
# cast the image
image = tf.cast(image, tf.float32)
# record our gradients
with tf.GradientTape() as tape:
tape.watch(image)
pred = model(image)
loss = tf.keras.losses.MSE(label,pred)
gradient = tape.gradient(loss, image)
signedGrad = tf.sign(gradient)
adversary = (image + (signedGrad * eps)).numpy()
return adversary
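# Usage sketch (shapes and epsilon are assumptions): given a trained Keras model
# `surrogate`, one feature vector `x`, and its target vector `y`,
#
#   x_adv = Generate_adversary_NN_Samples(surrogate, x, y, eps=0.05)
#
# returns `x` shifted by eps along the sign of the input gradient of the MSE
# loss, i.e. a standard FGSM perturbation against the surrogate.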
| [
"tensorflow.keras.losses.MSE",
"LTR_Functions.NDCG_K",
"LTR_Functions.MRR",
"tensorflow.GradientTape",
"numpy.array",
"numpy.argsort",
"keras.layers.Dense",
"tensorflow.cast",
"numpy.where",
"tensorflow.sign",
"numpy.vstack",
"numpy.add",
"keras.models.Sequential",
"numpy.sign",
"numpy.transpose",
"keras.layers.Dropout",
"scipy.stats.rankdata",
"LTR_Functions.AP_K",
"numpy.expand_dims"
] | [((875, 939), 'numpy.array', 'np.array', (['[1, 5, 10, 25, 50, 100, 333333, 1, 5, 10, 25, 50, 100]'], {}), '([1, 5, 10, 25, 50, 100, 333333, 1, 5, 10, 25, 50, 100])\n', (883, 939), True, 'import numpy as np\n'), ((2363, 2427), 'numpy.array', 'np.array', (['[1, 5, 10, 25, 50, 100, 333333, 1, 5, 10, 25, 50, 100]'], {}), '([1, 5, 10, 25, 50, 100, 333333, 1, 5, 10, 25, 50, 100])\n', (2371, 2427), True, 'import numpy as np\n'), ((3883, 3895), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3893, 3895), False, 'from keras.models import Sequential\n'), ((6692, 6721), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (6706, 6721), True, 'import numpy as np\n'), ((6757, 6783), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (6764, 6783), True, 'import tensorflow as tf\n'), ((7004, 7021), 'tensorflow.sign', 'tf.sign', (['gradient'], {}), '(gradient)\n', (7011, 7021), True, 'import tensorflow as tf\n'), ((1397, 1432), 'scipy.stats.rankdata', 'rankdata', (['(-Pred_Q)'], {'method': '"""ordinal"""'}), "(-Pred_Q, method='ordinal')\n", (1405, 1432), False, 'from scipy.stats import rankdata\n'), ((2077, 2106), 'numpy.add', 'np.add', (['NDCG_all', 'All_Metrics'], {}), '(NDCG_all, All_Metrics)\n', (2083, 2106), True, 'import numpy as np\n'), ((2762, 2797), 'scipy.stats.rankdata', 'rankdata', (['(-Pred_Q)'], {'method': '"""ordinal"""'}), "(-Pred_Q, method='ordinal')\n", (2770, 2797), False, 'from scipy.stats import rankdata\n'), ((3442, 3471), 'numpy.add', 'np.add', (['NDCG_all', 'All_Metrics'], {}), '(NDCG_all, All_Metrics)\n', (3448, 3471), True, 'import numpy as np\n'), ((3783, 3794), 'numpy.sign', 'np.sign', (['A3'], {}), '(A3)\n', (3790, 3794), True, 'import numpy as np\n'), ((3910, 4011), 'keras.layers.Dense', 'Dense', (['(128)'], {'input_dim': 'n_inputs', 'kernel_initializer': '"""he_uniform"""', 'activation': "params['activation']"}), "(128, input_dim=n_inputs, kernel_initializer='he_uniform', activation=\n params['activation'])\n", (3915, 4011), False, 'from keras.layers import Dense\n'), ((4022, 4034), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4029, 4034), False, 'from keras.layers import Dropout\n'), ((4051, 4094), 'keras.layers.Dense', 'Dense', (['(128)'], {'activation': "params['activation']"}), "(128, activation=params['activation'])\n", (4056, 4094), False, 'from keras.layers import Dense\n'), ((4110, 4122), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4117, 4122), False, 'from keras.layers import Dropout\n'), ((4139, 4239), 'keras.layers.Dense', 'Dense', (['(64)'], {'input_dim': 'n_inputs', 'kernel_initializer': '"""he_uniform"""', 'activation': "params['activation']"}), "(64, input_dim=n_inputs, kernel_initializer='he_uniform', activation=\n params['activation'])\n", (4144, 4239), False, 'from keras.layers import Dense\n'), ((4250, 4262), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4257, 4262), False, 'from keras.layers import Dropout\n'), ((4279, 4321), 'keras.layers.Dense', 'Dense', (['(32)'], {'activation': "params['activation']"}), "(32, activation=params['activation'])\n", (4284, 4321), False, 'from keras.layers import Dense\n'), ((4337, 4386), 'keras.layers.Dense', 'Dense', (['n_outputs'], {'kernel_initializer': '"""he_uniform"""'}), "(n_outputs, kernel_initializer='he_uniform')\n", (4342, 4386), False, 'from keras.layers import Dense\n'), ((6817, 6834), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (6832, 6834), 
True, 'import tensorflow as tf\n'), ((6913, 6945), 'tensorflow.keras.losses.MSE', 'tf.keras.losses.MSE', (['label', 'pred'], {}), '(label, pred)\n', (6932, 6945), True, 'import tensorflow as tf\n'), ((1051, 1115), 'numpy.where', 'np.where', (['(Unified_All_Y_QID_X[:, 1] == Validation_Unique_QID[tt])'], {}), '(Unified_All_Y_QID_X[:, 1] == Validation_Unique_QID[tt])\n', (1059, 1115), True, 'import numpy as np\n'), ((1589, 1613), 'numpy.argsort', 'np.argsort', (['Concat[:, 1]'], {}), '(Concat[:, 1])\n', (1599, 1613), True, 'import numpy as np\n'), ((1693, 1707), 'LTR_Functions.NDCG_K', 'NDCG_K', (['RGT', 'i'], {}), '(RGT, i)\n', (1699, 1707), False, 'from LTR_Functions import NDCG_K, MRR, AP_K\n'), ((1833, 1841), 'LTR_Functions.MRR', 'MRR', (['RGT'], {}), '(RGT)\n', (1836, 1841), False, 'from LTR_Functions import NDCG_K, MRR, AP_K\n'), ((1875, 1887), 'LTR_Functions.AP_K', 'AP_K', (['RGT', 'i'], {}), '(RGT, i)\n', (1879, 1887), False, 'from LTR_Functions import NDCG_K, MRR, AP_K\n'), ((2539, 2603), 'numpy.where', 'np.where', (['(Unified_All_Y_QID_X[:, 1] == Validation_Unique_QID[tt])'], {}), '(Unified_All_Y_QID_X[:, 1] == Validation_Unique_QID[tt])\n', (2547, 2603), True, 'import numpy as np\n'), ((2954, 2978), 'numpy.argsort', 'np.argsort', (['Concat[:, 1]'], {}), '(Concat[:, 1])\n', (2964, 2978), True, 'import numpy as np\n'), ((3058, 3072), 'LTR_Functions.NDCG_K', 'NDCG_K', (['RGT', 'i'], {}), '(RGT, i)\n', (3064, 3072), False, 'from LTR_Functions import NDCG_K, MRR, AP_K\n'), ((3198, 3206), 'LTR_Functions.MRR', 'MRR', (['RGT'], {}), '(RGT)\n', (3201, 3206), False, 'from LTR_Functions import NDCG_K, MRR, AP_K\n'), ((3240, 3252), 'LTR_Functions.AP_K', 'AP_K', (['RGT', 'i'], {}), '(RGT, i)\n', (3244, 3252), False, 'from LTR_Functions import NDCG_K, MRR, AP_K\n'), ((1197, 1253), 'numpy.expand_dims', 'np.expand_dims', (['Unified_All_Y_QID_X[Index_ID, 0]'], {'axis': '(1)'}), '(Unified_All_Y_QID_X[Index_ID, 0], axis=1)\n', (1211, 1253), True, 'import numpy as np\n'), ((1484, 1516), 'numpy.expand_dims', 'np.expand_dims', (['Ground_Q'], {'axis': '(1)'}), '(Ground_Q, axis=1)\n', (1498, 1516), True, 'import numpy as np\n'), ((1521, 1555), 'numpy.expand_dims', 'np.expand_dims', (['Pred_Ranks'], {'axis': '(1)'}), '(Pred_Ranks, axis=1)\n', (1535, 1555), True, 'import numpy as np\n'), ((2849, 2881), 'numpy.expand_dims', 'np.expand_dims', (['Ground_Q'], {'axis': '(1)'}), '(Ground_Q, axis=1)\n', (2863, 2881), True, 'import numpy as np\n'), ((2886, 2920), 'numpy.expand_dims', 'np.expand_dims', (['Pred_Ranks'], {'axis': '(1)'}), '(Pred_Ranks, axis=1)\n', (2900, 2920), True, 'import numpy as np\n'), ((3667, 3686), 'numpy.transpose', 'np.transpose', (['THETA'], {}), '(THETA)\n', (3679, 3686), True, 'import numpy as np\n'), ((4676, 4723), 'numpy.where', 'np.where', (['(Final_X[:, 0] == Train_Unique_QID[tt])'], {}), '(Final_X[:, 0] == Train_Unique_QID[tt])\n', (4684, 4723), True, 'import numpy as np\n'), ((4883, 4917), 'numpy.vstack', 'np.vstack', (['[X_SM, Test_X_features]'], {}), '([X_SM, Test_X_features])\n', (4892, 4917), True, 'import numpy as np\n'), ((5017, 5041), 'numpy.transpose', 'np.transpose', (['BEST_Model'], {}), '(BEST_Model)\n', (5029, 5041), True, 'import numpy as np\n'), ((5173, 5194), 'numpy.vstack', 'np.vstack', (['[Y_SM, P1]'], {}), '([Y_SM, P1])\n', (5182, 5194), True, 'import numpy as np\n'), ((5361, 5408), 'numpy.where', 'np.where', (['(Final_X[:, 0] == Train_Unique_QID[tt])'], {}), '(Final_X[:, 0] == Train_Unique_QID[tt])\n', (5369, 5408), True, 'import numpy as np\n'), ((5842, 5882), 
'numpy.vstack', 'np.vstack', (['[X_SM, Final_X[Index_ID, 1:]]'], {}), '([X_SM, Final_X[Index_ID, 1:]])\n', (5851, 5882), True, 'import numpy as np\n'), ((5909, 5934), 'numpy.vstack', 'np.vstack', (['[Y_SM, Pred_Q]'], {}), '([Y_SM, Pred_Q])\n', (5918, 5934), True, 'import numpy as np\n'), ((5484, 5536), 'numpy.expand_dims', 'np.expand_dims', (['Final_Relevance[Index_ID, :]'], {'axis': '(1)'}), '(Final_Relevance[Index_ID, :], axis=1)\n', (5498, 5536), True, 'import numpy as np\n'), ((6039, 6086), 'numpy.where', 'np.where', (['(Final_X[:, 0] == Train_Unique_QID[tt])'], {}), '(Final_X[:, 0] == Train_Unique_QID[tt])\n', (6047, 6086), True, 'import numpy as np\n'), ((6311, 6351), 'numpy.vstack', 'np.vstack', (['[X_SM, Final_X[Index_ID, 1:]]'], {}), '([X_SM, Final_X[Index_ID, 1:]])\n', (6320, 6351), True, 'import numpy as np\n'), ((6378, 6403), 'numpy.vstack', 'np.vstack', (['[Y_SM, Pred_Q]'], {}), '([Y_SM, Pred_Q])\n', (6387, 6403), True, 'import numpy as np\n')] |
import os
import sys
import re
import logging
from collections import OrderedDict
import numpy as np
import nltk as nl
import pandas as pd
from dotenv import load_dotenv, find_dotenv
import spacy
import hunspell
from descrive import db, scrape
level = 'INFO' #'ERROR'
logging.basicConfig(level=level)
load_dotenv(find_dotenv())
nlp = spacy.load('en_core_web_md')
# Add personal possessive pronouns to stop-words collection
for ppp in ['my', 'your', 'their', 'his', 'her', 'our']:
nlp.vocab[ppp].is_stop = True
spellcheck = hunspell.HunSpell('dict/en_CA.dic', 'dict/en_CA.aff')
# Connect to db
db_url = os.getenv('DB_URL')
sess = db.connect_db(db_url)
def get_listings(subject):
'''
Returns a dataframe with all listings for a subject
'''
subject = subject.lower()
subject_id, ads_to_skip = db.probe_subject(subject, sess)
# Scrape unless it was done recently
if ads_to_skip is not None:
url = None
all_ads_scraped = False
# Every 50 ads, write to DB
limit = 50
while not all_ads_scraped:
ads, already_scraped_ad_ids, url = \
scrape.scrape(subject, ads_to_skip, limit=limit, url=url)
all_ads_scraped = True if url is None else False
db.write_ads_to_db(subject_id, ads, sess)
db.write_subject_listing_relations_for_already_scraped_ads_to_db(
subject_id, already_scraped_ad_ids, sess)
for ad_id in ads:
ads_to_skip.add(ad_id)
db.update_date_scraped(subject, sess)
db.delete_dupes(sess)
# This could be done with the ORM
query = f'''
select listing_id, title, description from (
select * from subjects
inner join subject_listings on subjects.id = subject_listings.subject_id where subjects.name = '{subject}'
) as this_subject_listings
inner join listings on this_subject_listings.listing_id = listings.id;
'''
return pd.read_sql(query, sess.get_bind(), index_col='listing_id')
def fix_capitalization(text):
'''
Lowercases sentences in a body of text that are either:
* all-(or nearly-all-)caps
Too many capitalized english words (very common in classifieds)
'''
sents = nl.tokenize.sent_tokenize(text)
# First, figure out if a sentence is mostly caps, vs lowers and digits
# Lowercasing mostly-caps sentences improves parsing, and using digits
# helps avoid targeting model names with this heuristic
for i, sent in enumerate(sents):
words = nl.tokenize.word_tokenize(sent)
uppers = 0
lowers_digits = 0
capitalized_words = 0
for word in words:
for letter in word:
if letter.isupper():
uppers += 1
elif letter.islower() or letter.isdigit():
lowers_digits += 1
if word[0].isupper() and spellcheck.spell(word):
capitalized_words += 1
if uppers > lowers_digits * 3 or capitalized_words > 5:
fixed_sent = sent.lower()
sents[i] = fixed_sent
return ' '.join(sents)
def replace_newlines_with_periods(descs):
newline_with_optional_periods = re.compile('\.?\n[-*]*')
return [newline_with_optional_periods.sub('. ', desc) for desc in descs]
# I'm sure there's a way to generalize this regex,
# but I'm also sure nobody will be describing a four-dimensional feature
def normalize_measurements(descs):
# start-of-line or whitespace
non_alphanumeric = r'([^\w])'
# measurement
m = r'(\d{1,9}|\d*\.\d+|\d+/\d+|\d+ \d+/\d+)'
# dimensional separator
DS = r'\s*[*xX×]\s*'
unit = r'[-\s]*(\'+|"|[a-zA-Z]{1,2}\d?|in\.|in(?:ch)?e?s?)'
# Keyphrase with which to replace measurements
# Will be affixed with dimensional info
MK = '1029384756'
# Unit and unitless regexes overlap, so they must be applied in that order
dimension_regexes = []
dimension_regexes.append((
re.compile(f'{non_alphanumeric}{m}{DS}{m}{DS}{m}{unit}{non_alphanumeric}'),
f'\\g<1>32{MK}\\6'
))
dimension_regexes.append((
re.compile(f'{non_alphanumeric}{m}{DS}{m}{DS}{m}{non_alphanumeric}'),
f'\\g<1>31{MK}\\5'
))
dimension_regexes.append((
re.compile(f'{non_alphanumeric}{m}{DS}{m}{unit}{non_alphanumeric}'),
f'\\g<1>22{MK}\\5'
))
dimension_regexes.append((
re.compile(f'{non_alphanumeric}{m}{DS}{m}{non_alphanumeric}'),
f'\\g<1>21{MK}\\4'
))
dimension_regexes.append((
re.compile(f'{non_alphanumeric}{m}{unit}{non_alphanumeric}'),
f'\\g<1>12{MK}\\4'
))
dimension_regexes.append((
re.compile(f'{non_alphanumeric}{m}{non_alphanumeric}'),
f'\\g<1>11{MK}\\3'
))
for regex, repl in dimension_regexes:
for desc_i, desc in enumerate(descs):
i = 0
while True:
subbed_desc = regex.sub(repl, desc, count=1)
i += 1
if i > 2000:
logging.error('too many measurements; probably a parsing error')
logging.error(desc)
logging.error(subbed_desc)
return
if desc == subbed_desc: break
desc = subbed_desc
descs[desc_i] = desc
def generate_preferred_spelling_dict(words):
'''
For some set of words, returns a dict mapping each unique lowercased word to
its most popular spelling and total occurrences of all spellings.
'''
spellings = {}
for word in words:
word_lower = word.lower()
if word_lower in spellings:
spellings[word_lower].append(word)
else:
spellings[word_lower] = [word]
preferred_spellings = {}
for (word, spelling_cands) in spellings.items():
n_occurrences = len(spelling_cands)
preferred_spelling = max(set(spelling_cands), key=spelling_cands.count)
preferred_spellings[word] = (preferred_spelling, n_occurrences)
return preferred_spellings
def generate_multiplicity_dict(words):
'''
Counts the number of occurrences of each word, case-sensitive.
'''
multiplicities = {}
for word in words:
if word not in multiplicities:
multiplicities[word] = words.count(word)
return multiplicities
def reassociate_orphaned_descriptor(orphaned_descriptor, features_descriptors):
most_occurrences = 0
for _, feature_descriptors in features_descriptors.items():
for i, (feature_descriptor, mult) in enumerate(feature_descriptors):
if orphaned_descriptor == feature_descriptor:
if mult > most_occurrences: most_occurrences = mult
for _, feature_descriptors in features_descriptors.items():
for i, (feature_descriptor, mult) in enumerate(feature_descriptors):
if mult == most_occurrences and orphaned_descriptor == feature_descriptor:
feature_descriptors[i] = (feature_descriptor, mult + 1)
return True
return False
measurement_code_substitution_map = {
re.compile('111029384756'): r'<span style="color: #ff00ff">Number</span>',
re.compile('121029384756'): r'<span style="color: #ff00ff">1D</span>',
re.compile('211029384756'): r'<span style="color: #ff00ff">2D unitless</span>',
re.compile('221029384756'): r'<span style="color: #ff00ff">2D</span>',
re.compile('311029384756'): r'<span style="color: #ff00ff">3D unitless</span>',
re.compile('321029384756'): r'<span style="color: #ff00ff">3D</span>',
}
def backconvert_measurement_codes(string):
for regex, repl in measurement_code_substitution_map.items():
string = regex.sub(repl, string)
return string
def top_features_and_descriptors(subject):
my_subject = subject
df = get_listings(my_subject)
# Kijiji uids look like unix timestamps, and afaict there's no way do stop
# pandas interpreting them as such while using orient='index'
#df.index = df.index.astype(np.int64) // 10**9
#return df
descs = [row['description'] for _, row in df.iterrows() if len(row['description']) < 400]
#logging.info(descs)
#descs = ['Kindhuman Kampionne road bike with 27" Mavic Aksium wheels and Ritchey WCS cockpit. 21 speed. BUY FAST']
# ## Pre-processing
# * lowercasing all-caps and over-capped sentences
# * replacing measurements with tokens identifying their dimensionality and whether or not they carry a unit
original_descs = descs.copy()
descs = replace_newlines_with_periods(descs)
logging.info('normalizing measurements')
normalize_measurements(descs)
descs = [fix_capitalization(desc) for desc in descs]
# lol, hardcode tv as the lemma for tvs
my_subject_lemmas = [word.lemma_ if word.text != 'tvs' else 'tv' for word in nlp(my_subject)]
docs = [nlp(desc) for desc in descs]
## Post-processing
tagged_words_spacy = []
for doc in docs:
tagged_words_spacy.append([(token.text, token.tag_) for token in doc])
my_subject_lower = my_subject.lower()
brand_model_cands = []
listings_described_features = []
listings_orphaned_descriptors = []
stop_tags = ['PRP', 'DT'] #'IN'
for doc in docs:
described_features = []
orphaned_descriptors = []
# Prevent run-on ads with a million products from dominating the
# results with nonsense
already_described_in_this_ad = set()
for np in doc.noun_chunks:
feature = np.root
if feature.text in already_described_in_this_ad:
continue
already_described_in_this_ad.add(feature.text)
if feature.tag_ not in stop_tags:
interesting_descriptors = [
word for word in np
if not word.tag_ in stop_tags
and not word.is_stop
and not word.text == feature.text
]
if np.root.lemma_ in my_subject_lemmas:
orphaned_descriptors.append(interesting_descriptors)
else:
described_features.append((
interesting_descriptors,
feature.text
))
listings_described_features.append(described_features)
listings_orphaned_descriptors.append(orphaned_descriptors)
for original, desc, described_features in zip(original_descs, descs, listings_described_features):
logging.info(original)
logging.info(desc)
logging.info(described_features)
features = [
feature
for listing_described_features in listings_described_features
for (descriptors, feature) in listing_described_features
]
feature_preferred_spellings = generate_preferred_spelling_dict(features)
popular_features = list(feature_preferred_spellings.items())
popular_features.sort(key=lambda desc: desc[1][1], reverse=True)
most_popular_features = [feature for (feature, _) in popular_features[:11]]
all_descriptors = set()
feature_descriptors = {feature:[] for feature in most_popular_features}
for listing_described_features in listings_described_features:
for descriptors, feature in listing_described_features:
feature = feature.lower()
if feature in most_popular_features:
feature_descriptions = []
for descriptor in descriptors:
if descriptor in all_descriptors or descriptor.is_stop: continue
if descriptor.head.text == feature:
full_description = []
# Not sure how valid the assumption is that the children will be
# in front of the main descriptor
for dependent_descriptor in descriptor.children:
if not dependent_descriptor.is_stop:
all_descriptors.add(dependent_descriptor)
full_description.append(dependent_descriptor.text)
all_descriptors.add(descriptor)
descriptor_text = descriptor.text
full_description.append(descriptor_text)
# This filters out a lot of stray punctuation
if not (len(full_description) == 1 and len(full_description[0]) == 1):
feature_descriptions.append(full_description)
feature_descriptors[feature].append(feature_descriptions)
flattened_orphaned_descriptors = [
descriptor.text
for listing_orphaned_descriptors in listings_orphaned_descriptors
for descriptors in listing_orphaned_descriptors
for descriptor in descriptors
]
preferred_descriptor_spellings = generate_preferred_spelling_dict(
[descriptor.text for descriptor in all_descriptors] +
flattened_orphaned_descriptors
)
top_descriptors = OrderedDict()
top_descriptors['Type'] = []
for feature in most_popular_features:
top_descriptors[feature] = []
for feature, listings in feature_descriptors.items():
flattened_indirect_descriptor_phrase_list = []
for listing in listings:
for description in listing:
# This will unfortunately put spaces around hyphens, and that sort of thing
for i, descriptor in enumerate(description):
description[i] = backconvert_measurement_codes(descriptor)
# It used to be a mmt code
text_description = ' '.join([
preferred_descriptor_spellings[descriptor.lower()][0]
if '<spa' not in descriptor\
else descriptor
for descriptor in description
])
flattened_indirect_descriptor_phrase_list.append(text_description)
preferred_descriptions = list(generate_multiplicity_dict(flattened_indirect_descriptor_phrase_list).items())
top_descriptors[feature] = preferred_descriptions
for feature, descriptors in top_descriptors.items():
descriptors.sort(key=lambda desc: desc[1], reverse=True)
top_descriptors[feature] = descriptors[:5]
true_orphans = []
for orphaned_descriptor in flattened_orphaned_descriptors:
if len(orphaned_descriptor) == 1: continue
# possibly bugged
orphaned_descriptor = backconvert_measurement_codes(orphaned_descriptor)
# It used to be a mmt code
orphaned_descriptor =\
preferred_descriptor_spellings[orphaned_descriptor.lower()][0] \
if '<spa' not in orphaned_descriptor\
else orphaned_descriptor
if not reassociate_orphaned_descriptor(orphaned_descriptor, top_descriptors):
true_orphans.append(orphaned_descriptor)
preferred_orphan_descriptors = list(generate_multiplicity_dict(true_orphans).items())
preferred_orphan_descriptors.sort(key=lambda desc: desc[1], reverse=True)
top_descriptors['Type'] = preferred_orphan_descriptors[:5]
# re-sort since some multiplicities may have been incremented
for feature, descriptors in top_descriptors.items():
descriptors.sort(key=lambda desc: desc[1], reverse=True)
top_descriptors[feature] = descriptors
return top_descriptors
| [
"logging.basicConfig",
"descrive.db.probe_subject",
"collections.OrderedDict",
"descrive.db.delete_dupes",
"dotenv.find_dotenv",
"os.getenv",
"re.compile",
"spacy.load",
"descrive.db.write_ads_to_db",
"descrive.scrape.scrape",
"descrive.db.connect_db",
"nltk.tokenize.word_tokenize",
"hunspell.HunSpell",
"descrive.db.write_subject_listing_relations_for_already_scraped_ads_to_db",
"nltk.tokenize.sent_tokenize",
"descrive.db.update_date_scraped",
"logging.info",
"logging.error"
] | [((272, 304), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level'}), '(level=level)\n', (291, 304), False, 'import logging\n'), ((339, 367), 'spacy.load', 'spacy.load', (['"""en_core_web_md"""'], {}), "('en_core_web_md')\n", (349, 367), False, 'import spacy\n'), ((534, 587), 'hunspell.HunSpell', 'hunspell.HunSpell', (['"""dict/en_CA.dic"""', '"""dict/en_CA.aff"""'], {}), "('dict/en_CA.dic', 'dict/en_CA.aff')\n", (551, 587), False, 'import hunspell\n'), ((614, 633), 'os.getenv', 'os.getenv', (['"""DB_URL"""'], {}), "('DB_URL')\n", (623, 633), False, 'import os\n'), ((641, 662), 'descrive.db.connect_db', 'db.connect_db', (['db_url'], {}), '(db_url)\n', (654, 662), False, 'from descrive import db, scrape\n'), ((317, 330), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (328, 330), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((824, 855), 'descrive.db.probe_subject', 'db.probe_subject', (['subject', 'sess'], {}), '(subject, sess)\n', (840, 855), False, 'from descrive import db, scrape\n'), ((2259, 2290), 'nltk.tokenize.sent_tokenize', 'nl.tokenize.sent_tokenize', (['text'], {}), '(text)\n', (2284, 2290), True, 'import nltk as nl\n'), ((3234, 3259), 're.compile', 're.compile', (['"""\\\\.?\n[-*]*"""'], {}), "('\\\\.?\\n[-*]*')\n", (3244, 3259), False, 'import re\n'), ((7157, 7183), 're.compile', 're.compile', (['"""111029384756"""'], {}), "('111029384756')\n", (7167, 7183), False, 'import re\n'), ((7236, 7262), 're.compile', 're.compile', (['"""121029384756"""'], {}), "('121029384756')\n", (7246, 7262), False, 'import re\n'), ((7311, 7337), 're.compile', 're.compile', (['"""211029384756"""'], {}), "('211029384756')\n", (7321, 7337), False, 'import re\n'), ((7395, 7421), 're.compile', 're.compile', (['"""221029384756"""'], {}), "('221029384756')\n", (7405, 7421), False, 'import re\n'), ((7470, 7496), 're.compile', 're.compile', (['"""311029384756"""'], {}), "('311029384756')\n", (7480, 7496), False, 'import re\n'), ((7554, 7580), 're.compile', 're.compile', (['"""321029384756"""'], {}), "('321029384756')\n", (7564, 7580), False, 'import re\n'), ((8633, 8673), 'logging.info', 'logging.info', (['"""normalizing measurements"""'], {}), "('normalizing measurements')\n", (8645, 8673), False, 'import logging\n'), ((13141, 13154), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13152, 13154), False, 'from collections import OrderedDict\n'), ((1527, 1564), 'descrive.db.update_date_scraped', 'db.update_date_scraped', (['subject', 'sess'], {}), '(subject, sess)\n', (1549, 1564), False, 'from descrive import db, scrape\n'), ((1573, 1594), 'descrive.db.delete_dupes', 'db.delete_dupes', (['sess'], {}), '(sess)\n', (1588, 1594), False, 'from descrive import db, scrape\n'), ((2556, 2587), 'nltk.tokenize.word_tokenize', 'nl.tokenize.word_tokenize', (['sent'], {}), '(sent)\n', (2581, 2587), True, 'import nltk as nl\n'), ((10584, 10606), 'logging.info', 'logging.info', (['original'], {}), '(original)\n', (10596, 10606), False, 'import logging\n'), ((10615, 10633), 'logging.info', 'logging.info', (['desc'], {}), '(desc)\n', (10627, 10633), False, 'import logging\n'), ((10642, 10674), 'logging.info', 'logging.info', (['described_features'], {}), '(described_features)\n', (10654, 10674), False, 'import logging\n'), ((1140, 1197), 'descrive.scrape.scrape', 'scrape.scrape', (['subject', 'ads_to_skip'], {'limit': 'limit', 'url': 'url'}), '(subject, ads_to_skip, limit=limit, url=url)\n', (1153, 1197), False, 'from descrive import db, scrape\n'), ((1271, 1312), 
'descrive.db.write_ads_to_db', 'db.write_ads_to_db', (['subject_id', 'ads', 'sess'], {}), '(subject_id, ads, sess)\n', (1289, 1312), False, 'from descrive import db, scrape\n'), ((1325, 1435), 'descrive.db.write_subject_listing_relations_for_already_scraped_ads_to_db', 'db.write_subject_listing_relations_for_already_scraped_ads_to_db', (['subject_id', 'already_scraped_ad_ids', 'sess'], {}), '(subject_id,\n already_scraped_ad_ids, sess)\n', (1389, 1435), False, 'from descrive import db, scrape\n'), ((4014, 4088), 're.compile', 're.compile', (['f"""{non_alphanumeric}{m}{DS}{m}{DS}{m}{unit}{non_alphanumeric}"""'], {}), "(f'{non_alphanumeric}{m}{DS}{m}{DS}{m}{unit}{non_alphanumeric}')\n", (4024, 4088), False, 'import re\n'), ((4163, 4231), 're.compile', 're.compile', (['f"""{non_alphanumeric}{m}{DS}{m}{DS}{m}{non_alphanumeric}"""'], {}), "(f'{non_alphanumeric}{m}{DS}{m}{DS}{m}{non_alphanumeric}')\n", (4173, 4231), False, 'import re\n'), ((4306, 4373), 're.compile', 're.compile', (['f"""{non_alphanumeric}{m}{DS}{m}{unit}{non_alphanumeric}"""'], {}), "(f'{non_alphanumeric}{m}{DS}{m}{unit}{non_alphanumeric}')\n", (4316, 4373), False, 'import re\n'), ((4448, 4509), 're.compile', 're.compile', (['f"""{non_alphanumeric}{m}{DS}{m}{non_alphanumeric}"""'], {}), "(f'{non_alphanumeric}{m}{DS}{m}{non_alphanumeric}')\n", (4458, 4509), False, 'import re\n'), ((4584, 4644), 're.compile', 're.compile', (['f"""{non_alphanumeric}{m}{unit}{non_alphanumeric}"""'], {}), "(f'{non_alphanumeric}{m}{unit}{non_alphanumeric}')\n", (4594, 4644), False, 'import re\n'), ((4719, 4773), 're.compile', 're.compile', (['f"""{non_alphanumeric}{m}{non_alphanumeric}"""'], {}), "(f'{non_alphanumeric}{m}{non_alphanumeric}')\n", (4729, 4773), False, 'import re\n'), ((5073, 5137), 'logging.error', 'logging.error', (['"""too many measurements; probably a parsing error"""'], {}), "('too many measurements; probably a parsing error')\n", (5086, 5137), False, 'import logging\n'), ((5158, 5177), 'logging.error', 'logging.error', (['desc'], {}), '(desc)\n', (5171, 5177), False, 'import logging\n'), ((5198, 5224), 'logging.error', 'logging.error', (['subbed_desc'], {}), '(subbed_desc)\n', (5211, 5224), False, 'import logging\n')] |
'''
The circle of fifths is a useful tool for determining the various chords within a progression based on key
Much of what I have learned about the circle of 5ths and how to code for it is from
Rand Scullard's excellent JS app http://randscullard.com/CircleOfFifths/
(It is very useful and well done)
'''
import collections
# The currently selected tonic in the tonic table.
default_tonic = "A"
# The currently selected mode in the mode table.
#default_mode = "ionian"
default_mode = "minor"
# This is for computer consumption... so we will never 'see' Rand's CSS
position_cord_type = [
'major',
'major',
'major',
'minor',
'minor',
'minor',
'diminished'
]
# Hash table of key signatures, where the key is the number of sharps (positive) or flats (negative).
# The value is an array of note names in the key signature, starting at the 1:00 clock position
# and ending at the 12:00 position.
key_signatures_d = {
-13: [ "Abb", "Ebb", "Bbb", "Fb", "Cb", "Gb", "Db", "Ab", "Eb", "Cbb", "Gbb", "Dbb" ],
-12: [ "Abb", "Ebb", "Bbb", "Fb", "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "Gbb", "Dbb" ],
-11: [ "Abb", "Ebb", "Bbb", "Fb", "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "F", "Dbb" ],
-10: [ "Abb", "Ebb", "Bbb", "Fb", "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-9 : [ "G", "Ebb", "Bbb", "Fb", "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-8 : [ "G", "D", "Bbb", "Fb", "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-7 : [ "G", "D", "A", "Fb", "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-6 : [ "G", "D", "A", "E", "Cb", "Gb", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-5 : [ "G", "D", "A", "E", "B", "Gb", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-4 : [ "G", "D", "A", "E", "B", "F#", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-3 : [ "G", "D", "A", "E", "B", "F#", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-2 : [ "G", "D", "A", "E", "B", "F#", "Db", "Ab", "Eb", "Bb", "F", "C" ],
-1 : [ "G", "D", "A", "E", "B", "F#", "Db", "Ab", "Eb", "Bb", "F", "C" ],
0 : [ "G", "D", "A", "E", "B", "F#", "Db", "Ab", "Eb", "Bb", "F", "C" ],
1 : [ "G", "D", "A", "E", "B", "F#", "Db", "Ab", "Eb", "Bb", "F", "C" ],
2 : [ "G", "D", "A", "E", "B", "F#", "C#", "Ab", "Eb", "Bb", "F", "C" ],
3 : [ "G", "D", "A", "E", "B", "F#", "C#", "G#", "Eb", "Bb", "F", "C" ],
4 : [ "G", "D", "A", "E", "B", "F#", "C#", "G#", "D#", "Bb", "F", "C" ],
5 : [ "G", "D", "A", "E", "B", "F#", "C#", "G#", "D#", "A#", "F", "C" ],
6 : [ "G", "D", "A", "E", "B", "F#", "C#", "G#", "D#", "A#", "E#", "C" ],
7 : [ "G", "D", "A", "E", "B", "F#", "C#", "G#", "D#", "A#", "E#", "B#" ],
8 : [ "F##", "D", "A", "E", "B", "F#", "C#", "G#", "D#", "A#", "E#", "B#" ],
9 : [ "F##", "C##", "A", "E", "B", "F#", "C#", "G#", "D#", "A#", "E#", "B#" ],
10: [ "F##", "C##", "G##", "E", "B", "F#", "C#", "G#", "D#", "A#", "E#", "B#" ],
11: [ "F##", "C##", "G##", "D##", "B", "F#", "C#", "G#", "D#", "A#", "E#", "B#" ],
12: [ "F##", "C##", "G##", "D##", "A##", "F#", "C#", "G#", "D#", "A#", "E#", "B#" ],
13: [ "F##", "C##", "G##", "D##", "A##", "E##", "C#", "G#", "D#", "A#", "E#", "B#" ]
}
# Array of scale degrees in the Lydian mode, starting at the first note position on the
# circle and proceeding clockwise. Modes other than Lydian are simply rotations of this array by
# the number of steps specified by the 'offset' field in mode_info_d
degrees_li = [1, 5, 2, 6, 3, 7, 4]
# Hash table of information on each tonic that can be selected by the user, where the key is
# the tonic itself, and the value has two pieces of info: clock_position is the clock position on the
# circle where the notes of the Lydian mode begin for that tonic. key_signature_id is the key signature
# in the key_signatures_d table for that tonic in the Lydian mode. (The mode_info_d table
# is used to translate this info into modes other than Lydian.)
tonic_info_d = {
"B#": {'rotation': -11, 'clock_position': 12, 'key_signature_id': 13 },
"E#": {'rotation': -10, 'clock_position': 11, 'key_signature_id': 12 },
"A#": {'rotation': -9, 'clock_position': 10, 'key_signature_id': 11 },
"D#": {'rotation': -8, 'clock_position': 9, 'key_signature_id': 10 },
"G#": {'rotation': -7, 'clock_position': 8, 'key_signature_id': 9 },
"C#": {'rotation': -6, 'clock_position': 7, 'key_signature_id': 8 },
"F#": {'rotation': -5, 'clock_position': 6, 'key_signature_id': 7 },
"B" : {'rotation': -4, 'clock_position': 5, 'key_signature_id': 6 },
"E" : {'rotation': -3, 'clock_position': 4, 'key_signature_id': 5 },
"A" : {'rotation': -2, 'clock_position': 3, 'key_signature_id': 4 },
"D" : {'rotation': -1, 'clock_position': 2, 'key_signature_id': 3 },
"G" : {'rotation': 0, 'clock_position': 1, 'key_signature_id': 2 },
"C" : {'rotation': -11, 'clock_position': 12, 'key_signature_id': 1 },
"F" : {'rotation': -10, 'clock_position': 11, 'key_signature_id': 0 },
"Bb": {'rotation': -9, 'clock_position': 10, 'key_signature_id': -1 },
"Eb": {'rotation': -8, 'clock_position': 9, 'key_signature_id': -2 },
"Ab": {'rotation': -7, 'clock_position': 8, 'key_signature_id': -3 },
"Db": {'rotation': -6, 'clock_position': 7, 'key_signature_id': -4 },
"Gb": {'rotation': -5, 'clock_position': 6, 'key_signature_id': -5 },
"Cb": {'rotation': -4, 'clock_position': 5, 'key_signature_id': -6 },
"Fb": {'rotation': -3, 'clock_position': 4, 'key_signature_id': -7 }
}
# Hash table of information on each mode that can be selected by the user, where the key is
# the mode itself, and the 'offset' value has the number of steps counterclockwise around
# the circle where the mode begins, relative to Lydian. For example, if the Lydian begins
# at 3:00, then the Mixolydian of the same tonic begins at 1:00.
# SZABEL I'm using collections.deque so the negative number becomes positive
mode_info_d = {
"lydian" : { 'offset': 0, 'rotation': 0 },
"ionian" : { 'offset': -1, 'rotation': 1 },
"major" : { 'offset': -1, 'rotation': 1 },
"mixolydian" : { 'offset': -2, 'rotation': 2 },
"dorian" : { 'offset': -3, 'rotation': 3 },
"aeolian" : { 'offset': -4, 'rotation': 4 },
"minor" : { 'offset': -4, 'rotation': 4 },
"phrygian" : { 'offset': -5, 'rotation': 5 },
"locrian" : { 'offset': -6, 'rotation': 6 }
}
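# Small illustration (arbitrary values) of the deque rotation used below:
# rotate(n) with positive n shifts elements to the right, so the negative mode
# offsets above are applied via their positive 'rotation' equivalents.
#
#   from collections import deque
#   d = deque([1, 2, 3, 4, 5])
#   d.rotate(2)   # deque([4, 5, 1, 2, 3])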
def get_key_info(tonic=default_tonic, mode=default_mode):
if not tonic in tonic_info_d:
raise Exception("Please use one of the following tonics ['{}']".format("', '".join(tonic_info_d.keys())))
tonic_info = tonic_info_d[tonic]
if not mode in mode_info_d:
raise Exception("Please use one of the following modes ['{}']".format("', '".join(mode_info_d.keys())))
mode_info = mode_info_d[mode]
# I suspect that this is no longer needed and that we could use the most common
# names internally
note_names = key_signatures_d[
tonic_info['key_signature_id']
]
# deque note_names for rotation
note_names = collections.deque(note_names)
degrees = collections.deque(degrees_li)
# rotate notes so that tonic is in position 0
note_names.rotate(tonic_info['rotation'])
# now rotate both degrees and notes by mode
note_names.rotate(mode_info['rotation'])
degrees.rotate(mode_info['rotation'])
# now write everything to a dict
return_d = {}
for i, degree in enumerate(degrees):
return_d[degree] = {
'tonic': note_names[i],
'cord_type': position_cord_type[i]
}
return return_d
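# Example call (shape of the result; degree keys come from degrees_li):
#
#   key_info = get_key_info('A', 'minor')
#   # key_info maps scale degree -> {'tonic': note name, 'cord_type': chord quality},
#   # e.g. key_info[1] == {'tonic': 'A', 'cord_type': 'minor'}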
| [
"collections.deque"
] | [((7522, 7551), 'collections.deque', 'collections.deque', (['note_names'], {}), '(note_names)\n', (7539, 7551), False, 'import collections\n'), ((7566, 7595), 'collections.deque', 'collections.deque', (['degrees_li'], {}), '(degrees_li)\n', (7583, 7595), False, 'import collections\n')] |
#!/usr/bin/env python3
# usb.py
#
# Copyright 2013 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import select
import sys
import threading
import time
import traceback
root = os.path.dirname(__file__) or os.getcwd()
sys.path.append(os.path.join(root, "usb"))
del root
from USB import *
from USBDevice import *
from USBConfiguration import *
from USBInterface import *
from USBEndpoint import *
from Facedancer import *
from MAXUSBApp import *
from serial import Serial, PARITY_NONE
from serial.serialutil import SerialException
import tools
DEBUG = False
DEFAULT_TTY = "/dev/ttyUSB0"
class dizzyUSBDevice(USBDevice):
name = "dizzy USB device"
class dizzyUSBInterface(USBInterface):
name = "dizzy USB interface"
def __init__(self, ID, verbose=0):
endpoints = [ USBEndpoint(
ID["EP"].index(i), # endpoint number
################# fill in data from file ################
USBEndpoint.direction_in,
USBEndpoint.transfer_type_interrupt,
USBEndpoint.sync_type_none,
USBEndpoint.usage_type_data,
#########################################################
i["wMaxPacketSize"], # max packet size
i["bInterval"], # polling interval, see USB 2.0 spec Table 9-13
self.handle_buffer_available # handler function
) for i in ID["EP"] ]
USBInterface.__init__(
self,
ID["bInterfaceNumber"], # interface number
ID["bAlternateSetting"], # alternate setting
ID["bInterfaceClass"], # interface class
ID["bInterfaceSubClass"], # subclass
ID["bInterfaceProtocol"], # protocol
ID["iInterface"], # string index
verbose,
endpoints
)
def handle_buffer_available(self):
pass
def __init__(self, maxusb_app, DD, verbose=0, data="", fuzz_dscr=""):
config = [ USBConfiguration(
DD["CD"].index(i), # index
i["iConfiguration_str"], # string desc
[ self.dizzyUSBInterface(j, verbose=verbose)
for j in i["ID"] ] # interfaces
) for i in DD["CD"] ]
USBDevice.__init__(
self,
maxusb_app,
DD["bDeviceClass"], # device class
DD["bDeviceSubClass"], # device subclass
DD["bDeviceProtocol"], # protocol release number
DD["bMaxPacketSize"], # max packet size for endpoint 0
DD["idVendor"], # vendor id
DD["idProduct"], # product id
self.bcd2int(DD["bcdDevice"]), # device revision
DD["iManufacturer_str"], # manufacturer string
DD["iProduct_str"], # product string
DD["iSerial_str"], # serial number string
config,
verbose=verbose
)
for i in DD["CD"]:
for j in i["ID"]:
self.strings.insert(j["iInterface"], j["iInterface_str"])
self.data = data
self.fuzz_dscr = fuzz_dscr
self.dd_sent = False
self.cd_sent = False
self.scr_recieved = False
def bcd2int(self, bcd):
tmp = bcd.split(".")
return (int(tmp[0]) << 8) + int(tmp[1])
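    # Worked example for bcd2int (descriptive comment, not original code): a bcdDevice string
    # such as "6.8" becomes (6 << 8) + 8 = 0x0608 = 1544, i.e. the major version ends up in the
    # high byte and the minor version in the low byte of the device-descriptor field.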
def handle_get_descriptor_request(self, req):
dtype = (req.value >> 8) & 0xff
dindex = req.value & 0xff
lang = req.index
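        # Descriptive note (added): for GET_DESCRIPTOR the high byte of wValue is the descriptor
        # type and the low byte the descriptor index, while wIndex carries the language ID used
        # for string descriptors (USB 2.0 specification, section 9.4.3).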
n = req.length
response = None
if self.verbose > 2:
print(self.name, ("received GET_DESCRIPTOR req %d, index %d, " \
+ "language 0x%04x, length %d") \
% (dtype, dindex, lang, n))
if dtype == USB.desc_type_device and self.fuzz_dscr == "DD":
response = self.data
elif dtype == USB.desc_type_configuration and self.fuzz_dscr == "CD":
response = self.data
#add IDs and EDs to response!
else:
response = self.descriptors.get(dtype, None)
if callable(response):
response = response(dindex)
if not response is None:
n = min(n, len(response))
self.maxusb_app.verbose += 1
self.maxusb_app.send_on_endpoint(0, response[:n])
self.maxusb_app.verbose -= 1
if self.verbose > 5:
print(self.name, "sent", n, "bytes in response")
else:
self.maxusb_app.stall_ep0()
if n == len(response):
if dtype == USB.desc_type_device:
self.dd_sent = True
elif dtype == USB.desc_type_configuration:
self.cd_sent = True
def handle_set_configuration_request(self, req):
if self.verbose > 2:
print(self.name, "received SET_CONFIGURATION request")
# configs are one-based
self.config_num = req.value - 1
self.configuration = self.configurations[self.config_num]
self.state = USB.state_configured
# collate endpoint numbers
self.endpoints = { }
for i in self.configuration.interfaces:
for e in i.endpoints:
self.endpoints[e.number] = e
# HACK: blindly acknowledge request
self.ack_status_stage()
self.scr_recieved = True
class dizzyUSB(object):
def __init__(self, filename, timeout, device=DEFAULT_TTY, data="", fuzz_dscr=""):
self.filename = filename
self.timeout = timeout
self.device = device
self.data = data
self.fuzz_dscr = fuzz_dscr
self.sp = None
        self.d = None
        self.opened = False
def open(self, dst=""):
if DEBUG:
verbose = 1
else:
verbose = 0
ns = {}
with open(self.filename) as f:
exec(compile(f.read(), self.filename, 'exec'), ns)
DD = ns["DD"]
success = False
if DEBUG:
print("setting up facedancer")
sys.__stdout__.flush()
while not success:
try:
self.sp = Serial(self.device, 115200, parity=PARITY_NONE, timeout=2)
self.fd = Facedancer(self.sp, verbose=verbose)
self.app = MAXUSBApp(self.fd, verbose=verbose)
self.d = dizzyUSBDevice(self.app, DD, verbose, self.data, self.fuzz_dscr)
success = True
except:
time.sleep(0.1)
self.d.connect()
self.t = threading.Thread(target=self.run)
self.ep = None
self.opened = False
if not dst == "":
self.ep = int(dst)
self.t.start()
self.opened = True
if DEBUG:
print("Waiting for USB to setup...")
if self.fuzz_dscr == "":
time.sleep(2)
else:
times = self.timeout
while (not (self.d.dd_sent and self.d.cd_sent and self.d.scr_recieved and False)) and times > 0:
if DEBUG:
sys.__stdout__.write(".")
sys.__stdout__.flush()
time.sleep(0.1)
times -= 1
if DEBUG:
sys.__stdout__.write("\n")
sys.__stdout__.flush()
if times <= 0 and DEBUG:
print("timeout reached, canceled!")
#raise
return
if DEBUG:
print("USB setup complete.")
def run(self):
try:
self.d.run()
except SerialException:
pass
except select.error:
pass
except OSError:
pass
except TypeError:
pass
except IndexError:
pass
except Exception as e:
if DEBUG:
traceback.print_exc()
print(e)
self.opened = False
def close(self):
        if not self.opened:
return
if not self.d is None:
try:
self.d.disconnect()
except IndexError:
pass
except SerialException:
pass
except ValueError:
pass
except Exception as e:
if DEBUG:
traceback.print_exc()
print(e)
if not self.sp is None:
self.sp.close()
        self.opened = False
def read(self):
pass
def write(self, data):
if not self.ep is None:
while not self.opened:
time.sleep(0.1)
try:
self.app.send_on_endpoint(self.ep, data)
except Exception as e:
#~ if DEBUG:
#~ traceback.print_exc()
#~ print(e)
raise e
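# Illustrative usage sketch (added comment; the config filename, endpoint number and payload are
# assumptions -- the .py file must define a DD dict as read by dizzyUSB.open()):
#   usb = dizzyUSB("descriptors.py", timeout=100, device=DEFAULT_TTY)
#   usb.open(dst="1")        # dst selects the endpoint number used by write()
#   usb.write(b"\x41\x42")   # forwarded via MAXUSBApp.send_on_endpoint()
#   usb.close()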
| [
"sys.__stdout__.flush",
"os.path.join",
"os.getcwd",
"sys.__stdout__.write",
"os.path.dirname",
"serial.Serial",
"threading.Thread",
"traceback.print_exc"
] | [((1787, 1812), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1802, 1812), False, 'import os\n'), ((1816, 1827), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1825, 1827), False, 'import os\n'), ((1844, 1869), 'os.path.join', 'os.path.join', (['root', '"""usb"""'], {}), "(root, 'usb')\n", (1856, 1869), False, 'import os\n'), ((8262, 8284), 'sys.__stdout__.flush', 'sys.__stdout__.flush', ([], {}), '()\n', (8282, 8284), False, 'import sys\n'), ((8764, 8797), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.run'}), '(target=self.run)\n', (8780, 8797), False, 'import threading\n'), ((8355, 8413), 'serial.Serial', 'Serial', (['self.device', '(115200)'], {'parity': 'PARITY_NONE', 'timeout': '(2)'}), '(self.device, 115200, parity=PARITY_NONE, timeout=2)\n', (8361, 8413), False, 'from serial import Serial, PARITY_NONE\n'), ((9459, 9485), 'sys.__stdout__.write', 'sys.__stdout__.write', (['"""\n"""'], {}), "('\\n')\n", (9479, 9485), False, 'import sys\n'), ((9502, 9524), 'sys.__stdout__.flush', 'sys.__stdout__.flush', ([], {}), '()\n', (9522, 9524), False, 'import sys\n'), ((9293, 9318), 'sys.__stdout__.write', 'sys.__stdout__.write', (['"""."""'], {}), "('.')\n", (9313, 9318), False, 'import sys\n'), ((9339, 9361), 'sys.__stdout__.flush', 'sys.__stdout__.flush', ([], {}), '()\n', (9359, 9361), False, 'import sys\n'), ((10085, 10106), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (10104, 10106), False, 'import traceback\n'), ((10553, 10574), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (10572, 10574), False, 'import traceback\n')] |
# Generated by Django 3.2.5 on 2021-09-13 15:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0005_alter_playeringame_color'),
]
operations = [
migrations.RemoveField(
model_name='game',
name='winner',
),
migrations.AddField(
model_name='game',
name='draw',
field=models.BooleanField(default=False),
),
]
| [
"django.db.migrations.RemoveField",
"django.db.models.BooleanField"
] | [((241, 297), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""game"""', 'name': '"""winner"""'}), "(model_name='game', name='winner')\n", (263, 297), False, 'from django.db import migrations, models\n'), ((437, 471), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (456, 471), False, 'from django.db import migrations, models\n')] |
#############################################################################
##
## Copyright (c) 2012 Riverbank Computing Limited <<EMAIL>>
##
## This file is part of PyQt.
##
## This file may be used under the terms of the GNU General Public
## License versions 2.0 or 3.0 as published by the Free Software
## Foundation and appearing in the files LICENSE.GPL2 and LICENSE.GPL3
## included in the packaging of this file. Alternatively you may (at
## your option) use any later version of the GNU General Public
## License if such license has been publicly approved by Riverbank
## Computing Limited (or its successors, if any) and the KDE Free Qt
## Foundation. In addition, as a special exception, Riverbank gives you
## certain additional rights. These rights are described in the Riverbank
## GPL Exception version 1.1, which can be found in the file
## GPL_EXCEPTION.txt in this package.
##
## If you are unsure which license is appropriate for your use, please
## contact the sales department at <EMAIL>.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
#############################################################################
import sys
import os.path
if sys.hexversion >= 0x03000000:
from PyQt4.uic.port_v3.as_string import as_string
else:
from PyQt4.uic.port_v2.as_string import as_string
class IconCache(object):
"""Maintain a cache of icons. If an icon is used more than once by a GUI
then ensure that only one copy is created.
"""
def __init__(self, object_factory, qtgui_module):
"""Initialise the cache."""
self._object_factory = object_factory
self._qtgui_module = qtgui_module
self._base_dir = ''
self._cache = []
def set_base_dir(self, base_dir):
""" Set the base directory to be used for all relative filenames. """
self._base_dir = base_dir
def get_icon(self, iconset):
"""Return an icon described by the given iconset tag."""
# Handle a themed icon.
theme = iconset.attrib.get('theme')
if theme is not None:
return self._object_factory.createQObject("QIcon.fromTheme",
'icon', (as_string(theme), ), is_attribute=False)
# Handle an empty iconset property.
if iconset.text is None:
return None
iset = _IconSet(iconset, self._base_dir)
try:
idx = self._cache.index(iset)
except ValueError:
idx = -1
if idx >= 0:
# Return the icon from the cache.
iset = self._cache[idx]
else:
# Follow uic's naming convention.
name = 'icon'
idx = len(self._cache)
if idx > 0:
name += str(idx)
icon = self._object_factory.createQObject("QIcon", name, (),
is_attribute=False)
iset.set_icon(icon, self._qtgui_module)
self._cache.append(iset)
return iset.icon
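# Illustrative usage sketch (added comment; `factory` and `iconset_element` are assumptions that
# stand in for the compiler's object factory and a parsed <iconset> property element):
#   cache = IconCache(factory, QtGui)
#   icon = cache.get_icon(iconset_element)   # equal iconsets reuse the same generated QIcon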
class _IconSet(object):
"""An icon set, ie. the mode and state and the pixmap used for each."""
def __init__(self, iconset, base_dir):
"""Initialise the icon set from an XML tag."""
# Set the pre-Qt v4.4 fallback (ie. with no roles).
self._fallback = self._file_name(iconset.text, base_dir)
self._use_fallback = True
# Parse the icon set.
self._roles = {}
for i in iconset:
file_name = i.text
if file_name is not None:
file_name = self._file_name(file_name, base_dir)
self._roles[i.tag] = file_name
self._use_fallback = False
# There is no real icon yet.
self.icon = None
@staticmethod
def _file_name(fname, base_dir):
""" Convert a relative filename if we have a base directory. """
fname = fname.replace("\\", "\\\\")
if base_dir != '' and fname[0] != ':' and not os.path.isabs(fname):
fname = os.path.join(base_dir, fname)
return fname
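    # Descriptive note (added): backslashes are doubled so the name survives later string
    # escaping; resource paths starting with ':' and absolute paths are returned untouched,
    # while any other relative name is joined onto base_dir.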
def set_icon(self, icon, qtgui_module):
"""Save the icon and set its attributes."""
if self._use_fallback:
icon.addFile(self._fallback)
else:
for role, pixmap in self._roles.items():
if role.endswith("off"):
mode = role[:-3]
state = qtgui_module.QIcon.Off
elif role.endswith("on"):
mode = role[:-2]
state = qtgui_module.QIcon.On
else:
continue
mode = getattr(qtgui_module.QIcon, mode.title())
if pixmap:
icon.addPixmap(qtgui_module.QPixmap(pixmap), mode, state)
else:
icon.addPixmap(qtgui_module.QPixmap(), mode, state)
self.icon = icon
def __eq__(self, other):
"""Compare two icon sets for equality."""
if not isinstance(other, type(self)):
return NotImplemented
if self._use_fallback:
if other._use_fallback:
return self._fallback == other._fallback
return False
if other._use_fallback:
return False
return self._roles == other._roles
| [
"PyQt4.uic.port_v2.as_string.as_string"
] | [((2282, 2298), 'PyQt4.uic.port_v2.as_string.as_string', 'as_string', (['theme'], {}), '(theme)\n', (2291, 2298), False, 'from PyQt4.uic.port_v2.as_string import as_string\n')] |
#coding = utf-8
#running in python3.6
import os
import time
GP_1_natural = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=google-play\&zerokey_click_id=\&zerokey_channel=14449_"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=google-play\&cskey_click_id=\&cskey_channel=14449_"'
] # GP organic (-1)
GP_1_na_or_no8 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=null\&zerokey_click_id=\&zerokey_channel=14449_"'
] # GP organic (-1) / non-GP organic (8): (non-GP organic: GP version >= 6.8.24; GP organic: GP version < 6.8.24)
Natural0 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&utm_medium=banner\&utm_campaign=appflood\&zerokey_channel\&gokey_channel=\&from_3g_channel=organic"'
] # organic with channel info: (0)
No_natural7 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&utm_medium=banner\&cskey_click_id\&from_3g_chadnnel=organic"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&utm_medium=banner\&utm_campaign=appflood\&zerokey_channel\&cskey_channel=\&from_3G_channel=anything"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=suing\&from_3G_channel=withCount"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&utm_medium=banner\&utm_campaign=appflood\&zerokey_channel\&cskey_channel=1452"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=not%20set\&utm_medium=banner\&utm_campaign=appflood\&zerokey_channel\&cskey_channel="',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=4568\&from_3g_channel"'
] # non-organic with channel info: (7)
GA1 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&zerokey_click_id=\&zerokey_channel=14449_\&from_3g_channel=organic"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=not%20set\&utm_medium=banner\&utm_campaign=appflood_speed\&cskey_click_id=5a49def5b4bfab58e618b191fe7704ed011\&cskey_channel=153_"'
] # GA paid acquisition (1)
AF1 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=&zerokey_click_id=&af_tranid=AL7oybEZ6cvS07b-fDV5nA&pid=mundomedia_int&c=CD25730_212537703&clickid=9ad8dbfe-2f0b-5009-b692-94e803b13a60&af_siteid=CD25730_212537703&campaignid=e2c4035474r2w2&af_sub1=CD25730&af_status=Non-organic&media_source=aa &campaign=xm_ediakey&is_fb=false&agency=ddd"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood&zerokey_channel=14449_&af_tranid=AL7oybEZ6cvS07b-fDV5nA&pid=mundomedia_int&c=CD25730_212537703&clickid=9ad8dbfe-2f0b-5009-b692-94e803b13a60&af_siteid=CD25730_212537703&campaignid=e2c4035474r2w2&af_sub1=CD25730&af_status=Non-organic&media_source=aa &campaign=xmediazerokey&is_fb=false&agency="',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood&zerokey_channel=14449_&af_tranid=AL7oybEZ6cvS07b-fDV5nA&pid=mundomedia_int&c=CD25730_212537703&clickid=9ad8dbfe-2f0b-5009-b692-94e803b13a60&af_siteid=CD25730_212537703&campaignid=e2c4035474r2w2&af_sub1=CD25730&af_status=Non-organic&media_source=aa &campaign=xmediazerokey&agency=null"'
] # GA broadcast simulating ordinary AF paid acquisition; ordinary AF paid acquisition, type (1)
AF_FB2 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood&zerokey_click_id=&zerokey_channel=14449_&af_tranid=AL7oybEZ6cvS07b-fDV5nA&pid=mundomedia_int&c=CD25730_212537703&clickid=9ad8dbfe-2f0b-5009-b692-94e803b13a60&af_siteid=CD25730_212537703&campaignid=e2c4035474r2w2&af_sub1=CD25730&af_status=Non-organic&media_source=Facebook Ads&campaign=xmediazerokey_&is_fb=true"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=not%20set&af_tranid=AL7oybEZ6cvS07b-fDV5nA&pid=mundomedia_int&c=CD25730_212537703&clickid=9ad8dbfe-2f0b-5009-b692-94e803b13a60&af_siteid=CD25730_212537703&campaignid=e2c4035474r2w2&af_sub1=CD25730&af_status=Non-organic&media_source=Facebook Ads&campaign=xmxxmxlihahhd_ediazerokey&is_fb=true"'
] # GA broadcast simulating AF Facebook self-serve (2)
AF_FBN3 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=not%20set&af_tranid=AL7oybEZ6cvS07b-fDV5nA&pid=mundomedia_int&c=CD25730_212537703&clickid=9ad8dbfe-2f0b-5009-b692-94e803b13a60&af_siteid=CD25730_212537703&campaignid=e2c4035474r2w2&af_sub1=CD25730&af_status=Non-organic&media_source=Facebook Ads&campaign=&is_fb=true"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood&zerokey_click_id=&zerokey_channel=14449_&af_tranid=AL7oybEZ6cvS07b-fDV5nA&pid=mundomedia_int&c=CD25730_212537703&clickid=9ad8dbfe-2f0b-5009-b692-94e803b13a60&af_siteid=CD25730_212537703&campaignid=e2c4035474r2w2&af_sub1=CD25730&af_status=Non-organic&media_source=Facebook Ads&campaign=xm&is_fb=true"'
] # GA broadcast simulating AF Facebook non-self-serve (3)
AF_aw6 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&gokey_channel=\&gokey_click_id=14449\&af_tranid=AL7oybEZ6cvS07b-fDV5nA\&agency=123\&campaignid=111\&media_source=googleadwords_int"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&gokey_channel=\&gokey_click_id=14449\&af_tranid=AL7oybEZ6cvS07b-fDV5nA\&agency=123\&campaignid=111\&c=CD25730_212537703\&clickid=9ad8dbfe\&af_status=Non-organic\&af_siteid=CD25730_212537703\&media_source=googleadwords_int"'
] # GA broadcast simulating AF AdWords non-self-serve (6)
AF_aw4 = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&gokey_channel=\&gokey_click_id=14449\&af_tranid=AL7oybEZ6cvS07b-fDV5nA\&agency=\&campaignid=111\&c=CD25730_212537703\&clickid=9ad8dbfe\&af_status=Non-organic\&af_siteid=CD25730_212537703&is_fb=false\&media_source=googleadwords_int"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&gokey_channel=\&gokey_click_id=14449\&af_tranid=AL7oybEZ6cvS07b-fDV5nA\&agency=\&campaignid=111\&media_source=googleadwords_int"'
] # GA broadcast simulating AF AdWords self-serve (4)
AF_aw_GDN = [
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&gokey_channel=\&gokey_click_id=14449\&af_tranid=AL7oybEZ6cvS07b-fDV5nA\&agency=\&campaignid=111\&c=CD25730_212537703\&clickid=9ad8dbfe\&af_status=Non-organic\&af_siteid=CD25730_212537703&is_fb=false\&media_source=googleadwords_int"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&gokey_channel=\&gokey_click_id=14449\&af_tranid=AL7oybEZ6cvS07b-fDV5nA\&agency=\&campaignid=111\&media_source=googleadwords_int"',
'adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "utm_source=appflood\&gokey_channel=\&gokey_click_id=14449\&af_tranid=AL7oybEZ6cvS07b-fDV5nA\&agency=123\&campaignid=111\&media_source=googleadwords_int"'
] # GA broadcast simulating AF AdWords sub-channel GDN (when testing, replace campaignid=111 with the real campaignid passed in by the client)
all_buychannel = [GP_1_natural,GP_1_na_or_no8,Natural0,No_natural7,GA1,AF1,AF_FB2,AF_FBN3,AF_aw4,AF_aw6]
phone_api = int(os.popen('adb shell getprop ro.build.version.sdk').read()) #get API version of Android phone
loo = str(os.popen('adb shell getprop ro.product.model').read())#get model name of Android phone
phone_model_name = ''
for i in loo:
if i == '\n':
pass
else:
phone_model_name = phone_model_name+i
#print(phone_model_name)
log_time = time.localtime(time.time())
log_name = str('%s-'%(phone_model_name)+'%s%s%s%s%s%s'%(log_time.tm_year,log_time.tm_mon,log_time.tm_mday,log_time.tm_hour,log_time.tm_min,log_time.tm_sec))
#print(log_name)
#record logat
def fun_buychannel(list):
buychannel_list = []
    if phone_api >= 26:  # identify which buychannel list we should use
for x in list:
            y = x + ' -n com.nj.fface/com.appsflyer.MultipleInstallBroadcastReceiver'  # replace "com.nj.fface" with your own package name
buychannel_list.append(y)
# print('26+')
else:
buychannel_list = list
# print('26-')
for i in buychannel_list:
print(i)
#clear the app
os.system('adb shell pm clear com.nj.fface')
time.sleep(1)
#start the app
        os.system('adb shell am start -n com.nj.fface/com.nj.fface.activity.SplashActivity')  # replace "com.nj.fface" with your own package name
time.sleep(1)
#set buychannel
os.system(i)
time.sleep(17)
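# Descriptive note (added): on Android 8.0+ (API level 26) implicit broadcasts are restricted,
# which is why the receiver component is appended explicitly above; the resulting command looks
# roughly like (illustrative only):
#   adb shell am broadcast -a com.android.vending.INSTALL_REFERRER --es referrer "..." \
#       -n com.nj.fface/com.appsflyer.MultipleInstallBroadcastReceiver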
def run_log():
os.popen("adb logcat>%s.txt"%(log_name))
if __name__ == '__main__' :
for ls in all_buychannel:
fun_buychannel(ls)
| [
"os.system",
"os.popen",
"time.time",
"time.sleep"
] | [((7822, 7833), 'time.time', 'time.time', ([], {}), '()\n', (7831, 7833), False, 'import time\n'), ((8818, 8858), 'os.popen', 'os.popen', (["('adb logcat>%s.txt' % log_name)"], {}), "('adb logcat>%s.txt' % log_name)\n", (8826, 8858), False, 'import os\n'), ((8498, 8542), 'os.system', 'os.system', (['"""adb shell pm clear com.nj.fface"""'], {}), "('adb shell pm clear com.nj.fface')\n", (8507, 8542), False, 'import os\n'), ((8552, 8565), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8562, 8565), False, 'import time\n'), ((8595, 8684), 'os.system', 'os.system', (['"""adb shell am start -n com.nj.fface/com.nj.fface.activity.SplashActivity"""'], {}), "(\n 'adb shell am start -n com.nj.fface/com.nj.fface.activity.SplashActivity')\n", (8604, 8684), False, 'import os\n'), ((8714, 8727), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8724, 8727), False, 'import time\n'), ((8758, 8770), 'os.system', 'os.system', (['i'], {}), '(i)\n', (8767, 8770), False, 'import os\n'), ((8780, 8794), 'time.sleep', 'time.sleep', (['(17)'], {}), '(17)\n', (8790, 8794), False, 'import time\n'), ((7441, 7491), 'os.popen', 'os.popen', (['"""adb shell getprop ro.build.version.sdk"""'], {}), "('adb shell getprop ro.build.version.sdk')\n", (7449, 7491), False, 'import os\n'), ((7545, 7591), 'os.popen', 'os.popen', (['"""adb shell getprop ro.product.model"""'], {}), "('adb shell getprop ro.product.model')\n", (7553, 7591), False, 'import os\n')] |
#!/usr/bin/python3
# Standard library imports
from datetime import datetime, timedelta
import hashlib
import json
import logging
import os
import sys
from urllib.parse import urljoin
from urllib.request import urlretrieve
import shutil
from sys import platform
from typing import Any, Dict, List, Optional, Tuple
# Third party imports
import bs4
from bs4 import BeautifulSoup
import requests
# Local imports
# Globals
# Run this script from the command line to generate or update the api folder in src/ebay_rest.
# For a complete directory of eBay's APIs visit https://developer.ebay.com/docs. Ignore the "Traditional" APIs.
# For an introduction to OpenAPI and how to use eBay's REST-ful APIs
# visit https://developer.ebay.com/api-docs/static/openapi-swagger-codegen.html.
class Locations:
""" Where things are located in the locale file store. """
target_directory: str = 'api'
target_path: str = '../src/ebay_rest/' + target_directory
cache_path: str = './' + target_directory + '_cache'
state_file: str = 'state.json'
state_path_file: str = os.path.join(cache_path, state_file)
class State:
""" Track the state of progress, even if the program is re-run. """
def __init__(self) -> None:
try:
with open(Locations.state_path_file) as file_handle:
self._states = json.load(file_handle)
except OSError:
self._states = dict()
def get(self, key: str) -> str or None:
if key in self._states:
return self._states[key]
else:
return None
def set(self, key: str, value: str) -> None:
self._states[key] = value
try:
with open(Locations.state_path_file, 'w') as file_handle:
json.dump(self._states, file_handle, sort_keys=True, indent=4)
except OSError:
message = f"Can't write to {Locations.state_path_file}."
logging.fatal(message)
sys.exit(message)
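# Illustrative usage sketch (added comment; the key and value are assumptions):
#   s = State()
#   s.set('tool_date_time', '2021-01-01T00:00:00.000000Z')  # persisted to api_cache/state.json
#   s.get('tool_date_time')  # -> the stored string, or None if the key was never set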
class Contract:
def __init__(self, limit: int = 100) -> None:
self.contracts = self.get_contracts(limit=limit)
self.cache_contracts()
self.patch_contracts()
def cache_contracts(self) -> None:
for contract in self.contracts:
[category, call, link_href, file_name] = contract
destination = os.path.join(Locations.cache_path, file_name)
urlretrieve(link_href, destination) # if a destination file exists, it will be replaced
def get_contracts(self, limit: int = 100) -> List[List[str]]:
contracts = []
overview_links = []
base = 'https://developer.ebay.com/'
logging.info('Find eBay OpenAPI 3 JSON contracts.')
soup = self.get_soup_via_link(urljoin(base, 'docs'))
for link in soup.find_all('a', href=lambda href: href and 'overview.html' in href):
overview_links.append(urljoin(base, link.get('href')))
if len(contracts) >= limit:
break
assert (len(overview_links) > 0), 'No contract overview pages found!'
for overview_link in overview_links:
soup = self.get_soup_via_link(overview_link)
for link in soup.find_all('a', href=lambda href: href and 'oas3.json' in href, limit=1):
link_href = urljoin(base, link.get('href'))
parts = link_href.split('/')
category = parts[5]
call = parts[6].replace('-', '_')
file_name = parts[-1]
record = [category, call, link_href, file_name]
if ('beta' not in call) and (record not in contracts):
contracts.append(record)
logging.info(record)
if len(contracts) >= limit: # useful for expediting debugging with a reduced data set
break
assert (len(contracts) > 0), 'No contracts found on any overview pages!'
return contracts
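    # Descriptive note (added): each contract record has the shape
    # [category, call, link_href, file_name], for example
    # ['sell', 'fulfillment', 'https://.../sell_fulfillment_v1_oas3.json', 'sell_fulfillment_v1_oas3.json'].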
@staticmethod
def patch_contracts() -> None:
# In the Sell Fulfillment API, the model 'Address' is returned with attribute 'countryCode'.
# However, the JSON specifies 'country' instead, thus Swagger generates the wrong API.
file_location = os.path.join(Locations.cache_path, 'sell_fulfillment_v1_oas3.json')
try:
with open(file_location) as file_handle:
data = json.load(file_handle)
properties = data['components']['schemas']['Address']['properties']
if 'country' in properties:
properties['countryCode'] = properties.pop('country') # Warning, alphabetical key order spoiled.
with open(file_location, 'w') as outfile:
json.dump(data, outfile, sort_keys=True, indent=4)
else:
logging.warning('Patching sell_fulfillment_v1_oas3.json is no longer needed.')
except FileNotFoundError:
logging.error(f"Can't open {file_location}.")
# A description in buy_browse_v1_oas3.json contains invalid escapes.
file_location = os.path.join(Locations.cache_path, 'buy_browse_v1_oas3.json')
bad = '\\\n \\'
problem_fixed = True
content = ''
try:
with open(file_location) as file_handle:
content = file_handle.read()
if bad in content:
problem_fixed = False
content = content.replace(bad, '')
except FileNotFoundError:
logging.error(f"Can't open {file_location}.")
if problem_fixed:
logging.warning('Patching buy_browse_v1_oas3.json is no longer needed.')
else:
with open(file_location, 'w') as file_handle:
file_handle.write(content)
@staticmethod
def get_soup_via_link(url: str) -> bs4.BeautifulSoup:
# Make a GET request to fetch the raw HTML content
html_content = requests.get(url).text
# Parse the html content
return BeautifulSoup(html_content, "html.parser")
def get_base_paths_and_flows(self) -> Tuple[dict, Dict[Any, dict], Dict[Any, Dict[Any, Optional[Any]]]]:
"""Process the JSON contract and extract two things for later use.
1) the base_path for each category_call (e.g. buy_browse)
2) the security flow for each scope in each category_call
3) the scopes for each call in each category_call
"""
base_paths = {}
flows = {}
scopes = {}
for [category, call, link_href, file_name] in self.contracts:
source = os.path.join(Locations.cache_path, file_name)
with open(source) as file_handle:
try:
data = json.load(file_handle)
except ValueError:
message = "Invalid JSON in " + source
logging.fatal(message) # Invalid \escape: line 3407 column 90 (char 262143)
sys.exit(message)
# Get base path
base_path = data['servers'][0]['variables']['basePath']['default']
# Get flows for this category_call
category_flows = (
data['components']['securitySchemes']['api_auth']['flows']
)
flow_by_scope = {} # dict of scope: flow type
for flow, flow_details in category_flows.items():
for scope in flow_details['scopes']:
flow_by_scope[scope] = flow
# Get scope for each individually path-ed call
operation_id_scopes = {}
for path, path_methods in data['paths'].items():
for method, method_dict in path_methods.items():
if method not in ('get', 'post', 'put', 'delete'):
# Consider only the HTTP request parts
continue
operation_id = method_dict['operationId'].lower()
security_list = method_dict.get('security', [])
if len(security_list) > 1:
raise ValueError(
'Expected zero/one security entry per path!')
elif len(security_list) == 1:
security = security_list[0]['api_auth']
else:
security = None
if operation_id in operation_id_scopes:
logging.warning('Duplicate operation!')
logging.warning(path, path_methods)
logging.warning(method, method_dict)
raise ValueError('nope')
operation_id_scopes[operation_id] = security
# TODO Get headers parameters
# look for this "in": "header",
name = category + '_' + call
base_paths[name] = base_path
flows[name] = flow_by_scope
scopes[name] = operation_id_scopes
return base_paths, flows, scopes
def install_tools() -> None:
if platform == 'darwin': # OS X or MacOS
logging.info('Install or update the package manager named HomeBrew.')
os.system('/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"')
if os.path.isfile('/usr/local/bin/swagger-codegen'):
logging.info('Upgrade the code generator from Swagger. https://swagger.io/')
os.system('brew upgrade swagger-codegen')
else:
logging.info('Install the code generator from Swagger. https://swagger.io/')
os.system('brew install swagger-codegen')
logging.info('Test the generator installation by invoking its help screen.')
os.system('/usr/local/bin/swagger-codegen -h')
elif platform == 'linux': # Linux platform
# Don't install packages without user interaction.
if not os.path.isfile('swagger-codegen-cli.jar'):
os.system(
'wget https://repo1.maven.org/maven2/io/swagger/codegen/v3/'
+ 'swagger-codegen-cli/3.0.26/swagger-codegen-cli-3.0.26.jar '
+ '-O swagger-codegen-cli.jar'
)
logging.info('Test the generator installation by invoking its help screen.')
os.system('java -jar swagger-codegen-cli.jar -h')
else:
message = f'Please extend install_tools() for your {platform} platform.'
logging.fatal(message)
sys.exit(message)
def delete_folder_contents(path_to_folder: str):
list_dir = os.listdir(path_to_folder)
for filename in list_dir:
file_path = os.path.join(path_to_folder, filename)
if os.path.isfile(file_path) or os.path.islink(file_path):
logging.debug("deleting file:", file_path)
os.unlink(file_path)
elif os.path.isdir(file_path):
logging.debug("deleting folder:", file_path)
shutil.rmtree(file_path)
class Process:
""" The processing steps are split into bite sized methods. """
def __init__(self) -> None:
self.file_ebay_rest = os.path.abspath('../src/ebay_rest/a_p_i.py')
self.file_setup = os.path.abspath('../setup.cfg')
self.path_cache = os.path.abspath(Locations.cache_path)
self.path_final = os.path.abspath(Locations.target_path)
self.path_ebay_rest = os.path.abspath('../src/ebay_rest')
assert os.path.isdir(self.path_cache), \
            'Fatal error. You must first run the script generate_api_cache.py.'
for (_root, dirs, _files) in os.walk(self.path_cache):
dirs.sort()
self.names = dirs
break
with open(os.path.join(Locations.cache_path, 'base_paths.json')) as file_handle:
self.base_paths = json.load(file_handle)
with open(os.path.join(Locations.cache_path, 'flows.json')) as file_handle:
self.flows = json.load(file_handle)
with open(os.path.join(Locations.cache_path, 'scopes.json')) as file_handle:
self.scopes = json.load(file_handle)
def copy_libraries(self) -> None:
""" Copy essential parts of the generated eBay libraries to within the src folder. """
# purge what might already be there
for filename in os.listdir(self.path_final):
file_path = os.path.join(self.path_final, filename)
if os.path.isdir(file_path):
shutil.rmtree(file_path)
# copy each library's directory
for name in self.names:
src = os.path.join(self.path_cache, name, name)
dst = os.path.join(self.path_final, name)
_destination = shutil.copytree(src, dst)
def fix_imports(self) -> None:
""" The deeper the directory, the more dots are needed to make the correct relative path. """
for name in self.names:
self._fix_imports_recursive(name, '..', os.path.join(self.path_final, name))
def _fix_imports_recursive(self, name: str, dots: str, path: str) -> None:
""" This does the recursive part of fix_imports. """
for (_root, dirs, files) in os.walk(path):
swaps = [ # order is crucial, put more specific swaps before less
(f'import {name}.models', f'from {dots}{name} import models'),
(f'from models', f'from {dots}{name}.models'),
(f'import {name}', f'import {dots}{name}'),
(f'from {name}', f'from {dots}{name}'),
(f'{name}.models', f'models'),
]
for file in files:
target_file = os.path.join(path, file)
new_lines = ''
with open(target_file) as file_handle:
for old_line in file_handle:
for (original, replacement) in swaps:
if original in old_line:
old_line = old_line.replace(original, replacement)
break # only the first matching swap should happen
new_lines += old_line
with open(target_file, 'w') as file_handle:
file_handle.write(new_lines)
dots += '.'
for directory in dirs:
self._fix_imports_recursive(name, dots, os.path.join(path, directory))
break
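    # Descriptive note (added): for a library named e.g. 'buy_browse' the swaps above rewrite
    # 'import buy_browse.models' to 'from ..buy_browse import models' and
    # 'from buy_browse import x' to 'from ..buy_browse import x', adding one extra dot per
    # directory level so the relative imports still resolve after the copy.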
def merge_setup(self) -> None:
""" Merge the essential bits of the generated setup files into the master. """
# compile a list of all unique requirements from the generated libraries
start_tag = 'REQUIRES = ['
end_tag = ']\n'
requirements = set()
for name in self.names:
src = os.path.join(self.path_cache, name, 'setup.py')
with open(src) as file:
for line in file:
if line.startswith(start_tag):
line = line.replace(start_tag, '')
line = line.replace(end_tag, '')
parts = line.split(', ')
for part in parts:
requirements.add(part)
break
requirements = list(requirements)
requirements.sort()
# include these with the other requirements for our package
insert_lines = ''
for requirement in requirements:
insert_lines += f' {requirement}\n'
# TODO This was commented out because it caused an error. Is something like it truly needed?
# self._put_anchored_lines(target_file=self.file_setup, anchor='setup.cfg', insert_lines=insert_lines)
def make_includes(self) -> None:
""" Make includes for all the libraries. """
lines = []
for name in self.names:
lines.append(f'from .{Locations.target_directory} import {name}')
line = f'from .{Locations.target_directory}.{name}.rest import ApiException as {self._camel(name)}Exception'
lines.append(line)
insert_lines = '\n'.join(lines) + '\n'
self._put_anchored_lines(target_file=self.file_ebay_rest, anchor='er_imports', insert_lines=insert_lines)
def get_methods(self) -> List[Tuple[str, str, str, str, str, str]]:
""" For all modules, get all methods. """
# catalog the module files that contain all method implementations
modules = []
for name in self.names:
path = os.path.join(self.path_cache, name, name, 'api')
for (root, _dirs, files) in os.walk(path):
for file in files:
if file != '__init__.py':
modules.append((name, file.replace('.py', ''), os.path.join(root, file)))
# catalog all methods in all modules
methods = []
method_marker_part = '_with_http_info'
method_marker_whole = method_marker_part + '(self,'
docstring_marker = '"""'
bad_docstring_markers = (
'>>> ',
'synchronous',
'async_req',
'request thread',
)
typo_remedy = ( # pairs of typos found in docstrings and their remedy
('cerate', 'create'), # noqa: - suppress flake8 compatible linters, misspelling is intended
('distibuted', 'distributed'), # noqa:
('http:', 'https:'), # noqa:
('identfier', 'identifier'), # noqa:
('Limt', 'Limit'), # noqa:
('lisitng', 'listing'), # noqa:
('maketplace', 'marketplace'), # noqa:
('motorcyles', 'motorcycles'), # noqa:
('parmeter', 'parameter'), # noqa:
('publlish', 'publish'), # noqa:
)
for (name, module, path) in modules:
step = 0
with open(path) as file_handle:
for line in file_handle:
if step == 0: # looking for the next method
if method_marker_whole in line:
(method_and_params, _junk) = line.split(')')
(method, params) = method_and_params.split('(')
method = method.replace(' def ', '')
method = method.replace(method_marker_part, '')
params = params.replace('self, ', '')
step += 1
elif step == 1: # looking for the start of the docstring block
if docstring_marker in line:
docstring = line
step += 1
elif step == 2: # looking for the end of the docstring block
if docstring_marker not in line:
bad = False
for bad_docstring_marker in bad_docstring_markers:
if bad_docstring_marker in line:
bad = True
break
if not bad:
docstring += line
else:
docstring += line
for (typo, remedy) in typo_remedy:
docstring = docstring.replace(typo, remedy)
methods.append((name, module, path, method, params, docstring))
step = 0
methods.sort()
return methods
def make_methods(self, methods: List[Tuple[str, str, str, str, str, str]]) -> None:
""" Make all the python methods and insert them where needed. """
code = "\n"
for method in methods:
code += self._make_method(method)
self._put_anchored_lines(target_file=self.file_ebay_rest, anchor='er_methods', insert_lines=code)
def _make_method(self, method: Tuple[str, str, str, str, str, str]) -> str:
""" Return the code for one python method. """
(name, module, path, method, params, docstring) = method
ignore_long = ' # noqa: E501' # flake8 compatible linters should not warn about long lines
# Fix how the docstring expresses optional parameters then end up in **kwargs
# catalog all parameters listed in the docstring
docstring_params = set()
for line in docstring.split('\n'):
if ':param' in line:
for word in line.split(' '):
if word.endswith(':'):
docstring_params.add(word.replace(':', ''))
break
# determine if any docstring parameters are method parameters
has_docstring_problem = False
for docstring_param in docstring_params:
if docstring_param not in params:
has_docstring_problem = True
break
# if we found an optional parameter, then add a provision for 'optionals' aka *args in the right spot
if has_docstring_problem:
pass # TODO Do something to make the comments aka docstring handle optional parameters properly
# prepare the method type by testing for 'offset' parameter
method_type = 'paged' if (':param str offset' in docstring) else 'single'
# identify if this is a user_access_token routine
operation_id = method.lower().replace('_', '')
scopes = self.scopes[name][operation_id]
if not scopes:
# Assume application keys
flows = {'clientCredentials'}
else:
flows = {self.flows[name][scope] for scope in scopes}
if len(flows) != 1:
if operation_id in ('getitemconditionpolicies',) or module in ('subscription_api',):
# This usually uses the client credentials method
flows = {'clientCredentials'}
else:
message = 'Could not identify authorization method!'
logging.warning(message)
logging.warning('method: ', method)
logging.warning('scopes: ', scopes)
logging.warning('flows: ', flows)
raise ValueError(message)
auth_method, = flows # note tuple unpacking of set
user_access_token = auth_method == 'authorizationCode'
# identify and prep for parameter possibilities
stars_kwargs = '**kwargs'
params_modified = params.split(', ')
if len(params_modified) == 0:
has_args = False
has_kw = False
else:
if params_modified[-1] == stars_kwargs:
has_kw = True
params_modified.pop()
else:
has_kw = False
if len(params_modified) > 0:
has_args = True
params_modified = ', '.join(params_modified)
else:
has_args = False
# Prepare the list of rate lookup information that will be used for throttling.
resource_name_base = name.replace('_', '.')
resource_name_module = module.replace('_api', '')
rate = [resource_name_base, resource_name_module]
code = f" def {name}_{method}(self, {params}):{ignore_long}\n"
code += docstring
code += " try:\n"
code += f" return self._method_{method_type}(" \
f"{name}.Configuration," \
f" '{self.base_paths[name]}'," \
f" {name}.{self._camel(module)}," \
f" {name}.ApiClient," \
f" '{method}'," \
f" {self._camel(name)}Exception," \
f" {user_access_token}," \
f" {rate},"
if has_args:
if ',' in params_modified:
code += f" ({params_modified}),"
else:
code += f" {params_modified},"
else:
code += f" None,"
if has_kw:
code += f" **kwargs"
else:
code += f" None"
code += f"){ignore_long}\n"
code += " except Error:\n"
code += " raise\n"
code += "\n"
return code
def remove_duplicates(self) -> None:
""" Deduplicate identical .py files found in all APIs.
for example when comments are ignored the rest.py files appear identical. """
# build a catalog that includes a hashed file signature
catalog = []
for name in self.names:
catalog.extend(self._remove_duplicates_recursive_catalog(name, os.path.join(self.path_final, name)))
# count how many times each signature appears
signature_tally = {}
for (name, file, path, signature) in catalog:
if signature in signature_tally:
signature_tally[signature] = + 1
else:
signature_tally[signature] = 1
# make a sub catalog that just includes signature repeaters
catalog_repeaters = []
for values in catalog:
(name, file, path, signature) = values
if signature_tally[signature] > 1:
catalog_repeaters.append(values)
# TODO apply the DRY principle to the repeaters
def _remove_duplicates_recursive_catalog(self, name: str, path: str) -> list:
""" This does the recursive part of cataloging for remove_duplicates. """
catalog = []
for (_root, dirs, files) in os.walk(path):
for file in files:
if file != '__init__.py' and file.endswith('.py'):
target_file = os.path.join(path, file)
with open(target_file) as file_handle:
code_text = file_handle.read()
# TODO Remove whitespace and comments from the Python code before hashing.
m = hashlib.sha256()
m.update(code_text.encode())
catalog.append((name, file, target_file, m.digest()))
for directory in dirs:
catalog.extend(self._remove_duplicates_recursive_catalog(name, os.path.join(path, directory)))
return catalog
@staticmethod
def _camel(name: str) -> str:
""" Convert a name with underscore separators to upper camel case. """
camel = ''
for part in name.split('_'):
camel += part.capitalize()
return camel
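    # Worked example for _camel (descriptive comment, not original code):
    #   _camel('sell_fulfillment')  ->  'SellFulfillment'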
@staticmethod
def _put_anchored_lines(target_file: str, anchor: str, insert_lines: str) -> None:
""" In the file replace what is between anchors with new lines of code. """
if os.path.isfile(target_file):
new_lines = ''
start = f"ANCHOR-{anchor}-START"
end = f"ANCHOR-{anchor}-END"
start_found = False
end_found = False
with open(target_file) as file:
for old_line in file:
if not start_found:
new_lines += old_line
if start in old_line:
start_found = True
new_lines += insert_lines
elif start_found and not end_found:
if end in old_line:
end_found = True
new_lines += old_line
else:
new_lines += old_line
if start_found and end_found:
with open(target_file, 'w') as file:
file.write(new_lines)
else:
logging.error(f"Can't find proper start or end anchors for {anchor} in {target_file}.")
else:
logging.error(f"Can't find {target_file}")
def main() -> None:
# while debugging it is handy to change the log level from INFO to DEBUG
logging.basicConfig(format='%(asctime)s %(levelname)s %(filename)s %(funcName)s: %(message)s', level=logging.DEBUG)
# ensure that we have a cache
if os.path.isdir(Locations.cache_path):
# delete_folder_contents(Locations.cache_path) # TODO flush the cache when we want a fresh start
pass
else:
os.mkdir(Locations.cache_path)
s = State() # Track the state of progress
# install tools if they are missing # TODO
# or, update tools if it has been more than a day
key = 'tool_date_time'
dt_format = "%Y-%m-%dT%H:%M:%S.%fZ"
if s.get(key) is None or datetime.strptime(s.get(key), dt_format) < datetime.now() - timedelta(days=1):
# install_tools()
s.set(key, datetime.now().strftime(dt_format))
c = Contract(limit=100) # hint, save time by reducing the limit while debugging
base_paths, flows, scopes = c.get_base_paths_and_flows()
logging.info('For each contract generate and install a library.')
for contract in c.contracts:
[category, call, link_href, file_name] = contract
source = os.path.join(Locations.cache_path, file_name)
name = f'{category}_{call}'
command = f' generate -l python -o {Locations.cache_path}/{name} -DpackageName={name} -i {source}'
if platform == 'darwin': # OS X or MacOS
command = '/usr/local/bin/swagger-codegen' + command
elif platform == 'linux': # Linux
command = 'java -jar swagger-codegen-cli.jar' + command
else:
assert False, f'Please extend main() for your {platform} platform.'
os.system(command)
destination = os.path.join(Locations.cache_path, 'base_paths.json')
with open(destination, 'w') as outfile:
json.dump(base_paths, outfile, sort_keys=True, indent=4)
destination = os.path.join(Locations.cache_path, 'flows.json')
with open(destination, 'w') as outfile:
json.dump(flows, outfile, sort_keys=True, indent=4)
destination = os.path.join(Locations.cache_path, 'scopes.json')
with open(destination, 'w') as outfile:
json.dump(scopes, outfile, sort_keys=True, indent=4)
# Refrain from altering the sequence of the method calls because there may be dependencies.
p = Process()
p.copy_libraries()
p.fix_imports()
p.merge_setup()
p.make_includes()
# p.remove_duplicates() # uncomment the method call when work on it resumes
p.make_methods(p.get_methods())
return
if __name__ == "__main__":
main()
| [
"logging.debug",
"sys.exit",
"os.path.islink",
"datetime.timedelta",
"logging.info",
"logging.error",
"os.walk",
"os.listdir",
"urllib.request.urlretrieve",
"os.path.isdir",
"os.mkdir",
"os.unlink",
"hashlib.sha256",
"logging.warning",
"requests.get",
"os.path.isfile",
"logging.fatal",
"urllib.parse.urljoin",
"logging.basicConfig",
"os.path.join",
"bs4.BeautifulSoup",
"json.load",
"shutil.copytree",
"datetime.datetime.now",
"shutil.rmtree",
"os.path.abspath",
"os.system",
"json.dump"
] | [((1082, 1118), 'os.path.join', 'os.path.join', (['cache_path', 'state_file'], {}), '(cache_path, state_file)\n', (1094, 1118), False, 'import os\n'), ((10587, 10613), 'os.listdir', 'os.listdir', (['path_to_folder'], {}), '(path_to_folder)\n', (10597, 10613), False, 'import os\n'), ((27888, 28012), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(filename)s %(funcName)s: %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '%(asctime)s %(levelname)s %(filename)s %(funcName)s: %(message)s',\n level=logging.DEBUG)\n", (27907, 28012), False, 'import logging\n'), ((28046, 28081), 'os.path.isdir', 'os.path.isdir', (['Locations.cache_path'], {}), '(Locations.cache_path)\n', (28059, 28081), False, 'import os\n'), ((28810, 28875), 'logging.info', 'logging.info', (['"""For each contract generate and install a library."""'], {}), "('For each contract generate and install a library.')\n", (28822, 28875), False, 'import logging\n'), ((29539, 29592), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""base_paths.json"""'], {}), "(Locations.cache_path, 'base_paths.json')\n", (29551, 29592), False, 'import os\n'), ((29720, 29768), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""flows.json"""'], {}), "(Locations.cache_path, 'flows.json')\n", (29732, 29768), False, 'import os\n'), ((29891, 29940), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""scopes.json"""'], {}), "(Locations.cache_path, 'scopes.json')\n", (29903, 29940), False, 'import os\n'), ((2663, 2714), 'logging.info', 'logging.info', (['"""Find eBay OpenAPI 3 JSON contracts."""'], {}), "('Find eBay OpenAPI 3 JSON contracts.')\n", (2675, 2714), False, 'import logging\n'), ((4233, 4300), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""sell_fulfillment_v1_oas3.json"""'], {}), "(Locations.cache_path, 'sell_fulfillment_v1_oas3.json')\n", (4245, 4300), False, 'import os\n'), ((5111, 5172), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""buy_browse_v1_oas3.json"""'], {}), "(Locations.cache_path, 'buy_browse_v1_oas3.json')\n", (5123, 5172), False, 'import os\n'), ((6048, 6090), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html_content', '"""html.parser"""'], {}), "(html_content, 'html.parser')\n", (6061, 6090), False, 'from bs4 import BeautifulSoup\n'), ((9135, 9204), 'logging.info', 'logging.info', (['"""Install or update the package manager named HomeBrew."""'], {}), "('Install or update the package manager named HomeBrew.')\n", (9147, 9204), False, 'import logging\n'), ((9213, 9331), 'os.system', 'os.system', (['"""/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)\\""""'], {}), '(\n \'/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"\'\n )\n', (9222, 9331), False, 'import os\n'), ((9334, 9382), 'os.path.isfile', 'os.path.isfile', (['"""/usr/local/bin/swagger-codegen"""'], {}), "('/usr/local/bin/swagger-codegen')\n", (9348, 9382), False, 'import os\n'), ((9693, 9769), 'logging.info', 'logging.info', (['"""Test the generator installation by invoking its help screen."""'], {}), "('Test the generator installation by invoking its help screen.')\n", (9705, 9769), False, 'import logging\n'), ((9778, 9824), 'os.system', 'os.system', (['"""/usr/local/bin/swagger-codegen -h"""'], {}), "('/usr/local/bin/swagger-codegen -h')\n", (9787, 9824), False, 'import os\n'), ((10664, 10702), 'os.path.join', 'os.path.join', (['path_to_folder', 'filename'], {}), 
'(path_to_folder, filename)\n', (10676, 10702), False, 'import os\n'), ((11139, 11183), 'os.path.abspath', 'os.path.abspath', (['"""../src/ebay_rest/a_p_i.py"""'], {}), "('../src/ebay_rest/a_p_i.py')\n", (11154, 11183), False, 'import os\n'), ((11210, 11241), 'os.path.abspath', 'os.path.abspath', (['"""../setup.cfg"""'], {}), "('../setup.cfg')\n", (11225, 11241), False, 'import os\n'), ((11269, 11306), 'os.path.abspath', 'os.path.abspath', (['Locations.cache_path'], {}), '(Locations.cache_path)\n', (11284, 11306), False, 'import os\n'), ((11333, 11371), 'os.path.abspath', 'os.path.abspath', (['Locations.target_path'], {}), '(Locations.target_path)\n', (11348, 11371), False, 'import os\n'), ((11402, 11437), 'os.path.abspath', 'os.path.abspath', (['"""../src/ebay_rest"""'], {}), "('../src/ebay_rest')\n", (11417, 11437), False, 'import os\n'), ((11454, 11484), 'os.path.isdir', 'os.path.isdir', (['self.path_cache'], {}), '(self.path_cache)\n', (11467, 11484), False, 'import os\n'), ((11606, 11630), 'os.walk', 'os.walk', (['self.path_cache'], {}), '(self.path_cache)\n', (11613, 11630), False, 'import os\n'), ((12315, 12342), 'os.listdir', 'os.listdir', (['self.path_final'], {}), '(self.path_final)\n', (12325, 12342), False, 'import os\n'), ((13167, 13180), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (13174, 13180), False, 'import os\n'), ((25482, 25495), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (25489, 25495), False, 'import os\n'), ((26668, 26695), 'os.path.isfile', 'os.path.isfile', (['target_file'], {}), '(target_file)\n', (26682, 26695), False, 'import os\n'), ((28220, 28250), 'os.mkdir', 'os.mkdir', (['Locations.cache_path'], {}), '(Locations.cache_path)\n', (28228, 28250), False, 'import os\n'), ((28984, 29029), 'os.path.join', 'os.path.join', (['Locations.cache_path', 'file_name'], {}), '(Locations.cache_path, file_name)\n', (28996, 29029), False, 'import os\n'), ((29501, 29519), 'os.system', 'os.system', (['command'], {}), '(command)\n', (29510, 29519), False, 'import os\n'), ((29645, 29701), 'json.dump', 'json.dump', (['base_paths', 'outfile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(base_paths, outfile, sort_keys=True, indent=4)\n', (29654, 29701), False, 'import json\n'), ((29821, 29872), 'json.dump', 'json.dump', (['flows', 'outfile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(flows, outfile, sort_keys=True, indent=4)\n', (29830, 29872), False, 'import json\n'), ((29993, 30045), 'json.dump', 'json.dump', (['scopes', 'outfile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(scopes, outfile, sort_keys=True, indent=4)\n', (30002, 30045), False, 'import json\n'), ((2341, 2386), 'os.path.join', 'os.path.join', (['Locations.cache_path', 'file_name'], {}), '(Locations.cache_path, file_name)\n', (2353, 2386), False, 'import os\n'), ((2399, 2434), 'urllib.request.urlretrieve', 'urlretrieve', (['link_href', 'destination'], {}), '(link_href, destination)\n', (2410, 2434), False, 'from urllib.request import urlretrieve\n'), ((2754, 2775), 'urllib.parse.urljoin', 'urljoin', (['base', '"""docs"""'], {}), "(base, 'docs')\n", (2761, 2775), False, 'from urllib.parse import urljoin\n'), ((5629, 5701), 'logging.warning', 'logging.warning', (['"""Patching buy_browse_v1_oas3.json is no longer needed."""'], {}), "('Patching buy_browse_v1_oas3.json is no longer needed.')\n", (5644, 5701), False, 'import logging\n'), ((5976, 5993), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (5988, 5993), False, 'import requests\n'), ((6633, 6678), 'os.path.join', 'os.path.join', 
(['Locations.cache_path', 'file_name'], {}), '(Locations.cache_path, file_name)\n', (6645, 6678), False, 'import os\n'), ((9396, 9472), 'logging.info', 'logging.info', (['"""Upgrade the code generator from Swagger. https://swagger.io/"""'], {}), "('Upgrade the code generator from Swagger. https://swagger.io/')\n", (9408, 9472), False, 'import logging\n'), ((9485, 9526), 'os.system', 'os.system', (['"""brew upgrade swagger-codegen"""'], {}), "('brew upgrade swagger-codegen')\n", (9494, 9526), False, 'import os\n'), ((9553, 9629), 'logging.info', 'logging.info', (['"""Install the code generator from Swagger. https://swagger.io/"""'], {}), "('Install the code generator from Swagger. https://swagger.io/')\n", (9565, 9629), False, 'import logging\n'), ((9642, 9683), 'os.system', 'os.system', (['"""brew install swagger-codegen"""'], {}), "('brew install swagger-codegen')\n", (9651, 9683), False, 'import os\n'), ((10238, 10314), 'logging.info', 'logging.info', (['"""Test the generator installation by invoking its help screen."""'], {}), "('Test the generator installation by invoking its help screen.')\n", (10250, 10314), False, 'import logging\n'), ((10323, 10372), 'os.system', 'os.system', (['"""java -jar swagger-codegen-cli.jar -h"""'], {}), "('java -jar swagger-codegen-cli.jar -h')\n", (10332, 10372), False, 'import os\n'), ((10472, 10494), 'logging.fatal', 'logging.fatal', (['message'], {}), '(message)\n', (10485, 10494), False, 'import logging\n'), ((10503, 10520), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (10511, 10520), False, 'import sys\n'), ((10714, 10739), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (10728, 10739), False, 'import os\n'), ((10743, 10768), 'os.path.islink', 'os.path.islink', (['file_path'], {}), '(file_path)\n', (10757, 10768), False, 'import os\n'), ((10782, 10824), 'logging.debug', 'logging.debug', (['"""deleting file:"""', 'file_path'], {}), "('deleting file:', file_path)\n", (10795, 10824), False, 'import logging\n'), ((10837, 10857), 'os.unlink', 'os.unlink', (['file_path'], {}), '(file_path)\n', (10846, 10857), False, 'import os\n'), ((10871, 10895), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (10884, 10895), False, 'import os\n'), ((11824, 11846), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (11833, 11846), False, 'import json\n'), ((11956, 11978), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (11965, 11978), False, 'import json\n'), ((12090, 12112), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (12099, 12112), False, 'import json\n'), ((12368, 12407), 'os.path.join', 'os.path.join', (['self.path_final', 'filename'], {}), '(self.path_final, filename)\n', (12380, 12407), False, 'import os\n'), ((12423, 12447), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (12436, 12447), False, 'import os\n'), ((12581, 12622), 'os.path.join', 'os.path.join', (['self.path_cache', 'name', 'name'], {}), '(self.path_cache, name, name)\n', (12593, 12622), False, 'import os\n'), ((12641, 12676), 'os.path.join', 'os.path.join', (['self.path_final', 'name'], {}), '(self.path_final, name)\n', (12653, 12676), False, 'import os\n'), ((12704, 12729), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (12719, 12729), False, 'import shutil\n'), ((14748, 14795), 'os.path.join', 'os.path.join', (['self.path_cache', 'name', '"""setup.py"""'], {}), "(self.path_cache, name, 'setup.py')\n", (14760, 14795), False, 
'import os\n'), ((16480, 16528), 'os.path.join', 'os.path.join', (['self.path_cache', 'name', 'name', '"""api"""'], {}), "(self.path_cache, name, name, 'api')\n", (16492, 16528), False, 'import os\n'), ((16569, 16582), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (16576, 16582), False, 'import os\n'), ((27742, 27784), 'logging.error', 'logging.error', (['f"""Can\'t find {target_file}"""'], {}), '(f"Can\'t find {target_file}")\n', (27755, 27784), False, 'import logging\n'), ((1348, 1370), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (1357, 1370), False, 'import json\n'), ((1764, 1826), 'json.dump', 'json.dump', (['self._states', 'file_handle'], {'sort_keys': '(True)', 'indent': '(4)'}), '(self._states, file_handle, sort_keys=True, indent=4)\n', (1773, 1826), False, 'import json\n'), ((1932, 1954), 'logging.fatal', 'logging.fatal', (['message'], {}), '(message)\n', (1945, 1954), False, 'import logging\n'), ((1967, 1984), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (1975, 1984), False, 'import sys\n'), ((4390, 4412), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (4399, 4412), False, 'import json\n'), ((4963, 5008), 'logging.error', 'logging.error', (['f"""Can\'t open {file_location}."""'], {}), '(f"Can\'t open {file_location}.")\n', (4976, 5008), False, 'import logging\n'), ((5545, 5590), 'logging.error', 'logging.error', (['f"""Can\'t open {file_location}."""'], {}), '(f"Can\'t open {file_location}.")\n', (5558, 5590), False, 'import logging\n'), ((9947, 9988), 'os.path.isfile', 'os.path.isfile', (['"""swagger-codegen-cli.jar"""'], {}), "('swagger-codegen-cli.jar')\n", (9961, 9988), False, 'import os\n'), ((10002, 10175), 'os.system', 'os.system', (["('wget https://repo1.maven.org/maven2/io/swagger/codegen/v3/' +\n 'swagger-codegen-cli/3.0.26/swagger-codegen-cli-3.0.26.jar ' +\n '-O swagger-codegen-cli.jar')"], {}), "('wget https://repo1.maven.org/maven2/io/swagger/codegen/v3/' +\n 'swagger-codegen-cli/3.0.26/swagger-codegen-cli-3.0.26.jar ' +\n '-O swagger-codegen-cli.jar')\n", (10011, 10175), False, 'import os\n'), ((10909, 10953), 'logging.debug', 'logging.debug', (['"""deleting folder:"""', 'file_path'], {}), "('deleting folder:', file_path)\n", (10922, 10953), False, 'import logging\n'), ((10966, 10990), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (10979, 10990), False, 'import shutil\n'), ((11723, 11776), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""base_paths.json"""'], {}), "(Locations.cache_path, 'base_paths.json')\n", (11735, 11776), False, 'import os\n'), ((11865, 11913), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""flows.json"""'], {}), "(Locations.cache_path, 'flows.json')\n", (11877, 11913), False, 'import os\n'), ((11997, 12046), 'os.path.join', 'os.path.join', (['Locations.cache_path', '"""scopes.json"""'], {}), "(Locations.cache_path, 'scopes.json')\n", (12009, 12046), False, 'import os\n'), ((12465, 12489), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (12478, 12489), False, 'import shutil\n'), ((12952, 12987), 'os.path.join', 'os.path.join', (['self.path_final', 'name'], {}), '(self.path_final, name)\n', (12964, 12987), False, 'import os\n'), ((13642, 13666), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (13654, 13666), False, 'import os\n'), ((21992, 22016), 'logging.warning', 'logging.warning', (['message'], {}), '(message)\n', (22007, 22016), False, 'import logging\n'), ((22033, 22068), 
'logging.warning', 'logging.warning', (['"""method: """', 'method'], {}), "('method: ', method)\n", (22048, 22068), False, 'import logging\n'), ((22085, 22120), 'logging.warning', 'logging.warning', (['"""scopes: """', 'scopes'], {}), "('scopes: ', scopes)\n", (22100, 22120), False, 'import logging\n'), ((22137, 22170), 'logging.warning', 'logging.warning', (['"""flows: """', 'flows'], {}), "('flows: ', flows)\n", (22152, 22170), False, 'import logging\n'), ((27628, 27720), 'logging.error', 'logging.error', (['f"""Can\'t find proper start or end anchors for {anchor} in {target_file}."""'], {}), '(\n f"Can\'t find proper start or end anchors for {anchor} in {target_file}.")\n', (27641, 27720), False, 'import logging\n'), ((28540, 28554), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28552, 28554), False, 'from datetime import datetime, timedelta\n'), ((28557, 28574), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (28566, 28574), False, 'from datetime import datetime, timedelta\n'), ((3709, 3729), 'logging.info', 'logging.info', (['record'], {}), '(record)\n', (3721, 3729), False, 'import logging\n'), ((4838, 4916), 'logging.warning', 'logging.warning', (['"""Patching sell_fulfillment_v1_oas3.json is no longer needed."""'], {}), "('Patching sell_fulfillment_v1_oas3.json is no longer needed.')\n", (4853, 4916), False, 'import logging\n'), ((6773, 6795), 'json.load', 'json.load', (['file_handle'], {}), '(file_handle)\n', (6782, 6795), False, 'import json\n'), ((14355, 14384), 'os.path.join', 'os.path.join', (['path', 'directory'], {}), '(path, directory)\n', (14367, 14384), False, 'import os\n'), ((24589, 24624), 'os.path.join', 'os.path.join', (['self.path_final', 'name'], {}), '(self.path_final, name)\n', (24601, 24624), False, 'import os\n'), ((25629, 25653), 'os.path.join', 'os.path.join', (['path', 'file'], {}), '(path, file)\n', (25641, 25653), False, 'import os\n'), ((28621, 28635), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28633, 28635), False, 'from datetime import datetime, timedelta\n'), ((4745, 4795), 'json.dump', 'json.dump', (['data', 'outfile'], {'sort_keys': '(True)', 'indent': '(4)'}), '(data, outfile, sort_keys=True, indent=4)\n', (4754, 4795), False, 'import json\n'), ((6909, 6931), 'logging.fatal', 'logging.fatal', (['message'], {}), '(message)\n', (6922, 6931), False, 'import logging\n'), ((7006, 7023), 'sys.exit', 'sys.exit', (['message'], {}), '(message)\n', (7014, 7023), False, 'import sys\n'), ((8475, 8514), 'logging.warning', 'logging.warning', (['"""Duplicate operation!"""'], {}), "('Duplicate operation!')\n", (8490, 8514), False, 'import logging\n'), ((8539, 8574), 'logging.warning', 'logging.warning', (['path', 'path_methods'], {}), '(path, path_methods)\n', (8554, 8574), False, 'import logging\n'), ((8599, 8635), 'logging.warning', 'logging.warning', (['method', 'method_dict'], {}), '(method, method_dict)\n', (8614, 8635), False, 'import logging\n'), ((25895, 25911), 'hashlib.sha256', 'hashlib.sha256', ([], {}), '()\n', (25909, 25911), False, 'import hashlib\n'), ((26158, 26187), 'os.path.join', 'os.path.join', (['path', 'directory'], {}), '(path, directory)\n', (26170, 26187), False, 'import os\n'), ((16736, 16760), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (16748, 16760), False, 'import os\n')] |
import datetime
def choice(true, false):
    '''Create a parser that maps the given true/false strings to booleans.'''
def parse(value):
value = value.lower()
if value == true:
return True
if value == false:
return False
message = 'Unknown value format, expected \'{0}\' or \'{1}\'.'
        message = message.format(true, false)
raise ValueError(message)
return parse
truefalse = choice('true', 'false')
yesno = choice('y', 'n')
def split_date(value):
return map(int, value.split('/'))
def date(value):
'''Convert a protocol date to `datetime.date`.'''
day, month, year = split_date(value)
return datetime.date(year, month, day)
def short_date(value):
'''Convert a protocol short date to `datetime.date`.'''
month, year = split_date(value)
return datetime.date(2000 + year, month, 1)
class Money:
def __init__(self, value):
'''Parse a money protocol value. '''
self.currency = value[0]
self.amount = float(value[1:])
def __str__(self):
return self.currency + str(self.amount)
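# Illustrative usage of the parsers above (not part of the original module);
# the literal values are made-up examples of the protocol's wire format.
if __name__ == '__main__':
    assert truefalse('True') is True
    assert yesno('n') is False
    assert date('25/12/2020') == datetime.date(2020, 12, 25)
    assert short_date('12/20') == datetime.date(2020, 12, 1)
    print(Money('$12.50'))  # prints: $12.5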
| [
"datetime.date"
] | [((682, 713), 'datetime.date', 'datetime.date', (['year', 'month', 'day'], {}), '(year, month, day)\n', (695, 713), False, 'import datetime\n'), ((846, 882), 'datetime.date', 'datetime.date', (['(2000 + year)', 'month', '(1)'], {}), '(2000 + year, month, 1)\n', (859, 882), False, 'import datetime\n')] |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
::
ipython -i $(which GScintillatorLib.py)
"""
from opticks.ana.nload import np_load
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
#f = np_load("$IDPATH/GScintillatorLib/LiquidScintillator/FASTCOMPONENT.npy")
#print f
#plt.plot( f[:,0], f[:,1] )
#plt.show()
aa = np_load("$IDPATH/GScintillatorLib/GScintillatorLib.npy")
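    # the loaded array holds one 4096-sample inverted CDF per entry (used to sample
    # reemission wavelengths); the two entries are expected to be identical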
assert aa.shape == (2, 4096, 1)
assert np.all( aa[0] == aa[1] )
a = aa[0,:,0]
assert a.shape == (4096,)
b = np.linspace(0,1,len(a))
fig = plt.figure()
plt.title("Inverted Cumulative Distribution Function : for Scintillator Reemission " )
ax = fig.add_subplot(1,1,1)
#ax.plot( a, b )
ax.plot( b, a )
ax.set_ylabel("Wavelength (nm)")
ax.set_xlabel("Probability")
fig.show()
| [
"opticks.ana.nload.np_load",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure"
] | [((1037, 1093), 'opticks.ana.nload.np_load', 'np_load', (['"""$IDPATH/GScintillatorLib/GScintillatorLib.npy"""'], {}), "('$IDPATH/GScintillatorLib/GScintillatorLib.npy')\n", (1044, 1093), False, 'from opticks.ana.nload import np_load\n'), ((1261, 1273), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1271, 1273), True, 'import matplotlib.pyplot as plt\n'), ((1279, 1369), 'matplotlib.pyplot.title', 'plt.title', (['"""Inverted Cumulative Distribution Function : for Scintillator Reemission """'], {}), "(\n 'Inverted Cumulative Distribution Function : for Scintillator Reemission ')\n", (1288, 1369), True, 'import matplotlib.pyplot as plt\n')] |
# USAGE
"""
python detect_features.py \
--input_path "../data/celeba/faces/" \
--output_path "./" \
--save_video False
"""
import click
import numpy as np
import cv2
import imutils
from pathlib import Path
from fastai.vision.data import ImageItemList
from fastai.vision.learner import create_cnn
from fastai.vision import models
from fastai.vision.image import pil2tensor
@click.command()
@click.option(
"--input_path",
"-ip",
default="./",
required=True,
help="Path to model and labels file",
)
@click.option(
"--output_path", "-op", default="./", required=True, help="Path to the output video"
)
@click.option("--save_video", "-s", default=True, type=bool, help="Want to save video?")
def detect_facial_attributes(input_path, output_path, save_video):
path = Path(input_path)
# Creating a databunch
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
data = (
ImageItemList.from_csv(path, csv_name="labels.csv")
.split_by_idx([])
.label_from_df(sep=" ")
.transform(None, size=128)
.databunch(no_check=True)
.normalize(imagenet_stats)
)
# Loading our model
learn = create_cnn(data, models.resnet50, pretrained=False)
learn.load("ff_stage-2-rn50")
# Loading HAAR cascade
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
cap = cv2.VideoCapture(0)
if save_video:
out = cv2.VideoWriter(output_path + "output.avi", -1, 20.0, (640, 480))
while True:
# Capture frame-by-frame
_ , frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Find faces using Haar cascade
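        # scaleFactor=1.1 and minNeighbors=5 are typical Haar-cascade settings;
        # minSize drops detections smaller than 30x30 pixels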
face_coord = face_cascade.detectMultiScale(gray, 1.1, 5, minSize=(30, 30))
## Looping through each face
for coords in face_coord:
## Finding co-ordinates of face
X, Y, w, h = coords
## Finding frame size
H, W, _ = frame.shape
## Computing larger face co-ordinates
X_1, X_2 = (max(0, X - int(w * 0.35)), min(X + int(1.35 * w), W))
Y_1, Y_2 = (max(0, Y - int(0.35 * h)), min(Y + int(1.35 * h), H))
## Cropping face and changing BGR To RGB
img_cp = frame[Y_1:Y_2, X_1:X_2].copy()
img_cp1 = cv2.cvtColor(img_cp, cv2.COLOR_BGR2RGB)
            ## Prediction of facial features
prediction = str(
learn.predict(pil2tensor(img_cp1, np.float32).div_(255))[0]
).split(";")
            label = (
                " ".join(prediction)
                if "Male" in prediction
                else "Female " + " ".join(prediction)
            )
            # build on the previous label so the gender prefix is not discarded
            label = (
                label
                if "No_Beard" in prediction
                else "Beard " + label
            )
## Drawing facial boundaries
cv2.rectangle(
img=frame,
pt1=(X, Y),
pt2=(X + w, Y + h),
color=(128, 128, 0),
thickness=2,
)
## Drawing facial attributes identified
label_list = label.split(" ")
for idx in range(1, len(label_list) + 1):
cv2.putText(
frame,
label_list[idx - 1],
(X, Y - 14 * idx),
cv2.FONT_HERSHEY_SIMPLEX,
0.45,
(0, 128, 0),
2,
)
# Display the resulting frame
cv2.imshow("frame", frame)
## Save the resulting frame
if save_video:
out.write(frame)
## Escape keys
if cv2.waitKey(1) & 0xFF == ord("q"):
break
# When everything done, release the capture
cap.release()
if save_video:
out.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
detect_facial_attributes()
| [
"cv2.rectangle",
"fastai.vision.data.ImageItemList.from_csv",
"pathlib.Path",
"click.option",
"fastai.vision.learner.create_cnn",
"fastai.vision.image.pil2tensor",
"cv2.VideoWriter",
"cv2.imshow",
"cv2.putText",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.CascadeClassifier",
"click.command",
"cv2.waitKey"
] | [((375, 390), 'click.command', 'click.command', ([], {}), '()\n', (388, 390), False, 'import click\n'), ((392, 499), 'click.option', 'click.option', (['"""--input_path"""', '"""-ip"""'], {'default': '"""./"""', 'required': '(True)', 'help': '"""Path to model and labels file"""'}), "('--input_path', '-ip', default='./', required=True, help=\n 'Path to model and labels file')\n", (404, 499), False, 'import click\n'), ((519, 622), 'click.option', 'click.option', (['"""--output_path"""', '"""-op"""'], {'default': '"""./"""', 'required': '(True)', 'help': '"""Path to the output video"""'}), "('--output_path', '-op', default='./', required=True, help=\n 'Path to the output video')\n", (531, 622), False, 'import click\n'), ((625, 717), 'click.option', 'click.option', (['"""--save_video"""', '"""-s"""'], {'default': '(True)', 'type': 'bool', 'help': '"""Want to save video?"""'}), "('--save_video', '-s', default=True, type=bool, help=\n 'Want to save video?')\n", (637, 717), False, 'import click\n'), ((791, 807), 'pathlib.Path', 'Path', (['input_path'], {}), '(input_path)\n', (795, 807), False, 'from pathlib import Path\n'), ((1182, 1233), 'fastai.vision.learner.create_cnn', 'create_cnn', (['data', 'models.resnet50'], {'pretrained': '(False)'}), '(data, models.resnet50, pretrained=False)\n', (1192, 1233), False, 'from fastai.vision.learner import create_cnn\n'), ((1315, 1375), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (1336, 1375), False, 'import cv2\n'), ((1387, 1406), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1403, 1406), False, 'import cv2\n'), ((3954, 3977), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3975, 3977), False, 'import cv2\n'), ((1440, 1505), 'cv2.VideoWriter', 'cv2.VideoWriter', (["(output_path + 'output.avi')", '(-1)', '(20.0)', '(640, 480)'], {}), "(output_path + 'output.avi', -1, 20.0, (640, 480))\n", (1455, 1505), False, 'import cv2\n'), ((1651, 1690), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1663, 1690), False, 'import cv2\n'), ((3638, 3664), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (3648, 3664), False, 'import cv2\n'), ((2367, 2406), 'cv2.cvtColor', 'cv2.cvtColor', (['img_cp', 'cv2.COLOR_BGR2RGB'], {}), '(img_cp, cv2.COLOR_BGR2RGB)\n', (2379, 2406), False, 'import cv2\n'), ((2974, 3069), 'cv2.rectangle', 'cv2.rectangle', ([], {'img': 'frame', 'pt1': '(X, Y)', 'pt2': '(X + w, Y + h)', 'color': '(128, 128, 0)', 'thickness': '(2)'}), '(img=frame, pt1=(X, Y), pt2=(X + w, Y + h), color=(128, 128, 0\n ), thickness=2)\n', (2987, 3069), False, 'import cv2\n'), ((3325, 3436), 'cv2.putText', 'cv2.putText', (['frame', 'label_list[idx - 1]', '(X, Y - 14 * idx)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.45)', '(0, 128, 0)', '(2)'], {}), '(frame, label_list[idx - 1], (X, Y - 14 * idx), cv2.\n FONT_HERSHEY_SIMPLEX, 0.45, (0, 128, 0), 2)\n', (3336, 3436), False, 'import cv2\n'), ((3789, 3803), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (3800, 3803), False, 'import cv2\n'), ((2512, 2543), 'fastai.vision.image.pil2tensor', 'pil2tensor', (['img_cp1', 'np.float32'], {}), '(img_cp1, np.float32)\n', (2522, 2543), False, 'from fastai.vision.image import pil2tensor\n'), ((925, 976), 'fastai.vision.data.ImageItemList.from_csv', 'ImageItemList.from_csv', (['path'], {'csv_name': '"""labels.csv"""'}), "(path, csv_name='labels.csv')\n", (947, 
976), False, 'from fastai.vision.data import ImageItemList\n')] |
import json
import matplotlib.pyplot as plt
import numpy as np
from test_rankings import ignore_targets
from matplotlib.font_manager import FontProperties
def plot_target_fraction(target, fraction):
# plots information recovery for each target
plt.plot(range(len(fraction)), fraction, label=target)
plt.xlabel('library size')
plt.ylabel('information recovered')
plt.ylim(0,1)
plt.tight_layout()
plt.savefig('figures/fraction_test_atomic.png')
def mean_across_targets(fraction):
# calculates the mean information recovered across all targets for a particular library type
mean_fractions = []
fraction_lengths = [len(i) for i in fraction]
for i in range(min(fraction_lengths)):
mean_fractions.append(sum([x[i] for x in fraction])/len(fraction))
return mean_fractions
def target_improvement(target_fraction_list, random_fractions_target_list, lib_size):
# calculates improvement in information recovered between ranked library and random library (mean of many runs)
improvement = []
for i in range(len(target_fraction_list)):
target_fraction = target_fraction_list[i]
random_fractions_target = random_fractions_target_list[i]
assert len(target_fraction) == len(random_fractions_target)
if random_fractions_target[lib_size] > 0:
imp = (target_fraction[lib_size]-random_fractions_target[lib_size])/random_fractions_target[lib_size]
else:
imp = 0
improvement.append(imp)
return improvement
def plot_fraction_and_std(mean_fractions, mean_stds, label, color):
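    # plots the mean information-recovery curve and shades one standard deviation above and below it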
plt.plot(range(len(mean_fractions)), mean_fractions, label=label)
plt.fill_between(range(len(mean_fractions)),
[mean_fractions[i]-mean_stds[i] for i in range(len(mean_fractions))],
[mean_fractions[i]+mean_stds[i] for i in range(len(mean_fractions))],
color=color, alpha=0.1)
def plot_fractions(ranked_fractions, ranked_stds, random_fractions, random_stds, diverse_fractions, diverse_stds):
mean_ranked_fractions = mean_across_targets(ranked_fractions)
mean_ranked_stds = mean_across_targets(ranked_stds)
mean_random_fractions = mean_across_targets(random_fractions)
mean_random_stds = mean_across_targets(random_stds)
mean_diverse_fractions = mean_across_targets(diverse_fractions)
mean_diverse_stds = mean_across_targets(diverse_stds)
plt.figure(figsize=(12, 6))
plt.subplot(121)
for i in ranked_fractions:
plot_target_fraction('target', i)
plt.subplot(122)
plot_fraction_and_std(mean_ranked_fractions, mean_ranked_stds, 'ranked compounds', '#1f77b4')
plot_fraction_and_std(mean_random_fractions, mean_random_stds, 'random compounds', '#ff7f0e')
plot_fraction_and_std(mean_diverse_fractions, mean_diverse_stds, 'diverse compounds', '#2ca02c')
plt.ylim(0, 1)
plt.legend()
plt.xlabel('library size')
plt.ylabel('mean information recovered across all new targets')
plt.tight_layout()
plt.savefig('figures/fraction_test_atomic.png')
ranked_fractions = json.load(open('data/outputs/ranked_fractions_atomic.json', 'r'))
ranked_stds = json.load(open('data/outputs/ranked_stds_atomic.json', 'r'))
random_fractions = json.load(open('data/outputs/random_fractions_atomic.json', 'r'))
random_stds = json.load(open('data/outputs/random_stds_atomic.json', 'r'))
diverse_fractions = json.load(open('data/outputs/diverse_fractions_atomic.json', 'r'))
diverse_stds = json.load(open('data/outputs/diverse_stds_atomic.json', 'r'))
frequent_comps = json.load(open('data/datafiles/frequently_tested_compounds.json', 'r'))
target_screens = json.load(open('data/datafiles/target_full_screens.json', 'r'))
smiles_bits = json.load(open('data/datafiles/smiles_bits_atomic.json', 'r'))
smiles_bits = ignore_targets(smiles_bits, frequent_comps, target_screens, 500)
plot_fractions(ranked_fractions, ranked_stds, random_fractions, random_stds, diverse_fractions, diverse_stds)
lib_size = 100
bar_rank = [i[lib_size] for i in ranked_fractions]
error_rank = [i[lib_size] for i in ranked_stds]
bar_random = [i[lib_size] for i in random_fractions]
error_random = [i[lib_size] for i in random_stds]
bar_diverse = [i[lib_size] for i in diverse_fractions]
error_diverse = [i[lib_size] for i in diverse_stds]
plt.close()
plt.figure(figsize=(10, 6))
fig, ax = plt.subplots()
pos = np.arange(len(smiles_bits))
ax.bar(pos-0.25, bar_rank, yerr=error_rank, width=0.25, label='top-ranked compounds', alpha=0.7)
ax.bar(pos, bar_random, yerr=error_random, width=0.25,label='random compounds', alpha=0.7)
ax.bar(pos+0.25, bar_diverse, yerr=error_diverse, width=0.25,label='diverse compounds', alpha=0.7)
ax.set_xticks(pos)
ax.set_xticklabels([t for t in smiles_bits])
plt.xticks(rotation=90)
plt.ylabel(f'information recovered with library sizes of {lib_size}')
plt.xlabel('target')
plt.ylim(0, 0.65)
fontP = FontProperties()
fontP.set_size('x-small')
plt.legend(prop=fontP)
plt.tight_layout()
plt.savefig('figures/results_bar_atomic.png')
improvement_random = target_improvement(ranked_fractions, random_fractions, lib_size)
improvement_diverse = target_improvement(ranked_fractions, diverse_fractions, lib_size)
plt.close()
plt.figure(figsize=(10, 8))
fig, ax = plt.subplots()
pos = np.arange(len(smiles_bits))
ax.barh(pos+0.15, improvement_random, height=0.25, label='vs random runs', color= '#ff7f0e', alpha=0.7)
ax.barh(pos-0.15, improvement_diverse, height=0.25, label='vs MACCS-diverse libraries', color='#2ca02c', alpha=0.7)
ax.set_yticks(pos)
ax.set_yticklabels([t for t in smiles_bits])
plt.xlabel(f'mean factor of information improvement in DSiP-diverse {lib_size}')
plt.ylabel('target')
plt.legend(prop=fontP)
plt.tight_layout()
plt.savefig('figures/improvement_by_target_atomic.png') | [
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.font_manager.FontProperties",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ylim",
"test_rankings.ignore_targets",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((3860, 3924), 'test_rankings.ignore_targets', 'ignore_targets', (['smiles_bits', 'frequent_comps', 'target_screens', '(500)'], {}), '(smiles_bits, frequent_comps, target_screens, 500)\n', (3874, 3924), False, 'from test_rankings import ignore_targets\n'), ((4364, 4375), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4373, 4375), True, 'import matplotlib.pyplot as plt\n'), ((4376, 4403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (4386, 4403), True, 'import matplotlib.pyplot as plt\n'), ((4414, 4428), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4426, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4814, 4837), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(90)'}), '(rotation=90)\n', (4824, 4837), True, 'import matplotlib.pyplot as plt\n'), ((4838, 4907), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""information recovered with library sizes of {lib_size}"""'], {}), "(f'information recovered with library sizes of {lib_size}')\n", (4848, 4907), True, 'import matplotlib.pyplot as plt\n'), ((4908, 4928), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""target"""'], {}), "('target')\n", (4918, 4928), True, 'import matplotlib.pyplot as plt\n'), ((4929, 4946), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.65)'], {}), '(0, 0.65)\n', (4937, 4946), True, 'import matplotlib.pyplot as plt\n'), ((4955, 4971), 'matplotlib.font_manager.FontProperties', 'FontProperties', ([], {}), '()\n', (4969, 4971), False, 'from matplotlib.font_manager import FontProperties\n'), ((4998, 5020), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': 'fontP'}), '(prop=fontP)\n', (5008, 5020), True, 'import matplotlib.pyplot as plt\n'), ((5021, 5039), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5037, 5039), True, 'import matplotlib.pyplot as plt\n'), ((5040, 5085), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/results_bar_atomic.png"""'], {}), "('figures/results_bar_atomic.png')\n", (5051, 5085), True, 'import matplotlib.pyplot as plt\n'), ((5262, 5273), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5271, 5273), True, 'import matplotlib.pyplot as plt\n'), ((5274, 5301), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (5284, 5301), True, 'import matplotlib.pyplot as plt\n'), ((5312, 5326), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5324, 5326), True, 'import matplotlib.pyplot as plt\n'), ((5645, 5730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['f"""mean factor of information improvement in DSiP-diverse {lib_size}"""'], {}), "(f'mean factor of information improvement in DSiP-diverse {lib_size}'\n )\n", (5655, 5730), True, 'import matplotlib.pyplot as plt\n'), ((5726, 5746), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""target"""'], {}), "('target')\n", (5736, 5746), True, 'import matplotlib.pyplot as plt\n'), ((5747, 5769), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'prop': 'fontP'}), '(prop=fontP)\n', (5757, 5769), True, 'import matplotlib.pyplot as plt\n'), ((5770, 5788), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5786, 5788), True, 'import matplotlib.pyplot as plt\n'), ((5789, 5844), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/improvement_by_target_atomic.png"""'], {}), "('figures/improvement_by_target_atomic.png')\n", (5800, 5844), True, 'import matplotlib.pyplot as plt\n'), ((314, 340), 'matplotlib.pyplot.xlabel', 'plt.xlabel', 
(['"""library size"""'], {}), "('library size')\n", (324, 340), True, 'import matplotlib.pyplot as plt\n'), ((345, 380), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""information recovered"""'], {}), "('information recovered')\n", (355, 380), True, 'import matplotlib.pyplot as plt\n'), ((385, 399), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (393, 399), True, 'import matplotlib.pyplot as plt\n'), ((403, 421), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (419, 421), True, 'import matplotlib.pyplot as plt\n'), ((426, 473), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/fraction_test_atomic.png"""'], {}), "('figures/fraction_test_atomic.png')\n", (437, 473), True, 'import matplotlib.pyplot as plt\n'), ((2457, 2484), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2467, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2489, 2505), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (2500, 2505), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2600), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (2595, 2600), True, 'import matplotlib.pyplot as plt\n'), ((2904, 2918), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (2912, 2918), True, 'import matplotlib.pyplot as plt\n'), ((2923, 2935), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2933, 2935), True, 'import matplotlib.pyplot as plt\n'), ((2940, 2966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""library size"""'], {}), "('library size')\n", (2950, 2966), True, 'import matplotlib.pyplot as plt\n'), ((2971, 3034), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""mean information recovered across all new targets"""'], {}), "('mean information recovered across all new targets')\n", (2981, 3034), True, 'import matplotlib.pyplot as plt\n'), ((3039, 3057), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3055, 3057), True, 'import matplotlib.pyplot as plt\n'), ((3062, 3109), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figures/fraction_test_atomic.png"""'], {}), "('figures/fraction_test_atomic.png')\n", (3073, 3109), True, 'import matplotlib.pyplot as plt\n')] |
import hashlib
import os
import time
import datetime
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import logging
import tkinter as tk
import threading
import sys
from threading import Thread
from tkinter.scrolledtext import ScrolledText
from win10toast import ToastNotifier
import smtplib
from pathlib import Path
# GLOBALS
configDict = dict()
filesAndHashes = dict()
newFilesAndHashes = dict()
badIntegrity = list()
graphDate = list()
cantidadDeArchivos = [0, 1000]
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
interval = 0
running = bool()
window = tk.Tk()
entry = ScrolledText(window, width=80, height=20)
logBox = ScrolledText(window, width=80, height=20)
toaster = ToastNotifier()
def folderHash(pathName):
""" Params: ruta """
""" Return: devuelve un diccionario formato por la ruta y el hash: key=ruta, value=hash """
""" Se le pasa una ruta y viaja por todos los archivos y las subrutas de dicha ruta y calcula los hashes
de cada uno de los archivos encontrados """
fileAndHash = dict()
for root, dirs, files in os.walk(pathName):
for file in files:
with open(os.path.join(root, file), "rb") as fileRaw:
if(configDict["Selected Hash mode"].lower() == "sha3_256"):
fileAndHash[os.path.join(root, file).replace("\\", "/")] = hashlib.sha3_256(
fileRaw.read()).hexdigest()
elif(configDict["Selected Hash mode"].lower() == "sha3_384"):
fileAndHash[os.path.join(root, file).replace("\\", "/")] = hashlib.sha3_384(
fileRaw.read()).hexdigest()
elif(configDict["Selected Hash mode"].lower() == "sha3_512"):
fileAndHash[os.path.join(root, file).replace("\\", "/")] = hashlib.sha3_512(
fileRaw.read()).hexdigest()
elif(configDict["Selected Hash mode"].lower() == "md5"):
fileAndHash[os.path.join(root, file).replace("\\", "/")] = hashlib.md5(
fileRaw.read()).hexdigest()
return fileAndHash
def readLogFile():
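    """ Returns the contents of the log file under 'c:/top_secret', creating an empty 'log.log' first if it does not exist """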
text = str()
if (os.path.exists(os.path.join('c:/top_secret', 'log.log'))):
with open(os.path.join('c:/top_secret', 'log.log')) as reader:
text = reader.read()
else:
f = open(os.path.join('C:\\top_secret', 'log.log'), "x")
return text
def logBoxContainer():
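    """ Clears the log text box and fills it with the current contents of the log file """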
logBox.delete("1.0", tk.END)
text = readLogFile()
logBox.insert(tk.INSERT, text)
logBox.insert(tk.END, "")
def importConfig():
""" Params: NONE """
""" Return: NONE """
""" Crea un archivo de configuración si no lo hay con las opciones de la plantilla de 'configs'
y en caso de que ya exista (que sería siempre menos la primera vez que se ejecute el script)
carga la configuración de dicho archivo y la importa al diccionario del script llamado 'configDict',
mediante este diccionario vamos a poder manejar dichas opciones indicadas en el archivo de configuración"""
path = os.path.abspath('.').split(os.path.sep)[
0]+os.path.sep+"top_secret\config.config"
if (os.path.exists(path)):
try:
with open(path, "r") as config:
for line in config:
if "#" not in line:
confSplitted = line.split("=")
configDict[confSplitted[0].strip(
)] = confSplitted[1].strip()
entry.insert(tk.INSERT, confSplitted[0].strip(
) + "=" + confSplitted[1].strip() + "\n")
else:
entry.insert(tk.INSERT, line)
entry.insert(tk.END, "")
logging.info("La configuración se ha importado correctamente!")
# entry.insert(tk.END, " in ScrolledText")
# print(configDict)
except:
logging.error("Error al importar la configuración!")
else:
configs = ["\nSelected Hash mode=\n",
"Directories to protect=\n", "Verify interval=\n", "email=\n", "smtpPass=\n", "toEmail=\n"]
try:
with open(os.path.abspath('.').split(os.path.sep)[0]+os.path.sep+"top_secret\config.config", "w") as file:
file.write(
"# Agregar los directorios a proteger, separados por una coma\n# Intervalo de tiempo entre examenes en minutos\n# Guardar la configuracion antes de iniciar el examen")
for config in configs:
file.write(config)
logging.info("Archivo de configuración creado satisfactoriamente!")
except:
logging.error(
"Error al crear el archivo de configuración, problema con los permisos?")
importConfig()
def exportConfig():
""" Params: NONE """
""" Return: NONE """
""" Escribe en el archivo 'C:\top_secret\config.config' las configuraciones reflejadas en la caja de texto del script """
with open(os.path.abspath('.').split(os.path.sep)[0]+os.path.sep+"top_secret\config.config", "w") as config:
config.write(entry.get("1.0", tk.END))
def exportHashedFiles():
""" Params: NONE """
""" Return: NONE """
""" Comprueba las rutas que hemos indicado en el archivo de configuración y carga todos los archivos de cada una
de ellas gracias a la función anterior 'folderHash', una vez hecho esto crea un archivo 'hashes.hash' si no lo hay y escribe
en el todas las rutas junto a su hash, separadas mediante un simbolo '=' """
# TIME
begin_time = datetime.datetime.now()
splittedPathsToHash = configDict["Directories to protect"].split(
",") # para ser mejor, hacer strip con un for para cada elemento por si acaso
for path in splittedPathsToHash:
filesAndHashes.update(folderHash(path))
with open(os.path.abspath('.').split(os.path.sep)[0]+os.path.sep+"top_secret\hashes.hash", "w") as writer:
for key, value in filesAndHashes.items():
writer.write(key + "=" + value + "\n")
end = datetime.datetime.now() - begin_time
strr = "Hashes exportados correctamente en: " + str(end)
logging.info(strr)
def importHashedFiles():
""" Params: NONE """
""" Return: NONE """
""" Lee el archivo 'C:\top_secret\hashes.hash' y carga cada una de las entradas en el diccionario 'newFilesAndHashes' presente en el script """
try:
with open(os.path.abspath('.').split(os.path.sep)[0]+os.path.sep+"top_secret\hashes.hash", "r") as reader:
line = reader.readline()
while line:
splittedLineList = line.split("=")
newFilesAndHashes[splittedLineList[0].replace(
"\n", "")] = splittedLineList[1].replace("\n", "")
line = reader.readline()
logging.info("Hashes importados correctamente!")
except:
logging.error("Error al importar los hashes!")
# print(newFilesAndHashes)
def calculateHashedFiles():
""" Params: NONE """
""" Return: NONE """
""" Calcula los hashes de los archivos nuevamente, y reutilizamos el diccionario creado al principio 'filesAndHashes' esto servirá
para comparar los items de este diccionario con los del 'newFilesAndHashes'. """
logging.info("Calculando los hashes de los archivos...")
splittedPathsToHash = configDict["Directories to protect"].split(
",") # para ser mejor, hacer strip con un for para cada elemento por si acaso
for path in splittedPathsToHash:
filesAndHashes.update(folderHash(path))
strr = "Hashes calculados satisfactoriamente!"
def compareHashes():
""" Params: NONE """
""" Return: NONE """
""" Compara los dos diccionarios, uno contiene los hashes cargados del archivo hashes.hash y el otro contiene los hashes recien calculados,
tras dicha comparación los resultados saldran por consola """
numberOfFilesOK = int()
numberOfFilesNoOk = int()
listOfNoMatches = list()
for key, value in filesAndHashes.items():
if newFilesAndHashes[key] == value:
numberOfFilesOK += 1
else:
numberOfFilesNoOk += 1
cadena = "DIR: " + str(key) + " ¡Los hashes no coinciden!"
listOfNoMatches.append(cadena)
badIntegrity.append(numberOfFilesNoOk)
graphDate.append(datetime.datetime.now().strftime("%M"))
str1 = "Número de archivos OK: " + str(numberOfFilesOK)
str2 = "Número de archivos MODIFICADOS: " + str(numberOfFilesNoOk)
logging.info(str1)
logging.info(str2)
if(listOfNoMatches):
str3 = "Archivos con integridad comprometida: "
noMatchesToPrint = list()
for entry in listOfNoMatches:
noMatchesToPrint.append(" "+entry)
logging.warning(str3 + "\n" + '\n'.join(noMatchesToPrint))
toaster.show_toast(
"HIDS", "Hay un problema integridad. Revisar LOG.", duration=interval, threaded=True)
sendEmail(str3 + "\n" + '\n'.join(noMatchesToPrint))
else:
toaster.show_toast(
"HIDS", "Examen finalizado. Se mantiene la integridad.", duration=interval, threaded=True)
def graph():
""" Params: NONE """
""" Return: NONE """
""" Muestra una gráfica en el navegador en base a los datos de las dos listas 'badIntegrity' y 'graphDate' """
layout_title = "Evolución de la integridad de los archivos fecha: " + \
str(datetime.datetime.now().strftime("%d-%m-%Y"))
df = pd.DataFrame(dict(
x=graphDate,
y=badIntegrity
))
fig = px.bar(df,
x='x', y='y',
color_discrete_sequence=[
'red']*3,
title=layout_title,
labels={'x': 'Hora', 'y': 'Numero de fallos de integridad'})
fig.show()
def run():
""" Params: NONE """
""" Return: NONE """
""" """
if running == True:
begin_time = datetime.datetime.now()
calculateHashedFiles()
compareHashes()
logBox.config(state=tk.NORMAL)
        logBoxContainer()  # refresh the log box here
logBox.config(state=tk.DISABLED)
# graph()
threading.Timer(float(interval), run).start()
end = datetime.datetime.now() - begin_time
strr = "Comprobación realizada con éxito en: " + str(end)
logging.info(strr)
def runHandle():
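    """ Sets the running flag and starts the periodic exam in a background thread so the GUI stays responsive """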
t = Thread(target=run)
global running
running = True
t.start()
def initExam():
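    """ Configures logging output, reads the verify interval, exports/imports the reference hashes and starts the exam loop """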
console = logging.StreamHandler(sys.stdout)
console.setLevel(100)
root_logger = logging.getLogger("")
root_logger.addHandler(console)
global interval
interval = int(configDict["Verify interval"])
    # supposedly the admin provides us with the hashes of all the files -> if not, run exportHashedFiles()
exportHashedFiles()
importHashedFiles()
runHandle()
def sendEmail(bodyMsg):
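    """ Sends the given body by e-mail via Gmail SMTP to every address listed under 'toEmail' in the configuration """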
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.ehlo()
server.login(configDict["email"], configDict["smtpPass"])
subject = "¡Problema con la integridad de los archivos!"
body = bodyMsg
msg = f"Subject: {subject}\n\n{body}".encode('utf-8')
emailList = configDict["toEmail"].split(",")
for email in emailList:
server.sendmail("<EMAIL>", email, msg)
server.quit()
except:
print("Ha ocurrido un error enviando el mensaje.")
def gui():
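    """ Builds the Tkinter main window (configuration box, log box and control buttons) and starts the main loop """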
window.resizable(0, 0)
window.geometry("1340x512")
labelInicio = tk.Label(window, text="Iniciar el examen ")
labelStop = tk.Label(window, text="Parar el examen ")
labelGraph = tk.Label(window, text="Abrir gráfico ")
labelConf = tk.Label(window, text="Fichero de configuración")
labelLog = tk.Label(window, text="Fichero de LOG")
labelInicio.pack()
labelInicio.place(x=510, y=410)
labelStop.pack()
labelStop.place(x=728, y=410)
labelGraph.pack()
labelGraph.place(x=630, y=410)
labelConf.pack()
labelConf.place(x=230, y=333)
labelLog.pack()
labelLog.place(x=950, y=333)
entry.pack()
entry.place(x=5, y=0)
window.title("HIDS")
btnGraph = tk.Button(window, text="Abrir grafico", command=graph)
btnGraph.pack(pady=15, padx=15)
btnGraph.place(x=628, y=435)
btnIniciar = tk.Button(window, text="Iniciar",
command=initExam)
btnIniciar.pack(pady=15, padx=15)
btnIniciar.place(x=535, y=435)
btnCerrar = tk.Button(window, text="Parar", command=stop)
btnCerrar.pack(pady=15, padx=15)
btnCerrar.place(x=751, y=435)
btnGuardar = tk.Button(
window, text="Guardar configuración", command=exportConfig)
btnGuardar.pack(pady=15, padx=15)
btnGuardar.place(x=532, y=330)
logBox.pack()
logBox.place(x=670, y=0)
window.protocol("WM_DELETE_WINDOW", stopAndClose)
window.mainloop()
def stop():
toaster.show_toast(
"HIDS", "Servicio interrumpido. El sistema NO está examinando los directorios.", threaded=True)
global running
running = False
logging.critical("EXAMEN INTERRUMPIDO")
def stopAndClose():
global running
running = False
logging.critical("HIDS CERRADO")
os._exit(1)
def iniciar():
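    """ Creates the 'top_secret' working directory if needed, sets up file logging, imports the configuration and opens the GUI """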
try:
Path("C:\\top_secret").mkdir(parents=True)
except:
pass
readLogFile()
filename = os.path.abspath('.').split(os.path.sep)[
0]+os.path.sep+"top_secret\log.log"
logging.basicConfig(format='%(levelname)s:%(asctime)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S', filename=filename, level=logging.INFO)
importConfig()
gui()
if __name__ == "__main__":
iniciar()
| [
"logging.getLogger",
"logging.StreamHandler",
"tkinter.Button",
"tkinter.Label",
"logging.info",
"logging.error",
"os.walk",
"os.path.exists",
"pathlib.Path",
"logging.critical",
"win10toast.ToastNotifier",
"tkinter.scrolledtext.ScrolledText",
"logging.basicConfig",
"smtplib.SMTP",
"plotly.express.bar",
"os.path.join",
"datetime.datetime.now",
"tkinter.Tk",
"os._exit",
"os.path.abspath",
"threading.Thread"
] | [((604, 611), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (609, 611), True, 'import tkinter as tk\n'), ((620, 661), 'tkinter.scrolledtext.ScrolledText', 'ScrolledText', (['window'], {'width': '(80)', 'height': '(20)'}), '(window, width=80, height=20)\n', (632, 661), False, 'from tkinter.scrolledtext import ScrolledText\n'), ((671, 712), 'tkinter.scrolledtext.ScrolledText', 'ScrolledText', (['window'], {'width': '(80)', 'height': '(20)'}), '(window, width=80, height=20)\n', (683, 712), False, 'from tkinter.scrolledtext import ScrolledText\n'), ((723, 738), 'win10toast.ToastNotifier', 'ToastNotifier', ([], {}), '()\n', (736, 738), False, 'from win10toast import ToastNotifier\n'), ((1099, 1116), 'os.walk', 'os.walk', (['pathName'], {}), '(pathName)\n', (1106, 1116), False, 'import os\n'), ((3174, 3194), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3188, 3194), False, 'import os\n'), ((5624, 5647), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5645, 5647), False, 'import datetime\n'), ((6214, 6232), 'logging.info', 'logging.info', (['strr'], {}), '(strr)\n', (6226, 6232), False, 'import logging\n'), ((7333, 7389), 'logging.info', 'logging.info', (['"""Calculando los hashes de los archivos..."""'], {}), "('Calculando los hashes de los archivos...')\n", (7345, 7389), False, 'import logging\n'), ((8578, 8596), 'logging.info', 'logging.info', (['str1'], {}), '(str1)\n', (8590, 8596), False, 'import logging\n'), ((8601, 8619), 'logging.info', 'logging.info', (['str2'], {}), '(str2)\n', (8613, 8619), False, 'import logging\n'), ((9629, 9776), 'plotly.express.bar', 'px.bar', (['df'], {'x': '"""x"""', 'y': '"""y"""', 'color_discrete_sequence': "(['red'] * 3)", 'title': 'layout_title', 'labels': "{'x': 'Hora', 'y': 'Numero de fallos de integridad'}"}), "(df, x='x', y='y', color_discrete_sequence=['red'] * 3, title=\n layout_title, labels={'x': 'Hora', 'y': 'Numero de fallos de integridad'})\n", (9635, 9776), True, 'import plotly.express as px\n'), ((10443, 10461), 'threading.Thread', 'Thread', ([], {'target': 'run'}), '(target=run)\n', (10449, 10461), False, 'from threading import Thread\n'), ((10546, 10579), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (10567, 10579), False, 'import logging\n'), ((10624, 10645), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (10641, 10645), False, 'import logging\n'), ((11632, 11675), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Iniciar el examen """'}), "(window, text='Iniciar el examen ')\n", (11640, 11675), True, 'import tkinter as tk\n'), ((11692, 11733), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Parar el examen """'}), "(window, text='Parar el examen ')\n", (11700, 11733), True, 'import tkinter as tk\n'), ((11751, 11790), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Abrir gráfico """'}), "(window, text='Abrir gráfico ')\n", (11759, 11790), True, 'import tkinter as tk\n'), ((11807, 11856), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Fichero de configuración"""'}), "(window, text='Fichero de configuración')\n", (11815, 11856), True, 'import tkinter as tk\n'), ((11872, 11911), 'tkinter.Label', 'tk.Label', (['window'], {'text': '"""Fichero de LOG"""'}), "(window, text='Fichero de LOG')\n", (11880, 11911), True, 'import tkinter as tk\n'), ((12274, 12328), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Abrir grafico"""', 'command': 'graph'}), "(window, text='Abrir grafico', command=graph)\n", (12283, 
12328), True, 'import tkinter as tk\n'), ((12415, 12466), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Iniciar"""', 'command': 'initExam'}), "(window, text='Iniciar', command=initExam)\n", (12424, 12466), True, 'import tkinter as tk\n'), ((12583, 12628), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Parar"""', 'command': 'stop'}), "(window, text='Parar', command=stop)\n", (12592, 12628), True, 'import tkinter as tk\n'), ((12717, 12786), 'tkinter.Button', 'tk.Button', (['window'], {'text': '"""Guardar configuración"""', 'command': 'exportConfig'}), "(window, text='Guardar configuración', command=exportConfig)\n", (12726, 12786), True, 'import tkinter as tk\n'), ((13177, 13216), 'logging.critical', 'logging.critical', (['"""EXAMEN INTERRUMPIDO"""'], {}), "('EXAMEN INTERRUMPIDO')\n", (13193, 13216), False, 'import logging\n'), ((13282, 13314), 'logging.critical', 'logging.critical', (['"""HIDS CERRADO"""'], {}), "('HIDS CERRADO')\n", (13298, 13314), False, 'import logging\n'), ((13319, 13330), 'os._exit', 'os._exit', (['(1)'], {}), '(1)\n', (13327, 13330), False, 'import os\n'), ((13555, 13695), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(asctime)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'filename': 'filename', 'level': 'logging.INFO'}), "(format='%(levelname)s:%(asctime)s: %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S', filename=filename, level=logging.INFO)\n", (13574, 13695), False, 'import logging\n'), ((511, 534), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (532, 534), False, 'import datetime\n'), ((2191, 2231), 'os.path.join', 'os.path.join', (['"""c:/top_secret"""', '"""log.log"""'], {}), "('c:/top_secret', 'log.log')\n", (2203, 2231), False, 'import os\n'), ((6112, 6135), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6133, 6135), False, 'import datetime\n'), ((6877, 6925), 'logging.info', 'logging.info', (['"""Hashes importados correctamente!"""'], {}), "('Hashes importados correctamente!')\n", (6889, 6925), False, 'import logging\n'), ((9996, 10019), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10017, 10019), False, 'import datetime\n'), ((10397, 10415), 'logging.info', 'logging.info', (['strr'], {}), '(strr)\n', (10409, 10415), False, 'import logging\n'), ((10990, 11025), 'smtplib.SMTP', 'smtplib.SMTP', (['"""smtp.gmail.com"""', '(587)'], {}), "('smtp.gmail.com', 587)\n", (11002, 11025), False, 'import smtplib\n'), ((2366, 2407), 'os.path.join', 'os.path.join', (['"""C:\\\\top_secret"""', '"""log.log"""'], {}), "('C:\\\\top_secret', 'log.log')\n", (2378, 2407), False, 'import os\n'), ((3772, 3835), 'logging.info', 'logging.info', (['"""La configuración se ha importado correctamente!"""'], {}), "('La configuración se ha importado correctamente!')\n", (3784, 3835), False, 'import logging\n'), ((4609, 4676), 'logging.info', 'logging.info', (['"""Archivo de configuración creado satisfactoriamente!"""'], {}), "('Archivo de configuración creado satisfactoriamente!')\n", (4621, 4676), False, 'import logging\n'), ((6946, 6992), 'logging.error', 'logging.error', (['"""Error al importar los hashes!"""'], {}), "('Error al importar los hashes!')\n", (6959, 6992), False, 'import logging\n'), ((10286, 10309), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10307, 10309), False, 'import datetime\n'), ((2253, 2293), 'os.path.join', 'os.path.join', (['"""c:/top_secret"""', '"""log.log"""'], {}), "('c:/top_secret', 'log.log')\n", (2265, 
2293), False, 'import os\n'), ((3951, 4003), 'logging.error', 'logging.error', (['"""Error al importar la configuración!"""'], {}), "('Error al importar la configuración!')\n", (3964, 4003), False, 'import logging\n'), ((4706, 4798), 'logging.error', 'logging.error', (['"""Error al crear el archivo de configuración, problema con los permisos?"""'], {}), "(\n 'Error al crear el archivo de configuración, problema con los permisos?')\n", (4719, 4798), False, 'import logging\n'), ((8403, 8426), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8424, 8426), False, 'import datetime\n'), ((13365, 13387), 'pathlib.Path', 'Path', (['"""C:\\\\top_secret"""'], {}), "('C:\\\\top_secret')\n", (13369, 13387), False, 'from pathlib import Path\n'), ((1167, 1191), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1179, 1191), False, 'import os\n'), ((9494, 9517), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9515, 9517), False, 'import datetime\n'), ((3075, 3095), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (3090, 3095), False, 'import os\n'), ((13466, 13486), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (13481, 13486), False, 'import os\n'), ((1319, 1343), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1331, 1343), False, 'import os\n'), ((5046, 5066), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (5061, 5066), False, 'import os\n'), ((5904, 5924), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (5919, 5924), False, 'import os\n'), ((1546, 1570), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1558, 1570), False, 'import os\n'), ((6485, 6505), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (6500, 6505), False, 'import os\n'), ((1773, 1797), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (1785, 1797), False, 'import os\n'), ((4206, 4226), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (4221, 4226), False, 'import os\n'), ((1995, 2019), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (2007, 2019), False, 'import os\n')] |
import logging
import os
import requests
import urllib
import json
try:
from alerta.plugins import app # alerta >= 5.0
except ImportError:
from alerta.app import app # alerta < 5.0
from alerta.plugins import PluginBase
LOG = logging.getLogger("alerta.plugins.matrix")
MATRIX_HOMESERVER_URL = [
os.environ.get("MATRIX_HOMESERVER") or app.config["MATRIX_HOMESERVER"],
"/_matrix/client/r0/rooms/",
urllib.parse.quote(os.environ.get("MATRIX_ROOM") or app.config["MATRIX_ROOM"], ":"),
"/send/m.room.message"
]
MATRIX_ACCESS_TOKEN = os.environ.get("MATRIX_ACCESS_TOKEN") or app.config["MATRIX_ACCESS_TOKEN"]
MATRIX_MESSAGE_TYPE = os.environ.get("MATRIX_MESSAGE_TYPE") or app.config.get("MATRIX_MESSAGE_TYPE", "notice")
MATRIX_MESSAGE_TYPES = {
"notice": "m.notice",
"text": "m.text"
}
DASHBOARD_URL = os.environ.get("DASHBOARD_URL") or app.config.get("DASHBOARD_URL", "")
SEVERITY_ICON = {
"critical": "🔴 ",
"warning": "⚠️ ",
"ok": "✅ ",
"cleared": "✅ ",
"normal": "✅ ",
}
class SendMessage(PluginBase):
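    """Forward each new (non-repeat) alert to a Matrix room via the Matrix client-server API."""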
def pre_receive(self, alert):
return alert
def post_receive(self, alert):
if alert.repeat:
return
severity = SEVERITY_ICON.get(alert.severity, "")
body = "{}{}: {} alert for {} \n{} - {} - {} \n{} \nDate: {}".format(
severity,
alert.environment,
alert.severity.capitalize(),
",".join(alert.service),
alert.resource,
alert.event,
alert.value,
alert.text,
alert.create_time,
)
formatted_body = "{}<strong>{}: {} alert for {} </br>{} - {} - {} </strong></br>{} </br><strong>Date: </strong> {} | <a rel='noopener' href='{}/#/alert/{}'>View alert</a>".format(
severity,
alert.environment,
alert.severity.capitalize(),
",".join(alert.service),
alert.resource,
alert.event,
alert.value,
alert.text,
alert.create_time,
DASHBOARD_URL,
alert.id,
)
payload = {
"msgtype": MATRIX_MESSAGE_TYPES.get(MATRIX_MESSAGE_TYPE, "m.notice"),
"format": "org.matrix.custom.html",
"body": body,
"formatted_body": formatted_body,
}
LOG.debug("Matrix: %s", payload)
try:
r = requests.post(
"".join(MATRIX_HOMESERVER_URL),
headers={"Authorization": "Bearer " + MATRIX_ACCESS_TOKEN},
data=json.dumps(payload).encode("utf-8"),
timeout=2,
)
except Exception as e:
raise RuntimeError("Matrix: ERROR - %s" % e)
LOG.debug("Matrix: %s - %s", r.status_code, r.text)
def status_change(self, alert, status, text):
return
| [
"logging.getLogger",
"json.dumps",
"os.environ.get",
"alerta.app.app.config.get"
] | [((238, 280), 'logging.getLogger', 'logging.getLogger', (['"""alerta.plugins.matrix"""'], {}), "('alerta.plugins.matrix')\n", (255, 280), False, 'import logging\n'), ((557, 594), 'os.environ.get', 'os.environ.get', (['"""MATRIX_ACCESS_TOKEN"""'], {}), "('MATRIX_ACCESS_TOKEN')\n", (571, 594), False, 'import os\n'), ((654, 691), 'os.environ.get', 'os.environ.get', (['"""MATRIX_MESSAGE_TYPE"""'], {}), "('MATRIX_MESSAGE_TYPE')\n", (668, 691), False, 'import os\n'), ((695, 742), 'alerta.app.app.config.get', 'app.config.get', (['"""MATRIX_MESSAGE_TYPE"""', '"""notice"""'], {}), "('MATRIX_MESSAGE_TYPE', 'notice')\n", (709, 742), False, 'from alerta.app import app\n'), ((833, 864), 'os.environ.get', 'os.environ.get', (['"""DASHBOARD_URL"""'], {}), "('DASHBOARD_URL')\n", (847, 864), False, 'import os\n'), ((868, 903), 'alerta.app.app.config.get', 'app.config.get', (['"""DASHBOARD_URL"""', '""""""'], {}), "('DASHBOARD_URL', '')\n", (882, 903), False, 'from alerta.app import app\n'), ((312, 347), 'os.environ.get', 'os.environ.get', (['"""MATRIX_HOMESERVER"""'], {}), "('MATRIX_HOMESERVER')\n", (326, 347), False, 'import os\n'), ((440, 469), 'os.environ.get', 'os.environ.get', (['"""MATRIX_ROOM"""'], {}), "('MATRIX_ROOM')\n", (454, 469), False, 'import os\n'), ((2584, 2603), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (2594, 2603), False, 'import json\n')] |
import concurrent.futures
import glob
import json
import multiprocessing as mp
import os
import pickle
import re
import sys
import threading
import time
from multiprocessing import Process
from threading import Thread
#####################################################
# SETTINGS #
#####################################################
FILES = glob.glob(os.getcwd() + "/tmp_dumps/wd*")
PATH_TO_EXT_ID_PREDICATES = "dicts/identifier_predicates.pickle"
PATH_TO_GEO_PREDICATES = "dicts/geo_predicates.pickle"
PATH_TO_OUTPUT_FILE = "dumps/wikidata_clean.nt"
#####################################################
# CONSTANTS #
#####################################################
LABELS_PATTERN = re.compile('".*"@((?!en)[a-z][a-z])')
ENGLISH_LABELS_PATTERN = re.compile('".*"@en')
URI_PATTERN = re.compile("[A-z]*://[A-z.-/#]+.*")
LABELS = {}
DESCRIPTIONS = {}
write_lock = threading.Lock()
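# write_lock serialises appends to the shared output file across workers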
with open(PATH_TO_GEO_PREDICATES, "rb") as geo_file:
GEO_PREDS = pickle.load(geo_file)
with open(PATH_TO_EXT_ID_PREDICATES, "rb") as identifiers:
EXT_IDS = pickle.load(identifiers)
#####################################################
# FUNCTIONS #
#####################################################
def prune_triples(file, worker_id):
"""
Stepwise filter out triples.
    In this version, you can enable or disable (comment out) whichever filters you like.
    Note that the runtime is significantly slower than the runtime
    of 'prune_triples_all'.
Return: None
"""
buf_triples_count = 0
buf_triples = ""
labels = {}
aliases = {}
descriptions = {}
wikipedia_mappings = {}
inverse_wikipedia_mappings = {}
with open(file, "r") as fp:
line = fp.readline()
while line:
currentLine = line
# note that o is not only the object, but the object + " .", the line ending
s, p, o = currentLine.split(" ", 2)
line = fp.readline()
"""
Extract english labels (+aliases) and descriptions.
This needs to be done before filtering predicates,
to ensure that predicate labels are extracted.
Further, wikipedia mappings need to be extracted before
skipping lines with non wikidata subjects.
"""
# extract wikipedia mappings
extract_wikipedia_mappings(s, p, o, wikipedia_mappings, inverse_wikipedia_mappings)
# filter triples without a wikidata id as subject
if filter_non_wikidata_id_subjects(s):
continue
# extract labels
extract_english_labels(s, p, o, labels)
# extract aliases
extract_english_aliases(s, p, o, aliases)
# extract descriptions
extract_english_descriptions(s, p, o, descriptions)
"""
Prune triples
"""
if filter_schema_predicates(p):
continue
if filter_values(s, o):
continue
if filter_labels(o):
continue
if filter_references(s, o):
continue
if filter_uri_objects(o):
continue
if filter_non_english_labels(o):
continue
if filter_external_id_predicates(p):
continue
if filter_unknown_values(s, o):
continue
if filter_geo_predicates(p):
continue
if filter_other_objects(o):
continue
# if triple was not filtered out, include it into output-buffer
buf_triples_count += 1
buf_triples += currentLine
# store triples, if buffer exceeded
if buf_triples_count > 1000000:
write_lock.acquire()
try:
with open(PATH_TO_OUTPUT_FILE, "a") as output:
output.write(buf_triples)
except Exception as e:
print(e)
finally:
write_lock.release()
buf_triples_count = 0
buf_triples = ""
# store remaining triples in buffer
write_lock.acquire()
try:
with open(PATH_TO_OUTPUT_FILE, "a") as output:
output.write(buf_triples)
except Exception as e:
print(e)
finally:
write_lock.release()
# store labels dict for worker
for k in labels:
labels[k] = list(labels[k])
with open("dicts/labels_" + str(worker_id) + ".json", "w") as outfile:
outfile.write(json.dumps(labels, separators=(",", ":")))
# store aliases dict for worker
for k in aliases:
aliases[k] = list(aliases[k])
with open("dicts/aliases_" + str(worker_id) + ".json", "w") as outfile:
outfile.write(json.dumps(aliases, separators=(",", ":")))
# store description dict for worker
with open("dicts/descriptions_" + str(worker_id) + ".json", "w") as outfile:
outfile.write(json.dumps(descriptions, separators=(",", ":")))
# store wikipedia_mappings dict for worker
with open("dicts/wikipedia_mappings_" + str(worker_id) + ".json", "w") as outfile:
outfile.write(json.dumps(wikipedia_mappings, separators=(",", ":")))
# store inverse_wikipedia_mappings dict for worker
with open("dicts/inverse_wikipedia_mappings_" + str(worker_id) + ".json", "w") as outfile:
outfile.write(json.dumps(inverse_wikipedia_mappings, separators=(",", ":")))
#####################################################
# Dict Extraction #
#####################################################
def extract_english_labels(s, p, o, labels):
if re.match(ENGLISH_LABELS_PATTERN, o):
if p == "<http://schema.org/name>" or p.endswith("altLabel"):
s = s.rsplit("/", 1)[1][:-1]
o = o.replace('"', "").split("@")[0]
if labels.get(s):
labels[s].add(o)
else:
labels[s] = set()
labels[s].add(o)
def extract_english_aliases(s, p, o, aliases):
# this method needs to be placed after the filter_non_wikidata_subjects function
if "abel" in p and re.match(ENGLISH_LABELS_PATTERN, o):
s = s.rsplit("/", 1)[1][:-1]
o = o.replace('"', "").split("@")[0]
if aliases.get(s):
aliases[s].add(o)
else:
aliases[s] = set()
aliases[s].add(o)
def extract_english_descriptions(s, p, o, descriptions):
if p == "<http://schema.org/description>" and re.match(ENGLISH_LABELS_PATTERN, o):
s = s.rsplit("/", 1)[1][:-1]
o = o.replace('"', "").split("@")[0]
descriptions[s] = o
def extract_wikipedia_mappings(s, p, o, wikipedia, inverse_wikipedia_mappings):
if s.startswith("<https://en.wikipedia.org/wiki/") and o.startswith("<http://www.wikidata.org/entity/"):
wikipedia_name = s.replace("<https://en.wikipedia.org/wiki/", "")[:-1]
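        # o still carries the trailing '> .' and newline of the N-Triples line,
        # hence the [:-4] trim below.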
wikidata_id = o.rsplit("/", 1)[1][:-4]
wikipedia[wikidata_id] = wikipedia_name
inverse_wikipedia_mappings[wikipedia_name] = wikidata_id
##########################################################
# Filters #
# more fine-grained than required for easier adjustment #
##########################################################
def filter_non_wikidata_subjects(s):
if not "<http://www.wikidata.org/entity/" in s:
return True
return False
def filter_non_wikidata_id_subjects(s):
if "<http://www.wikidata.org/entity/Q" in s:
return False
if "<http://www.wikidata.org/entity/statement/Q" in s:
return False
return True
def filter_schema_predicates(p):
if "<http://www.w3.org" in p:
return True
if "<http://wikiba.se" in p:
return True
if "<http://schema.org" in p:
return True
return False
def filter_external_id_predicates(p):
if p.rsplit("/", 1)[1][:-1] in EXT_IDS:
return True
return False
def filter_uri_objects(o):
if o.startswith("<http://www.wikidata.org"):
return False
if o.startswith("<http"):
return True
if re.match(URI_PATTERN, o):
return True
return False
def filter_non_english_labels(o):
if re.match(ENGLISH_LABELS_PATTERN, o):
return False
if re.match(LABELS_PATTERN, o):
return True
return False
def filter_predicates_as_subjects(s):
if "<http://www.wikidata.org/entity/P" in s:
return True
if "<http://www.wikidata.org/entity/p" in s:
return True
if "<http://www.wikidata.org/entity/statement/P" in s:
return True
if "<http://www.wikidata.org/entity/statement/p" in s:
return True
return False
def filter_values(s, o):
if "<http://www.wikidata.org/value" in o:
return True
return False
def filter_references(s, o):
if "<http://www.wikidata.org/reference" in o:
return True
return False
def filter_unknown_values(s, o):
if "_:genid" in o:
return True
return False
def filter_geo_predicates(p):
if p.rsplit("/", 1)[1][:-1] in GEO_PREDS:
return True
return False
def filter_other_objects(o):
if (
not o[0] == '"'
and not "<http://www.wikidata.org/entity/Q" in o
and not "<http://www.wikidata.org/entity/statement/Q" in o
):
return True
return False
def filter_labels(o):
if re.match(LABELS_PATTERN, o):
return True
return False
#####################################################
# MAIN #
#####################################################
if __name__ == "__main__":
workers = len(FILES)
start_time = time.time()
processes = []
#############################################################
# Start processes for different partitions of the dump #
#############################################################
for i in range(workers):
print(FILES[i])
p = Process(
target=prune_triples,
args=(
FILES[i],
i,
),
)
processes.append(p)
p.start()
#########################
# Join all threads #
#########################
for i in range(workers):
processes[i].join()
end = time.time()
###############################################
# Merge extracted dicts from all workers #
###############################################
labels = {}
aliases = {}
descriptions = {}
wikipedia_mappings = {}
inverse_wikipedia_mappings = {}
for i in range(workers):
# load label dict for worker
with open("dicts/labels_" + str(i) + ".json", "r") as file:
tmp_labels = json.load(file)
for k in tmp_labels:
if labels.get(k):
labels[k] += tmp_labels[k]
else:
labels[k] = tmp_labels[k]
# load aliases dict for worker
with open("dicts/aliases_" + str(i) + ".json", "r") as file:
tmp_aliases = json.load(file)
for k in tmp_aliases:
if aliases.get(k):
aliases[k] += tmp_aliases[k]
else:
aliases[k] = tmp_aliases[k]
# load description dict for worker
with open("dicts/descriptions_" + str(i) + ".json", "r") as file:
tmp_description = json.load(file)
descriptions.update(tmp_description)
# load label dict for worker
with open("dicts/wikipedia_mappings_" + str(i) + ".json", "r") as file:
tmp_wikipedia_mappings = json.load(file)
wikipedia_mappings.update(tmp_wikipedia_mappings)
# load description dict for worker
with open("dicts/inverse_wikipedia_mappings_" + str(i) + ".json", "r") as file:
tmp_inverse_wikipedia_mappings = json.load(file)
inverse_wikipedia_mappings.update(tmp_inverse_wikipedia_mappings)
# remove worker-dicts
os.remove("dicts/labels_" + str(i) + ".json")
os.remove("dicts/aliases_" + str(i) + ".json")
os.remove("dicts/descriptions_" + str(i) + ".json")
os.remove("dicts/wikipedia_mappings_" + str(i) + ".json")
os.remove("dicts/inverse_wikipedia_mappings_" + str(i) + ".json")
#########################
# Store all dicts #
#########################
# store labels dict
with open("dicts/labels_dict.json", "w") as outfile:
outfile.write(json.dumps(labels, separators=(",", ":")))
# store aliases dict
with open("dicts/aliases_dict.json", "w") as outfile:
outfile.write(json.dumps(aliases, separators=(",", ":")))
# store description dict
with open("dicts/descriptions_dict.json", "w") as outfile:
outfile.write(json.dumps(descriptions, separators=(",", ":")))
# store wikipedia_mappings dict
with open("dicts/wikipedia_mappings.json", "w") as outfile:
outfile.write(json.dumps(wikipedia_mappings, separators=(",", ":")))
# store inverse_wikipedia_mappings dict
with open("dicts/inverse_wikipedia_mappings.json", "w") as outfile:
outfile.write(json.dumps(inverse_wikipedia_mappings, separators=(",", ":")))
print("Time(filter_wikidata): " + str(end - start_time))
| [
"re.compile",
"threading.Lock",
"multiprocessing.Process",
"json.dumps",
"pickle.load",
"re.match",
"os.getcwd",
"json.load",
"time.time"
] | [((779, 816), 're.compile', 're.compile', (['"""".*"@((?!en)[a-z][a-z])"""'], {}), '(\'".*"@((?!en)[a-z][a-z])\')\n', (789, 816), False, 'import re\n'), ((842, 863), 're.compile', 're.compile', (['"""".*"@en"""'], {}), '(\'".*"@en\')\n', (852, 863), False, 'import re\n'), ((878, 913), 're.compile', 're.compile', (['"""[A-z]*://[A-z.-/#]+.*"""'], {}), "('[A-z]*://[A-z.-/#]+.*')\n", (888, 913), False, 'import re\n'), ((957, 973), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (971, 973), False, 'import threading\n'), ((1044, 1065), 'pickle.load', 'pickle.load', (['geo_file'], {}), '(geo_file)\n', (1055, 1065), False, 'import pickle\n'), ((1140, 1164), 'pickle.load', 'pickle.load', (['identifiers'], {}), '(identifiers)\n', (1151, 1164), False, 'import pickle\n'), ((5967, 6002), 're.match', 're.match', (['ENGLISH_LABELS_PATTERN', 'o'], {}), '(ENGLISH_LABELS_PATTERN, o)\n', (5975, 6002), False, 'import re\n'), ((8468, 8492), 're.match', 're.match', (['URI_PATTERN', 'o'], {}), '(URI_PATTERN, o)\n', (8476, 8492), False, 'import re\n'), ((8574, 8609), 're.match', 're.match', (['ENGLISH_LABELS_PATTERN', 'o'], {}), '(ENGLISH_LABELS_PATTERN, o)\n', (8582, 8609), False, 'import re\n'), ((8639, 8666), 're.match', 're.match', (['LABELS_PATTERN', 'o'], {}), '(LABELS_PATTERN, o)\n', (8647, 8666), False, 'import re\n'), ((9759, 9786), 're.match', 're.match', (['LABELS_PATTERN', 'o'], {}), '(LABELS_PATTERN, o)\n', (9767, 9786), False, 'import re\n'), ((10058, 10069), 'time.time', 'time.time', ([], {}), '()\n', (10067, 10069), False, 'import time\n'), ((10687, 10698), 'time.time', 'time.time', ([], {}), '()\n', (10696, 10698), False, 'import time\n'), ((399, 410), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (408, 410), False, 'import os\n'), ((6469, 6504), 're.match', 're.match', (['ENGLISH_LABELS_PATTERN', 'o'], {}), '(ENGLISH_LABELS_PATTERN, o)\n', (6477, 6504), False, 'import re\n'), ((6829, 6864), 're.match', 're.match', (['ENGLISH_LABELS_PATTERN', 'o'], {}), '(ENGLISH_LABELS_PATTERN, o)\n', (6837, 6864), False, 'import re\n'), ((10352, 10401), 'multiprocessing.Process', 'Process', ([], {'target': 'prune_triples', 'args': '(FILES[i], i)'}), '(target=prune_triples, args=(FILES[i], i))\n', (10359, 10401), False, 'from multiprocessing import Process\n'), ((11133, 11148), 'json.load', 'json.load', (['file'], {}), '(file)\n', (11142, 11148), False, 'import json\n'), ((11465, 11480), 'json.load', 'json.load', (['file'], {}), '(file)\n', (11474, 11480), False, 'import json\n'), ((11816, 11831), 'json.load', 'json.load', (['file'], {}), '(file)\n', (11825, 11831), False, 'import json\n'), ((12035, 12050), 'json.load', 'json.load', (['file'], {}), '(file)\n', (12044, 12050), False, 'import json\n'), ((12289, 12304), 'json.load', 'json.load', (['file'], {}), '(file)\n', (12298, 12304), False, 'import json\n'), ((12915, 12956), 'json.dumps', 'json.dumps', (['labels'], {'separators': "(',', ':')"}), "(labels, separators=(',', ':'))\n", (12925, 12956), False, 'import json\n'), ((13063, 13105), 'json.dumps', 'json.dumps', (['aliases'], {'separators': "(',', ':')"}), "(aliases, separators=(',', ':'))\n", (13073, 13105), False, 'import json\n'), ((13221, 13268), 'json.dumps', 'json.dumps', (['descriptions'], {'separators': "(',', ':')"}), "(descriptions, separators=(',', ':'))\n", (13231, 13268), False, 'import json\n'), ((13392, 13445), 'json.dumps', 'json.dumps', (['wikipedia_mappings'], {'separators': "(',', ':')"}), "(wikipedia_mappings, separators=(',', ':'))\n", (13402, 13445), False, 'import json\n'), 
((13585, 13646), 'json.dumps', 'json.dumps', (['inverse_wikipedia_mappings'], {'separators': "(',', ':')"}), "(inverse_wikipedia_mappings, separators=(',', ':'))\n", (13595, 13646), False, 'import json\n'), ((4776, 4817), 'json.dumps', 'json.dumps', (['labels'], {'separators': "(',', ':')"}), "(labels, separators=(',', ':'))\n", (4786, 4817), False, 'import json\n'), ((5033, 5075), 'json.dumps', 'json.dumps', (['aliases'], {'separators': "(',', ':')"}), "(aliases, separators=(',', ':'))\n", (5043, 5075), False, 'import json\n'), ((5232, 5279), 'json.dumps', 'json.dumps', (['descriptions'], {'separators': "(',', ':')"}), "(descriptions, separators=(',', ':'))\n", (5242, 5279), False, 'import json\n'), ((5449, 5502), 'json.dumps', 'json.dumps', (['wikipedia_mappings'], {'separators': "(',', ':')"}), "(wikipedia_mappings, separators=(',', ':'))\n", (5459, 5502), False, 'import json\n'), ((5688, 5749), 'json.dumps', 'json.dumps', (['inverse_wikipedia_mappings'], {'separators': "(',', ':')"}), "(inverse_wikipedia_mappings, separators=(',', ':'))\n", (5698, 5749), False, 'import json\n')] |
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import Callback, History
import tensorflow.keras.backend as K
from keras.objectives import mean_squared_error
from PIL import Image
import numpy as np
import pickle, glob, random, os, zipfile
from tensorflow.contrib.tpu.python.tpu import keras_support
def enumerate_layers():
    # For inspection: check how layer names and indices map onto the model summary
resnet = ResNet50(include_top=False, weights="imagenet", input_shape=(224, 224, 3))
resnet.summary()
for i, layer in enumerate(resnet.layers):
print(i, layer.name)
def create_resnet():
    # Model for transfer learning
resnet = ResNet50(include_top=False, weights="imagenet", input_shape=(224, 224, 3))
for i in range(82):
        # Train from res4a_branch2a (index 82) onwards; freeze everything before it
resnet.layers[i].trainable=False
x = GlobalAveragePooling2D()(resnet.output)
    # 9 landmarks x 2 coordinates
x = Dense(18, activation="sigmoid")(x)
model = Model(resnet.inputs, x)
return model
class CatGenerator:
def __init__(self):
with open("cats-dataset/cat_annotation.dat", "rb") as fp:
self.annotation_data = pickle.load(fp)
def flow_from_directory(self, batch_size, train=True, shuffle=True, use_data_augmentation=True):
source_dir = "cats-dataset/train" if train else "cats-dataset/test"
images = glob.glob(source_dir+"/*.jpg")
X_cache, y_cache = [], []
while True:
if shuffle:
np.random.shuffle(images)
for img_path in images:
with Image.open(img_path) as img:
width, height = img.size
img_array = np.asarray(img.resize((224, 224), Image.BILINEAR))
basename = os.path.basename(img_path)
data = self.annotation_data[basename]
                # Normalize the annotation to the 0-1 range
annotation = np.zeros((9,2), dtype=np.float32)
annotation[:, 0] = data[2][:, 0] / width
annotation[:, 1] = data[2][:, 1] / height
annotation = np.clip(annotation, 0.0, 1.0)
if train and use_data_augmentation:
                    # Horizontal flip
if random.random() >= 0.5:
img_array = img_array[:, ::-1, :]
annotation[:, 0] = 1 - annotation[:, 0]
                        # Swap the left and right eyes
annotation[0, :], annotation[1, :] = annotation[1, :], annotation[0, :].copy()
                        # Swap the left and right ears
annotation[3:6, :], annotation[6:9, :] = annotation[6:9, :], annotation[3:6, :].copy()
# PCA Color Augmentation
img_array = self.pca_color_augmentation(img_array)
X_cache.append(img_array)
y_cache.append(np.ravel(annotation))
if len(X_cache) == batch_size:
X_batch = np.asarray(X_cache, dtype=np.float32) / 255.0
y_batch = np.asarray(y_cache, dtype=np.float32)
X_cache, y_cache = [], []
yield X_batch, y_batch
def pca_color_augmentation(self, image_array_input):
assert image_array_input.ndim == 3 and image_array_input.shape[2] == 3
assert image_array_input.dtype == np.uint8
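        # AlexNet-style PCA color augmentation: shift every pixel along the
        # eigenvectors of the per-image RGB covariance, weighted by small random factors.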
img = image_array_input.reshape(-1, 3).astype(np.float32)
img = (img - np.mean(img, axis=0)) / np.std(img, axis=0)
cov = np.cov(img, rowvar=False)
lambd_eigen_value, p_eigen_vector = np.linalg.eig(cov)
rand = np.random.randn(3) * 0.1
delta = np.dot(p_eigen_vector, rand*lambd_eigen_value)
delta = (delta * 255.0).astype(np.int32)[np.newaxis, np.newaxis, :]
img_out = np.clip(image_array_input + delta, 0, 255).astype(np.uint8)
return img_out
def loss_function_simple(y_true, y_pred):
return mean_squared_error(y_true, y_pred)
def loss_function_with_distance(y_true, y_pred):
point_mse = mean_squared_error(y_true, y_pred)
distance_mse = mean_squared_error(y_true[:, 2:18]-y_true[:, 0:16], y_pred[:, 2:18]-y_pred[:, 0:16])
return point_mse + distance_mse
def loss_function_with_multiple_distance(y_true, y_pred):
error = mean_squared_error(y_true, y_pred)
for i in range(8):
error += mean_squared_error(y_true[:, ((i+1)*2):18]-y_true[:, 0:(16-i*2)], y_pred[:, ((i+1)*2):18]-y_pred[:, 0:(16-i*2)])
return error
# Function that computes the area of a triangle
def sarrus_formula(p1, p2, p3):
    # Shift coordinates so that p1 is the origin
a = p2 - p1
b = p3 - p1
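    # Triangle area = |a x b| / 2, i.e. half the magnitude of the 2-D cross product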
return K.abs(a[:,0]*b[:,1] - a[:,1]*b[:,0]) / 2.0
from itertools import combinations
def loss_function_multiple_distance_and_triangle(y_true, y_pred):
    # Point loss
error = mean_squared_error(y_true, y_pred)
    # Line (distance) loss
for i in range(8):
error += mean_squared_error(y_true[:, ((i+1)*2):18]-y_true[:, 0:(16-i*2)], y_pred[:, ((i+1)*2):18]-y_pred[:, 0:(16-i*2)])
    # Area (triangle) loss
for comb in combinations(range(9), 3):
s_true = sarrus_formula(
y_true[:, (comb[0]*2):(comb[0]*2+2)],
y_true[:, (comb[1]*2):(comb[1]*2+2)],
y_true[:, (comb[2]*2):(comb[2]*2+2)]
)
s_pred = sarrus_formula(
y_pred[:, (comb[0]*2):(comb[0]*2+2)],
y_pred[:, (comb[1]*2):(comb[1]*2+2)],
y_pred[:, (comb[2]*2):(comb[2]*2+2)]
)
error += K.abs(s_true - s_pred)
return error
def calc_area_loss(ear_true, ear_pred):
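    # Penalize predicted ear points that fall outside the axis-aligned bounding
    # box spanned by the ground-truth ear points.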
left_x = K.expand_dims(K.min(ear_true[:, ::2], axis=-1))
left_y = K.expand_dims(K.min(ear_true[:, 1::2], axis=-1))
right_x = K.expand_dims(K.max(ear_true[:, ::2], axis=-1))
right_y = K.expand_dims(K.max(ear_true[:, 1::2], axis=-1))
    # Predicted x, y
pred_x = ear_pred[:, ::2]
pred_y = ear_pred[:, 1::2]
    # Penalty
penalty_x = K.maximum(left_x - pred_x, 0.0) + K.maximum(pred_x - right_x, 0.0)
penalty_y = K.maximum(left_y - pred_y, 0.0) + K.maximum(pred_y - right_y, 0.0)
return K.mean(penalty_x + penalty_y, axis=-1)
def loss_function_multiple_distance_and_area(y_true, y_pred):
    # Point loss
error = mean_squared_error(y_true, y_pred)
    # Line (distance) loss
for i in range(8):
error += mean_squared_error(y_true[:, ((i+1)*2):18]-y_true[:, 0:(16-i*2)], y_pred[:, ((i+1)*2):18]-y_pred[:, 0:(16-i*2)])
    # Left and right ear areas
left_ear_true, left_ear_pred = y_true[:, 6:12], y_pred[:, 6:12]
right_ear_true, right_ear_pred = y_true[:, 12:18], y_pred[:, 12:18]
error += calc_area_loss(left_ear_true, left_ear_pred)
error += calc_area_loss(right_ear_true, right_ear_pred)
return error
class CatsCallback(Callback):
def __init__(self, model):
self.model = model
self.reset()
def reset(self):
self.min_val_loss = np.inf
def on_train_begin(self, logs):
self.reset()
def on_epoch_end(self, epoch, logs):
if logs["val_loss"] < self.min_val_loss:
self.model.save_weights("./cats_weights.hdf5", save_format="h5")
self.min_val_loss = logs["val_loss"]
print("Weights saved.", self.min_val_loss)
def train(batch_size, use_tpu, load_existing_weights):
model = create_resnet()
gen = CatGenerator()
if load_existing_weights:
model.load_weights("weights.hdf5")
model.compile(tf.train.MomentumOptimizer(1e-3, 0.9), loss=loss_function_multiple_distance_and_area, metrics=[loss_function_simple])
if use_tpu:
tpu_grpc_url = "grpc://"+os.environ["COLAB_TPU_ADDR"]
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
strategy = keras_support.TPUDistributionStrategy(tpu_cluster_resolver)
model = tf.contrib.tpu.keras_to_tpu_model(model, strategy=strategy)
cb = CatsCallback(model)
history = History()
model.fit_generator(gen.flow_from_directory(batch_size, True), steps_per_epoch=6996//batch_size,
validation_data=gen.flow_from_directory(batch_size, False), validation_steps=2999//batch_size,
callbacks=[cb, history], epochs=200)
with open("history.dat", "wb") as fp:
pickle.dump(history.history, fp)
with zipfile.ZipFile("cats_result.zip", "w") as zip:
zip.write("history.dat")
zip.write("cats_weights.hdf5")
if __name__ == "__main__":
train(512, True, False)
| [
"numpy.clip",
"zipfile.ZipFile",
"tensorflow.contrib.tpu.python.tpu.keras_support.TPUDistributionStrategy",
"tensorflow.keras.layers.Dense",
"numpy.cov",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"keras.objectives.mean_squared_error",
"tensorflow.contrib.tpu.keras_to_tpu_model",
"numpy.mean",
"tensorflow.keras.backend.mean",
"tensorflow.keras.backend.maximum",
"numpy.asarray",
"numpy.dot",
"tensorflow.keras.backend.max",
"tensorflow.keras.models.Model",
"glob.glob",
"numpy.linalg.eig",
"tensorflow.train.MomentumOptimizer",
"pickle.load",
"numpy.std",
"tensorflow.keras.backend.min",
"numpy.random.randn",
"tensorflow.keras.applications.resnet50.ResNet50",
"PIL.Image.open",
"pickle.dump",
"tensorflow.keras.callbacks.History",
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"numpy.zeros",
"os.path.basename",
"numpy.ravel",
"random.random",
"tensorflow.keras.backend.abs",
"numpy.random.shuffle"
] | [((548, 622), 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)'}), "(include_top=False, weights='imagenet', input_shape=(224, 224, 3))\n", (556, 622), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), ((769, 843), 'tensorflow.keras.applications.resnet50.ResNet50', 'ResNet50', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)'}), "(include_top=False, weights='imagenet', input_shape=(224, 224, 3))\n", (777, 843), False, 'from tensorflow.keras.applications.resnet50 import ResNet50\n'), ((1067, 1090), 'tensorflow.keras.models.Model', 'Model', (['resnet.inputs', 'x'], {}), '(resnet.inputs, x)\n', (1072, 1090), False, 'from tensorflow.keras.models import Model\n'), ((3995, 4029), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4013, 4029), False, 'from keras.objectives import mean_squared_error\n'), ((4096, 4130), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4114, 4130), False, 'from keras.objectives import mean_squared_error\n'), ((4150, 4242), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['(y_true[:, 2:18] - y_true[:, 0:16])', '(y_pred[:, 2:18] - y_pred[:, 0:16])'], {}), '(y_true[:, 2:18] - y_true[:, 0:16], y_pred[:, 2:18] -\n y_pred[:, 0:16])\n', (4168, 4242), False, 'from keras.objectives import mean_squared_error\n'), ((4342, 4376), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4360, 4376), False, 'from keras.objectives import mean_squared_error\n'), ((4818, 4852), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (4836, 4852), False, 'from keras.objectives import mean_squared_error\n'), ((6064, 6102), 'tensorflow.keras.backend.mean', 'K.mean', (['(penalty_x + penalty_y)'], {'axis': '(-1)'}), '(penalty_x + penalty_y, axis=-1)\n', (6070, 6102), True, 'import tensorflow.keras.backend as K\n'), ((6188, 6222), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (6206, 6222), False, 'from keras.objectives import mean_squared_error\n'), ((7893, 7902), 'tensorflow.keras.callbacks.History', 'History', ([], {}), '()\n', (7900, 7902), False, 'from tensorflow.keras.callbacks import Callback, History\n'), ((955, 979), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (977, 979), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense\n'), ((1020, 1051), 'tensorflow.keras.layers.Dense', 'Dense', (['(18)'], {'activation': '"""sigmoid"""'}), "(18, activation='sigmoid')\n", (1025, 1051), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense\n'), ((1465, 1497), 'glob.glob', 'glob.glob', (["(source_dir + '/*.jpg')"], {}), "(source_dir + '/*.jpg')\n", (1474, 1497), False, 'import pickle, glob, random, os, zipfile\n'), ((3569, 3594), 'numpy.cov', 'np.cov', (['img'], {'rowvar': '(False)'}), '(img, rowvar=False)\n', (3575, 3594), True, 'import numpy as np\n'), ((3639, 3657), 'numpy.linalg.eig', 'np.linalg.eig', (['cov'], {}), '(cov)\n', (3652, 3657), True, 'import numpy as np\n'), ((3715, 3763), 'numpy.dot', 'np.dot', (['p_eigen_vector', '(rand * lambd_eigen_value)'], {}), '(p_eigen_vector, rand * lambd_eigen_value)\n', (3721, 3763), 
True, 'import numpy as np\n'), ((4417, 4546), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['(y_true[:, (i + 1) * 2:18] - y_true[:, 0:16 - i * 2])', '(y_pred[:, (i + 1) * 2:18] - y_pred[:, 0:16 - i * 2])'], {}), '(y_true[:, (i + 1) * 2:18] - y_true[:, 0:16 - i * 2], \n y_pred[:, (i + 1) * 2:18] - y_pred[:, 0:16 - i * 2])\n', (4435, 4546), False, 'from keras.objectives import mean_squared_error\n'), ((4650, 4694), 'tensorflow.keras.backend.abs', 'K.abs', (['(a[:, 0] * b[:, 1] - a[:, 1] * b[:, 0])'], {}), '(a[:, 0] * b[:, 1] - a[:, 1] * b[:, 0])\n', (4655, 4694), True, 'import tensorflow.keras.backend as K\n'), ((4904, 5033), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['(y_true[:, (i + 1) * 2:18] - y_true[:, 0:16 - i * 2])', '(y_pred[:, (i + 1) * 2:18] - y_pred[:, 0:16 - i * 2])'], {}), '(y_true[:, (i + 1) * 2:18] - y_true[:, 0:16 - i * 2], \n y_pred[:, (i + 1) * 2:18] - y_pred[:, 0:16 - i * 2])\n', (4922, 5033), False, 'from keras.objectives import mean_squared_error\n'), ((5472, 5494), 'tensorflow.keras.backend.abs', 'K.abs', (['(s_true - s_pred)'], {}), '(s_true - s_pred)\n', (5477, 5494), True, 'import tensorflow.keras.backend as K\n'), ((5580, 5612), 'tensorflow.keras.backend.min', 'K.min', (['ear_true[:, ::2]'], {'axis': '(-1)'}), '(ear_true[:, ::2], axis=-1)\n', (5585, 5612), True, 'import tensorflow.keras.backend as K\n'), ((5641, 5674), 'tensorflow.keras.backend.min', 'K.min', (['ear_true[:, 1::2]'], {'axis': '(-1)'}), '(ear_true[:, 1::2], axis=-1)\n', (5646, 5674), True, 'import tensorflow.keras.backend as K\n'), ((5704, 5736), 'tensorflow.keras.backend.max', 'K.max', (['ear_true[:, ::2]'], {'axis': '(-1)'}), '(ear_true[:, ::2], axis=-1)\n', (5709, 5736), True, 'import tensorflow.keras.backend as K\n'), ((5766, 5799), 'tensorflow.keras.backend.max', 'K.max', (['ear_true[:, 1::2]'], {'axis': '(-1)'}), '(ear_true[:, 1::2], axis=-1)\n', (5771, 5799), True, 'import tensorflow.keras.backend as K\n'), ((5903, 5934), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(left_x - pred_x)', '(0.0)'], {}), '(left_x - pred_x, 0.0)\n', (5912, 5934), True, 'import tensorflow.keras.backend as K\n'), ((5937, 5969), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(pred_x - right_x)', '(0.0)'], {}), '(pred_x - right_x, 0.0)\n', (5946, 5969), True, 'import tensorflow.keras.backend as K\n'), ((5986, 6017), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(left_y - pred_y)', '(0.0)'], {}), '(left_y - pred_y, 0.0)\n', (5995, 6017), True, 'import tensorflow.keras.backend as K\n'), ((6020, 6052), 'tensorflow.keras.backend.maximum', 'K.maximum', (['(pred_y - right_y)', '(0.0)'], {}), '(pred_y - right_y, 0.0)\n', (6029, 6052), True, 'import tensorflow.keras.backend as K\n'), ((6274, 6403), 'keras.objectives.mean_squared_error', 'mean_squared_error', (['(y_true[:, (i + 1) * 2:18] - y_true[:, 0:16 - i * 2])', '(y_pred[:, (i + 1) * 2:18] - y_pred[:, 0:16 - i * 2])'], {}), '(y_true[:, (i + 1) * 2:18] - y_true[:, 0:16 - i * 2], \n y_pred[:, (i + 1) * 2:18] - y_pred[:, 0:16 - i * 2])\n', (6292, 6403), False, 'from keras.objectives import mean_squared_error\n'), ((7405, 7443), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', (['(0.001)', '(0.9)'], {}), '(0.001, 0.9)\n', (7431, 7443), True, 'import tensorflow as tf\n'), ((7633, 7693), 'tensorflow.contrib.cluster_resolver.TPUClusterResolver', 'tf.contrib.cluster_resolver.TPUClusterResolver', (['tpu_grpc_url'], {}), '(tpu_grpc_url)\n', (7679, 7693), True, 'import tensorflow as tf\n'), ((7713, 7772), 
'tensorflow.contrib.tpu.python.tpu.keras_support.TPUDistributionStrategy', 'keras_support.TPUDistributionStrategy', (['tpu_cluster_resolver'], {}), '(tpu_cluster_resolver)\n', (7750, 7772), False, 'from tensorflow.contrib.tpu.python.tpu import keras_support\n'), ((7789, 7848), 'tensorflow.contrib.tpu.keras_to_tpu_model', 'tf.contrib.tpu.keras_to_tpu_model', (['model'], {'strategy': 'strategy'}), '(model, strategy=strategy)\n', (7822, 7848), True, 'import tensorflow as tf\n'), ((8236, 8268), 'pickle.dump', 'pickle.dump', (['history.history', 'fp'], {}), '(history.history, fp)\n', (8247, 8268), False, 'import pickle, glob, random, os, zipfile\n'), ((8279, 8318), 'zipfile.ZipFile', 'zipfile.ZipFile', (['"""cats_result.zip"""', '"""w"""'], {}), "('cats_result.zip', 'w')\n", (8294, 8318), False, 'import pickle, glob, random, os, zipfile\n'), ((1254, 1269), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1265, 1269), False, 'import pickle, glob, random, os, zipfile\n'), ((3534, 3553), 'numpy.std', 'np.std', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3540, 3553), True, 'import numpy as np\n'), ((3674, 3692), 'numpy.random.randn', 'np.random.randn', (['(3)'], {}), '(3)\n', (3689, 3692), True, 'import numpy as np\n'), ((1590, 1615), 'numpy.random.shuffle', 'np.random.shuffle', (['images'], {}), '(images)\n', (1607, 1615), True, 'import numpy as np\n'), ((1857, 1883), 'os.path.basename', 'os.path.basename', (['img_path'], {}), '(img_path)\n', (1873, 1883), False, 'import pickle, glob, random, os, zipfile\n'), ((2000, 2034), 'numpy.zeros', 'np.zeros', (['(9, 2)'], {'dtype': 'np.float32'}), '((9, 2), dtype=np.float32)\n', (2008, 2034), True, 'import numpy as np\n'), ((2178, 2207), 'numpy.clip', 'np.clip', (['annotation', '(0.0)', '(1.0)'], {}), '(annotation, 0.0, 1.0)\n', (2185, 2207), True, 'import numpy as np\n'), ((3510, 3530), 'numpy.mean', 'np.mean', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (3517, 3530), True, 'import numpy as np\n'), ((3857, 3899), 'numpy.clip', 'np.clip', (['(image_array_input + delta)', '(0)', '(255)'], {}), '(image_array_input + delta, 0, 255)\n', (3864, 3899), True, 'import numpy as np\n'), ((1673, 1693), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1683, 1693), False, 'from PIL import Image\n'), ((2931, 2951), 'numpy.ravel', 'np.ravel', (['annotation'], {}), '(annotation)\n', (2939, 2951), True, 'import numpy as np\n'), ((3107, 3144), 'numpy.asarray', 'np.asarray', (['y_cache'], {'dtype': 'np.float32'}), '(y_cache, dtype=np.float32)\n', (3117, 3144), True, 'import numpy as np\n'), ((2311, 2326), 'random.random', 'random.random', ([], {}), '()\n', (2324, 2326), False, 'import pickle, glob, random, os, zipfile\n'), ((3031, 3068), 'numpy.asarray', 'np.asarray', (['X_cache'], {'dtype': 'np.float32'}), '(X_cache, dtype=np.float32)\n', (3041, 3068), True, 'import numpy as np\n')] |
"""
MySQL4 - Simple to use pymysql wrappers. An understanding of SQL statements is preferred.
4.0 DatabaseConnection class created.
RawQuery class created.
TableQuest class created.
"""
import pymysql
import traceback
__author__ = 'CplBDJ'
__version__ = '4.0a'
username = 'root'
password = ''
class DatabaseConnection:
"""
Allows a connection to the database server.
"""
def __init__(self, host='localhost', user=None, passwd=None, port=3306):
self.host = host
self.user = user or username
self.passwd = passwd or password
self.port = port
def new(self):
return pymysql.connect(host=self.host,
user=self.user,
passwd=self.passwd,
port=self.port)
def query(self, sql:str) -> dict:
"""
Does the actual sql query. Returns a dict with keys:
'error' None or the traceback
'sql' The sql passed.
'data' The data that has been requested.
'columns' The data's column information.
"""
connection = self.new()
cursor = connection.cursor()
error = None
data = list()
columns = list()
try:
cursor.execute(sql)
data = cursor.fetchall()
columns = cursor.description
except pymysql.err.MySQLError:
error = traceback.format_exc()
finally:
connection.close()
return dict(error=error,
sql=sql,
data=data,
columns=columns)
def submit(self, sql: str) -> dict:
"""
Does the actual sql submission.
Returns a dict with keys:
'error' None or the traceback
'sql' The sql passed.
"""
connection = self.new()
cursor = connection.cursor()
error = None
try:
cursor.execute(sql)
connection.commit()
error = False
except pymysql.err.MySQLError:
connection.rollback()
error = traceback.format_exc()
finally:
connection.close()
return dict(error=error,
sql=sql)
class RawQuery:
"""
    A rawer interface to the SQL server. This can be used on its own, but its purpose is to be subclassed.
This shows the use of TableQuery, defined elsewhere.
Allows you to use "with" statement or just call it normally.
"where" is the SQL WHERE statement.
"like" is the SQL LIKE statement.
"sort" is the SQL SORT BY statement.
    The "where" and "like" parameters are structured the same. They can be used interchangeably.
>>> from pprint import pprint # For readability
You can pass the username & password to the class.
>>> with TableQuery(user='user', passwd='password') as query:
... pprint(query('Apps', 'Users', select='User', where={'User': 'nick'}))
[{'User': 'nick'}]
>>> with TableQuery(user='user', passwd='password') as query:
... pprint(query('Apps', 'Users', select='User, Name', where='User like "%k%"'))
[{'Name': 'Nick', 'User': 'nick'}, {'Name': 'Blake', 'User': 'blake'}]
You can also set the module's username and password, that way you don't have to pass them.
>>> username = 'user'
>>> password = 'password'
>>> query = TableQuery()
>>> pprint(query('Apps', 'Users'))
[{'Initals': None,
'Name': None,
'UID': 0,
'User': 'root'},
{'Initals': 'NRJ',
'Name': 'Nick',
'UID': 1,
'User': 'nick'},
{'Initals': 'TJ',
'Name': 'Tony',
'UID': 2,
'User': 'tony'},
{'Initals': 'JB',
'Name': 'Jesse',
'UID': 3,
'User': 'jesse'},
{'Initals': 'MM',
'Name': '<NAME>',
'UID': 4,
'User': 'mightymouse'},
{'Initals': 'BO',
'Name': 'Blake',
'UID': 5,
'User': 'blake'}]
>>> pprint(query('Apps', 'Users', like=[('User', '%m%'), ('User', '%n%')]))
[{'Initals': None,
'Name': None,
'UID': 0,
'User': 'root'},
{'Initals': 'NRJ',
'Name': 'Nick',
'UID': 1,
'User': 'nick'},
{'Initals': 'TJ',
'Name': 'Tony',
'UID': 2,
'User': 'tony'},
{'Initals': 'JB',
'Name': 'Jesse',
'UID': 3,
'User': 'jesse'},
{'Initals': 'MM',
'Name': '<NAME>',
'UID': 4,
'User': 'mightymouse'},
{'Initals': 'BO',
'Name': 'Blake',
'UID': 5,
'User': 'blake'}]
>>> pprint(query('Apps', 'Users', where='User="tony" or User="root"', sort='Name'))
[{'Initals': None,
'Name': None,
'UID': 0,
'User': 'root'},
{'Initals': 'TJ',
'Name': 'Tony',
'UID': 2,
'User': 'tony'}]
>>> pprint(query('Apps', 'Users', sql='WHERE User="tony" or User="root" ORDER BY `Name`'))
[{'Initals': None,
'Name': None,
'UID': 0,
'User': 'root'},
{'Initals': 'TJ',
'Name': 'Tony',
'UID': 2,
'User': 'tony'}]
# Errors can be accessed using the 'errors' method. It will only show the last query's error.
>>> pprint(query('Show', 'error'))
[]
>>> pprint(query.error)
('Traceback (most recent call last):\n'
' File "/home/nick/Scripts/python3/MySQL4/MySQL4.py", line 52, in query\n'
' cursor.execute(sql)\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/cursors.py", '
'line 170, in execute\n'
' result = self._query(query)\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/cursors.py", '
'line 328, in _query\n'
' conn.query(q)\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/connections.py", '
'line 517, in query\n'
' self._affected_rows = self._read_query_result(unbuffered=unbuffered)\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/connections.py", '
'line 732, in _read_query_result\n'
' result.read()\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/connections.py", '
'line 1075, in read\n'
' first_packet = self.connection._read_packet()\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/connections.py", '
'line 684, in _read_packet\n'
' packet.check_error()\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/protocol.py", '
'line 220, in check_error\n'
' err.raise_mysql_exception(self._data)\n'
' File '
'"/home/nick/Scripts/python3/MySQL4/lib/python3.8/site-packages/pymysql/err.py", '
'line 109, in raise_mysql_exception\n'
' raise errorclass(errno, errval)\n'
'pymysql.err.ProgrammingError: (1146, "Table \'Show.error\' doesn\'t '
'exist")\n',
'SELECT * FROM `Show`.`error` None')
"""
def __init__(self, host='localhost', user=None, passwd=None, port=3306):
self._connection = DatabaseConnection(host, user, passwd, port)
self.error = None, None
def __call__(self, database: str, table: str, select:str = '*',
where: str or dict or list = None, # where x = "y"
like: str or dict or list = None, # where x like "y"
sort: str = None,
sql: str = None):
self.error = None, None
_sql = list()
        if isinstance(where, dict):
            spam = [f'`{key}`="{where[key]}"' for key in where]
            _sql.append(f'WHERE {" and ".join(spam)}')
        elif isinstance(where, list):
            spam = [f'`{item[0]}`="{item[1]}"' for item in where]
            _sql.append(f'WHERE {" and ".join(spam)}')
        elif where and isinstance(where, str):
            _sql.append(f'WHERE {where}')
        # A LIKE filter either extends an existing WHERE clause or starts its own.
        prefix = 'and' if _sql else 'WHERE'
        if isinstance(like, dict):
            spam = [f'`{key}` like "{like[key]}"' for key in like]
            _sql.append(f'{prefix} {" and ".join(spam)}')
        elif isinstance(like, list):
            spam = [f'`{item[0]}` like "{item[1]}"' for item in like]
            _sql.append(f'{prefix} {" and ".join(spam)}')
        elif like and isinstance(like, str):
            _sql.append(f'{prefix} {like}')
if sort:
_sql.append(f'ORDER BY `{sort}`')
if _sql:
sql = ' '.join(_sql)
return self._parse(self._connection.query(f'SELECT {select} FROM `{database}`.`{table}` {sql}'))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return None
def _parse(self, response):
"""
Subclasses should override this and return the data as requested.
"""
return response
class TableQuery(RawQuery):
"""
Subclasses RawQuery, check RawQuery for usage examples. Returns the table as a dict.
"""
def _parse(self, response):
"""
Overrides the parent classes method.
Returns the table data as a dict.
"""
if response['error']:
self.error = response['error'], response['sql']
return tuple()
keys = tuple(key[0] for key in response['columns'])
return [dict(zip(keys, line)) for line in response['data']]
| [
"traceback.format_exc",
"pymysql.connect"
] | [((650, 738), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'self.host', 'user': 'self.user', 'passwd': 'self.passwd', 'port': 'self.port'}), '(host=self.host, user=self.user, passwd=self.passwd, port=\n self.port)\n', (665, 738), False, 'import pymysql\n'), ((1456, 1478), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1476, 1478), False, 'import traceback\n'), ((2180, 2202), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2200, 2202), False, 'import traceback\n')] |
import os
import torch
import torch.nn as nn
import torch.optim as optim
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import loggers as pl_loggers
from datetime import datetime
from argparse import ArgumentParser
from omegaconf import OmegaConf
class SimpleNet(nn.Module):
def __init__(self, ic, hc, oc):
super().__init__()
self.net = nn.Sequential(
nn.Linear(ic, hc), nn.ReLU(),
nn.Linear(hc, hc), nn.ReLU(),
nn.Linear(hc, oc)
)
def forward(self, x):
return self.net(x.reshape(-1, 28*28))
class BoilerNet(pl.LightningModule):
def __init__(self, args):
super().__init__()
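        # Store the argparse Namespace so it is checkpointed and available as self.hparams.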
self.save_hyperparameters(args)
cfg = OmegaConf.load(args.config)
self.cfg = cfg
self.model = SimpleNet(cfg.model.ic, cfg.model.hc, cfg.model.oc)
self.loss = nn.CrossEntropyLoss()
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
pred = self(x)
loss = self.loss(pred, y)
output = {"loss": loss}
self.logger.experiment.add_scalar("Training Loss", loss.item(), self.global_step)
return output
def validation_step(self, batch, batch_idx):
x, y = batch
pred = self(x)
loss = self.loss(pred, y)
acc = (pred.argmax(1) == y).sum() / float(x.shape[0])
output = {
"batch_val_loss": loss,
"batch_val_acc": acc
}
return output
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([o["batch_val_loss"] for o in outputs]).mean()
avg_acc = torch.stack([o["batch_val_acc"] for o in outputs]).mean()
output = {
"val_loss": avg_loss,
"val_acc": avg_acc
}
self.logger.experiment.add_scalar("Validation Loss", avg_loss.item(), self.global_step)
self.logger.experiment.add_scalar("Validation Acc", avg_acc.item(), self.global_step)
return output
def configure_optimizers(self):
lr = self.cfg.training.lr
beta1 = self.cfg.training.beta1
beta2 = self.cfg.training.beta2
opt = optim.Adam(self.model.parameters(), lr=lr, betas=(beta1, beta2))
return opt
def train_dataloader(self):
num_gpus = self.hparams.num_gpus
num_nodes = self.hparams.num_nodes
dist_mode = self.hparams.dist_mode
grad_acc = self.hparams.grad_acc
bs, n_workers = self.cfg.training.bs, self.cfg.training.n_workers
bs = inv_effective_bs(bs, num_gpus, num_nodes, dist_mode, grad_acc)
tf = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
ds = datasets.MNIST(self.cfg.data.train_dir, train=True, download=True, transform=tf)
return DataLoader(ds, batch_size=bs, num_workers=n_workers)
def val_dataloader(self):
num_gpus = self.hparams.num_gpus
num_nodes = self.hparams.num_nodes
dist_mode = self.hparams.dist_mode
grad_acc = self.hparams.grad_acc
bs, n_workers = self.cfg.training.bs, self.cfg.training.n_workers
bs = inv_effective_bs(bs, num_gpus, num_nodes, dist_mode, grad_acc)
tf = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5])
])
ds = datasets.MNIST(self.cfg.data.val_dir, train=False, download=True, transform=tf)
return DataLoader(ds, batch_size=bs, num_workers=n_workers)
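# Helpers to convert between the per-dataloader batch size and the effective
# batch size: 'dp' splits a single batch across GPUs, 'ddp'/'horovod' load one
# batch per process (multiply by GPUs and nodes), and 'ddp2' loads one batch
# per node. Gradient accumulation scales the effective batch size further.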
def effective_bs(bs, num_gpus, num_nodes, dist_mode, grad_acc):
if dist_mode == 'dp':
eff_bs = bs
elif dist_mode == 'ddp' or dist_mode == 'horovod':
eff_bs = bs * num_gpus * num_nodes
elif dist_mode == 'ddp2':
eff_bs = bs * num_nodes
eff_bs *= grad_acc
return eff_bs
def inv_effective_bs(eff_bs, num_gpus, num_nodes, dist_mode, grad_acc):
if dist_mode == 'dp':
bs = eff_bs
elif dist_mode == 'ddp' or dist_mode == 'horovod':
bs = eff_bs // num_gpus // num_nodes
elif dist_mode == 'ddp2':
bs = eff_bs // num_nodes
bs //= grad_acc
return bs
parser = ArgumentParser()
parser.add_argument('-c', '--config', type=str, default='config.yaml', help='.yaml config file')
parser.add_argument('-g', '--num-gpus', type=int, default=2, help='gpus')
parser.add_argument('-n', '--num-nodes', type=int, default=1, help='nodes')
parser.add_argument('-d', '--dist-mode', type=str, default='ddp', help='distributed modes')
parser.add_argument('-a', '--grad-acc', type=int, default=1, help='accumulated gradients')
parser.add_argument('-m', '--model-path-ckpt', type=str, help='model checkpoint path')
parser.add_argument('-r', '--resume-path-ckpt', type=str, default=None, help='resume training checkpoint path')
parser.add_argument('-e', '--experiment-name', type=str, default=None, help='experiment name')
parser.add_argument('-t', '--top-k-save', type=int, default=5, help='save top k')
parser.add_argument('-f', '--fast-dev-run', action='store_true', help='perform fast dev run')
args = parser.parse_args()
model = BoilerNet(args)
cfg = OmegaConf.load(args.config)
experiment_name = args.experiment_name
if experiment_name is None:
experiment_name = datetime.now().strftime("%m%d%Y-%H:%M:%S")
ckpt_pth = os.path.join(args.model_path_ckpt, experiment_name)
log_dir = os.path.join(cfg.logging.log_dir, experiment_name)
os.makedirs(ckpt_pth, exist_ok=True)
ckpt_callback = ModelCheckpoint(
filepath=ckpt_pth,
monitor='val_loss',
verbose=True,
save_top_k=args.top_k_save
)
trainer = Trainer(
logger=pl_loggers.TensorBoardLogger(log_dir),
checkpoint_callback=ckpt_callback,
weights_save_path=ckpt_pth,
gpus=args.num_gpus,
accumulate_grad_batches=args.grad_acc,
    distributed_backend=args.dist_mode,
resume_from_checkpoint=args.resume_path_ckpt,
gradient_clip_val=cfg.training.gradient_clip,
fast_dev_run=args.fast_dev_run,
max_epochs=cfg.training.epoch_num
)
trainer.fit(model) | [
"pytorch_lightning.callbacks.ModelCheckpoint",
"torch.nn.ReLU",
"torch.nn.CrossEntropyLoss",
"os.makedirs",
"argparse.ArgumentParser",
"torch.stack",
"os.path.join",
"omegaconf.OmegaConf.load",
"pytorch_lightning.loggers.TensorBoardLogger",
"datetime.datetime.now",
"torchvision.datasets.MNIST",
"torch.utils.data.DataLoader",
"torch.nn.Linear",
"torchvision.transforms.Normalize",
"torchvision.transforms.ToTensor"
] | [((4518, 4534), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (4532, 4534), False, 'from argparse import ArgumentParser\n'), ((5495, 5522), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['args.config'], {}), '(args.config)\n', (5509, 5522), False, 'from omegaconf import OmegaConf\n'), ((5668, 5719), 'os.path.join', 'os.path.join', (['args.model_path_ckpt', 'experiment_name'], {}), '(args.model_path_ckpt, experiment_name)\n', (5680, 5719), False, 'import os\n'), ((5730, 5780), 'os.path.join', 'os.path.join', (['cfg.logging.log_dir', 'experiment_name'], {}), '(cfg.logging.log_dir, experiment_name)\n', (5742, 5780), False, 'import os\n'), ((5782, 5818), 'os.makedirs', 'os.makedirs', (['ckpt_pth'], {'exist_ok': '(True)'}), '(ckpt_pth, exist_ok=True)\n', (5793, 5818), False, 'import os\n'), ((5835, 5935), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'ckpt_pth', 'monitor': '"""val_loss"""', 'verbose': '(True)', 'save_top_k': 'args.top_k_save'}), "(filepath=ckpt_pth, monitor='val_loss', verbose=True,\n save_top_k=args.top_k_save)\n", (5850, 5935), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((921, 948), 'omegaconf.OmegaConf.load', 'OmegaConf.load', (['args.config'], {}), '(args.config)\n', (935, 948), False, 'from omegaconf import OmegaConf\n'), ((1074, 1095), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1093, 1095), True, 'import torch.nn as nn\n'), ((3077, 3162), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['self.cfg.data.train_dir'], {'train': '(True)', 'download': '(True)', 'transform': 'tf'}), '(self.cfg.data.train_dir, train=True, download=True, transform=tf\n )\n', (3091, 3162), False, 'from torchvision import datasets, transforms\n'), ((3173, 3225), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': 'bs', 'num_workers': 'n_workers'}), '(ds, batch_size=bs, num_workers=n_workers)\n', (3183, 3225), False, 'from torch.utils.data import DataLoader\n'), ((3720, 3799), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['self.cfg.data.val_dir'], {'train': '(False)', 'download': '(True)', 'transform': 'tf'}), '(self.cfg.data.val_dir, train=False, download=True, transform=tf)\n', (3734, 3799), False, 'from torchvision import datasets, transforms\n'), ((3815, 3867), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': 'bs', 'num_workers': 'n_workers'}), '(ds, batch_size=bs, num_workers=n_workers)\n', (3825, 3867), False, 'from torch.utils.data import DataLoader\n'), ((5981, 6018), 'pytorch_lightning.loggers.TensorBoardLogger', 'pl_loggers.TensorBoardLogger', (['log_dir'], {}), '(log_dir)\n', (6009, 6018), True, 'from pytorch_lightning import loggers as pl_loggers\n'), ((573, 590), 'torch.nn.Linear', 'nn.Linear', (['ic', 'hc'], {}), '(ic, hc)\n', (582, 590), True, 'import torch.nn as nn\n'), ((592, 601), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (599, 601), True, 'import torch.nn as nn\n'), ((615, 632), 'torch.nn.Linear', 'nn.Linear', (['hc', 'hc'], {}), '(hc, hc)\n', (624, 632), True, 'import torch.nn as nn\n'), ((634, 643), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (641, 643), True, 'import torch.nn as nn\n'), ((657, 674), 'torch.nn.Linear', 'nn.Linear', (['hc', 'oc'], {}), '(hc, oc)\n', (666, 674), True, 'import torch.nn as nn\n'), ((5613, 5627), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5625, 5627), False, 'from datetime import datetime\n'), ((1851, 1902), 'torch.stack', 'torch.stack', (["[o['batch_val_loss'] for o in 
outputs]"], {}), "([o['batch_val_loss'] for o in outputs])\n", (1862, 1902), False, 'import torch\n'), ((1928, 1978), 'torch.stack', 'torch.stack', (["[o['batch_val_acc'] for o in outputs]"], {}), "([o['batch_val_acc'] for o in outputs])\n", (1939, 1978), False, 'import torch\n'), ((2983, 3004), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3002, 3004), False, 'from torchvision import datasets, transforms\n'), ((3018, 3052), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (3038, 3052), False, 'from torchvision import datasets, transforms\n'), ((3626, 3647), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3645, 3647), False, 'from torchvision import datasets, transforms\n'), ((3661, 3695), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (3681, 3695), False, 'from torchvision import datasets, transforms\n')] |
import os
from merfi.collector import RepoCollector
import merfi
from merfi import logger, util
from merfi.backends import base
class Gpg(base.BaseBackend):
help_menu = 'gpg handler for signing files'
_help = """
Signs files with gpg. Crawls a given path looking for Debian repos.
Default behavior will perform these actions on 'Release' files:
gpg --armor --detach-sig --output Release.gpg Release
gpg --clearsign --output InRelease Release
%s
Options:
Positional Arguments:
[path] The path to crawl for signing repos. Defaults to current
working directory
"""
executable = 'gpg'
name = 'gpg'
def sign(self):
logger.info('Starting path collection, looking for files to sign')
repos = RepoCollector(self.path)
if repos:
logger.info('%s repos found' % len(repos))
else:
logger.warning('No paths found that matched')
for repo in repos:
# Debian "Release" files:
for path in repo.releases:
self.sign_release(path)
def sign_release(self, path):
""" Sign a "Release" file from a Debian repo. """
if merfi.config.get('check'):
new_gpg_path = path.split('Release')[0]+'Release.gpg'
new_in_path = path.split('Release')[0]+'InRelease'
logger.info('[CHECKMODE] signing: %s' % path)
logger.info('[CHECKMODE] signed: %s' % new_gpg_path)
logger.info('[CHECKMODE] signed: %s' % new_in_path)
else:
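            # Run gpg from the repository directory so the relative output
            # names ('Release.gpg', 'InRelease') are written next to the Release file.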
os.chdir(os.path.dirname(path))
detached = ['gpg', '--batch', '--yes', '--armor', '--detach-sig',
'--output', 'Release.gpg', 'Release']
clearsign = ['gpg', '--batch', '--yes', '--clearsign', '--output',
'InRelease', 'Release']
logger.info('signing: %s' % path)
util.run(detached)
util.run(clearsign)
| [
"merfi.logger.warning",
"merfi.collector.RepoCollector",
"merfi.config.get",
"merfi.logger.info",
"os.path.dirname",
"merfi.util.run"
] | [((676, 742), 'merfi.logger.info', 'logger.info', (['"""Starting path collection, looking for files to sign"""'], {}), "('Starting path collection, looking for files to sign')\n", (687, 742), False, 'from merfi import logger, util\n'), ((759, 783), 'merfi.collector.RepoCollector', 'RepoCollector', (['self.path'], {}), '(self.path)\n', (772, 783), False, 'from merfi.collector import RepoCollector\n'), ((1180, 1205), 'merfi.config.get', 'merfi.config.get', (['"""check"""'], {}), "('check')\n", (1196, 1205), False, 'import merfi\n'), ((884, 929), 'merfi.logger.warning', 'logger.warning', (['"""No paths found that matched"""'], {}), "('No paths found that matched')\n", (898, 929), False, 'from merfi import logger, util\n'), ((1348, 1393), 'merfi.logger.info', 'logger.info', (["('[CHECKMODE] signing: %s' % path)"], {}), "('[CHECKMODE] signing: %s' % path)\n", (1359, 1393), False, 'from merfi import logger, util\n'), ((1406, 1458), 'merfi.logger.info', 'logger.info', (["('[CHECKMODE] signed: %s' % new_gpg_path)"], {}), "('[CHECKMODE] signed: %s' % new_gpg_path)\n", (1417, 1458), False, 'from merfi import logger, util\n'), ((1471, 1522), 'merfi.logger.info', 'logger.info', (["('[CHECKMODE] signed: %s' % new_in_path)"], {}), "('[CHECKMODE] signed: %s' % new_in_path)\n", (1482, 1522), False, 'from merfi import logger, util\n'), ((1861, 1894), 'merfi.logger.info', 'logger.info', (["('signing: %s' % path)"], {}), "('signing: %s' % path)\n", (1872, 1894), False, 'from merfi import logger, util\n'), ((1907, 1925), 'merfi.util.run', 'util.run', (['detached'], {}), '(detached)\n', (1915, 1925), False, 'from merfi import logger, util\n'), ((1938, 1957), 'merfi.util.run', 'util.run', (['clearsign'], {}), '(clearsign)\n', (1946, 1957), False, 'from merfi import logger, util\n'), ((1558, 1579), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (1573, 1579), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""cmdln_args.py: Module to parse command line arguments in creating sample or
validation data
"""
import argparse
import os
from ..util import ext_to_delimiter
from .._version import __version__
__author__ = "<NAME>"
def get_create_sample():
"""Get the passed command line arguments
:return: a dictionary of command line arguments
+------------------+------------------------------------------------------+
| Key | Value |
+==================+======================================================+
| num_samples | (int, positive) The number of blocks/trajectories |
| | replications to compute the statistics of the |
| | elementary effects |
+------------------+------------------------------------------------------+
| num_dimensions | (int, positive) The number of dimensions/parameters |
+------------------+------------------------------------------------------+
| method | ("srs", "lhs", "sobol", "lhs-opt") Sampling scheme to|
| | the design of experiments. By default "srs" is chosen|
+------------------+------------------------------------------------------+
| filename | (None or str) The output filename. |
| | By default: "{}_{}_{}.{}" .format(method, |
| | num_samples, num_dimensions, delimiter) |
+------------------+------------------------------------------------------+
| delimiter | ("csv", "tsv", "txt") the delimiter of the design |
| | matrix file. By default: "csv" |
+------------------+------------------------------------------------------+
| seed_number | (None or int, >0) The random seed number (irrelevant |
| | for non-randomized Sobol' sequence) |
+------------------+------------------------------------------------------+
| direction_numbers| (str) the fullname (file+path) to the directions |
| | numbers file for Joe & Kuo Sobol' generator algorithm|
| | By default: "./dirnumfiles/new-joe-kuo-6.21201" |
+------------------+------------------------------------------------------+
| exclude_nominal | (bool) Flag whether to include or exclude the {0.5} |
| | parameter values from the design. By default: False |
+------------------+------------------------------------------------------+
| randomize_sobol | (bool) Flag whether to random shift the Sobol' |
| | sequence. By default: False |
+------------------+------------------------------------------------------+
| num_iterations | (100 or int, >0) the maximum number of outer |
| | iterations for optimizing the latin hypercube design |
+------------------+------------------------------------------------------+
"""
from .sobol import read_dirnumfile
parser = argparse.ArgumentParser(
description="gsa-module create_sample - Generate Design Matrix File"
)
# The number of samples
parser.add_argument(
"-n", "--num_samples",
type=int,
help="The number of samples",
required=True
)
# The number of dimension
parser.add_argument(
"-d", "--num_dimensions",
type=int,
help="The number of dimensions",
required=True
)
# The method to generate sample
parser.add_argument(
"-m", "--method",
type=str,
choices=["srs", "lhs", "sobol", "lhs-opt"],
required=False,
default="srs",
help="The statistical method to generate sample (default: %(default)s)"
)
# The random seed number
parser.add_argument(
"-s", "--seed_number",
type=int,
required=False,
help="The random seed number (irrelevant for non-randomized Sobol'"
" sequence)"
)
# the design matrix filename
parser.add_argument(
"-o", "--output_file",
type=str,
required=False,
help="The output filename"
)
# The delimiter
parser.add_argument(
"-sep", "--delimiter",
type=str,
choices=["csv", "tsv", "txt"],
required=False,
default="csv",
help="the delimiter for the file (default: %(default)s)"
)
# Print the version
parser.add_argument(
"-V", "--version",
action="version",
version="%(prog)s (gsa-module version {})" .format(__version__)
)
# Only for Sobol'
group_sobol = parser.add_argument_group("Sobol'",
"Options for Sobol' quasi-random")
# The path to Sobol' direction number file
group_sobol.add_argument(
"-dirnumfile", "--direction_numbers",
type=str,
required=False,
help="The path to Sobol' sequence generator direction numbers file"
" (default: built-in new-joe-kuo-6.21201)"
)
# Flag to include the nominal point in the design
group_sobol.add_argument(
"-excl_nom", "--exclude_nominal",
action="store_true",
required=False,
help="Exclude the nominal point in the design"
)
# Flag to randomize the sequence by random-shifting
group_sobol.add_argument(
"-rand", "--randomize_sobol",
action="store_true",
required=False,
help="Random shift the Sobol' sequence"
)
# Only for optimized lhs
group_lhs_opt = parser.add_argument_group("Optimized LHS",
"Options for optimized lhs")
# The number of iteration for optimization algorithm
group_lhs_opt.add_argument(
"-nopt", "--num_iterations",
type=int,
required=False,
default=100,
help="The maximum number of iterations for optimization of LHS"
" (default: 100 iterations)"
)
# Get the command line arguments
args = parser.parse_args()
# Check the validity of number of samples
if args.num_samples <= 0:
raise ValueError("Zero or negative number of samples")
# Check the validity of the number of dimensions
if args.num_dimensions <= 0:
raise ValueError("Zero or negative number of dimensions")
# Check the validity of inputs if Sobol' sequence is used
if args.method == "sobol":
if args.direction_numbers is not None:
if os.path.exists(args.direction_numbers):
direction_numbers = read_dirnumfile(args.direction_numbers,
args.num_dimensions)
else:
raise ValueError(
"Sobol' generator direction number file does not exist!")
else:
direction_numbers = None
else:
direction_numbers = None
# Check the delimiter
delimiter = ext_to_delimiter(args.delimiter)
# Check the random seed number
if args.seed_number is None:
seed_number = None
    elif args.seed_number < 0:
        raise ValueError("The random seed number must be non-negative")
else:
seed_number = args.seed_number
# Create default filename if not passed
if args.output_file is None:
output_file = "{}_{}_{}.{}" .format(args.method, args.num_samples,
args.num_dimensions,
args.delimiter)
else:
extension = args.output_file.split("/")[-1].split(".")[-1]
# Override the delimiter if it is assigned directly as an extension
if extension in ["csv", "tsv", "txt"]:
delimiter = ext_to_delimiter(extension)
else:
delimiter = ext_to_delimiter(args.delimiter)
output_file = args.output_file
    # Check the validity of the number of iterations for the optimized LHS
    if args.num_iterations is not None:
        if args.num_iterations <= 0:
            raise ValueError("Number of iterations must be greater than zero!")
# Return the parsed command line arguments as a dictionary
inputs = {"num_samples": args.num_samples,
"num_dimensions": args.num_dimensions,
"method": args.method,
"filename": output_file,
"delimiter": delimiter,
"seed_number": seed_number,
"direction_numbers": direction_numbers,
"exclude_nominal": args.exclude_nominal,
"randomize_sobol": args.randomize_sobol,
"num_iterations": args.num_iterations
}
return inputs
def get_create_validset():
"""Get the passed command line arguments for creating sample"""
import argparse
parser = argparse.ArgumentParser(
description="gsa-module create_validset - Generate Validation Data Set"
)
# The design matrix fullname
parser.add_argument(
"-dm", "--dm_fullname",
type=str,
required=True,
help="The design matrix fullname (file + path)",
)
# The number of test points
parser.add_argument(
"-n", "--num_tests",
type=int,
required=True,
help="The number of test points to be generated"
)
# The validation data set filename
parser.add_argument(
"-o", "--output_file",
type=str,
required=False,
help="The output filename"
)
# The number of candidates
parser.add_argument(
"-nc", "--num_candidates",
type=int,
required=False,
default=10000,
help="The number of candidates from the Hammersley sequence"
)
# Get the command line arguments
args = parser.parse_args()
    # Check the existence of the design matrix file
    if not os.path.exists(args.dm_fullname):
        raise ValueError("The design matrix file does not exist!")
    # Check the validity of number of test points
    if args.num_tests <= 0:
        raise ValueError("Zero or negative number of test points")
    # Check the validity of number of candidates
    if args.num_candidates <= 0:
        raise ValueError("Zero or negative number of candidates")
# Determine the delimiter inside the file
delimiter = args.dm_fullname.split("/")[-1].split(".")[-1]
if delimiter == "csv":
str_delimiter = ","
elif delimiter == "tsv":
str_delimiter = "\t"
else:
str_delimiter = " "
# Create default filename if not passed
if args.output_file is None:
dm_name = args.dm_fullname.split("/")[-1]
output_file = "{}_test_{}.{}" .format(dm_name.split(".")[0],
args.num_tests, delimiter)
else:
output_file = args.output_file
# Return the parsed command line arguments as a dictionary
inputs = {"dm_fullname": args.dm_fullname,
"num_tests": args.num_tests,
"num_candidates": args.num_candidates,
"filename": output_file,
"str_delimiter": str_delimiter
}
return inputs
| [
"os.path.exists",
"argparse.ArgumentParser"
] | [((3236, 3334), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""gsa-module create_sample - Generate Design Matrix File"""'}), "(description=\n 'gsa-module create_sample - Generate Design Matrix File')\n", (3259, 3334), False, 'import argparse\n'), ((9018, 9119), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""gsa-module create_validset - Generate Validation Data Set"""'}), "(description=\n 'gsa-module create_validset - Generate Validation Data Set')\n", (9041, 9119), False, 'import argparse\n'), ((6753, 6791), 'os.path.exists', 'os.path.exists', (['args.direction_numbers'], {}), '(args.direction_numbers)\n', (6767, 6791), False, 'import os\n')] |
from graphql_auth.schema import MeQuery
import graphene
import graphene.relay as relay
from .types import AllowAuthenticatedALUserType, AllowSelfALUserType
from ..utils import AllowAdministratorFilter
class UserQueries(MeQuery, graphene.ObjectType):
users = AllowAdministratorFilter(AllowAuthenticatedALUserType)
user = relay.Node.Field(AllowAuthenticatedALUserType)
me = graphene.Field(AllowSelfALUserType)
def resolve_me(root, info):
user = info.context.user
if user.is_authenticated:
return user
return None
| [
"graphene.Field",
"graphene.relay.Node.Field"
] | [((332, 378), 'graphene.relay.Node.Field', 'relay.Node.Field', (['AllowAuthenticatedALUserType'], {}), '(AllowAuthenticatedALUserType)\n', (348, 378), True, 'import graphene.relay as relay\n'), ((388, 423), 'graphene.Field', 'graphene.Field', (['AllowSelfALUserType'], {}), '(AllowSelfALUserType)\n', (402, 423), False, 'import graphene\n')] |
from ba_snake_js.envs.planar_snake_car.planar_snake_car_base import PlanarBase
from gym import spaces
import numpy as np
from utils.vrep_env import vrep
class PlanarLocomotion(PlanarBase):
# def __init__(self, server_addr='127.0.0.1', server_port=19997, scene_name='acmr_locomotion.ttt'):
# TODO change back?
def __init__(self, server_addr='127.0.0.1', server_port=19997, scene_name='2018-08-10-planar-locomotion2.ttt'):
super().__init__(server_addr=server_addr, server_port=server_port, scene_name=scene_name)
self.action_space = spaces.Box(low=-100, high=100, shape=(8,), dtype=np.float32)
def _make_action(self, action):
"""Query V-REP to make action
no return value
"""
# print(f'action: {action}')
assert isinstance(action, list) or isinstance(action, np.ndarray)
_, self.joint_positions, _, _ = self.RAPI_rc(
vrep.simxCallScriptFunction(self.cID, self.name_snake, 1, 'locomote', [], action, [],
bytearray(), vrep.simx_opmode_blocking))
# print(f'joint pos: {self.joint_positions}')
| [
"gym.spaces.Box"
] | [((561, 621), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-100)', 'high': '(100)', 'shape': '(8,)', 'dtype': 'np.float32'}), '(low=-100, high=100, shape=(8,), dtype=np.float32)\n', (571, 621), False, 'from gym import spaces\n')] |
import requests
def casesapi():
url = "https://moroccostats.herokuapp.com/stats/coronavirus/countries/morocco"
my_json = requests.get(url).json()
return (" عدد الحالات " + my_json['totalcases'] + " ماتو" + my_json[
'totaldeaths'] + " الله يرحمهم " + " وتشافاو الحمد لله" + my_json['recovered'])
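# Rough English translation of the Arabic message returned above (added for clarity):
# "Number of cases <totalcases>, <totaldeaths> died (may God have mercy on them),
# and <recovered> recovered, thank God."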
# print(casesapi())
"""
def casesperregion(city):
url = "https://moroccostats.herokuapp.com/stats/coronavirus/countries/morocco/regions"
response = requests.request("GET", url)
my_json=json.loads(response.text)
print("CasaSettat: ", my_json['CasaSettat'])
print("RabatSalKenitra: ", my_json['RabatSalKenitra'])
print("MarrakechSafi: ", my_json['MarrakechSafi'])
print("Fsmeknes: ", my_json['Fsmeknes'])
print("TangerTetouanAlHoceima: ", my_json['TangerTetouanAlHoceima'])
print("BeniMellalKhnifra: ", my_json['BeniMellalKhnifra'])
print("Oriental: ", my_json['Oriental'])
print("SoussMassa: ", my_json['SoussMassa'])
print("DakhlaOuedEdDahab: ", my_json['DakhlaOuedEdDahab'])
print("GuelmimOuedNoun: ", my_json['GuelmimOuedNoun'])
print("LayouneSakiaElHamra: ", my_json['LayouneSakiaElHamra'])
print("Daraatafilalet: ", my_json['Daraatafilalet'])
casesperregion()
"""
| [
"requests.get"
] | [((131, 148), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (143, 148), False, 'import requests\n')] |
import ipaddress
from django.core.management import BaseCommand, CommandError
from django.db import transaction
from ralph.data_center.models import DataCenter, Rack, ServerRoom
from ralph.dhcp.models import DNSServer, DNSServerGroup, DNSServerGroupOrder
from ralph.networks.models import IPAddress, Network, NetworkEnvironment
class Command(BaseCommand):
"""
    Generate a single, production-ready network
"""
def handle(self, *args, **options):
dc_name = options.get('dc_name')
create_rack = options.get('create_rack')
server_room_name = options.get('server_room_name')
try:
dns1_address = ipaddress.ip_address(options.get('dns1'))
dns2_address = ipaddress.ip_address(options.get('dns2'))
network_address = ipaddress.ip_address(
options.get('network_address')
)
network = ipaddress.ip_network(
'{}/{}'.format(
str(network_address),
options.get('network_mask')
)
)
gateway_address = ipaddress.ip_address(options.get('gateway'))
except ValueError as e:
raise CommandError(e)
self.create_network(
network=network,
dns1_address=dns1_address,
dns2_address=dns2_address,
gateway_address=gateway_address,
dc_name=dc_name,
server_room_name=server_room_name,
create_rack=create_rack,
)
def add_arguments(self, parser):
parser.add_argument(
'-d', '--dc-name',
default='dc1',
dest='dc_name',
help='Data center name.'
)
parser.add_argument(
'--dns1',
default='10.0.0.11',
dest='dns1',
help='Primary DNS server.'
)
parser.add_argument(
'--dns2',
default='10.0.0.12',
dest='dns2',
help='Secondary DNS server.'
)
parser.add_argument(
'--network-address',
default='10.0.0.0',
dest='network_address',
help='Network address.'
)
parser.add_argument(
'--network-mask',
default='24',
dest='network_mask',
help='Network mask.'
)
parser.add_argument(
'--gateway',
default='10.0.0.1',
dest='gateway',
help='Default gateway.'
)
parser.add_argument(
'--server-room-name',
default="server room",
dest='server_room_name',
help='Server room name.'
)
parser.add_argument(
'--create-rack',
action='store_true',
help='Create rack for which the subnet will be used.'
)
@classmethod
@transaction.atomic
def create_network(
cls, network, dns1_address, dns2_address, gateway_address, dc_name,
server_room_name, create_rack=False
):
data_center, _ = DataCenter.objects.get_or_create(name=dc_name)
network_environment, _ = NetworkEnvironment.objects.get_or_create(
name='prod', data_center=data_center
)
server_room, _ = ServerRoom.objects.get_or_create(
data_center=data_center, name=server_room_name
)
rack = None
if create_rack:
rack = Rack.objects.create(
server_room=server_room, name="Rack {}".format(network)
)
IPAddress.objects.get_or_create(address=str(dns1_address))
IPAddress.objects.get_or_create(address=str(dns2_address))
dns1, _ = DNSServer.objects.get_or_create(ip_address=str(dns1_address))
dns2, _ = DNSServer.objects.get_or_create(ip_address=str(dns2_address))
dns_server_group, _ = DNSServerGroup.objects.get_or_create(
name='{}-dns-group'.format(dc_name)
)
dns_order = 10
for dns in [dns1, dns2]:
DNSServerGroupOrder.objects.get_or_create(
dns_server=dns, dns_server_group=dns_server_group,
order=dns_order
)
dns_order += 10
gateway_address, _ = IPAddress.objects.get_or_create(
address=str(gateway_address)
)
network, _ = Network.objects.get_or_create(
name=str(network),
address=str(network),
gateway=gateway_address,
network_environment=network_environment,
dns_servers_group=dns_server_group
)
if rack:
network.racks.add(rack)
| [
"ralph.dhcp.models.DNSServerGroupOrder.objects.get_or_create",
"ralph.data_center.models.ServerRoom.objects.get_or_create",
"ralph.networks.models.NetworkEnvironment.objects.get_or_create",
"django.core.management.CommandError",
"ralph.data_center.models.DataCenter.objects.get_or_create"
] | [((3104, 3150), 'ralph.data_center.models.DataCenter.objects.get_or_create', 'DataCenter.objects.get_or_create', ([], {'name': 'dc_name'}), '(name=dc_name)\n', (3136, 3150), False, 'from ralph.data_center.models import DataCenter, Rack, ServerRoom\n'), ((3184, 3262), 'ralph.networks.models.NetworkEnvironment.objects.get_or_create', 'NetworkEnvironment.objects.get_or_create', ([], {'name': '"""prod"""', 'data_center': 'data_center'}), "(name='prod', data_center=data_center)\n", (3224, 3262), False, 'from ralph.networks.models import IPAddress, Network, NetworkEnvironment\n'), ((3310, 3395), 'ralph.data_center.models.ServerRoom.objects.get_or_create', 'ServerRoom.objects.get_or_create', ([], {'data_center': 'data_center', 'name': 'server_room_name'}), '(data_center=data_center, name=server_room_name\n )\n', (3342, 3395), False, 'from ralph.data_center.models import DataCenter, Rack, ServerRoom\n'), ((4071, 4185), 'ralph.dhcp.models.DNSServerGroupOrder.objects.get_or_create', 'DNSServerGroupOrder.objects.get_or_create', ([], {'dns_server': 'dns', 'dns_server_group': 'dns_server_group', 'order': 'dns_order'}), '(dns_server=dns, dns_server_group=\n dns_server_group, order=dns_order)\n', (4112, 4185), False, 'from ralph.dhcp.models import DNSServer, DNSServerGroup, DNSServerGroupOrder\n'), ((1201, 1216), 'django.core.management.CommandError', 'CommandError', (['e'], {}), '(e)\n', (1213, 1216), False, 'from django.core.management import BaseCommand, CommandError\n')] |
import requests
import shutil
# Replace "coco" with your model name, and replace the link with you've desired
payload = {'modelname': 'coco'}
link = "https://c2.staticflickr.com/6/5093/5389312711_08e67fa19b_b.jpg"
def download_file(url):
local_filename = url.split('/')[-1]
r = requests.get(url, stream=True)
with open(local_filename, 'wb') as f:
shutil.copyfileobj(r.raw, f)
return local_filename
files = {'file': open(download_file(link), 'rb')}
r = requests.post('http://0.0.0.0:5000/detect', data=payload, files=files)
print(r.text)
# The output will be something like this:
# [{"confidence": 0.9928387999534607, "class": 53, "bounding_box": [0.11660429835319519, 0.5523781776428223, 0.5901567935943604, 0.8993778228759766], "label": "apple"}, {"confidence": 0.9891212582588196, "class": 55, "bounding_box": [0.07073500752449036, 0.18506604433059692, 0.5849615335464478, 0.5593685507774353], "label": "orange"}]
| [
"requests.post",
"shutil.copyfileobj",
"requests.get"
] | [((484, 554), 'requests.post', 'requests.post', (['"""http://0.0.0.0:5000/detect"""'], {'data': 'payload', 'files': 'files'}), "('http://0.0.0.0:5000/detect', data=payload, files=files)\n", (497, 554), False, 'import requests\n'), ((291, 321), 'requests.get', 'requests.get', (['url'], {'stream': '(True)'}), '(url, stream=True)\n', (303, 321), False, 'import requests\n'), ((372, 400), 'shutil.copyfileobj', 'shutil.copyfileobj', (['r.raw', 'f'], {}), '(r.raw, f)\n', (390, 400), False, 'import shutil\n')] |
# coding: utf-8
from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Address(Base):
__tablename__ = 'address'
id = Column(UUID, primary_key=True)
street = Column(String(200), nullable=False)
interior_number = Column(String(200), nullable=False)
outside_number = Column(String(200), nullable=False)
zip_code = Column(String(200), nullable=False)
city = Column(String(200), nullable=False)
borough = Column(String(200), nullable=False)
state = Column(String(200), nullable=False)
country = Column(String(200), nullable=False)
class AppUser(Base):
__tablename__ = 'app_user'
password = Column(String(128), nullable=False)
last_login = Column(DateTime(True))
is_superuser = Column(Boolean, nullable=False)
username = Column(String(150), nullable=False, unique=True)
first_name = Column(String(30), nullable=False)
last_name = Column(String(150), nullable=False)
email = Column(String(254), nullable=False)
is_staff = Column(Boolean, nullable=False)
is_active = Column(Boolean, nullable=False)
date_joined = Column(DateTime(True), nullable=False)
id = Column(UUID, primary_key=True)
class AuthGroup(Base):
__tablename__ = 'auth_group'
id = Column(Integer, primary_key=True, server_default=text("nextval('auth_group_id_seq'::regclass)"))
name = Column(String(150), nullable=False, unique=True)
class DjangoCeleryBeatClockedschedule(Base):
__tablename__ = 'django_celery_beat_clockedschedule'
id = Column(Integer, primary_key=True, server_default=text("nextval('django_celery_beat_clockedschedule_id_seq'::regclass)"))
clocked_time = Column(DateTime(True), nullable=False)
enabled = Column(Boolean, nullable=False)
class DjangoCeleryBeatCrontabschedule(Base):
__tablename__ = 'django_celery_beat_crontabschedule'
id = Column(Integer, primary_key=True, server_default=text("nextval('django_celery_beat_crontabschedule_id_seq'::regclass)"))
minute = Column(String(240), nullable=False)
hour = Column(String(96), nullable=False)
day_of_week = Column(String(64), nullable=False)
day_of_month = Column(String(124), nullable=False)
month_of_year = Column(String(64), nullable=False)
timezone = Column(String(63), nullable=False)
class DjangoCeleryBeatIntervalschedule(Base):
__tablename__ = 'django_celery_beat_intervalschedule'
id = Column(Integer, primary_key=True, server_default=text("nextval('django_celery_beat_intervalschedule_id_seq'::regclass)"))
every = Column(Integer, nullable=False)
period = Column(String(24), nullable=False)
class DjangoCeleryBeatPeriodictasks(Base):
__tablename__ = 'django_celery_beat_periodictasks'
ident = Column(SmallInteger, primary_key=True)
last_update = Column(DateTime(True), nullable=False)
class DjangoCeleryBeatSolarschedule(Base):
__tablename__ = 'django_celery_beat_solarschedule'
__table_args__ = (
UniqueConstraint('event', 'latitude', 'longitude'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('django_celery_beat_solarschedule_id_seq'::regclass)"))
event = Column(String(24), nullable=False)
latitude = Column(Numeric(9, 6), nullable=False)
longitude = Column(Numeric(9, 6), nullable=False)
class DjangoContentType(Base):
__tablename__ = 'django_content_type'
__table_args__ = (
UniqueConstraint('app_label', 'model'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('django_content_type_id_seq'::regclass)"))
app_label = Column(String(100), nullable=False)
model = Column(String(100), nullable=False)
class DjangoMigration(Base):
__tablename__ = 'django_migrations'
id = Column(Integer, primary_key=True, server_default=text("nextval('django_migrations_id_seq'::regclass)"))
app = Column(String(255), nullable=False)
name = Column(String(255), nullable=False)
applied = Column(DateTime(True), nullable=False)
class DjangoSession(Base):
__tablename__ = 'django_session'
session_key = Column(String(40), primary_key=True, index=True)
session_data = Column(Text, nullable=False)
expire_date = Column(DateTime(True), nullable=False, index=True)
class DjangoSite(Base):
__tablename__ = 'django_site'
id = Column(Integer, primary_key=True, server_default=text("nextval('django_site_id_seq'::regclass)"))
domain = Column(String(100), nullable=False, unique=True)
name = Column(String(50), nullable=False)
class Phone(Base):
__tablename__ = 'phone'
id = Column(UUID, primary_key=True)
number = Column(String(200), nullable=False)
extension = Column(String(200), nullable=False)
class SocialaccountSocialapp(Base):
__tablename__ = 'socialaccount_socialapp'
id = Column(Integer, primary_key=True, server_default=text("nextval('socialaccount_socialapp_id_seq'::regclass)"))
provider = Column(String(30), nullable=False)
name = Column(String(40), nullable=False)
client_id = Column(String(191), nullable=False)
secret = Column(String(191), nullable=False)
key = Column(String(191), nullable=False)
class AccountEmailaddres(Base):
__tablename__ = 'account_emailaddress'
id = Column(Integer, primary_key=True, server_default=text("nextval('account_emailaddress_id_seq'::regclass)"))
email = Column(String(254), nullable=False, unique=True)
verified = Column(Boolean, nullable=False)
primary = Column(Boolean, nullable=False)
user_id = Column(ForeignKey('app_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
user = relationship('AppUser')
class AppUserGroup(Base):
__tablename__ = 'app_user_groups'
__table_args__ = (
UniqueConstraint('user_id', 'group_id'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('app_user_groups_id_seq'::regclass)"))
user_id = Column(ForeignKey('app_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
group_id = Column(ForeignKey('auth_group.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
group = relationship('AuthGroup')
user = relationship('AppUser')
class AuthPermission(Base):
__tablename__ = 'auth_permission'
__table_args__ = (
UniqueConstraint('content_type_id', 'codename'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('auth_permission_id_seq'::regclass)"))
name = Column(String(255), nullable=False)
content_type_id = Column(ForeignKey('django_content_type.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
codename = Column(String(100), nullable=False)
content_type = relationship('DjangoContentType')
class AuthtokenToken(Base):
__tablename__ = 'authtoken_token'
key = Column(String(40), primary_key=True, index=True)
created = Column(DateTime(True), nullable=False)
user_id = Column(ForeignKey('app_user.id', deferrable=True, initially='DEFERRED'), nullable=False, unique=True)
user = relationship('AppUser', uselist=False)
class DjangoAdminLog(Base):
__tablename__ = 'django_admin_log'
__table_args__ = (
CheckConstraint('action_flag >= 0'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('django_admin_log_id_seq'::regclass)"))
action_time = Column(DateTime(True), nullable=False)
object_id = Column(Text)
object_repr = Column(String(200), nullable=False)
action_flag = Column(SmallInteger, nullable=False)
change_message = Column(Text, nullable=False)
content_type_id = Column(ForeignKey('django_content_type.id', deferrable=True, initially='DEFERRED'), index=True)
user_id = Column(ForeignKey('app_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
content_type = relationship('DjangoContentType')
user = relationship('AppUser')
class DjangoCeleryBeatPeriodictask(Base):
__tablename__ = 'django_celery_beat_periodictask'
__table_args__ = (
CheckConstraint('expire_seconds >= 0'),
CheckConstraint('priority >= 0'),
CheckConstraint('total_run_count >= 0')
)
id = Column(Integer, primary_key=True, server_default=text("nextval('django_celery_beat_periodictask_id_seq'::regclass)"))
name = Column(String(200), nullable=False, unique=True)
task = Column(String(200), nullable=False)
args = Column(Text, nullable=False)
kwargs = Column(Text, nullable=False)
queue = Column(String(200))
exchange = Column(String(200))
routing_key = Column(String(200))
expires = Column(DateTime(True))
enabled = Column(Boolean, nullable=False)
last_run_at = Column(DateTime(True))
total_run_count = Column(Integer, nullable=False)
date_changed = Column(DateTime(True), nullable=False)
description = Column(Text, nullable=False)
crontab_id = Column(ForeignKey('django_celery_beat_crontabschedule.id', deferrable=True, initially='DEFERRED'), index=True)
interval_id = Column(ForeignKey('django_celery_beat_intervalschedule.id', deferrable=True, initially='DEFERRED'), index=True)
solar_id = Column(ForeignKey('django_celery_beat_solarschedule.id', deferrable=True, initially='DEFERRED'), index=True)
one_off = Column(Boolean, nullable=False)
start_time = Column(DateTime(True))
priority = Column(Integer)
headers = Column(Text, nullable=False)
clocked_id = Column(ForeignKey('django_celery_beat_clockedschedule.id', deferrable=True, initially='DEFERRED'), index=True)
expire_seconds = Column(Integer)
clocked = relationship('DjangoCeleryBeatClockedschedule')
crontab = relationship('DjangoCeleryBeatCrontabschedule')
interval = relationship('DjangoCeleryBeatIntervalschedule')
solar = relationship('DjangoCeleryBeatSolarschedule')
class Person(Base):
__tablename__ = 'person'
id = Column(UUID, primary_key=True)
name = Column(String(200))
last_name = Column(String(200))
second_last_name = Column(String(200))
address_id = Column(ForeignKey('address.id', deferrable=True, initially='DEFERRED'), index=True)
phone_id = Column(ForeignKey('phone.id', deferrable=True, initially='DEFERRED'), index=True)
    address = relationship('Address')
phone = relationship('Phone')
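# Illustrative usage sketch (added; not part of the generated module). The models can be
# queried with a regular SQLAlchemy session; the connection URL below is hypothetical:
#   from sqlalchemy import create_engine
#   from sqlalchemy.orm import sessionmaker
#   engine = create_engine("postgresql://user:password@localhost/dbname")
#   session = sessionmaker(bind=engine)()
#   people = session.query(Person).join(Person.address).all()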
class SocialaccountSocialaccount(Base):
__tablename__ = 'socialaccount_socialaccount'
__table_args__ = (
UniqueConstraint('provider', 'uid'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('socialaccount_socialaccount_id_seq'::regclass)"))
provider = Column(String(30), nullable=False)
uid = Column(String(191), nullable=False)
last_login = Column(DateTime(True), nullable=False)
date_joined = Column(DateTime(True), nullable=False)
extra_data = Column(Text, nullable=False)
user_id = Column(ForeignKey('app_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
user = relationship('AppUser')
class SocialaccountSocialappSite(Base):
__tablename__ = 'socialaccount_socialapp_sites'
__table_args__ = (
UniqueConstraint('socialapp_id', 'site_id'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('socialaccount_socialapp_sites_id_seq'::regclass)"))
socialapp_id = Column(ForeignKey('socialaccount_socialapp.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
site_id = Column(ForeignKey('django_site.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
site = relationship('DjangoSite')
socialapp = relationship('SocialaccountSocialapp')
class AccountEmailconfirmation(Base):
__tablename__ = 'account_emailconfirmation'
id = Column(Integer, primary_key=True, server_default=text("nextval('account_emailconfirmation_id_seq'::regclass)"))
created = Column(DateTime(True), nullable=False)
sent = Column(DateTime(True))
key = Column(String(64), nullable=False, unique=True)
email_address_id = Column(ForeignKey('account_emailaddress.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
email_address = relationship('AccountEmailaddres')
class AppUserUserPermission(Base):
__tablename__ = 'app_user_user_permissions'
__table_args__ = (
UniqueConstraint('user_id', 'permission_id'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('app_user_user_permissions_id_seq'::regclass)"))
user_id = Column(ForeignKey('app_user.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
permission_id = Column(ForeignKey('auth_permission.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
permission = relationship('AuthPermission')
user = relationship('AppUser')
class AuthGroupPermission(Base):
__tablename__ = 'auth_group_permissions'
__table_args__ = (
UniqueConstraint('group_id', 'permission_id'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('auth_group_permissions_id_seq'::regclass)"))
group_id = Column(ForeignKey('auth_group.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
permission_id = Column(ForeignKey('auth_permission.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
group = relationship('AuthGroup')
permission = relationship('AuthPermission')
class SocialaccountSocialtoken(Base):
"""
SocialaccountSocialtoken
"""
__tablename__ = 'socialaccount_socialtoken'
__table_args__ = (
UniqueConstraint('app_id', 'account_id'),
)
id = Column(Integer, primary_key=True, server_default=text("nextval('socialaccount_socialtoken_id_seq'::regclass)"))
token = Column(Text, nullable=False)
token_secret = Column(Text, nullable=False)
expires_at = Column(DateTime(True))
account_id = Column(ForeignKey('socialaccount_socialaccount.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
app_id = Column(ForeignKey('socialaccount_socialapp.id', deferrable=True, initially='DEFERRED'), nullable=False, index=True)
account = relationship('SocialaccountSocialaccount')
app = relationship('SocialaccountSocialapp')
| [
"sqlalchemy.orm.relationship",
"sqlalchemy.text",
"sqlalchemy.DateTime",
"sqlalchemy.Numeric",
"sqlalchemy.ForeignKey",
"sqlalchemy.UniqueConstraint",
"sqlalchemy.String",
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.CheckConstraint",
"sqlalchemy.Column"
] | [((316, 334), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (332, 334), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((423, 453), 'sqlalchemy.Column', 'Column', (['UUID'], {'primary_key': '(True)'}), '(UUID, primary_key=True)\n', (429, 453), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1029, 1060), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (1035, 1060), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1292, 1323), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (1298, 1323), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1340, 1371), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (1346, 1371), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1438, 1468), 'sqlalchemy.Column', 'Column', (['UUID'], {'primary_key': '(True)'}), '(UUID, primary_key=True)\n', (1444, 1468), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2001, 2032), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (2007, 2032), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2826, 2857), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (2832, 2857), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3018, 3056), 'sqlalchemy.Column', 'Column', (['SmallInteger'], {'primary_key': '(True)'}), '(SmallInteger, primary_key=True)\n', (3024, 3056), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4438, 4466), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (4444, 4466), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4871, 4901), 'sqlalchemy.Column', 'Column', (['UUID'], {'primary_key': '(True)'}), '(UUID, primary_key=True)\n', (4877, 4901), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5720, 5751), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (5726, 5751), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5766, 5797), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (5772, 5797), False, 'from sqlalchemy 
import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5925, 5948), 'sqlalchemy.orm.relationship', 'relationship', (['"""AppUser"""'], {}), "('AppUser')\n", (5937, 5948), False, 'from sqlalchemy.orm import relationship\n'), ((6451, 6476), 'sqlalchemy.orm.relationship', 'relationship', (['"""AuthGroup"""'], {}), "('AuthGroup')\n", (6463, 6476), False, 'from sqlalchemy.orm import relationship\n'), ((6488, 6511), 'sqlalchemy.orm.relationship', 'relationship', (['"""AppUser"""'], {}), "('AppUser')\n", (6500, 6511), False, 'from sqlalchemy.orm import relationship\n'), ((7030, 7063), 'sqlalchemy.orm.relationship', 'relationship', (['"""DjangoContentType"""'], {}), "('DjangoContentType')\n", (7042, 7063), False, 'from sqlalchemy.orm import relationship\n'), ((7373, 7411), 'sqlalchemy.orm.relationship', 'relationship', (['"""AppUser"""'], {'uselist': '(False)'}), "('AppUser', uselist=False)\n", (7385, 7411), False, 'from sqlalchemy.orm import relationship\n'), ((7741, 7753), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (7747, 7753), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7826, 7862), 'sqlalchemy.Column', 'Column', (['SmallInteger'], {'nullable': '(False)'}), '(SmallInteger, nullable=False)\n', (7832, 7862), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7884, 7912), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (7890, 7912), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8166, 8199), 'sqlalchemy.orm.relationship', 'relationship', (['"""DjangoContentType"""'], {}), "('DjangoContentType')\n", (8178, 8199), False, 'from sqlalchemy.orm import relationship\n'), ((8211, 8234), 'sqlalchemy.orm.relationship', 'relationship', (['"""AppUser"""'], {}), "('AppUser')\n", (8223, 8234), False, 'from sqlalchemy.orm import relationship\n'), ((8746, 8774), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (8752, 8774), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8788, 8816), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (8794, 8816), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8973, 9004), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (8979, 9004), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9068, 9099), 'sqlalchemy.Column', 'Column', (['Integer'], {'nullable': '(False)'}), '(Integer, nullable=False)\n', (9074, 9099), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9176, 9204), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (9182, 9204), False, 'from sqlalchemy 
import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9601, 9632), 'sqlalchemy.Column', 'Column', (['Boolean'], {'nullable': '(False)'}), '(Boolean, nullable=False)\n', (9607, 9632), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9688, 9703), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (9694, 9703), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9718, 9746), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (9724, 9746), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9896, 9911), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (9902, 9911), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9927, 9974), 'sqlalchemy.orm.relationship', 'relationship', (['"""DjangoCeleryBeatClockedschedule"""'], {}), "('DjangoCeleryBeatClockedschedule')\n", (9939, 9974), False, 'from sqlalchemy.orm import relationship\n'), ((9989, 10036), 'sqlalchemy.orm.relationship', 'relationship', (['"""DjangoCeleryBeatCrontabschedule"""'], {}), "('DjangoCeleryBeatCrontabschedule')\n", (10001, 10036), False, 'from sqlalchemy.orm import relationship\n'), ((10052, 10100), 'sqlalchemy.orm.relationship', 'relationship', (['"""DjangoCeleryBeatIntervalschedule"""'], {}), "('DjangoCeleryBeatIntervalschedule')\n", (10064, 10100), False, 'from sqlalchemy.orm import relationship\n'), ((10113, 10158), 'sqlalchemy.orm.relationship', 'relationship', (['"""DjangoCeleryBeatSolarschedule"""'], {}), "('DjangoCeleryBeatSolarschedule')\n", (10125, 10158), False, 'from sqlalchemy.orm import relationship\n'), ((10220, 10250), 'sqlalchemy.Column', 'Column', (['UUID'], {'primary_key': '(True)'}), '(UUID, primary_key=True)\n', (10226, 10250), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10574, 10596), 'sqlalchemy.orm.relationship', 'relationship', (['"""Addres"""'], {}), "('Addres')\n", (10586, 10596), False, 'from sqlalchemy.orm import relationship\n'), ((10609, 10630), 'sqlalchemy.orm.relationship', 'relationship', (['"""Phone"""'], {}), "('Phone')\n", (10621, 10630), False, 'from sqlalchemy.orm import relationship\n'), ((11147, 11175), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (11153, 11175), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11303, 11326), 'sqlalchemy.orm.relationship', 'relationship', (['"""AppUser"""'], {}), "('AppUser')\n", (11315, 11326), False, 'from sqlalchemy.orm import relationship\n'), ((11894, 11920), 'sqlalchemy.orm.relationship', 'relationship', (['"""DjangoSite"""'], {}), "('DjangoSite')\n", (11906, 11920), False, 'from sqlalchemy.orm import relationship\n'), ((11937, 11975), 'sqlalchemy.orm.relationship', 'relationship', (['"""SocialaccountSocialapp"""'], {}), "('SocialaccountSocialapp')\n", (11949, 11975), False, 
'from sqlalchemy.orm import relationship\n'), ((12488, 12522), 'sqlalchemy.orm.relationship', 'relationship', (['"""AccountEmailaddres"""'], {}), "('AccountEmailaddres')\n", (12500, 12522), False, 'from sqlalchemy.orm import relationship\n'), ((13074, 13104), 'sqlalchemy.orm.relationship', 'relationship', (['"""AuthPermission"""'], {}), "('AuthPermission')\n", (13086, 13104), False, 'from sqlalchemy.orm import relationship\n'), ((13116, 13139), 'sqlalchemy.orm.relationship', 'relationship', (['"""AppUser"""'], {}), "('AppUser')\n", (13128, 13139), False, 'from sqlalchemy.orm import relationship\n'), ((13682, 13707), 'sqlalchemy.orm.relationship', 'relationship', (['"""AuthGroup"""'], {}), "('AuthGroup')\n", (13694, 13707), False, 'from sqlalchemy.orm import relationship\n'), ((13725, 13755), 'sqlalchemy.orm.relationship', 'relationship', (['"""AuthPermission"""'], {}), "('AuthPermission')\n", (13737, 13755), False, 'from sqlalchemy.orm import relationship\n'), ((14102, 14130), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (14108, 14130), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((14150, 14178), 'sqlalchemy.Column', 'Column', (['Text'], {'nullable': '(False)'}), '(Text, nullable=False)\n', (14156, 14178), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((14500, 14542), 'sqlalchemy.orm.relationship', 'relationship', (['"""SocialaccountSocialaccount"""'], {}), "('SocialaccountSocialaccount')\n", (14512, 14542), False, 'from sqlalchemy.orm import relationship\n'), ((14553, 14591), 'sqlalchemy.orm.relationship', 'relationship', (['"""SocialaccountSocialapp"""'], {}), "('SocialaccountSocialapp')\n", (14565, 14591), False, 'from sqlalchemy.orm import relationship\n'), ((474, 485), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (480, 485), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((532, 543), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (538, 543), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((589, 600), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (595, 600), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((640, 651), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (646, 651), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((687, 698), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (693, 698), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((737, 748), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (743, 748), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((785, 796), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (791, 796), False, 'from sqlalchemy 
import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((835, 846), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (841, 846), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((941, 952), 'sqlalchemy.String', 'String', (['(128)'], {}), '(128)\n', (947, 952), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((994, 1008), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (1002, 1008), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1083, 1094), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (1089, 1094), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1149, 1159), 'sqlalchemy.String', 'String', (['(30)'], {}), '(30)\n', (1155, 1159), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1200, 1211), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (1206, 1211), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1248, 1259), 'sqlalchemy.String', 'String', (['(254)'], {}), '(254)\n', (1254, 1259), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1397, 1411), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (1405, 1411), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1652, 1663), 'sqlalchemy.String', 'String', (['(150)'], {}), '(150)\n', (1658, 1663), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1955, 1969), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (1963, 1969), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2288, 2299), 'sqlalchemy.String', 'String', (['(240)'], {}), '(240)\n', (2294, 2299), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2335, 2345), 'sqlalchemy.String', 'String', (['(96)'], {}), '(96)\n', (2341, 2345), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2388, 2398), 'sqlalchemy.String', 'String', (['(64)'], {}), '(64)\n', (2394, 2398), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2442, 2453), 'sqlalchemy.String', 'String', (['(124)'], {}), '(124)\n', (2448, 2453), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, 
Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2498, 2508), 'sqlalchemy.String', 'String', (['(64)'], {}), '(64)\n', (2504, 2508), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2548, 2558), 'sqlalchemy.String', 'String', (['(63)'], {}), '(63)\n', (2554, 2558), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2878, 2888), 'sqlalchemy.String', 'String', (['(24)'], {}), '(24)\n', (2884, 2888), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3082, 3096), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (3090, 3096), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3245, 3295), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""event"""', '"""latitude"""', '"""longitude"""'], {}), "('event', 'latitude', 'longitude')\n", (3261, 3295), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3451, 3461), 'sqlalchemy.String', 'String', (['(24)'], {}), '(24)\n', (3457, 3461), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3501, 3514), 'sqlalchemy.Numeric', 'Numeric', (['(9)', '(6)'], {}), '(9, 6)\n', (3508, 3514), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3555, 3568), 'sqlalchemy.Numeric', 'Numeric', (['(9)', '(6)'], {}), '(9, 6)\n', (3562, 3568), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3692, 3730), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""app_label"""', '"""model"""'], {}), "('app_label', 'model')\n", (3708, 3730), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3877, 3888), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (3883, 3888), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3925, 3936), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (3931, 3936), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4156, 4167), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (4162, 4167), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4203, 4214), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (4209, 4214), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4253, 4267), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), 
'(True)\n', (4261, 4267), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4377, 4387), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (4383, 4387), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4492, 4506), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (4500, 4506), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4724, 4735), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (4730, 4735), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4784, 4794), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (4790, 4794), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4922, 4933), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (4928, 4933), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4974, 4985), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (4980, 4985), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5229, 5239), 'sqlalchemy.String', 'String', (['(30)'], {}), '(30)\n', (5235, 5239), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5275, 5285), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (5281, 5285), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5326, 5337), 'sqlalchemy.String', 'String', (['(191)'], {}), '(191)\n', (5332, 5337), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5375, 5386), 'sqlalchemy.String', 'String', (['(191)'], {}), '(191)\n', (5381, 5386), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5421, 5432), 'sqlalchemy.String', 'String', (['(191)'], {}), '(191)\n', (5427, 5432), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5663, 5674), 'sqlalchemy.String', 'String', (['(254)'], {}), '(254)\n', (5669, 5674), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5819, 5883), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""app_user.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('app_user.id', deferrable=True, initially='DEFERRED')\n", (5829, 5883), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6046, 
6085), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""user_id"""', '"""group_id"""'], {}), "('user_id', 'group_id')\n", (6062, 6085), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6226, 6290), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""app_user.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('app_user.id', deferrable=True, initially='DEFERRED')\n", (6236, 6290), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6342, 6408), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""auth_group.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('auth_group.id', deferrable=True, initially='DEFERRED')\n", (6352, 6408), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6611, 6658), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""content_type_id"""', '"""codename"""'], {}), "('content_type_id', 'codename')\n", (6627, 6658), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6796, 6807), 'sqlalchemy.String', 'String', (['(255)'], {}), '(255)\n', (6802, 6807), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6854, 6929), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""django_content_type.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('django_content_type.id', deferrable=True, initially='DEFERRED')\n", (6864, 6929), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6981, 6992), 'sqlalchemy.String', 'String', (['(100)'], {}), '(100)\n', (6987, 6992), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7150, 7160), 'sqlalchemy.String', 'String', (['(40)'], {}), '(40)\n', (7156, 7160), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7213, 7227), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (7221, 7227), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7266, 7330), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""app_user.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('app_user.id', deferrable=True, initially='DEFERRED')\n", (7276, 7330), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7512, 7547), 'sqlalchemy.CheckConstraint', 'CheckConstraint', (['"""action_flag >= 0"""'], {}), "('action_flag >= 0')\n", (7527, 7547), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7693, 7707), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (7701, 7707), False, 'from 
sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7779, 7790), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (7785, 7790), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7942, 8017), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""django_content_type.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('django_content_type.id', deferrable=True, initially='DEFERRED')\n", (7952, 8017), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8052, 8116), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""app_user.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('app_user.id', deferrable=True, initially='DEFERRED')\n", (8062, 8116), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8364, 8402), 'sqlalchemy.CheckConstraint', 'CheckConstraint', (['"""expire_seconds >= 0"""'], {}), "('expire_seconds >= 0')\n", (8379, 8402), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8412, 8444), 'sqlalchemy.CheckConstraint', 'CheckConstraint', (['"""priority >= 0"""'], {}), "('priority >= 0')\n", (8427, 8444), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8454, 8493), 'sqlalchemy.CheckConstraint', 'CheckConstraint', (['"""total_run_count >= 0"""'], {}), "('total_run_count >= 0')\n", (8469, 8493), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8646, 8657), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (8652, 8657), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8706, 8717), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (8712, 8717), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8836, 8847), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (8842, 8847), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8871, 8882), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (8877, 8882), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8909, 8920), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (8915, 8920), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8943, 8957), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (8951, 8957), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, 
UniqueConstraint, text\n'), ((9030, 9044), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (9038, 9044), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9126, 9140), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (9134, 9140), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9229, 9323), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""django_celery_beat_crontabschedule.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('django_celery_beat_crontabschedule.id', deferrable=True,\n initially='DEFERRED')\n", (9239, 9323), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9358, 9453), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""django_celery_beat_intervalschedule.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('django_celery_beat_intervalschedule.id', deferrable=True,\n initially='DEFERRED')\n", (9368, 9453), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9485, 9577), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""django_celery_beat_solarschedule.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('django_celery_beat_solarschedule.id', deferrable=True,\n initially='DEFERRED')\n", (9495, 9577), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9657, 9671), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (9665, 9671), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((9771, 9865), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""django_celery_beat_clockedschedule.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('django_celery_beat_clockedschedule.id', deferrable=True,\n initially='DEFERRED')\n", (9781, 9865), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10269, 10280), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (10275, 10280), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10305, 10316), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (10311, 10316), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10348, 10359), 'sqlalchemy.String', 'String', (['(200)'], {}), '(200)\n', (10354, 10359), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10385, 10448), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""address.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('address.id', deferrable=True, initially='DEFERRED')\n", (10395, 10448), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, 
Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10484, 10545), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""phone.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('phone.id', deferrable=True, initially='DEFERRED')\n", (10494, 10545), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10754, 10789), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""provider"""', '"""uid"""'], {}), "('provider', 'uid')\n", (10770, 10789), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10943, 10953), 'sqlalchemy.String', 'String', (['(30)'], {}), '(30)\n', (10949, 10953), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10988, 10999), 'sqlalchemy.String', 'String', (['(191)'], {}), '(191)\n', (10994, 10999), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11041, 11055), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (11049, 11055), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11098, 11112), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (11106, 11112), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11197, 11261), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""app_user.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('app_user.id', deferrable=True, initially='DEFERRED')\n", (11207, 11261), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11452, 11495), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""socialapp_id"""', '"""site_id"""'], {}), "('socialapp_id', 'site_id')\n", (11468, 11495), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11655, 11734), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""socialaccount_socialapp.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('socialaccount_socialapp.id', deferrable=True, initially='DEFERRED')\n", (11665, 11734), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11785, 11852), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""django_site.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('django_site.id', deferrable=True, initially='DEFERRED')\n", (11795, 11852), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12207, 12221), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (12215, 12221), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12257, 12271), 
'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (12265, 12271), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12290, 12300), 'sqlalchemy.String', 'String', (['(64)'], {}), '(64)\n', (12296, 12300), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12361, 12437), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""account_emailaddress.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('account_emailaddress.id', deferrable=True, initially='DEFERRED')\n", (12371, 12437), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12639, 12683), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""user_id"""', '"""permission_id"""'], {}), "('user_id', 'permission_id')\n", (12655, 12683), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12834, 12898), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""app_user.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('app_user.id', deferrable=True, initially='DEFERRED')\n", (12844, 12898), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12955, 13026), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""auth_permission.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('auth_permission.id', deferrable=True, initially='DEFERRED')\n", (12965, 13026), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((13251, 13296), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""group_id"""', '"""permission_id"""'], {}), "('group_id', 'permission_id')\n", (13267, 13296), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((13445, 13511), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""auth_group.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('auth_group.id', deferrable=True, initially='DEFERRED')\n", (13455, 13511), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((13568, 13639), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""auth_permission.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('auth_permission.id', deferrable=True, initially='DEFERRED')\n", (13578, 13639), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((13920, 13960), 'sqlalchemy.UniqueConstraint', 'UniqueConstraint', (['"""app_id"""', '"""account_id"""'], {}), "('app_id', 'account_id')\n", (13936, 13960), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((14203, 14217), 'sqlalchemy.DateTime', 'DateTime', (['(True)'], {}), '(True)\n', (14211, 14217), False, 'from sqlalchemy import Boolean, CheckConstraint, 
Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((14243, 14331), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""socialaccount_socialaccount.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('socialaccount_socialaccount.id', deferrable=True, initially=\n 'DEFERRED')\n", (14253, 14331), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((14376, 14455), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""socialaccount_socialapp.id"""'], {'deferrable': '(True)', 'initially': '"""DEFERRED"""'}), "('socialaccount_socialapp.id', deferrable=True, initially='DEFERRED')\n", (14386, 14455), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1586, 1632), 'sqlalchemy.text', 'text', (['"""nextval(\'auth_group_id_seq\'::regclass)"""'], {}), '("nextval(\'auth_group_id_seq\'::regclass)")\n', (1590, 1632), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((1857, 1927), 'sqlalchemy.text', 'text', (['"""nextval(\'django_celery_beat_clockedschedule_id_seq\'::regclass)"""'], {}), '("nextval(\'django_celery_beat_clockedschedule_id_seq\'::regclass)")\n', (1861, 1927), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2196, 2266), 'sqlalchemy.text', 'text', (['"""nextval(\'django_celery_beat_crontabschedule_id_seq\'::regclass)"""'], {}), '("nextval(\'django_celery_beat_crontabschedule_id_seq\'::regclass)")\n', (2200, 2266), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((2741, 2812), 'sqlalchemy.text', 'text', (['"""nextval(\'django_celery_beat_intervalschedule_id_seq\'::regclass)"""'], {}), '("nextval(\'django_celery_beat_intervalschedule_id_seq\'::regclass)")\n', (2745, 2812), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3362, 3430), 'sqlalchemy.text', 'text', (['"""nextval(\'django_celery_beat_solarschedule_id_seq\'::regclass)"""'], {}), '("nextval(\'django_celery_beat_solarschedule_id_seq\'::regclass)")\n', (3366, 3430), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((3797, 3852), 'sqlalchemy.text', 'text', (['"""nextval(\'django_content_type_id_seq\'::regclass)"""'], {}), '("nextval(\'django_content_type_id_seq\'::regclass)")\n', (3801, 3852), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4084, 4137), 'sqlalchemy.text', 'text', (['"""nextval(\'django_migrations_id_seq\'::regclass)"""'], {}), '("nextval(\'django_migrations_id_seq\'::regclass)")\n', (4088, 4137), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((4655, 4702), 'sqlalchemy.text', 'text', (['"""nextval(\'django_site_id_seq\'::regclass)"""'], {}), 
'("nextval(\'django_site_id_seq\'::regclass)")\n', (4659, 4702), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5146, 5205), 'sqlalchemy.text', 'text', (['"""nextval(\'socialaccount_socialapp_id_seq\'::regclass)"""'], {}), '("nextval(\'socialaccount_socialapp_id_seq\'::regclass)")\n', (5150, 5205), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((5586, 5642), 'sqlalchemy.text', 'text', (['"""nextval(\'account_emailaddress_id_seq\'::regclass)"""'], {}), '("nextval(\'account_emailaddress_id_seq\'::regclass)")\n', (5590, 5642), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6152, 6203), 'sqlalchemy.text', 'text', (['"""nextval(\'app_user_groups_id_seq\'::regclass)"""'], {}), '("nextval(\'app_user_groups_id_seq\'::regclass)")\n', (6156, 6203), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((6725, 6776), 'sqlalchemy.text', 'text', (['"""nextval(\'auth_permission_id_seq\'::regclass)"""'], {}), '("nextval(\'auth_permission_id_seq\'::regclass)")\n', (6729, 6776), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((7614, 7666), 'sqlalchemy.text', 'text', (['"""nextval(\'django_admin_log_id_seq\'::regclass)"""'], {}), '("nextval(\'django_admin_log_id_seq\'::regclass)")\n', (7618, 7666), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((8559, 8626), 'sqlalchemy.text', 'text', (['"""nextval(\'django_celery_beat_periodictask_id_seq\'::regclass)"""'], {}), '("nextval(\'django_celery_beat_periodictask_id_seq\'::regclass)")\n', (8563, 8626), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((10856, 10919), 'sqlalchemy.text', 'text', (['"""nextval(\'socialaccount_socialaccount_id_seq\'::regclass)"""'], {}), '("nextval(\'socialaccount_socialaccount_id_seq\'::regclass)")\n', (10860, 10919), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((11562, 11627), 'sqlalchemy.text', 'text', (['"""nextval(\'socialaccount_socialapp_sites_id_seq\'::regclass)"""'], {}), '("nextval(\'socialaccount_socialapp_sites_id_seq\'::regclass)")\n', (11566, 11627), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12123, 12184), 'sqlalchemy.text', 'text', (['"""nextval(\'account_emailconfirmation_id_seq\'::regclass)"""'], {}), '("nextval(\'account_emailconfirmation_id_seq\'::regclass)")\n', (12127, 12184), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((12750, 12811), 'sqlalchemy.text', 'text', (['"""nextval(\'app_user_user_permissions_id_seq\'::regclass)"""'], {}), 
'("nextval(\'app_user_user_permissions_id_seq\'::regclass)")\n', (12754, 12811), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((13363, 13421), 'sqlalchemy.text', 'text', (['"""nextval(\'auth_group_permissions_id_seq\'::regclass)"""'], {}), '("nextval(\'auth_group_permissions_id_seq\'::regclass)")\n', (13367, 13421), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n'), ((14027, 14088), 'sqlalchemy.text', 'text', (['"""nextval(\'socialaccount_socialtoken_id_seq\'::regclass)"""'], {}), '("nextval(\'socialaccount_socialtoken_id_seq\'::regclass)")\n', (14031, 14088), False, 'from sqlalchemy import Boolean, CheckConstraint, Column, DateTime, ForeignKey, Integer, Numeric, SmallInteger, String, Text, UniqueConstraint, text\n')] |
from util.data.data import Data, flatten, merge_on_column
# TODO: Test for file that has a type digression happen on a line
# with empty strings in it. This caused a crash [2019-12-17].
# TODO: Test data with no names getting other data added in place,
# should overwrite the names!
# Some tests for the data
def test_data():
import os
import numpy as np
print("Testing Data...", end=" ")
# ----------------------------------------------------------------
a = Data()
# Verify append
a.append([1,"a"])
a.append([2,"b"])
a.append([3,"c"])
# Verify add column (with edge case, none type)
a.add_column([None,None,None])
assert(a.types[-1] == type(None))
# Reassign the missing value column to floats, verify type update
a[a.names[-1]] = [1.2, 3.0, 2.4]
assert(a.types[-1] == float)
# WARNING: Need to check doing (<int>, <str>) assignment
# Verify automatic type generation AND automatic type-casting
a.append([1,2,3])
assert(tuple(a[-1]) == tuple([1,"2",3.0]))
assert(tuple(map(type,a[-1])) == (int, str, float))
# Add some missing values.
a.append([-1,None,None])
# Done modifying "a", rest of tests leave it constant.
# ----------------------------------------------------------------
# Verify that "index" works correctly.
assert( a.index([1,"2",3.0]) == 3 )
assert( a.index([-1,None,None]) == 4 )
    # Verify that slicing works on descriptors.
assert(a.names[:-1] == ['0','1'])
# Verify that slicing works on descriptors of views.
b = a[:,1:]
assert(b.names[1:] == ['2'])
# Verify in-place addition of rows (different length data with same column names)
b = a[:-2].copy()
c = a[1:-2]
b += c
assert(tuple(b["0"]) == tuple([1,2,3,2,3]))
# Verify in-place addition of new columns (same length data with new columns)
b = a[:,:-1].copy()
c = a[:,-1:].copy()
c["3"] = range(len(c))
b += c
assert(tuple(b[-2]) == (1,"2",3.0,3))
assert(tuple(b[-1]) == (-1,None,None,4))
# Verify in-place addition of new columns (same length data with fewer columns)
b = a[:,:-1].copy()
c = a[:,:].copy()
c["3"] = range(len(c))
c += b
assert(tuple(c[-2]) == (1,"2",None,None))
assert(tuple(c[:,-1]) == tuple(range(5)) + (None,)*5)
    # Verify in-place addition of new columns AND rows.
b = a[:]
b['3'] = range(len(b))
c = a[:]
c += b
assert(tuple(c['3']) == tuple([None]*len(b) + list(range(len(c)-len(b)))))
# Verify in-place addition of a data set with *some* duplicate
# column names, but the same number of rows (adds columns).
b = a[:,1:].copy()
c = a[:,:-1].copy()
c.names = range(3,3+c.shape[1])
b += c
assert(b.shape == (5,4))
assert(tuple(b.names) == tuple(map(str,range(1,5))))
assert(len(list(b[:,-1] == c[:,-1])) == b.shape[0])
# Verify slicing-based singleton value assignment
b = a[:-2].copy()
b[:,0] = 1
assert(tuple(b["0"]) == tuple([1,1,1]))
# Verify slicing-based multiple value assignment
b = a[:-2].copy()
b[:,0] = [3,1,4]
assert(tuple(b["0"]) == tuple([3,1,4]))
# Verify double indexing with integers
assert(a[0,1] == "a")
# Verify double indexing with slices
assert(tuple(a[::-1,1])[2:] == tuple(["c","b","a"]))
# Verify standard index access
assert(tuple(a[0]) == tuple([1,"a",1.2]))
# Verify column-access and automatic column naming
assert(tuple(a["0"])[:-2] == tuple([1,2,3]))
# Verify slicing by index
assert(tuple(a[:2][0]) == tuple(a[0]))
# Verify slicing by names
assert(tuple(a["0","2"][0]) == tuple([1,1.2]))
# Verify that copies with "copy" are deep
b = a.copy()
b.retype([str,str,str])
assert(a.types[0] != str)
# Assert that the two forms of full-data slicing make copies.
assert(id(a[:].data) != id(a.data))
assert(id(a[:,:].data) != id(a.data))
# Verify that copies with slicing are deep and that retype works.
b = a[:]
b.retype([str,str,str])
assert(a.types[0] != str)
# Verify that in-place addition of data with same names in
# different order is handled correctly.
b = a[:,:]
c = a[:,['1','0','2']]
b += c
assert(tuple(b[:,1]) == 2*tuple(a[:,1]))
# Verify that multiple transformations on data yield correct view.
b = a[:]
b += a
b += a
c = b[::2]
c = c[['1', '2']].unique()
assert(c.shape[0] == len(a))
assert(tuple(c.names) == tuple(b.names[1:]))
# Verify that sorting a data object and a view object both work.
b = a[:]
b.sort()
assert(tuple(b[:,0]) == tuple(sorted(a[:,0])))
b = a[:-1, [0,1]]
b.sort()
assert(tuple(b[:,1]) == tuple(sorted(a[:-1,1])))
# Assert that "a" (the object being viewed) is unmodified.
assert(tuple(a['1']) == ('a','b','c','2',None))
# Verify the stack process.
b = a[:]
c = a[:]
c['2'] += 1.0
b += c
b.stack('2')
assert(tuple(b[0,-1]) == (a[0,-1], a[0,-1]+1.0))
# Testing the "unstack" operation.
b = a[:]
b += a
b.pop(-1)
b.stack('2')
# Convert the "stacked" column into generator ogbjects.
b['2'] = ((v for v in row) for row in b['2'])
b[-1,'2'] = None
# Unstack, and verify that the expected output happens.
b.unstack('2')
assert(tuple(b['1']) == ('a','a','b','b','c','c','2','2',None))
# Verify that reorder works.
b = a[:]
b.reorder(["2","0"])
assert(tuple(b.names) == ("2","0","1"))
assert(tuple(b.types) == (float,int,str))
for i in range(len(b)):
for n in b.names:
assert(a[i,n] == b[i,n])
# Verify the ability to construct real-valued arrays (and go back)
numeric = a.to_matrix()
c = Data(map(numeric.from_real, numeric))
assert(tuple(a[:-1]["1"]) == tuple(c[:-1]["1"]))
# Verify column item retrieval and assignment
assert(a["0"][0] == a[0][0])
a["0"][0] = -10
a["0"][0] = 1
# Verify total column assignment
first_col = list(a["0"])
new_col = list(map(str,a["0"]))
a["0"] = new_col
assert(tuple(a["0"]) == tuple(new_col))
a["0"] = first_col
# Verify new column assignment
b = a[:]
first_col = list(b["0"])
new_col = list(map(str,b["0"]))
b["0-str"] = new_col
assert(tuple(map(str,b["0"])) == tuple(b["0-str"]))
# Verify copying a data object that has a 'None' in the first row
b = Data(names=["0","1"], types=[int,str])
b.append([0,None])
b.append([1,'b'])
c = b[:]
assert(tuple(b[0]) == tuple(c[0]))
# Verify accessing multiple rows by list-access
b = a[[0,2,3]]
assert(tuple(b["0"]) == tuple([1,3,1]))
# Verify load and save of a csv file
a.save("a-test.csv")
b = Data().load("a-test.csv")
assert(tuple(a["0"]) == tuple(b["0"]))
os.remove("a-test.csv")
# Verify the writing of a CSV with quoted content and correct read.
b = a[:]
b[-1,1] = "string with comma, we'll see"
b.save("a-test.csv")
c = Data.load("a-test.csv")
assert(tuple(c[-1]) == tuple(b[-1]))
os.remove("a-test.csv")
# Verify load and save of a pkl file
a.save("a-test.pkl")
b = Data.load("a-test.pkl")
assert(tuple(a["0"]) == tuple(b["0"]))
os.remove("a-test.pkl")
# Verify load and save of a gzipped dill file
a.save("a-test.dill.gz")
b = Data().load("a-test.dill.gz")
assert(tuple(a["0"]) == tuple(b["0"]))
os.remove("a-test.dill.gz")
# Verify load and save of a gzipped csv file
a.save("a-test.csv.gz")
b = Data().load("a-test.csv.gz")
assert(tuple(a["0"]) == tuple(b["0"]))
os.remove("a-test.csv.gz")
# Verify the behavior of the 'unpack' and 'pack' functions.
b = a[:]
b['iter'] = [None] + [list(range(i)) for i in range(len(b)-1)]
_ = b.shape[1]
b.unpack("iter")
assert((b.shape[1] - _) == (len(b) - 3))
b.pack("iter")
assert(b.shape[1] == _)
assert(b[0,-1] is None)
assert(b[1,-1] is None)
# TODO: Verify load of a large file (sample only)
# TODO: Verify load of a large compressed file (sample only)
# Verify column equivalence / equality checker by element
b = a[a["0"] == 1]
assert(tuple(b["0"]) == tuple([1,1]))
# Verify one of the inequality operators
b = a[a["0"] < 2]
assert(tuple(b["0"]) == tuple([1,1,-1]))
# Verify column membership checker by set
b = a[a["1"] == {'a','b','2'}]
assert(tuple(b["1"]) == tuple(['a','b','2']))
# Verify column index can be an integer or a string.
for i in range(len(a)):
for j,name in enumerate(a.names):
assert(a[i,j] == a[i,name])
# Verify that the "in" operator works on rows.
assert(1 in a[0])
assert('a' in a[0])
assert(1.2 in a[0])
# Verify that "in" works on Data and View.
assert(not ([1, "a"] in a))
assert(not ([3, "a"] in a[:,:2]))
assert([1, "a"] in a[:,:2])
# Verify that "equality" works for rows.
assert(a[0] == (1,'a',1.2))
assert(not all(a[0] != a[0][:]))
# Verify that "equality" works for Data objects.
assert(tuple(a == [1,'a',1.2]) == (0,))
# Verify that "equality" fails when the equality operator for
# elements of a row is NotImplemented.
b = a[:]
assert( len(list(b == (i for i in (1,2,3,4)))) == 0 )
assert( tuple(b[0] == (i for i in (1,2,3,4))) ==
(NotImplemented, NotImplemented, NotImplemented) )
# Verify that "addition" with sequences works correctly (left and right).
b = a[:]
b[:,1] = map(lambda v: (ord(v) if (v != None) else None), b[:,1])
assert(sum(b[0] + [-1,-97,-1.2]) == 0)
assert(sum([-1,-97,-1.2] + b[0]) == 0)
assert(tuple(a[0] + a[0]) == (2,'aa',2.4))
# Verify that "subtraction" with sequences works correctly (left and right).
assert(sum(b[0] - [1,97,1.2]) == 0)
assert(sum([1,97,1.2] - b[0]) == 0)
# WARNING: Not individually verifying *all* comparison operators.
# Verify generator-based index assignment.
b = a[:]
b[(i for i in range(0,len(b),2)), "1"] = "new"
assert(b[0,"1"] == b[2,"1"] == b[4,"1"] == "new")
# Verify list-based generator assignment.
b = a[:]
b[[i for i in range(0,len(b),2)], "1"] = str("test")
assert(b[0,"1"] == b[2,"1"] == b[4,"1"] == "test")
# Verify that assignment from a generator works.
b = a[:]
b['3'] = map(str, b['2'])
assert(b[0,'3'] == str(b[0,'2']))
assert(b[-1,'3'] == str(b[-1,'2']))
# Verify assignment of a non-iterable being broadcast automatically.
b['4'] = None
assert(tuple(b[-2]) == (1,'2',3.0,'3.0',None))
    # Verify addition of a column to empty data works.
b = Data()
b.add_column([1,2,3])
assert(tuple(b[:,0]) == (1,2,3))
# Verify the generation of a random k-fold cross validation.
b = Data()
for i in range(37):
b.append([i])
# Collect the list of all the "testing" rows seen
all_test = []
for (train, test) in b.k_fold(k=11, seed=1):
all_test += list(test["0"])
assert(sorted(all_test) == list(range(len(b))))
# Verify the identification of unique rows and collection process.
b = a[:]
b += a
cols = ['0','1']
b = b[cols].unique().copy().collect(b)
assert(tuple(b[0,-1]) == tuple(2*[a[0,-1]]))
# Verify slicing data down to one column.
assert(tuple(a[:,-1]) == (1.2,3.0,2.4,3.0,None))
# Test slicing to access columns and rows.
b = a[:,:-1]
c = b.unique()
assert(len(b) == len(c))
# Test the access of a single column by name in a view.
b = a[:,:-1]
assert( tuple(b[:,"0"]) == (1,2,3,1,-1) )
# Verify that the printed data set handles edge cases (one more
# row or equal rows to number desired) correctly.
b = a[:]
b = b + b
b.append(b[0])
sb = str(b)
# Test the correctness of truncated rows and columns.
b.max_display = 2
b_printout = '''
========================
Size: (11 x 3)
0 | ... | 2
int | ... | float
------------------
1 | ... | 1.2
... ... ...
1 | ... | 1.2
missing 4 of 33 entries
at rows: [5, 10]
at columns: [1, 2]
========================
'''
assert( str(b) == b_printout)
# Test the correctness of truncated rows only.
b.max_display = 4
b_printout = '''
========================
Size: (11 x 3)
0 | 1 | 2
int | str | float
-------------------
1 | "a" | 1.2
2 | "b" | 3.0
... ... ...
-1 | None | None
1 | "a" | 1.2
missing 4 of 33 entries
at rows: [5, 10]
at columns: [1, 2]
========================
'''
assert( str(b) == b_printout)
# Test the correctness of exact match (no truncation).
b.max_display = 11
b_printout = '''
========================
Size: (11 x 3)
0 | 1 | 2
int | str | float
-------------------
1 | "a" | 1.2
2 | "b" | 3.0
3 | "c" | 2.4
1 | "2" | 3.0
-1 | None | None
1 | "a" | 1.2
2 | "b" | 3.0
3 | "c" | 2.4
1 | "2" | 3.0
-1 | None | None
1 | "a" | 1.2
missing 4 of 33 entries
at rows: [5, 10]
at columns: [1, 2]
========================
'''
assert(str(b) == b_printout)
# Verify that the names and types attributes are the correct type.
assert("Descriptor" in str(type(a.types)))
assert("Descriptor" in str(type(a.names)))
# Check the 'flatten' function to make sure it correctly flattens
# structures with nested, differential depth, and different types.
complex_sequence = [(l for l in ((1,2,3)+tuple("abc"))),
"def", "ghi", ("jk",tuple("lmn"))]
simplified_sequence = (1,2,3)+tuple('abcdefghijklmn')
assert(tuple(flatten(complex_sequence)) == simplified_sequence)
# Verify that merging two data sets works as expected.
d1 = Data(
[[0, 'a'], [1, 'b'], [2, 'b'], [7, 'd']],
names = ['index', 'letter']
)
d2 = Data(
[[None, 3, 'a'], [None, 4, 'b'], [None, 5, 'c'], [None, 6, 'b']],
names = ['none', 'index', 'letter']
)
d3 = Data([
['a', 0, None, 3],
['b', 1, None, 4],
['b', 1, None, 6],
['b', 2, None, 4],
['b', 2, None, 6],
['c', None, None, 5],
['d', 7, None, None],
])
out = merge_on_column(d1, d2, 'letter', d1_name="hoorah", d2_name="loopy")
# Make sure the output columns are named as expected.
assert(all(n1 == n2 for (n1, n2) in zip(
out.columns, ['letter', 'index_hoorah', 'none', 'index_loopy'])))
# Make sure all rows only show True or None in equality operator.
assert(all(len(set(list(r1 == r2)) ^ {True,None}) == 0
for (r1,r2) in zip(d3, out)))
# ----------------------------------------------------------------
# Testing expected exceptions.
# Verify that indexing an element that doesn't exist fails.
try: a.index([1,2,3])
except ValueError: pass
else: assert(False)
# Verify that incorrect names are detected in "Data.effect".
b = a[:]
try: b.effect("bad name")
except Data.BadIndex: pass
else: assert(False)
# Verify that a data object refuses to be added to itself in place.
b = a[:]
try: b += b
except Data.Unsupported: pass
else: assert(False)
# Try assigning with a generator that is too short.
try: a['3'] = (i for i in range(len(a)-1))
except Data.BadData: pass
else: assert(False)
# Try assigning with a generator that is too long.
try: a['3'] = (i for i in range(len(a)+1))
except Data.BadData: pass
else: assert(False)
# Try providing a generator that does not give strictly integers
try: a[(v for v in ('a',1,'b'))]
except Data.BadIndex: pass
else: assert(False)
# Try adding a too-short row
b = a[:]
try: b.append([9,"z"])
except Data.BadElement: pass
else: assert(False)
# Try a too-short column
b = a[:]
try: b.add_column([1])
except Data.BadData: pass
else: assert(False)
# Try a too-long column
b = a[:]
try: b.add_column(map(float,range(1000)))
except Data.BadData: pass
else: assert(False)
# Try adding an empty column
b = a[:]
try: b.add_column([])
except Data.BadData: pass
else: assert(False)
# Try adding a name that is not a string
b = a[:]
try: b.add_column([1,2,3,4,5], name=1)
except Data.BadSpecifiedName: pass
else: assert(False)
# Try adding a column with multiple types
b = a[:]
try: b.add_column([1,2,3,4,5.0])
except Data.BadValue: pass
else: assert(False)
# Try a mismatched-type slice assignment
b = a[:]
try: b[:,0] = [1.0, 1, 1, 1, 1]
except Data.BadAssignment: pass
else: assert(False)
# Try a mismatched-type column assignment operation
try: a["0"] = list(a["0"])[:-1] + ["0"]
except Data.BadAssignment: pass
else: assert(False)
# Try a too-short column assignment operation
try: a["0"] = list(map(str,a["0"]))[:-1]
except Data.BadValue: pass
else: assert(False)
# Try a too-long column assignment operation
try: a["0"] = list(map(str,a["0"])) + [1]
except Data.BadAssignment: pass
else: assert(False)
# Test for an error when an added data set has duplicate columns.
b = a[:]
c = a[:]
c.names[2] = '1'
try: b.collect(c)
    except Data.BadData: pass
else: assert(False)
# Try a bad combination of names and types
try: Data(names=["a","b","c"], types=[str, str])
except Data.NameTypeMismatch: pass
else: assert(False)
# Try a bad value in "names"
try: Data(names=[1,2,3], types=["a","b","c"])
except Data.BadSpecifiedName: pass
else: assert(False)
# Try a bad value in "types"
try: Data(names=["a","b","c"], types=[1,2,3])
except Data.BadSpecifiedType: pass
else: assert(False)
# Try bad value
try: a.append(["a","b","c"])
except Data.BadValue: pass
else: assert(False)
# Try bad element
try: a.append([1,2])
except Data.BadElement: pass
else: assert(False)
# Try non-existing column index
try: a["hello"]
except Data.UnknownName: pass
else: assert(False)
# Attempt to set item on an empty data
b = Data()
try: b[0] = 1
except Data.Empty: pass
else: assert(False)
# Try get item on an uninitialized data
try: Data()["a"]
except Data.Empty: pass
else: assert(False)
# Try retyping an uninitialized data
try: Data().retype([])
except Data.Empty: pass
else: assert(False)
# Try reordering an uninitialized data
try: Data().reorder([])
except Data.Empty: pass
else: assert(False)
# Try copying an uninitialized data
try: Data().copy()
except Data.Empty: pass
else: assert(False)
# Try popping from an uninitialized data
try: Data().pop()
except Data.Empty: pass
else: assert(False)
# Try saving an uninitialized data
try: Data().save("")
except Data.Empty: pass
else: assert(False)
# Done testing
print("passed.")
# =============================================================
# END OF Python 'Data' with named and typed columns
# =============================================================
# Run test cases
if __name__ == "__main__":
# Run the tests without coverage.
test_data()
exit()
# Try to run the tests with code coverage, otherwise just run them.
try:
# Get the path to be watching for code coverage.
import os
path_to_watch = os.path.abspath(os.curdir)
# Activate a code coverage module.
import coverage
cov = coverage.Coverage(source=[path_to_watch])
cov.start()
# Run the tests.
test_data()
# Save data from the coverage tracking (for report).
cov.stop()
cov.save()
# Create a temporary directory for holding test results.
from tempfile import TemporaryDirectory
temp_dir = TemporaryDirectory()
results_dir = temp_dir.name
cov.html_report(directory=results_dir)
# Open the results file.
import webbrowser
webbrowser.open("file://"+os.path.join(results_dir, "index.html"))
# Wait for load, then delete the temporary directory.
# (user might decide to kill this process early).
import time
time.sleep(60*10)
temp_dir.cleanup()
del temp_dir
except ModuleNotFoundError:
pass
| [
"tempfile.TemporaryDirectory",
"util.data.data.flatten",
"coverage.Coverage",
"time.sleep",
"os.path.join",
"util.data.data.merge_on_column",
"util.data.data.Data.load",
"os.path.abspath",
"util.data.data.Data",
"os.remove"
] | [((502, 508), 'util.data.data.Data', 'Data', ([], {}), '()\n', (506, 508), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((6490, 6530), 'util.data.data.Data', 'Data', ([], {'names': "['0', '1']", 'types': '[int, str]'}), "(names=['0', '1'], types=[int, str])\n", (6494, 6530), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((6890, 6913), 'os.remove', 'os.remove', (['"""a-test.csv"""'], {}), "('a-test.csv')\n", (6899, 6913), False, 'import os\n'), ((7078, 7101), 'util.data.data.Data.load', 'Data.load', (['"""a-test.csv"""'], {}), "('a-test.csv')\n", (7087, 7101), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((7147, 7170), 'os.remove', 'os.remove', (['"""a-test.csv"""'], {}), "('a-test.csv')\n", (7156, 7170), False, 'import os\n'), ((7246, 7269), 'util.data.data.Data.load', 'Data.load', (['"""a-test.pkl"""'], {}), "('a-test.pkl')\n", (7255, 7269), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((7317, 7340), 'os.remove', 'os.remove', (['"""a-test.pkl"""'], {}), "('a-test.pkl')\n", (7326, 7340), False, 'import os\n'), ((7506, 7533), 'os.remove', 'os.remove', (['"""a-test.dill.gz"""'], {}), "('a-test.dill.gz')\n", (7515, 7533), False, 'import os\n'), ((7696, 7722), 'os.remove', 'os.remove', (['"""a-test.csv.gz"""'], {}), "('a-test.csv.gz')\n", (7705, 7722), False, 'import os\n'), ((10746, 10752), 'util.data.data.Data', 'Data', ([], {}), '()\n', (10750, 10752), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((10890, 10896), 'util.data.data.Data', 'Data', ([], {}), '()\n', (10894, 10896), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((14219, 14292), 'util.data.data.Data', 'Data', (["[[0, 'a'], [1, 'b'], [2, 'b'], [7, 'd']]"], {'names': "['index', 'letter']"}), "([[0, 'a'], [1, 'b'], [2, 'b'], [7, 'd']], names=['index', 'letter'])\n", (14223, 14292), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((14326, 14435), 'util.data.data.Data', 'Data', (["[[None, 3, 'a'], [None, 4, 'b'], [None, 5, 'c'], [None, 6, 'b']]"], {'names': "['none', 'index', 'letter']"}), "([[None, 3, 'a'], [None, 4, 'b'], [None, 5, 'c'], [None, 6, 'b']],\n names=['none', 'index', 'letter'])\n", (14330, 14435), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((14465, 14614), 'util.data.data.Data', 'Data', (["[['a', 0, None, 3], ['b', 1, None, 4], ['b', 1, None, 6], ['b', 2, None, 4],\n ['b', 2, None, 6], ['c', None, None, 5], ['d', 7, None, None]]"], {}), "([['a', 0, None, 3], ['b', 1, None, 4], ['b', 1, None, 6], ['b', 2,\n None, 4], ['b', 2, None, 6], ['c', None, None, 5], ['d', 7, None, None]])\n", (14469, 14614), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((14684, 14752), 'util.data.data.merge_on_column', 'merge_on_column', (['d1', 'd2', '"""letter"""'], {'d1_name': '"""hoorah"""', 'd2_name': '"""loopy"""'}), "(d1, d2, 'letter', d1_name='hoorah', d2_name='loopy')\n", (14699, 14752), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((18713, 18719), 'util.data.data.Data', 'Data', ([], {}), '()\n', (18717, 18719), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((17915, 17960), 'util.data.data.Data', 'Data', ([], {'names': "['a', 'b', 'c']", 'types': '[str, str]'}), "(names=['a', 'b', 'c'], types=[str, str])\n", (17919, 17960), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((18068, 18112), 'util.data.data.Data', 'Data', ([], {'names': 
'[1, 2, 3]', 'types': "['a', 'b', 'c']"}), "(names=[1, 2, 3], types=['a', 'b', 'c'])\n", (18072, 18112), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((18218, 18262), 'util.data.data.Data', 'Data', ([], {'names': "['a', 'b', 'c']", 'types': '[1, 2, 3]'}), "(names=['a', 'b', 'c'], types=[1, 2, 3])\n", (18222, 18262), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((20055, 20081), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (20070, 20081), False, 'import os\n'), ((20163, 20204), 'coverage.Coverage', 'coverage.Coverage', ([], {'source': '[path_to_watch]'}), '(source=[path_to_watch])\n', (20180, 20204), False, 'import coverage\n'), ((20503, 20523), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {}), '()\n', (20521, 20523), False, 'from tempfile import TemporaryDirectory\n'), ((20891, 20910), 'time.sleep', 'time.sleep', (['(60 * 10)'], {}), '(60 * 10)\n', (20901, 20910), False, 'import time\n'), ((6817, 6823), 'util.data.data.Data', 'Data', ([], {}), '()\n', (6821, 6823), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((7429, 7435), 'util.data.data.Data', 'Data', ([], {}), '()\n', (7433, 7435), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((7620, 7626), 'util.data.data.Data', 'Data', ([], {}), '()\n', (7624, 7626), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((14098, 14123), 'util.data.data.flatten', 'flatten', (['complex_sequence'], {}), '(complex_sequence)\n', (14105, 14123), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((18849, 18855), 'util.data.data.Data', 'Data', ([], {}), '()\n', (18853, 18855), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((18967, 18973), 'util.data.data.Data', 'Data', ([], {}), '()\n', (18971, 18973), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((19093, 19099), 'util.data.data.Data', 'Data', ([], {}), '()\n', (19097, 19099), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((19217, 19223), 'util.data.data.Data', 'Data', ([], {}), '()\n', (19221, 19223), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((19341, 19347), 'util.data.data.Data', 'Data', ([], {}), '()\n', (19345, 19347), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((19458, 19464), 'util.data.data.Data', 'Data', ([], {}), '()\n', (19462, 19464), False, 'from util.data.data import Data, flatten, merge_on_column\n'), ((20700, 20739), 'os.path.join', 'os.path.join', (['results_dir', '"""index.html"""'], {}), "(results_dir, 'index.html')\n", (20712, 20739), False, 'import os\n')] |
# This is a simplified version of Cases2Beds with generic parameters.
# DELPHI Group
# Contact <EMAIL> if you have any questions
# Input Files:
# Opt 1a. distribution_example.csv:
# A set of values to split the cases by age group (no other functionality)
# Opt 1b. cases_example.csv:
# The number of cases per day - can be output from covidcast.
# OR 2. Cases_Split.csv: Case inputs broken down by demographics
# 3. input_parameters_generic.csv: Sample parameters for C2B
# Output Files:
# 1. C2B_output.csv: The Cases2Beds Outputs as a csv file
# 2. Plot of C2B
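# Illustrative invocation (a sketch; assumes the example files listed above
# sit next to this script):
#   python C2B_basic.py -c cases_example.csv -d distribution_example.csv \
#       -i input_parameters_generic.csv
# or, with cases already split by demographics:
#   python C2B_basic.py -s Cases_Split.csv -i input_parameters_generic.csv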
from datetime import date
from datetime import datetime
from datetime import timedelta
from scipy.stats import expon
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error
import datetime
import getopt
import glob
import math
import numpy as np
import os
import os.path
import pandas as pd
import plotly
import plotly.express as px
import plotly.graph_objects as go
import psutil
import random
import requests
import sys
import scipy.stats as st
import scipy.stats as stats
import statsmodels.api as sm
import time
def o_d(p1, p2, p3):
""" Creating the offset dictionary with the three parameters:
1. Fraction of COVID Admissions that tested positive on
Admission Date (or one day before/after)
2. Fraction of COVID Admissions that tested positive more than
one day AFTER being admitted
3. Mean number of days from testing date to admission date,
among admissions that happened more than one day after testing
Outputs a dictionary
These three parameters can be modified in the file,
"distribution_example.csv"
"""
negative_mean = 4 # hardcoded
offset_dict = {}
off_bott = p2
off_top = 1-off_bott-p1
offset_dict[-1] = round(p1/3, 10)
offset_dict[0] = round(p1/3, 10)
offset_dict[1] = round(p1/3, 10)
exp_sum = 0
exp_sum_2 = 0
if p2 != 0:
for i in range(0, 9):
exp_sum += round(expon.pdf(i, 0, negative_mean), 10)
for j in range(-10, -1):
offset_dict[j] = round(round(expon.pdf(
(j * -1)-2, 0, negative_mean), 10) * off_bott * round(
(1 / exp_sum), 10), 10)
if p3 != 0:
for i in range(0, 29):
exp_sum_2 += round(expon.pdf(i, 0, p3), 10)
for j in range(2, 31):
offset_dict[j] = round(round(expon.pdf(
j-2, 0, p3), 10) * off_top * round((1 / exp_sum_2), 10), 10)
return offset_dict
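# Example (illustrative, uncalibrated numbers): if half of admissions test
# positive within a day of admission, 20% test positive more than a day after
# admission, and the remaining admissions happen on average 4 days after the
# test, then
#   offsets = o_d(0.5, 0.2, 4)
# gives a dict mapping day offsets in [-10, 30] (admission date relative to
# testing date) to probabilities that sum to roughly 1.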
def d_d(p1, p2):
""" Creating the duration dictionary with the two parameters:
1. Mean Length of Stay
2. Fraction of Hospitalizations that last 0 days [Optional]
Outputs a dictionary
These parameters can be modified in the file, "distribution_example.csv"
"""
duration_dict = {}
duration_dict[0] = p2
exp_sum = 0
if p1 > 0:
for j in range(0, 40):
exp_sum = round(exp_sum + expon.pdf(j, 0, p1), 10)
for j in range(1, 41):
duration_dict[j] = round(round(expon.pdf(
j-1, 0, p1), 10) * (1-p2) * round((1/exp_sum), 10), 10)
return duration_dict
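# Example (illustrative): a mean length of stay of 5 days, with 10% of
# hospitalizations lasting 0 days,
#   durations = d_d(5, 0.1)
# gives a dict mapping lengths of stay 0..40 days to probabilities that sum
# to roughly 1.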
def gen_distribution_params(age, race, gender, parameters):
"""
A method with inputs of a specific age group, race, gender,
and pre-set parameters list. This method returns either the
parameters row that's an exact match to the queried age group,
race, and gender, or extrapolates it from
other rows in the parameters list.
It outputs a data frame row of parameters.
"""
delta = 0.0000000000001
col_list = ["hosp_rate", "pos_one_day",
"frac_less_1", "frac_0"]
if [age, race, gender].count("unspecified") >= 2:
age_l = parameters[parameters["age"] == age]
race_l = age_l[age_l["race"] == race]
new_params = race_l[race_l["gender"] == gender].reset_index(
drop=True).iloc[:, 3:]
    else: # at most one of age, race, gender is unspecified
new_params = pd.DataFrame()
age_params = parameters[(parameters["age"] == age) & (
parameters["race"] == "unspecified") & (
parameters["gender"] == "unspecified")].reset_index(drop=True)
if race != "unspecified" and gender == "unspecified":
race_val = parameters[parameters["race"] == race].reset_index(
drop=True)
race_list = parameters[parameters["race"] != "unspecified"]
race_average = race_list.mean()
for index, col in enumerate(race_val.columns[3:]):
rel_gain = (age_params[col].values[0] *
race_val[col].values[0]) / [race_average[col].mean(
) + delta]
                if col in col_list and rel_gain >= 1:
rel_gain = 0.99
new_params.loc[0, col] = round(rel_gain[0], 10)
elif gender != "unspecified" and race == "unspecified":
gen_val = parameters[parameters["gender"] == gender].reset_index(
drop=True)
gen_average = parameters.query(
"gender!='unspecified'").mean()
for index, col in enumerate(gen_val.columns[3:]):
rel_gain_2 = age_params[col].values[0]
rel_gain_num = (rel_gain_2*gen_val[col].values[0])
rel_gain = rel_gain_num/[gen_average[col].mean() + delta]
if col in col_list and rel_gain >= 1:
rel_gain = 0.99
new_params.loc[0, col] = round(rel_gain[0], 10)
else: # both are specified
gen_val = parameters[parameters["gender"] == gender].reset_index(
drop=True)
gen_average = parameters.query("gender!='unspecified'").mean()
race_val = parameters[parameters["race"] == race].reset_index(
drop=True)
race_l = parameters[parameters["race"] != "unspecified"]
race_average = race_l.mean()
for index, col in enumerate(gen_val.columns[3:]):
rel_gain = age_params[col].values[0] * (
gen_val[col].values[0] /
[gen_average[col].mean() + delta] *
race_val[col].values[0] /
[race_average[col].mean() + delta]) ** 0.5
if col in col_list and rel_gain >= 1:
rel_gain = 0.99
new_params.loc[0, col] = round(rel_gain[0], 10)
return new_params
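# Illustrative lookups (assumes 'params' is the assumptions table from
# input_parameters_generic.csv, with age, race, gender columns followed by the
# numeric parameters referenced above):
#   gen_distribution_params("20-29", "unspecified", "unspecified", params)  # exact row
#   gen_distribution_params("20-29", "Black", "F", params)  # extrapolated row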
def gen_distribution(age, race, gender, params):
"""
A method with inputs of a specific age group, race, gender,
and pre-set parameters list, which are passed to the
gen_distribution_params function. With the
respective parameters for this specific age group, race, and gender,
we create the offset_dict and duration_dict.
The method returns the parameters, the offset dictionary, and
the duration dictionary for a specific row.
"""
new_params = gen_distribution_params(age, race, gender, params)
offset_dict = o_d(new_params["pos_one_day"].values[0],
new_params["frac_less_1"].values[0],
new_params["mean_greater_1"].values[0])
duration_dict = d_d(new_params["mean"].values[0],
new_params["frac_0"].values[0])
return(duration_dict, offset_dict, new_params)
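# Usage sketch: unpack the per-group pieces consumed downstream,
#   D, O, row_params = gen_distribution("70-79", "unspecified", "unspecified", params)
# where D is the length-of-stay distribution, O the testing-to-admission
# offset distribution, and row_params carries "hosp_rate" for that group.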
def gen_mean_var(params_in, cases_in):
"""
The inputs are the parameters and the cases.
This method creates the mean and variance
dictionaries by iterating over every row
in the cases file.
For more details on the methodology, see:
https://delphi.cmu.edu/blog/2021/03/10/
cases2beds-a-case-study-in-actionable-intelligence/
The outputs are two dictionaries that
correspond to the day and the mean beds for that day
or the variance as calculated by C2B.
"""
mean_d = {}
var_d = {}
for i, case in cases_in.iterrows():
if i >= 0:
OF = [0] * 41
age_group = case["Age Group (optional but highly desirable)"]
race = case["Race/Ethnicity (Black, White," +
" Asian, Hispanic, or unspecified)"]
gen = case["Gender (F, M, or unspecified)"]
cases = case["Number of such Cases (optional, defaults to 1)"]
date = pd.to_datetime(case["Specimen Testing Date (mandatory)"])
if not pd.isnull(date):
D, O, params = gen_distribution(
age_group, race, gen, params_in)
hosp_factor = float(params["hosp_rate"])
for o in range(min(list(O.keys())), max(list(O.keys()))+1):
curr_o = O[o]
for d in range(min(list(D.keys())), max(list(D.keys()))+1):
t_val = D[d]*curr_o*hosp_factor
for k in range(o, o+d+1):
if (k < 31):
OF[k+10] += t_val
for j in range(-10, 31):
curr_date = date + timedelta(days=j)
p = OF[j+10]
mean_d[curr_date] = mean_d.get(curr_date, 0) + (p*cases)
var_d[curr_date] = var_d.get(curr_date, 0) + (p*cases*(1-p))
return mean_d, var_d
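# Note on the updates above: a case tested on a given day occupies a bed on a
# nearby day with probability p (hospitalization rate x offset x duration
# mass). Treating n identical cases as Binomial(n, p), that day's mean gains
# n*p and its variance gains n*p*(1-p), which is exactly what mean_d and
# var_d accumulate.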
def C2B(params_in, cases_in):
"""
The inputs are the parameters and the cases.
This method takes the generated mean and variance dictionaries to
calculate the values of the (5, 25, 50, 75, and 95) quantiles.
The method outputs a data frame with these quantiles, as well as the
mean and variance data frames.
"""
mean_d, var_d = gen_mean_var(params_in, cases_in)
mean_df = pd.DataFrame.from_dict(mean_d, orient='index')
var_df = pd.DataFrame.from_dict(var_d, orient='index')
plotting_df = pd.DataFrame()
plotting_df["dates"] = list(mean_d.keys())
plotting_df["mean"] = list(mean_d.values())
plotting_df["var"] = list(var_d.values())
plotting_df["stdev"] = [math.sqrt(x) for x in var_d.values()]
plotting_df["Q5"] = (-1.645*plotting_df["stdev"]) + plotting_df["mean"]
plotting_df["Q25"] = (-0.675*plotting_df["stdev"]) + plotting_df["mean"]
plotting_df["Q75"] = (0.675*plotting_df["stdev"]) + plotting_df["mean"]
plotting_df["Q95"] = (1.645*plotting_df["stdev"]) + plotting_df["mean"]
plotting_df["Q5"][plotting_df["Q5"] < 0] = 0
plotting_df["Q25"][plotting_df["Q25"] < 0] = 0
return plotting_df, mean_df, var_df
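# Note: the quantiles use a normal approximation around the accumulated mean
# and variance, e.g. Q5 = mean - 1.645*stdev and Q95 = mean + 1.645*stdev,
# with negative lower quantiles clipped to zero.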
def ae_plot(plotting_df):
"""
This method takes the data frame with the
quantiles and returns a Plotly figure.
"""
x = plotting_df["dates"]
fig = go.Figure()
fig.add_trace(go.Scatter(x=x, y=plotting_df["Q5"],
name="Q5", fill=None, mode='lines',
line_color='lightblue'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["Q25"],
showlegend=False, fill='tonexty', mode='lines',
line_color='lightblue'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["Q25"],
name="Q25", fill=None, mode='lines',
line_color='blue'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["mean"],
showlegend=False, fill='tonexty',
mode='lines', line_color='blue'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["mean"],
name="Q50", fill=None, mode='lines',
line_color='black'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["Q75"],
showlegend=False, fill='tonexty', mode='lines',
line_color='blue'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["Q75"],
name="Q75", fill=None, mode='lines',
line_color='blue'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["Q95"],
showlegend=False, fill='tonexty',
mode='lines', line_color='lightblue'))
fig.add_trace(go.Scatter(x=x, y=plotting_df["Q95"],
name="Q95", fill=None, mode='lines',
line_color='lightblue'))
fig.update_layout(title_text='Cases2Beds Outputs',
xaxis_title="Dates", yaxis_title="Beds Needed")
return fig
def C2B_outputs_w_input_params(cases, dem_breakdown):
"""
This method replicates the "Easy Input" tab on the Excel worksheet.
If the user inputs case numbers and the demographics they are using, then
this method splits the total cases by the demographic breakdowns.
Currently, only age is considered. This method creates a "Cases_Split.csv"
file, which is then used with the rest of C2B.
"""
cases["date"] = pd.to_datetime(cases["date"])
age_list = []
for index, row in cases[["cases", "date"]].iterrows():
left_over = row["cases"]
for age_gr in ["0-9", "10-19", "20-29", "30-39",
"40-49", "50-59", "60-69", "70-79", "80+",
"unspecified"]:
age_dict = {}
p_change = dem_breakdown.loc[:, age_gr].mean()
age_dict["Specimen Testing Date" +
" (mandatory)"] = row["date"].strftime(
'%m/%d/%Y')
cases_used = round(
row["cases"] * p_change)
left_over = left_over - cases_used
if age_gr == "unspecified":
cases_used += left_over
age_dict["Number of such Cases" +
" (optional, defaults to 1)"] = cases_used
age_dict["Age Group (optional but highly desirable)"] = age_gr
age_dict["Race/Ethnicity (Black, White," +
" Asian, Hispanic, or unspecified)"] = "unspecified"
age_dict["Gender (F, M, or unspecified)"] = "unspecified"
age_list.append(age_dict)
age_df = pd.DataFrame(age_list).fillna("unspecified")
age_df.to_csv("Cases_Split.csv", date_format='%m/%d/%Y')
return(age_df)
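# Simplified sketch (hypothetical helper; not used by the script) of the splitting
# rule above: cases are rounded per age group and the rounding remainder is pushed
# into the "unspecified" bucket so the per-day total is preserved.
def _split_cases_example(total_cases, proportions):
    counts = [round(total_cases * p) for p in proportions]
    counts.append(total_cases - sum(counts))  # remainder -> "unspecified"
    return counts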
def gen_plot_C2B(age_df, in_params):
"""
This method creates the plot for Cases2Beds.
The inputs are the cases data frame from the "Cases (the input)" tab
and the parameters from the assumptions tab in the spreadsheet.
The output is the output to C2B.
"""
plot_df, mean, var = C2B(in_params, age_df)
plot_df = plot_df.sort_values(by="dates").reset_index(drop=True)
plot_df.to_csv("C2B_output.csv")
fig = ae_plot(plot_df)
fig.show()
# fig.write_image("graphs/C2B.png")
return plot_df
def main(argv):
"""
This is the main method.
There are two options to recreate the "Cases (the input)" tab.
F1 and F2 correspond to .csv files that have the cases by day
and the demographic breakdown, respectively ("Easy Input" tab).
Alternatively, you can use -c to pass a file that has the cases split.
-ip are the parameters for C2B that match the
"Assumptions" tab in the worksheet.
"""
cases = ""
dem_breakdown = ""
cases_split = ""
in_params = ""
cases_split_frame = pd.DataFrame()
try:
opts, args = getopt.getopt(argv, "hc:s:d:i:")
except getopt.GetoptError:
print('C2B_basic.py -c <cases> -d <dem_breakdown> -s ' +
'<cases_split> -i <input_params>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('C2B_basic.py -c <cases> -d <dem_breakdown> -s' +
' <cases_split> -i <input_params>')
sys.exit()
elif opt in ("-c"):
cases = arg
elif opt in ("-d"):
dem_breakdown = arg
elif opt in ("-s"):
cases_split = arg
elif opt in ("-i"):
in_params = arg
if cases != "" and dem_breakdown != "":
cases = pd.read_csv(cases)
dem_breakdown = pd.read_csv(dem_breakdown, index_col=0).reset_index()
if abs(1-dem_breakdown.sum(axis=1).values[0]) > 1e-5:
print("The age distribution " +
"doesn't sum to 1. Err:", 1-dem_breakdown.sum(
axis=1).values)
sys.exit(2)
_ = C2B_outputs_w_input_params(cases, dem_breakdown)
if cases_split_frame.empty:
if cases_split != "":
cases_split_frame = pd.read_csv(cases_split,
index_col=0, header=0).dropna(
how="all")
else:
cases_split_frame = pd.read_csv("Cases_Split.csv",
index_col=0, header=0).dropna(
how="all")
cases_split_frame["Specimen Testing Date " +
"(mandatory)"] = pd.to_datetime(
cases_split_frame["Specimen " +
"Testing Date (mandatory)"])
cases_split_frame["Number of such Cases " +
"(optional, defaults to 1)"] = cases_split_frame[
"Number of such Cases" +
" (optional, defaults to 1)"].replace(
np.nan, 1)
cases_split_frame["Age Group (optional but" +
" highly desirable)"] = cases_split_frame[
"Age Group (optional but" +
" highly desirable)"].replace(
np.nan, "unspecified")
cases_split_frame["Race/Ethnicity " +
"(Black, White, " +
"Asian, Hispanic, or" +
" unspecified)"] = cases_split_frame[
"Race/Ethnicity " +
"(Black, White, " +
"Asian, Hispanic, " +
"or unspecified)"].replace(
np.nan, "unspecified")
cases_split_frame["Gender (F, M, " +
"or unspecified)"] = cases_split_frame[
"Gender (F, M, " +
"or unspecif" +
"ied)"].replace(
np.nan, "unspecified")
if in_params == "":
in_params = pd.read_csv("input_parameters_generic.csv")
else:
in_params = pd.read_csv(in_params)
_ = gen_plot_C2B(cases_split_frame, in_params)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"pandas.isnull",
"getopt.getopt",
"pandas.read_csv",
"scipy.stats.expon.pdf",
"math.sqrt",
"pandas.DataFrame.from_dict",
"plotly.graph_objects.Figure",
"plotly.graph_objects.Scatter",
"sys.exit",
"pandas.DataFrame",
"datetime.timedelta",
"pandas.to_datetime"
] | [((9827, 9873), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['mean_d'], {'orient': '"""index"""'}), "(mean_d, orient='index')\n", (9849, 9873), True, 'import pandas as pd\n'), ((9887, 9932), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['var_d'], {'orient': '"""index"""'}), "(var_d, orient='index')\n", (9909, 9932), True, 'import pandas as pd\n'), ((9951, 9965), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (9963, 9965), True, 'import pandas as pd\n'), ((10790, 10801), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (10799, 10801), True, 'import plotly.graph_objects as go\n'), ((12991, 13020), 'pandas.to_datetime', 'pd.to_datetime', (["cases['date']"], {}), "(cases['date'])\n", (13005, 13020), True, 'import pandas as pd\n'), ((15351, 15365), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (15363, 15365), True, 'import pandas as pd\n'), ((4081, 4095), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4093, 4095), True, 'import pandas as pd\n'), ((10135, 10147), 'math.sqrt', 'math.sqrt', (['x'], {}), '(x)\n', (10144, 10147), False, 'import math\n'), ((10820, 10920), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['Q5']", 'name': '"""Q5"""', 'fill': 'None', 'mode': '"""lines"""', 'line_color': '"""lightblue"""'}), "(x=x, y=plotting_df['Q5'], name='Q5', fill=None, mode='lines',\n line_color='lightblue')\n", (10830, 10920), True, 'import plotly.graph_objects as go\n'), ((10994, 11107), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['Q25']", 'showlegend': '(False)', 'fill': '"""tonexty"""', 'mode': '"""lines"""', 'line_color': '"""lightblue"""'}), "(x=x, y=plotting_df['Q25'], showlegend=False, fill='tonexty',\n mode='lines', line_color='lightblue')\n", (11004, 11107), True, 'import plotly.graph_objects as go\n'), ((11182, 11279), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['Q25']", 'name': '"""Q25"""', 'fill': 'None', 'mode': '"""lines"""', 'line_color': '"""blue"""'}), "(x=x, y=plotting_df['Q25'], name='Q25', fill=None, mode='lines',\n line_color='blue')\n", (11192, 11279), True, 'import plotly.graph_objects as go\n'), ((11353, 11462), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['mean']", 'showlegend': '(False)', 'fill': '"""tonexty"""', 'mode': '"""lines"""', 'line_color': '"""blue"""'}), "(x=x, y=plotting_df['mean'], showlegend=False, fill='tonexty',\n mode='lines', line_color='blue')\n", (11363, 11462), True, 'import plotly.graph_objects as go\n'), ((11537, 11636), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['mean']", 'name': '"""Q50"""', 'fill': 'None', 'mode': '"""lines"""', 'line_color': '"""black"""'}), "(x=x, y=plotting_df['mean'], name='Q50', fill=None, mode='lines',\n line_color='black')\n", (11547, 11636), True, 'import plotly.graph_objects as go\n'), ((11711, 11819), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['Q75']", 'showlegend': '(False)', 'fill': '"""tonexty"""', 'mode': '"""lines"""', 'line_color': '"""blue"""'}), "(x=x, y=plotting_df['Q75'], showlegend=False, fill='tonexty',\n mode='lines', line_color='blue')\n", (11721, 11819), True, 'import plotly.graph_objects as go\n'), ((11893, 11990), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['Q75']", 'name': '"""Q75"""', 'fill': 'None', 'mode': '"""lines"""', 'line_color': '"""blue"""'}), "(x=x, y=plotting_df['Q75'], name='Q75', 
fill=None, mode='lines',\n line_color='blue')\n", (11903, 11990), True, 'import plotly.graph_objects as go\n'), ((12064, 12177), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['Q95']", 'showlegend': '(False)', 'fill': '"""tonexty"""', 'mode': '"""lines"""', 'line_color': '"""lightblue"""'}), "(x=x, y=plotting_df['Q95'], showlegend=False, fill='tonexty',\n mode='lines', line_color='lightblue')\n", (12074, 12177), True, 'import plotly.graph_objects as go\n'), ((12251, 12353), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'x', 'y': "plotting_df['Q95']", 'name': '"""Q95"""', 'fill': 'None', 'mode': '"""lines"""', 'line_color': '"""lightblue"""'}), "(x=x, y=plotting_df['Q95'], name='Q95', fill=None, mode='lines',\n line_color='lightblue')\n", (12261, 12353), True, 'import plotly.graph_objects as go\n'), ((15396, 15428), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hc:s:d:i:"""'], {}), "(argv, 'hc:s:d:i:')\n", (15409, 15428), False, 'import getopt\n'), ((16076, 16094), 'pandas.read_csv', 'pd.read_csv', (['cases'], {}), '(cases)\n', (16087, 16094), True, 'import pandas as pd\n'), ((18548, 18591), 'pandas.read_csv', 'pd.read_csv', (['"""input_parameters_generic.csv"""'], {}), "('input_parameters_generic.csv')\n", (18559, 18591), True, 'import pandas as pd\n'), ((18622, 18644), 'pandas.read_csv', 'pd.read_csv', (['in_params'], {}), '(in_params)\n', (18633, 18644), True, 'import pandas as pd\n'), ((8463, 8520), 'pandas.to_datetime', 'pd.to_datetime', (["case['Specimen Testing Date (mandatory)']"], {}), "(case['Specimen Testing Date (mandatory)'])\n", (8477, 8520), True, 'import pandas as pd\n'), ((14157, 14179), 'pandas.DataFrame', 'pd.DataFrame', (['age_list'], {}), '(age_list)\n', (14169, 14179), True, 'import pandas as pd\n'), ((15583, 15594), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (15591, 15594), False, 'import sys\n'), ((15779, 15789), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15787, 15789), False, 'import sys\n'), ((16392, 16403), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (16400, 16403), False, 'import sys\n'), ((17025, 17100), 'pandas.to_datetime', 'pd.to_datetime', (["cases_split_frame['Specimen ' + 'Testing Date (mandatory)']"], {}), "(cases_split_frame['Specimen ' + 'Testing Date (mandatory)'])\n", (17039, 17100), True, 'import pandas as pd\n'), ((2048, 2078), 'scipy.stats.expon.pdf', 'expon.pdf', (['i', '(0)', 'negative_mean'], {}), '(i, 0, negative_mean)\n', (2057, 2078), False, 'from scipy.stats import expon\n'), ((2358, 2377), 'scipy.stats.expon.pdf', 'expon.pdf', (['i', '(0)', 'p3'], {}), '(i, 0, p3)\n', (2367, 2377), False, 'from scipy.stats import expon\n'), ((8540, 8555), 'pandas.isnull', 'pd.isnull', (['date'], {}), '(date)\n', (8549, 8555), True, 'import pandas as pd\n'), ((16119, 16158), 'pandas.read_csv', 'pd.read_csv', (['dem_breakdown'], {'index_col': '(0)'}), '(dem_breakdown, index_col=0)\n', (16130, 16158), True, 'import pandas as pd\n'), ((3001, 3020), 'scipy.stats.expon.pdf', 'expon.pdf', (['j', '(0)', 'p1'], {}), '(j, 0, p1)\n', (3010, 3020), False, 'from scipy.stats import expon\n'), ((16559, 16606), 'pandas.read_csv', 'pd.read_csv', (['cases_split'], {'index_col': '(0)', 'header': '(0)'}), '(cases_split, index_col=0, header=0)\n', (16570, 16606), True, 'import pandas as pd\n'), ((16760, 16813), 'pandas.read_csv', 'pd.read_csv', (['"""Cases_Split.csv"""'], {'index_col': '(0)', 'header': '(0)'}), "('Cases_Split.csv', index_col=0, header=0)\n", (16771, 16813), True, 'import pandas as pd\n'), ((9183, 9200), 
'datetime.timedelta', 'timedelta', ([], {'days': 'j'}), '(days=j)\n', (9192, 9200), False, 'from datetime import timedelta\n'), ((2158, 2197), 'scipy.stats.expon.pdf', 'expon.pdf', (['(j * -1 - 2)', '(0)', 'negative_mean'], {}), '(j * -1 - 2, 0, negative_mean)\n', (2167, 2197), False, 'from scipy.stats import expon\n'), ((2455, 2478), 'scipy.stats.expon.pdf', 'expon.pdf', (['(j - 2)', '(0)', 'p3'], {}), '(j - 2, 0, p3)\n', (2464, 2478), False, 'from scipy.stats import expon\n'), ((3100, 3123), 'scipy.stats.expon.pdf', 'expon.pdf', (['(j - 1)', '(0)', 'p1'], {}), '(j - 1, 0, p1)\n', (3109, 3123), False, 'from scipy.stats import expon\n')] |
#Get each patient file. Get different views from each file. Get 3 axes from each view
#t1 - T1
#t1c - T1 + contrast
#t2 - T2
#pwi - perfusion-weight imaging
#ax - axial
#cor - coronal
#sag - sagittal
#sat - saturated
#fs - fat saturated
#un - neither
#make a nested dictionary - key = patientID, value = dictionary with keys as each possible combo of above descriptors, value is associated image
import nrrd #library for reading/writing NRRD (multi-dimensional image) files to and from numpy arrays
import os #interacting with terminal
import shutil
from segmentation import calculate_largest_slice, select_slice, bounding_box, crop, resize #file with segmentation functions
from config import config #configuration file
import matplotlib.pyplot as plt
#from path import Path #path = wrapper for os.path
from filenames import SKIP
import csv
import itertools
def deleteUselessFiles(path):
"""
Param: file path
Function: Remove all files that are not .nrrd files
Returns: None
"""
    # `path.Path` is no longer imported (see the commented import above), so walk the tree with os
    for root, _, files in os.walk(path):
        for file in files:
            if not file.endswith(".nrrd"):
                os.remove(os.path.join(root, file))
return
def load_image(image_path, segmentation_path, axis, verbose=False):
"""
Param: path to image, path to image segmentation
Function: Load, resize, and mask image by the segmentation of its largest slice
Returns: List masked images representing each of the 3 views
"""
image, _ = nrrd.read(image_path)
segmentation, _ = nrrd.read(segmentation_path)
if verbose:
print("""
image: {}
seg: {}
""".format(image.shape, segmentation.shape))
largest_plane = calculate_largest_slice(segmentation, axis) #find largest cross section
image, segmentation = select_slice(image, segmentation, largest_plane, axis) #image and segmentation pattern of the largest cross section
bounds = bounding_box(segmentation) #Bound image by largest slice
image, segmentation = crop(image, segmentation, bounds) #Crop image to fit bounds
masked = image * segmentation #Mask everything not of interest (#keep things in segment 1, lose things in segment 0)
masked = resize(masked, (config.IMAGE_SIZE, config.IMAGE_SIZE)) #Standardize masked image to given size
return masked
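# Illustrative usage (hypothetical helper; the pipeline below builds the actual
# paths from the patient directory layout): load_image is called once per
# anatomical axis to obtain the three masked views.
def _load_all_axes(image_volume, seg_mask):
    return [load_image(image_volume, seg_mask, axis) for axis in range(3)]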
def processFiles(path):
"""
Param: path to raw images
Returns: all png's to folder to look through for QA
"""
save_path = "/Volumes/external/bone_master/bone_qa/"
messed_up = []
#available_sequences = {'t1':[],'t2':[],'t1c':[],'pd':[], 'none':[], 't2-t1c':[], 't2-t1':[], 'all_patients':[]}
seq_by_ID = [["patientID", "t1", "t1c", "t2"]]
print("Loading images...")
if not os.path.isdir(path):
print("Error: Path does not exist.")
return
data_all = os.listdir(path) #all sources of data (ex. PENN, China, CHOP)
total_patients = 0
num_sources = 0
for source in data_all:
num_sources+=1
if source == '.DS_Store':
continue
print("Current source: "+ source)
data_source = os.listdir(path + "/" + source) #all patients from particular source
num_patients = float(len(data_source))
print("Number of patients: " + str(num_patients))
completed = 0
threshold = 10
for data_patient in data_source: #for each patient
total_patients += 1
new_input = [data_patient,0,0,0]
if data_patient == '.DS_Store' or data_patient == '._.DS_Store':#or data_patient in SKIP:
continue
print(data_patient)
patient_views = os.listdir(path + "/" + source + "/" + data_patient)
#added_views = 0
t1_dup = 0
t2_dup = 0
t1c_dup = 0
for view in patient_views:
view_path = path + "/" + source + "/" + data_patient + "/" + view + "/"
if view == '.DS_Store' or not os.path.isdir(view_path):
continue
imageVolume = view_path + "imagingVolume.nrrd"
segMask = view_path + "segMask_tumor.nrrd"
segMask_GTV = view_path + "segMask_GTV.nrrd"
if os.path.isfile(segMask_GTV):
os.rename(view_path + "segMask_GTV.nrrd", view_path + "segMask_tumor.nrrd")
has_volumes = True
if not (os.path.isfile(imageVolume) and os.path.isfile(segMask)):
has_volumes = False
nrrds = []
for file in os.listdir(view_path):
if file.endswith(".nrrd"):
nrrds.append(file)
print("{}, {}, {}".format(data_patient, view, nrrds))
continue
if "t1" in view and has_volumes:
if "t1c" in view:
new_input[2] = 1
try:
os.rename(view_path, path + "/" + source + "/" + data_patient + "/t1c")
except:
os.rename(view_path, path + "/" + source + "/" + data_patient + "/t1c" + "_{}".format(str(t1c_dup)))
t1c_dup += 1
else:
new_input[1] = 1
try:
os.rename(view_path, path + "/" + source + "/" + data_patient + "/t1")
except:
os.rename(view_path, path + "/" + source + "/" + data_patient + "/t1" + "_{}".format(str(t1_dup)))
t1_dup += 1
elif "t2" in view and has_volumes:
new_input[3] = 1
try:
os.rename(view_path, path + "/" + source + "/" + data_patient + "/t2")
except:
os.rename(view_path, path + "/" + source + "/" + data_patient + "/t2" + "_{}".format(str(t2_dup)))
t2_dup += 1
else:
pass
seq_by_ID.append(new_input)
#try:
#if data_patient not in available_sequences[view]:
#available_sequences[view].append(data_patient)
#added_views += 1
#except:
#pass
#if not (os.path.isfile(imageVolume) and os.path.isfile(segMask)):
# continue
#try:
# images = [load_image(imageVolume, segMask, i) for i in range(3)]
# plt.imsave(save_path+data_patient+"_"+view+"_"+"0.png", images[0])
# plt.imsave(save_path+data_patient+"_"+view+"_"+"1.png", images[1])
# plt.imsave(save_path+data_patient+"_"+view+"_"+"2.png", images[2])
#except:
# messed_up.append(data_patient)
# continue
#if not os.path.isfile(save_path+data_patient+"_"+view+"_"+"0.png"):
#plt.imsave(save_path+data_patient+"_"+view+"_"+"0.png", images[0])
#plt.imsave(save_path+data_patient+"_"+view+"_"+"1.png", images[1])
#plt.imsave(save_path+data_patient+"_"+view+"_"+"2.png", images[2])
#available_sequences['all_patients'].append(data_patient)
#if not added_views:
#available_sequences['none'].append(patientID)
#completed = completed + 1
if completed/num_patients*100 > threshold:
print(str(threshold) + "% complete")
threshold = threshold + 10
#keys = sorted(available_sequences.keys())
#t1_available = available_sequences['t1']
#t1c_available = available_sequences['t1c']
#t2_available = available_sequences['t2']
#for patient in t2_available:
#if patient in t1c_available:
# available_sequences['t2-t1c'].append(patient)
# if patient in t1_available:
# available_sequences['t2-t1'].append(patient)
#keys = sorted(available_sequences.keys())
#print("total sources = {}".format(str(num_sources)))
#print("total patients = {}".format(str(total_patients)))
#print("%t1 = {}%".format(str(len(available_sequences["t1"])/total_patients*100)))
#print("%t2 = {}%".format(str(len(available_sequences["t2"])/total_patients*100)))
#print("%t1c = {}%".format(str(len(available_sequences["t1c"])/total_patients*100)))
#with open(config.SEQ_AVAIL, "w") as outfile:
# writer = csv.writer(outfile, delimiter = ",")
# writer.writerow(keys)
# writer.writerows(itertools.zip_longest(*[available_sequences[key] for key in keys]))
with open('output.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(seq_by_ID)
print(messed_up)
#Test code
path = "/Volumes/external/bone_raw"#"/home/user1/Documents/Bone-MRI/bone_raw"
processFiles(path)
| [
"os.listdir",
"nrrd.read",
"segmentation.select_slice",
"segmentation.crop",
"csv.writer",
"os.rename",
"os.path.isfile",
"segmentation.bounding_box",
"os.path.isdir",
"segmentation.calculate_largest_slice",
"segmentation.resize"
] | [((1504, 1525), 'nrrd.read', 'nrrd.read', (['image_path'], {}), '(image_path)\n', (1513, 1525), False, 'import nrrd\n'), ((1548, 1576), 'nrrd.read', 'nrrd.read', (['segmentation_path'], {}), '(segmentation_path)\n', (1557, 1576), False, 'import nrrd\n'), ((1719, 1762), 'segmentation.calculate_largest_slice', 'calculate_largest_slice', (['segmentation', 'axis'], {}), '(segmentation, axis)\n', (1742, 1762), False, 'from segmentation import calculate_largest_slice, select_slice, bounding_box, crop, resize\n'), ((1817, 1871), 'segmentation.select_slice', 'select_slice', (['image', 'segmentation', 'largest_plane', 'axis'], {}), '(image, segmentation, largest_plane, axis)\n', (1829, 1871), False, 'from segmentation import calculate_largest_slice, select_slice, bounding_box, crop, resize\n'), ((1951, 1977), 'segmentation.bounding_box', 'bounding_box', (['segmentation'], {}), '(segmentation)\n', (1963, 1977), False, 'from segmentation import calculate_largest_slice, select_slice, bounding_box, crop, resize\n'), ((2034, 2067), 'segmentation.crop', 'crop', (['image', 'segmentation', 'bounds'], {}), '(image, segmentation, bounds)\n', (2038, 2067), False, 'from segmentation import calculate_largest_slice, select_slice, bounding_box, crop, resize\n'), ((2233, 2287), 'segmentation.resize', 'resize', (['masked', '(config.IMAGE_SIZE, config.IMAGE_SIZE)'], {}), '(masked, (config.IMAGE_SIZE, config.IMAGE_SIZE))\n', (2239, 2287), False, 'from segmentation import calculate_largest_slice, select_slice, bounding_box, crop, resize\n'), ((2884, 2900), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2894, 2900), False, 'import os\n'), ((2783, 2802), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2796, 2802), False, 'import os\n'), ((3159, 3190), 'os.listdir', 'os.listdir', (["(path + '/' + source)"], {}), "(path + '/' + source)\n", (3169, 3190), False, 'import os\n'), ((9038, 9057), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (9048, 9057), False, 'import csv\n'), ((3711, 3763), 'os.listdir', 'os.listdir', (["(path + '/' + source + '/' + data_patient)"], {}), "(path + '/' + source + '/' + data_patient)\n", (3721, 3763), False, 'import os\n'), ((4310, 4337), 'os.path.isfile', 'os.path.isfile', (['segMask_GTV'], {}), '(segMask_GTV)\n', (4324, 4337), False, 'import os\n'), ((4359, 4434), 'os.rename', 'os.rename', (["(view_path + 'segMask_GTV.nrrd')", "(view_path + 'segMask_tumor.nrrd')"], {}), "(view_path + 'segMask_GTV.nrrd', view_path + 'segMask_tumor.nrrd')\n", (4368, 4434), False, 'import os\n'), ((4655, 4676), 'os.listdir', 'os.listdir', (['view_path'], {}), '(view_path)\n', (4665, 4676), False, 'import os\n'), ((4036, 4060), 'os.path.isdir', 'os.path.isdir', (['view_path'], {}), '(view_path)\n', (4049, 4060), False, 'import os\n'), ((4494, 4521), 'os.path.isfile', 'os.path.isfile', (['imageVolume'], {}), '(imageVolume)\n', (4508, 4521), False, 'import os\n'), ((4526, 4549), 'os.path.isfile', 'os.path.isfile', (['segMask'], {}), '(segMask)\n', (4540, 4549), False, 'import os\n'), ((5081, 5152), 'os.rename', 'os.rename', (['view_path', "(path + '/' + source + '/' + data_patient + '/t1c')"], {}), "(view_path, path + '/' + source + '/' + data_patient + '/t1c')\n", (5090, 5152), False, 'import os\n'), ((5479, 5549), 'os.rename', 'os.rename', (['view_path', "(path + '/' + source + '/' + data_patient + '/t1')"], {}), "(view_path, path + '/' + source + '/' + data_patient + '/t1')\n", (5488, 5549), False, 'import os\n'), ((5915, 5985), 'os.rename', 'os.rename', 
(['view_path', "(path + '/' + source + '/' + data_patient + '/t2')"], {}), "(view_path, path + '/' + source + '/' + data_patient + '/t2')\n", (5924, 5985), False, 'import os\n')] |
# This file is public domain, it can be freely copied without restrictions.
# SPDX-License-Identifier: CC0-1.0
import numpy as np
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import FallingEdge
from cocotb.binary import BinaryValue
DUT_VECTOR_SIZE = 13
def np2bv(int_arr, n_bits=8):
""" Convert a n_bits integer numpy array to cocotb BinaryValue """
# Step 1: Turn ndarray into a list of integers
int_list = int_arr.tolist()
# Step 2: Format each number as two's complement strings
binarized = [format(x & 2 ** n_bits - 1, f'0{n_bits}b') if x < 0 else
format(x, f'0{n_bits}b')
for x in int_list]
# Step 3: Join all strings into one large binary string
bin_string = ''.join(binarized)
# Step 4: Convert to cocotb BinaryValue and return
return BinaryValue(bin_string)
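# Illustrative example (hypothetical helper, not part of the testbench): an int8
# array [-1, 2] with n_bits=8 packs MSB-first in two's complement, giving the
# bit string "1111111100000010".
def _np2bv_example():
    return np2bv(np.array([-1, 2]), n_bits=8)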
@cocotb.test()
async def test_conv_sipo(dut):
""" Test Serial-In, Parallel Out Module """
# Create a 10us period clock on port clk
clock = Clock(dut.clk_i, 10, units="us")
cocotb.fork(clock.start())
# Reset DUT
await FallingEdge(dut.clk_i)
dut.rst_n_i <= 0
dut.data_i <= 0
dut.valid_i <= 0
dut.last_i <= 0
dut.ready_i <= 0
for _ in range(20):
await FallingEdge(dut.clk_i)
dut.rst_n_i <= 1
dut.ready_i <= 1
# Load Complete Sequential Stream
for i in range(50 * 8):
await FallingEdge(dut.clk_i)
dut.data_i <= i
dut.valid_i <= 1
if (i + 1) % 50 == 0 and i != 0:
dut.last_i <= 1
else:
dut.last_i <= 0
# Deassert End-Of-Packet Signals
await FallingEdge(dut.clk_i)
dut.last_i <= 0
dut.valid_i <= 0
# INFO: This is not a real unit test, but it works in integration tests
# Output Checking
await FallingEdge(dut.clk_i)
for i in range(50):
observed = dut.data_o.value
expected = i % 50
await FallingEdge(dut.clk_i)
# assert observed == expected, "observed = %d, expected = %d," %\
# (observed, expected)
| [
"cocotb.test",
"cocotb.triggers.FallingEdge",
"cocotb.clock.Clock",
"cocotb.binary.BinaryValue"
] | [((868, 881), 'cocotb.test', 'cocotb.test', ([], {}), '()\n', (879, 881), False, 'import cocotb\n'), ((841, 864), 'cocotb.binary.BinaryValue', 'BinaryValue', (['bin_string'], {}), '(bin_string)\n', (852, 864), False, 'from cocotb.binary import BinaryValue\n'), ((1018, 1050), 'cocotb.clock.Clock', 'Clock', (['dut.clk_i', '(10)'], {'units': '"""us"""'}), "(dut.clk_i, 10, units='us')\n", (1023, 1050), False, 'from cocotb.clock import Clock\n'), ((1109, 1131), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (1120, 1131), False, 'from cocotb.triggers import FallingEdge\n'), ((1659, 1681), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (1670, 1681), False, 'from cocotb.triggers import FallingEdge\n'), ((1832, 1854), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (1843, 1854), False, 'from cocotb.triggers import FallingEdge\n'), ((1274, 1296), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (1285, 1296), False, 'from cocotb.triggers import FallingEdge\n'), ((1428, 1450), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (1439, 1450), False, 'from cocotb.triggers import FallingEdge\n'), ((1955, 1977), 'cocotb.triggers.FallingEdge', 'FallingEdge', (['dut.clk_i'], {}), '(dut.clk_i)\n', (1966, 1977), False, 'from cocotb.triggers import FallingEdge\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# Demo for calculating the features from NNScore.
# =============================================================================
import sys, os, csv, glob
import subprocess
import multiprocessing
from multiprocessing import Manager
#from multiprocessing.dummy import Pool
import pandas as pd
## set the path of module
sys.path.append('/home/shenchao/AI_pose_filter/nnscore')
from NNScore2module import PDB, binana, command_line_parameters
vina_output_list = ['vina_affinity', 'vina_gauss_1', 'vina_gauss_2', 'vina_repulsion', 'vina_hydrophobic',
'vina_hydrogen']
ligand_receptor_atom_type_pairs_less_than_two_half_list = ['A_MN', 'OA_SA', 'HD_N', 'N_ZN', 'A_MG', 'HD_NA', 'A_CL',
'MG_OA', 'FE_HD', 'A_OA', 'NA_ZN', 'A_N', 'C_OA', 'F_HD',
'C_HD', 'NA_SA', 'A_ZN', 'C_NA', 'N_N', 'MN_N', 'F_N',
'FE_OA', 'HD_I', 'BR_C', 'MG_NA', 'C_ZN', 'CL_MG', 'BR_OA',
'A_FE', 'CL_OA', 'CL_N', 'NA_OA', 'F_ZN', 'HD_P', 'CL_ZN',
'C_C', 'C_CL', 'FE_N', 'HD_S', 'HD_MG', 'C_F', 'A_NA',
'BR_HD', 'HD_OA', 'HD_MN', 'A_SA', 'A_F', 'HD_SA', 'A_C',
'A_A', 'F_SA', 'C_N', 'HD_ZN', 'OA_OA', 'N_SA', 'CL_FE',
'C_MN', 'CL_HD', 'OA_ZN', 'MN_OA', 'C_MG', 'F_OA', 'CD_OA',
'S_ZN', 'N_OA', 'C_SA', 'N_NA', 'A_HD', 'HD_HD', 'SA_ZN']
ligand_receptor_atom_type_pairs_less_than_four_list = ['I_N', 'OA_SA', 'FE_NA', 'HD_NA', 'A_CL', 'MG_SA', 'A_CU',
'P_SA', 'C_NA', 'MN_NA', 'F_N', 'HD_N', 'HD_I', 'CL_MG', 'HD_S',
'CL_MN', 'F_OA', 'HD_OA', 'F_HD', 'A_SA', 'A_BR', 'BR_HD',
'SA_SA', 'A_MN', 'N_ZN', 'A_MG', 'I_OA', 'C_C', 'N_S', 'N_N',
'FE_N', 'NA_SA', 'BR_N', 'MN_N', 'A_P', 'BR_C', 'A_FE', 'MN_P',
'CL_OA', 'CU_HD', 'MN_S', 'A_S', 'FE_OA', 'NA_ZN', 'P_ZN', 'A_F',
'A_C', 'A_A', 'A_N', 'HD_MN', 'A_I', 'N_SA', 'C_OA', 'MG_P',
'BR_SA', 'CU_N', 'MN_OA', 'MG_N', 'HD_HD', 'C_FE', 'CL_NA',
'MG_OA', 'A_OA', 'CL_ZN', 'BR_OA', 'HD_ZN', 'HD_P', 'OA_P',
'OA_S', 'N_P', 'A_NA', 'CL_FE', 'HD_SA', 'C_MN', 'CL_HD', 'C_MG',
'FE_HD', 'MG_S', 'NA_S', 'NA_P', 'FE_SA', 'P_S', 'C_HD', 'A_ZN',
'CL_P', 'S_SA', 'CL_S', 'OA_ZN', 'N_NA', 'MN_SA', 'CL_N',
'NA_OA', 'C_ZN', 'C_CD', 'HD_MG', 'C_F', 'C_I', 'C_CL', 'C_N',
'C_P', 'C_S', 'A_HD', 'F_SA', 'MG_NA', 'OA_OA', 'CL_SA', 'S_ZN',
'N_OA', 'C_SA', 'SA_ZN']
ligand_atom_types_list = ['A', 'C', 'CL', 'I', 'N', 'P', 'S', 'BR', 'HD', 'NA', 'F', 'OA', 'SA']
ligand_receptor_atom_type_pairs_electrostatic_list = ['I_N', 'OA_SA', 'FE_NA', 'HD_NA', 'A_CL', 'MG_SA', 'P_SA', 'C_NA',
'MN_NA', 'F_N', 'HD_N', 'HD_I', 'CL_MG', 'HD_S', 'CL_MN', 'F_OA',
'HD_OA', 'F_HD', 'A_SA', 'A_BR', 'BR_HD', 'SA_SA', 'A_MN', 'N_ZN',
'A_MG', 'I_OA', 'C_C', 'N_S', 'N_N', 'FE_N', 'NA_SA', 'BR_N',
'MN_N', 'A_P', 'BR_C', 'A_FE', 'MN_P', 'CL_OA', 'CU_HD', 'MN_S',
'A_S', 'FE_OA', 'NA_ZN', 'P_ZN', 'A_F', 'A_C', 'A_A', 'A_N',
'HD_MN', 'A_I', 'N_SA', 'C_OA', 'MG_P', 'BR_SA', 'CU_N', 'MN_OA',
'MG_N', 'HD_HD', 'C_FE', 'CL_NA', 'MG_OA', 'A_OA', 'CL_ZN',
'BR_OA', 'HD_ZN', 'HD_P', 'OA_P', 'OA_S', 'N_P', 'A_NA', 'CL_FE',
'HD_SA', 'C_MN', 'CL_HD', 'C_MG', 'FE_HD', 'MG_S', 'NA_S', 'NA_P',
'FE_SA', 'P_S', 'C_HD', 'A_ZN', 'CL_P', 'S_SA', 'CL_S', 'OA_ZN',
'N_NA', 'MN_SA', 'CL_N', 'NA_OA', 'F_ZN', 'C_ZN', 'HD_MG', 'C_F',
'C_I', 'C_CL', 'C_N', 'C_P', 'C_S', 'A_HD', 'F_SA', 'MG_NA',
'OA_OA', 'CL_SA', 'S_ZN', 'N_OA', 'C_SA', 'SA_ZN']
rotateable_bonds_count_list = ['rot_bonds']
active_site_flexibility_list = ['SIDECHAIN_OTHER', 'SIDECHAIN_ALPHA', 'BACKBONE_ALPHA', 'SIDECHAIN_BETA',
'BACKBONE_BETA', 'BACKBONE_OTHER']
hbonds_list = ['HDONOR-LIGAND_SIDECHAIN_BETA', 'HDONOR-LIGAND_BACKBONE_OTHER', 'HDONOR-LIGAND_SIDECHAIN_ALPHA',
'HDONOR-RECEPTOR_SIDECHAIN_OTHER', 'HDONOR-RECEPTOR_BACKBONE_ALPHA', 'HDONOR-RECEPTOR_SIDECHAIN_BETA',
'HDONOR-RECEPTOR_SIDECHAIN_ALPHA', 'HDONOR-LIGAND_SIDECHAIN_OTHER', 'HDONOR-LIGAND_BACKBONE_BETA',
'HDONOR-RECEPTOR_BACKBONE_BETA', 'HDONOR-RECEPTOR_BACKBONE_OTHER', 'HDONOR-LIGAND_BACKBONE_ALPHA']
hydrophobics_list = ['SIDECHAIN_OTHER', 'SIDECHAIN_ALPHA', 'BACKBONE_ALPHA', 'SIDECHAIN_BETA', 'BACKBONE_BETA','BACKBONE_OTHER']
stacking_list = ['ALPHA', 'BETA', 'OTHER']
pi_cation_list = ['LIGAND-CHARGED_BETA', 'LIGAND-CHARGED_ALPHA', 'RECEPTOR-CHARGED_BETA', 'RECEPTOR-CHARGED_OTHER','RECEPTOR-CHARGED_ALPHA', 'LIGAND-CHARGED_OTHER']
t_shaped_list = ['ALPHA', 'BETA', 'OTHER']
salt_bridges_list = ['ALPHA', 'BETA', 'OTHER']
def get_hearder_list():
header_list = vina_output_list + ['atp2_%s' % it for it in ligand_receptor_atom_type_pairs_less_than_two_half_list] \
+ ['atp4_%s' % it for it in ligand_receptor_atom_type_pairs_less_than_four_list] + ['lat_%s' % it for
it in
ligand_atom_types_list] \
+ ['ele_%s' % it for it in
ligand_receptor_atom_type_pairs_electrostatic_list] + rotateable_bonds_count_list + [
'siteflex_%s' % it for it in active_site_flexibility_list] \
+ ['hbond_%s' % it for it in hbonds_list] + ['hydrophobic_%s' % it for it in hydrophobics_list] + [
'stacking_%s' % it for it in stacking_list] \
+ ['pi_cation_%s' % it for it in pi_cation_list] + ['t_shaped_%s' % it for it in t_shaped_list] + [
'salt_bridges_%s' % it for it in salt_bridges_list]
return header_list
def obtain_features(rec, lig):
cmd = "NNScore2.py -receptor %s -ligand %s" % (rec, lig)
params_list = cmd.split()
cmd_params = command_line_parameters(params_list)
receptor = PDB()
receptor.LoadPDB_from_file(rec)
receptor.OrigFileName = rec
d = binana(lig, receptor, cmd_params, "", "", "")
final_list = d.vina_output + d.ligand_receptor_atom_type_pairs_less_than_two_half.values() + d.ligand_receptor_atom_type_pairs_less_than_four.values() \
+ d.ligand_atom_types.values() + d.ligand_receptor_atom_type_pairs_electrostatic.values() + d.rotateable_bonds_count.values() \
+ d.active_site_flexibility.values() + d.hbonds.values() + d.hydrophobics.values() + d.stacking.values() + d.pi_cation.values() \
+ d.t_shaped.values() + d.salt_bridges.values()
return final_list
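# Note: the concatenation in obtain_features assumes Python 2, where dict.values()
# returns a list; under Python 3 each feature group would need to be wrapped in
# list() before being added together.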
def write_file(output_file, outline):
buffer = open(output_file, 'w')
buffer.write(outline)
buffer.close()
def prot_pre(name):
'''prepare the protein with mgltools'''
cmdline = 'module purge &&'
cmdline += 'module load vina &&'
#####remove the irons in advance because prepare_receptor4.py could not recognize the irons
cmdline += 'cat %s_p.pdb | sed \'/HETATM/\'d > %s_p2.pdb &&'%(name, name)
cmdline += 'prepare_receptor4.py -r %s_p2.pdb -o %s_p.pdbqt -A checkhydrogens -U nphs_lps_waters_nonstdres &&' % (name, name)
cmdline += 'rm -rf %s_p2.pdb'%name
p = subprocess.Popen([cmdline], shell=True, cwd="%s/%s_prot"%(name, name))
p.wait()
def lig_pre(name, ligname):
'''prepare the docking poses with mgltools'''
cmdline = 'module purge &&'
cmdline += 'module load vina &&'
cmdline += 'prepare_ligand4.py -l %s.mol2 -o %s_temp.pdbqt -A checkhydrogens' % (ligname, ligname)
p = subprocess.Popen([cmdline], shell=True, cwd="%s/%s_surflex/%s"%(name, name, ligname))
p.wait()
####if not conducting this operation, some errors may occur for some ligands.
lines = open('%s/%s_surflex/%s/%s_temp.pdbqt' % (name, name, ligname, ligname), 'r').readlines()
lines_new = []
for line in lines:
if line.startswith('ATOM'):
lines_new.append(line[:23]+' ' +line[26:])
else:
lines_new.append(line)
final = ''.join(lines_new)
write_file('%s/%s_surflex/%s/%s.pdbqt' % (name, name, ligname, ligname), final)
p = subprocess.Popen(["rm -rf %s_temp.pdbqt"%ligname], shell=True, cwd="%s/%s_surflex/%s"%(name, name, ligname))
p.wait()
def lig_pre0(name):
'''prepare the native poses with mgltools'''
cmdline = 'module purge &&'
cmdline += 'module load vina &&'
cmdline += 'prepare_ligand4.py -l %s_l.mol2 -o %s_temp.pdbqt -A checkhydrogens' % (name, name)
p = subprocess.Popen([cmdline], shell=True, cwd="%s/%s_prot"%(name, name))
p.wait()
####if not conducting this operation, some errors may occur for some ligands.
lines = open('%s/%s_prot/%s_temp.pdbqt' % (name, name, name), 'r').readlines()
lines_new = []
for line in lines:
if line.startswith('ATOM'):
lines_new.append(line[:23]+' ' +line[26:])
else:
lines_new.append(line)
final = ''.join(lines_new)
write_file('%s/%s_prot/%s_l.pdbqt' % (name, name, name), final)
p = subprocess.Popen(["rm -rf %s_temp.pdbqt"%name], shell=True, cwd="%s/%s_prot"%(name, name))
p.wait()
def zhenghe(name, i, return_dict):
    '''prepare the pdbqt files and compute the NNScore features for the native pose and each docking pose of one target'''
if not os.path.exists("%s/%s_prot/%s_p.pdbqt"%(name, name, name)):
##generate protein.pdbqt
prot_pre(name)
if not os.path.exists("%s/%s_prot/%s_l.pdbqt"%(name, name, name)):
##generate ligand.pdbqt
lig_pre0(name)
lignames = [x for x in os.listdir('./%s/%s_surflex'%(name, name)) if (os.path.isdir('./%s/%s_surflex/%s'%(name, name, x)) and x!='1_000x')]
features = obtain_features('%s/%s_prot/%s_p.pdbqt' % (name, name, name), '%s/%s_prot/%s_l.pdbqt' % (name, name, name))
features.insert(0, '1_000x')
features.insert(0, name)
featuress = [features]
for ligname in lignames:
lig_pre(name, ligname)
features = obtain_features('%s/%s_prot/%s_p.pdbqt' % (name, name, name),
'%s/%s_surflex/%s/%s.pdbqt' % (name, name, ligname, ligname))
features.insert(0, ligname)
features.insert(0, name)
featuress.append(features)
return_dict[i] = featuress
def main():
names = [x for x in os.listdir('.') if os.path.isdir(x)]
manger = Manager()
return_dict = manger.dict()
jobs = []
pool = multiprocessing.Pool(30)
for i, name in enumerate(names):
p = pool.apply_async(zhenghe, args=(name, i, return_dict))
jobs.append(p)
pool.close()
pool.join()
df = pd.DataFrame(sum(return_dict.values(), []))
header_list = get_hearder_list()
df.columns = ['pdb_id', 'lig_id'] + header_list
df.sort_values(by=['pdb_id','lig_id'], inplace=True)
df.reset_index(inplace=True)
del df['index']
df.to_csv("NNscore2_out.csv")
os.system("bzip2 NNscore2_out.csv")
if __name__ == '__main__':
main()
| [
"NNScore2module.binana",
"os.path.exists",
"NNScore2module.command_line_parameters",
"os.listdir",
"subprocess.Popen",
"os.path.isdir",
"multiprocessing.Pool",
"multiprocessing.Manager",
"os.system",
"NNScore2module.PDB",
"sys.path.append"
] | [((450, 506), 'sys.path.append', 'sys.path.append', (['"""/home/shenchao/AI_pose_filter/nnscore"""'], {}), "('/home/shenchao/AI_pose_filter/nnscore')\n", (465, 506), False, 'import sys, os, csv, glob\n'), ((7741, 7777), 'NNScore2module.command_line_parameters', 'command_line_parameters', (['params_list'], {}), '(params_list)\n', (7764, 7777), False, 'from NNScore2module import PDB, binana, command_line_parameters\n'), ((7793, 7798), 'NNScore2module.PDB', 'PDB', ([], {}), '()\n', (7796, 7798), False, 'from NNScore2module import PDB, binana, command_line_parameters\n'), ((7875, 7920), 'NNScore2module.binana', 'binana', (['lig', 'receptor', 'cmd_params', '""""""', '""""""', '""""""'], {}), "(lig, receptor, cmd_params, '', '', '')\n", (7881, 7920), False, 'from NNScore2module import PDB, binana, command_line_parameters\n'), ((9078, 9150), 'subprocess.Popen', 'subprocess.Popen', (['[cmdline]'], {'shell': '(True)', 'cwd': "('%s/%s_prot' % (name, name))"}), "([cmdline], shell=True, cwd='%s/%s_prot' % (name, name))\n", (9094, 9150), False, 'import subprocess\n'), ((9426, 9517), 'subprocess.Popen', 'subprocess.Popen', (['[cmdline]'], {'shell': '(True)', 'cwd': "('%s/%s_surflex/%s' % (name, name, ligname))"}), "([cmdline], shell=True, cwd='%s/%s_surflex/%s' % (name,\n name, ligname))\n", (9442, 9517), False, 'import subprocess\n'), ((10025, 10142), 'subprocess.Popen', 'subprocess.Popen', (["['rm -rf %s_temp.pdbqt' % ligname]"], {'shell': '(True)', 'cwd': "('%s/%s_surflex/%s' % (name, name, ligname))"}), "(['rm -rf %s_temp.pdbqt' % ligname], shell=True, cwd=\n '%s/%s_surflex/%s' % (name, name, ligname))\n", (10041, 10142), False, 'import subprocess\n'), ((10406, 10478), 'subprocess.Popen', 'subprocess.Popen', (['[cmdline]'], {'shell': '(True)', 'cwd': "('%s/%s_prot' % (name, name))"}), "([cmdline], shell=True, cwd='%s/%s_prot' % (name, name))\n", (10422, 10478), False, 'import subprocess\n'), ((10956, 11055), 'subprocess.Popen', 'subprocess.Popen', (["['rm -rf %s_temp.pdbqt' % name]"], {'shell': '(True)', 'cwd': "('%s/%s_prot' % (name, name))"}), "(['rm -rf %s_temp.pdbqt' % name], shell=True, cwd=\n '%s/%s_prot' % (name, name))\n", (10972, 11055), False, 'import subprocess\n'), ((12061, 12070), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (12068, 12070), False, 'from multiprocessing import Manager\n'), ((12119, 12143), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(30)'], {}), '(30)\n', (12139, 12143), False, 'import multiprocessing\n'), ((12552, 12587), 'os.system', 'os.system', (['"""bzip2 NNscore2_out.csv"""'], {}), "('bzip2 NNscore2_out.csv')\n", (12561, 12587), False, 'import sys, os, csv, glob\n'), ((11106, 11166), 'os.path.exists', 'os.path.exists', (["('%s/%s_prot/%s_p.pdbqt' % (name, name, name))"], {}), "('%s/%s_prot/%s_p.pdbqt' % (name, name, name))\n", (11120, 11166), False, 'import sys, os, csv, glob\n'), ((11220, 11280), 'os.path.exists', 'os.path.exists', (["('%s/%s_prot/%s_l.pdbqt' % (name, name, name))"], {}), "('%s/%s_prot/%s_l.pdbqt' % (name, name, name))\n", (11234, 11280), False, 'import sys, os, csv, glob\n'), ((11349, 11393), 'os.listdir', 'os.listdir', (["('./%s/%s_surflex' % (name, name))"], {}), "('./%s/%s_surflex' % (name, name))\n", (11359, 11393), False, 'import sys, os, csv, glob\n'), ((12014, 12029), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (12024, 12029), False, 'import sys, os, csv, glob\n'), ((12033, 12049), 'os.path.isdir', 'os.path.isdir', (['x'], {}), '(x)\n', (12046, 12049), False, 'import sys, os, csv, glob\n'), ((11396, 
11449), 'os.path.isdir', 'os.path.isdir', (["('./%s/%s_surflex/%s' % (name, name, x))"], {}), "('./%s/%s_surflex/%s' % (name, name, x))\n", (11409, 11449), False, 'import sys, os, csv, glob\n')] |
from src.data.text_preprocessor import TextPreprocessor
# def preclean(text_list):
# preprocessor = TextPreprocessor()
# word_list = list()
# for text in text_list:
# clean_text = preprocessor.clean_sentence(text)
# word_list.append(' '.join(preprocessor.tokenize_text(clean_text)))
# print(word_list)
# return(word_list)
# text_list = ['Generar empleo y garantizara la población campesina el bienestar y su participación e incorporación en el desarrollo nacional, y fomentará la actividadagro pecuaria y forestal para el óptimo uso de la tierra, con obras de infraestructura, insumos, créditos, servicios de capacitación y asistencia técnica',
# 'El Programa incentivará a los sujetos agrarios a establecer sistemas productivos agroforestales, el cual combina la producción de los cultivos tradicionales en conjunto con árboles frutícolas y maderables, y el sistema de Milpa Intercalada entre Árboles Frutales (MIAF), con lo que se contribuirá a generar empleos, se incentivará la autosuficiencia alimentaria, se mejorarán los ingresos de las y los pobladores y se recuperará la cobertura forestal de un millón de hectáreas en el país.']
# preclean(text_list=text_list)
def preclean_entireDoc(text):
preprocessor = TextPreprocessor()
clean_text = preprocessor.clean_sentence(text)
word_list = preprocessor.tokenize_text(clean_text)
words = ' '.join(word_list)
return(words) | [
"src.data.text_preprocessor.TextPreprocessor"
] | [((1263, 1281), 'src.data.text_preprocessor.TextPreprocessor', 'TextPreprocessor', ([], {}), '()\n', (1279, 1281), False, 'from src.data.text_preprocessor import TextPreprocessor\n')] |
import numpy as np
import cv2
img = cv2.imread('../Images/logan.jpg',cv2.IMREAD_COLOR)
# 4x4 averaging (box-blur) kernel; dividing by 16 normalizes the sixteen ones
kernel = np.ones((4, 4), np.float32) / 16
# ddepth=-1 keeps the output depth the same as the source image
img = cv2.filter2D(img, -1, kernel)
cv2.imwrite('../Images/12conv.jpg', img)
cv2.imshow('Image',img)
cv2.waitKey(0) | [
"cv2.imwrite",
"numpy.ones",
"cv2.filter2D",
"cv2.imshow",
"cv2.waitKey",
"cv2.imread"
] | [((40, 91), 'cv2.imread', 'cv2.imread', (['"""../Images/logan.jpg"""', 'cv2.IMREAD_COLOR'], {}), "('../Images/logan.jpg', cv2.IMREAD_COLOR)\n", (50, 91), False, 'import cv2\n'), ((137, 166), 'cv2.filter2D', 'cv2.filter2D', (['img', '(-1)', 'kernel'], {}), '(img, -1, kernel)\n', (149, 166), False, 'import cv2\n'), ((168, 208), 'cv2.imwrite', 'cv2.imwrite', (['"""../Images/12conv.jpg"""', 'img'], {}), "('../Images/12conv.jpg', img)\n", (179, 208), False, 'import cv2\n'), ((210, 234), 'cv2.imshow', 'cv2.imshow', (['"""Image"""', 'img'], {}), "('Image', img)\n", (220, 234), False, 'import cv2\n'), ((235, 249), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (246, 249), False, 'import cv2\n'), ((101, 128), 'numpy.ones', 'np.ones', (['(4, 4)', 'np.float32'], {}), '((4, 4), np.float32)\n', (108, 128), True, 'import numpy as np\n')] |
from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate,\
GlobalAveragePooling2D, add, UpSampling2D
from keras.layers.merge import Concatenate
from keras.layers.core import Activation, Dropout
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.models import Model
from keras import backend as K
import tensorflow as tf
from .losses import *
from .multi_gpu import ModelMGPU
import json
def unet(model_path,
num_channels,
loss="binary_crossentropy",
ds=2,
lr=1e-4,
num_gpus=1,
verbose=0,):
inputs = Input((None, None, num_channels))
conv1 = Conv2D(64//ds, 3, activation='relu', padding='same', )(inputs)
conv1 = Conv2D(64//ds, 3, activation='relu', padding='same', )(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128//ds, 3, activation='relu', padding='same',)(pool1)
conv2 = Conv2D(128//ds, 3, activation='relu', padding='same', )(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256//ds, 3, activation='relu', padding='same', )(pool2)
conv3 = Conv2D(256//ds, 3, activation='relu', padding='same', )(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512//ds, 3, activation='relu', padding='same', )(pool3)
conv4 = Conv2D(512//ds, 3, activation='relu', padding='same', )(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024//ds, 3, activation='relu', padding='same', )(pool4)
conv5 = Conv2D(1024//ds, 3, activation='relu', padding='same', )(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(drop5))
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512//ds, 3, activation='relu', padding='same')(merge6)
conv6 = Conv2D(512//ds, 3, activation='relu', padding='same')(conv6)
up7 = Conv2D(256//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv6))
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256//ds, 3, activation='relu', padding='same')(merge7)
conv7 = Conv2D(256//ds, 3, activation='relu', padding='same')(conv7)
up8 = Conv2D(128//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv7))
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128//ds, 3, activation='relu', padding='same')(merge8)
conv8 = Conv2D(128//ds, 3, activation='relu', padding='same')(conv8)
up9 = Conv2D(64//ds, 2, activation='relu', padding='same')(UpSampling2D(size=(2, 2))(conv8))
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64//ds, 3, activation='relu', padding='same')(merge9)
conv9 = Conv2D(64//ds, 3, activation='relu', padding='same')(conv9)
conv9 = Conv2D(2, 3, activation='relu', padding='same', )(conv9)
conv10 = Conv2D(1, 1, activation='sigmoid')(conv9)
model = Model(input=inputs, output=conv10)
# dice as a human-readble metric
model.compile(optimizer=Adam(lr=lr),
metrics=[dice_coef],
loss=loss)
# save json before checking if multi-gpu
json_string = model.to_json()
with open(model_path, 'w') as f:
json.dump(json_string, f)
if verbose:
print(model.summary())
# recompile if multi-gpu model
if num_gpus > 1:
model = ModelMGPU(model, num_gpus)
model.compile(optimizer=Adam(lr=lr),
metrics=[dice_coef],
loss=loss)
return model
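# Example call (illustrative; the JSON path is a placeholder):
# model = unet("unet_architecture.json", num_channels=1, ds=2, lr=1e-4, num_gpus=1)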
| [
"keras.optimizers.Adam",
"keras.layers.Conv2D",
"keras.layers.MaxPooling2D",
"keras.layers.UpSampling2D",
"keras.layers.Input",
"keras.layers.concatenate",
"keras.models.Model",
"keras.layers.core.Dropout",
"json.dump"
] | [((644, 677), 'keras.layers.Input', 'Input', (['(None, None, num_channels)'], {}), '((None, None, num_channels))\n', (649, 677), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1809, 1842), 'keras.layers.concatenate', 'concatenate', (['[drop4, up6]'], {'axis': '(3)'}), '([drop4, up6], axis=3)\n', (1820, 1842), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2102, 2135), 'keras.layers.concatenate', 'concatenate', (['[conv3, up7]'], {'axis': '(3)'}), '([conv3, up7], axis=3)\n', (2113, 2135), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2395, 2428), 'keras.layers.concatenate', 'concatenate', (['[conv2, up8]'], {'axis': '(3)'}), '([conv2, up8], axis=3)\n', (2406, 2428), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2687, 2720), 'keras.layers.concatenate', 'concatenate', (['[conv1, up9]'], {'axis': '(3)'}), '([conv1, up9], axis=3)\n', (2698, 2720), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((3004, 3038), 'keras.models.Model', 'Model', ([], {'input': 'inputs', 'output': 'conv10'}), '(input=inputs, output=conv10)\n', (3009, 3038), False, 'from keras.models import Model\n'), ((691, 745), 'keras.layers.Conv2D', 'Conv2D', (['(64 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64 // ds, 3, activation='relu', padding='same')\n", (697, 745), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((766, 820), 'keras.layers.Conv2D', 'Conv2D', (['(64 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64 // ds, 3, activation='relu', padding='same')\n", (772, 820), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((840, 870), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (852, 870), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((891, 946), 'keras.layers.Conv2D', 'Conv2D', (['(128 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128 // ds, 3, activation='relu', padding='same')\n", (897, 946), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((965, 1020), 'keras.layers.Conv2D', 'Conv2D', (['(128 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128 // ds, 3, activation='relu', padding='same')\n", (971, 1020), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1040, 1070), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1052, 1070), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1091, 1146), 'keras.layers.Conv2D', 'Conv2D', (['(256 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': 
'"""same"""'}), "(256 // ds, 3, activation='relu', padding='same')\n", (1097, 1146), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1166, 1221), 'keras.layers.Conv2D', 'Conv2D', (['(256 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256 // ds, 3, activation='relu', padding='same')\n", (1172, 1221), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1241, 1271), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1253, 1271), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1292, 1347), 'keras.layers.Conv2D', 'Conv2D', (['(512 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(512 // ds, 3, activation='relu', padding='same')\n", (1298, 1347), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1367, 1422), 'keras.layers.Conv2D', 'Conv2D', (['(512 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(512 // ds, 3, activation='relu', padding='same')\n", (1373, 1422), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1442, 1454), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1449, 1454), False, 'from keras.layers.core import Activation, Dropout\n'), ((1474, 1504), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1486, 1504), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1525, 1581), 'keras.layers.Conv2D', 'Conv2D', (['(1024 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(1024 // ds, 3, activation='relu', padding='same')\n", (1531, 1581), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1601, 1657), 'keras.layers.Conv2D', 'Conv2D', (['(1024 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(1024 // ds, 3, activation='relu', padding='same')\n", (1607, 1657), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1677, 1689), 'keras.layers.core.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1684, 1689), False, 'from keras.layers.core import Activation, Dropout\n'), ((1708, 1763), 'keras.layers.Conv2D', 'Conv2D', (['(512 // ds)', '(2)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(512 // ds, 2, activation='relu', padding='same')\n", (1714, 1763), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1855, 1910), 'keras.layers.Conv2D', 'Conv2D', (['(512 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(512 // ds, 3, activation='relu', padding='same')\n", (1861, 1910), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((1929, 1984), 'keras.layers.Conv2D', 'Conv2D', (['(512 // ds)', '(3)'], {'activation': '"""relu"""', 
'padding': '"""same"""'}), "(512 // ds, 3, activation='relu', padding='same')\n", (1935, 1984), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2001, 2056), 'keras.layers.Conv2D', 'Conv2D', (['(256 // ds)', '(2)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256 // ds, 2, activation='relu', padding='same')\n", (2007, 2056), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2148, 2203), 'keras.layers.Conv2D', 'Conv2D', (['(256 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256 // ds, 3, activation='relu', padding='same')\n", (2154, 2203), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2222, 2277), 'keras.layers.Conv2D', 'Conv2D', (['(256 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(256 // ds, 3, activation='relu', padding='same')\n", (2228, 2277), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2294, 2349), 'keras.layers.Conv2D', 'Conv2D', (['(128 // ds)', '(2)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128 // ds, 2, activation='relu', padding='same')\n", (2300, 2349), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2441, 2496), 'keras.layers.Conv2D', 'Conv2D', (['(128 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128 // ds, 3, activation='relu', padding='same')\n", (2447, 2496), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2515, 2570), 'keras.layers.Conv2D', 'Conv2D', (['(128 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128 // ds, 3, activation='relu', padding='same')\n", (2521, 2570), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2587, 2641), 'keras.layers.Conv2D', 'Conv2D', (['(64 // ds)', '(2)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64 // ds, 2, activation='relu', padding='same')\n", (2593, 2641), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2733, 2787), 'keras.layers.Conv2D', 'Conv2D', (['(64 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64 // ds, 3, activation='relu', padding='same')\n", (2739, 2787), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2806, 2860), 'keras.layers.Conv2D', 'Conv2D', (['(64 // ds)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64 // ds, 3, activation='relu', padding='same')\n", (2812, 2860), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2879, 2926), 'keras.layers.Conv2D', 'Conv2D', (['(2)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(2, 3, activation='relu', padding='same')\n", (2885, 2926), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, 
GlobalAveragePooling2D, add, UpSampling2D\n'), ((2949, 2983), 'keras.layers.Conv2D', 'Conv2D', (['(1)', '(1)'], {'activation': '"""sigmoid"""'}), "(1, 1, activation='sigmoid')\n", (2955, 2983), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((3311, 3336), 'json.dump', 'json.dump', (['json_string', 'f'], {}), '(json_string, f)\n', (3320, 3336), False, 'import json\n'), ((1762, 1787), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (1774, 1787), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2055, 2080), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (2067, 2080), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2348, 2373), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (2360, 2373), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((2640, 2665), 'keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2, 2)'}), '(size=(2, 2))\n', (2652, 2665), False, 'from keras.layers import Input, Conv2D, MaxPooling2D, AveragePooling2D, concatenate, GlobalAveragePooling2D, add, UpSampling2D\n'), ((3105, 3116), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (3109, 3116), False, 'from keras.optimizers import Adam\n'), ((3517, 3528), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'lr'}), '(lr=lr)\n', (3521, 3528), False, 'from keras.optimizers import Adam\n')] |
from import_new_tournaments.process_hh_files.process.tournament.extract.id import get_id
import unittest
class test(unittest.TestCase):
def test_id(self):
fake_tournament_title = "HH20210112 SITGOID-G23315209T1 TN-$1{FULLSTOP}50 Hold'Em Turbo - On Demand GAMETYPE-Hold'em LIMIT-no CUR-REAL OND-T BUYIN-0.txt"
self.assertEqual(
get_id(fake_tournament_title),
23315209
) | [
"import_new_tournaments.process_hh_files.process.tournament.extract.id.get_id"
] | [((365, 394), 'import_new_tournaments.process_hh_files.process.tournament.extract.id.get_id', 'get_id', (['fake_tournament_title'], {}), '(fake_tournament_title)\n', (371, 394), False, 'from import_new_tournaments.process_hh_files.process.tournament.extract.id import get_id\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='ingestion',
version='0.1.0',
description='Ingestion is a Python package for converting data files to other formats',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/apoclyps/ingestion',
packages=[
'ingestion',
],
package_dir={
'ingestion': 'ingestion'
},
include_package_data=True,
install_requires=[],
license="Unlicense",
zip_safe=False,
keywords='ingestion',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 3.5',
],
test_suite='tests'
)
| [
"distutils.core.setup"
] | [((143, 739), 'distutils.core.setup', 'setup', ([], {'name': '"""ingestion"""', 'version': '"""0.1.0"""', 'description': '"""Ingestion is a Python package for converting data files to other formats"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'url': '"""https://github.com/apoclyps/ingestion"""', 'packages': "['ingestion']", 'package_dir': "{'ingestion': 'ingestion'}", 'include_package_data': '(True)', 'install_requires': '[]', 'license': '"""Unlicense"""', 'zip_safe': '(False)', 'keywords': '"""ingestion"""', 'classifiers': "['Development Status :: 3 - Alpha', 'Intended Audience :: Developers',\n 'Natural Language :: English', 'Programming Language :: Python :: 3.5']", 'test_suite': '"""tests"""'}), "(name='ingestion', version='0.1.0', description=\n 'Ingestion is a Python package for converting data files to other formats',\n author='<NAME>', author_email='<EMAIL>', url=\n 'https://github.com/apoclyps/ingestion', packages=['ingestion'],\n package_dir={'ingestion': 'ingestion'}, include_package_data=True,\n install_requires=[], license='Unlicense', zip_safe=False, keywords=\n 'ingestion', classifiers=['Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers', 'Natural Language :: English',\n 'Programming Language :: Python :: 3.5'], test_suite='tests')\n", (148, 739), False, 'from distutils.core import setup\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.common import log
from fate_arch.common.data_utils import default_output_info, default_output_fs_path
from fate_arch.session import Session
from fate_arch.storage import StorageEngine, StorageTableMeta
from fate_flow.components._base import (
BaseParam,
ComponentBase,
ComponentInputProtocol,
ComponentMeta,
)
from fate_flow.entity import Metric
from fate_flow.manager.data_manager import TableStorage, DataTableTracker
LOGGER = log.getLogger()
writer_cpn_meta = ComponentMeta("Writer")
@writer_cpn_meta.bind_param
class WriterParam(BaseParam):
def __init__(self,
table_name=None,
namespace=None,
storage_engine=None,
address=None,
output_table_name=None,
output_namespace=None,
partitions=None):
self.table_name = table_name
self.namespace = namespace
self.storage_engine = storage_engine
self.address = address
self.output_table_name = output_table_name
self.output_namespace = output_namespace
self.partitions = partitions
def check(self):
return True
@writer_cpn_meta.bind_runner.on_guest.on_host.on_local
class Writer(ComponentBase):
def __init__(self):
super(Writer, self).__init__()
self.parameters = None
self.job_parameters = None
def _run(self, cpn_input: ComponentInputProtocol):
self.parameters = cpn_input.parameters
if self.parameters.get("namespace") and self.parameters.get("table_name"):
namespace = self.parameters.get("namespace")
name = self.parameters.get("table_name")
elif cpn_input.flow_feeded_parameters.get("table_info"):
namespace = cpn_input.flow_feeded_parameters.get("table_info")[0].get("namespace")
name = cpn_input.flow_feeded_parameters.get("table_info")[0].get("name")
else:
raise Exception("no found name or namespace in input parameters")
LOGGER.info(f"writer parameters:{self.parameters}")
src_table = self._get_storage_table(namespace=namespace, name=name)
output_name = self.parameters.get("output_table_name")
output_namespace = self.parameters.get("output_namespace")
if not output_namespace or not output_name:
LOGGER.info("start create table info")
output_namespace, output_name = self._create_output_table_info()
LOGGER.info(f"output_namespace: {output_namespace}, output_name: {output_name}")
engine = self.parameters.get("storage_engine").upper()
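        # create the destination table with the same partitions and id delimiter as the source table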
dest_table = self._create_storage_table(engine=engine,
address_dict=self.parameters.get("address"),
name=output_name,
namespace=output_namespace,
partitions=self.parameters.get("partitions", src_table.meta.get_partitions()),
id_delimiter=src_table.meta.get_id_delimiter() if src_table.meta.get_id_delimiter() else ",")
_, dest_table.meta = dest_table.meta.update_metas(schema=src_table.meta.get_schema(),
id_delimiter=src_table.meta.get_id_delimiter() if src_table.meta.get_id_delimiter() else ',')
count = TableStorage.copy_table(src_table, dest_table, deserialize_value=True)
LOGGER.info("save success")
# output table track
DataTableTracker.create_table_tracker(
output_name,
output_namespace,
entity_info={
"have_parent": True,
"parent_table_namespace": namespace,
"parent_table_name": name,
"job_id": self.tracker.job_id,
},
)
self.tracker.log_output_data_info(
data_name="writer",
table_namespace=output_namespace,
table_name=output_name,
)
self.tracker.log_metric_data(
metric_namespace="writer",
metric_name="writer",
metrics=[Metric("output_table_name", output_name),
Metric("output_namespace", output_namespace),
Metric("count", count)],
)
@staticmethod
def _get_storage_table(namespace, name):
return Session.get_global().get_table(name=name, namespace=namespace)
@staticmethod
def _create_storage_table(engine, address_dict, name, namespace, partitions, id_delimiter):
if not address_dict:
address_dict = {}
if engine == StorageEngine.MYSQL:
if not address_dict.get("db") or not address_dict.get("name"):
address_dict.update({"db": namespace, "name": name})
elif engine == StorageEngine.EGGROLL:
address_dict.update({"name": name, "namespace": namespace})
elif engine == StorageEngine.STANDALONE:
address_dict.update({"name": name, "namespace": namespace})
elif engine == StorageEngine.HIVE:
address_dict.update({"database": namespace, "name": f"{name}"})
elif engine == StorageEngine.HDFS:
if not address_dict.get("path"):
address_dict.update({"path": default_output_fs_path(name=name, namespace=namespace, prefix=address_dict.get("path_prefix"))})
elif engine == StorageEngine.LOCALFS:
if not address_dict.get("path"):
address_dict.update({"path": default_output_fs_path(name=name, namespace=namespace, storage_engine=StorageEngine.LOCALFS)})
else:
raise RuntimeError(f"{engine} storage is not supported")
output_table_address = StorageTableMeta.create_address(
storage_engine=engine, address_dict=address_dict
)
storage_session = Session.get_global().storage(storage_engine=engine)
output_table = storage_session.create_table(
address=output_table_address,
name=name,
namespace=namespace,
partitions=partitions,
id_delimiter=id_delimiter
)
return output_table
def _create_output_table_info(self):
(
output_namespace,
output_name
) = default_output_info(
task_id=self.tracker.task_id,
task_version=self.tracker.task_version,
output_type="data"
)
return output_namespace, output_name
| [
"fate_flow.components._base.ComponentMeta",
"fate_arch.common.log.getLogger",
"fate_flow.manager.data_manager.DataTableTracker.create_table_tracker",
"fate_arch.storage.StorageTableMeta.create_address",
"fate_arch.common.data_utils.default_output_fs_path",
"fate_arch.session.Session.get_global",
"fate_flow.entity.Metric",
"fate_arch.common.data_utils.default_output_info",
"fate_flow.manager.data_manager.TableStorage.copy_table"
] | [((1123, 1138), 'fate_arch.common.log.getLogger', 'log.getLogger', ([], {}), '()\n', (1136, 1138), False, 'from fate_arch.common import log\n'), ((1158, 1181), 'fate_flow.components._base.ComponentMeta', 'ComponentMeta', (['"""Writer"""'], {}), "('Writer')\n", (1171, 1181), False, 'from fate_flow.components._base import BaseParam, ComponentBase, ComponentInputProtocol, ComponentMeta\n'), ((4121, 4191), 'fate_flow.manager.data_manager.TableStorage.copy_table', 'TableStorage.copy_table', (['src_table', 'dest_table'], {'deserialize_value': '(True)'}), '(src_table, dest_table, deserialize_value=True)\n', (4144, 4191), False, 'from fate_flow.manager.data_manager import TableStorage, DataTableTracker\n'), ((4265, 4471), 'fate_flow.manager.data_manager.DataTableTracker.create_table_tracker', 'DataTableTracker.create_table_tracker', (['output_name', 'output_namespace'], {'entity_info': "{'have_parent': True, 'parent_table_namespace': namespace,\n 'parent_table_name': name, 'job_id': self.tracker.job_id}"}), "(output_name, output_namespace,\n entity_info={'have_parent': True, 'parent_table_namespace': namespace,\n 'parent_table_name': name, 'job_id': self.tracker.job_id})\n", (4302, 4471), False, 'from fate_flow.manager.data_manager import TableStorage, DataTableTracker\n'), ((6494, 6580), 'fate_arch.storage.StorageTableMeta.create_address', 'StorageTableMeta.create_address', ([], {'storage_engine': 'engine', 'address_dict': 'address_dict'}), '(storage_engine=engine, address_dict=\n address_dict)\n', (6525, 6580), False, 'from fate_arch.storage import StorageEngine, StorageTableMeta\n'), ((7057, 7171), 'fate_arch.common.data_utils.default_output_info', 'default_output_info', ([], {'task_id': 'self.tracker.task_id', 'task_version': 'self.tracker.task_version', 'output_type': '"""data"""'}), "(task_id=self.tracker.task_id, task_version=self.tracker\n .task_version, output_type='data')\n", (7076, 7171), False, 'from fate_arch.common.data_utils import default_output_info, default_output_fs_path\n'), ((5134, 5154), 'fate_arch.session.Session.get_global', 'Session.get_global', ([], {}), '()\n', (5152, 5154), False, 'from fate_arch.session import Session\n'), ((6625, 6645), 'fate_arch.session.Session.get_global', 'Session.get_global', ([], {}), '()\n', (6643, 6645), False, 'from fate_arch.session import Session\n'), ((4889, 4929), 'fate_flow.entity.Metric', 'Metric', (['"""output_table_name"""', 'output_name'], {}), "('output_table_name', output_name)\n", (4895, 4929), False, 'from fate_flow.entity import Metric\n'), ((4952, 4996), 'fate_flow.entity.Metric', 'Metric', (['"""output_namespace"""', 'output_namespace'], {}), "('output_namespace', output_namespace)\n", (4958, 4996), False, 'from fate_flow.entity import Metric\n'), ((5019, 5041), 'fate_flow.entity.Metric', 'Metric', (['"""count"""', 'count'], {}), "('count', count)\n", (5025, 5041), False, 'from fate_flow.entity import Metric\n'), ((6285, 6382), 'fate_arch.common.data_utils.default_output_fs_path', 'default_output_fs_path', ([], {'name': 'name', 'namespace': 'namespace', 'storage_engine': 'StorageEngine.LOCALFS'}), '(name=name, namespace=namespace, storage_engine=\n StorageEngine.LOCALFS)\n', (6307, 6382), False, 'from fate_arch.common.data_utils import default_output_info, default_output_fs_path\n')] |
"""Selector for git provider."""
from nestor_api.adapters.git.abstract_git_provider import AbstractGitProvider
from nestor_api.adapters.git.github_git_provider import GitHubGitProvider
def get_git_provider(project_config: dict) -> AbstractGitProvider:
"""Retrieve git provider corresponding to project configuration"""
provider = project_config.get("git", {}).get("provider")
if provider is None:
raise ValueError("Git provider is not set in your project configuration file")
if provider == "github":
return GitHubGitProvider()
raise NotImplementedError("Adapter for this git provider is not implemented")
| [
"nestor_api.adapters.git.github_git_provider.GitHubGitProvider"
] | [((544, 563), 'nestor_api.adapters.git.github_git_provider.GitHubGitProvider', 'GitHubGitProvider', ([], {}), '()\n', (561, 563), False, 'from nestor_api.adapters.git.github_git_provider import GitHubGitProvider\n')] |
import os
import requests
import json
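# Post a message to a Matrix room via the client-server r0 API; the server, room id,
# access token and message text are all read from environment variables.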
server = os.environ['matrix_server']
data = '{"msgtype": "m.text", "body": "' + str(os.environ[
"message"]) + '", "format": "org.matrix.custom.html", "formatted_body": "' + \
str(os.environ["message"]) + '"}'
data = json.loads(data)
params = (
('access_token', os.environ["access_token"]),
)
room_id = os.environ['matrix_room']
address = "https://" + server + "/_matrix/client/r0/rooms/" + room_id + "/send/m.room.message"
response = requests.post(address, params=params, data=json.dumps(data))
print(response.content)
| [
"json.loads",
"json.dumps"
] | [((266, 282), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (276, 282), False, 'import json\n'), ((531, 547), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (541, 547), False, 'import json\n')] |
"""
Tests for the tools in the wrapper.
"""
import pytest
import numpy as np
from py21cmfast import cache_tools
def test_query(ic):
things = list(cache_tools.query_cache())
print(things)
classes = [t[1] for t in things]
assert ic in classes
def test_bad_fname(tmpdirec):
with pytest.raises(ValueError):
cache_tools.readbox(direc=str(tmpdirec), fname="a_really_fake_file.h5")
def test_readbox_data(tmpdirec, ic):
box = cache_tools.readbox(direc=str(tmpdirec), fname=ic.filename)
assert np.all(box.hires_density == ic.hires_density)
def test_readbox_filter(ic, tmpdirec):
ic2 = cache_tools.readbox(
kind="InitialConditions", hsh=ic._md5, direc=str(tmpdirec)
)
assert np.all(ic2.hires_density == ic.hires_density)
def test_readbox_seed(ic, tmpdirec):
ic2 = cache_tools.readbox(
kind="InitialConditions",
hsh=ic._md5,
seed=ic.random_seed,
direc=str(tmpdirec),
)
assert np.all(ic2.hires_density == ic.hires_density)
def test_readbox_nohash(ic, tmpdirec):
with pytest.raises(ValueError):
cache_tools.readbox(
kind="InitialConditions", seed=ic.random_seed, direc=str(tmpdirec)
)
| [
"py21cmfast.cache_tools.query_cache",
"numpy.all",
"pytest.raises"
] | [((533, 578), 'numpy.all', 'np.all', (['(box.hires_density == ic.hires_density)'], {}), '(box.hires_density == ic.hires_density)\n', (539, 578), True, 'import numpy as np\n'), ((735, 780), 'numpy.all', 'np.all', (['(ic2.hires_density == ic.hires_density)'], {}), '(ic2.hires_density == ic.hires_density)\n', (741, 780), True, 'import numpy as np\n'), ((981, 1026), 'numpy.all', 'np.all', (['(ic2.hires_density == ic.hires_density)'], {}), '(ic2.hires_density == ic.hires_density)\n', (987, 1026), True, 'import numpy as np\n'), ((155, 180), 'py21cmfast.cache_tools.query_cache', 'cache_tools.query_cache', ([], {}), '()\n', (178, 180), False, 'from py21cmfast import cache_tools\n'), ((305, 330), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (318, 330), False, 'import pytest\n'), ((1077, 1102), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1090, 1102), False, 'import pytest\n')] |
#
# Copyright (c) 2018, Salesforce, Inc.
# The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from collections import OrderedDict
import torch
from .data_utils.progbar import progress_bar
from .metrics import compute_metrics
from .models import TransformerForSequenceClassification, TransformerForTokenClassification
from .util import GenerationOutput, merge_translated_sentences
def generate_with_model(
model,
data_iterator,
numericalizer,
task,
args,
output_predictions_only=False,
output_confidence_features=False,
original_order=None,
confidence_estimators=None,
disable_progbar=True,
):
if isinstance(model, TransformerForTokenClassification) or isinstance(model, TransformerForSequenceClassification):
return generate_with_classification_model(
model, data_iterator, numericalizer, task, original_order=original_order, disable_progbar=disable_progbar
)
else:
return generate_with_seq2seq_model(
model,
data_iterator,
numericalizer,
task,
args,
output_predictions_only=output_predictions_only,
output_confidence_features=output_confidence_features,
original_order=original_order,
confidence_estimators=confidence_estimators,
disable_progbar=disable_progbar,
)
def generate_with_seq2seq_model(
model,
data_iterator,
numericalizer,
task,
args,
output_predictions_only=False,
output_confidence_features=False,
original_order=None,
confidence_estimators=None,
disable_progbar=True,
) -> GenerationOutput:
"""
Inputs:
original_order: List of indices. If provided, we will sort the results according to this order
confidence_estimator: if provided, will use it to calculate and output confidence scores
Outputs: predictions if `output_predictions_only` == True, (loss, predictions, answers, contexts) otherwise
loss
predictions: a List of Lists of strings
answers
contexts
"""
output_confidence_scores = confidence_estimators is not None
predictions = []
confidence_features = []
example_ids = []
answers = []
contexts = []
for batch in progress_bar(data_iterator, desc='Generating', disable=disable_progbar):
batch_size = len(batch.example_id)
batch_prediction = [[] for _ in range(batch_size)]
batch_confidence_features = [[] for _ in range(batch_size)]
batch_example_ids = batch.example_id
example_ids += batch_example_ids
if not output_predictions_only:
batch_answer = numericalizer.reverse(batch.answer.value.data, 'answer')
batch_answer = [
task.postprocess_prediction(batch_example_ids[i], batch_answer[i]) for i in range(len(batch_answer))
]
answers += batch_answer
batch_context = numericalizer.reverse(batch.context.value.data, 'context')
contexts += batch_context
elif output_confidence_features:
# need gold answer for confidence estimation
batch_answer = numericalizer.reverse(batch.answer.value.data, 'answer')
answers += batch_answer
for hyperparameter_idx in range(len(args.temperature)):
generated = model.generate(
batch,
max_output_length=args.max_output_length,
num_outputs=args.num_outputs[hyperparameter_idx] if args.temperature[hyperparameter_idx] != 0 else 1,
temperature=args.temperature[hyperparameter_idx] if args.temperature[hyperparameter_idx] > 0 else 1.0,
repetition_penalty=args.repetition_penalty[hyperparameter_idx],
top_k=args.top_k[hyperparameter_idx],
top_p=args.top_p[hyperparameter_idx],
num_beams=args.num_beams[hyperparameter_idx],
num_beam_groups=args.num_beam_groups[hyperparameter_idx],
diversity_penalty=args.diversity_penalty[hyperparameter_idx],
no_repeat_ngram_size=args.no_repeat_ngram_size[hyperparameter_idx],
do_sample=args.temperature[hyperparameter_idx] != 0, # if temperature==0, we do not sample
)
partial_batch_prediction_ids = generated.sequences
cross_attentions = getattr(generated, 'cross_attentions', None)
if cross_attentions is not None:
# stack tensors to shape (max_output_length, num_layers, batch_size, num_heads, 1, max_input_length)
cross_attentions = torch.stack(([torch.stack(tuple) for tuple in cross_attentions]))
# reshape to (num_layers, batch_size, num_heads, max_output_length, max_input_length)
cross_attentions = cross_attentions.squeeze(4)
cross_attentions = cross_attentions.permute(1, 2, 3, 0, 4).contiguous()
# choose only last layer attentions
# cross_attentions = torch.mean(cross_attentions[-3:, ...], dim=0)
cross_attentions = cross_attentions[-1, ...]
# postprocess prediction ids
kwargs = {'numericalizer': numericalizer, 'cross_attentions': cross_attentions}
partial_batch_prediction_ids = task.batch_postprocess_prediction_ids(
batch_example_ids, batch.context.value.data, partial_batch_prediction_ids, **kwargs
)
if output_confidence_features or output_confidence_scores:
partial_batch_confidence_features = model.confidence_features(
batch=batch, predictions=partial_batch_prediction_ids, mc_dropout_num=args.mc_dropout_num
)
partial_batch_prediction = numericalizer.reverse(partial_batch_prediction_ids, 'answer')
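            # generated outputs are grouped per example: num_outputs[hyperparameter_idx] candidates for each input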
def get_example_index(i):
return (i // args.num_outputs[hyperparameter_idx]) % batch_size
# post-process predictions
for i in range(len(partial_batch_prediction)):
partial_batch_prediction[i] = task.postprocess_prediction(
batch_example_ids[get_example_index(i)], partial_batch_prediction[i]
)
# put them into the right array
for i in range(len(partial_batch_prediction)):
batch_prediction[get_example_index(i)].append(partial_batch_prediction[i])
if output_confidence_features or output_confidence_scores:
batch_confidence_features[get_example_index(i)].append(partial_batch_confidence_features[i])
predictions += batch_prediction
confidence_features += batch_confidence_features
if original_order is not None:
# sort back to the original order
original_order, example_ids, predictions, answers, contexts, confidence_features = [
list(a)
for a in tuple(
zip(*sorted(list(zip(original_order, example_ids, predictions, answers, contexts, confidence_features))))
)
]
if getattr(args, 'translate_example_split', False):
# stitch sentences back together
example_ids, predictions, answers, contexts, confidence_features = merge_translated_sentences(
example_ids,
predictions,
answers,
contexts,
confidence_features,
numericalizer._tokenizer.src_lang,
numericalizer._tokenizer.tgt_lang,
)
# TODO calculate and return loss
loss = None
output = GenerationOutput(loss=loss)
if output_predictions_only:
output.predictions = predictions
else:
output.example_ids, output.predictions, output.answers, output.contexts = example_ids, predictions, answers, contexts
if output_confidence_features:
output.confidence_features = confidence_features
if args.override_confidence_labels:
for i, example in enumerate(confidence_features):
for confidence in example:
confidence.label = answers[i] == args.override_confidence_labels
if output_confidence_scores:
output.confidence_scores = []
for estimator in confidence_estimators:
confidence_scores = estimator.estimate(confidence_features)
output.confidence_scores.append(confidence_scores)
return output
def generate_with_classification_model(
model, data_iterator, numericalizer, task, original_order=None, disable_progbar=True
) -> GenerationOutput:
all_example_ids = []
all_answers = []
all_contexts = []
all_predictions = []
for batch in progress_bar(data_iterator, desc='Generating', disable=disable_progbar):
batch_example_ids = batch.example_id
batch_context = numericalizer.reverse(batch.context.value.data, 'context')
all_example_ids += batch_example_ids
output = model(input_ids=batch.context.value, attention_mask=(batch.context.value != numericalizer.pad_id))
labels = batch.answer.value.tolist()
logits = output.logits
predictions = torch.argmax(logits, dim=-1).tolist()
# logits for sequence classification is 2 dimensional
if logits.dim() == 2:
predictions = [[p] for p in predictions]
# Remove ignored index (special tokens)
processed_preds = []
processed_labels = []
for pred, label in zip(predictions, labels):
preds_list = []
labels_list = []
for p_, l_ in zip(pred, label):
if l_ == numericalizer.answer_pad_id:
continue
preds_list.append(task.id2label[p_])
labels_list.append(task.id2label[l_])
processed_preds.append([" ".join(preds_list)])
processed_labels.append(" ".join(labels_list))
all_contexts += batch_context
all_answers += processed_labels
all_predictions += processed_preds
if original_order is not None:
# sort back to the original order
original_order, all_example_ids, all_predictions, all_answers, all_contexts = [
list(a)
for a in tuple(
zip(*sorted(list(zip(original_order, all_example_ids, all_predictions, all_answers, all_contexts))))
)
]
# TODO calculate and return loss
loss = None
output = GenerationOutput(
loss=loss, example_ids=all_example_ids, contexts=all_contexts, answers=all_answers, predictions=all_predictions
)
return output
def calculate_and_reduce_metrics(predictions, answers, metrics_to_compute, reduce_metrics, lang):
metrics = OrderedDict()
for i in range(len(predictions[0])):
partial_metrics, _ = compute_metrics([p[i] for p in predictions], answers, metrics_to_compute, lang)
for k, v in partial_metrics.items():
if reduce_metrics == 'max':
metrics[k] = max(metrics.get(k, 0), v)
else:
raise ValueError('Invalid reduce_metrics argument')
return metrics
def print_results(keys, values, num_print=1):
print()
start = 0
end = start + num_print
values = [val[start:end] for val in values]
for ex_idx in range(len(values[0])):
for key_idx, key in enumerate(keys):
value = values[key_idx][ex_idx]
v = value[0] if isinstance(value, list) else value
print(f'{key:>11}: {repr(v)}')
print()
sys.stdout.flush()
def validate(task, val_iter, model, numericalizer, args, num_print=10):
with torch.no_grad():
model.eval()
if isinstance(model, torch.nn.DataParallel):
# get rid of the DataParallel wrapper
model = model.module
names = ['beam search', 'answer', 'context']
output = generate_with_model(model, val_iter, numericalizer, task, args)
metrics = calculate_and_reduce_metrics(
output.predictions, output.answers, task.metrics, args.reduce_metrics, model.tgt_lang
)
results = [output.predictions, output.answers, output.contexts]
print_results(names, results, num_print=num_print)
return output, metrics
| [
"collections.OrderedDict",
"torch.stack",
"torch.no_grad",
"sys.stdout.flush",
"torch.argmax"
] | [((12333, 12346), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12344, 12346), False, 'from collections import OrderedDict\n'), ((13148, 13166), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13164, 13166), False, 'import sys\n'), ((13250, 13265), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13263, 13265), False, 'import torch\n'), ((10757, 10785), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (10769, 10785), False, 'import torch\n'), ((6212, 6230), 'torch.stack', 'torch.stack', (['tuple'], {}), '(tuple)\n', (6223, 6230), False, 'import torch\n')] |
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("OriginalJoin").getOrCreate()
sc = spark.sparkContext
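# Key each genres row by its first CSV column and each ratings row by its second
# column so the two RDDs share the same join key.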
def map_genres(x):
x = x.split(",")
return (x[0], x[1])
def map_ratings(x):
x = x.split(",")
return (x[1], (x[0], x[2], x[3]))
rdd1 = sc.textFile("hdfs://master:9000/movies/reduced_genres.csv") \
.map(lambda x: map_genres(x))
rdd2 = sc.textFile("hdfs://master:9000/movies/ratings.csv") \
.map(lambda x: map_ratings(x))
# Join with join column = key of rdd
rdd = rdd1.join(rdd2).collect()
print("-------------------------------------------------------------")
print("Original RDD Api Join Output")
for i in rdd:
print(i)
print("-------------------------------------------------------------")
| [
"pyspark.sql.SparkSession.builder.appName"
] | [((46, 90), 'pyspark.sql.SparkSession.builder.appName', 'SparkSession.builder.appName', (['"""OriginalJoin"""'], {}), "('OriginalJoin')\n", (74, 90), False, 'from pyspark.sql import SparkSession\n')] |
from flask import Flask
from database import init_db
from api import user_router
from bin.my_batch import job
import logging
app = Flask(__name__)
app.register_blueprint(user_router, url_prefix='/api')
app.logger.setLevel(logging.DEBUG)
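# persist log records of level INFO and above to ./log/DEBUG.log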
handle = logging.FileHandler('./log/DEBUG.log')
handle.setLevel(logging.INFO)
app.logger.addHandler(handle)
app.config.from_object('config.Config')
init_db(app)
app.cli.add_command(job)
if __name__ == '__main__':
app.run()
| [
"database.init_db",
"logging.FileHandler",
"flask.Flask"
] | [((133, 148), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (138, 148), False, 'from flask import Flask\n'), ((249, 287), 'logging.FileHandler', 'logging.FileHandler', (['"""./log/DEBUG.log"""'], {}), "('./log/DEBUG.log')\n", (268, 287), False, 'import logging\n'), ((390, 402), 'database.init_db', 'init_db', (['app'], {}), '(app)\n', (397, 402), False, 'from database import init_db\n')] |
import geomstats.backend as gs
from geomstats.algebra_utils import from_vector_to_diagonal_matrix
from geomstats.geometry.skew_symmetric_matrices import SkewSymmetricMatrices
from geomstats.geometry.special_euclidean import SpecialEuclidean
from geomstats.geometry.special_orthogonal import SpecialOrthogonal
from tests.data_generation import TestData
class TestDataLieAlgebra(TestData):
def dimension_test_data(self):
smoke_data = [dict(algebra=SkewSymmetricMatrices(4), expected=6)]
return self.generate_tests(smoke_data)
def matrix_representation_and_belongs_test_data(self):
smoke_data = [
dict(algebra=SkewSymmetricMatrices(4), point=gs.random.rand(2, 6))
]
return self.generate_tests(smoke_data)
def orthonormal_basis_test_data(self):
smoke_data = [
dict(group=SpecialOrthogonal(3), metric_mat_at_identity=None),
dict(
group=SpecialOrthogonal(3),
metric_mat_at_identity=from_vector_to_diagonal_matrix(
gs.array([1.0, 2.0, 3.0])
),
),
]
return self.generate_tests(smoke_data)
def orthonormal_basis_se3_test_data(self):
smoke_data = [
dict(group=SpecialEuclidean(3), metric_mat_at_identity=None),
dict(
group=SpecialEuclidean(3),
metric_mat_at_identity=from_vector_to_diagonal_matrix(
gs.cast(gs.arange(1, SpecialEuclidean(3).dim + 1), gs.float32)
),
),
]
return self.generate_tests(smoke_data)
| [
"geomstats.backend.random.rand",
"geomstats.geometry.special_orthogonal.SpecialOrthogonal",
"geomstats.backend.array",
"geomstats.geometry.special_euclidean.SpecialEuclidean",
"geomstats.geometry.skew_symmetric_matrices.SkewSymmetricMatrices"
] | [((460, 484), 'geomstats.geometry.skew_symmetric_matrices.SkewSymmetricMatrices', 'SkewSymmetricMatrices', (['(4)'], {}), '(4)\n', (481, 484), False, 'from geomstats.geometry.skew_symmetric_matrices import SkewSymmetricMatrices\n'), ((654, 678), 'geomstats.geometry.skew_symmetric_matrices.SkewSymmetricMatrices', 'SkewSymmetricMatrices', (['(4)'], {}), '(4)\n', (675, 678), False, 'from geomstats.geometry.skew_symmetric_matrices import SkewSymmetricMatrices\n'), ((686, 706), 'geomstats.backend.random.rand', 'gs.random.rand', (['(2)', '(6)'], {}), '(2, 6)\n', (700, 706), True, 'import geomstats.backend as gs\n'), ((855, 875), 'geomstats.geometry.special_orthogonal.SpecialOrthogonal', 'SpecialOrthogonal', (['(3)'], {}), '(3)\n', (872, 875), False, 'from geomstats.geometry.special_orthogonal import SpecialOrthogonal\n'), ((947, 967), 'geomstats.geometry.special_orthogonal.SpecialOrthogonal', 'SpecialOrthogonal', (['(3)'], {}), '(3)\n', (964, 967), False, 'from geomstats.geometry.special_orthogonal import SpecialOrthogonal\n'), ((1271, 1290), 'geomstats.geometry.special_euclidean.SpecialEuclidean', 'SpecialEuclidean', (['(3)'], {}), '(3)\n', (1287, 1290), False, 'from geomstats.geometry.special_euclidean import SpecialEuclidean\n'), ((1362, 1381), 'geomstats.geometry.special_euclidean.SpecialEuclidean', 'SpecialEuclidean', (['(3)'], {}), '(3)\n', (1378, 1381), False, 'from geomstats.geometry.special_euclidean import SpecialEuclidean\n'), ((1060, 1085), 'geomstats.backend.array', 'gs.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (1068, 1085), True, 'import geomstats.backend as gs\n'), ((1495, 1514), 'geomstats.geometry.special_euclidean.SpecialEuclidean', 'SpecialEuclidean', (['(3)'], {}), '(3)\n', (1511, 1514), False, 'from geomstats.geometry.special_euclidean import SpecialEuclidean\n')] |
###
# Set up a file scan of a directory
# inputs, directory to scan,
###
import os.path
from argparse import ArgumentParser
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def create_parser():
parser: ArgumentParser = ArgumentParser(description="""
Scan a directory, inspect each file for a list of forbidden terms
""")
parser.add_argument("-d", "--dir", help="Directory to scan, full path works best", required=False)
parser.add_argument("-r", "--recursive_depth", type=int, help="Recursive Depth", default=-1, required=False)
parser.add_argument("-e", "--exclude", help="File types to exclude, comma sep list", required=False)
parser.add_argument("-c", "--config", help="config json file with bad words", default="dirtyword.json", required=False)
return parser
def validate_args():
###
# Validate all args in arg parse
###
validated_good = True
# Validate file exclude list
# Any possible exploits to happen here?
    # args.exclude may be None when -e/--exclude is not supplied
    if args.exclude:
        file_extensions = args.exclude.split(',')
        for ex in file_extensions:
            # strip a leading dot so '.py' and 'py' are treated the same
            if ex.startswith('.'):
                excludeList.append(ex[1:])
            else:
                excludeList.append(ex)
    # anything that would actually fail? --tennixpl
# Validate file exists
if os.path.isfile(args.config):
logger.info("Config File Exists")
else:
validated_good = False
print(f"{args.config} file given for config doesnt exist")
logger.error(f"Config file {args.config} is not reachable in current path '{os.path.abspath(__file__)}'")
# Validate dir
    if args.dir and os.path.isdir(args.dir):
        logger.info("Given directory exists")
    else:
        validated_good = False
        print(f"'{args.dir}' directory given for dir doesn't exist or is not accessible by user '{os.getlogin()}'")
        logger.error(f"Directory '{args.dir}' doesn't exist or is not reachable by '{os.getlogin()}'")
# Validate depth
if int(args.recursive_depth) >= -1:
logger.info(f"Recursive Depth of {args.recursive_depth} is validated")
else:
validated_good = False
print(f"Recursive dpeth '{args.recursive_depth}' is not valid, must be bigger than -1, -1 means full depth.")
print("Resetting recursive depth to -1")
args.recursive_depth = -1
return validated_good
def main():
global args
global excludeList
excludeList = []
args = create_parser().parse_args()
validate_args()
print(args)
if __name__ == "__main__":
main()
else:
print("test")
| [
"logging.basicConfig",
"logging.getLogger",
"argparse.ArgumentParser"
] | [((142, 181), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (161, 181), False, 'import logging\n'), ((191, 218), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (208, 218), False, 'import logging\n'), ((270, 389), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""\n Scan a directory, inspect each file for a list of forbidden terms\n """'}), '(description=\n """\n Scan a directory, inspect each file for a list of forbidden terms\n """\n )\n', (284, 389), False, 'from argparse import ArgumentParser\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2017/8/23 下午12:54
# @Author : chenyuelong
# @Mail : <EMAIL>
# @File : read.py
# @Software: PyCharm
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))))
class read():
'''
fastq中每条read
'''
def __init__(self,*args):
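        # args holds the four lines of one FASTQ record: ID, sequence, separator/info line, quality string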
self._readID = args[0]
self._readseq = args[1]
self._readinfo = args[2]
self._readq = args[3]
def main():
pass
if __name__ == '__main__':
main() | [
"os.path.abspath"
] | [((231, 259), 'os.path.abspath', 'os.path.abspath', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (246, 259), False, 'import os, sys\n')] |
from flask import Blueprint
user = Blueprint('user', __name__)
from dark_chess_app.modules.user import routes | [
"flask.Blueprint"
] | [((36, 63), 'flask.Blueprint', 'Blueprint', (['"""user"""', '__name__'], {}), "('user', __name__)\n", (45, 63), False, 'from flask import Blueprint\n')] |
from builtins import range
from builtins import object
# -*- coding: utf-8 -*-
import os
from qgis.PyQt.QtCore import QCoreApplication, Qt
from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar
from qgis.PyQt.QtGui import QIcon
from qgis.core import QgsCoordinateReferenceSystem
from qgis.utils import iface, plugins
from buildings.gui.dockwidget import BuildingsDockwidget
from buildings.gui.menu_frame import MenuFrame
from buildings.settings.project import get_attribute_dialog_setting, set_attribute_dialog_setting
# Get the path for the parent directory of this file.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
class Buildings(object):
"""QGIS Plugin Implementation."""
stop = False
def __init__(self, iface):
"""Constructor."""
# Store original enter attribute values dialog setting
self.attribute_dialog_setting = get_attribute_dialog_setting()
self.iface = iface
self.plugin_dir = __location__
self.image_dir = os.path.join(__location__, "..", "images")
self.menu_frame = None
# declare instance attributes
self.actions = []
self.menu = self.tr(u"&Building Maintenance")
self.main_toolbar = iface.addToolBar(u"Building Maintenance")
self.main_toolbar.setObjectName(u"Building Maintenance")
# set up the customizable toolbar
iface.building_toolbar = None
self.is_active = False
self.dockwidget = None
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
"""
return QCoreApplication.translate("BuildingMaintenance", message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=True,
whats_this=None,
parent=None,
):
""" Add a toolbar icon to the toolbar.
@param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
@type icon_path: str
@param text: Text that should be shown in menu items for this action.
@type text: str
@param callback: Function to be called when the action is triggered.
@type callback: function
@param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
@type enabled_flag: bool
@param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
@type add_to_menu: bool
@param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
@type add_to_toolbar: bool
@param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
@type status_tip: str
@param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
@param parent: Parent widget for the new action. Defaults None.
@type parent: QWidget
@return action: The action that was created.
Note that the action is also added to self.actions list.
@rtype action: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.main_toolbar.addAction(action)
if add_to_menu:
iface.addPluginToMenu(self.menu, action)
self.actions.append(action)
return action
def initGui(self):
"""Initiate buildings plugin"""
home_dir = os.path.dirname(__file__)
icon_path = os.path.join(home_dir, "icons", "buildings_plugin.png")
self.add_action(icon_path, text=self.tr(u"Building Maintenance"), callback=self.run, parent=iface.mainWindow())
try:
dw = plugins["buildings"].dockwidget
exists = False
if dw is not None:
self.dockwidget = plugins["buildings"].dockwidget
for row in range(0, (dw.lst_options.count()), 1):
if dw.lst_options.item(row).text() == "Buildings":
exists = True
if exists is False:
self.run()
except KeyError:
pass
set_attribute_dialog_setting(True)
def unload(self):
"""Removes the buildings plugin."""
# Close dockwidget and delete widget completely
if self.is_active:
self.dockwidget.close()
self.dockwidget.setParent(None)
del self.dockwidget
try:
dw = self.dockwidget
if dw is not None:
for row in range(0, (dw.lst_options.count()), 1):
if dw.lst_options.item(row).text() == "Buildings":
dw.lst_options.takeItem(row)
dw.frames = {}
if dw.stk_options.count() == 2:
dw.stk_options.setCurrentIndex(1)
dw.stk_options.removeWidget(dw.stk_options.currentWidget())
dw.stk_options.setCurrentIndex(0)
# Remove main toolbar
for action in self.actions:
iface.removePluginMenu(self.tr(u"&Building Maintenance"), action)
iface.removeToolBarIcon(action)
del self.main_toolbar
for toolbar in iface.mainWindow().findChildren(QToolBar, "Building Tools"):
iface.mainWindow().removeToolBar(toolbar)
# Setting parent to None, deletes the widget completely
toolbar.setParent(None)
# Remove action triggering toolbar from ToolBar menu
toolbar_menu = iface.mainWindow().findChildren(QMenu, "mToolbarMenu")[0]
for act in toolbar_menu.actions():
if act.text() == u"Building Tools":
toolbar_menu.removeAction(act)
except KeyError:
pass
# Remove Dockwidget from Panel menu
panel = iface.mainWindow().findChildren(QMenu, "mPanelMenu")[0]
for act in panel.actions():
if act.text() == u"Buildings":
panel.removeAction(act)
# Delete the mainWindow reference to the buildings dockwidget
for dock in iface.mainWindow().findChildren(QDockWidget, u"BuildingsDockWidgetBase"):
dock.setParent(None)
def run(self):
"""Run method that loads and starts the plugin"""
if not iface.building_toolbar:
# Set up toolbar
iface.building_toolbar = QToolBar(u"Building Tools")
iface.addToolBar(iface.building_toolbar, Qt.RightToolBarArea)
# Create the dockwidget and dialog and keep reference
if not self.dockwidget:
self.dockwidget = BuildingsDockwidget()
# Connect with close
self.dockwidget.closed.connect(self.on_dockwidget_closed)
# Show the dockwidget as a tab
layerdock = iface.mainWindow().findChild(QDockWidget, "Layers")
iface.addDockWidget(Qt.LeftDockWidgetArea, self.dockwidget)
iface.mainWindow().tabifyDockWidget(layerdock, self.dockwidget)
self.setup_main_toolbar()
dw = self.dockwidget
# no base layers
self.menu_frame = MenuFrame(self.dockwidget)
dw.insert_into_frames("menu_frame", self.menu_frame)
home_dir = os.path.dirname(__file__)
if dw.lst_options.item(0) is None:
icon_path = os.path.join(home_dir, "icons", "buildings_plugin.png")
item = QListWidgetItem("Buildings")
item.setIcon(QIcon(icon_path))
dw.lst_options.addItem(item)
dw.lst_options.setCurrentItem(item)
icon_path = os.path.join(home_dir, "icons", "capture_source.png")
item = QListWidgetItem("Capture Sources")
item.setIcon(QIcon(icon_path))
dw.lst_sub_menu.addItem(item)
icon_path = os.path.join(home_dir, "icons", "bulk_load.png")
item = QListWidgetItem("Bulk Load")
item.setIcon(QIcon(icon_path))
dw.lst_sub_menu.addItem(item)
icon_path = os.path.join(home_dir, "icons", "edit.png")
item = QListWidgetItem("Edit Outlines")
item.setIcon(QIcon(icon_path))
dw.lst_sub_menu.addItem(item)
icon_path = os.path.join(home_dir, "icons", "settings.png")
item = QListWidgetItem("Settings")
item.setIcon(QIcon(icon_path))
dw.lst_sub_menu.addItem(item)
icon_path = os.path.join(home_dir, "icons", "reference.png")
item = QListWidgetItem("Reference Data")
item.setIcon(QIcon(icon_path))
dw.lst_sub_menu.addItem(item)
canvas = iface.mapCanvas()
selectedcrs = "EPSG:2193"
target_crs = QgsCoordinateReferenceSystem()
target_crs.createFromUserInput(selectedcrs)
canvas.setDestinationCrs(target_crs)
self.on_click()
self.dockwidget.show()
self.dockwidget.raise_()
def on_click(self):
""" """
dw = self.dockwidget
        if dw.stk_options.count() == 2:  # an options frame is already loaded
            dw.stk_options.setCurrentIndex(1)  # switch to it so it can be removed
dw.stk_options.removeWidget(dw.stk_options.currentWidget())
dw.stk_options.setCurrentIndex(0)
dw.stk_options.addWidget(MenuFrame(self.dockwidget))
dw.stk_options.setCurrentIndex(1)
def setup_main_toolbar(self):
""" Set up the custom tool bar in its most basic state """
try:
iface.building_toolbar.clear()
iface.building_toolbar.setObjectName(u"Building Tools")
iface.building_toolbar.hide()
# Choose necessary basic tools
for nav in iface.mapNavToolToolBar().actions():
if nav.objectName() in ["mActionPan"]:
iface.building_toolbar.addAction(nav)
except AttributeError:
# iface.building_toolbar hadn't been created yet
pass
iface.actionPan().trigger()
def on_dockwidget_closed(self):
"""Cleanup necessary items here when plugin dockwidget is closed"""
# from buildings.settings.project import set_attribute_dialog_setting
set_attribute_dialog_setting(self.attribute_dialog_setting)
# Set up toolbar
self.setup_main_toolbar()
self.is_active = False
| [
"qgis.PyQt.QtWidgets.QToolBar",
"buildings.gui.menu_frame.MenuFrame",
"buildings.settings.project.set_attribute_dialog_setting",
"buildings.gui.dockwidget.BuildingsDockwidget",
"qgis.utils.iface.building_toolbar.setObjectName",
"buildings.settings.project.get_attribute_dialog_setting",
"qgis.utils.iface.building_toolbar.clear",
"qgis.PyQt.QtWidgets.QListWidgetItem",
"qgis.utils.iface.removeToolBarIcon",
"qgis.PyQt.QtWidgets.QAction",
"qgis.utils.iface.mainWindow",
"qgis.utils.iface.addDockWidget",
"qgis.PyQt.QtCore.QCoreApplication.translate",
"qgis.utils.iface.actionPan",
"os.path.dirname",
"qgis.utils.iface.mapCanvas",
"qgis.utils.iface.addPluginToMenu",
"qgis.utils.iface.building_toolbar.addAction",
"qgis.core.QgsCoordinateReferenceSystem",
"qgis.utils.iface.mapNavToolToolBar",
"qgis.PyQt.QtGui.QIcon",
"os.path.join",
"os.getcwd",
"qgis.utils.iface.building_toolbar.hide",
"qgis.utils.iface.addToolBar"
] | [((655, 666), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (664, 666), False, 'import os\n'), ((668, 693), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (683, 693), False, 'import os\n'), ((942, 972), 'buildings.settings.project.get_attribute_dialog_setting', 'get_attribute_dialog_setting', ([], {}), '()\n', (970, 972), False, 'from buildings.settings.project import get_attribute_dialog_setting, set_attribute_dialog_setting\n'), ((1065, 1107), 'os.path.join', 'os.path.join', (['__location__', '""".."""', '"""images"""'], {}), "(__location__, '..', 'images')\n", (1077, 1107), False, 'import os\n'), ((1286, 1327), 'qgis.utils.iface.addToolBar', 'iface.addToolBar', (['u"""Building Maintenance"""'], {}), "(u'Building Maintenance')\n", (1302, 1327), False, 'from qgis.utils import iface, plugins\n'), ((1731, 1789), 'qgis.PyQt.QtCore.QCoreApplication.translate', 'QCoreApplication.translate', (['"""BuildingMaintenance"""', 'message'], {}), "('BuildingMaintenance', message)\n", (1757, 1789), False, 'from qgis.PyQt.QtCore import QCoreApplication, Qt\n'), ((3675, 3691), 'qgis.PyQt.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (3680, 3691), False, 'from qgis.PyQt.QtGui import QIcon\n'), ((3709, 3736), 'qgis.PyQt.QtWidgets.QAction', 'QAction', (['icon', 'text', 'parent'], {}), '(icon, text, parent)\n', (3716, 3736), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((4196, 4221), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4211, 4221), False, 'import os\n'), ((4242, 4297), 'os.path.join', 'os.path.join', (['home_dir', '"""icons"""', '"""buildings_plugin.png"""'], {}), "(home_dir, 'icons', 'buildings_plugin.png')\n", (4254, 4297), False, 'import os\n'), ((4896, 4930), 'buildings.settings.project.set_attribute_dialog_setting', 'set_attribute_dialog_setting', (['(True)'], {}), '(True)\n', (4924, 4930), False, 'from buildings.settings.project import get_attribute_dialog_setting, set_attribute_dialog_setting\n'), ((11170, 11229), 'buildings.settings.project.set_attribute_dialog_setting', 'set_attribute_dialog_setting', (['self.attribute_dialog_setting'], {}), '(self.attribute_dialog_setting)\n', (11198, 11229), False, 'from buildings.settings.project import get_attribute_dialog_setting, set_attribute_dialog_setting\n'), ((4013, 4053), 'qgis.utils.iface.addPluginToMenu', 'iface.addPluginToMenu', (['self.menu', 'action'], {}), '(self.menu, action)\n', (4034, 4053), False, 'from qgis.utils import iface, plugins\n'), ((7320, 7347), 'qgis.PyQt.QtWidgets.QToolBar', 'QToolBar', (['u"""Building Tools"""'], {}), "(u'Building Tools')\n", (7328, 7347), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((7360, 7421), 'qgis.utils.iface.addToolBar', 'iface.addToolBar', (['iface.building_toolbar', 'Qt.RightToolBarArea'], {}), '(iface.building_toolbar, Qt.RightToolBarArea)\n', (7376, 7421), False, 'from qgis.utils import iface, plugins\n'), ((7547, 7568), 'buildings.gui.dockwidget.BuildingsDockwidget', 'BuildingsDockwidget', ([], {}), '()\n', (7566, 7568), False, 'from buildings.gui.dockwidget import BuildingsDockwidget\n'), ((7805, 7864), 'qgis.utils.iface.addDockWidget', 'iface.addDockWidget', (['Qt.LeftDockWidgetArea', 'self.dockwidget'], {}), '(Qt.LeftDockWidgetArea, self.dockwidget)\n', (7824, 7864), False, 'from qgis.utils import iface, plugins\n'), ((8072, 8098), 'buildings.gui.menu_frame.MenuFrame', 'MenuFrame', (['self.dockwidget'], {}), 
'(self.dockwidget)\n', (8081, 8098), False, 'from buildings.gui.menu_frame import MenuFrame\n'), ((8188, 8213), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8203, 8213), False, 'import os\n'), ((8567, 8620), 'os.path.join', 'os.path.join', (['home_dir', '"""icons"""', '"""capture_source.png"""'], {}), "(home_dir, 'icons', 'capture_source.png')\n", (8579, 8620), False, 'import os\n'), ((8640, 8674), 'qgis.PyQt.QtWidgets.QListWidgetItem', 'QListWidgetItem', (['"""Capture Sources"""'], {}), "('Capture Sources')\n", (8655, 8674), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((8785, 8833), 'os.path.join', 'os.path.join', (['home_dir', '"""icons"""', '"""bulk_load.png"""'], {}), "(home_dir, 'icons', 'bulk_load.png')\n", (8797, 8833), False, 'import os\n'), ((8853, 8881), 'qgis.PyQt.QtWidgets.QListWidgetItem', 'QListWidgetItem', (['"""Bulk Load"""'], {}), "('Bulk Load')\n", (8868, 8881), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((8992, 9035), 'os.path.join', 'os.path.join', (['home_dir', '"""icons"""', '"""edit.png"""'], {}), "(home_dir, 'icons', 'edit.png')\n", (9004, 9035), False, 'import os\n'), ((9055, 9087), 'qgis.PyQt.QtWidgets.QListWidgetItem', 'QListWidgetItem', (['"""Edit Outlines"""'], {}), "('Edit Outlines')\n", (9070, 9087), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((9198, 9245), 'os.path.join', 'os.path.join', (['home_dir', '"""icons"""', '"""settings.png"""'], {}), "(home_dir, 'icons', 'settings.png')\n", (9210, 9245), False, 'import os\n'), ((9265, 9292), 'qgis.PyQt.QtWidgets.QListWidgetItem', 'QListWidgetItem', (['"""Settings"""'], {}), "('Settings')\n", (9280, 9292), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((9403, 9451), 'os.path.join', 'os.path.join', (['home_dir', '"""icons"""', '"""reference.png"""'], {}), "(home_dir, 'icons', 'reference.png')\n", (9415, 9451), False, 'import os\n'), ((9471, 9504), 'qgis.PyQt.QtWidgets.QListWidgetItem', 'QListWidgetItem', (['"""Reference Data"""'], {}), "('Reference Data')\n", (9486, 9504), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((9612, 9629), 'qgis.utils.iface.mapCanvas', 'iface.mapCanvas', ([], {}), '()\n', (9627, 9629), False, 'from qgis.utils import iface, plugins\n'), ((9693, 9723), 'qgis.core.QgsCoordinateReferenceSystem', 'QgsCoordinateReferenceSystem', ([], {}), '()\n', (9721, 9723), False, 'from qgis.core import QgsCoordinateReferenceSystem\n'), ((10269, 10295), 'buildings.gui.menu_frame.MenuFrame', 'MenuFrame', (['self.dockwidget'], {}), '(self.dockwidget)\n', (10278, 10295), False, 'from buildings.gui.menu_frame import MenuFrame\n'), ((10466, 10496), 'qgis.utils.iface.building_toolbar.clear', 'iface.building_toolbar.clear', ([], {}), '()\n', (10494, 10496), False, 'from qgis.utils import iface, plugins\n'), ((10509, 10564), 'qgis.utils.iface.building_toolbar.setObjectName', 'iface.building_toolbar.setObjectName', (['u"""Building Tools"""'], {}), "(u'Building Tools')\n", (10545, 10564), False, 'from qgis.utils import iface, plugins\n'), ((10577, 10606), 'qgis.utils.iface.building_toolbar.hide', 'iface.building_toolbar.hide', ([], {}), '()\n', (10604, 10606), False, 'from qgis.utils import iface, plugins\n'), ((4398, 4416), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (4414, 4416), False, 
'from qgis.utils import iface, plugins\n'), ((7030, 7048), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (7046, 7048), False, 'from qgis.utils import iface, plugins\n'), ((8290, 8345), 'os.path.join', 'os.path.join', (['home_dir', '"""icons"""', '"""buildings_plugin.png"""'], {}), "(home_dir, 'icons', 'buildings_plugin.png')\n", (8302, 8345), False, 'import os\n'), ((8369, 8397), 'qgis.PyQt.QtWidgets.QListWidgetItem', 'QListWidgetItem', (['"""Buildings"""'], {}), "('Buildings')\n", (8384, 8397), False, 'from qgis.PyQt.QtWidgets import QAction, QDockWidget, QListWidgetItem, QMenu, QToolBar\n'), ((8700, 8716), 'qgis.PyQt.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (8705, 8716), False, 'from qgis.PyQt.QtGui import QIcon\n'), ((8907, 8923), 'qgis.PyQt.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (8912, 8923), False, 'from qgis.PyQt.QtGui import QIcon\n'), ((9113, 9129), 'qgis.PyQt.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (9118, 9129), False, 'from qgis.PyQt.QtGui import QIcon\n'), ((9318, 9334), 'qgis.PyQt.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (9323, 9334), False, 'from qgis.PyQt.QtGui import QIcon\n'), ((9530, 9546), 'qgis.PyQt.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (9535, 9546), False, 'from qgis.PyQt.QtGui import QIcon\n'), ((10941, 10958), 'qgis.utils.iface.actionPan', 'iface.actionPan', ([], {}), '()\n', (10956, 10958), False, 'from qgis.utils import iface, plugins\n'), ((6764, 6782), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (6780, 6782), False, 'from qgis.utils import iface, plugins\n'), ((7741, 7759), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (7757, 7759), False, 'from qgis.utils import iface, plugins\n'), ((7877, 7895), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (7893, 7895), False, 'from qgis.utils import iface, plugins\n'), ((8427, 8443), 'qgis.PyQt.QtGui.QIcon', 'QIcon', (['icon_path'], {}), '(icon_path)\n', (8432, 8443), False, 'from qgis.PyQt.QtGui import QIcon\n'), ((10674, 10699), 'qgis.utils.iface.mapNavToolToolBar', 'iface.mapNavToolToolBar', ([], {}), '()\n', (10697, 10699), False, 'from qgis.utils import iface, plugins\n'), ((10786, 10823), 'qgis.utils.iface.building_toolbar.addAction', 'iface.building_toolbar.addAction', (['nav'], {}), '(nav)\n', (10818, 10823), False, 'from qgis.utils import iface, plugins\n'), ((5934, 5965), 'qgis.utils.iface.removeToolBarIcon', 'iface.removeToolBarIcon', (['action'], {}), '(action)\n', (5957, 5965), False, 'from qgis.utils import iface, plugins\n'), ((6044, 6062), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (6060, 6062), False, 'from qgis.utils import iface, plugins\n'), ((6129, 6147), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (6145, 6147), False, 'from qgis.utils import iface, plugins\n'), ((6416, 6434), 'qgis.utils.iface.mainWindow', 'iface.mainWindow', ([], {}), '()\n', (6432, 6434), False, 'from qgis.utils import iface, plugins\n')] |
# All rights reserved by forest fairy.
# You cannot modify or share anything without sacrifice.
# If you don't agree, keep calm and don't look at code below!
__author__ = "VirtualV <https://github.com/virtualvfix>"
__date__ = "12/11/2017 4:26 PM"
from config import CONFIG
from unittest import result
from libs.core.template import TEST_ERROR
from ..config import RESULT_DELIMITER_1, RESULT_DELIMITER_2
from libs.core.logger import getLogger, getSysLogger, LEVEL
class LoggerResult(result.TestResult):
"""
Custom unittest result class
"""
def __init__(self, logger=None):
super(LoggerResult, self).__init__()
self.logger = logger or getLogger(__file__)
self.syslogger = getSysLogger()
self.failfast = False
self.testIndex = 0
self.time = []
self.errors = []
self.skipped = []
self.success = []
self.failures = []
self.expectedFailures = []
self.unexpectedSuccesses = []
def getDescription(self, test):
doc_first_line = test.shortDescription()
if doc_first_line:
return ['%s' % TEST_ERROR.safe_substitute(index=test.testIndex, test=test)] \
+ [x for x in doc_first_line.split('\n') if x != '']
else:
return ['%s' % TEST_ERROR.safe_substitute(index=test.testIndex, test=test)]
def startTest(self, test):
self.testIndex += 1
test.testIndex = self.testIndex
super(LoggerResult, self).startTest(test)
def addTime(self, test, time):
""" Add spent time """
self.time.append((test, time))
def addSubTest(self, test, subtest, err):
# add testIndex to sub test
if not hasattr(subtest, 'testIndex'):
subtest.testIndex = self.testIndex
if err is not None:
if getattr(self, 'failfast', False):
self.stop()
if issubclass(err[0], test.failureException):
self.addFailure(subtest, err)
else:
self.addError(subtest, err)
def addSuccess(self, test):
self.success.append((self.testIndex, test))
def addError(self, test, err):
# add test index
if not hasattr(test, 'testIndex'):
test.testIndex = self.testIndex
self.errors.append((self.testIndex, test, self._exc_info_to_string(err, test)))
self.printError('ERROR', test, self.errors)
def addFailure(self, test, err):
self.failures.append((self.testIndex, test, self._exc_info_to_string(err, test)))
self.printError('FAIL', test, self.failures)
def addSkip(self, test, reason):
self.skipped.append((self.testIndex, test, reason))
self.logger.warning('<skipped>', self.syslogger)
def addExpectedFailure(self, test, err):
self.expectedFailures.append((self.testIndex, test, self._exc_info_to_string(err, test)))
self.logger.info('<expected failure>', self.syslogger)
def addUnexpectedSuccess(self, test):
self.unexpectedSuccesses.append((self.testIndex, test))
self.logger.warning('<unexpected success>', self.syslogger)
def __printErrorViaLogger(self, flavour, level, test, err, inside=True, print_prepare_errors=False):
"""
Print error using logger. Used in :func:`printError` and :func:`printErrorList` functions.
Args:
flavour (str): Error or Fail string
level (int): Error level
test ({shortDescription}): Description of test
err (str): Error traceback
inside (bool, default True): Use different delimiter. True if error inside test run and False
if error in total result
print_prepare_errors (bool, default False): If True, also print errors
raised in **setUpClass** and **tearDownClass** functions
"""
# skip prepare errors if required
if str(test).startswith(('setUpClass', 'tearDownClass')) and not print_prepare_errors :
return
# test name
self.logger.newline(self.syslogger)
self.logger.table('%s*' % (RESULT_DELIMITER_2 if inside else RESULT_DELIMITER_1), ' *', self.syslogger,
border_delimiter='', level=level)
for i, msg in enumerate(self.getDescription(test)):
self.logger.table('%s%s' % ((flavour + ': ') if i == 0 else '', msg), self.syslogger,
column_delimiter='', level=level)
# error
self.logger.table('%s*' % RESULT_DELIMITER_2, ' *', self.syslogger, border_delimiter='', level=level)
if CONFIG.SYSTEM.DEBUG:
for e in [x for x in err.split('\n') if x != '']:
self.logger.table('. %s' % e, self.syslogger, column_delimiter='', level=level)
else:
self.logger.table('. %s' % [x for x in err.split('\n') if x != ''][-1], self.syslogger,
column_delimiter='', level=level)
for e in [x for x in err.split('\n') if x != '']:
self.syslogger.table('. %s' % e, column_delimiter='', level=level)
self.logger.table('%s*' % RESULT_DELIMITER_2, ' *', self.syslogger, border_delimiter='', level=level)
def printError(self, flavour, test, errors):
""" Print error or fail """
level = LEVEL.ERROR if 'error' in flavour.lower() else LEVEL.WARNING
for index, _test, err in errors:
# if test == _test:
if self.testIndex == index and test == _test and str(test) == str(_test):
self.__printErrorViaLogger(flavour, level, test, err)
break
def printErrors(self):
self.printErrorList('ERROR', self.errors, print_prepare_errors=True)
self.printErrorList('FAIL', self.failures)
def printErrorList(self, flavour, errors, print_prepare_errors=False):
level = LEVEL.ERROR if 'error' in flavour.lower() else LEVEL.WARNING
for index, test, err in errors:
self.__printErrorViaLogger(flavour, level, test, err, inside=False,
print_prepare_errors=print_prepare_errors)
| [
"libs.core.logger.getLogger",
"libs.core.logger.getSysLogger",
"libs.core.template.TEST_ERROR.safe_substitute"
] | [((716, 730), 'libs.core.logger.getSysLogger', 'getSysLogger', ([], {}), '()\n', (728, 730), False, 'from libs.core.logger import getLogger, getSysLogger, LEVEL\n'), ((671, 690), 'libs.core.logger.getLogger', 'getLogger', (['__file__'], {}), '(__file__)\n', (680, 690), False, 'from libs.core.logger import getLogger, getSysLogger, LEVEL\n'), ((1304, 1363), 'libs.core.template.TEST_ERROR.safe_substitute', 'TEST_ERROR.safe_substitute', ([], {'index': 'test.testIndex', 'test': 'test'}), '(index=test.testIndex, test=test)\n', (1330, 1363), False, 'from libs.core.template import TEST_ERROR\n'), ((1128, 1187), 'libs.core.template.TEST_ERROR.safe_substitute', 'TEST_ERROR.safe_substitute', ([], {'index': 'test.testIndex', 'test': 'test'}), '(index=test.testIndex, test=test)\n', (1154, 1187), False, 'from libs.core.template import TEST_ERROR\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 10:43:37 2019
@author: <NAME>
"""
import numpy as np
import pandas as pd
def get_headers(header_row):
""" Takes array of headers and sets up a dictionary corresponding to columns. """
ans = {}
for i in range(len(header_row)):
ans[header_row[i]] = i
return ans
def split_on_model(df, header):
""" Split the sections into arrays based on the model type. """
res = []
tmp = []
last_row = None
for row in df:
if len(tmp) != 0 and last_row[header['epoch']] == 'break':
res.append(tmp)
tmp = []
else:
tmp.append(row)
last_row = row
return res
def filter_data(df, header, restraints = {}):
""" Filters the data based off of the restraints given, and formats it appropriately. """
filtered = []
for row in df:
to_add = True
for r in restraints.keys():
idx = header[r]
val = row[-1][idx] # take the last row
to_add = to_add and (val in restraints[r])
if to_add:
filtered.append(row)
return filtered
def extract_plotable_data(filtered, header, x_axis, y_axis):
""" Takes filtered data, and gets the x-axis and y-axis from that row. """
idx_x, idx_y = header[x_axis], header[y_axis]
all_x, all_y, all_h = [], [], []
for model in filtered:
x, y = [], []
all_h.append(model[0]) # Just append all the information from the first row.
for row in model:
try:
x_val = float(row[idx_x])
y_val = float(row[idx_y])
if not(np.isnan(x_val) or np.isnan(y_val)):
x.append(x_val)
y.append(y_val)
except:
continue
if len(x) > 0 and len(y) > 0:
all_x.append(x)
all_y.append(y)
return all_x, all_y, all_h
def load(f = './attn_logs_val.csv'):
""" Loads the data, and gets a proper extraction of the header and data. """
df = pd.read_csv(f, header = 0)
df['attention_acceptance'].fillna("auc", inplace = True)
df['epoch'].fillna("break", inplace = True)
loh = list(df.columns)
header = get_headers(loh)
df = np.asarray(df)
return split_on_model(df, header), header | [
"numpy.asarray",
"pandas.read_csv",
"numpy.isnan"
] | [((2154, 2178), 'pandas.read_csv', 'pd.read_csv', (['f'], {'header': '(0)'}), '(f, header=0)\n', (2165, 2178), True, 'import pandas as pd\n'), ((2358, 2372), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (2368, 2372), True, 'import numpy as np\n'), ((1721, 1736), 'numpy.isnan', 'np.isnan', (['x_val'], {}), '(x_val)\n', (1729, 1736), True, 'import numpy as np\n'), ((1740, 1755), 'numpy.isnan', 'np.isnan', (['y_val'], {}), '(y_val)\n', (1748, 1755), True, 'import numpy as np\n')] |
"""Benchmarking tools for estimating the runtime overhead of Cockpit."""
import os
import sys
import warnings
from collections import defaultdict
import pandas
from cockpit import Cockpit
from cockpit.plotter import CockpitPlotter
from cockpit.quantities import GradHist2d, Time
from cockpit.quantities.bin_adaptation import GradAbsMax, ParamAbsMax
from deepobs.pytorch.config import set_default_device
from torch.optim import SGD
from utils import get_train_size
sys.path.append(os.getcwd())
from experiments.utils.deepobs_runner import _DeepOBSRunner # noqa
def _check_timer(quantities, steps):
"""Run checks to make sure the benchmark has access to time information."""
timers = [q for q in quantities if isinstance(q, Time)]
num_timers = len(timers)
if num_timers != 1:
raise ValueError(f"Got {num_timers} Time quantities. Expect 1.")
timer = timers[0]
for step in [0, steps]:
if not timer.should_compute(step):
raise ValueError(f"Time quantity must track at step {step}")
def _check_quantities(quantities):
"""Run checks on quantities to make sure the benchmark runs."""
if quantities is None:
raise ValueError("Expect list of quantities but got None")
for q in quantities:
if not q.output == defaultdict(dict):
raise ValueError(
f"Quantity {q} has already been used or not been initialized"
)
def _runner_stop_after(step=None):
class BenchmarkRunner(_DeepOBSRunner):
"""Runner with disabled computation DeepOBS' additional metrics."""
STOP_AFTER = step
def _maybe_stop_iteration(self, global_step, batch_count):
"""Don't interrupt."""
if self.STOP_AFTER is None:
return
if getattr(self, "_already_stopped", False):
raise RuntimeError("Runner must stop at last epoch.")
if global_step > self.STOP_AFTER:
warnings.warn(f"BenchmarkRunner stopping epoch at step {global_step}")
self._already_stopped = True
raise StopIteration
def _should_eval(self):
"""Disable DeepOBS' evaluation of test/train/valid losses and accuracies."""
return False
return BenchmarkRunner
def _make_runner(quantities, steps):
"""Return a DeepOBS runner that tracks the specified quantities."""
optimizer_class = SGD
hyperparams = {
"lr": {"type": float, "default": 0.001},
"momentum": {"type": float, "default": 0.0},
"nesterov": {"type": bool, "default": False},
}
def plot_schedule(global_step):
"""Never plot."""
return False
runner_cls = _runner_stop_after(step=steps)
return runner_cls(
optimizer_class,
hyperparams,
quantities=quantities,
plot=False,
plot_schedule=plot_schedule,
)
def _get_num_epochs(runner, testproblem, steps):
"""Convert maximum number of steps into number of epochs."""
batch_size = runner._use_default(testproblem, "batch_size")
train_size = get_train_size(testproblem)
steps_per_epoch, _ = divmod(train_size, batch_size)
num_epochs, rest = divmod(steps, steps_per_epoch)
if rest > 0:
num_epochs += 1
return num_epochs
def constant_lr_schedule(num_epochs):
"""Constant learning rate schedule."""
return lambda epoch: 1.0
def _read_tracking_data(runner):
"""Return the tracked data from a completed run of a runner.
Abuses the CockpitPlotter to read data.
Args:
runner (deepobs.runner): A DeepOBS runner to access the logpath.
Returns:
[Pandas.DataFrame]: DataFrame holding the tracked data.
"""
plotter = CockpitPlotter()
plotter._read_tracking_results(runner._get_cockpit_logpath())
return plotter.tracking_data
def run_benchmark(testproblem, quantities, steps, random_seed, save_memory=True):
"""Return average time per iteration.
Args:
testproblem (str): Label of a DeepOBS problem.
quantities ([Quantity]): List of quantities used in the cockpit.
steps (int): Maximum number of iterations used for average
time estimation.
random_seed (int): Random seed used at initialization.
save_memory (bool, optional): Enable memory saving in ``Cockpit``.
Default: ``True``.
Returns:
[float] : Average run time.
"""
_check_timer(quantities, steps)
_check_quantities(quantities)
# set memory saving
old_save_memory = Cockpit.BACKPACK_CONV_SAVE_MEMORY
Cockpit.BACKPACK_CONV_SAVE_MEMORY = save_memory
runner = _make_runner(quantities, steps)
num_epochs = _get_num_epochs(runner, testproblem, steps)
runner.run(
testproblem=testproblem,
num_epochs=num_epochs,
l2_reg=0.0, # necessary for backobs!
lr_schedule=constant_lr_schedule,
random_seed=random_seed,
track_interval=float("nan"), # irrelevant
# turn plotting off, everything below is irrelevant
plot_interval=float("nan"),
show_plots=False,
save_plots=False,
save_final_plot=False,
save_animation=False,
)
# restore old value of memory saving
Cockpit.BACKPACK_CONV_SAVE_MEMORY = old_save_memory
data = _read_tracking_data(runner)
return extract_average_time(data, steps)
def extract_average_time(data, steps):
"""Extract average run time per iteration from tracked data."""
data = data[["iteration", "Time"]].dropna()
data = data.loc[data["iteration"].isin([0, steps])]
iterations = data["iteration"].to_list()
values = data["Time"].to_list()
assert iterations == [0, steps]
assert len(values) == 2
return (values[1] - values[0]) / (iterations[1] - iterations[0])
def _compute_steps(steps, track_events, track_interval):
"""Compute steps and check if large enough to allow min. ``track_events`` events."""
if steps is None:
return track_events * track_interval
else:
num_events = steps // track_interval
if num_events < track_events:
raise ValueError(
f"steps is too small! Want {num_events}>={track_events} track events."
)
return steps
def benchmark( # noqa: C901
testproblems,
configs,
track_intervals,
num_seeds,
devices,
steps=None,
track_events=20,
savefile=None,
header=None,
):
"""Benchmark the cockpit."""
columns = [
"testproblem",
"quantities",
"track_interval",
"steps",
"random_seed",
"device",
"time_per_step",
]
if savefile is not None and os.path.exists(savefile):
# try loading
data = pandas.read_csv(savefile, comment="#", index_col=[0])
assert set(data.columns) == set(columns), (
f"Loaded file {savefile} has inconsistent columns:"
+ f"\n\tFound: {list(data.columns)}"
+ f"\n\tRequire: {columns}"
)
else:
data = pandas.DataFrame(columns=columns)
for device in devices:
set_default_device(device)
for testproblem in testproblems:
for name, config in configs.items():
for track_interval in track_intervals:
for random_seed in range(num_seeds):
this_steps = _compute_steps(steps, track_events, track_interval)
run_data = {
"testproblem": testproblem,
"quantities": name,
"track_interval": track_interval,
"steps": this_steps,
"random_seed": random_seed,
"device": device,
}
run_exists = (
(data["testproblem"] == testproblem)
& (data["quantities"] == name)
& (data["steps"] == this_steps)
& (data["track_interval"] == track_interval)
& (data["random_seed"] == random_seed)
& (data["device"] == device)
).any()
if run_exists:
print(f"Setting already exists and is skipped: {run_data}")
else:
print(f"Running setting: {run_data}")
def track_schedule(global_step):
return (
global_step >= 0
and global_step % track_interval == 0
)
quantities = _build_quantities(config, track_schedule)
runtime = run_benchmark(
testproblem, quantities, this_steps, random_seed
)
run_data["time_per_step"] = runtime
data = data.append(run_data, ignore_index=True)
if savefile is not None:
with open(savefile, "w") as f:
if header is not None:
header_comment = "\n".join(
"# " + line for line in header.splitlines()
)
f.write(header_comment + "\n")
data.to_csv(f)
return data
def _build_quantities(config, track_schedule):
"""Initialize quantities. Add bin adaptation to 2d histogram for performance."""
quantities = []
for q in config:
if q == GradHist2d:
adapt = (GradAbsMax(track_schedule), ParamAbsMax(track_schedule))
q_inst = q(track_schedule, adapt=adapt)
else:
q_inst = q(track_schedule=track_schedule)
quantities.append(q_inst)
return quantities
| [
"os.path.exists",
"pandas.read_csv",
"cockpit.quantities.bin_adaptation.GradAbsMax",
"os.getcwd",
"cockpit.quantities.bin_adaptation.ParamAbsMax",
"collections.defaultdict",
"deepobs.pytorch.config.set_default_device",
"cockpit.plotter.CockpitPlotter",
"pandas.DataFrame",
"warnings.warn",
"utils.get_train_size"
] | [((483, 494), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (492, 494), False, 'import os\n'), ((3110, 3137), 'utils.get_train_size', 'get_train_size', (['testproblem'], {}), '(testproblem)\n', (3124, 3137), False, 'from utils import get_train_size\n'), ((3754, 3770), 'cockpit.plotter.CockpitPlotter', 'CockpitPlotter', ([], {}), '()\n', (3768, 3770), False, 'from cockpit.plotter import CockpitPlotter\n'), ((6739, 6763), 'os.path.exists', 'os.path.exists', (['savefile'], {}), '(savefile)\n', (6753, 6763), False, 'import os\n'), ((6802, 6855), 'pandas.read_csv', 'pandas.read_csv', (['savefile'], {'comment': '"""#"""', 'index_col': '[0]'}), "(savefile, comment='#', index_col=[0])\n", (6817, 6855), False, 'import pandas\n'), ((7098, 7131), 'pandas.DataFrame', 'pandas.DataFrame', ([], {'columns': 'columns'}), '(columns=columns)\n', (7114, 7131), False, 'import pandas\n'), ((7168, 7194), 'deepobs.pytorch.config.set_default_device', 'set_default_device', (['device'], {}), '(device)\n', (7186, 7194), False, 'from deepobs.pytorch.config import set_default_device\n'), ((1289, 1306), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1300, 1306), False, 'from collections import defaultdict\n'), ((1970, 2040), 'warnings.warn', 'warnings.warn', (['f"""BenchmarkRunner stopping epoch at step {global_step}"""'], {}), "(f'BenchmarkRunner stopping epoch at step {global_step}')\n", (1983, 2040), False, 'import warnings\n'), ((9937, 9963), 'cockpit.quantities.bin_adaptation.GradAbsMax', 'GradAbsMax', (['track_schedule'], {}), '(track_schedule)\n', (9947, 9963), False, 'from cockpit.quantities.bin_adaptation import GradAbsMax, ParamAbsMax\n'), ((9965, 9992), 'cockpit.quantities.bin_adaptation.ParamAbsMax', 'ParamAbsMax', (['track_schedule'], {}), '(track_schedule)\n', (9976, 9992), False, 'from cockpit.quantities.bin_adaptation import GradAbsMax, ParamAbsMax\n')] |
from gr_nlp_toolkit.data.downloader import Downloader
import gdown
class GDriveDownloader(Downloader):
def __init__(self):
self.urls = {
'pos': 'https://drive.google.com/uc?id=1Or5HDk1kVnxI3_w0fwgR8-dzO0jvcc_L', # pos link
'ner': 'https://drive.google.com/uc?id=1fx0pHtcN7F2Vj9L8y5TUpbjSqKTUaT3i', # ner link
'dp': 'https://drive.google.com/uc?id=1NhEqmLBf67Ydw-LdI7eB-f0afMPgNSmG' # dp link
}
def download_processor(self, processor_name: str, target_path: str):
gdown.download(self.urls[processor_name], output=target_path, quiet=False)
| [
"gdown.download"
] | [((537, 611), 'gdown.download', 'gdown.download', (['self.urls[processor_name]'], {'output': 'target_path', 'quiet': '(False)'}), '(self.urls[processor_name], output=target_path, quiet=False)\n', (551, 611), False, 'import gdown\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @author : microfat
# @time : 01/26/21 20:40:44
# @File : operation_dept.py
import time
import logging
# logger config
logger = logging.getLogger("operation ")
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
formatter = logging.Formatter("\x1b[93;21m[%(asctime)s] %(name)s:%(levelname)s: %(message)s\x1b[0m")
ch.setFormatter(formatter)
logger.addHandler(ch)
# ops job description
def ops(production_line, total_workshop):
logger.info("Operation Dept start...")
while True:
mq_log = "monitor line size:"
for i in range(total_workshop):
mq_log += "line"+str(i)+"-> ("+str(production_line[i].qsize())+"), "
logger.info(mq_log)
time.sleep(1) | [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler",
"time.sleep"
] | [((182, 220), 'logging.getLogger', 'logging.getLogger', (['"""operation """'], {}), "('operation ')\n", (199, 220), False, 'import logging\n'), ((257, 280), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (278, 280), False, 'import logging\n'), ((293, 386), 'logging.Formatter', 'logging.Formatter', (['"""\x1b[93;21m[%(asctime)s] %(name)s:%(levelname)s: %(message)s\x1b[0m"""'], {}), "(\n '\\x1b[93;21m[%(asctime)s] %(name)s:%(levelname)s: %(message)s\\x1b[0m')\n", (310, 386), False, 'import logging\n'), ((752, 765), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (762, 765), False, 'import time\n')] |
# Authored By- <NAME>
# Github Id - @charlie219
# Email - <EMAIL>
# Date - 25-6-2021
import socket
from PyQt5.QtWidgets import *
import sys
import pickle
import json
from mediaplayer import Window
import gui
class Client:
def __init__(self, user_name, choice, group_id = None, previousFrame = None):
#print(user_name, choice, group_id, frame)
if previousFrame:
previousFrame.destroy()
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# If you are running the server in local device, host_ip will be '127.0.0.1'
self.host_ip = "127.0.0.1" # For this to work on your machine this must be equal to the ipv4 address of the machine running the server
# You can find this address by typing ipconfig in CMD and copying the ipv4 address. Again this must be the server's
# ipv4 address. This field will be the same for all your clients.
#self.host_ip = '172.16.31.10'
self.host_port = 5690 # Same as the server
self.HEADER = 4
# Important static variables
self.userName = user_name
self.choice = choice
self.group_id = group_id
self.addr = (self.host_ip, self.host_port)
self.connect()
def connect(self):
self.client_socket.connect(self.addr)
#print("Connected to server")
payload = {
'Username' : self.userName,
'Choice' : self.choice,
'isAdmin' : True,
'Movie' : None
}
if self.choice == 2:
payload['Group ID'] = int(self.group_id)
payload['isAdmin'] = False
msg = pickle.dumps(payload)
msg = bytes(f'{len(msg):<{self.HEADER}}', 'utf-8') + msg
self.client_socket.send(msg)
# Receive Acknowledgement from Server
messageHeader = self.client_socket.recv(self.HEADER)
msg_len = int(messageHeader.decode('utf-8'))
msg = self.client_socket.recv(msg_len)
acknowledgement = pickle.loads(msg)
#print(acknowledgement)
if acknowledgement['Group ID'] is None:
gui.Application(self.userName)
else:
payload['Group ID'] = acknowledgement['Group ID']
payload['Movie'] = acknowledgement['Movie']
payload['Group Admin'] = acknowledgement['Group Admin']
# Initialize the mediaPlayer
app = QApplication(sys.argv)
window = Window(self.client_socket, payload)
sys.exit(app.exec_())
| [
"socket.socket",
"pickle.dumps",
"mediaplayer.Window",
"pickle.loads",
"gui.Application"
] | [((450, 499), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (463, 499), False, 'import socket\n'), ((1756, 1777), 'pickle.dumps', 'pickle.dumps', (['payload'], {}), '(payload)\n', (1768, 1777), False, 'import pickle\n'), ((2115, 2132), 'pickle.loads', 'pickle.loads', (['msg'], {}), '(msg)\n', (2127, 2132), False, 'import pickle\n'), ((2226, 2256), 'gui.Application', 'gui.Application', (['self.userName'], {}), '(self.userName)\n', (2241, 2256), False, 'import gui\n'), ((2561, 2596), 'mediaplayer.Window', 'Window', (['self.client_socket', 'payload'], {}), '(self.client_socket, payload)\n', (2567, 2596), False, 'from mediaplayer import Window\n')] |
#!/usr/bin/env python
import sys
from setuptools import setup, find_packages
def get_version():
with open('VERSION') as f:
return f.read().strip()
install_requires = [
'django',
'nose',
'mock',
]
setup_requires = []
if 'nosetests' in sys.argv[0:]:
setup_requires.append('nose')
setup(
name='nose-template-usage',
version=get_version(),
author='DISQUS',
author_email='<EMAIL>',
url='http://github.com/disqus/nose-template-usage',
packages=find_packages(exclude=["tests"]),
zip_safe=False,
install_requires=install_requires,
setup_requires=setup_requires,
entry_points={
'nose.plugins.0.10': [
'template-usage-report = templateusage:TemplateUsageReportPlugin'
]
},
license='Apache License 2.0',
include_package_data=True,
test_suite='nose.collector',
)
| [
"setuptools.find_packages"
] | [((498, 530), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests']"}), "(exclude=['tests'])\n", (511, 530), False, 'from setuptools import setup, find_packages\n')] |
# -*- coding: utf-8 -*-
import random
import secrets
import unittest
from starlette.testclient import TestClient
from app.com_lib.file_functions import save_json
from app.main import app
client = TestClient(app)
directory_to__files: str = "data"
# api/v1/groups/list?delay=1&qty=10&offset=1&active=true&groupType=approval
class Test(unittest.TestCase):
def test_groups_post_error(self):
test_data = {
"name": "test 1",
"description": "test group",
"group_type": "approval",
"is_active": True,
}
url = f"/api/v1/groups/create"
response = client.post(url, json=test_data)
assert response.status_code == 422
def test_groups_post_error_type(self):
test_data = {
"name": f"test{secrets.token_hex(1)}",
"description": f"test group {secrets.token_hex(2)}",
"group_type": secrets.token_hex(1),
"is_active": True,
}
url = f"/api/v1/groups/create"
response = client.post(url, json=test_data)
assert response.status_code == 400
def test_groups_post(self):
test_data = {
"name": f"test{secrets.token_hex(4)}",
"description": "test group",
"group_type": "approval",
"is_active": True,
}
save_json("test_data_test_user.json", test_data)
url = f"/api/v1/groups/create?delay=1"
response = client.post(url, json=test_data)
assert response.status_code == 201
data = response.json()
save_json("test_data_group.json", data)
# duplicate
response = client.post(url, json=test_data)
assert response.status_code == 400
def test_groups_post_many(self):
for _ in range(20):
test_data = {
"name": f"test{secrets.token_hex(4)}",
"description": "test group",
"group_type": "notification",
"is_active": random.choice([True, False]),
}
url = f"/api/v1/groups/create"
response = client.post(url, json=test_data)
assert response.status_code == 201
def test_groups_post_two_error(self):
test_data = {
"name": f"test{secrets.token_hex(4)}",
"description": "test group",
"group_type": "notification",
"is_active": False,
}
url = f"/api/v1/groups/create"
response = client.post(url, json=test_data)
response = client.post(url, json=test_data)
assert response.status_code == 400
| [
"starlette.testclient.TestClient",
"app.com_lib.file_functions.save_json",
"random.choice",
"secrets.token_hex"
] | [((199, 214), 'starlette.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (209, 214), False, 'from starlette.testclient import TestClient\n'), ((1346, 1394), 'app.com_lib.file_functions.save_json', 'save_json', (['"""test_data_test_user.json"""', 'test_data'], {}), "('test_data_test_user.json', test_data)\n", (1355, 1394), False, 'from app.com_lib.file_functions import save_json\n'), ((1578, 1617), 'app.com_lib.file_functions.save_json', 'save_json', (['"""test_data_group.json"""', 'data'], {}), "('test_data_group.json', data)\n", (1587, 1617), False, 'from app.com_lib.file_functions import save_json\n'), ((913, 933), 'secrets.token_hex', 'secrets.token_hex', (['(1)'], {}), '(1)\n', (930, 933), False, 'import secrets\n'), ((2001, 2029), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (2014, 2029), False, 'import random\n'), ((798, 818), 'secrets.token_hex', 'secrets.token_hex', (['(1)'], {}), '(1)\n', (815, 818), False, 'import secrets\n'), ((863, 883), 'secrets.token_hex', 'secrets.token_hex', (['(2)'], {}), '(2)\n', (880, 883), False, 'import secrets\n'), ((1194, 1214), 'secrets.token_hex', 'secrets.token_hex', (['(4)'], {}), '(4)\n', (1211, 1214), False, 'import secrets\n'), ((2285, 2305), 'secrets.token_hex', 'secrets.token_hex', (['(4)'], {}), '(4)\n', (2302, 2305), False, 'import secrets\n'), ((1857, 1877), 'secrets.token_hex', 'secrets.token_hex', (['(4)'], {}), '(4)\n', (1874, 1877), False, 'import secrets\n')] |
from typing import Tuple, Any
from cryptography.hazmat.primitives.asymmetric import rsa, padding
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.backends import default_backend
from cryptography.exceptions import InvalidSignature
def generate_keys() -> Tuple[rsa.RSAPrivateKey, rsa.RSAPublicKey]:
priv = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
return priv, priv.public_key()
def sign(message: Any, private_key: rsa.RSAPrivateKey) -> bytes:
message = bytes(str(message), 'utf-8')
sig = private_key.sign(
message,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return sig
def verify(message: Any, signature: bytes, public_key) -> bool:
message = bytes(str(message), 'utf-8')
try:
public_key.verify(
signature,
message,
padding.PSS(
mgf=padding.MGF1(hashes.SHA256()),
salt_length=padding.PSS.MAX_LENGTH
),
hashes.SHA256()
)
return True
except InvalidSignature:
return False
def serialize_private_key(key: rsa.RSAPrivateKeyWithSerialization, password: bytes = b'') -> bytes:
return key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.BestAvailableEncryption(password)
)
def serialize_public_key(key: rsa.RSAPublicKey) -> bytes:
return key.public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
def load_private_key(pem_data: bytes, password: bytes = None) -> rsa.RSAPrivateKey:
return serialization.load_pem_private_key(pem_data, password, default_backend())
def load_public_key(pem_data: bytes) -> rsa.RSAPublicKey:
return serialization.load_pem_public_key(pem_data, default_backend())
def compute_hash(data: Any) -> bytes:
if type(data) != bytes:
data = bytes(str(data), 'utf8')
digest = hashes.Hash(hashes.SHA256(), default_backend())
digest.update(data)
return digest.finalize()
| [
"cryptography.hazmat.backends.default_backend",
"cryptography.hazmat.primitives.serialization.BestAvailableEncryption",
"cryptography.hazmat.primitives.hashes.SHA256"
] | [((796, 811), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (809, 811), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((1491, 1538), 'cryptography.hazmat.primitives.serialization.BestAvailableEncryption', 'serialization.BestAvailableEncryption', (['password'], {}), '(password)\n', (1528, 1538), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((1884, 1901), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (1899, 1901), False, 'from cryptography.hazmat.backends import default_backend\n'), ((2018, 2035), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (2033, 2035), False, 'from cryptography.hazmat.backends import default_backend\n'), ((2170, 2185), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (2183, 2185), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((2187, 2204), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (2202, 2204), False, 'from cryptography.hazmat.backends import default_backend\n'), ((448, 465), 'cryptography.hazmat.backends.default_backend', 'default_backend', ([], {}), '()\n', (463, 465), False, 'from cryptography.hazmat.backends import default_backend\n'), ((1176, 1191), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (1189, 1191), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((712, 727), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (725, 727), False, 'from cryptography.hazmat.primitives import hashes, serialization\n'), ((1080, 1095), 'cryptography.hazmat.primitives.hashes.SHA256', 'hashes.SHA256', ([], {}), '()\n', (1093, 1095), False, 'from cryptography.hazmat.primitives import hashes, serialization\n')] |
"""
FilterGui module of the Gui utility package MyGui
Naturally used within MainGui
stand-alone also possible with certain limitations
usage:
from MyGui import Log
Log.run()
"""
#get compatible to python3
from __future__ import absolute_import, division, print_function
import os
#enable compatibility to both pyqt4 and pyqt5
_modname = os.environ.setdefault('QT_API', 'pyqt')
assert _modname in ('pyqt', 'pyqt5')
if os.environ['QT_API'].startswith('pyqt'):
try:
if os.environ['QT_API'] == 'pyqt5':
from PyQt5.QtWidgets import (QApplication, QDockWidget, QFileDialog,
QMainWindow)
### import ui created with qtdesigner
### create python file with:
### pyuic5 Log.ui > ui_Log_qt5.py
from .ui_Log_qt5 import Ui_DockWidget
from PyQt5 import QtCore
else:
from PyQt4.QtGui import (QApplication, QDockWidget, QFileDialog,
QMainWindow)
### import ui created with qtdesigner
### create python file with:
### pyuic4 Log.ui > ui_Log_qt4.py
from .ui_Log_qt4 import Ui_DockWidget
from PyQt4 import QtCore
except ImportError as e:
print (e)
raise ImportError("GUI Logger requires PyQt4 or PyQt5. "
"QT_API: {!s}".format(os.environ['QT_API']))
import logging
### dummy class to define logging signal
class LogSignal(QtCore.QObject):
"""
Dummy class to define customized logging signals
usage:
logSig = LogSignal()
### connect
logSig.log.connect(SLOT)
...
### emit
logSig.log.emit_log(str)
"""
### the signal
log = QtCore.pyqtSignal(str)
### emit the signal
def emit_log(self, text):
self.log.emit(text)
class LogHandler(logging.Handler):
"""
Customized logging.Handler class with color scheme own emit method
usage (within Qt4 widget):
self.logger = logging.getLogger()
self.handler = LogHandler(self) ### pass Qt4 widget here!
self.logger.addHandler(self.handler)
"""
def __init__(self, parent=None):
### run regular Handler __init__
logging.Handler.__init__(self)
self.parent = None
### create SIGNAL and connect to parent SLOT if parent widget was passed
### parent widget must have SLOT method defined
if parent:
self.parent = parent
self.logSig = LogSignal()
self.logSig.log.connect(parent.printLog)
_use_colors = True
COLORS = {
'DEBUG': 'blue',
'INFO': 'green',
'WARNING': 'orange',
'ERROR': 'red',
'CRITICAL': 'red'
}
def emit(self, record):
text = str(self.format(record))
### emit signal if parent was passed
if self.parent:
### color text
if LogHandler._use_colors:
text = "<font color='%s'> %s </font>" \
% ( LogHandler.COLORS[record.levelname], text)
self.logSig.emit_log(text)
### otherwise just print log message
else:
print(text)
class LogFormat(logging.Formatter):
"""
Customized logging.Formatter with different formatting for the levels
usage (within Qt4 widget):
self.logger = logging.getLogger()
self.handler = LogHandler(self) ### pass Qt4 widget here!
self.handler.setFormatter(LogFormat())
self.logger.addHandler(self.handler)
"""
#Andreas's Formatters:
# dbg_fmt = "[DEBUG] %(module)s:%(funcName)s line:%(lineno)d >> %(msg)s"
# info_fmt = "[INFO] %(module)s:%(funcName)s >> %(msg)s"
# warn_fmt = "[WARNING] %(module)s:%(funcName)s >> %(msg)s"
# err_fmt = "[ERROR] %(module)s:%(funcName)s >> %(msg)s"
#My Formatters (prints just the messages):
#
level_translation = {"10: ":"[DEBUG] ",
"20: ":"[INFO] ",
"30: ":"[WARNING] ",
"40: ":"[ERROR] ",
"50: ":"[CRITICAL] "}
datefmt = ""
def __init__(self, fmt="%(levelno)s: %(asctime)s %(msg)s", datefmt=None,
infoString=True):
logging.Formatter.__init__(self, fmt,datefmt)
self.datefmt = datefmt
if not infoString:
self.level_translation["20: "] = ""
# self.info_fmt = "%(asctime)s %(msg)s"
def format(self, record):
# Call the base class formatter to do the grunt work
result = logging.Formatter.format(self, record)
#replace the numeric logging levels with strings
result = self.level_translation[result[0:4]] + result[4:]
return result
class QtDockLog(QDockWidget):
"""
Log Dock Widget to use in a PyQt GUI
Will output all the logging.info(), logging.debug() etc. info
usage:
log_dock = QtDockLog()
addDockWidget(QtCore.Qt.BottomDockWidgetArea, log_dock)
"""
def __init__(self,datefmt=None,infoString = True):
"""Constructor of the QtDockLog
Log Dock Widget to use in a PyQt GUI.
Will output all info written to logging.info(), logging.debug() etc.
usage:
log_dock = QtDockLog()
addDockWidget(QtCore.Qt.BottomDockWidgetArea, log_dock)
Parameters
---------
datefmt : format string, optional
format string for date time in log messages
infoString : bool, optional
if false info messages are not prefixed with the log level string
"""
QDockWidget.__init__(self)
# Set up the user interface from Designer.
self.ui = Ui_DockWidget()
self.ui.setupUi(self)
### define logger and handler
self.logger = logging.getLogger()
self.handler = LogHandler(self)
self.handler.setFormatter(LogFormat(datefmt=datefmt,infoString=infoString))
self.logger.setLevel(logging.DEBUG)
self.logger.addHandler(self.handler)
# define file for the second handler, saving the complete log to file
self.filename = "log.out"
# remove possibly existing log file (otherwise it appends to it)
try:
os.remove(self.filename)
except:
pass
# define second file handler, saving all of the log to self.filename
self.filehandler = logging.FileHandler(self.filename)
self.filehandler.setLevel(logging.DEBUG) # set to DEBUG --> saves all
self.filehandler.setFormatter(LogFormat(datefmt=datefmt,infoString=infoString)) # use the same format
self.logger.addHandler(self.filehandler) # attach to logger
### connections
self.ui.comboBox.currentIndexChanged.connect(
lambda: self.setLevel(self.ui.comboBox.currentText()))
self.ui.pushButtonSave.clicked.connect(self.saveLog)
def saveLog(self):
"""
Saves the log text currently shown in the widget to a user-selected file.
"""
savePath =QFileDialog.getSaveFileName(self,caption='select save file',
filter="Text files (*.txt);;All files (*)")
#in pyqt5 a tuple is returned, unpack it
if os.environ['QT_API'] == 'pyqt5':
savePath, _ = savePath
if savePath != '':
text = str(self.ui.textBrowser.toPlainText()) # get log text
f = open(savePath, 'w') # open file
f.writelines(text) # write text to file
f.close() # close file
def setLevel(self, level):
"""
Sets the widget to show only log entries at or above
the given level.
"""
level = str(level).upper()
level_int = eval("logging.%s" %(level))
# clear the text window
self.ui.textBrowser.clear()
# open the log file
with open(self.filename, 'r') as f:
# go through lines in log file
for line in f:
line = line.strip() # delete end of line
try:
level_line = line.split('[')[1].split(']')[0] # get level of line
except IndexError:
level_line = "INFO" #default to info
level_line_int = eval("logging.%s" %(level_line))
# if number of line level is greater equal to number of
# the global level, append to text in color
if level_line_int >= level_int:
text = "<font color='%s'> %s </font>" \
% ( self.handler.COLORS[level_line], line)
self.ui.textBrowser.append(text)
cmd = "self.handler.setLevel(logging.%s)" % (level)
eval(cmd)
### print new level
self.logger.debug("Logging level set to %s." %(level))
### pyQt SLOT used to receive emitted SIGNAL from LogHandler()
### pass self to LogHandler to make this work
@QtCore.pyqtSlot(str)
def printLog(self, text):
self.ui.textBrowser.append(text)
def run():
import sys
app = QApplication(sys.argv)
win = QMainWindow()
win.addDockWidget(QtCore.Qt.TopDockWidgetArea, QtDockLog())
win.show()
sys.exit(app.exec_())
if __name__ == '__main__':
run()
| [
"os.environ.setdefault",
"PyQt4.QtGui.QApplication",
"logging.getLogger",
"PyQt4.QtGui.QDockWidget.__init__",
"PyQt4.QtCore.pyqtSlot",
"PyQt4.QtGui.QMainWindow",
"PyQt4.QtCore.pyqtSignal",
"logging.Formatter.format",
"PyQt4.QtGui.QFileDialog.getSaveFileName",
"logging.Formatter.__init__",
"logging.FileHandler",
"logging.Handler.__init__",
"os.remove"
] | [((349, 388), 'os.environ.setdefault', 'os.environ.setdefault', (['"""QT_API"""', '"""pyqt"""'], {}), "('QT_API', 'pyqt')\n", (370, 388), False, 'import os\n'), ((1866, 1888), 'PyQt4.QtCore.pyqtSignal', 'QtCore.pyqtSignal', (['str'], {}), '(str)\n', (1883, 1888), False, 'from PyQt4 import QtCore\n'), ((9637, 9657), 'PyQt4.QtCore.pyqtSlot', 'QtCore.pyqtSlot', (['str'], {}), '(str)\n', (9652, 9657), False, 'from PyQt4 import QtCore\n'), ((9785, 9807), 'PyQt4.QtGui.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (9797, 9807), False, 'from PyQt4.QtGui import QApplication, QDockWidget, QFileDialog, QMainWindow\n'), ((9818, 9831), 'PyQt4.QtGui.QMainWindow', 'QMainWindow', ([], {}), '()\n', (9829, 9831), False, 'from PyQt4.QtGui import QApplication, QDockWidget, QFileDialog, QMainWindow\n'), ((2412, 2442), 'logging.Handler.__init__', 'logging.Handler.__init__', (['self'], {}), '(self)\n', (2436, 2442), False, 'import logging\n'), ((4517, 4563), 'logging.Formatter.__init__', 'logging.Formatter.__init__', (['self', 'fmt', 'datefmt'], {}), '(self, fmt, datefmt)\n', (4543, 4563), False, 'import logging\n'), ((4839, 4877), 'logging.Formatter.format', 'logging.Formatter.format', (['self', 'record'], {}), '(self, record)\n', (4863, 4877), False, 'import logging\n'), ((5999, 6025), 'PyQt4.QtGui.QDockWidget.__init__', 'QDockWidget.__init__', (['self'], {}), '(self)\n', (6019, 6025), False, 'from PyQt4.QtGui import QApplication, QDockWidget, QFileDialog, QMainWindow\n'), ((6220, 6239), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6237, 6239), False, 'import logging\n'), ((6844, 6878), 'logging.FileHandler', 'logging.FileHandler', (['self.filename'], {}), '(self.filename)\n', (6863, 6878), False, 'import logging\n'), ((7485, 7595), 'PyQt4.QtGui.QFileDialog.getSaveFileName', 'QFileDialog.getSaveFileName', (['self'], {'caption': '"""select save file"""', 'filter': '"""Text files (*.txt);;All files (*)"""'}), "(self, caption='select save file', filter=\n 'Text files (*.txt);;All files (*)')\n", (7512, 7595), False, 'from PyQt4.QtGui import QApplication, QDockWidget, QFileDialog, QMainWindow\n'), ((6672, 6696), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (6681, 6696), False, 'import os\n')] |
# Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2022 NLTK Project
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>> (minor additions)
# Contributors: matthewmc, clouds56
# URL: <https://www.nltk.org/>
# For license information, see LICENSE.TXT
r"""
NLTK Tokenizer Package
Tokenizers divide strings into lists of substrings. For example,
tokenizers can be used to find the words and punctuation in a string:
>>> from nltk.tokenize import word_tokenize
>>> s = '''Good muffins cost $3.88\nin New York. Please buy me
... two of them.\n\nThanks.'''
>>> word_tokenize(s)
['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
This particular tokenizer requires the Punkt sentence tokenization
models to be installed. NLTK also provides a simpler,
regular-expression based tokenizer, which splits text on whitespace
and punctuation:
>>> from nltk.tokenize import wordpunct_tokenize
>>> wordpunct_tokenize(s)
['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York', '.',
'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
We can also operate at the level of sentences, using the sentence
tokenizer directly as follows:
>>> from nltk.tokenize import sent_tokenize, word_tokenize
>>> sent_tokenize(s)
['Good muffins cost $3.88\nin New York.', 'Please buy me\ntwo of them.', 'Thanks.']
>>> [word_tokenize(t) for t in sent_tokenize(s)]
[['Good', 'muffins', 'cost', '$', '3.88', 'in', 'New', 'York', '.'],
['Please', 'buy', 'me', 'two', 'of', 'them', '.'], ['Thanks', '.']]
Caution: when tokenizing a Unicode string, make sure you are not
using an encoded version of the string (it may be necessary to
decode it first, e.g. with ``s.decode("utf8")``.
NLTK tokenizers can produce token-spans, represented as tuples of integers
having the same semantics as string slices, to support efficient comparison
of tokenizers. (These methods are implemented as generators.)
>>> from nltk.tokenize import WhitespaceTokenizer
>>> list(WhitespaceTokenizer().span_tokenize(s))
[(0, 4), (5, 12), (13, 17), (18, 23), (24, 26), (27, 30), (31, 36), (38, 44),
(45, 48), (49, 51), (52, 55), (56, 58), (59, 64), (66, 73)]
There are numerous ways to tokenize text. If you need more control over
tokenization, see the other methods provided in this package.
For further information, please see Chapter 3 of the NLTK book.
"""
import re
from nltk.data import load
from nltk.tokenize.casual import TweetTokenizer, casual_tokenize
from nltk.tokenize.destructive import NLTKWordTokenizer
from nltk.tokenize.legality_principle import LegalitySyllableTokenizer
from nltk.tokenize.mwe import MWETokenizer
from nltk.tokenize.punkt import PunktSentenceTokenizer
from nltk.tokenize.regexp import (
BlanklineTokenizer,
RegexpTokenizer,
WhitespaceTokenizer,
WordPunctTokenizer,
blankline_tokenize,
regexp_tokenize,
wordpunct_tokenize,
)
from nltk.tokenize.repp import ReppTokenizer
from nltk.tokenize.sexpr import SExprTokenizer, sexpr_tokenize
from nltk.tokenize.simple import (
LineTokenizer,
SpaceTokenizer,
TabTokenizer,
line_tokenize,
)
from nltk.tokenize.sonority_sequencing import SyllableTokenizer
from nltk.tokenize.stanford_segmenter import StanfordSegmenter
from nltk.tokenize.texttiling import TextTilingTokenizer
from nltk.tokenize.toktok import ToktokTokenizer
from nltk.tokenize.treebank import TreebankWordDetokenizer, TreebankWordTokenizer
from nltk.tokenize.util import regexp_span_tokenize, string_span_tokenize
# Standard sentence tokenizer.
def sent_tokenize(text, language="english"):
"""
Return a sentence-tokenized copy of *text*,
using NLTK's recommended sentence tokenizer
(currently :class:`.PunktSentenceTokenizer`
for the specified language).
:param text: text to split into sentences
:param language: the model name in the Punkt corpus
"""
tokenizer = load(f"tokenizers/punkt/{language}.pickle")
return tokenizer.tokenize(text)
# Standard word tokenizer.
_treebank_word_tokenizer = NLTKWordTokenizer()
def word_tokenize(text, language="english", preserve_line=False):
"""
Return a tokenized copy of *text*,
using NLTK's recommended word tokenizer
(currently an improved :class:`.TreebankWordTokenizer`
along with :class:`.PunktSentenceTokenizer`
for the specified language).
:param text: text to split into words
:type text: str
:param language: the model name in the Punkt corpus
:type language: str
:param preserve_line: A flag to decide whether to sentence tokenize the text or not.
:type preserve_line: bool
"""
sentences = [text] if preserve_line else sent_tokenize(text, language)
return [
token for sent in sentences for token in _treebank_word_tokenizer.tokenize(sent)
]
| [
"nltk.tokenize.destructive.NLTKWordTokenizer",
"nltk.data.load"
] | [((4282, 4301), 'nltk.tokenize.destructive.NLTKWordTokenizer', 'NLTKWordTokenizer', ([], {}), '()\n', (4299, 4301), False, 'from nltk.tokenize.destructive import NLTKWordTokenizer\n'), ((4141, 4184), 'nltk.data.load', 'load', (['f"""tokenizers/punkt/{language}.pickle"""'], {}), "(f'tokenizers/punkt/{language}.pickle')\n", (4145, 4184), False, 'from nltk.data import load\n')] |
# -*- coding: utf-8 -*-
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///../proveit.db'
app.secret_key = '<KEY>'
db = SQLAlchemy(app)
| [
"flask.ext.sqlalchemy.SQLAlchemy",
"flask.Flask"
] | [((99, 114), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (104, 114), False, 'from flask import Flask\n'), ((211, 226), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (221, 226), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n')] |
from fabric.decorators import task
from refabric.contrib import blueprints
__all__ = ['setup']
blueprint = blueprints.get(__name__)
@task
def setup():
"""
Install glusterfs-client
"""
install()
@task
def install():
from refabric.context_managers import sudo
from .debian import add_apt_ppa, apt_get
with sudo():
apt_get('install', 'software-properties-common')
add_apt_ppa('gluster/glusterfs-3.5', src=True)
apt_get('install', 'glusterfs-client')
| [
"refabric.contrib.blueprints.get",
"refabric.context_managers.sudo"
] | [((111, 135), 'refabric.contrib.blueprints.get', 'blueprints.get', (['__name__'], {}), '(__name__)\n', (125, 135), False, 'from refabric.contrib import blueprints\n'), ((341, 347), 'refabric.context_managers.sudo', 'sudo', ([], {}), '()\n', (345, 347), False, 'from refabric.context_managers import sudo\n')] |
from abc import ABCMeta, abstractmethod
from six import with_metaclass
from . import afnbase
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
class AFnFbx(with_metaclass(ABCMeta, afnbase.AFnBase)):
"""
Overload of AFnBase that outlines function set behaviour for DCC fbx operations.
"""
__slots__ = ()
@abstractmethod
def setMeshExportParams(self, **kwargs):
"""
Adopts the export settings from the supplied kwargs.
:rtype: None
"""
pass
@abstractmethod
def setAnimExportParams(self, **kwargs):
"""
Adopts the animation settings from the supplied kwargs.
:rtype: None
"""
pass
@abstractmethod
def exportSelection(self, filePath):
"""
Exports the active selection to the specified file path.
:type filePath: str
:rtype: None
"""
pass
| [
"logging.basicConfig",
"logging.getLogger",
"six.with_metaclass"
] | [((110, 131), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (129, 131), False, 'import logging\n'), ((138, 165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (155, 165), False, 'import logging\n'), ((208, 248), 'six.with_metaclass', 'with_metaclass', (['ABCMeta', 'afnbase.AFnBase'], {}), '(ABCMeta, afnbase.AFnBase)\n', (222, 248), False, 'from six import with_metaclass\n')] |
# Copyright (C) 2021 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import time
from openhtf.util import conf
from openhtf_common.plugs import Aardvark, BK1785B, RequiresPlug
conf.declare(
'fixture_psu_current',
default_value=0.50,
description='Current (in amps) to supply to fixture.')
conf.declare(
'fixture_psu_voltage',
default_value=5.0,
description='Voltage (in volts) to supply to fixture.')
conf.declare(
'fixture_settle_s',
default_value=1.0,
description='Time (in seconds) to wait after supplying power to fixture.')
class Fixture(RequiresPlug):
"""Plug that provides access to a PiDP-11 I/O Expander test fixture.
The test fixture contains a simple interposer with discrete logic to
connect or isolate I/O pins over GPIO and is programmed using a Total
Phase Aardvark. Power is controlled using a B&K Precision BK1785B power
supply over serial.
Pin isolation is controlled using the 5V target power pins provided by the
Aardvark. This signal is pulled down, effectively making it an additional
GPIO output.
"""
GPIO_A0 = 1<<2 # AA_GPIO_MISO (Pin 5)
GPIO_A1 = 1<<3 # AA_GPIO_SCK (Pin 7)
GPIO_A2 = 1<<4 # AA_GPIO_MOSI (Pin 8)
GPIO_INT = 1<<5 # AA_GPIO_SS (Pin 9)
GPIO_DIRECTION = GPIO_A0 | GPIO_A1 | GPIO_A2 #| GPIO_INT
@conf.inject_positional_args
def __init__(self, fixture_psu_current, fixture_psu_voltage, fixture_settle_s):
super(Fixture, self).__init__(aardvark=Aardvark, psu=BK1785B)
self.aardvark.set_gpio_direction(Fixture.GPIO_DIRECTION)
self.psu.set_current(fixture_psu_current)
self.psu.set_voltage(fixture_psu_voltage)
self.psu.set_output(True)
self.logger.debug('Waiting for DUT to settle.')
time.sleep(fixture_settle_s)
def connect_pins(self, pin):
# Test probes were connected in reversed order. Rather than rewire the
# test fixture, we account for it here:
reversed_pin = pin ^ 0x7
self.logger.debug('Connecting GP0 and GP1 pin %d.', pin)
mask = sum(map(lambda x: reversed_pin & 1<<x[0] and x[1],
enumerate([Fixture.GPIO_A0, Fixture.GPIO_A1, Fixture.GPIO_A2])))
self.aardvark.gpio_set(mask)
self.aardvark.set_target_power(True)
def isolate_pins(self):
self.logger.debug('Isolating GP0 and GP1 pins.')
self.aardvark.set_target_power(False)
def has_interrupt(self):
self.logger.debug('Checking for interrupt.')
mask = self.aardvark.gpio_get()
return mask & Fixture.GPIO_INT == 0
def wait_for_interrupt(self):
self.logger.debug('Waiting for interrupt.')
mask = self.aardvark.gpio_get()
while True:
if mask & Fixture.GPIO_INT == 0:
break
mask = self.aardvark.gpio_wait()
| [
"openhtf.util.conf.declare",
"time.sleep"
] | [((1414, 1528), 'openhtf.util.conf.declare', 'conf.declare', (['"""fixture_psu_current"""'], {'default_value': '(0.5)', 'description': '"""Current (in amps) to supply to fixture."""'}), "('fixture_psu_current', default_value=0.5, description=\n 'Current (in amps) to supply to fixture.')\n", (1426, 1528), False, 'from openhtf.util import conf\n'), ((1539, 1654), 'openhtf.util.conf.declare', 'conf.declare', (['"""fixture_psu_voltage"""'], {'default_value': '(5.0)', 'description': '"""Voltage (in volts) to supply to fixture."""'}), "('fixture_psu_voltage', default_value=5.0, description=\n 'Voltage (in volts) to supply to fixture.')\n", (1551, 1654), False, 'from openhtf.util import conf\n'), ((1664, 1795), 'openhtf.util.conf.declare', 'conf.declare', (['"""fixture_settle_s"""'], {'default_value': '(1.0)', 'description': '"""Time (in seconds) to wait after supplying power to fixture."""'}), "('fixture_settle_s', default_value=1.0, description=\n 'Time (in seconds) to wait after supplying power to fixture.')\n", (1676, 1795), False, 'from openhtf.util import conf\n'), ((3025, 3053), 'time.sleep', 'time.sleep', (['fixture_settle_s'], {}), '(fixture_settle_s)\n', (3035, 3053), False, 'import time\n')] |
"""Controllers for endpoints of the API"""
from apiflask import APIFlask, Schema, input, output, abort, doc
from apiflask.fields import Float, Integer, String
from marshmallow.exceptions import ValidationError
from werkzeug.wrappers import Response as FlaskResponse
from meteofrenchapi.core.accwea import (
get_vis_prcpt,
get_uvidx,
AwException,
)
from meteofrenchapi import logger
# INPUTS SCHEMAS
class GeolocationParams(Schema):
"""
Schema for Geolocation params
"""
lt = Float(
required=True,
metadata={
"title": "Lt",
"description": "The lt of the geoposition where to get weather information.",
"example": 48.870502,
},
)
lg = Float(
required=True,
metadata={
"title": "Lg",
"description": "The lg of the geoposition where to get weather information.",
"example": 2.304897,
},
)
# OUTPUT SCHEMAS
class ApiInfoResponse(Schema):
"""
Schema for index response
"""
name = String(
metadata={
"title": "Name",
"description": "The name of the API.",
"example": "apiname",
}
)
version = String(
metadata={
"title": "Version",
"description": "The version of the API.",
"example": "0.0.1",
}
)
class PrcptResponse(Schema):
"""
Schema for prcpt response
"""
vis = Float(
metadata={
"title": "Vis",
"description": "The current vis in meters.",
"example": 9700.0,
}
)
prcpt = Float(
metadata={
"title": "Prcpt",
"description": "Amount of prcpt that has fallen in the past hour in meters.",
"example": 0.001,
}
)
class UvResponse(Schema):
"""
Schema for uv response
"""
uvidx = Integer(metadata={"title": "Uvidx", "description": "uvidx", "example": 1})
# ENDPOINTS
def register_endpoints(app: APIFlask) -> None:
"""
function to register endpoints into the given app
"""
@app.get("/")
@doc(tag="Weather", operation_id="getApiInfo")
@output(ApiInfoResponse, description="Successful response. API info")
def index() -> FlaskResponse:
"""
Returns a json containing the generic information about the current API.
"""
response = ApiInfoResponse().load(
{
"name": app.name,
"version": app.config["VERSION"],
}
)
return response
@app.get("/prcpt")
@doc(tag="Weather", operation_id="getPrcpt")
@input(GeolocationParams, location="query")
@output(
PrcptResponse,
description="Successful response. Prcpt information",
)
def get_prcpt(geolocation: GeolocationParams) -> FlaskResponse:
"""
        Return a JSON object with the current vis and the
        amount of prcpt for the location given by lt and lg.
"""
try:
vis, prcpt = get_vis_prcpt(geolocation["lt"], geolocation["lg"])
response = PrcptResponse().load(
{
"vis": vis,
"prcpt": prcpt,
}
)
except (AwException, ValidationError):
abort(500)
return response
@app.get("/uvidx")
@doc(tag="Weather", operation_id="getUv")
@input(GeolocationParams, location="query")
@output(UvResponse, description="Successful response. uvidx information")
def get_uv(geolocation: GeolocationParams) -> FlaskResponse:
"""
Return the current uvidx for a specific
location defined with lt and lg.
"""
try:
uvidx = get_uvidx(geolocation["lt"], geolocation["lg"])
response = UvResponse().load(
{
"uvidx": uvidx,
}
)
except (AwException, ValidationError):
abort(500)
return response
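# --- Illustrative usage sketch (not part of the original module) ---
# Shows how register_endpoints() could be wired into an APIFlask app and hit with
# Flask's test client. The demo app name and VERSION value are assumptions; the
# accwea backend must be configured for the weather endpoints to return real data.
if __name__ == "__main__":  # pragma: no cover
    demo_app = APIFlask("meteofrenchapi-demo")
    demo_app.config["VERSION"] = "0.0.1"
    register_endpoints(demo_app)
    with demo_app.test_client() as client:
        print(client.get("/").get_json())
        print(client.get("/prcpt", query_string={"lt": 48.870502, "lg": 2.304897}).status_code)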
| [
"apiflask.output",
"apiflask.input",
"apiflask.fields.Integer",
"apiflask.abort",
"apiflask.fields.String",
"meteofrenchapi.core.accwea.get_uvidx",
"meteofrenchapi.core.accwea.get_vis_prcpt",
"apiflask.fields.Float",
"apiflask.doc"
] | [((511, 665), 'apiflask.fields.Float', 'Float', ([], {'required': '(True)', 'metadata': "{'title': 'Lt', 'description':\n 'The lt of the geoposition where to get weather information.',\n 'example': 48.870502}"}), "(required=True, metadata={'title': 'Lt', 'description':\n 'The lt of the geoposition where to get weather information.',\n 'example': 48.870502})\n", (516, 665), False, 'from apiflask.fields import Float, Integer, String\n'), ((737, 890), 'apiflask.fields.Float', 'Float', ([], {'required': '(True)', 'metadata': "{'title': 'Lg', 'description':\n 'The lg of the geoposition where to get weather information.',\n 'example': 2.304897}"}), "(required=True, metadata={'title': 'Lg', 'description':\n 'The lg of the geoposition where to get weather information.',\n 'example': 2.304897})\n", (742, 890), False, 'from apiflask.fields import Float, Integer, String\n'), ((1063, 1162), 'apiflask.fields.String', 'String', ([], {'metadata': "{'title': 'Name', 'description': 'The name of the API.', 'example': 'apiname'}"}), "(metadata={'title': 'Name', 'description': 'The name of the API.',\n 'example': 'apiname'})\n", (1069, 1162), False, 'from apiflask.fields import Float, Integer, String\n'), ((1234, 1337), 'apiflask.fields.String', 'String', ([], {'metadata': "{'title': 'Version', 'description': 'The version of the API.', 'example':\n '0.0.1'}"}), "(metadata={'title': 'Version', 'description':\n 'The version of the API.', 'example': '0.0.1'})\n", (1240, 1337), False, 'from apiflask.fields import Float, Integer, String\n'), ((1483, 1583), 'apiflask.fields.Float', 'Float', ([], {'metadata': "{'title': 'Vis', 'description': 'The current vis in meters.', 'example': 9700.0\n }"}), "(metadata={'title': 'Vis', 'description': 'The current vis in meters.',\n 'example': 9700.0})\n", (1488, 1583), False, 'from apiflask.fields import Float, Integer, String\n'), ((1653, 1791), 'apiflask.fields.Float', 'Float', ([], {'metadata': "{'title': 'Prcpt', 'description':\n 'Amount of prcpt that has fallen in the past hour in meters.',\n 'example': 0.001}"}), "(metadata={'title': 'Prcpt', 'description':\n 'Amount of prcpt that has fallen in the past hour in meters.',\n 'example': 0.001})\n", (1658, 1791), False, 'from apiflask.fields import Float, Integer, String\n'), ((1929, 2003), 'apiflask.fields.Integer', 'Integer', ([], {'metadata': "{'title': 'Uvidx', 'description': 'uvidx', 'example': 1}"}), "(metadata={'title': 'Uvidx', 'description': 'uvidx', 'example': 1})\n", (1936, 2003), False, 'from apiflask.fields import Float, Integer, String\n'), ((2161, 2206), 'apiflask.doc', 'doc', ([], {'tag': '"""Weather"""', 'operation_id': '"""getApiInfo"""'}), "(tag='Weather', operation_id='getApiInfo')\n", (2164, 2206), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((2212, 2280), 'apiflask.output', 'output', (['ApiInfoResponse'], {'description': '"""Successful response. API info"""'}), "(ApiInfoResponse, description='Successful response. 
API info')\n", (2218, 2280), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((2638, 2681), 'apiflask.doc', 'doc', ([], {'tag': '"""Weather"""', 'operation_id': '"""getPrcpt"""'}), "(tag='Weather', operation_id='getPrcpt')\n", (2641, 2681), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((2687, 2729), 'apiflask.input', 'input', (['GeolocationParams'], {'location': '"""query"""'}), "(GeolocationParams, location='query')\n", (2692, 2729), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((2735, 2810), 'apiflask.output', 'output', (['PrcptResponse'], {'description': '"""Successful response. Prcpt information"""'}), "(PrcptResponse, description='Successful response. Prcpt information')\n", (2741, 2810), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((3428, 3468), 'apiflask.doc', 'doc', ([], {'tag': '"""Weather"""', 'operation_id': '"""getUv"""'}), "(tag='Weather', operation_id='getUv')\n", (3431, 3468), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((3474, 3516), 'apiflask.input', 'input', (['GeolocationParams'], {'location': '"""query"""'}), "(GeolocationParams, location='query')\n", (3479, 3516), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((3522, 3594), 'apiflask.output', 'output', (['UvResponse'], {'description': '"""Successful response. uvidx information"""'}), "(UvResponse, description='Successful response. uvidx information')\n", (3528, 3594), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((3090, 3141), 'meteofrenchapi.core.accwea.get_vis_prcpt', 'get_vis_prcpt', (["geolocation['lt']", "geolocation['lg']"], {}), "(geolocation['lt'], geolocation['lg'])\n", (3103, 3141), False, 'from meteofrenchapi.core.accwea import get_vis_prcpt, get_uvidx, AwException\n'), ((3806, 3853), 'meteofrenchapi.core.accwea.get_uvidx', 'get_uvidx', (["geolocation['lt']", "geolocation['lg']"], {}), "(geolocation['lt'], geolocation['lg'])\n", (3815, 3853), False, 'from meteofrenchapi.core.accwea import get_vis_prcpt, get_uvidx, AwException\n'), ((3364, 3374), 'apiflask.abort', 'abort', (['(500)'], {}), '(500)\n', (3369, 3374), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n'), ((4041, 4051), 'apiflask.abort', 'abort', (['(500)'], {}), '(500)\n', (4046, 4051), False, 'from apiflask import APIFlask, Schema, input, output, abort, doc\n')] |
import uuid
import pytest
import sirius_sdk
from sirius_sdk.agent.wallet.abstract.non_secrets import RetrieveRecordOptions
from .helpers import ServerTestSuite
@pytest.mark.asyncio
async def test_record_value_ops(test_suite: ServerTestSuite):
params = test_suite.get_agent_params('agent4')
async with sirius_sdk.context(params['server_address'], params['credentials'], params['p2p']):
value = 'my-value'
my_id = 'my-id-' + uuid.uuid4().hex
await sirius_sdk.NonSecrets.add_wallet_record('type', my_id, value)
opts = RetrieveRecordOptions()
opts.check_all()
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['id'] == my_id
assert value_info['tags'] == {}
assert value_info['value'] == value
assert value_info['type'] == 'type'
value_new = 'my-new-value'
await sirius_sdk.NonSecrets.update_wallet_record_value('type', my_id, value_new)
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['value'] == value_new
await sirius_sdk.NonSecrets.delete_wallet_record('type', my_id)
@pytest.mark.asyncio
async def test_record_tags_ops(test_suite: ServerTestSuite):
params = test_suite.get_agent_params('agent4')
async with sirius_sdk.context(params['server_address'], params['credentials'], params['p2p']):
my_id = 'my-id-' + uuid.uuid4().hex
value = 'my-value'
tags = {
'tag1': 'val1',
'~tag2': 'val2'
}
await sirius_sdk.NonSecrets.add_wallet_record('type', my_id, value, tags)
opts = RetrieveRecordOptions()
opts.check_all()
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['id'] == my_id
assert value_info['tags'] == tags
assert value_info['value'] == value
assert value_info['type'] == 'type'
upd_tags = {
'ext-tag': 'val3'
}
await sirius_sdk.NonSecrets.update_wallet_record_tags('type', my_id, upd_tags)
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['tags'] == upd_tags
await sirius_sdk.NonSecrets.add_wallet_record_tags('type', my_id, tags)
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['tags'] == dict(**upd_tags, **tags)
await sirius_sdk.NonSecrets.delete_wallet_record_tags('type', my_id, ['ext-tag'])
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['tags'] == tags
@pytest.mark.asyncio
async def test_maintain_tags_only_update_ops(test_suite: ServerTestSuite):
params = test_suite.get_agent_params('agent4')
async with sirius_sdk.context(params['server_address'], params['credentials'], params['p2p']):
my_id = 'my-id-' + uuid.uuid4().hex
value = 'my-value'
await sirius_sdk.NonSecrets.add_wallet_record('type', my_id, value, )
opts = RetrieveRecordOptions()
opts.check_all()
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['id'] == my_id
assert value_info['tags'] == {}
assert value_info['value'] == value
assert value_info['type'] == 'type'
tags1 = {
'tag1': 'val1',
'~tag2': 'val2'
}
await sirius_sdk.NonSecrets.update_wallet_record_tags('type', my_id, tags1)
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['tags'] == tags1
tags2 = {
'tag3': 'val3',
}
await sirius_sdk.NonSecrets.update_wallet_record_tags('type', my_id, tags2)
value_info = await sirius_sdk.NonSecrets.get_wallet_record('type', my_id, opts)
assert value_info['tags'] == tags2
@pytest.mark.asyncio
async def test_wallet_search_sqlite(test_suite: ServerTestSuite):
params = test_suite.get_agent_params('agent4')
async with sirius_sdk.context(params['server_address'], params['credentials'], params['p2p']):
my_id1 = 'my-id-' + uuid.uuid4().hex
my_id2 = 'my-id-' + uuid.uuid4().hex
type_ = 'type_' + uuid.uuid4().hex
opts = RetrieveRecordOptions()
opts.check_all()
tags1 = {
'tag1': 'val1',
'~tag2': '5',
'marker': 'A'
}
tags2 = {
'tag3': 'val3',
'~tag4': '6',
'marker': 'B'
}
await sirius_sdk.NonSecrets.add_wallet_record(type_, my_id1, 'value1', tags1)
await sirius_sdk.NonSecrets.add_wallet_record(type_, my_id2, 'value2', tags2)
query = {
"tag1": "val1"
}
records, total = await sirius_sdk.NonSecrets.wallet_search(type_, query, opts)
assert total == 1
assert 'value1' in str(records)
query = {
"$or": [{"tag1": "val1"}, {"~tag4": "6"}]
}
records, total = await sirius_sdk.NonSecrets.wallet_search(type_, query, opts)
assert len(records) == 1
assert total == 2
records, total = await sirius_sdk.NonSecrets.wallet_search(type_, query, opts, limit=1000)
assert len(records) == 2
assert total == 2
query = {
"marker": {"$in": ["A", "C"]}
}
records, total = await sirius_sdk.NonSecrets.wallet_search(type_, query, opts)
assert 1 == total
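# Quick reference (illustrative summary of the calls exercised in the tests above):
#   add_wallet_record(type, id, value[, tags])            - create a record
#   get_wallet_record(type, id, RetrieveRecordOptions())  - read it back
#   update_wallet_record_value / update_wallet_record_tags (replaces the tag set)
#   add_wallet_record_tags (merges new tags) / delete_wallet_record_tags / delete_wallet_record
#   wallet_search(type, query, opts, limit=...) -> (records, total), where query is a
#   WQL dict supporting operators such as "$or" and "$in"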
| [
"sirius_sdk.NonSecrets.update_wallet_record_value",
"sirius_sdk.NonSecrets.wallet_search",
"sirius_sdk.NonSecrets.delete_wallet_record_tags",
"sirius_sdk.NonSecrets.add_wallet_record",
"sirius_sdk.context",
"uuid.uuid4",
"sirius_sdk.NonSecrets.add_wallet_record_tags",
"sirius_sdk.agent.wallet.abstract.non_secrets.RetrieveRecordOptions",
"sirius_sdk.NonSecrets.get_wallet_record",
"sirius_sdk.NonSecrets.delete_wallet_record",
"sirius_sdk.NonSecrets.update_wallet_record_tags"
] | [((313, 400), 'sirius_sdk.context', 'sirius_sdk.context', (["params['server_address']", "params['credentials']", "params['p2p']"], {}), "(params['server_address'], params['credentials'], params[\n 'p2p'])\n", (331, 400), False, 'import sirius_sdk\n'), ((559, 582), 'sirius_sdk.agent.wallet.abstract.non_secrets.RetrieveRecordOptions', 'RetrieveRecordOptions', ([], {}), '()\n', (580, 582), False, 'from sirius_sdk.agent.wallet.abstract.non_secrets import RetrieveRecordOptions\n'), ((1348, 1435), 'sirius_sdk.context', 'sirius_sdk.context', (["params['server_address']", "params['credentials']", "params['p2p']"], {}), "(params['server_address'], params['credentials'], params[\n 'p2p'])\n", (1366, 1435), False, 'import sirius_sdk\n'), ((1684, 1707), 'sirius_sdk.agent.wallet.abstract.non_secrets.RetrieveRecordOptions', 'RetrieveRecordOptions', ([], {}), '()\n', (1705, 1707), False, 'from sirius_sdk.agent.wallet.abstract.non_secrets import RetrieveRecordOptions\n'), ((2891, 2978), 'sirius_sdk.context', 'sirius_sdk.context', (["params['server_address']", "params['credentials']", "params['p2p']"], {}), "(params['server_address'], params['credentials'], params[\n 'p2p'])\n", (2909, 2978), False, 'import sirius_sdk\n'), ((3139, 3162), 'sirius_sdk.agent.wallet.abstract.non_secrets.RetrieveRecordOptions', 'RetrieveRecordOptions', ([], {}), '()\n', (3160, 3162), False, 'from sirius_sdk.agent.wallet.abstract.non_secrets import RetrieveRecordOptions\n'), ((4173, 4260), 'sirius_sdk.context', 'sirius_sdk.context', (["params['server_address']", "params['credentials']", "params['p2p']"], {}), "(params['server_address'], params['credentials'], params[\n 'p2p'])\n", (4191, 4260), False, 'import sirius_sdk\n'), ((4405, 4428), 'sirius_sdk.agent.wallet.abstract.non_secrets.RetrieveRecordOptions', 'RetrieveRecordOptions', ([], {}), '()\n', (4426, 4428), False, 'from sirius_sdk.agent.wallet.abstract.non_secrets import RetrieveRecordOptions\n'), ((482, 543), 'sirius_sdk.NonSecrets.add_wallet_record', 'sirius_sdk.NonSecrets.add_wallet_record', (['"""type"""', 'my_id', 'value'], {}), "('type', my_id, value)\n", (521, 543), False, 'import sirius_sdk\n'), ((635, 695), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (674, 695), False, 'import sirius_sdk\n'), ((915, 989), 'sirius_sdk.NonSecrets.update_wallet_record_value', 'sirius_sdk.NonSecrets.update_wallet_record_value', (['"""type"""', 'my_id', 'value_new'], {}), "('type', my_id, value_new)\n", (963, 989), False, 'import sirius_sdk\n'), ((1017, 1077), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (1056, 1077), False, 'import sirius_sdk\n'), ((1140, 1197), 'sirius_sdk.NonSecrets.delete_wallet_record', 'sirius_sdk.NonSecrets.delete_wallet_record', (['"""type"""', 'my_id'], {}), "('type', my_id)\n", (1182, 1197), False, 'import sirius_sdk\n'), ((1601, 1668), 'sirius_sdk.NonSecrets.add_wallet_record', 'sirius_sdk.NonSecrets.add_wallet_record', (['"""type"""', 'my_id', 'value', 'tags'], {}), "('type', my_id, value, tags)\n", (1640, 1668), False, 'import sirius_sdk\n'), ((1760, 1820), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (1799, 1820), False, 'import sirius_sdk\n'), ((2068, 2140), 'sirius_sdk.NonSecrets.update_wallet_record_tags', 
'sirius_sdk.NonSecrets.update_wallet_record_tags', (['"""type"""', 'my_id', 'upd_tags'], {}), "('type', my_id, upd_tags)\n", (2115, 2140), False, 'import sirius_sdk\n'), ((2168, 2228), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (2207, 2228), False, 'import sirius_sdk\n'), ((2290, 2355), 'sirius_sdk.NonSecrets.add_wallet_record_tags', 'sirius_sdk.NonSecrets.add_wallet_record_tags', (['"""type"""', 'my_id', 'tags'], {}), "('type', my_id, tags)\n", (2334, 2355), False, 'import sirius_sdk\n'), ((2383, 2443), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (2422, 2443), False, 'import sirius_sdk\n'), ((2521, 2596), 'sirius_sdk.NonSecrets.delete_wallet_record_tags', 'sirius_sdk.NonSecrets.delete_wallet_record_tags', (['"""type"""', 'my_id', "['ext-tag']"], {}), "('type', my_id, ['ext-tag'])\n", (2568, 2596), False, 'import sirius_sdk\n'), ((2624, 2684), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (2663, 2684), False, 'import sirius_sdk\n'), ((3060, 3121), 'sirius_sdk.NonSecrets.add_wallet_record', 'sirius_sdk.NonSecrets.add_wallet_record', (['"""type"""', 'my_id', 'value'], {}), "('type', my_id, value)\n", (3099, 3121), False, 'import sirius_sdk\n'), ((3215, 3275), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (3254, 3275), False, 'import sirius_sdk\n'), ((3545, 3614), 'sirius_sdk.NonSecrets.update_wallet_record_tags', 'sirius_sdk.NonSecrets.update_wallet_record_tags', (['"""type"""', 'my_id', 'tags1'], {}), "('type', my_id, tags1)\n", (3592, 3614), False, 'import sirius_sdk\n'), ((3642, 3702), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (3681, 3702), False, 'import sirius_sdk\n'), ((3817, 3886), 'sirius_sdk.NonSecrets.update_wallet_record_tags', 'sirius_sdk.NonSecrets.update_wallet_record_tags', (['"""type"""', 'my_id', 'tags2'], {}), "('type', my_id, tags2)\n", (3864, 3886), False, 'import sirius_sdk\n'), ((3914, 3974), 'sirius_sdk.NonSecrets.get_wallet_record', 'sirius_sdk.NonSecrets.get_wallet_record', (['"""type"""', 'my_id', 'opts'], {}), "('type', my_id, opts)\n", (3953, 3974), False, 'import sirius_sdk\n'), ((4684, 4755), 'sirius_sdk.NonSecrets.add_wallet_record', 'sirius_sdk.NonSecrets.add_wallet_record', (['type_', 'my_id1', '"""value1"""', 'tags1'], {}), "(type_, my_id1, 'value1', tags1)\n", (4723, 4755), False, 'import sirius_sdk\n'), ((4770, 4841), 'sirius_sdk.NonSecrets.add_wallet_record', 'sirius_sdk.NonSecrets.add_wallet_record', (['type_', 'my_id2', '"""value2"""', 'tags2'], {}), "(type_, my_id2, 'value2', tags2)\n", (4809, 4841), False, 'import sirius_sdk\n'), ((4929, 4984), 'sirius_sdk.NonSecrets.wallet_search', 'sirius_sdk.NonSecrets.wallet_search', (['type_', 'query', 'opts'], {}), '(type_, query, opts)\n', (4964, 4984), False, 'import sirius_sdk\n'), ((5165, 5220), 'sirius_sdk.NonSecrets.wallet_search', 'sirius_sdk.NonSecrets.wallet_search', (['type_', 'query', 'opts'], {}), '(type_, query, opts)\n', (5200, 5220), False, 'import sirius_sdk\n'), ((5312, 5379), 'sirius_sdk.NonSecrets.wallet_search', 'sirius_sdk.NonSecrets.wallet_search', (['type_', 
'query', 'opts'], {'limit': '(1000)'}), '(type_, query, opts, limit=1000)\n', (5347, 5379), False, 'import sirius_sdk\n'), ((5541, 5596), 'sirius_sdk.NonSecrets.wallet_search', 'sirius_sdk.NonSecrets.wallet_search', (['type_', 'query', 'opts'], {}), '(type_, query, opts)\n', (5576, 5596), False, 'import sirius_sdk\n'), ((451, 463), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (461, 463), False, 'import uuid\n'), ((1459, 1471), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1469, 1471), False, 'import uuid\n'), ((3002, 3014), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3012, 3014), False, 'import uuid\n'), ((4285, 4297), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4295, 4297), False, 'import uuid\n'), ((4330, 4342), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4340, 4342), False, 'import uuid\n'), ((4373, 4385), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4383, 4385), False, 'import uuid\n')] |
from . import views
from django.contrib import admin
from django.urls import path
urlpatterns = [
    path('', views.Home, name='home'),
    path('confess/', views.confess, name='confess'),
]
| [
"django.urls.path"
] | [((133, 166), 'django.urls.path', 'path', (['""""""', 'views.Home'], {'name': '"""home"""'}), "('', views.Home, name='home')\n", (137, 166), False, 'from django.urls import path\n'), ((170, 217), 'django.urls.path', 'path', (['"""confess/"""', 'views.confess'], {'name': '"""confess"""'}), "('confess/', views.confess, name='confess')\n", (174, 217), False, 'from django.urls import path\n')] |
import numpy as np
from enum import Enum
class StepType(Enum):
HEAVISIDE = 1
class Perceptron:
def __init__(self, max_iter=1000, n_iter_no_change=5, verbose=False, learning_rate=1, initial_bias=1):
self.max_iter = max_iter
self.n_iter_no_change = n_iter_no_change
self.verbose = verbose
self.learning_rate = learning_rate
self.initial_bias = initial_bias
def step(self, value, type=StepType.HEAVISIDE):
if type == StepType.HEAVISIDE:
return 1.0 if (value >= 0) else 0.0
else:
return value
# train the weights for the perceptron using a
# basic implementation of stochastic gradient descent
def train(self, features, labels):
# set weights to zero with additional feature for bias
w = [0.0 for i in range(features.shape[1]+1)]
w[0] = self.initial_bias
w_delta = np.array([w])
misclassified_ = []
n_iter = 0
iter_no_change = 0
        if self.learning_rate == 1 and self.initial_bias == 0:
            # delegate to the simplified trainer (note: it returns three values,
            # without the weight-delta history returned below)
            return self.train_simple(features, labels)
for epoch in range(self.max_iter):
n_iter += 1
misclassified = 0
sum_error = 0.0
for x, label in zip(features, labels):
prediction = self.predict(x, w)
error = (label - prediction)
sum_error += error**2
w[0] += (self.learning_rate * error)
if(error): # misclassified
misclassified += 1
w[1:] += (self.learning_rate * error * x)
w_delta = np.append(w_delta, [w], axis=0)
misclassified_.append(misclassified)
if self.verbose:
print('>epoch=%d, learning rate=%.3f, error=%.3f' %
(epoch, self.learning_rate, sum_error))
if misclassified == 0:
iter_no_change += 1
if iter_no_change >= self.n_iter_no_change:
break
epochs = [i+1 for i in range(n_iter)]
return (w, misclassified_, epochs, w_delta)
# pretty much the same thing as train
# when learning rate is 1 and initial bias is 0
# keeping it around to see a different way to do the math
def train_simple(self, features, labels):
# set weights to zero with additional
# feature for bias that always outputs 1
w = np.zeros(shape=(1, features.shape[1]+1))
misclassified_ = []
n_iter = 0
iter_no_change = 0
for epoch in range(self.max_iter):
n_iter += 1
misclassified = 0
for x, label in zip(features, labels):
x = np.insert(x, 0, 1)
z = np.dot(w, x.transpose())
target = self.step(z)
delta = (label - target)
if(delta): # misclassified
misclassified += 1
w += (delta * x)
misclassified_.append(misclassified)
if misclassified == 0:
iter_no_change += 1
if iter_no_change >= self.n_iter_no_change:
break
epochs = np.arange(1, n_iter+1)
return (w[0], misclassified_, epochs)
# predict on one or more rows given the weights
def predict(self, features, weights):
f_shape = features.shape
len_f_shape = len(f_shape)
len_w = len(weights)
bias = weights[0]
if len_f_shape == 1 and f_shape[0] == len_w - 1:
activation = np.dot(weights[1:], features.transpose()) + bias
return self.step(activation)
elif len_f_shape == 2 and f_shape[1] == len_w - 1:
predictions = []
for i in range(f_shape[0]):
predictions.append(self.predict(features[i], weights))
return predictions
# run predictions on all your data and score your results
def score(self, features, labels, weights):
total = len(labels)
n_correct = 0.0
predictions = self.predict(features, weights)
for prediction, label in zip(predictions, labels):
n_correct += 1.0 if prediction == label else 0.0
# if self.verbose:
# print("Expected=%d, Predicted=%d" % (label, prediction))
return (n_correct/total)
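# --- Minimal usage sketch (added for illustration; not part of the original class) ---
# Trains the perceptron on the linearly separable AND function and scores it.
if __name__ == "__main__":
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
    y = np.array([0.0, 0.0, 0.0, 1.0])
    clf = Perceptron(max_iter=100)
    weights, misclassified_per_epoch, epochs, weight_history = clf.train(X, y)
    print("accuracy:", clf.score(X, y, weights))  # expected to reach 1.0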
| [
"numpy.insert",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.arange"
] | [((904, 917), 'numpy.array', 'np.array', (['[w]'], {}), '([w])\n', (912, 917), True, 'import numpy as np\n'), ((2436, 2478), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, features.shape[1] + 1)'}), '(shape=(1, features.shape[1] + 1))\n', (2444, 2478), True, 'import numpy as np\n'), ((3205, 3229), 'numpy.arange', 'np.arange', (['(1)', '(n_iter + 1)'], {}), '(1, n_iter + 1)\n', (3214, 3229), True, 'import numpy as np\n'), ((2721, 2739), 'numpy.insert', 'np.insert', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (2730, 2739), True, 'import numpy as np\n'), ((1633, 1664), 'numpy.append', 'np.append', (['w_delta', '[w]'], {'axis': '(0)'}), '(w_delta, [w], axis=0)\n', (1642, 1664), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import sys
import os.path as osp
import torch
import numpy as np
import cv2
import argparse
import json
import pickle
import smplx
from datetime import datetime
from demo.demo_options import DemoOptions
from bodymocap.body_mocap_api import BodyMocap
import mocap_utils.demo_utils as demo_utils
import mocap_utils.general_utils as gnu
from bodymocap.models import SMPL, SMPLX
from handmocap.hand_modules.h3dw_model import extract_hand_output
from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm
def __get_data_type(pkl_files):
for pkl_file in pkl_files:
saved_data = gnu.load_pkl(pkl_file)
return saved_data['demo_type'], saved_data['smpl_type']
def __get_smpl_model(demo_type, smpl_type):
smplx_model_path = './extra_data/smpl/SMPLX_NEUTRAL.pkl'
smpl_model_path = './extra_data/smpl//basicModel_neutral_lbs_10_207_0_v1.0.0.pkl'
if demo_type == 'hand':
# use original smpl-x
smpl = smplx.create(
smplx_model_path,
model_type = "smplx",
batch_size = 1,
gender = 'neutral',
num_betas = 10,
use_pca = False,
ext='pkl'
)
else:
if smpl_type == 'smplx':
# use modified smpl-x from body module
smpl = SMPLX(
smplx_model_path,
batch_size=1,
num_betas = 10,
use_pca = False,
create_transl=False)
else:
# use modified smpl from body module
assert smpl_type == 'smpl'
smpl = SMPL(
smpl_model_path,
batch_size=1,
create_transl=False)
return smpl
def __calc_hand_mesh(hand_type, pose_params, betas, smplx_model):
hand_rotation = pose_params[:, :3]
hand_pose = pose_params[:, 3:]
body_pose = torch.zeros((1, 63)).float()
assert hand_type in ['left_hand', 'right_hand']
if hand_type == 'right_hand':
body_pose[:, 60:] = hand_rotation # set right hand rotation
right_hand_pose = hand_pose
left_hand_pose = torch.zeros((1, 45), dtype=torch.float32)
else:
        body_pose[:, 57:60] = hand_rotation # set left hand rotation
left_hand_pose = hand_pose
right_hand_pose = torch.zeros((1, 45), dtype=torch.float32)
output = smplx_model(
global_orient = torch.zeros((1,3)),
body_pose = body_pose,
betas = betas,
left_hand_pose = left_hand_pose,
right_hand_pose = right_hand_pose,
return_verts = True)
hand_info_file = "extra_data/hand_module/SMPLX_HAND_INFO.pkl"
hand_info = gnu.load_pkl(hand_info_file)
hand_output = extract_hand_output(
output,
hand_type = hand_type.split("_")[0],
hand_info = hand_info,
top_finger_joints_type = 'ave',
use_cuda = False)
pred_verts = hand_output['hand_vertices_shift'].detach().numpy()
faces = hand_info[f'{hand_type}_faces_local']
return pred_verts[0], faces
def _calc_body_mesh(smpl_type, smpl_model, body_pose, betas,
left_hand_pose, right_hand_pose):
if smpl_type == 'smpl':
smpl_output = smpl_model(
global_orient = body_pose[:, :3],
body_pose = body_pose[:, 3:],
betas = betas,
)
else:
smpl_output = smpl_model(
global_orient = body_pose[:, :3],
body_pose = body_pose[:, 3:],
betas = betas,
left_hand_pose = left_hand_pose,
right_hand_pose = right_hand_pose,
)
vertices = smpl_output.vertices.detach().cpu().numpy()[0]
faces = smpl_model.faces
return vertices, faces
def __calc_mesh(demo_type, smpl_type, smpl_model, img_shape, pred_output_list):
for pred_output in pred_output_list:
if pred_output is not None:
# hand
if demo_type == 'hand':
assert 'left_hand' in pred_output and 'right_hand' in pred_output
for hand_type in pred_output:
hand_pred = pred_output[hand_type]
if hand_pred is not None:
pose_params = torch.from_numpy(hand_pred['pred_hand_pose'])
betas = torch.from_numpy(hand_pred['pred_hand_betas'])
pred_verts, hand_faces = __calc_hand_mesh(hand_type, pose_params, betas, smpl_model)
hand_pred['pred_vertices_smpl'] = pred_verts
cam_scale = hand_pred['pred_camera'][0]
cam_trans = hand_pred['pred_camera'][1:]
vert_bboxcoord = convert_smpl_to_bbox(
pred_verts, cam_scale, cam_trans, bAppTransFirst=True) # SMPL space -> bbox space
bbox_scale_ratio = hand_pred['bbox_scale_ratio']
bbox_top_left = hand_pred['bbox_top_left']
vert_imgcoord = convert_bbox_to_oriIm(
vert_bboxcoord, bbox_scale_ratio, bbox_top_left,
img_shape[1], img_shape[0])
pred_output[hand_type]['pred_vertices_img'] = vert_imgcoord
# body
else:
pose_params = torch.from_numpy(pred_output['pred_body_pose'])
betas = torch.from_numpy(pred_output['pred_betas'])
if 'pred_right_hand_pose' in pred_output:
pred_right_hand_pose = torch.from_numpy(pred_output['pred_right_hand_pose'])
else:
pred_right_hand_pose = torch.zeros((1, 45), dtype=torch.float32)
if 'pred_left_hand_pose' in pred_output:
pred_left_hand_pose = torch.from_numpy(pred_output['pred_left_hand_pose'])
else:
pred_left_hand_pose = torch.zeros((1, 45), dtype=torch.float32)
pred_verts, faces = _calc_body_mesh(
smpl_type, smpl_model, pose_params, betas, pred_left_hand_pose, pred_right_hand_pose)
pred_output['pred_vertices_smpl'] = pred_verts
pred_output['faces'] = faces
cam_scale = pred_output['pred_camera'][0]
cam_trans = pred_output['pred_camera'][1:]
vert_bboxcoord = convert_smpl_to_bbox(
pred_verts, cam_scale, cam_trans, bAppTransFirst=False) # SMPL space -> bbox space
bbox_scale_ratio = pred_output['bbox_scale_ratio']
bbox_top_left = pred_output['bbox_top_left']
vert_imgcoord = convert_bbox_to_oriIm(
vert_bboxcoord, bbox_scale_ratio, bbox_top_left,
img_shape[1], img_shape[0])
pred_output['pred_vertices_img'] = vert_imgcoord
def visualize_prediction(args, demo_type, smpl_type, smpl_model, pkl_files, visualizer):
for pkl_file in pkl_files:
# load data
saved_data = gnu.load_pkl(pkl_file)
image_path = saved_data['image_path']
img_original_bgr = cv2.imread(image_path)
        if img_original_bgr is None:
            print(f"{image_path} does not exist, skip")
            continue
        print("--------------------------------------")
demo_type = saved_data['demo_type']
assert saved_data['smpl_type'] == smpl_type
hand_bbox_list = saved_data['hand_bbox_list']
body_bbox_list = saved_data['body_bbox_list']
pred_output_list = saved_data['pred_output_list']
if not saved_data['save_mesh']:
__calc_mesh(
demo_type, smpl_type, smpl_model, img_original_bgr.shape[:2], pred_output_list)
else:
pass
pred_mesh_list = demo_utils.extract_mesh_from_output(pred_output_list)
# visualization
res_img = visualizer.visualize(
img_original_bgr,
pred_mesh_list = pred_mesh_list,
body_bbox_list = body_bbox_list,
hand_bbox_list = hand_bbox_list)
# save result image
demo_utils.save_res_img(args.out_dir, image_path, res_img)
# save predictions to pkl
if args.save_pred_pkl:
args.use_smplx = smpl_type == 'smplx'
demo_utils.save_pred_to_pkl(
args, demo_type, image_path, body_bbox_list, hand_bbox_list, pred_output_list)
def main():
args = DemoOptions().parse()
# load pkl files
pkl_files = gnu.get_all_files(args.pkl_dir, ".pkl", "full")
# get smpl type
demo_type, smpl_type = __get_data_type(pkl_files)
# get smpl model
smpl_model = __get_smpl_model(demo_type, smpl_type)
# Set Visualizer
assert args.renderer_type in ['pytorch3d', 'opendr'], \
f"{args.renderer_type} not implemented yet."
from renderer.screen_free_visualizer import Visualizer
visualizer = Visualizer(args.renderer_type)
# load smpl model
visualize_prediction(args, demo_type, smpl_type, smpl_model, pkl_files, visualizer)
if __name__ == '__main__':
main() | [
"mocap_utils.general_utils.get_all_files",
"bodymocap.models.SMPL",
"mocap_utils.coordconv.convert_bbox_to_oriIm",
"demo.demo_options.DemoOptions",
"smplx.create",
"bodymocap.models.SMPLX",
"torch.from_numpy",
"mocap_utils.coordconv.convert_smpl_to_bbox",
"renderer.screen_free_visualizer.Visualizer",
"mocap_utils.demo_utils.extract_mesh_from_output",
"mocap_utils.demo_utils.save_pred_to_pkl",
"mocap_utils.demo_utils.save_res_img",
"mocap_utils.general_utils.load_pkl",
"torch.zeros",
"cv2.imread"
] | [((2739, 2767), 'mocap_utils.general_utils.load_pkl', 'gnu.load_pkl', (['hand_info_file'], {}), '(hand_info_file)\n', (2751, 2767), True, 'import mocap_utils.general_utils as gnu\n'), ((8587, 8634), 'mocap_utils.general_utils.get_all_files', 'gnu.get_all_files', (['args.pkl_dir', '""".pkl"""', '"""full"""'], {}), "(args.pkl_dir, '.pkl', 'full')\n", (8604, 8634), True, 'import mocap_utils.general_utils as gnu\n'), ((8998, 9028), 'renderer.screen_free_visualizer.Visualizer', 'Visualizer', (['args.renderer_type'], {}), '(args.renderer_type)\n', (9008, 9028), False, 'from renderer.screen_free_visualizer import Visualizer\n'), ((668, 690), 'mocap_utils.general_utils.load_pkl', 'gnu.load_pkl', (['pkl_file'], {}), '(pkl_file)\n', (680, 690), True, 'import mocap_utils.general_utils as gnu\n'), ((1022, 1149), 'smplx.create', 'smplx.create', (['smplx_model_path'], {'model_type': '"""smplx"""', 'batch_size': '(1)', 'gender': '"""neutral"""', 'num_betas': '(10)', 'use_pca': '(False)', 'ext': '"""pkl"""'}), "(smplx_model_path, model_type='smplx', batch_size=1, gender=\n 'neutral', num_betas=10, use_pca=False, ext='pkl')\n", (1034, 1149), False, 'import smplx\n'), ((2189, 2230), 'torch.zeros', 'torch.zeros', (['(1, 45)'], {'dtype': 'torch.float32'}), '((1, 45), dtype=torch.float32)\n', (2200, 2230), False, 'import torch\n'), ((2372, 2413), 'torch.zeros', 'torch.zeros', (['(1, 45)'], {'dtype': 'torch.float32'}), '((1, 45), dtype=torch.float32)\n', (2383, 2413), False, 'import torch\n'), ((7093, 7115), 'mocap_utils.general_utils.load_pkl', 'gnu.load_pkl', (['pkl_file'], {}), '(pkl_file)\n', (7105, 7115), True, 'import mocap_utils.general_utils as gnu\n'), ((7198, 7220), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (7208, 7220), False, 'import cv2\n'), ((7871, 7924), 'mocap_utils.demo_utils.extract_mesh_from_output', 'demo_utils.extract_mesh_from_output', (['pred_output_list'], {}), '(pred_output_list)\n', (7906, 7924), True, 'import mocap_utils.demo_utils as demo_utils\n'), ((8191, 8249), 'mocap_utils.demo_utils.save_res_img', 'demo_utils.save_res_img', (['args.out_dir', 'image_path', 'res_img'], {}), '(args.out_dir, image_path, res_img)\n', (8214, 8249), True, 'import mocap_utils.demo_utils as demo_utils\n'), ((1364, 1455), 'bodymocap.models.SMPLX', 'SMPLX', (['smplx_model_path'], {'batch_size': '(1)', 'num_betas': '(10)', 'use_pca': '(False)', 'create_transl': '(False)'}), '(smplx_model_path, batch_size=1, num_betas=10, use_pca=False,\n create_transl=False)\n', (1369, 1455), False, 'from bodymocap.models import SMPL, SMPLX\n'), ((1658, 1714), 'bodymocap.models.SMPL', 'SMPL', (['smpl_model_path'], {'batch_size': '(1)', 'create_transl': '(False)'}), '(smpl_model_path, batch_size=1, create_transl=False)\n', (1662, 1714), False, 'from bodymocap.models import SMPL, SMPLX\n'), ((1944, 1964), 'torch.zeros', 'torch.zeros', (['(1, 63)'], {}), '((1, 63))\n', (1955, 1964), False, 'import torch\n'), ((2465, 2484), 'torch.zeros', 'torch.zeros', (['(1, 3)'], {}), '((1, 3))\n', (2476, 2484), False, 'import torch\n'), ((8378, 8488), 'mocap_utils.demo_utils.save_pred_to_pkl', 'demo_utils.save_pred_to_pkl', (['args', 'demo_type', 'image_path', 'body_bbox_list', 'hand_bbox_list', 'pred_output_list'], {}), '(args, demo_type, image_path, body_bbox_list,\n hand_bbox_list, pred_output_list)\n', (8405, 8488), True, 'import mocap_utils.demo_utils as demo_utils\n'), ((8527, 8540), 'demo.demo_options.DemoOptions', 'DemoOptions', ([], {}), '()\n', (8538, 8540), False, 'from demo.demo_options import 
DemoOptions\n'), ((5375, 5422), 'torch.from_numpy', 'torch.from_numpy', (["pred_output['pred_body_pose']"], {}), "(pred_output['pred_body_pose'])\n", (5391, 5422), False, 'import torch\n'), ((5447, 5490), 'torch.from_numpy', 'torch.from_numpy', (["pred_output['pred_betas']"], {}), "(pred_output['pred_betas'])\n", (5463, 5490), False, 'import torch\n'), ((6430, 6506), 'mocap_utils.coordconv.convert_smpl_to_bbox', 'convert_smpl_to_bbox', (['pred_verts', 'cam_scale', 'cam_trans'], {'bAppTransFirst': '(False)'}), '(pred_verts, cam_scale, cam_trans, bAppTransFirst=False)\n', (6450, 6506), False, 'from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm\n'), ((6716, 6818), 'mocap_utils.coordconv.convert_bbox_to_oriIm', 'convert_bbox_to_oriIm', (['vert_bboxcoord', 'bbox_scale_ratio', 'bbox_top_left', 'img_shape[1]', 'img_shape[0]'], {}), '(vert_bboxcoord, bbox_scale_ratio, bbox_top_left,\n img_shape[1], img_shape[0])\n', (6737, 6818), False, 'from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm\n'), ((5592, 5645), 'torch.from_numpy', 'torch.from_numpy', (["pred_output['pred_right_hand_pose']"], {}), "(pred_output['pred_right_hand_pose'])\n", (5608, 5645), False, 'import torch\n'), ((5711, 5752), 'torch.zeros', 'torch.zeros', (['(1, 45)'], {'dtype': 'torch.float32'}), '((1, 45), dtype=torch.float32)\n', (5722, 5752), False, 'import torch\n'), ((5852, 5904), 'torch.from_numpy', 'torch.from_numpy', (["pred_output['pred_left_hand_pose']"], {}), "(pred_output['pred_left_hand_pose'])\n", (5868, 5904), False, 'import torch\n'), ((5969, 6010), 'torch.zeros', 'torch.zeros', (['(1, 45)'], {'dtype': 'torch.float32'}), '((1, 45), dtype=torch.float32)\n', (5980, 6010), False, 'import torch\n'), ((4272, 4317), 'torch.from_numpy', 'torch.from_numpy', (["hand_pred['pred_hand_pose']"], {}), "(hand_pred['pred_hand_pose'])\n", (4288, 4317), False, 'import torch\n'), ((4350, 4396), 'torch.from_numpy', 'torch.from_numpy', (["hand_pred['pred_hand_betas']"], {}), "(hand_pred['pred_hand_betas'])\n", (4366, 4396), False, 'import torch\n'), ((4746, 4821), 'mocap_utils.coordconv.convert_smpl_to_bbox', 'convert_smpl_to_bbox', (['pred_verts', 'cam_scale', 'cam_trans'], {'bAppTransFirst': '(True)'}), '(pred_verts, cam_scale, cam_trans, bAppTransFirst=True)\n', (4766, 4821), False, 'from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm\n'), ((5059, 5161), 'mocap_utils.coordconv.convert_bbox_to_oriIm', 'convert_bbox_to_oriIm', (['vert_bboxcoord', 'bbox_scale_ratio', 'bbox_top_left', 'img_shape[1]', 'img_shape[0]'], {}), '(vert_bboxcoord, bbox_scale_ratio, bbox_top_left,\n img_shape[1], img_shape[0])\n', (5080, 5161), False, 'from mocap_utils.coordconv import convert_smpl_to_bbox, convert_bbox_to_oriIm\n')] |
import requests
import time
import json
from datetime import date, timedelta
import telegram_send
URL1 = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=571&date="
URL2 = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=572&date="
URL3 = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id=565&date="
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def avail_check(slot_data, vac_name, vac_age, vac_dose):
result_slots = []
for item in slot_data:
for session in item["sessions"]:
if(session["min_age_limit"] == vac_age and session[vac_dose] > 0 and session["vaccine"]==vac_name):
results = {}
results["name"] = item["name"]
results[vac_dose] = session[vac_dose]
results["address"] = item["address"]
results["pincode"] = item["pincode"]
results["date"] = session["date"]
results["vaccine"] = session["vaccine"]
result_slots.append(results)
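            # fallback: no exact vaccine-name match above, so relax the filter and
            # include sessions that match the age limit OR still have doses left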
elif(session["min_age_limit"] == vac_age or session[vac_dose] > 0):
results = {}
results["name"] = item["name"]
results[vac_dose] = session[vac_dose]
results["address"] = item["address"]
results["pincode"] = item["pincode"]
results["date"] = session["date"]
results["vaccine"] = session["vaccine"]
result_slots.append(results)
return(result_slots)
vac_name = None
vac_age = 0
vac_dose = -1
while vac_name is None:
name_param = input("Vaccine Name (Covishield/Covaxin): Leave blank if no preference \n")
    if(name_param.upper() in ["COVISHIELD", "COVAXIN", ""]):
        # store the normalised (upper-case) name, since CoWIN reports vaccine names in upper case
        vac_name = name_param.upper()
else:
print("Incorrect Option, try again")
while vac_dose == -1:
dose_param = input("Dose No (1/2): \n")
if(dose_param in ["1", "2"]):
vac_dose = "available_capacity_dose"+dose_param
else:
print("Incorrect Option, try again")
while vac_age == 0:
age_param = input("Minimum Age: \n")
if(age_param in ["18", "45"]):
vac_age = int(age_param)
else:
print("Incorrect Option, try again")
while True:
tom_date = (date.today() + timedelta(days=1)).strftime("%d-%m-%Y")
    # Query districts 571, 572 and 565 for slots starting from tomorrow's date
slot_tom = json.loads(requests.get(url = URL1+tom_date, headers = headers).text)
slot_tom1 = json.loads(requests.get(url = URL2+tom_date, headers = headers).text)
slot_tom2 = json.loads(requests.get(url = URL3+tom_date, headers = headers).text)
slot_list = avail_check(slot_tom["centers"], vac_name, vac_age, vac_dose)
slot_list2 = avail_check(slot_tom1["centers"], vac_name, vac_age, vac_dose)
slot_list3 = avail_check(slot_tom2["centers"], vac_name, vac_age, vac_dose)
message = ""
for slot in slot_list+slot_list2+slot_list3:
message += str(slot) + "\n"
if(slot_list+slot_list2+slot_list3 != []):
telegram_send.send(messages=[message])
print(slot_list+slot_list2+slot_list3)
else:
print("No Slots Available")
time.sleep(10)
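# --- Setup notes (added for clarity; not part of the original script) ---
# telegram_send.send() relies on a bot that has already been configured on this
# machine (e.g. via the package's `telegram-send --configure` helper).
# Each loop iteration issues three district queries and then sleeps 10 seconds,
# i.e. roughly 18 requests per minute against the public CoWIN API.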
| [
"datetime.date.today",
"requests.get",
"time.sleep",
"datetime.timedelta",
"telegram_send.send"
] | [((3375, 3389), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3385, 3389), False, 'import time\n'), ((3238, 3276), 'telegram_send.send', 'telegram_send.send', ([], {'messages': '[message]'}), '(messages=[message])\n', (3256, 3276), False, 'import telegram_send\n'), ((2605, 2655), 'requests.get', 'requests.get', ([], {'url': '(URL1 + tom_date)', 'headers': 'headers'}), '(url=URL1 + tom_date, headers=headers)\n', (2617, 2655), False, 'import requests\n'), ((2691, 2741), 'requests.get', 'requests.get', ([], {'url': '(URL2 + tom_date)', 'headers': 'headers'}), '(url=URL2 + tom_date, headers=headers)\n', (2703, 2741), False, 'import requests\n'), ((2777, 2827), 'requests.get', 'requests.get', ([], {'url': '(URL3 + tom_date)', 'headers': 'headers'}), '(url=URL3 + tom_date, headers=headers)\n', (2789, 2827), False, 'import requests\n'), ((2468, 2480), 'datetime.date.today', 'date.today', ([], {}), '()\n', (2478, 2480), False, 'from datetime import date, timedelta\n'), ((2483, 2500), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2492, 2500), False, 'from datetime import date, timedelta\n')] |
import argparse
from dddssd.lib.core.config import cfg, cfg_from_file
from dddssd.lib.dataset.dataloader import choose_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Trainer')
parser.add_argument('--cfg', required=True, help='Config file for training')
parser.add_argument('--split', default='training', help='Dataset split: training')
parser.add_argument('--img_list', default='train', help='Train/Val/Trainval/Test list')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
cfg_from_file(args.cfg)
if args.img_list == 'test':
# if test, no ground truth available
cfg.TEST.WITH_GT = False
    if args.img_list in ('val', 'test'):
        # if val or test, no mix-up dataset
        cfg.TRAIN.AUGMENTATIONS.MIXUP.OPEN = False
dataset_func = choose_dataset()
dataset = dataset_func('preprocessing', split=args.split, img_list=args.img_list, is_training=False)
dataset.preprocess_batch()
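# Example invocation (illustrative; the script and config file names are assumptions,
# only the --cfg/--split/--img_list options are defined above):
#   python preprocess_data.py --cfg configs/kitti/3dssd.yaml --split training --img_list train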
| [
"dddssd.lib.dataset.dataloader.choose_dataset",
"dddssd.lib.core.config.cfg_from_file",
"argparse.ArgumentParser"
] | [((161, 207), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Trainer"""'}), "(description='Trainer')\n", (184, 207), False, 'import argparse\n'), ((574, 597), 'dddssd.lib.core.config.cfg_from_file', 'cfg_from_file', (['args.cfg'], {}), '(args.cfg)\n', (587, 597), False, 'from dddssd.lib.core.config import cfg, cfg_from_file\n'), ((849, 865), 'dddssd.lib.dataset.dataloader.choose_dataset', 'choose_dataset', ([], {}), '()\n', (863, 865), False, 'from dddssd.lib.dataset.dataloader import choose_dataset\n')] |
from logging import getLogger
from django.core.management.base import BaseCommand
from django_pglocks import advisory_lock
from datahub.search.apps import get_search_apps, get_search_apps_by_name
from datahub.search.migrate import migrate_apps
logger = getLogger(__name__)
class Command(BaseCommand):
"""
Command for migrating an Elasticsearch index.
This will also create Elasticsearch indices the first time it is run.
"""
help = """Migrate modified mapping types for Elasticsearch indices.
For new indices, the command creates each index and schedules a Celery task to synchronise
data to the new index.
For existing indices, the command creates new indices for modified search models
and schedules Celery tasks to synchronises data to the new indices and then delete the old
indices.
See docs/Elasticsearch migrations.md for further details."""
def add_arguments(self, parser):
"""Handle arguments."""
parser.add_argument(
'--model',
action='append',
choices=[search_app.name for search_app in get_search_apps()],
help='Search apps to migrate. If empty, all are migrated.',
)
def handle(self, *args, **options):
"""Executes the command."""
apps = get_search_apps_by_name(options['model'])
with advisory_lock('leeloo_migrate_es'):
migrate_apps(apps)
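# Example usage (illustrative; the management-command name comes from this file's
# name, which is not shown here, and the app names are placeholders):
#   ./manage.py <command_name> --model company --model contact
# Repeating --model selects several search apps; omitting it migrates all of them.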
| [
"logging.getLogger",
"datahub.search.migrate.migrate_apps",
"datahub.search.apps.get_search_apps",
"django_pglocks.advisory_lock",
"datahub.search.apps.get_search_apps_by_name"
] | [((256, 275), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (265, 275), False, 'from logging import getLogger\n'), ((1279, 1320), 'datahub.search.apps.get_search_apps_by_name', 'get_search_apps_by_name', (["options['model']"], {}), "(options['model'])\n", (1302, 1320), False, 'from datahub.search.apps import get_search_apps, get_search_apps_by_name\n'), ((1335, 1369), 'django_pglocks.advisory_lock', 'advisory_lock', (['"""leeloo_migrate_es"""'], {}), "('leeloo_migrate_es')\n", (1348, 1369), False, 'from django_pglocks import advisory_lock\n'), ((1383, 1401), 'datahub.search.migrate.migrate_apps', 'migrate_apps', (['apps'], {}), '(apps)\n', (1395, 1401), False, 'from datahub.search.migrate import migrate_apps\n'), ((1085, 1102), 'datahub.search.apps.get_search_apps', 'get_search_apps', ([], {}), '()\n', (1100, 1102), False, 'from datahub.search.apps import get_search_apps, get_search_apps_by_name\n')] |
import pymysql
import config
def lambda_handler(event, context):
# db setting
try:
conn = pymysql.connect(
host=config.db_hostname,
user=config.db_username,
password=config.db_password,
db=config.db_name
)
except pymysql.MySQLError as e:
return {
"success": False,
"message": "Database Error"
}
cursor = conn.cursor(pymysql.cursors.DictCursor)
cafe_id = event['params']['path']['cafe-id']
sql = '\
SELECT c.id, c.name, c.phone, c.address, c.latitude, c.longitude, \
ROUND(AVG(r.star), 2) as avg_star, \
ROUND(AVG(r.noise), 1) as avg_noise, ROUND(AVG(r.light), 1) as avg_light, ROUND(AVG(r.chair), 1) as avg_chair,\
ROUND(AVG(r.consent), 0) as avg_consent, ROUND(AVG(r.wifi), 0) as avg_wifi, \
(SELECT customer \
FROM review \
GROUP BY customer \
HAVING count(*) = (SELECT max(custom_count) \
FROM (SELECT customer, count(*) as custom_count \
FROM review \
WHERE c.id = %s \
GROUP BY customer \
) \
as result \
) LIMIT 1) as customer \
FROM cafe as c \
LEFT JOIN review as r ON c.id = r.cafe_id \
WHERE c.id = %s \
'
cursor.execute(sql, [cafe_id, cafe_id])
data = cursor.fetchall()
cafe_list = list(data[0].values())
    # handle the case where no cafe information exists
if cafe_list == [None]*(len(data[0])):
return {
"success": False,
"message": "카페 정보가 존재하지 않습니다."
}
    # fetch the latest review to use as the representative one
sql = '\
SELECT detail \
FROM review \
WHERE `cafe_id` = %s AND detail IS NOT NULL \
ORDER BY id DESC LIMIT 1 \
'
cursor.execute(sql, cafe_id)
review = cursor.fetchone()
if not review:
data[0]['review'] = None
else:
data[0]['review'] = review.get('detail')
conn.commit()
conn.close()
return {
"success": True,
"data": data[0]
}
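# --- Illustrative local invocation sketch (not part of the deployed handler) ---
# The API Gateway mapping is assumed to place the path parameter under
# event['params']['path']['cafe-id'], as read above; a reachable MySQL instance
# described by the config module is required for the call to succeed.
if __name__ == "__main__":
    sample_event = {"params": {"path": {"cafe-id": 1}}}
    print(lambda_handler(sample_event, None))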
| [
"pymysql.connect"
] | [((108, 226), 'pymysql.connect', 'pymysql.connect', ([], {'host': 'config.db_hostname', 'user': 'config.db_username', 'password': 'config.db_password', 'db': 'config.db_name'}), '(host=config.db_hostname, user=config.db_username, password=\n config.db_password, db=config.db_name)\n', (123, 226), False, 'import pymysql\n')] |
from BridgePython import Bridge
bridge = Bridge(api_key='myapikey') # new code: using public key
class ChatHandler(object):
def message(self, sender, message):
print (sender + ':' + message)
def join_callback(channel, name):
print ("Joined channel : " + name)
channel.message('steve', 'Bridge is pretty nifty')
auth = bridge.get_service('auth')
auth.join('bridge-lovers', '<PASSWORD>', ChatHandler(), join_callback)
bridge.connect()
| [
"BridgePython.Bridge"
] | [((46, 72), 'BridgePython.Bridge', 'Bridge', ([], {'api_key': '"""myapikey"""'}), "(api_key='myapikey')\n", (52, 72), False, 'from BridgePython import Bridge\n')] |
from unittest import mock
import pytest
from django.core.exceptions import ValidationError
from django.test import override_settings
from rest_framework.fields import CharField
from django_documents_tools.api.serializers import (
clone_serializer_field, get_change_serializer_class,
get_snapshot_serializer, get_documented_model_serializer,
get_change_attachment_serializer, BaseChangeSerializer,
BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer,
BaseChangeAttachmentSerializer)
from django_documents_tools.utils import validate_change_attrs
from .serializers import (
BookSerializer, CustomChangeSerializer, CustomSnapshotSerializer,
CustomDocumentedModelLinkSerializer, CustomChangeAttachmentSerializer)
from .models import Book
from .test_models import _create_book_change, _create_book
UNKNOWN_SERIALIZER_PATH = 'tests.serializers.UnknownBookSerializer'
def test_field_clone():
src_field = CharField(required=False)
dst_field = clone_serializer_field(src_field, required=True)
assert dst_field.required
class TestGetChangeSerializerClass:
setting_name = 'BASE_CHANGE_SERIALIZER'
custom_serializer_path = 'tests.serializers.CustomChangeSerializer'
expected_error_msg = (
'UnknownBookSerializer must be subclass of BaseChangeSerializer')
@staticmethod
def test_get_default(book_change_model):
book_change_serializer = get_change_serializer_class(
book_change_model, BookSerializer)
assert issubclass(book_change_serializer, BaseChangeSerializer)
assert book_change_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'document_name', 'document_date', 'document_link',
'document_is_draft', 'document_fields', 'attachment', 'snapshot',
'title', 'author', 'summary', 'isbn', 'is_published', 'book')
def test_get_custom(self, book_change_model):
custom_settings = {
self.setting_name: self.custom_serializer_path
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
book_change_serializer = get_change_serializer_class(
book_change_model, BookSerializer)
assert issubclass(book_change_serializer, CustomChangeSerializer)
assert book_change_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'document_name', 'document_date', 'document_link',
'document_is_draft', 'document_fields', 'attachment', 'snapshot',
'custom_field', 'title', 'author', 'summary', 'isbn',
'is_published', 'book')
def test_get_unknown(self, book_change_model):
custom_settings = {
self.setting_name: UNKNOWN_SERIALIZER_PATH
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
with pytest.raises(Exception) as exc_info:
get_change_serializer_class(book_change_model, BookSerializer)
assert exc_info.value.args[0] == self.expected_error_msg
def test_get_for_model(self, book_change_model):
with mock.patch.object(
book_change_model, '_base_serializer',
self.custom_serializer_path):
book_change_serializer = get_change_serializer_class(
book_change_model, BookSerializer)
assert issubclass(book_change_serializer, CustomChangeSerializer)
assert book_change_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'document_name', 'document_date', 'document_link',
'document_is_draft', 'document_fields', 'attachment', 'snapshot',
'custom_field', 'title', 'author', 'summary', 'isbn',
'is_published', 'book')
class TestGetSnapshotSerializerClass:
setting_name = 'BASE_SNAPSHOT_SERIALIZER'
custom_serializer_path = 'tests.serializers.CustomSnapshotSerializer'
expected_error_msg = (
'UnknownBookSerializer must be subclass of BaseSnapshotSerializer')
@staticmethod
def test_get_default(book_change_model, book_snapshot_model):
book_change_serializer = get_change_serializer_class(
book_change_model, BookSerializer)
book_snapshot_serializer = get_snapshot_serializer(
book_snapshot_model, book_change_serializer)
assert issubclass(book_snapshot_serializer, BaseSnapshotSerializer)
assert book_snapshot_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'document_fields', 'history_date', 'title', 'author',
'summary', 'isbn', 'is_published', 'book')
def test_get_custom(self, book_change_model, book_snapshot_model):
book_change_serializer = get_change_serializer_class(
book_change_model, BookSerializer)
custom_settings = {
self.setting_name: self.custom_serializer_path
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
book_snapshot_serializer = get_snapshot_serializer(
book_snapshot_model, book_change_serializer)
assert issubclass(book_snapshot_serializer, CustomSnapshotSerializer)
assert book_snapshot_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'document_fields', 'history_date', 'custom_field',
'title', 'author', 'summary', 'isbn', 'is_published', 'book')
def test_get_unknown(self, book_change_model, book_snapshot_model):
book_change_serializer = get_change_serializer_class(
book_change_model, BookSerializer)
custom_settings = {
self.setting_name: UNKNOWN_SERIALIZER_PATH
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
with pytest.raises(Exception) as exc_info:
get_snapshot_serializer(
book_snapshot_model, book_change_serializer)
assert exc_info.value.args[0] == self.expected_error_msg
def test_get_for_model(self, book_change_model, book_snapshot_model):
book_change_serializer = get_change_serializer_class(
book_change_model, BookSerializer)
with mock.patch.object(
book_snapshot_model, '_base_serializer',
self.custom_serializer_path):
book_snapshot_serializer = get_snapshot_serializer(
book_snapshot_model, book_change_serializer)
assert issubclass(book_snapshot_serializer, CustomSnapshotSerializer)
assert book_snapshot_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'document_fields', 'history_date', 'custom_field',
'title', 'author', 'summary', 'isbn', 'is_published', 'book')
class TestGetDocumentedModelLinkSerializerClass:
setting_name = 'BASE_DOCUMENTED_MODEL_LINK_SERIALIZER'
custom_serializer_path = (
'tests.serializers.CustomDocumentedModelLinkSerializer')
expected_error_msg = (
'UnknownBookSerializer must be subclass of '
'BaseDocumentedModelLinkSerializer')
@staticmethod
def test_get_default():
book_link_serializer = get_documented_model_serializer(Book)
assert issubclass(
book_link_serializer, BaseDocumentedModelLinkSerializer)
assert book_link_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated')
def test_get_custom(self):
custom_settings = {
self.setting_name: self.custom_serializer_path
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
book_link_serializer = get_documented_model_serializer(Book)
assert issubclass(
book_link_serializer, CustomDocumentedModelLinkSerializer)
assert book_link_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'custom_field')
def test_get_unknown(self, book_change_model):
custom_settings = {
self.setting_name: UNKNOWN_SERIALIZER_PATH
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
with pytest.raises(Exception) as exc_info:
get_documented_model_serializer(Book)
assert exc_info.value.args[0] == self.expected_error_msg
class TestGetChangeAttachmentSerializerClass:
setting_name = 'BASE_CHANGE_ATTACHMENT_SERIALIZER'
custom_serializer_path = (
'tests.serializers.CustomChangeAttachmentSerializer')
expected_error_msg = (
'UnknownBookSerializer must be subclass of '
'BaseChangeAttachmentSerializer')
@staticmethod
def test_get_default(book_change_attachment_model):
book_change_attachment_serializer = get_change_attachment_serializer(
book_change_attachment_model)
assert issubclass(
book_change_attachment_serializer, BaseChangeAttachmentSerializer)
assert book_change_attachment_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'file')
def test_get_custom(self, book_change_attachment_model):
custom_settings = {
self.setting_name: self.custom_serializer_path
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
book_change_attachment_serializer = (
get_change_attachment_serializer(book_change_attachment_model))
assert issubclass(
book_change_attachment_serializer,
CustomChangeAttachmentSerializer)
assert book_change_attachment_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'file', 'custom_field')
def test_get_unknown(self, book_change_attachment_model):
custom_settings = {
self.setting_name: UNKNOWN_SERIALIZER_PATH
}
with override_settings(DOCUMENTS_TOOLS=custom_settings):
with pytest.raises(Exception) as exc_info:
get_change_attachment_serializer(book_change_attachment_model)
assert exc_info.value.args[0] == self.expected_error_msg
def test_get_for_model(self, book_change_attachment_model):
with mock.patch.object(
book_change_attachment_model, '_base_serializer',
self.custom_serializer_path):
book_change_attachment_serializer = (
get_change_attachment_serializer(book_change_attachment_model))
assert issubclass(
book_change_attachment_serializer,
CustomChangeAttachmentSerializer)
assert book_change_attachment_serializer.Meta.fields == (
'guid', 'type', 'version', 'created',
'updated', 'file', 'custom_field')
@pytest.mark.django_db
class TestValidateChangeSerializer:
CHANGE_MODEL = Book.changes.model
ERROR_MESSAGE = {'title': ['This field cannot be null.']}
def validate(self, change, attrs):
validate_change_attrs(self.CHANGE_MODEL, change, attrs)
def test_apply_with_valid_attrs(self):
book = _create_book()
book_change = _create_book_change(
document_is_draft=False, book=book)
document_fields = book_change.get_documented_fields()
kwargs = {
'title': book_change.title,
'author': book_change.author,
'isbn': book_change.isbn,
'is_published': book_change.is_published,
'summary': book_change.summary,
'document_fields': document_fields}
assert self.validate(book_change, kwargs) is None
def test_apply_with_not_valid_attrs(self):
book = _create_book()
book_change = _create_book_change(book=book, title=None)
document_fields = book_change.get_documented_fields()
kwargs = {
'title': book_change.title,
'author': book_change.author,
'isbn': book_change.isbn,
'is_published': book_change.is_published,
'summary': book_change.summary,
'document_fields': document_fields}
with pytest.raises(ValidationError) as exc_info:
self.validate(book_change, kwargs)
assert exc_info.value.message_dict == self.ERROR_MESSAGE
@override_settings(DOCUMENTS_TOOLS={
'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})
def test_update_with_valid_attrs(self):
book_change = _create_book_change(document_is_draft=False)
book_change.title = 'new_title'
book_change.save()
document_fields = book_change.get_documented_fields()
kwargs = {
'title': book_change.title,
'author': book_change.author,
'isbn': book_change.isbn,
'is_published': book_change.is_published,
'summary': book_change.summary,
'document_fields': document_fields}
assert self.validate(book_change, kwargs) is None
@override_settings(DOCUMENTS_TOOLS={
'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})
def test_update_with_not_valid_attrs(self):
book_change = _create_book_change()
book_change.title = None
book_change.save()
document_fields = book_change.get_documented_fields()
kwargs = {
'title': book_change.title,
'author': book_change.author,
'isbn': book_change.isbn,
'is_published': book_change.is_published,
'summary': book_change.summary,
'document_fields': document_fields}
with pytest.raises(ValidationError) as exc_info:
self.validate(book_change, kwargs)
assert exc_info.value.message_dict == self.ERROR_MESSAGE
@override_settings(DOCUMENTS_TOOLS={
'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})
def test_create_with_valid_attrs(self):
book_change = _create_book_change(document_is_draft=False)
document_fields = book_change.get_documented_fields()
kwargs = {
'title': book_change.title,
'author': book_change.author,
'isbn': book_change.isbn,
'is_published': book_change.is_published,
'summary': book_change.summary,
'document_fields': document_fields}
assert self.validate(book_change, kwargs) is None
@override_settings(DOCUMENTS_TOOLS={
'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})
def test_create_with_not_valid_attrs(self):
book_change = _create_book_change(title=None)
document_fields = book_change.get_documented_fields()
kwargs = {
'title': book_change.title,
'author': book_change.author,
'isbn': book_change.isbn,
'is_published': book_change.is_published,
'summary': book_change.summary,
'document_fields': document_fields}
with pytest.raises(ValidationError) as exc_info:
self.validate(book_change, kwargs)
assert exc_info.value.message_dict == self.ERROR_MESSAGE
| [
"django_documents_tools.api.serializers.clone_serializer_field",
"django_documents_tools.api.serializers.get_change_attachment_serializer",
"django_documents_tools.api.serializers.get_change_serializer_class",
"django_documents_tools.api.serializers.get_documented_model_serializer",
"django_documents_tools.utils.validate_change_attrs",
"django.test.override_settings",
"pytest.raises",
"unittest.mock.patch.object",
"rest_framework.fields.CharField",
"django_documents_tools.api.serializers.get_snapshot_serializer"
] | [((940, 965), 'rest_framework.fields.CharField', 'CharField', ([], {'required': '(False)'}), '(required=False)\n', (949, 965), False, 'from rest_framework.fields import CharField\n'), ((982, 1030), 'django_documents_tools.api.serializers.clone_serializer_field', 'clone_serializer_field', (['src_field'], {'required': '(True)'}), '(src_field, required=True)\n', (1004, 1030), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((12373, 12466), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': "{'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True}"}), "(DOCUMENTS_TOOLS={\n 'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})\n", (12390, 12466), False, 'from django.test import override_settings\n'), ((13062, 13155), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': "{'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True}"}), "(DOCUMENTS_TOOLS={\n 'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})\n", (13079, 13155), False, 'from django.test import override_settings\n'), ((13836, 13929), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': "{'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True}"}), "(DOCUMENTS_TOOLS={\n 'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})\n", (13853, 13929), False, 'from django.test import override_settings\n'), ((14457, 14550), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': "{'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True}"}), "(DOCUMENTS_TOOLS={\n 'CREATE_BUSINESS_ENTITY_AFTER_CHANGE_CREATED': True})\n", (14474, 14550), False, 'from django.test import override_settings\n'), ((1413, 1475), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (1440, 1475), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((4191, 4253), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (4218, 4253), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((4302, 4370), 'django_documents_tools.api.serializers.get_snapshot_serializer', 'get_snapshot_serializer', (['book_snapshot_model', 'book_change_serializer'], {}), '(book_snapshot_model, book_change_serializer)\n', (4325, 4370), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), 
((4805, 4867), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (4832, 4867), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((5609, 5671), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (5636, 5671), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((6182, 6244), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (6209, 6244), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((7262, 7299), 'django_documents_tools.api.serializers.get_documented_model_serializer', 'get_documented_model_serializer', (['Book'], {}), '(Book)\n', (7293, 7299), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((8853, 8915), 'django_documents_tools.api.serializers.get_change_attachment_serializer', 'get_change_attachment_serializer', (['book_change_attachment_model'], {}), '(book_change_attachment_model)\n', (8885, 8915), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((11080, 11135), 'django_documents_tools.utils.validate_change_attrs', 'validate_change_attrs', (['self.CHANGE_MODEL', 'change', 'attrs'], {}), '(self.CHANGE_MODEL, change, attrs)\n', (11101, 11135), False, 'from django_documents_tools.utils import validate_change_attrs\n'), ((2055, 2105), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (2072, 2105), False, 'from django.test import override_settings\n'), ((2144, 2206), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (2171, 2206), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, 
get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((2817, 2867), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (2834, 2867), False, 'from django.test import override_settings\n'), ((3139, 3229), 'unittest.mock.patch.object', 'mock.patch.object', (['book_change_model', '"""_base_serializer"""', 'self.custom_serializer_path'], {}), "(book_change_model, '_base_serializer', self.\n custom_serializer_path)\n", (3156, 3229), False, 'from unittest import mock\n'), ((3296, 3358), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (3323, 3358), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((4992, 5042), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (5009, 5042), False, 'from django.test import override_settings\n'), ((5083, 5151), 'django_documents_tools.api.serializers.get_snapshot_serializer', 'get_snapshot_serializer', (['book_snapshot_model', 'book_change_serializer'], {}), '(book_snapshot_model, book_change_serializer)\n', (5106, 5151), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((5792, 5842), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (5809, 5842), False, 'from django.test import override_settings\n'), ((6272, 6364), 'unittest.mock.patch.object', 'mock.patch.object', (['book_snapshot_model', '"""_base_serializer"""', 'self.custom_serializer_path'], {}), "(book_snapshot_model, '_base_serializer', self.\n custom_serializer_path)\n", (6289, 6364), False, 'from unittest import mock\n'), ((6433, 6501), 'django_documents_tools.api.serializers.get_snapshot_serializer', 'get_snapshot_serializer', (['book_snapshot_model', 'book_change_serializer'], {}), '(book_snapshot_model, book_change_serializer)\n', (6456, 6501), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((7665, 7715), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (7682, 7715), False, 'from django.test import override_settings\n'), ((7752, 7789), 'django_documents_tools.api.serializers.get_documented_model_serializer', 'get_documented_model_serializer', (['Book'], {}), '(Book)\n', (7783, 7789), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, 
get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((8190, 8240), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (8207, 8240), False, 'from django.test import override_settings\n'), ((9356, 9406), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (9373, 9406), False, 'from django.test import override_settings\n'), ((9474, 9536), 'django_documents_tools.api.serializers.get_change_attachment_serializer', 'get_change_attachment_serializer', (['book_change_attachment_model'], {}), '(book_change_attachment_model)\n', (9506, 9536), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((9992, 10042), 'django.test.override_settings', 'override_settings', ([], {'DOCUMENTS_TOOLS': 'custom_settings'}), '(DOCUMENTS_TOOLS=custom_settings)\n', (10009, 10042), False, 'from django.test import override_settings\n'), ((10326, 10427), 'unittest.mock.patch.object', 'mock.patch.object', (['book_change_attachment_model', '"""_base_serializer"""', 'self.custom_serializer_path'], {}), "(book_change_attachment_model, '_base_serializer', self.\n custom_serializer_path)\n", (10343, 10427), False, 'from unittest import mock\n'), ((10523, 10585), 'django_documents_tools.api.serializers.get_change_attachment_serializer', 'get_change_attachment_serializer', (['book_change_attachment_model'], {}), '(book_change_attachment_model)\n', (10555, 10585), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((12211, 12241), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (12224, 12241), False, 'import pytest\n'), ((13674, 13704), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (13687, 13704), False, 'import pytest\n'), ((15018, 15048), 'pytest.raises', 'pytest.raises', (['ValidationError'], {}), '(ValidationError)\n', (15031, 15048), False, 'import pytest\n'), ((2886, 2910), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2899, 2910), False, 'import pytest\n'), ((2940, 3002), 'django_documents_tools.api.serializers.get_change_serializer_class', 'get_change_serializer_class', (['book_change_model', 'BookSerializer'], {}), '(book_change_model, BookSerializer)\n', (2967, 3002), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((5861, 5885), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (5874, 5885), False, 'import pytest\n'), ((5915, 5983), 'django_documents_tools.api.serializers.get_snapshot_serializer', 
'get_snapshot_serializer', (['book_snapshot_model', 'book_change_serializer'], {}), '(book_snapshot_model, book_change_serializer)\n', (5938, 5983), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((8259, 8283), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8272, 8283), False, 'import pytest\n'), ((8313, 8350), 'django_documents_tools.api.serializers.get_documented_model_serializer', 'get_documented_model_serializer', (['Book'], {}), '(Book)\n', (8344, 8350), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n'), ((10061, 10085), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (10074, 10085), False, 'import pytest\n'), ((10115, 10177), 'django_documents_tools.api.serializers.get_change_attachment_serializer', 'get_change_attachment_serializer', (['book_change_attachment_model'], {}), '(book_change_attachment_model)\n', (10147, 10177), False, 'from django_documents_tools.api.serializers import clone_serializer_field, get_change_serializer_class, get_snapshot_serializer, get_documented_model_serializer, get_change_attachment_serializer, BaseChangeSerializer, BaseSnapshotSerializer, BaseDocumentedModelLinkSerializer, BaseChangeAttachmentSerializer\n')] |
import mpspdz.instructions_base as base
import mpspdz.instructions as spdz
import mpspdz.tools as tools
import collections
import itertools
class SecretBitsAF(base.RegisterArgFormat):
reg_type = "sb"
class ClearBitsAF(base.RegisterArgFormat):
reg_type = "cb"
base.ArgFormats["sb"] = SecretBitsAF
base.ArgFormats["sbw"] = SecretBitsAF
base.ArgFormats["cb"] = ClearBitsAF
base.ArgFormats["cbw"] = ClearBitsAF
opcodes = dict(
XORS=0x200,
XORM=0x201,
ANDRS=0x202,
BITDECS=0x203,
BITCOMS=0x204,
CONVSINT=0x205,
LDMSDI=0x206,
STMSDI=0x207,
LDMSD=0x208,
STMSD=0x209,
LDBITS=0x20A,
ANDS=0x20B,
TRANS=0x20C,
BITB=0x20D,
ANDM=0x20E,
NOTS=0x20F,
LDMSB=0x240,
STMSB=0x241,
LDMSBI=0x242,
STMSBI=0x243,
MOVSB=0x244,
INPUTB=0x246,
INPUTBVEC=0x247,
SPLIT=0x248,
CONVCBIT2S=0x249,
XORCBI=0x210,
BITDECC=0x211,
CONVCINT=0x213,
REVEAL=0x214,
STMSDCI=0x215,
LDMCB=0x217,
STMCB=0x218,
XORCB=0x219,
ADDCB=0x21A,
ADDCBI=0x21B,
MULCBI=0x21C,
SHRCBI=0x21D,
SHLCBI=0x21E,
CONVCINTVEC=0x21F,
PRINTREGSIGNED=0x220,
PRINTREGB=0x221,
PRINTREGPLAINB=0x222,
PRINTFLOATPLAINB=0x223,
CONDPRINTSTRB=0x224,
CONVCBIT=0x230,
CONVCBITVEC=0x231,
)
class BinaryVectorInstruction(base.Instruction):
is_vec = lambda self: True
def copy(self, size, subs):
return type(self)(*self.get_new_args(size, subs))
class NonVectorInstruction(base.Instruction):
is_vec = lambda self: False
def __init__(self, *args, **kwargs):
assert args[0].n <= args[0].unit
super(NonVectorInstruction, self).__init__(*args, **kwargs)
class NonVectorInstruction1(base.Instruction):
is_vec = lambda self: False
def __init__(self, *args, **kwargs):
assert args[1].n <= args[1].unit
super(NonVectorInstruction1, self).__init__(*args, **kwargs)
class xors(BinaryVectorInstruction):
code = opcodes["XORS"]
arg_format = tools.cycle(["int", "sbw", "sb", "sb"])
class xorm(NonVectorInstruction):
code = opcodes["XORM"]
arg_format = ["int", "sbw", "sb", "cb"]
class xorcb(NonVectorInstruction):
code = opcodes["XORCB"]
arg_format = ["cbw", "cb", "cb"]
class xorcbi(NonVectorInstruction):
code = opcodes["XORCBI"]
arg_format = ["cbw", "cb", "int"]
class andrs(BinaryVectorInstruction):
code = opcodes["ANDRS"]
arg_format = tools.cycle(["int", "sbw", "sb", "sb"])
def add_usage(self, req_node):
req_node.increment(("bit", "triple"), sum(self.args[::4]))
class ands(BinaryVectorInstruction):
code = opcodes["ANDS"]
arg_format = tools.cycle(["int", "sbw", "sb", "sb"])
def add_usage(self, req_node):
req_node.increment(("bit", "triple"), sum(self.args[::4]))
class andm(BinaryVectorInstruction):
code = opcodes["ANDM"]
arg_format = ["int", "sbw", "sb", "cb"]
class nots(BinaryVectorInstruction):
code = opcodes["NOTS"]
arg_format = ["int", "sbw", "sb"]
class addcb(NonVectorInstruction):
code = opcodes["ADDCB"]
arg_format = ["cbw", "cb", "cb"]
class addcbi(NonVectorInstruction):
code = opcodes["ADDCBI"]
arg_format = ["cbw", "cb", "int"]
class mulcbi(NonVectorInstruction):
code = opcodes["MULCBI"]
arg_format = ["cbw", "cb", "int"]
class bitdecs(NonVectorInstruction, base.VarArgsInstruction):
code = opcodes["BITDECS"]
arg_format = tools.chain(["sb"], itertools.repeat("sbw"))
class bitcoms(NonVectorInstruction, base.VarArgsInstruction):
code = opcodes["BITCOMS"]
arg_format = tools.chain(["sbw"], itertools.repeat("sb"))
class bitdecc(NonVectorInstruction, base.VarArgsInstruction):
code = opcodes["BITDECC"]
arg_format = tools.chain(["cb"], itertools.repeat("cbw"))
class shrcbi(NonVectorInstruction):
code = opcodes["SHRCBI"]
arg_format = ["cbw", "cb", "int"]
class shlcbi(NonVectorInstruction):
code = opcodes["SHLCBI"]
arg_format = ["cbw", "cb", "int"]
class ldbits(NonVectorInstruction):
code = opcodes["LDBITS"]
arg_format = ["sbw", "i", "i"]
class ldmsb(
base.DirectMemoryInstruction, base.ReadMemoryInstruction, base.VectorInstruction
):
code = opcodes["LDMSB"]
arg_format = ["sbw", "int"]
class stmsb(base.DirectMemoryWriteInstruction, base.VectorInstruction):
code = opcodes["STMSB"]
arg_format = ["sb", "int"]
# def __init__(self, *args, **kwargs):
# super(type(self), self).__init__(*args, **kwargs)
# import inspect
# self.caller = [frame[1:] for frame in inspect.stack()[1:]]
class ldmcb(
base.DirectMemoryInstruction, base.ReadMemoryInstruction, base.VectorInstruction
):
code = opcodes["LDMCB"]
arg_format = ["cbw", "int"]
class stmcb(base.DirectMemoryWriteInstruction, base.VectorInstruction):
code = opcodes["STMCB"]
arg_format = ["cb", "int"]
class ldmsbi(base.ReadMemoryInstruction, base.VectorInstruction):
code = opcodes["LDMSBI"]
arg_format = ["sbw", "ci"]
class stmsbi(base.WriteMemoryInstruction, base.VectorInstruction):
code = opcodes["STMSBI"]
arg_format = ["sb", "ci"]
class ldmsdi(base.ReadMemoryInstruction):
code = opcodes["LDMSDI"]
arg_format = tools.cycle(["sbw", "cb", "int"])
class stmsdi(base.WriteMemoryInstruction):
code = opcodes["STMSDI"]
arg_format = tools.cycle(["sb", "cb"])
class ldmsd(base.ReadMemoryInstruction):
code = opcodes["LDMSD"]
arg_format = tools.cycle(["sbw", "int", "int"])
class stmsd(base.WriteMemoryInstruction):
code = opcodes["STMSD"]
arg_format = tools.cycle(["sb", "int"])
class stmsdci(base.WriteMemoryInstruction):
code = opcodes["STMSDCI"]
arg_format = tools.cycle(["cb", "cb"])
class convsint(NonVectorInstruction1):
code = opcodes["CONVSINT"]
arg_format = ["int", "sbw", "ci"]
class convcint(NonVectorInstruction):
code = opcodes["CONVCINT"]
arg_format = ["cbw", "ci"]
class convcbit(NonVectorInstruction1):
code = opcodes["CONVCBIT"]
arg_format = ["ciw", "cb"]
@base.vectorize
class convcintvec(base.Instruction):
code = opcodes["CONVCINTVEC"]
arg_format = tools.chain(["c"], tools.cycle(["cbw"]))
class convcbitvec(BinaryVectorInstruction):
code = opcodes["CONVCBITVEC"]
arg_format = ["int", "ciw", "cb"]
def __init__(self, *args):
super(convcbitvec, self).__init__(*args)
assert args[2].n == args[0]
args[1].set_size(args[0])
class convcbit2s(BinaryVectorInstruction):
code = opcodes["CONVCBIT2S"]
arg_format = ["int", "sbw", "cb"]
@base.vectorize
class split(base.Instruction):
code = opcodes["SPLIT"]
arg_format = tools.chain(["int", "s"], tools.cycle(["sbw"]))
def __init__(self, *args, **kwargs):
super(split_class, self).__init__(*args, **kwargs)
assert (len(args) - 2) % args[0] == 0
class movsb(NonVectorInstruction):
code = opcodes["MOVSB"]
arg_format = ["sbw", "sb"]
class trans(base.VarArgsInstruction):
code = opcodes["TRANS"]
is_vec = lambda self: True
def __init__(self, *args):
self.arg_format = (
["int"] + ["sbw"] * args[0] + ["sb"] * (len(args) - 1 - args[0])
)
super(trans, self).__init__(*args)
class bitb(NonVectorInstruction):
code = opcodes["BITB"]
arg_format = ["sbw"]
class reveal(BinaryVectorInstruction, base.VarArgsInstruction, base.Mergeable):
code = opcodes["REVEAL"]
arg_format = tools.cycle(["int", "cbw", "sb"])
class inputb(base.DoNotEliminateInstruction, base.VarArgsInstruction):
__slots__ = []
code = opcodes["INPUTB"]
arg_format = tools.cycle(["p", "int", "int", "sbw"])
is_vec = lambda self: True
class inputbvec(
base.DoNotEliminateInstruction, base.VarArgsInstruction, base.Mergeable
):
__slots__ = []
code = opcodes["INPUTBVEC"]
def __init__(self, *args, **kwargs):
self.arg_format = []
i = 0
while i < len(args):
self.arg_format += ["int", "int", "p"] + ["sbw"] * (args[i] - 3)
i += args[i]
assert i == len(args)
super(inputbvec, self).__init__(*args, **kwargs)
def merge(self, other):
self.args += other.args
self.arg_format += other.arg_format
class print_regb(base.VectorInstruction, base.IOInstruction):
code = opcodes["PRINTREGB"]
arg_format = ["cb", "i"]
def __init__(self, reg, comment=""):
super(print_regb, self).__init__(reg, self.str_to_int(comment))
class print_reg_plainb(NonVectorInstruction, base.IOInstruction):
code = opcodes["PRINTREGPLAINB"]
arg_format = ["cb"]
class print_reg_signed(base.IOInstruction):
code = opcodes["PRINTREGSIGNED"]
arg_format = ["int", "cb"]
is_vec = lambda self: True
class print_float_plainb(base.IOInstruction):
__slots__ = []
code = opcodes["PRINTFLOATPLAINB"]
arg_format = ["cb", "cb", "cb", "cb", "cb"]
class cond_print_strb(base.IOInstruction):
r""" Print a 4 character string. """
code = opcodes["CONDPRINTSTRB"]
arg_format = ["cb", "int"]
def __init__(self, cond, val):
        super(cond_print_strb, self).__init__(cond, self.str_to_int(val))
| [
"mpspdz.tools.cycle",
"itertools.repeat"
] | [((2027, 2066), 'mpspdz.tools.cycle', 'tools.cycle', (["['int', 'sbw', 'sb', 'sb']"], {}), "(['int', 'sbw', 'sb', 'sb'])\n", (2038, 2066), True, 'import mpspdz.tools as tools\n'), ((2466, 2505), 'mpspdz.tools.cycle', 'tools.cycle', (["['int', 'sbw', 'sb', 'sb']"], {}), "(['int', 'sbw', 'sb', 'sb'])\n", (2477, 2505), True, 'import mpspdz.tools as tools\n'), ((2692, 2731), 'mpspdz.tools.cycle', 'tools.cycle', (["['int', 'sbw', 'sb', 'sb']"], {}), "(['int', 'sbw', 'sb', 'sb'])\n", (2703, 2731), True, 'import mpspdz.tools as tools\n'), ((5276, 5309), 'mpspdz.tools.cycle', 'tools.cycle', (["['sbw', 'cb', 'int']"], {}), "(['sbw', 'cb', 'int'])\n", (5287, 5309), True, 'import mpspdz.tools as tools\n'), ((5401, 5426), 'mpspdz.tools.cycle', 'tools.cycle', (["['sb', 'cb']"], {}), "(['sb', 'cb'])\n", (5412, 5426), True, 'import mpspdz.tools as tools\n'), ((5515, 5549), 'mpspdz.tools.cycle', 'tools.cycle', (["['sbw', 'int', 'int']"], {}), "(['sbw', 'int', 'int'])\n", (5526, 5549), True, 'import mpspdz.tools as tools\n'), ((5639, 5665), 'mpspdz.tools.cycle', 'tools.cycle', (["['sb', 'int']"], {}), "(['sb', 'int'])\n", (5650, 5665), True, 'import mpspdz.tools as tools\n'), ((5759, 5784), 'mpspdz.tools.cycle', 'tools.cycle', (["['cb', 'cb']"], {}), "(['cb', 'cb'])\n", (5770, 5784), True, 'import mpspdz.tools as tools\n'), ((7522, 7555), 'mpspdz.tools.cycle', 'tools.cycle', (["['int', 'cbw', 'sb']"], {}), "(['int', 'cbw', 'sb'])\n", (7533, 7555), True, 'import mpspdz.tools as tools\n'), ((7694, 7733), 'mpspdz.tools.cycle', 'tools.cycle', (["['p', 'int', 'int', 'sbw']"], {}), "(['p', 'int', 'int', 'sbw'])\n", (7705, 7733), True, 'import mpspdz.tools as tools\n'), ((3492, 3515), 'itertools.repeat', 'itertools.repeat', (['"""sbw"""'], {}), "('sbw')\n", (3508, 3515), False, 'import itertools\n'), ((3649, 3671), 'itertools.repeat', 'itertools.repeat', (['"""sb"""'], {}), "('sb')\n", (3665, 3671), False, 'import itertools\n'), ((3804, 3827), 'itertools.repeat', 'itertools.repeat', (['"""cbw"""'], {}), "('cbw')\n", (3820, 3827), False, 'import itertools\n'), ((6225, 6245), 'mpspdz.tools.cycle', 'tools.cycle', (["['cbw']"], {}), "(['cbw'])\n", (6236, 6245), True, 'import mpspdz.tools as tools\n'), ((6752, 6772), 'mpspdz.tools.cycle', 'tools.cycle', (["['sbw']"], {}), "(['sbw'])\n", (6763, 6772), True, 'import mpspdz.tools as tools\n')] |
# -*- coding: utf-8 -*-
import os
from collections import ChainMap, defaultdict
from enum import Enum
from pathlib import Path
from dotenv import load_dotenv
from loguru import logger
from starlette.datastructures import CommaSeparatedStrings, Secret
APP_LOCATION = Path(__file__).parents[1]
class ENVMAP(str, Enum):
env = ".env"
local = ".env.local"
development = ".env.dev"
production = ".env.prod"
settings = ChainMap(os.environ, defaultdict(lambda: None))
try:
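    # Load the base .env first to learn which ENV is active, then layer the
    # matching environment-specific file on top and rebuild the settings map.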
load_dotenv(str(APP_LOCATION / ENVMAP.env.value))
env_ = ENVMAP[settings.get("ENV", "development").lower()]
load_dotenv(str(APP_LOCATION / env_.value))
settings = ChainMap(os.environ, defaultdict(lambda: None))
except FileNotFoundError:
logger.warning("No environment file could be found. Skipping...")
class Config(object):
ENV = settings.get("ENV", "development")
SECRET_KEY = Secret(settings.get("SECRET_KEY", "its_a_secret_to_everybody"))
PROJECT_NAME = settings.get("PROJECT_NAME", "Pycloud")
ALLOWED_HOSTS = CommaSeparatedStrings(settings.get("ALLOWED_HOSTS", "*"))
API_V1_STR = "/api"
JWT_TOKEN_PREFIX = "Token"
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 7 # one week
AUTH0_DOMAIN = settings.get("AUTH0_DOMAIN")
AUTH0_API_AUDIENCE = settings.get("AUTH0_API_AUDIENCE")
AUTH0_CLIENT_ID = settings.get("AUTH0_CLIENT_ID")
AUTH0_CLIENT_SECRET = settings.get("AUTH0_CLIENT_SECRET")
AUTH0_CLIENT_SECRETS_JSON = settings.get("AUTH0_CLIENT_SECRETS_JSON")
AUTH0_SCOPE = settings.get("AUTH0_SCOPE", "openid profile email read:docs")
SWAP_TOKEN_ENDPOINT = "/swap_token"
SUCCESS_ROUTE = "/users/me"
ERROR_ROUTE = "/login_error"
AWS_ACCESS_KEY_ID = settings["AWS_ACCESS_KEY_ID"]
AWS_SECRET_ACCESS_KEY = settings["AWS_SECRET_ACCESS_KEY"]
AWS_S3_BUCKET_NAME = settings["AWS_S3_BUCKET_NAME"]
MONGO_URI = settings["MONGO_URI"] # deploying without docker-compose
MAX_CONNECTIONS_COUNT = int(settings.get("MAX_CONNECTIONS_COUNT", 10))
MIN_CONNECTIONS_COUNT = int(settings.get("MIN_CONNECTIONS_COUNT", 10))
SENTRY_DSN = settings["SENTRY_DSN"]
def get(self, item, default=None):
return getattr(self, item, default)
| [
"loguru.logger.warning",
"collections.defaultdict",
"pathlib.Path"
] | [((455, 481), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (466, 481), False, 'from collections import ChainMap, defaultdict\n'), ((268, 282), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (272, 282), False, 'from pathlib import Path\n'), ((689, 715), 'collections.defaultdict', 'defaultdict', (['(lambda : None)'], {}), '(lambda : None)\n', (700, 715), False, 'from collections import ChainMap, defaultdict\n'), ((747, 812), 'loguru.logger.warning', 'logger.warning', (['"""No environment file could be found. Skipping..."""'], {}), "('No environment file could be found. Skipping...')\n", (761, 812), False, 'from loguru import logger\n')] |
# coding=utf-8
"""
@author: xing
@contact: <EMAIL>
@file: config.py
@date: 2021/3/5 12:50
@desc:
"""
import json
import pathlib
import argparse
def init_config(config_path: str = "/root/data/config.json"):
if not pathlib.Path(config_path).is_file():
raise Exception(f"Configuration file not exist: {pathlib.Path(config_path).absolute().resolve()}")
with open(config_path, "r") as f:
return json.load(f)
class Hparams_Evaluate:
parser = argparse.ArgumentParser()
parser.add_argument('--testsuite', type=str, default=r"/root/data/mutation_result", help='path to the testsuite')
    parser.add_argument('--log_save_dir', type=str, default=r"/root/data/mutation_result/log", help='directory to save evaluation logs')
parser.add_argument('--clear_classifier', default=False, type=bool, help='empty the classifier database')
class Hparams_Coverage:
parser = argparse.ArgumentParser()
parser.add_argument('--fuzzer', type=str, default="comfort", help="select a fuzzer from [comfort, die, fuzzilli, montage, deepsmith, codealchemist]")
parser.add_argument('--reporter_dir', type=str, default="/root/data/codeCoverage/coverageReporters", help="Saved path of coverage results")
parser.add_argument('--coverage_files', type=str, default=r"", help='path to the coverageFiles')
class Hparams_Reduce:
parser = argparse.ArgumentParser()
parser.add_argument('--file_path', type=str, default=r"/root/data/testcases/testcase.js", help='path to the reduce file')
parser.add_argument('--file_dir', type=str, default=r"/root/data/interesting_testcases", help='directory to the reduce file')
| [
"json.load",
"argparse.ArgumentParser",
"pathlib.Path"
] | [((471, 496), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (494, 496), False, 'import argparse\n'), ((892, 917), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (915, 917), False, 'import argparse\n'), ((1358, 1383), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1381, 1383), False, 'import argparse\n'), ((419, 431), 'json.load', 'json.load', (['f'], {}), '(f)\n', (428, 431), False, 'import json\n'), ((222, 247), 'pathlib.Path', 'pathlib.Path', (['config_path'], {}), '(config_path)\n', (234, 247), False, 'import pathlib\n'), ((316, 341), 'pathlib.Path', 'pathlib.Path', (['config_path'], {}), '(config_path)\n', (328, 341), False, 'import pathlib\n')] |
#!/usr/bin/env python
from typing import List, Tuple
from bisect import bisect_left
"""
Code for https://leetcode.com/problems/search-in-rotated-sorted-array/
"""
def search(nums: List[int], target: int) -> int:
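    # Locate the rotation pivot, then binary-search whichever sorted half
    # could contain the target.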
pivot_idx = find_pivot(nums, 0, len(nums) - 1)
if pivot_idx == -1:
return binary_search(nums, target, 0, len(nums) - 1)
if nums[pivot_idx] == target:
return pivot_idx
if nums[0] <= target:
return binary_search(nums, target, 0, pivot_idx - 1)
return binary_search(nums, target, pivot_idx + 1, len(nums) - 1)
def binary_search(a: List[int], x: int, lo: int, hi: int) -> int:
idx = bisect_left(a, x, lo, hi)
return idx if idx != len(a) and a[idx] == x else -1
def find_pivot(nums: List[int], lo: int, hi: int) -> int:
"""
Find index of pivot element if nums is indeed rotated, else return -1
"""
# Base cases to prevent endless recursion
if lo > hi:
return -1
if lo == hi:
return lo
mid = (lo + hi) // 2
if mid < hi and nums[mid] > nums[mid + 1]:
return mid
if mid > lo and nums[mid] < nums[mid - 1]:
return mid - 1
if nums[lo] >= nums[mid]:
return find_pivot(nums, lo, mid - 1)
return find_pivot(nums, mid + 1, hi)
def main():
xs = [3, 1]
xs2 = [4, 5, 6, 7, 0, 1, 2]
xs3 = [6, 7, 1, 2, 3, 4, 5]
result = search(xs, 3)
result2 = search(xs2, 0)
result3 = search(xs3, 6)
print(result)
print(result2)
print(result3)
if __name__ == "__main__":
main()
| [
"bisect.bisect_left"
] | [((646, 671), 'bisect.bisect_left', 'bisect_left', (['a', 'x', 'lo', 'hi'], {}), '(a, x, lo, hi)\n', (657, 671), False, 'from bisect import bisect_left\n')] |
from django.utils.translation import ugettext_lazy as _
from wagtail.core.fields import StreamField
import graphene_django
from wagtail_graphql.types.streamfields import StreamFieldSerializer
def create_model_type(model, fields, meta_attrs=None):
"""
Create a generic GraphQL type for a Django model.
:param model: Django model.
:param fields: A list of :class:`wagtail_graphql.models.GraphQLField`
instances to be used on the type.
:param meta_attrs: Additional meta attributes to be passed to the new
GraphQL object type.
"""
attrs = {}
new_meta_attrs = {
'model': model,
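        # Falls back to exposing only the primary key when no fields are registered.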
'only_fields': (
tuple(field.name for field in fields) or (model._meta.pk.name, )
),
'description': (
_('Auto-generated GraphQL type for the "%s" model of app "%s".') %
(model.__name__, model._meta.app_label)
),
}
if meta_attrs is not None:
new_meta_attrs.update(meta_attrs)
# Set custom field types and resolve functions
for field in fields:
if field.graphql_type is not None:
attrs[field.name] = field.graphql_type
if field.resolve_func is not None:
attrs[f'resolve_{field.name}'] = field.resolve_func
else:
# Set a custom resolve function for stream fields
if isinstance(model._meta.get_field(field.name), StreamField):
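                # The factory captures ``name`` per field, avoiding the
                # late-binding closure pitfall inside the loop.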
def resolve_stream_field(name):
def inner(self, info, **kwargs):
init_kwargs = {
'request': info.context,
'absolute_urls': kwargs.get('absolute', True),
}
serializer = StreamFieldSerializer(**init_kwargs)
return serializer.serialize(getattr(self, name))
return inner
attrs[f'resolve_{field.name}'] = resolve_stream_field(
field.name
)
meta = type('Meta', tuple(), new_meta_attrs)
attrs['Meta'] = meta
return type(
f'{model._meta.app_label.capitalize()}{model.__name__}ObjectType',
(graphene_django.DjangoObjectType, ), attrs
)
| [
"django.utils.translation.ugettext_lazy",
"wagtail_graphql.types.streamfields.StreamFieldSerializer"
] | [((813, 877), 'django.utils.translation.ugettext_lazy', '_', (['"""Auto-generated GraphQL type for the "%s" model of app "%s"."""'], {}), '(\'Auto-generated GraphQL type for the "%s" model of app "%s".\')\n', (814, 877), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1785, 1821), 'wagtail_graphql.types.streamfields.StreamFieldSerializer', 'StreamFieldSerializer', ([], {}), '(**init_kwargs)\n', (1806, 1821), False, 'from wagtail_graphql.types.streamfields import StreamFieldSerializer\n')] |