code (string, 22-1.05M chars) | apis (list, 1-3.31k items) | extract_api (string, 75-3.25M chars)
---|---|---|
import json
class TrainingSpecification:
template = """
{
"TrainingSpecification": {
"TrainingImage": "IMAGE_REPLACE_ME",
"SupportedHyperParameters": [
{
"Description": "Grow a tree with max_leaf_nodes in best-first fashion. Best nodes are defined as relative reduction in impurity. If None then unlimited number of leaf nodes",
"Name": "max_leaf_nodes",
"Type": "Integer",
"Range": {
"IntegerParameterRangeSpecification": {
"MinValue": "1",
"MaxValue": "100000"
}
},
"IsTunable": true,
"IsRequired": false,
"DefaultValue": "100"
}
],
"SupportedTrainingInstanceTypes": INSTANCES_REPLACE_ME,
"SupportsDistributedTraining": false,
"MetricDefinitions": METRICS_REPLACE_ME,
"TrainingChannels": CHANNELS_REPLACE_ME,
"SupportedTuningJobObjectiveMetrics": TUNING_OBJECTIVES_REPLACE_ME
}
}
"""
def get_training_specification_dict(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
return json.loads(
self.get_training_specification_json(
ecr_image,
supports_gpu,
supported_channels,
supported_metrics,
supported_tuning_job_objective_metrics,
)
)
def get_training_specification_json(
self,
ecr_image,
supports_gpu,
supported_channels=None,
supported_metrics=None,
supported_tuning_job_objective_metrics=None,
):
if supported_channels is None:
print("Please provide at least one supported channel")
raise ValueError("Please provide at least one supported channel")
if supported_metrics is None:
supported_metrics = []
if supported_tuning_job_objective_metrics is None:
supported_tuning_job_objective_metrics = []
return (
self.template.replace("IMAGE_REPLACE_ME", ecr_image)
.replace("INSTANCES_REPLACE_ME", self.get_supported_instances(supports_gpu))
.replace(
"CHANNELS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_channels], indent=4, sort_keys=True),
)
.replace(
"METRICS_REPLACE_ME",
json.dumps([ob.__dict__ for ob in supported_metrics], indent=4, sort_keys=True),
)
.replace(
"TUNING_OBJECTIVES_REPLACE_ME",
json.dumps(
[ob.__dict__ for ob in supported_tuning_job_objective_metrics],
indent=4,
sort_keys=True,
),
)
)
@staticmethod
def get_supported_instances(supports_gpu):
cpu_list = [
"ml.m4.xlarge",
"ml.m4.2xlarge",
"ml.m4.4xlarge",
"ml.m4.10xlarge",
"ml.m4.16xlarge",
"ml.m5.large",
"ml.m5.xlarge",
"ml.m5.2xlarge",
"ml.m5.4xlarge",
"ml.m5.12xlarge",
"ml.m5.24xlarge",
"ml.c4.xlarge",
"ml.c4.2xlarge",
"ml.c4.4xlarge",
"ml.c4.8xlarge",
"ml.c5.xlarge",
"ml.c5.2xlarge",
"ml.c5.4xlarge",
"ml.c5.9xlarge",
"ml.c5.18xlarge",
]
gpu_list = [
"ml.p2.xlarge",
"ml.p2.8xlarge",
"ml.p2.16xlarge",
"ml.p3.2xlarge",
"ml.p3.8xlarge",
"ml.p3.16xlarge",
]
list_to_return = cpu_list
if supports_gpu:
list_to_return = cpu_list + gpu_list
return json.dumps(list_to_return)
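# Hedged usage sketch (added comment, not part of the original module): the
# channel, metric, and tuning-objective arguments are expected to be plain
# objects whose __dict__ holds the JSON fields, since the methods above
# serialize them with ob.__dict__. The TrainingChannel class and the values
# below are illustrative assumptions, not part of this file.
#
#   class TrainingChannel:
#       def __init__(self, Name, Description, SupportedContentTypes,
#                    SupportedInputModes, IsRequired):
#           self.Name = Name
#           self.Description = Description
#           self.SupportedContentTypes = SupportedContentTypes
#           self.SupportedInputModes = SupportedInputModes
#           self.IsRequired = IsRequired
#
#   spec_json = TrainingSpecification().get_training_specification_json(
#       ecr_image="123456789012.dkr.ecr.us-east-1.amazonaws.com/my-algo:latest",
#       supports_gpu=False,
#       supported_channels=[TrainingChannel("train", "training data",
#                                           ["text/csv"], ["File"], True)],
#   )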
|
[
"json.dumps"
] |
[((3938, 3964), 'json.dumps', 'json.dumps', (['list_to_return'], {}), '(list_to_return)\n', (3948, 3964), False, 'import json\n'), ((2725, 2829), 'json.dumps', 'json.dumps', (['[ob.__dict__ for ob in supported_tuning_job_objective_metrics]'], {'indent': '(4)', 'sort_keys': '(True)'}), '([ob.__dict__ for ob in supported_tuning_job_objective_metrics],\n indent=4, sort_keys=True)\n', (2735, 2829), False, 'import json\n'), ((2544, 2623), 'json.dumps', 'json.dumps', (['[ob.__dict__ for ob in supported_metrics]'], {'indent': '(4)', 'sort_keys': '(True)'}), '([ob.__dict__ for ob in supported_metrics], indent=4, sort_keys=True)\n', (2554, 2623), False, 'import json\n'), ((2372, 2457), 'json.dumps', 'json.dumps', (['[ob.__dict__ for ob in supported_channels]'], {'indent': '(4)', 'sort_keys': '(True)'}), '([ob.__dict__ for ob in supported_channels], indent=4, sort_keys=True\n )\n', (2382, 2457), False, 'import json\n')]
|
import sys
import tkinter
from .customtkinter_tk import CTk
from .customtkinter_frame import CTkFrame
from .appearance_mode_tracker import AppearanceModeTracker
from .customtkinter_color_manager import CTkColorManager
class CTkProgressBar(tkinter.Frame):
""" tkinter custom progressbar, always horizontal, values are from 0 to 1 """
def __init__(self, *args,
variable=None,
bg_color=None,
border_color="CTkColorManager",
fg_color="CTkColorManager",
progress_color="CTkColorManager",
width=160,
height=10,
border_width=0,
**kwargs):
super().__init__(*args, **kwargs)
# overwrite configure methods of master when master is tkinter widget, so that bg changes get applied on child CTk widget too
if isinstance(self.master, (tkinter.Tk, tkinter.Frame)) and not isinstance(self.master, (CTk, CTkFrame)):
master_old_configure = self.master.config
def new_configure(*args, **kwargs):
if "bg" in kwargs:
self.configure(bg_color=kwargs["bg"])
elif "background" in kwargs:
self.configure(bg_color=kwargs["background"])
                # args[0] is a dict when an attribute gets changed by the widget[<attribute>] syntax
elif len(args) > 0 and type(args[0]) == dict:
if "bg" in args[0]:
self.configure(bg_color=args[0]["bg"])
elif "background" in args[0]:
self.configure(bg_color=args[0]["background"])
master_old_configure(*args, **kwargs)
self.master.config = new_configure
self.master.configure = new_configure
AppearanceModeTracker.add(self.change_appearance_mode, self)
self.appearance_mode = AppearanceModeTracker.get_mode() # 0: "Light" 1: "Dark"
self.bg_color = self.detect_color_of_master() if bg_color is None else bg_color
self.border_color = CTkColorManager.PROGRESS_BG if border_color == "CTkColorManager" else border_color
self.fg_color = CTkColorManager.PROGRESS_BG if fg_color == "CTkColorManager" else fg_color
self.progress_color = CTkColorManager.MAIN if progress_color == "CTkColorManager" else progress_color
self.variable = variable
self.variable_callback_blocked = False
self.variabel_callback_name = None
self.width = width
self.height = self.calc_optimal_height(height)
self.border_width = round(border_width)
self.value = 0.5
self.configure(width=self.width, height=self.height)
self.canvas = tkinter.Canvas(master=self,
highlightthicknes=0,
width=self.width,
height=self.height)
self.canvas.place(x=0, y=0)
# Each time an item is resized due to pack position mode, the binding Configure is called on the widget
self.bind('<Configure>', self.update_dimensions)
self.draw() # initial draw
if self.variable is not None:
self.variabel_callback_name = self.variable.trace_add("write", self.variable_callback)
self.variable_callback_blocked = True
self.set(self.variable.get(), from_variable_callback=True)
self.variable_callback_blocked = False
def destroy(self):
AppearanceModeTracker.remove(self.change_appearance_mode)
if self.variable is not None:
self.variable.trace_remove("write", self.variabel_callback_name)
super().destroy()
def detect_color_of_master(self):
if isinstance(self.master, CTkFrame):
return self.master.fg_color
else:
return self.master.cget("bg")
@staticmethod
def calc_optimal_height(user_height):
if sys.platform == "darwin":
return user_height # on macOS just use given value (canvas has Antialiasing)
else:
            # make sure the value is always uneven (odd) for better rendering of the ovals
if user_height == 0:
return 0
elif user_height % 2 == 0:
return user_height + 1
else:
return user_height
def update_dimensions(self, event):
# only redraw if dimensions changed (for performance)
if self.width != event.width or self.height != event.height:
self.width = event.width
self.height = event.height
self.draw()
def draw(self, no_color_updates=False):
# decide the drawing method
if sys.platform == "darwin":
# on macOS draw button with polygons (positions are more accurate, macOS has Antialiasing)
self.draw_with_polygon_shapes()
else:
# on Windows and other draw with ovals (corner_radius can be optimised to look better than with polygons)
self.draw_with_ovals_and_rects()
if no_color_updates is False:
self.canvas.configure(bg=CTkColorManager.single_color(self.bg_color, self.appearance_mode))
self.canvas.itemconfig("border_parts", fill=CTkColorManager.single_color(self.border_color, self.appearance_mode))
self.canvas.itemconfig("inner_parts", fill=CTkColorManager.single_color(self.fg_color, self.appearance_mode))
self.canvas.itemconfig("progress_parts", fill=CTkColorManager.single_color(self.progress_color, self.appearance_mode))
def draw_with_polygon_shapes(self):
""" draw the progress bar parts with just three polygons that have a rounded border """
coordinate_shift = -1
width_reduced = -1
# create border button parts (only if border exists)
if self.border_width > 0:
if not self.canvas.find_withtag("border_parts"):
self.canvas.create_line((0, 0, 0, 0), tags=("border_line_1", "border_parts"))
self.canvas.coords("border_line_1",
(self.height / 2,
self.height / 2,
self.width - self.height / 2 + coordinate_shift,
self.height / 2))
self.canvas.itemconfig("border_line_1",
capstyle=tkinter.ROUND,
width=self.height + width_reduced)
self.canvas.lower("border_parts")
# create inner button parts
if not self.canvas.find_withtag("inner_parts"):
self.canvas.create_line((0, 0, 0, 0), tags=("inner_line_1", "inner_parts"))
self.canvas.coords("inner_line_1",
(self.height / 2,
self.height / 2,
self.width - self.height / 2 + coordinate_shift,
self.height / 2))
self.canvas.itemconfig("inner_line_1",
capstyle=tkinter.ROUND,
width=self.height - self.border_width * 2 + width_reduced)
# progress parts
if not self.canvas.find_withtag("progress_parts"):
self.canvas.create_line((0, 0, 0, 0), tags=("progress_line_1", "progress_parts"))
self.canvas.coords("progress_line_1",
(self.height / 2,
self.height / 2,
self.height / 2 + (self.width + coordinate_shift - self.height) * self.value,
self.height / 2))
self.canvas.itemconfig("progress_line_1",
capstyle=tkinter.ROUND,
width=self.height - self.border_width * 2 + width_reduced)
def draw_with_ovals_and_rects(self):
""" draw the progress bar parts with ovals and rectangles """
if sys.platform == "darwin":
oval_bottom_right_shift = 0
rect_bottom_right_shift = 0
else:
# ovals and rects are always rendered too large on Windows and need to be made smaller by -1
oval_bottom_right_shift = -1
rect_bottom_right_shift = -0
# frame_border
if self.border_width > 0:
if not self.canvas.find_withtag("border_parts"):
self.canvas.create_oval((0, 0, 0, 0), tags=("border_oval_1", "border_parts"), width=0)
self.canvas.create_rectangle((0, 0, 0, 0), tags=("border_rect_1", "border_parts"), width=0)
self.canvas.create_oval((0, 0, 0, 0), tags=("border_oval_2", "border_parts"), width=0)
self.canvas.coords("border_oval_1", (0,
0,
self.height + oval_bottom_right_shift,
self.height + oval_bottom_right_shift))
self.canvas.coords("border_rect_1", (self.height/2,
0,
self.width-(self.height/2) + rect_bottom_right_shift,
self.height + rect_bottom_right_shift))
self.canvas.coords("border_oval_2", (self.width-self.height,
0,
self.width + oval_bottom_right_shift,
self.height + oval_bottom_right_shift))
# foreground
if not self.canvas.find_withtag("inner_parts"):
self.canvas.create_oval((0, 0, 0, 0), tags=("inner_oval_1", "inner_parts"), width=0)
self.canvas.create_rectangle((0, 0, 0, 0), tags=("inner_rect_1", "inner_parts"), width=0)
self.canvas.create_oval((0, 0, 0, 0), tags=("inner_oval_2", "inner_parts"), width=0)
self.canvas.coords("inner_oval_1", (self.border_width,
self.border_width,
self.height-self.border_width + oval_bottom_right_shift,
self.height-self.border_width + oval_bottom_right_shift))
self.canvas.coords("inner_rect_1", (self.height/2,
self.border_width,
self.width-(self.height/2 + rect_bottom_right_shift),
self.height-self.border_width + rect_bottom_right_shift))
self.canvas.coords("inner_oval_2", (self.width-self.height+self.border_width,
self.border_width,
self.width-self.border_width + oval_bottom_right_shift,
self.height-self.border_width + oval_bottom_right_shift))
# progress parts
if not self.canvas.find_withtag("progress_parts"):
self.canvas.create_oval((0, 0, 0, 0), tags=("progress_oval_1", "progress_parts"), width=0)
self.canvas.create_rectangle((0, 0, 0, 0), tags=("progress_rect_1", "progress_parts"), width=0)
self.canvas.create_oval((0, 0, 0, 0), tags=("progress_oval_2", "progress_parts"), width=0)
self.canvas.coords("progress_oval_1", (self.border_width,
self.border_width,
self.height - self.border_width + oval_bottom_right_shift,
self.height - self.border_width + oval_bottom_right_shift))
self.canvas.coords("progress_rect_1", (self.height / 2,
self.border_width,
self.height / 2 + (self.width - self.height) * self.value + rect_bottom_right_shift,
self.height - self.border_width + rect_bottom_right_shift))
self.canvas.coords("progress_oval_2",
(self.height / 2 + (self.width - self.height) * self.value - self.height / 2 + self.border_width,
self.border_width,
self.height / 2 + (self.width - self.height) * self.value + self.height / 2 - self.border_width + oval_bottom_right_shift,
self.height - self.border_width + oval_bottom_right_shift))
def configure(self, *args, **kwargs):
require_redraw = False # some attribute changes require a call of self.draw() at the end
if "bg_color" in kwargs:
self.bg_color = kwargs["bg_color"]
del kwargs["bg_color"]
require_redraw = True
if "fg_color" in kwargs:
self.fg_color = kwargs["fg_color"]
del kwargs["fg_color"]
require_redraw = True
if "border_color" in kwargs:
self.border_color = kwargs["border_color"]
del kwargs["border_color"]
require_redraw = True
if "progress_color" in kwargs:
self.progress_color = kwargs["progress_color"]
del kwargs["progress_color"]
require_redraw = True
if "border_width" in kwargs:
self.border_width = kwargs["border_width"]
del kwargs["border_width"]
require_redraw = True
if "variable" in kwargs:
if self.variable is not None:
self.variable.trace_remove("write", self.variabel_callback_name)
self.variable = kwargs["variable"]
if self.variable is not None and self.variable != "":
self.variabel_callback_name = self.variable.trace_add("write", self.variable_callback)
self.set(self.variable.get(), from_variable_callback=True)
else:
self.variable = None
del kwargs["variable"]
super().configure(*args, **kwargs)
if require_redraw is True:
self.draw()
def variable_callback(self, var_name, index, mode):
if not self.variable_callback_blocked:
self.set(self.variable.get(), from_variable_callback=True)
def set(self, value, from_variable_callback=False):
self.value = value
if self.value > 1:
self.value = 1
elif self.value < 0:
self.value = 0
self.draw(no_color_updates=True)
if self.variable is not None and not from_variable_callback:
self.variable_callback_blocked = True
self.variable.set(round(self.value) if isinstance(self.variable, tkinter.IntVar) else self.value)
self.variable_callback_blocked = False
def change_appearance_mode(self, mode_string):
if mode_string.lower() == "dark":
self.appearance_mode = 1
elif mode_string.lower() == "light":
self.appearance_mode = 0
if isinstance(self.master, CTkFrame):
self.bg_color = self.master.fg_color
else:
self.bg_color = self.master.cget("bg")
self.draw()
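# Hedged usage sketch (added comment, not part of the original file), assuming
# the CTk root window class imported at the top of this module:
#
#   app = CTk()
#   progressbar = CTkProgressBar(master=app, width=200, height=12)
#   progressbar.pack(pady=10)
#   progressbar.set(0.75)   # progress value between 0 and 1
#   app.mainloop()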
|
[
"tkinter.Canvas"
] |
[((2757, 2848), 'tkinter.Canvas', 'tkinter.Canvas', ([], {'master': 'self', 'highlightthicknes': '(0)', 'width': 'self.width', 'height': 'self.height'}), '(master=self, highlightthicknes=0, width=self.width, height=\n self.height)\n', (2771, 2848), False, 'import tkinter\n')]
|
import pytest
from backtest.strategy import BuyAndHoldEqualAllocation
@pytest.fixture
def strategy():
symbols = ('AAPL', 'GOOG')
strategy = BuyAndHoldEqualAllocation(relevant_symbols=symbols)
return strategy
def test_strategy_execute(strategy):
strategy.execute()
assert len(strategy.holdings) > 0
assert len(strategy.trades) > 0
def test_holdings_at(strategy):
strategy.execute()
    assert (strategy._holdings_at('2018-05-05') == {})
assert (strategy._holdings_at('2021-05-06') == {'AAPL': 7466})
assert (strategy._holdings_at('2021-05-07') == {'AAPL': 3862, 'GOOG': 209})
assert (strategy._holdings_at('2021-05-08') == {'AAPL': 3862, 'GOOG': 209})
|
[
"backtest.strategy.BuyAndHoldEqualAllocation"
] |
[((150, 201), 'backtest.strategy.BuyAndHoldEqualAllocation', 'BuyAndHoldEqualAllocation', ([], {'relevant_symbols': 'symbols'}), '(relevant_symbols=symbols)\n', (175, 201), False, 'from backtest.strategy import BuyAndHoldEqualAllocation\n')]
|
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import onnx
from ..base import Base
from . import expect
class Constant(Base):
@staticmethod
def export(): # type: () -> None
values = np.random.randn(5, 5).astype(np.float32)
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['values'],
value=onnx.helper.make_tensor(
name='const_tensor',
data_type=onnx.TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float),
),
)
expect(node, inputs=[], outputs=[values],
name='test_constant')
|
[
"numpy.random.randn"
] |
[((364, 385), 'numpy.random.randn', 'np.random.randn', (['(5)', '(5)'], {}), '(5, 5)\n', (379, 385), True, 'import numpy as np\n')]
|
"""
Date: 2021/09/23
Target: config utilities for yml file.
implementation adapted from Slimmable: https://github.com/JiahuiYu/slimmable_networks.git
"""
import os
import yaml
class LoaderMeta(type):
"""
Constructor for supporting `!include`.
"""
def __new__(mcs, __name__, __bases__, __dict__):
"""Add include constructer to class."""
# register the include constructor on the class
cls = super().__new__(mcs, __name__, __bases__, __dict__)
cls.add_constructor('!include', cls.construct_include)
return cls
class Loader(yaml.Loader, metaclass=LoaderMeta):
"""
YAML Loader with `!include` constructor.
"""
def __init__(self, stream):
try:
self._root = os.path.split(stream.name)[0]
except AttributeError:
self._root = os.path.curdir
super().__init__(stream)
def construct_include(self, node):
"""
Include file referenced at node.
"""
filename = os.path.abspath(
os.path.join(self._root, self.construct_scalar(node)))
extension = os.path.splitext(filename)[1].lstrip('.')
with open(filename, 'r') as f:
if extension in ('yaml', 'yml'):
return yaml.load(f, Loader)
else:
return ''.join(f.readlines())
class AttrDict(dict):
"""
Dict as attribute trick.
"""
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, dict):
self.__dict__[key] = AttrDict(value)
elif isinstance(value, list):
if isinstance(value[0], dict):
self.__dict__[key] = [AttrDict(item) for item in value]
else:
self.__dict__[key] = value
def yaml(self):
"""
Convert object to yaml dict and return.
"""
yaml_dict = {}
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
yaml_dict[key] = value.yaml()
elif isinstance(value, list):
if isinstance(value[0], AttrDict):
new_l = []
for item in value:
new_l.append(item.yaml())
yaml_dict[key] = new_l
else:
yaml_dict[key] = value
else:
yaml_dict[key] = value
return yaml_dict
def __repr__(self):
"""
Print all variables.
"""
ret_str = []
for key in self.__dict__:
value = self.__dict__[key]
if isinstance(value, AttrDict):
ret_str.append('{}:'.format(key))
child_ret_str = value.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' ' + item)
elif isinstance(value, list):
if isinstance(value[0], AttrDict):
ret_str.append('{}:'.format(key))
for item in value:
# treat as AttrDict above
child_ret_str = item.__repr__().split('\n')
for item in child_ret_str:
ret_str.append(' ' + item)
else:
ret_str.append('{}: {}'.format(key, value))
else:
ret_str.append('{}: {}'.format(key, value))
return '\n'.join(ret_str)
class Config(AttrDict):
def __init__(self, filename=None):
try:
with open(filename, 'r') as f:
cfg_dict = yaml.load(f, Loader)
except EnvironmentError:
            print('Please check the file with the name "%s"' % filename)
super(Config, self).__init__(cfg_dict)
def get_config(config_file):
assert os.path.exists(config_file), 'File {} not exist.'.format(config_file)
return Config(config_file)
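# Hedged usage sketch (added comment, not part of the original module): a YAML
# file can pull in another file through the `!include` constructor registered
# above, and the resulting Config exposes keys as attributes. The file names
# and keys below are illustrative assumptions.
#
#   # base.yml
#   #   batch_size: 64
#   #   model: !include model.yml
#
#   cfg = get_config('base.yml')
#   print(cfg.batch_size)   # 64
#   print(cfg.model)        # contents of model.yml, wrapped as an AttrDict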
|
[
"os.path.exists",
"os.path.splitext",
"yaml.load",
"os.path.split"
] |
[((4014, 4041), 'os.path.exists', 'os.path.exists', (['config_file'], {}), '(config_file)\n', (4028, 4041), False, 'import os\n'), ((749, 775), 'os.path.split', 'os.path.split', (['stream.name'], {}), '(stream.name)\n', (762, 775), False, 'import os\n'), ((1260, 1280), 'yaml.load', 'yaml.load', (['f', 'Loader'], {}), '(f, Loader)\n', (1269, 1280), False, 'import yaml\n'), ((3801, 3821), 'yaml.load', 'yaml.load', (['f', 'Loader'], {}), '(f, Loader)\n', (3810, 3821), False, 'import yaml\n'), ((1111, 1137), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1127, 1137), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DRAC Management Driver
"""
from oslo.utils import excutils
from oslo.utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.drivers import base
from ironic.drivers.modules.drac import common as drac_common
from ironic.drivers.modules.drac import resource_uris
from ironic.openstack.common import log as logging
pywsman = importutils.try_import('pywsman')
LOG = logging.getLogger(__name__)
_BOOT_DEVICES_MAP = {
boot_devices.DISK: 'HardDisk',
boot_devices.PXE: 'NIC',
boot_devices.CDROM: 'Optical',
}
# IsNext constants
PERSISTENT = '1'
""" Is the next boot config the system will use. """
NOT_NEXT = '2'
""" Is not the next boot config the system will use. """
ONE_TIME_BOOT = '3'
""" Is the next boot config the system will use, one time boot only. """
def _get_next_boot_mode(node):
"""Get the next boot mode.
To see a list of supported boot modes see: http://goo.gl/aEsvUH
(Section 7.2)
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:returns: a dictionary containing:
:instance_id: the instance id of the boot device.
:is_next: whether it's the next device to boot or not. One of
PERSISTENT, NOT_NEXT, ONE_TIME_BOOT constants.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
filter_query = ('select * from DCIM_BootConfigSetting where IsNext=%s '
'or IsNext=%s' % (PERSISTENT, ONE_TIME_BOOT))
try:
doc = client.wsman_enumerate(resource_uris.DCIM_BootConfigSetting,
options, filter_query=filter_query)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to get next boot mode for '
'node %(node_uuid)s. Reason: %(error)s.'),
{'node_uuid': node.uuid, 'error': exc})
items = drac_common.find_xml(doc, 'DCIM_BootConfigSetting',
resource_uris.DCIM_BootConfigSetting,
find_all=True)
# This list will have 2 items maximum, one for the persistent element
# and another one for the OneTime if set
boot_mode = None
for i in items:
instance_id = drac_common.find_xml(i, 'InstanceID',
resource_uris.DCIM_BootConfigSetting).text
is_next = drac_common.find_xml(i, 'IsNext',
resource_uris.DCIM_BootConfigSetting).text
boot_mode = {'instance_id': instance_id, 'is_next': is_next}
# If OneTime is set we should return it, because that's
# where the next boot device is
if is_next == ONE_TIME_BOOT:
break
return boot_mode
def _create_config_job(node):
"""Create a configuration job.
This method is used to apply the pending values created by
set_boot_device().
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:raises: DracConfigJobCreationError on an error when creating the job.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
options.add_selector('CreationClassName', 'DCIM_BIOSService')
options.add_selector('Name', 'DCIM:BIOSService')
options.add_selector('SystemCreationClassName', 'DCIM_ComputerSystem')
options.add_selector('SystemName', 'DCIM:ComputerSystem')
options.add_property('Target', 'BIOS.Setup.1-1')
options.add_property('ScheduledStartTime', 'TIME_NOW')
doc = client.wsman_invoke(resource_uris.DCIM_BIOSService,
options, 'CreateTargetedConfigJob')
return_value = drac_common.find_xml(doc, 'ReturnValue',
resource_uris.DCIM_BIOSService).text
# NOTE(lucasagomes): Possible return values are: RET_ERROR for error
# or RET_CREATED job created (but changes will be
# applied after the reboot)
# Boot Management Documentation: http://goo.gl/aEsvUH (Section 8.4)
if return_value == drac_common.RET_ERROR:
error_message = drac_common.find_xml(doc, 'Message',
resource_uris.DCIM_BIOSService).text
raise exception.DracConfigJobCreationError(error=error_message)
def _check_for_config_job(node):
"""Check if a configuration job is already created.
:param node: an ironic node object.
:raises: DracClientError on an error from pywsman library.
:raises: DracConfigJobCreationError if the job is already created.
"""
client = drac_common.get_wsman_client(node)
options = pywsman.ClientOptions()
try:
doc = client.wsman_enumerate(resource_uris.DCIM_LifecycleJob, options)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to list the configuration jobs '
'for node %(node_uuid)s. Reason: %(error)s.'),
{'node_uuid': node.uuid, 'error': exc})
items = drac_common.find_xml(doc, 'DCIM_LifecycleJob',
resource_uris.DCIM_LifecycleJob,
find_all=True)
for i in items:
name = drac_common.find_xml(i, 'Name', resource_uris.DCIM_LifecycleJob)
if 'BIOS.Setup.1-1' not in name.text:
continue
job_status = drac_common.find_xml(i, 'JobStatus',
resource_uris.DCIM_LifecycleJob).text
# If job is already completed or failed we can
# create another one.
# Job Control Documentation: http://goo.gl/o1dDD3 (Section 7.2.3.2)
if job_status.lower() not in ('completed', 'failed'):
job_id = drac_common.find_xml(i, 'InstanceID',
resource_uris.DCIM_LifecycleJob).text
reason = (_('Another job with ID "%s" is already created '
'to configure the BIOS. Wait until existing job '
'is completed or is cancelled') % job_id)
raise exception.DracConfigJobCreationError(error=reason)
class DracManagement(base.ManagementInterface):
def get_properties(self):
return drac_common.COMMON_PROPERTIES
def validate(self, task):
"""Validate the driver-specific info supplied.
This method validates whether the 'driver_info' property of the
supplied node contains the required information for this driver to
manage the node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if required driver_info attribute
is missing or invalid on the node.
"""
return drac_common.parse_driver_info(task.node)
def get_supported_boot_devices(self):
"""Get a list of the supported boot devices.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(_BOOT_DEVICES_MAP.keys())
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for a node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
:raises: DracClientError on an error from pywsman library.
:raises: InvalidParameterValue if an invalid boot device is
specified.
:raises: DracConfigJobCreationError on an error when creating the job.
"""
# Check for an existing configuration job
_check_for_config_job(task.node)
client = drac_common.get_wsman_client(task.node)
options = pywsman.ClientOptions()
filter_query = ("select * from DCIM_BootSourceSetting where "
"InstanceID like '%%#%s%%'" %
_BOOT_DEVICES_MAP[device])
try:
doc = client.wsman_enumerate(resource_uris.DCIM_BootSourceSetting,
options, filter_query=filter_query)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to set the boot device '
'for node %(node_uuid)s. Can\'t find the ID '
'for the %(device)s type. Reason: %(error)s.'),
{'node_uuid': task.node.uuid, 'error': exc,
'device': device})
instance_id = drac_common.find_xml(doc, 'InstanceID',
resource_uris.DCIM_BootSourceSetting).text
source = 'OneTime'
if persistent:
source = drac_common.find_xml(doc, 'BootSourceType',
resource_uris.DCIM_BootSourceSetting).text
# NOTE(lucasagomes): Don't ask me why 'BootSourceType' is set
# for 'InstanceID' and 'InstanceID' is set for 'source'! You
# know enterprisey...
options = pywsman.ClientOptions()
options.add_selector('InstanceID', source)
options.add_property('source', instance_id)
doc = client.wsman_invoke(resource_uris.DCIM_BootConfigSetting,
options, 'ChangeBootOrderByInstanceID')
return_value = drac_common.find_xml(doc, 'ReturnValue',
resource_uris.DCIM_BootConfigSetting).text
# NOTE(lucasagomes): Possible return values are: RET_ERROR for error,
# RET_SUCCESS for success or RET_CREATED job
# created (but changes will be applied after
# the reboot)
# Boot Management Documentation: http://goo.gl/aEsvUH (Section 8.7)
if return_value == drac_common.RET_ERROR:
error_message = drac_common.find_xml(doc, 'Message',
resource_uris.DCIM_BootConfigSetting).text
raise exception.DracOperationError(operation='set_boot_device',
error=error_message)
# Create a configuration job
_create_config_job(task.node)
def get_boot_device(self, task):
"""Get the current boot device for a node.
Returns the current boot device of the node.
:param task: a task from TaskManager.
:raises: DracClientError on an error from pywsman library.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
client = drac_common.get_wsman_client(task.node)
boot_mode = _get_next_boot_mode(task.node)
persistent = boot_mode['is_next'] == PERSISTENT
instance_id = boot_mode['instance_id']
options = pywsman.ClientOptions()
filter_query = ('select * from DCIM_BootSourceSetting where '
'PendingAssignedSequence=0 and '
'BootSourceType="%s"' % instance_id)
try:
doc = client.wsman_enumerate(resource_uris.DCIM_BootSourceSetting,
options, filter_query=filter_query)
except exception.DracClientError as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('DRAC driver failed to get the current boot '
'device for node %(node_uuid)s. '
'Reason: %(error)s.'),
{'node_uuid': task.node.uuid, 'error': exc})
instance_id = drac_common.find_xml(doc, 'InstanceID',
resource_uris.DCIM_BootSourceSetting).text
boot_device = next((key for (key, value) in _BOOT_DEVICES_MAP.items()
if value in instance_id), None)
return {'boot_device': boot_device, 'persistent': persistent}
def get_sensors_data(self, task):
"""Get sensors data.
:param task: a TaskManager instance.
:raises: FailedToGetSensorData when getting the sensor data fails.
:raises: FailedToParseSensorData when parsing sensor data fails.
:returns: returns a consistent format dict of sensor data grouped by
sensor type, which can be processed by Ceilometer.
"""
raise NotImplementedError()
|
[
"oslo.utils.excutils.save_and_reraise_exception",
"ironic.common.exception.DracOperationError",
"ironic.common.i18n._",
"ironic.drivers.modules.drac.common.parse_driver_info",
"ironic.drivers.modules.drac.common.get_wsman_client",
"ironic.common.exception.DracConfigJobCreationError",
"ironic.common.i18n._LE",
"oslo.utils.importutils.try_import",
"ironic.drivers.modules.drac.common.find_xml",
"ironic.openstack.common.log.getLogger"
] |
[((1108, 1141), 'oslo.utils.importutils.try_import', 'importutils.try_import', (['"""pywsman"""'], {}), "('pywsman')\n", (1130, 1141), False, 'from oslo.utils import importutils\n'), ((1149, 1176), 'ironic.openstack.common.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1166, 1176), True, 'from ironic.openstack.common import log as logging\n'), ((2069, 2103), 'ironic.drivers.modules.drac.common.get_wsman_client', 'drac_common.get_wsman_client', (['node'], {}), '(node)\n', (2097, 2103), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((2757, 2866), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""DCIM_BootConfigSetting"""', 'resource_uris.DCIM_BootConfigSetting'], {'find_all': '(True)'}), "(doc, 'DCIM_BootConfigSetting', resource_uris.\n DCIM_BootConfigSetting, find_all=True)\n", (2777, 2866), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((3967, 4001), 'ironic.drivers.modules.drac.common.get_wsman_client', 'drac_common.get_wsman_client', (['node'], {}), '(node)\n', (3995, 4001), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((5489, 5523), 'ironic.drivers.modules.drac.common.get_wsman_client', 'drac_common.get_wsman_client', (['node'], {}), '(node)\n', (5517, 5523), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((5974, 6073), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""DCIM_LifecycleJob"""', 'resource_uris.DCIM_LifecycleJob'], {'find_all': '(True)'}), "(doc, 'DCIM_LifecycleJob', resource_uris.\n DCIM_LifecycleJob, find_all=True)\n", (5994, 6073), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((4555, 4627), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""ReturnValue"""', 'resource_uris.DCIM_BIOSService'], {}), "(doc, 'ReturnValue', resource_uris.DCIM_BIOSService)\n", (4575, 4627), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((5143, 5200), 'ironic.common.exception.DracConfigJobCreationError', 'exception.DracConfigJobCreationError', ([], {'error': 'error_message'}), '(error=error_message)\n', (5179, 5200), False, 'from ironic.common import exception\n'), ((6170, 6234), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['i', '"""Name"""', 'resource_uris.DCIM_LifecycleJob'], {}), "(i, 'Name', resource_uris.DCIM_LifecycleJob)\n", (6190, 6234), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((7689, 7729), 'ironic.drivers.modules.drac.common.parse_driver_info', 'drac_common.parse_driver_info', (['task.node'], {}), '(task.node)\n', (7718, 7729), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((8878, 8917), 'ironic.drivers.modules.drac.common.get_wsman_client', 'drac_common.get_wsman_client', (['task.node'], {}), '(task.node)\n', (8906, 8917), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((12041, 12080), 'ironic.drivers.modules.drac.common.get_wsman_client', 'drac_common.get_wsman_client', (['task.node'], {}), '(task.node)\n', (12069, 12080), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((3111, 3186), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['i', '"""InstanceID"""', 'resource_uris.DCIM_BootConfigSetting'], {}), "(i, 'InstanceID', resource_uris.DCIM_BootConfigSetting)\n", (3131, 3186), True, 'from ironic.drivers.modules.drac import common 
as drac_common\n'), ((3247, 3318), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['i', '"""IsNext"""', 'resource_uris.DCIM_BootConfigSetting'], {}), "(i, 'IsNext', resource_uris.DCIM_BootConfigSetting)\n", (3267, 3318), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((5012, 5080), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""Message"""', 'resource_uris.DCIM_BIOSService'], {}), "(doc, 'Message', resource_uris.DCIM_BIOSService)\n", (5032, 5080), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((6324, 6393), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['i', '"""JobStatus"""', 'resource_uris.DCIM_LifecycleJob'], {}), "(i, 'JobStatus', resource_uris.DCIM_LifecycleJob)\n", (6344, 6393), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((7024, 7074), 'ironic.common.exception.DracConfigJobCreationError', 'exception.DracConfigJobCreationError', ([], {'error': 'reason'}), '(error=reason)\n', (7060, 7074), False, 'from ironic.common import exception\n'), ((9778, 9855), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""InstanceID"""', 'resource_uris.DCIM_BootSourceSetting'], {}), "(doc, 'InstanceID', resource_uris.DCIM_BootSourceSetting)\n", (9798, 9855), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((10581, 10659), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""ReturnValue"""', 'resource_uris.DCIM_BootConfigSetting'], {}), "(doc, 'ReturnValue', resource_uris.DCIM_BootConfigSetting)\n", (10601, 10659), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((11254, 11332), 'ironic.common.exception.DracOperationError', 'exception.DracOperationError', ([], {'operation': '"""set_boot_device"""', 'error': 'error_message'}), "(operation='set_boot_device', error=error_message)\n", (11282, 11332), False, 'from ironic.common import exception\n'), ((13028, 13105), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""InstanceID"""', 'resource_uris.DCIM_BootSourceSetting'], {}), "(doc, 'InstanceID', resource_uris.DCIM_BootSourceSetting)\n", (13048, 13105), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((2500, 2537), 'oslo.utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (2535, 2537), False, 'from oslo.utils import excutils\n'), ((5708, 5745), 'oslo.utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (5743, 5745), False, 'from oslo.utils import excutils\n'), ((6681, 6751), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['i', '"""InstanceID"""', 'resource_uris.DCIM_LifecycleJob'], {}), "(i, 'InstanceID', resource_uris.DCIM_LifecycleJob)\n", (6701, 6751), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((6817, 6946), 'ironic.common.i18n._', '_', (['"""Another job with ID "%s" is already created to configure the BIOS. Wait until existing job is completed or is cancelled"""'], {}), '(\'Another job with ID "%s" is already created to configure the BIOS. 
Wait until existing job is completed or is cancelled\'\n )\n', (6818, 6946), False, 'from ironic.common.i18n import _\n'), ((9970, 10056), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""BootSourceType"""', 'resource_uris.DCIM_BootSourceSetting'], {}), "(doc, 'BootSourceType', resource_uris.\n DCIM_BootSourceSetting)\n", (9990, 10056), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((11119, 11193), 'ironic.drivers.modules.drac.common.find_xml', 'drac_common.find_xml', (['doc', '"""Message"""', 'resource_uris.DCIM_BootConfigSetting'], {}), "(doc, 'Message', resource_uris.DCIM_BootConfigSetting)\n", (11139, 11193), True, 'from ironic.drivers.modules.drac import common as drac_common\n'), ((2561, 2656), 'ironic.common.i18n._LE', '_LE', (['"""DRAC driver failed to get next boot mode for node %(node_uuid)s. Reason: %(error)s."""'], {}), "('DRAC driver failed to get next boot mode for node %(node_uuid)s. Reason: %(error)s.'\n )\n", (2564, 2656), False, 'from ironic.common.i18n import _LE\n'), ((5769, 5873), 'ironic.common.i18n._LE', '_LE', (['"""DRAC driver failed to list the configuration jobs for node %(node_uuid)s. Reason: %(error)s."""'], {}), "('DRAC driver failed to list the configuration jobs for node %(node_uuid)s. Reason: %(error)s.'\n )\n", (5772, 5873), False, 'from ironic.common.i18n import _LE\n'), ((9371, 9408), 'oslo.utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (9406, 9408), False, 'from oslo.utils import excutils\n'), ((12702, 12739), 'oslo.utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (12737, 12739), False, 'from oslo.utils import excutils\n'), ((9436, 9575), 'ironic.common.i18n._LE', '_LE', (['"""DRAC driver failed to set the boot device for node %(node_uuid)s. Can\'t find the ID for the %(device)s type. Reason: %(error)s."""'], {}), '("DRAC driver failed to set the boot device for node %(node_uuid)s. Can\'t find the ID for the %(device)s type. Reason: %(error)s."\n )\n', (9439, 9575), False, 'from ironic.common.i18n import _LE\n'), ((12767, 12871), 'ironic.common.i18n._LE', '_LE', (['"""DRAC driver failed to get the current boot device for node %(node_uuid)s. Reason: %(error)s."""'], {}), "('DRAC driver failed to get the current boot device for node %(node_uuid)s. Reason: %(error)s.'\n )\n", (12770, 12871), False, 'from ironic.common.i18n import _LE\n')]
|
import numpy as np
import matplotlib.pyplot as plt
#grid number on half space (without the origin)
N=150
#total grid number = 2*N + 1 (with origin)
N_g=2*N+1
#finite barrier potential value = 300 (meV)
potential_value=300
#building potential:
def potential(potential_value):
V=np.zeros((1,N_g),dtype=float)
V[0,0:100]=potential_value
V[0,100:201]=0
V[0,201:]=potential_value
return V
# #Hamiltonian matrix:
def Hamiltonian(V):
H=np.zeros((N_g,N_g),dtype=float)
dx=10 #0.1 (nanometer)
for i in range(0,N_g):
for j in range(0,N_g):
if i==j:
x=dx*(i-N) #position
H[i,j]=1/(dx**2)+V[0,i]
elif j==i-1 or j==i+1:
H[i,j]=-0.5/(dx**2)
return H
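# Added note (not in the original script): the matrix built above is the
# standard three-point finite-difference discretization of the stationary
# Schroedinger equation -(1/2) d^2(psi)/dx^2 + V(x) psi = E psi in the
# script's units: d^2(psi)/dx^2 is approximated by
# (psi[i+1] - 2*psi[i] + psi[i-1]) / dx**2, which gives the diagonal entries
# 1/dx**2 + V[i] and the off-diagonal entries -0.5/dx**2 used in Hamiltonian().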
V=potential(potential_value)
H=Hamiltonian(V)
#sort the eigenvalue and get the corresponding eigenvector
eigenvalue,eigenvector=np.linalg.eig(H)
idx=np.argsort(eigenvalue)
eigenvalue=eigenvalue[idx]
eigenvector=eigenvector[:,idx]
#visualize
fig=plt.figure(figsize=(18,6))
ax1=fig.add_subplot(131)
x=np.linspace(0,10,11)
ax1.plot(x,eigenvalue[0:11],'r.',label='numerical')
ax1.set_xlabel('n')
ax1.set_ylabel('$E_n (meV)$')
ax1.set_title('eigen energies')
ax1.grid(True)
ax1.legend()
ax2=fig.add_subplot(132)
x=np.linspace(-5,5,301)
#x/lamda_0
x=x/(np.sqrt(2)*10**(10-9)/np.pi)
y1=eigenvector[:,0]
y2=eigenvector[:,1]
y3=eigenvector[:,2]
y4=eigenvector[:,3]
y5=eigenvector[:,4]
ax2.plot(x,(y1),label='$Ψ_{n=0}(x)$')
ax2.plot(x,(y2),label='$Ψ_{n=1}(x)$')
ax2.plot(x,(y3),label='$Ψ_{n=2}(x)$')
ax2.set_xlabel('position ($x/λ_0$) ')
ax2.set_ylabel('wavefunction')
ax2.set_title('wave function in different eigen state')
ax2.legend()
ax2.grid(True)
ax3=fig.add_subplot(133)
ax3.plot(x,(y1**2),label='$Ψ^2_{n=0}(x)$')
ax3.plot(x,(y2**2),label='$Ψ^2_{n=1}(x)$')
ax3.plot(x,(y3**2),label='$Ψ^2_{n=2}(x)$')
ax3.set_xlabel('position ($x/λ_0$) ')
ax3.set_ylabel('square wavefunction')
ax3.set_title('probability distribution in finite barrier well')
ax3.grid(True)
ax3.legend()
plt.show()
|
[
"numpy.sqrt",
"numpy.linalg.eig",
"numpy.argsort",
"matplotlib.pyplot.figure",
"numpy.linspace",
"numpy.zeros",
"matplotlib.pyplot.show"
] |
[((886, 902), 'numpy.linalg.eig', 'np.linalg.eig', (['H'], {}), '(H)\n', (899, 902), True, 'import numpy as np\n'), ((907, 929), 'numpy.argsort', 'np.argsort', (['eigenvalue'], {}), '(eigenvalue)\n', (917, 929), True, 'import numpy as np\n'), ((1004, 1031), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 6)'}), '(figsize=(18, 6))\n', (1014, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1058, 1080), 'numpy.linspace', 'np.linspace', (['(0)', '(10)', '(11)'], {}), '(0, 10, 11)\n', (1069, 1080), True, 'import numpy as np\n'), ((1269, 1292), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(301)'], {}), '(-5, 5, 301)\n', (1280, 1292), True, 'import numpy as np\n'), ((2027, 2037), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2035, 2037), True, 'import matplotlib.pyplot as plt\n'), ((283, 314), 'numpy.zeros', 'np.zeros', (['(1, N_g)'], {'dtype': 'float'}), '((1, N_g), dtype=float)\n', (291, 314), True, 'import numpy as np\n'), ((456, 489), 'numpy.zeros', 'np.zeros', (['(N_g, N_g)'], {'dtype': 'float'}), '((N_g, N_g), dtype=float)\n', (464, 489), True, 'import numpy as np\n'), ((1307, 1317), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1314, 1317), True, 'import numpy as np\n')]
|
import os
import click
import logging
from pathlib import Path
from dotenv import find_dotenv, load_dotenv
from keras.callbacks import ModelCheckpoint, EarlyStopping
import src.utils.utils as ut
import src.utils.model_utils as mu
import src.models.model as md
import src.models.data_generator as dg
import src.data.dataframe as dat
def train(classmode, modelmode, batch_size, epochs, learning_rate):
train = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.train_df))
nclasses = mu.ref_n_classes(classmode)
valid = dat.read_df(os.path.join(ut.dirs.processed_dir, ut.df_names.valid_df))
traindata = dg.DataSequence(train,
ut.dirs.train_dir,
batch_size=batch_size,
classmode=classmode,
modelmode=modelmode)
validdata = dg.DataSequence(valid,
ut.dirs.validation_dir,
batch_size=batch_size,
classmode=classmode,
modelmode=modelmode)
model = md.custom(classmode, modelmode, nclasses).make_compiled_model(learning_rate)
model.summary()
save_model_to = os.path.join(ut.dirs.model_dir, classmode + '_' + modelmode + '.h5')
Checkpoint = ModelCheckpoint(save_model_to,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
period=1)
Earlystop = EarlyStopping(monitor='val_loss',
min_delta=0,
patience=5,
verbose=0,
mode='auto',
baseline=None)
model.fit_generator(generator=traindata,
steps_per_epoch=len(train)//batch_size,
validation_data=validdata,
validation_steps=len(valid)//batch_size,
epochs=epochs,
callbacks=[mu.TrainValTensorBoard(write_graph=False),
Checkpoint],
#verbose=1,
use_multiprocessing=False,
workers=1)
@click.command()
@click.option('--classmode', type=str, default=ut.params.classmode,
help='choose a classmode:\n\
multilabel, multiclass\n\
(default: multilabel)')
@click.option('--modelmode', type=str, default=ut.params.modelmode,
help='choose a modelmode:\n\
image, text, combined\n\
(default: combined)')
@click.option('--ep', type=float, default=ut.params.epochs,
help='number of epochs (default: {})'.
format(ut.params.epochs))
@click.option('--lr', type=float, default=ut.params.learning_rate,
help='learning rate (default: {})'.
format(ut.params.learning_rate))
@click.option('--bs', type=int, default=ut.params.batch_size,
help='batch size (default: {})'.
format(ut.params.batch_size))
def main(classmode, modelmode, bs, ep, lr):
classmode, modelmode = ut.check_modes(classmode, modelmode)
train(classmode, modelmode, bs, ep, lr)
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
project_dir = Path(__file__).resolve().parents[2]
load_dotenv(find_dotenv())
main()
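# Hedged CLI sketch (added comment): assuming this file lives under
# src/models/ as suggested by the imports above (the exact module path is an
# assumption), training could be launched with the click options defined here:
#
#   python -m src.models.train_model --classmode multilabel \
#       --modelmode combined --bs 32 --ep 10 --lr 0.001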
|
[
"logging.basicConfig",
"dotenv.find_dotenv",
"keras.callbacks.ModelCheckpoint",
"pathlib.Path",
"click.option",
"src.utils.model_utils.TrainValTensorBoard",
"src.models.model.custom",
"os.path.join",
"src.utils.utils.check_modes",
"src.utils.model_utils.ref_n_classes",
"src.models.data_generator.DataSequence",
"keras.callbacks.EarlyStopping",
"click.command"
] |
[((2450, 2465), 'click.command', 'click.command', ([], {}), '()\n', (2463, 2465), False, 'import click\n'), ((2467, 2676), 'click.option', 'click.option', (['"""--classmode"""'], {'type': 'str', 'default': 'ut.params.classmode', 'help': '"""choose a classmode:\n multilabel, multiclass\n (default: multilabel)"""'}), '(\'--classmode\', type=str, default=ut.params.classmode, help=\n """choose a classmode:\n multilabel, multiclass\n (default: multilabel)"""\n )\n', (2479, 2676), False, 'import click\n'), ((2690, 2896), 'click.option', 'click.option', (['"""--modelmode"""'], {'type': 'str', 'default': 'ut.params.modelmode', 'help': '"""choose a modelmode:\n image, text, combined\n (default: combined)"""'}), '(\'--modelmode\', type=str, default=ut.params.modelmode, help=\n """choose a modelmode:\n image, text, combined\n (default: combined)"""\n )\n', (2702, 2896), False, 'import click\n'), ((503, 530), 'src.utils.model_utils.ref_n_classes', 'mu.ref_n_classes', (['classmode'], {}), '(classmode)\n', (519, 530), True, 'import src.utils.model_utils as mu\n'), ((632, 743), 'src.models.data_generator.DataSequence', 'dg.DataSequence', (['train', 'ut.dirs.train_dir'], {'batch_size': 'batch_size', 'classmode': 'classmode', 'modelmode': 'modelmode'}), '(train, ut.dirs.train_dir, batch_size=batch_size, classmode=\n classmode, modelmode=modelmode)\n', (647, 743), True, 'import src.models.data_generator as dg\n'), ((883, 998), 'src.models.data_generator.DataSequence', 'dg.DataSequence', (['valid', 'ut.dirs.validation_dir'], {'batch_size': 'batch_size', 'classmode': 'classmode', 'modelmode': 'modelmode'}), '(valid, ut.dirs.validation_dir, batch_size=batch_size,\n classmode=classmode, modelmode=modelmode)\n', (898, 998), True, 'import src.models.data_generator as dg\n'), ((1254, 1322), 'os.path.join', 'os.path.join', (['ut.dirs.model_dir', "(classmode + '_' + modelmode + '.h5')"], {}), "(ut.dirs.model_dir, classmode + '_' + modelmode + '.h5')\n", (1266, 1322), False, 'import os\n'), ((1341, 1476), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['save_model_to'], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(False)', 'save_weights_only': '(False)', 'mode': '"""auto"""', 'period': '(1)'}), "(save_model_to, monitor='val_loss', verbose=0,\n save_best_only=False, save_weights_only=False, mode='auto', period=1)\n", (1356, 1476), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((1687, 1789), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(0)', 'mode': '"""auto"""', 'baseline': 'None'}), "(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode=\n 'auto', baseline=None)\n", (1700, 1789), False, 'from keras.callbacks import ModelCheckpoint, EarlyStopping\n'), ((3486, 3522), 'src.utils.utils.check_modes', 'ut.check_modes', (['classmode', 'modelmode'], {}), '(classmode, modelmode)\n', (3500, 3522), True, 'import src.utils.utils as ut\n'), ((3670, 3725), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'log_fmt'}), '(level=logging.INFO, format=log_fmt)\n', (3689, 3725), False, 'import logging\n'), ((429, 486), 'os.path.join', 'os.path.join', (['ut.dirs.processed_dir', 'ut.df_names.train_df'], {}), '(ut.dirs.processed_dir, ut.df_names.train_df)\n', (441, 486), False, 'import os\n'), ((556, 613), 'os.path.join', 'os.path.join', (['ut.dirs.processed_dir', 'ut.df_names.valid_df'], {}), '(ut.dirs.processed_dir, ut.df_names.valid_df)\n', (568, 
613), False, 'import os\n'), ((3798, 3811), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (3809, 3811), False, 'from dotenv import find_dotenv, load_dotenv\n'), ((1136, 1177), 'src.models.model.custom', 'md.custom', (['classmode', 'modelmode', 'nclasses'], {}), '(classmode, modelmode, nclasses)\n', (1145, 1177), True, 'import src.models.model as md\n'), ((2234, 2275), 'src.utils.model_utils.TrainValTensorBoard', 'mu.TrainValTensorBoard', ([], {'write_graph': '(False)'}), '(write_graph=False)\n', (2256, 2275), True, 'import src.utils.model_utils as mu\n'), ((3745, 3759), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3749, 3759), False, 'from pathlib import Path\n')]
|
# Copyright (c) 2016-2019. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import logging
from Bio.Seq import reverse_complement
from pyensembl import Transcript
from ..common import groupby_field
from .transcript_helpers import interval_offset_on_transcript
from .effect_helpers import changes_exonic_splice_site
from .effect_collection import EffectCollection
from .effect_prediction_coding import predict_variant_coding_effect_on_transcript
from .effect_classes import (
Failure,
Intergenic,
Intragenic,
NoncodingTranscript,
IncompleteTranscript,
FivePrimeUTR,
ThreePrimeUTR,
Intronic,
IntronicSpliceSite,
SpliceAcceptor,
SpliceDonor,
StartLoss,
ExonLoss,
ExonicSpliceSite,
)
logger = logging.getLogger(__name__)
def predict_variant_effects(variant, raise_on_error=False):
"""Determine the effects of a variant on any transcripts it overlaps.
Returns an EffectCollection object.
Parameters
----------
variant : Variant
raise_on_error : bool
Raise an exception if we encounter an error while trying to
determine the effect of this variant on a transcript, or simply
log the error and continue.
"""
# if this variant isn't overlapping any genes, return a
# Intergenic effect
# TODO: look for nearby genes and mark those as Upstream and Downstream
# effects
try:
gene_ids = variant.gene_ids
transcripts = variant.transcripts
except:
if raise_on_error:
raise
else:
return []
if len(gene_ids) == 0:
effects = [Intergenic(variant)]
else:
# list of all MutationEffects for all genes & transcripts
effects = []
# group transcripts by their gene ID
transcripts_grouped_by_gene = \
groupby_field(transcripts, 'gene_id')
# want effects in the list grouped by the gene they come from
for gene_id in sorted(gene_ids):
if gene_id not in transcripts_grouped_by_gene:
# intragenic variant overlaps a gene but not any transcripts
gene = variant.genome.gene_by_id(gene_id)
effects.append(Intragenic(variant, gene))
else:
# gene ID has transcripts overlapped by this variant
for transcript in transcripts_grouped_by_gene[gene_id]:
if raise_on_error:
effect = predict_variant_effect_on_transcript(
variant=variant,
transcript=transcript)
else:
effect = predict_variant_effect_on_transcript_or_failure(
variant=variant,
transcript=transcript)
effects.append(effect)
return EffectCollection(effects)
def predict_variant_effect_on_transcript_or_failure(variant, transcript):
"""
Try predicting the effect of a variant on a particular transcript but
suppress raised exceptions by converting them into `Failure` effect
values.
"""
try:
return predict_variant_effect_on_transcript(
variant=variant,
transcript=transcript)
except (AssertionError, ValueError) as error:
logger.warn(
"Encountered error annotating %s for %s: %s",
variant,
transcript,
error)
return Failure(variant, transcript)
def predict_variant_effect_on_transcript(variant, transcript):
"""Return the transcript effect (such as FrameShift) that results from
applying this genomic variant to a particular transcript.
Parameters
----------
transcript : Transcript
Transcript we're going to apply mutation to.
"""
if transcript.__class__ is not Transcript:
raise TypeError(
"Expected %s : %s to have type Transcript" % (
transcript, type(transcript)))
# check for non-coding transcripts first, since
# every non-coding transcript is "incomplete".
if not transcript.is_protein_coding:
return NoncodingTranscript(variant, transcript)
if not transcript.complete:
return IncompleteTranscript(variant, transcript)
# since we're using inclusive base-1 coordinates,
# checking for overlap requires special logic for insertions
is_insertion = variant.is_insertion
# determine if any exons are deleted, and if not,
# what is the closest exon and how far is this variant
# from that exon (overlapping the exon = 0 distance)
completely_lost_exons = []
# list of which (exon #, Exon) pairs this mutation overlaps
overlapping_exon_numbers_and_exons = []
distance_to_nearest_exon = float("inf")
start_in_exon = False
end_in_exon = False
nearest_exon = None
variant_start = variant.trimmed_base1_start
variant_end = variant.trimmed_base1_end
for i, exon in enumerate(transcript.exons):
if variant_start <= exon.start and variant_end >= exon.end:
completely_lost_exons.append(exon)
if is_insertion and exon.strand == "+" and variant_end == exon.end:
# insertions after an exon don't overlap the exon
distance = 1
elif is_insertion and exon.strand == "-" and variant_start == exon.start:
distance = 1
else:
distance = exon.distance_to_interval(variant_start, variant_end)
if distance == 0:
overlapping_exon_numbers_and_exons.append((i + 1, exon))
# start is contained in current exon
if exon.start <= variant_start <= exon.end:
start_in_exon = True
# end is contained in current exon
if exon.end >= variant_end >= exon.start:
end_in_exon = True
elif distance < distance_to_nearest_exon:
distance_to_nearest_exon = distance
nearest_exon = exon
if len(overlapping_exon_numbers_and_exons) == 0:
intronic_effect_class = choose_intronic_effect_class(
variant=variant,
nearest_exon=nearest_exon,
distance_to_exon=distance_to_nearest_exon)
return intronic_effect_class(
variant=variant,
transcript=transcript,
nearest_exon=nearest_exon,
distance_to_exon=distance_to_nearest_exon)
elif len(completely_lost_exons) > 0 or (
len(overlapping_exon_numbers_and_exons) > 1):
# if spanning multiple exons, or completely deleted an exon
# then consider that an ExonLoss mutation
exons = [exon for (_, exon) in overlapping_exon_numbers_and_exons]
return ExonLoss(variant, transcript, exons)
assert len(overlapping_exon_numbers_and_exons) == 1
exon_number, exon = overlapping_exon_numbers_and_exons[0]
exonic_effect_annotation = exonic_transcript_effect(
variant, exon, exon_number, transcript)
    # simple case: both start and end are in the same exon
if start_in_exon and end_in_exon:
return exonic_effect_annotation
elif isinstance(exonic_effect_annotation, ExonicSpliceSite):
        # if mutation bleeds over into the intron but even just
# the exonic portion got annotated as an exonic splice site
# then return it
return exonic_effect_annotation
return ExonicSpliceSite(
variant=variant,
transcript=transcript,
exon=exon,
alternate_effect=exonic_effect_annotation)
def choose_intronic_effect_class(
variant,
nearest_exon,
distance_to_exon):
"""
Infer effect of variant which does not overlap any exon of
the given transcript.
"""
assert distance_to_exon > 0, \
"Expected intronic effect to have distance_to_exon > 0, got %d" % (
distance_to_exon,)
if nearest_exon.strand == "+":
# if exon on positive strand
start_before = variant.trimmed_base1_start < nearest_exon.start
start_same = variant.trimmed_base1_start == nearest_exon.start
before_exon = start_before or (variant.is_insertion and start_same)
else:
# if exon on negative strand
end_after = variant.trimmed_base1_end > nearest_exon.end
end_same = variant.trimmed_base1_end == nearest_exon.end
before_exon = end_after or (variant.is_insertion and end_same)
# distance cutoffs based on consensus splice sequences from
# http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2947103/
# 5' splice site: MAG|GURAGU consensus
# M is A or C; R is purine; | is the exon-intron boundary
# 3' splice site: YAG|R
if distance_to_exon <= 2:
if before_exon:
# 2 last nucleotides of intron before exon are the splice
# acceptor site, typically "AG"
return SpliceAcceptor
else:
# 2 first nucleotides of intron after exon are the splice donor
# site, typically "GT"
return SpliceDonor
elif not before_exon and distance_to_exon <= 6:
# variants in nucleotides 3-6 at start of intron aren't as certain
# to cause problems as nucleotides 1-2 but still implicated in
# alternative splicing
return IntronicSpliceSite
elif before_exon and distance_to_exon <= 3:
# nucleotide -3 before exon is part of the 3' splicing
# motif but allows for more degeneracy than the -2, -1 nucleotides
return IntronicSpliceSite
else:
# intronic mutation unrelated to splicing
return Intronic
def exonic_transcript_effect(variant, exon, exon_number, transcript):
"""Effect of this variant on a Transcript, assuming we already know
that this variant overlaps some exon of the transcript.
Parameters
----------
variant : Variant
exon : pyensembl.Exon
Exon which this variant overlaps
exon_number : int
Index (starting from 1) of the given exon in the transcript's
sequence of exons.
transcript : pyensembl.Transcript
"""
genome_ref = variant.trimmed_ref
genome_alt = variant.trimmed_alt
variant_start = variant.trimmed_base1_start
variant_end = variant.trimmed_base1_end
# clip mutation to only affect the current exon
if variant_start < exon.start:
# if mutation starts before current exon then only look
# at nucleotides which overlap the exon
logger.info('Mutation in variant %s starts before exon %s', variant, exon)
assert len(genome_ref) > 0, "Unexpected insertion into intron"
n_skip_start = exon.start - variant_start
genome_ref = genome_ref[n_skip_start:]
genome_alt = genome_alt[n_skip_start:]
genome_start = exon.start
else:
genome_start = variant_start
if variant_end > exon.end:
# if mutation goes past exon end then only look at nucleotides
# which overlap the exon
logger.info('Mutation in variant %s ends after exon %s', variant, exon)
n_skip_end = variant_end - exon.end
genome_ref = genome_ref[:-n_skip_end]
genome_alt = genome_alt[:len(genome_ref)]
genome_end = exon.end
else:
genome_end = variant_end
transcript_offset = interval_offset_on_transcript(
genome_start, genome_end, transcript)
if transcript.on_backward_strand:
cdna_ref = reverse_complement(genome_ref)
cdna_alt = reverse_complement(genome_alt)
else:
cdna_ref = genome_ref
cdna_alt = genome_alt
n_ref = len(cdna_ref)
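    # sanity check: the variant's reference allele must match the transcript sequence here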
expected_ref = str(
transcript.sequence[transcript_offset:transcript_offset + n_ref])
if cdna_ref != expected_ref:
raise ValueError(
("Found ref nucleotides '%s' in sequence"
" of %s at offset %d (chromosome positions %d:%d)"
" but variant %s has '%s'") % (
expected_ref,
transcript,
transcript_offset,
genome_start,
genome_end,
variant,
cdna_ref))
utr5_length = min(transcript.start_codon_spliced_offsets)
# does the variant start inside the 5' UTR?
if utr5_length > transcript_offset:
# does the variant end after the 5' UTR, within the coding region?
if utr5_length < transcript_offset + n_ref:
# TODO: we *might* lose the Kozak sequence or the start codon
# but without looking at the modified sequence how can we tell
# for sure that this is a start-loss variant?
return StartLoss(variant, transcript)
else:
# if variant contained within 5' UTR
return FivePrimeUTR(variant, transcript)
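    # first spliced offset past the stop codon marks the start of the 3' UTR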
utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1
if transcript_offset >= utr3_offset:
return ThreePrimeUTR(variant, transcript)
exon_start_offset = interval_offset_on_transcript(
exon.start, exon.end, transcript)
exon_end_offset = exon_start_offset + len(exon) - 1
# Further below we're going to try to predict exonic splice site
# modifications, which will take this effect_annotation as their
# alternative hypothesis for what happens if splicing doesn't change.
# If the mutation doesn't affect an exonic splice site, then
# we'll just return this effect.
coding_effect_annotation = predict_variant_coding_effect_on_transcript(
variant=variant,
transcript=transcript,
trimmed_cdna_ref=cdna_ref,
trimmed_cdna_alt=cdna_alt,
transcript_offset=transcript_offset)
if changes_exonic_splice_site(
transcript=transcript,
transcript_ref=cdna_ref,
transcript_alt=cdna_alt,
transcript_offset=transcript_offset,
exon_start_offset=exon_start_offset,
exon_end_offset=exon_end_offset,
exon_number=exon_number):
return ExonicSpliceSite(
variant=variant,
transcript=transcript,
exon=exon,
alternate_effect=coding_effect_annotation)
return coding_effect_annotation
|
[
"logging.getLogger",
"Bio.Seq.reverse_complement"
] |
[((1344, 1371), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1361, 1371), False, 'import logging\n'), ((12409, 12439), 'Bio.Seq.reverse_complement', 'reverse_complement', (['genome_ref'], {}), '(genome_ref)\n', (12427, 12439), False, 'from Bio.Seq import reverse_complement\n'), ((12459, 12489), 'Bio.Seq.reverse_complement', 'reverse_complement', (['genome_alt'], {}), '(genome_alt)\n', (12477, 12489), False, 'from Bio.Seq import reverse_complement\n')]
|
# OS-Level Imports
import os
import sys
import multiprocessing
from multiprocessing import cpu_count
# Library Imports
import tensorflow as tf
from tensorflow.keras import mixed_precision
from tensorflow.python.distribute.distribute_lib import Strategy
# Internal Imports
from Utils.enums import Environment, Accelerator
# Global Configuration Variables
environment = Environment.GoogleColab
accelerator = Accelerator.GPU
strategy = None
cpu_no = multiprocessing.cpu_count()
batch_size = 64
latent_dim = 100
epochs = 10
supervised_samples_ratio = 0.05
save_interval = 17
super_batches = 1
unsuper_batches = 1
prefetch_no = tf.data.AUTOTUNE
eager_execution = True
model_summary = False
resume_training = False
result_path = './results/'
dataset_path = './dataset/'
def parse_args():
global environment
global accelerator
global batch_size
global latent_dim
global epochs
global supervised_samples_ratio
global save_interval
global super_batches
global unsuper_batches
global prefetch_no
global eager_execution
global model_summary
for arg in sys.argv:
if arg.lower().__contains__("envi"):
param = arg[arg.index("=") + 1:]
if param.lower() == "local":
environment = Environment.Local
elif param.lower() == "colab":
environment = Environment.GoogleColab
elif param.lower() == "research":
environment = Environment.GoogleResearch
if arg.lower().__contains__("accel"):
param = arg[arg.index("=") + 1:]
if param.lower() == "gpu":
accelerator = Accelerator.GPU
elif param.lower() == "tpu":
accelerator = Accelerator.TPU
if arg.lower().__contains__("batch"):
param = arg[arg.index("=") + 1:]
batch_size = int(param)
if arg.lower().__contains__("epoch"):
param = arg[arg.index("=") + 1:]
epochs = int(param)
if arg.lower().__contains__("sample_ratio"):
param = arg[arg.index("=") + 1:]
supervised_samples_ratio = float(param)
if arg.lower().__contains__("save_interval"):
param = arg[arg.index("=") + 1:]
save_interval = int(param)
if arg.lower().__contains__("super_batches"):
param = arg[arg.index("=") + 1:]
super_batches = int(param)
if arg.lower().__contains__("unsuper_batches"):
param = arg[arg.index("=") + 1:]
unsuper_batches = int(param)
if arg.lower().__contains__("eager"):
param = arg[arg.index("=") + 1:]
if param.lower().__contains__("false"):
eager_execution = False
else:
eager_execution = True
if arg.lower().__contains__("model_sum"):
param = arg[arg.index("=") + 1:]
if param.lower().__contains__("false"):
                model_summary = False
            else:
                model_summary = True
def print_args():
global environment
global accelerator
global batch_size
global latent_dim
global epochs
global supervised_samples_ratio
global save_interval
global super_batches
global unsuper_batches
global prefetch_no
global eager_execution
global model_summary
print(environment)
print(accelerator)
print("Batch Size: ", batch_size)
print("Epochs: ", epochs)
print("Supervised Ratio: ", supervised_samples_ratio)
print("Save Interval: ", save_interval)
print("Supervised Batches per Interval: ", super_batches)
print("Unsupervised Batches per Interval: ", unsuper_batches)
print("Eager Execution: ", eager_execution)
print("Print Model Summery: ", model_summary)
def configure(enable_xla: bool = True,
print_device_placement: bool = False,
enable_eager_execution: bool = True,
only_cpu: bool = False,
enable_memory_growth: bool = True,
enable_mixed_float16: bool = False):
global environment
global accelerator
global batch_size
global latent_dim
global epochs
global supervised_samples_ratio
global save_interval
global super_batches
global unsuper_batches
global prefetch_no
global eager_execution
global model_summary
global strategy
global result_path
global dataset_path
# Configurations
#########################################################
# To enable xla compiler
if enable_xla:
os.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'
#########################################################
# To print out on which device operation is taking place
if print_device_placement:
tf.debugging.set_log_device_placement(True)
#########################################################
# To disable eager execution and use graph functions
if not enable_eager_execution:
tf.compat.v1.disable_eager_execution()
#########################################################
# To disable GPUs
if only_cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
#########################################################
# Setting memory growth
gpus = tf.config.list_physical_devices('GPU')
if enable_memory_growth and gpus:
try:
tf.config.experimental.set_memory_growth(gpus[0], True)
except Exception as ex:
# Invalid device or cannot modify virtual devices once initialized.
pass
#########################################################
# Create 2 virtual GPUs with 1GB memory each
# if gpus:
# try:
# tf.config.experimental.set_virtual_device_configuration(
# gpus[0],
# [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024),
# tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024)])
# logical_gpus = tf.config.experimental.list_logical_devices('GPU')
# print(len(gpus), "Physical GPU,", len(logical_gpus), "Logical GPUs")
# except RuntimeError as e:
# # Virtual devices must be set before GPUs have been initialized
# print(e)
#########################################################
# Using mixed_precision to activate Tensor Cores
if enable_mixed_float16:
mixed_precision.set_global_policy('mixed_float16')
#########################################################
# Configurations
# House keeping
#########################################################
# Storing the default TF strategy, we will use it in case we don`t set our own
strategy = tf.distribute.get_strategy()
if environment == Environment.Local:
accelerator = Accelerator.GPU
if accelerator == Accelerator.TPU and \
(environment == Environment.GoogleColab or environment == Environment.GoogleResearch):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
        # This is the TPU initialization code that has to be called at the beginning of the program.
tf.tpu.experimental.initialize_tpu_system(resolver)
print("TPUs: ", tf.config.list_logical_devices('TPU'))
strategy = tf.distribute.TPUStrategy(resolver)
if environment == Environment.GoogleColab and accelerator == Accelerator.GPU:
strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
dataset_path = '/content/drive/MyDrive/Share/UM-PDD/dataset/'
result_path = '/content/drive/MyDrive/Share/UM-PDD/results/'
if environment == Environment.GoogleColab and accelerator == Accelerator.TPU:
dataset_path = '/content/dataset/'
result_path = '/content/drive/MyDrive/Share/UM-PDD/results/'
#########################################################
# House keeping
|
[
"tensorflow.tpu.experimental.initialize_tpu_system",
"tensorflow.distribute.OneDeviceStrategy",
"tensorflow.distribute.cluster_resolver.TPUClusterResolver",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.distribute.TPUStrategy",
"tensorflow.config.list_logical_devices",
"multiprocessing.cpu_count",
"tensorflow.config.experimental_connect_to_cluster",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.config.list_physical_devices",
"tensorflow.distribute.get_strategy",
"tensorflow.keras.mixed_precision.set_global_policy",
"tensorflow.debugging.set_log_device_placement"
] |
[((471, 498), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (496, 498), False, 'import multiprocessing\n'), ((5486, 5524), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (5517, 5524), True, 'import tensorflow as tf\n'), ((6988, 7016), 'tensorflow.distribute.get_strategy', 'tf.distribute.get_strategy', ([], {}), '()\n', (7014, 7016), True, 'import tensorflow as tf\n'), ((4978, 5021), 'tensorflow.debugging.set_log_device_placement', 'tf.debugging.set_log_device_placement', (['(True)'], {}), '(True)\n', (5015, 5021), True, 'import tensorflow as tf\n'), ((5188, 5226), 'tensorflow.compat.v1.disable_eager_execution', 'tf.compat.v1.disable_eager_execution', ([], {}), '()\n', (5224, 5226), True, 'import tensorflow as tf\n'), ((6666, 6716), 'tensorflow.keras.mixed_precision.set_global_policy', 'mixed_precision.set_global_policy', (['"""mixed_float16"""'], {}), "('mixed_float16')\n", (6699, 6716), False, 'from tensorflow.keras import mixed_precision\n'), ((7267, 7324), 'tensorflow.distribute.cluster_resolver.TPUClusterResolver', 'tf.distribute.cluster_resolver.TPUClusterResolver', ([], {'tpu': '""""""'}), "(tpu='')\n", (7316, 7324), True, 'import tensorflow as tf\n'), ((7334, 7385), 'tensorflow.config.experimental_connect_to_cluster', 'tf.config.experimental_connect_to_cluster', (['resolver'], {}), '(resolver)\n', (7375, 7385), True, 'import tensorflow as tf\n'), ((7493, 7544), 'tensorflow.tpu.experimental.initialize_tpu_system', 'tf.tpu.experimental.initialize_tpu_system', (['resolver'], {}), '(resolver)\n', (7534, 7544), True, 'import tensorflow as tf\n'), ((7629, 7664), 'tensorflow.distribute.TPUStrategy', 'tf.distribute.TPUStrategy', (['resolver'], {}), '(resolver)\n', (7654, 7664), True, 'import tensorflow as tf\n'), ((7770, 7818), 'tensorflow.distribute.OneDeviceStrategy', 'tf.distribute.OneDeviceStrategy', ([], {'device': '"""/gpu:0"""'}), "(device='/gpu:0')\n", (7801, 7818), True, 'import tensorflow as tf\n'), ((5591, 5646), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpus[0]', '(True)'], {}), '(gpus[0], True)\n', (5631, 5646), True, 'import tensorflow as tf\n'), ((7570, 7607), 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""TPU"""'], {}), "('TPU')\n", (7600, 7607), True, 'import tensorflow as tf\n')]
|
from turbo.flux import Mutation, register, dispatch, register_dispatch
import mutation_types
@register_dispatch('user', mutation_types.INCREASE)
def increase(rank):
pass
def decrease(rank):
return dispatch('user', mutation_types.DECREASE, rank)
|
[
"turbo.flux.register_dispatch",
"turbo.flux.dispatch"
] |
[((97, 147), 'turbo.flux.register_dispatch', 'register_dispatch', (['"""user"""', 'mutation_types.INCREASE'], {}), "('user', mutation_types.INCREASE)\n", (114, 147), False, 'from turbo.flux import Mutation, register, dispatch, register_dispatch\n'), ((210, 257), 'turbo.flux.dispatch', 'dispatch', (['"""user"""', 'mutation_types.DECREASE', 'rank'], {}), "('user', mutation_types.DECREASE, rank)\n", (218, 257), False, 'from turbo.flux import Mutation, register, dispatch, register_dispatch\n')]
|
import logging
import os
import io
from fastapi import APIRouter, Depends, Header
from fastapi.responses import FileResponse, StreamingResponse
from fastapi import HTTPException, status
import pyarrow as pa
import pyarrow.parquet as pq
from data_service.api.query_models import (
InputTimePeriodQuery, InputTimeQuery, InputFixedQuery
)
from data_service.config import config
from data_service.api.response_models import ErrorMessage
from data_service.config.config import get_settings
from data_service.config.dependencies import get_processor
from data_service.core.processor import Processor
from data_service.api.auth import authorize_user
data_router = APIRouter()
log = logging.getLogger(__name__)
@data_router.get("/data/resultSet", responses={
204: {}, 404: {"model": ErrorMessage}})
def retrieve_result_set(file_name: str,
authorization: str = Header(None),
settings: config.BaseSettings = Depends(get_settings)):
"""
Stream a generated result parquet file.
"""
log.info(
f"Entering /data/resultSet with request for file name: {file_name}"
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
file_path = (
f"{settings.RESULTSET_DIR}/{file_name}"
)
if not os.path.isfile(file_path):
log.warning(f"No file found for path: {file_path}")
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail='Result set not found'
)
else:
return FileResponse(
file_path, media_type='application/octet-stream'
)
@data_router.post("/data/event/generate-file",
responses={404: {"model": ErrorMessage}})
def create_result_file_event(input_query: InputTimePeriodQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type event,
and write result to file. Returns name of file in response.
"""
log.info(
f'Entering /data/event/generate-file with input query: {input_query}'
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_event_request(input_query)
resultset_file_name = processor.write_table(result_data)
log.info(f'File name for event result set: {resultset_file_name}')
return {
'filename': resultset_file_name,
}
@data_router.post("/data/status/generate-file",
responses={404: {"model": ErrorMessage}})
def create_result_file_status(input_query: InputTimeQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type status,
and write result to file. Returns name of file in response.
"""
log.info(
f'Entering /data/status/generate-file with input query: {input_query}'
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_status_request(input_query)
resultset_file_name = processor.write_table(result_data)
log.info(f'File name for event result set: {resultset_file_name}')
return {
'filename': resultset_file_name,
}
@data_router.post("/data/fixed/generate-file",
responses={404: {"model": ErrorMessage}})
def create_file_result_fixed(input_query: InputFixedQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type fixed,
and write result to file. Returns name of file in response.
"""
log.info(
f'Entering /data/fixed/generate-file with input query: {input_query}'
)
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_fixed_request(input_query)
resultset_file_name = processor.write_table(result_data)
log.info(f'File name for event result set: {resultset_file_name}')
return {
'filename': resultset_file_name,
}
@data_router.post("/data/event/stream",
responses={404: {"model": ErrorMessage}})
def stream_result_event(input_query: InputTimePeriodQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create Result set of data with temporality type event,
and stream result as response.
"""
log.info(f'Entering /data/event/stream with input query: {input_query}')
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_event_request(input_query)
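    # write the Arrow table to an in-memory Parquet buffer and stream the bytes back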
buffer_stream = pa.BufferOutputStream()
pq.write_table(result_data, buffer_stream)
return StreamingResponse(
io.BytesIO(buffer_stream.getvalue().to_pybytes())
)
@data_router.post("/data/status/stream",
responses={404: {"model": ErrorMessage}})
def stream_result_status(input_query: InputTimeQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type status,
and stream result as response.
"""
log.info(f'Entering /data/status/stream with input query: {input_query}')
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_status_request(input_query)
buffer_stream = pa.BufferOutputStream()
pq.write_table(result_data, buffer_stream)
return StreamingResponse(
io.BytesIO(buffer_stream.getvalue().to_pybytes())
)
@data_router.post("/data/fixed/stream",
responses={404: {"model": ErrorMessage}})
def stream_result_fixed(input_query: InputFixedQuery,
authorization: str = Header(None),
processor: Processor = Depends(get_processor)):
"""
Create result set of data with temporality type fixed,
and stream result as response.
"""
log.info(f'Entering /data/fixed/stream with input query: {input_query}')
user_id = authorize_user(authorization)
log.info(f"Authorized token for user: {user_id}")
result_data = processor.process_fixed_request(input_query)
buffer_stream = pa.BufferOutputStream()
pq.write_table(result_data, buffer_stream)
return StreamingResponse(
io.BytesIO(buffer_stream.getvalue().to_pybytes())
)
|
[
"logging.getLogger",
"fastapi.Header",
"pyarrow.BufferOutputStream",
"fastapi.HTTPException",
"fastapi.responses.FileResponse",
"data_service.api.auth.authorize_user",
"os.path.isfile",
"fastapi.APIRouter",
"pyarrow.parquet.write_table",
"fastapi.Depends"
] |
[((664, 675), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (673, 675), False, 'from fastapi import APIRouter, Depends, Header\n'), ((682, 709), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (699, 709), False, 'import logging\n'), ((902, 914), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (908, 914), False, 'from fastapi import APIRouter, Depends, Header\n'), ((972, 993), 'fastapi.Depends', 'Depends', (['get_settings'], {}), '(get_settings)\n', (979, 993), False, 'from fastapi import APIRouter, Depends, Header\n'), ((1166, 1195), 'data_service.api.auth.authorize_user', 'authorize_user', (['authorization'], {}), '(authorization)\n', (1180, 1195), False, 'from data_service.api.auth import authorize_user\n'), ((1886, 1898), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (1892, 1898), False, 'from fastapi import APIRouter, Depends, Header\n'), ((1952, 1974), 'fastapi.Depends', 'Depends', (['get_processor'], {}), '(get_processor)\n', (1959, 1974), False, 'from fastapi import APIRouter, Depends, Header\n'), ((2229, 2258), 'data_service.api.auth.authorize_user', 'authorize_user', (['authorization'], {}), '(authorization)\n', (2243, 2258), False, 'from data_service.api.auth import authorize_user\n'), ((2790, 2802), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (2796, 2802), False, 'from fastapi import APIRouter, Depends, Header\n'), ((2857, 2879), 'fastapi.Depends', 'Depends', (['get_processor'], {}), '(get_processor)\n', (2864, 2879), False, 'from fastapi import APIRouter, Depends, Header\n'), ((3136, 3165), 'data_service.api.auth.authorize_user', 'authorize_user', (['authorization'], {}), '(authorization)\n', (3150, 3165), False, 'from data_service.api.auth import authorize_user\n'), ((3696, 3708), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (3702, 3708), False, 'from fastapi import APIRouter, Depends, Header\n'), ((3762, 3784), 'fastapi.Depends', 'Depends', (['get_processor'], {}), '(get_processor)\n', (3769, 3784), False, 'from fastapi import APIRouter, Depends, Header\n'), ((4039, 4068), 'data_service.api.auth.authorize_user', 'authorize_user', (['authorization'], {}), '(authorization)\n', (4053, 4068), False, 'from data_service.api.auth import authorize_user\n'), ((4586, 4598), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (4592, 4598), False, 'from fastapi import APIRouter, Depends, Header\n'), ((4647, 4669), 'fastapi.Depends', 'Depends', (['get_processor'], {}), '(get_processor)\n', (4654, 4669), False, 'from fastapi import APIRouter, Depends, Header\n'), ((4874, 4903), 'data_service.api.auth.authorize_user', 'authorize_user', (['authorization'], {}), '(authorization)\n', (4888, 4903), False, 'from data_service.api.auth import authorize_user\n'), ((5042, 5065), 'pyarrow.BufferOutputStream', 'pa.BufferOutputStream', ([], {}), '()\n', (5063, 5065), True, 'import pyarrow as pa\n'), ((5070, 5112), 'pyarrow.parquet.write_table', 'pq.write_table', (['result_data', 'buffer_stream'], {}), '(result_data, buffer_stream)\n', (5084, 5112), True, 'import pyarrow.parquet as pq\n'), ((5410, 5422), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (5416, 5422), False, 'from fastapi import APIRouter, Depends, Header\n'), ((5472, 5494), 'fastapi.Depends', 'Depends', (['get_processor'], {}), '(get_processor)\n', (5479, 5494), False, 'from fastapi import APIRouter, Depends, Header\n'), ((5701, 5730), 'data_service.api.auth.authorize_user', 'authorize_user', (['authorization'], {}), '(authorization)\n', (5715, 5730), 
False, 'from data_service.api.auth import authorize_user\n'), ((5870, 5893), 'pyarrow.BufferOutputStream', 'pa.BufferOutputStream', ([], {}), '()\n', (5891, 5893), True, 'import pyarrow as pa\n'), ((5898, 5940), 'pyarrow.parquet.write_table', 'pq.write_table', (['result_data', 'buffer_stream'], {}), '(result_data, buffer_stream)\n', (5912, 5940), True, 'import pyarrow.parquet as pq\n'), ((6236, 6248), 'fastapi.Header', 'Header', (['None'], {}), '(None)\n', (6242, 6248), False, 'from fastapi import APIRouter, Depends, Header\n'), ((6297, 6319), 'fastapi.Depends', 'Depends', (['get_processor'], {}), '(get_processor)\n', (6304, 6319), False, 'from fastapi import APIRouter, Depends, Header\n'), ((6523, 6552), 'data_service.api.auth.authorize_user', 'authorize_user', (['authorization'], {}), '(authorization)\n', (6537, 6552), False, 'from data_service.api.auth import authorize_user\n'), ((6691, 6714), 'pyarrow.BufferOutputStream', 'pa.BufferOutputStream', ([], {}), '()\n', (6712, 6714), True, 'import pyarrow as pa\n'), ((6719, 6761), 'pyarrow.parquet.write_table', 'pq.write_table', (['result_data', 'buffer_stream'], {}), '(result_data, buffer_stream)\n', (6733, 6761), True, 'import pyarrow.parquet as pq\n'), ((1334, 1359), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1348, 1359), False, 'import os\n'), ((1435, 1523), 'fastapi.HTTPException', 'HTTPException', ([], {'status_code': 'status.HTTP_404_NOT_FOUND', 'detail': '"""Result set not found"""'}), "(status_code=status.HTTP_404_NOT_FOUND, detail=\n 'Result set not found')\n", (1448, 1523), False, 'from fastapi import HTTPException, status\n'), ((1578, 1640), 'fastapi.responses.FileResponse', 'FileResponse', (['file_path'], {'media_type': '"""application/octet-stream"""'}), "(file_path, media_type='application/octet-stream')\n", (1590, 1640), False, 'from fastapi.responses import FileResponse, StreamingResponse\n')]
|
import json
import argparse
from argus.callbacks import MonitorCheckpoint, \
EarlyStopping, LoggingToFile, ReduceLROnPlateau
from torch.utils.data import DataLoader
from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset
from src.datasets import get_corrected_noisy_data, FreesoundCorrectedNoisyDataset
from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb
from src.transforms import get_transforms
from src.argus_models import FreesoundModel
from src.utils import load_noisy_data, load_folds_data
from src import config
parser = argparse.ArgumentParser()
parser.add_argument('--experiment', required=True, type=str)
args = parser.parse_args()
BATCH_SIZE = 128
CROP_SIZE = 256
DATASET_SIZE = 128 * 256
NOISY_PROB = 0.01
CORR_NOISY_PROB = 0.42
MIXER_PROB = 0.8
WRAP_PAD_PROB = 0.5
CORRECTIONS = True
if config.kernel:
NUM_WORKERS = 2
else:
NUM_WORKERS = 8
SAVE_DIR = config.experiments_dir / args.experiment
PARAMS = {
'nn_module': ('AuxSkipAttention', {
'num_classes': len(config.classes),
'base_size': 64,
'dropout': 0.4,
'ratio': 16,
'kernel_size': 7,
'last_filters': 8,
'last_fc': 4
}),
'loss': ('OnlyNoisyLSoftLoss', {
'beta': 0.7,
'noisy_weight': 0.5,
'curated_weight': 0.5
}),
'optimizer': ('Adam', {'lr': 0.0009}),
'device': 'cuda',
'aux': {
'weights': [1.0, 0.4, 0.2, 0.1]
},
'amp': {
'opt_level': 'O2',
'keep_batchnorm_fp32': True,
'loss_scale': "dynamic"
}
}
def train_fold(save_dir, train_folds, val_folds,
folds_data, noisy_data, corrected_noisy_data):
train_transfrom = get_transforms(train=True,
size=CROP_SIZE,
wrap_pad_prob=WRAP_PAD_PROB,
resize_scale=(0.8, 1.0),
resize_ratio=(1.7, 2.3),
resize_prob=0.33,
spec_num_mask=2,
spec_freq_masking=0.15,
spec_time_masking=0.20,
spec_prob=0.5)
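    # augmentation: randomly combine two clips, either sigmoid-concatenated or additively mixed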
mixer = RandomMixer([
SigmoidConcatMixer(sigmoid_range=(3, 12)),
AddMixer(alpha_dist='uniform')
], p=[0.6, 0.4])
mixer = UseMixerWithProb(mixer, prob=MIXER_PROB)
curated_dataset = FreesoundDataset(folds_data, train_folds,
transform=train_transfrom,
mixer=mixer)
noisy_dataset = FreesoundNoisyDataset(noisy_data,
transform=train_transfrom,
mixer=mixer)
corr_noisy_dataset = FreesoundCorrectedNoisyDataset(corrected_noisy_data,
transform=train_transfrom,
mixer=mixer)
dataset_probs = [NOISY_PROB, CORR_NOISY_PROB, 1 - NOISY_PROB - CORR_NOISY_PROB]
print("Dataset probs", dataset_probs)
print("Dataset lens", len(noisy_dataset), len(corr_noisy_dataset), len(curated_dataset))
train_dataset = RandomDataset([noisy_dataset, corr_noisy_dataset, curated_dataset],
p=dataset_probs,
size=DATASET_SIZE)
val_dataset = FreesoundDataset(folds_data, val_folds,
get_transforms(False, CROP_SIZE))
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE,
shuffle=True, drop_last=True,
num_workers=NUM_WORKERS)
val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE * 2,
shuffle=False, num_workers=NUM_WORKERS)
model = FreesoundModel(PARAMS)
callbacks = [
MonitorCheckpoint(save_dir, monitor='val_lwlrap', max_saves=1),
ReduceLROnPlateau(monitor='val_lwlrap', patience=6, factor=0.6, min_lr=1e-8),
EarlyStopping(monitor='val_lwlrap', patience=18),
LoggingToFile(save_dir / 'log.txt'),
]
model.fit(train_loader,
val_loader=val_loader,
max_epochs=700,
callbacks=callbacks,
metrics=['multi_accuracy', 'lwlrap'])
if __name__ == "__main__":
if not SAVE_DIR.exists():
SAVE_DIR.mkdir(parents=True, exist_ok=True)
else:
print(f"Folder {SAVE_DIR} already exists.")
with open(SAVE_DIR / 'source.py', 'w') as outfile:
outfile.write(open(__file__).read())
print("Model params", PARAMS)
with open(SAVE_DIR / 'params.json', 'w') as outfile:
json.dump(PARAMS, outfile)
folds_data = load_folds_data(use_corrections=CORRECTIONS)
noisy_data = load_noisy_data()
corrected_noisy_data = get_corrected_noisy_data()
for fold in config.folds:
val_folds = [fold]
train_folds = list(set(config.folds) - set(val_folds))
save_fold_dir = SAVE_DIR / f'fold_{fold}'
print(f"Val folds: {val_folds}, Train folds: {train_folds}")
print(f"Fold save dir {save_fold_dir}")
train_fold(save_fold_dir, train_folds, val_folds,
folds_data, noisy_data, corrected_noisy_data)
|
[
"src.transforms.get_transforms",
"src.mixers.SigmoidConcatMixer",
"src.mixers.AddMixer",
"argus.callbacks.EarlyStopping",
"src.datasets.get_corrected_noisy_data",
"argparse.ArgumentParser",
"src.utils.load_noisy_data",
"argus.callbacks.ReduceLROnPlateau",
"argus.callbacks.MonitorCheckpoint",
"src.datasets.FreesoundCorrectedNoisyDataset",
"src.argus_models.FreesoundModel",
"src.datasets.RandomDataset",
"src.utils.load_folds_data",
"src.mixers.UseMixerWithProb",
"src.datasets.FreesoundDataset",
"torch.utils.data.DataLoader",
"src.datasets.FreesoundNoisyDataset",
"json.dump",
"argus.callbacks.LoggingToFile"
] |
[((592, 617), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (615, 617), False, 'import argparse\n'), ((1727, 1957), 'src.transforms.get_transforms', 'get_transforms', ([], {'train': '(True)', 'size': 'CROP_SIZE', 'wrap_pad_prob': 'WRAP_PAD_PROB', 'resize_scale': '(0.8, 1.0)', 'resize_ratio': '(1.7, 2.3)', 'resize_prob': '(0.33)', 'spec_num_mask': '(2)', 'spec_freq_masking': '(0.15)', 'spec_time_masking': '(0.2)', 'spec_prob': '(0.5)'}), '(train=True, size=CROP_SIZE, wrap_pad_prob=WRAP_PAD_PROB,\n resize_scale=(0.8, 1.0), resize_ratio=(1.7, 2.3), resize_prob=0.33,\n spec_num_mask=2, spec_freq_masking=0.15, spec_time_masking=0.2,\n spec_prob=0.5)\n', (1741, 1957), False, 'from src.transforms import get_transforms\n'), ((2430, 2470), 'src.mixers.UseMixerWithProb', 'UseMixerWithProb', (['mixer'], {'prob': 'MIXER_PROB'}), '(mixer, prob=MIXER_PROB)\n', (2446, 2470), False, 'from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb\n'), ((2494, 2580), 'src.datasets.FreesoundDataset', 'FreesoundDataset', (['folds_data', 'train_folds'], {'transform': 'train_transfrom', 'mixer': 'mixer'}), '(folds_data, train_folds, transform=train_transfrom, mixer=\n mixer)\n', (2510, 2580), False, 'from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset\n'), ((2674, 2747), 'src.datasets.FreesoundNoisyDataset', 'FreesoundNoisyDataset', (['noisy_data'], {'transform': 'train_transfrom', 'mixer': 'mixer'}), '(noisy_data, transform=train_transfrom, mixer=mixer)\n', (2695, 2747), False, 'from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset\n'), ((2857, 2954), 'src.datasets.FreesoundCorrectedNoisyDataset', 'FreesoundCorrectedNoisyDataset', (['corrected_noisy_data'], {'transform': 'train_transfrom', 'mixer': 'mixer'}), '(corrected_noisy_data, transform=\n train_transfrom, mixer=mixer)\n', (2887, 2954), False, 'from src.datasets import get_corrected_noisy_data, FreesoundCorrectedNoisyDataset\n'), ((3301, 3409), 'src.datasets.RandomDataset', 'RandomDataset', (['[noisy_dataset, corr_noisy_dataset, curated_dataset]'], {'p': 'dataset_probs', 'size': 'DATASET_SIZE'}), '([noisy_dataset, corr_noisy_dataset, curated_dataset], p=\n dataset_probs, size=DATASET_SIZE)\n', (3314, 3409), False, 'from src.datasets import FreesoundDataset, FreesoundNoisyDataset, RandomDataset\n'), ((3620, 3728), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'BATCH_SIZE', 'shuffle': '(True)', 'drop_last': '(True)', 'num_workers': 'NUM_WORKERS'}), '(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=\n True, num_workers=NUM_WORKERS)\n', (3630, 3728), False, 'from torch.utils.data import DataLoader\n'), ((3801, 3895), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': '(BATCH_SIZE * 2)', 'shuffle': '(False)', 'num_workers': 'NUM_WORKERS'}), '(val_dataset, batch_size=BATCH_SIZE * 2, shuffle=False,\n num_workers=NUM_WORKERS)\n', (3811, 3895), False, 'from torch.utils.data import DataLoader\n'), ((3933, 3955), 'src.argus_models.FreesoundModel', 'FreesoundModel', (['PARAMS'], {}), '(PARAMS)\n', (3947, 3955), False, 'from src.argus_models import FreesoundModel\n'), ((4844, 4888), 'src.utils.load_folds_data', 'load_folds_data', ([], {'use_corrections': 'CORRECTIONS'}), '(use_corrections=CORRECTIONS)\n', (4859, 4888), False, 'from src.utils import load_noisy_data, load_folds_data\n'), ((4906, 4923), 'src.utils.load_noisy_data', 'load_noisy_data', ([], {}), '()\n', (4921, 4923), False, 'from 
src.utils import load_noisy_data, load_folds_data\n'), ((4951, 4977), 'src.datasets.get_corrected_noisy_data', 'get_corrected_noisy_data', ([], {}), '()\n', (4975, 4977), False, 'from src.datasets import get_corrected_noisy_data, FreesoundCorrectedNoisyDataset\n'), ((3567, 3599), 'src.transforms.get_transforms', 'get_transforms', (['(False)', 'CROP_SIZE'], {}), '(False, CROP_SIZE)\n', (3581, 3599), False, 'from src.transforms import get_transforms\n'), ((3983, 4045), 'argus.callbacks.MonitorCheckpoint', 'MonitorCheckpoint', (['save_dir'], {'monitor': '"""val_lwlrap"""', 'max_saves': '(1)'}), "(save_dir, monitor='val_lwlrap', max_saves=1)\n", (4000, 4045), False, 'from argus.callbacks import MonitorCheckpoint, EarlyStopping, LoggingToFile, ReduceLROnPlateau\n'), ((4055, 4132), 'argus.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_lwlrap"""', 'patience': '(6)', 'factor': '(0.6)', 'min_lr': '(1e-08)'}), "(monitor='val_lwlrap', patience=6, factor=0.6, min_lr=1e-08)\n", (4072, 4132), False, 'from argus.callbacks import MonitorCheckpoint, EarlyStopping, LoggingToFile, ReduceLROnPlateau\n'), ((4141, 4189), 'argus.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_lwlrap"""', 'patience': '(18)'}), "(monitor='val_lwlrap', patience=18)\n", (4154, 4189), False, 'from argus.callbacks import MonitorCheckpoint, EarlyStopping, LoggingToFile, ReduceLROnPlateau\n'), ((4199, 4234), 'argus.callbacks.LoggingToFile', 'LoggingToFile', (["(save_dir / 'log.txt')"], {}), "(save_dir / 'log.txt')\n", (4212, 4234), False, 'from argus.callbacks import MonitorCheckpoint, EarlyStopping, LoggingToFile, ReduceLROnPlateau\n'), ((4799, 4825), 'json.dump', 'json.dump', (['PARAMS', 'outfile'], {}), '(PARAMS, outfile)\n', (4808, 4825), False, 'import json\n'), ((2315, 2356), 'src.mixers.SigmoidConcatMixer', 'SigmoidConcatMixer', ([], {'sigmoid_range': '(3, 12)'}), '(sigmoid_range=(3, 12))\n', (2333, 2356), False, 'from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb\n'), ((2366, 2396), 'src.mixers.AddMixer', 'AddMixer', ([], {'alpha_dist': '"""uniform"""'}), "(alpha_dist='uniform')\n", (2374, 2396), False, 'from src.mixers import RandomMixer, AddMixer, SigmoidConcatMixer, UseMixerWithProb\n')]
|
import pytest
@pytest.fixture(scope="session")
def test_data():
from pathlib import Path
module_dir = Path(__file__).resolve().parent
test_dir = module_dir / "test_data"
return test_dir.resolve()
@pytest.fixture(scope="session")
def database():
return "jobflow_test"
@pytest.fixture(scope="session")
def mongo_jobstore(database):
from maggma.stores import MongoStore
from jobflow import JobStore
store = JobStore(MongoStore(database, "outputs"))
store.connect()
return store
@pytest.fixture(scope="function")
def memory_jobstore():
from maggma.stores import MemoryStore
from jobflow import JobStore
store = JobStore(MemoryStore())
store.connect()
return store
@pytest.fixture(scope="function")
def memory_data_jobstore():
from maggma.stores import MemoryStore
from jobflow import JobStore
store = JobStore(MemoryStore(), additional_stores={"data": MemoryStore()})
store.connect()
return store
@pytest.fixture
def clean_dir():
import os
import shutil
import tempfile
old_cwd = os.getcwd()
newpath = tempfile.mkdtemp()
os.chdir(newpath)
yield
os.chdir(old_cwd)
shutil.rmtree(newpath)
@pytest.fixture(scope="session")
def debug_mode():
return False
@pytest.fixture(scope="session")
def lpad(database, debug_mode):
from fireworks import LaunchPad
lpad = LaunchPad(name=database)
lpad.reset("", require_password=False)
yield lpad
if not debug_mode:
lpad.reset("", require_password=False)
for coll in lpad.db.list_collection_names():
lpad.db[coll].drop()
@pytest.fixture
def no_pydot(monkeypatch):
import builtins
import_orig = builtins.__import__
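    # simulate a missing optional dependency by failing imports of "pydot" only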
def mocked_import(name, *args, **kwargs):
if name == "pydot":
raise ImportError()
return import_orig(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
@pytest.fixture
def no_matplotlib(monkeypatch):
import builtins
import_orig = builtins.__import__
def mocked_import(name, *args, **kwargs):
if name == "matplotlib":
raise ImportError()
return import_orig(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
|
[
"pathlib.Path",
"fireworks.LaunchPad",
"os.getcwd",
"os.chdir",
"tempfile.mkdtemp",
"maggma.stores.MemoryStore",
"shutil.rmtree",
"pytest.fixture",
"maggma.stores.MongoStore"
] |
[((17, 48), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (31, 48), False, 'import pytest\n'), ((218, 249), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (232, 249), False, 'import pytest\n'), ((295, 326), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (309, 326), False, 'import pytest\n'), ((527, 559), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (541, 559), False, 'import pytest\n'), ((737, 769), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (751, 769), False, 'import pytest\n'), ((1225, 1256), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1239, 1256), False, 'import pytest\n'), ((1295, 1326), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1309, 1326), False, 'import pytest\n'), ((1094, 1105), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1103, 1105), False, 'import os\n'), ((1120, 1138), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1136, 1138), False, 'import tempfile\n'), ((1143, 1160), 'os.chdir', 'os.chdir', (['newpath'], {}), '(newpath)\n', (1151, 1160), False, 'import os\n'), ((1177, 1194), 'os.chdir', 'os.chdir', (['old_cwd'], {}), '(old_cwd)\n', (1185, 1194), False, 'import os\n'), ((1199, 1221), 'shutil.rmtree', 'shutil.rmtree', (['newpath'], {}), '(newpath)\n', (1212, 1221), False, 'import shutil\n'), ((1407, 1431), 'fireworks.LaunchPad', 'LaunchPad', ([], {'name': 'database'}), '(name=database)\n', (1416, 1431), False, 'from fireworks import LaunchPad\n'), ((454, 485), 'maggma.stores.MongoStore', 'MongoStore', (['database', '"""outputs"""'], {}), "(database, 'outputs')\n", (464, 485), False, 'from maggma.stores import MongoStore\n'), ((681, 694), 'maggma.stores.MemoryStore', 'MemoryStore', ([], {}), '()\n', (692, 694), False, 'from maggma.stores import MemoryStore\n'), ((896, 909), 'maggma.stores.MemoryStore', 'MemoryStore', ([], {}), '()\n', (907, 909), False, 'from maggma.stores import MemoryStore\n'), ((113, 127), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (117, 127), False, 'from pathlib import Path\n'), ((938, 951), 'maggma.stores.MemoryStore', 'MemoryStore', ([], {}), '()\n', (949, 951), False, 'from maggma.stores import MemoryStore\n')]
|
# This script allows to download a single file from a remote ZIP archive
# without downloading the whole ZIP file itself.
# The hosting server needs to support the HTTP range header for it to work
import zipfile
import requests
import argparse
class HTTPIO(object):
def __init__(self, url):
self.url = url
r = requests.head(self.url)
self.size = int(r.headers['content-length'])
assert self.size > 0
self.offset = 0
def seek(self, offset, whence=0):
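        # mimic file.seek semantics: whence 0 = absolute, 1 = relative, 2 = from end of file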
if whence == 0:
self.offset = offset
elif whence == 1:
self.offset += offset
elif whence == 2:
self.offset = self.size + offset
else:
raise Exception('Unknown value for parameter whence')
def read(self, size = None):
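        # request only the needed byte range so the full ZIP is never downloaded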
if size is None:
r = requests.get(self.url,
headers={"range": "bytes={}-{}".format(self.offset, self.size - 1)},
stream=True)
else:
r = requests.get(self.url,
headers={"range": "bytes={}-{}".format(self.offset, min(self.size - 1, self.offset+size - 1))},
stream=True)
r.raise_for_status()
r.raw.decode_content = True
content = r.raw.read()
self.offset += len(content)
return content
def tell(self):
return self.offset
def download_file(zip_url, relative_path, output_file):
with zipfile.ZipFile(HTTPIO(zip_url)) as zz:
with open(output_file, 'wb') as f:
f.write(zz.read(relative_path))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('URL', type=str, help='URL to zip file, e.g. https://example.com/myfile.zip')
parser.add_argument('FILE_PATH', type=str, help='Path of the desired file in the ZIP file, e.g. myfolder/mydocument.docx')
parser.add_argument('OUTPUT_FILE', type=str, help='Local path to write the file to, e.g. /home/user/mydocument.docx')
args = parser.parse_args()
download_file(args.URL, args.FILE_PATH, args.OUTPUT_FILE)
|
[
"requests.head",
"argparse.ArgumentParser"
] |
[((1672, 1697), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1695, 1697), False, 'import argparse\n'), ((333, 356), 'requests.head', 'requests.head', (['self.url'], {}), '(self.url)\n', (346, 356), False, 'import requests\n')]
|
""" Example 035: Scheduled sending and delayed routing """
from os import path
from docusign_esign.client.api_exception import ApiException
from flask import render_template, session, Blueprint, request
from ..examples.eg035_scheduled_sending import Eg035ScheduledSendingController
from ...docusign import authenticate
from ...ds_config import DS_CONFIG
from ...error_handlers import process_error
from ...consts import pattern
eg = "eg035" # reference (and url) for this example
eg035 = Blueprint("eg035", __name__)
def get_args():
"""Get request and session arguments"""
# More data validation would be a good idea here
# Strip anything other than characters listed
signer_email = pattern.sub("", request.form.get("signer_email"))
signer_name = pattern.sub("", request.form.get("signer_name"))
resume_date = request.form.get("resume_date")
envelope_args = {
"signer_email": signer_email,
"signer_name": signer_name,
"resume_date": resume_date,
"status": "sent",
}
args = {
"account_id": session["ds_account_id"],
"base_path": session["ds_base_path"],
"access_token": session["ds_access_token"],
"envelope_args": envelope_args
}
return args
@eg035.route("/eg035", methods=["POST"])
@authenticate(eg=eg)
def sign_by_email():
"""
1. Get required arguments
2. Call the worker method
3. Render success response with envelopeId
"""
# 1. Get required arguments
args = get_args()
try:
        # 2. Call the worker method
results = Eg035ScheduledSendingController.worker(args)
print(results)
except ApiException as err:
return process_error(err)
    # 3. Render success response with envelopeId
return render_template(
"example_done.html",
title="Envelope sent",
h1="Envelope sent",
message=f"The envelope has been created and scheduled!<br/>Envelope ID: {results['envelope_id']}."
)
@eg035.route("/eg035", methods=["GET"])
@authenticate(eg=eg)
def get_view():
"""responds with the form for the example"""
return render_template(
"eg035_scheduled_sending.html",
title="Scheduled sending",
source_file="eg035_scheduled_sending.py",
source_url=DS_CONFIG["github_example_url"] + "eg035_scheduled_sending.py",
documentation=DS_CONFIG["documentation"] + eg,
show_doc=DS_CONFIG["documentation"],
signer_name=DS_CONFIG["signer_name"],
signer_email=DS_CONFIG["signer_email"]
)
|
[
"flask.render_template",
"flask.Blueprint",
"flask.request.form.get"
] |
[((494, 522), 'flask.Blueprint', 'Blueprint', (['"""eg035"""', '__name__'], {}), "('eg035', __name__)\n", (503, 522), False, 'from flask import render_template, session, Blueprint, request\n'), ((842, 873), 'flask.request.form.get', 'request.form.get', (['"""resume_date"""'], {}), "('resume_date')\n", (858, 873), False, 'from flask import render_template, session, Blueprint, request\n'), ((1779, 1973), 'flask.render_template', 'render_template', (['"""example_done.html"""'], {'title': '"""Envelope sent"""', 'h1': '"""Envelope sent"""', 'message': 'f"""The envelope has been created and scheduled!<br/>Envelope ID: {results[\'envelope_id\']}."""'}), '(\'example_done.html\', title=\'Envelope sent\', h1=\n \'Envelope sent\', message=\n f"The envelope has been created and scheduled!<br/>Envelope ID: {results[\'envelope_id\']}."\n )\n', (1794, 1973), False, 'from flask import render_template, session, Blueprint, request\n'), ((2137, 2513), 'flask.render_template', 'render_template', (['"""eg035_scheduled_sending.html"""'], {'title': '"""Scheduled sending"""', 'source_file': '"""eg035_scheduled_sending.py"""', 'source_url': "(DS_CONFIG['github_example_url'] + 'eg035_scheduled_sending.py')", 'documentation': "(DS_CONFIG['documentation'] + eg)", 'show_doc': "DS_CONFIG['documentation']", 'signer_name': "DS_CONFIG['signer_name']", 'signer_email': "DS_CONFIG['signer_email']"}), "('eg035_scheduled_sending.html', title='Scheduled sending',\n source_file='eg035_scheduled_sending.py', source_url=DS_CONFIG[\n 'github_example_url'] + 'eg035_scheduled_sending.py', documentation=\n DS_CONFIG['documentation'] + eg, show_doc=DS_CONFIG['documentation'],\n signer_name=DS_CONFIG['signer_name'], signer_email=DS_CONFIG[\n 'signer_email'])\n", (2152, 2513), False, 'from flask import render_template, session, Blueprint, request\n'), ((723, 755), 'flask.request.form.get', 'request.form.get', (['"""signer_email"""'], {}), "('signer_email')\n", (739, 755), False, 'from flask import render_template, session, Blueprint, request\n'), ((791, 822), 'flask.request.form.get', 'request.form.get', (['"""signer_name"""'], {}), "('signer_name')\n", (807, 822), False, 'from flask import render_template, session, Blueprint, request\n')]
|
## Unit 4 Project - Two Player Game
## <NAME> - Computer Programming II
## The Elder Scrolls X
# A fan-made two-player game, a successor to The Elder Scrolls series
# Two players start off in an arena
# Can choose starting items
# Can choose classes
## Libraries
import time # Self explanatory
import random # Self explanatory
import os # Used for Linux commands
import os, platform # For Linux intro
## Functions
def sleep(): # This function just automates what I usually do manually
time.sleep(0.1)
print("\n")
return
## Code
class Player1(object): # This is the class for Player 1
def __init__(self, name, health, attack, stamina, defense):
self.name = name # Player's name
self.health = health # Player's max health
self.attack = attack # Player's attack power, can be changed
self.stamina = stamina # How many attacks you can do
self.defense = defense # How much damage you take
def Stats(self):
sleep()
print(self.name + "'s currents stats are: ")
sleep()
print("Health = " + str(self.health))
print("Attack = " + str(self.attack))
print("Stamina = " + str(self.stamina))
print("Defense = " + str(self.defense))
sleep()
class Player2(object): # This is the class for Player 2
def __init__(self, name, health, attack, stamina, defense):
self.name = name
self.health = health
self.attack = attack
self.stamina = stamina
self.defense = defense
def Stats(self):
sleep()
print(self.name + "'s currents stats are: ")
sleep()
print("Health = " + str(self.health))
print("Attack = " + str(self.attack))
print("Stamina = " + str(self.stamina))
print("Defense = " + str(self.defense))
sleep()
def intro1(): # This is an intro for Linux
sleep()
os.system("figlet Elder Scrolls X")
sleep()
return
def intro2(): # Intro for anything else
sleep()
print("\n\t Elder Scrolls X")
sleep()
return
if platform.system() == "Linux":
intro1()
else:
intro2()
def CharCreation(): # Function to ask questions for class choosing
sleep()
print("=> What kind of class do you want?")
sleep()
print("> 1 - Knight")
#sleep()
print("> 2 - Thief")
#sleep()
print("> 3 - Lancer")
sleep()
return
sleep()
print("=> Player 1 : What is your name?")
name1 = input("> ") # "name1" is Player 1's name
sleep()
print("=> Player 1,")
CharCreation()
CharCreationChoice1 = input("> ")
if CharCreationChoice1 == ("1"): # Knight
player1 = Player1(name1, 200, 150, 50, 200)
if CharCreationChoice1 == ("2"): # Thief
player1 = Player1(name1, 100, 200, 100, 50)
if CharCreationChoice1 == ("3"): # Lancer
player1 = Player1(name1, 100, 100, 100, 100)
sleep()
player1.Stats() # Prints the stats for Player 1
sleep()
print("=> Player 2 : What is your name?")
name2 = input("> ") # "name2" is Player 2's name
CharCreation()
CharCreationChoice2 = input("> ")
if CharCreationChoice2 == ("1"): # Knight
player2 = Player2(name2, 200, 150, 50, 200)
if CharCreationChoice2 == ("2"): # Thief
player2 = Player2(name2, 100, 200, 100, 50)
if CharCreationChoice2 == ("3"): # Lancer
player2 = Player2(name2, 100, 100, 100, 100)
player2.Stats() # Prints Player 2's stats
|
[
"os.system",
"platform.system",
"time.sleep"
] |
[((501, 516), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (511, 516), False, 'import time\n'), ((1911, 1946), 'os.system', 'os.system', (['"""figlet Elder Scrolls X"""'], {}), "('figlet Elder Scrolls X')\n", (1920, 1946), False, 'import os, platform\n'), ((2087, 2104), 'platform.system', 'platform.system', ([], {}), '()\n', (2102, 2104), False, 'import os, platform\n')]
|
#!/usr/bin/env python3
# ===================================================================================
# Copyright (C) 2019 Fraunhofer Gesellschaft. All rights reserved.
# ===================================================================================
# This Acumos software file is distributed by Fraunhofer Gesellschaft
# under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============LICENSE_END==========================================================
"""
Provides an example of Docker URI cli on-boarding
"""
import requests
import os
import json
# properties of the model
model_name = "my-model-1"
dockerImageURI = "cicd.ai4eu-dev.eu:7444/myimages/onboardingtest:v3" #Docker image URI looks like: example.com:port/image-tag:version
license_file = "./license-1.0.0.json"
protobuf_file = "./model.proto"
# setup parameters
host = os.environ['ACUMOS_HOST'] # FQHN like aiexp-preprod.ai4europe.eu
token = os.environ['ACUMOS_TOKEN'] # format is 'acumos_username:API_TOKEN'
advanced_api = "https://" + host + ":443/onboarding-app/v2/advancedModel"
files= {'license': ('license.json', open(license_file, 'rb'), 'application.json'),
'protobuf': ('model.proto', open(protobuf_file, 'rb'), 'text/plain')}
headers = {"Accept": "application/json",
"modelname": model_name,
"Authorization": token,
"dockerFileURL": dockerImageURI,
'isCreateMicroservice': 'false'}
#send request
response = requests.post(advanced_api, files=files, headers=headers)
#check response
if response.status_code == 201:
body = json.loads(response.text)
solution_id = body['result']['solutionId']
print("Docker uri is pushed successfully on {" + host + "}, response is: ", response.status_code, " - solutionId: ", solution_id)
else:
print("Docker uri is not pushed on {" + host + "}, response is: ", response.status_code)
|
[
"json.loads",
"requests.post"
] |
[((1846, 1903), 'requests.post', 'requests.post', (['advanced_api'], {'files': 'files', 'headers': 'headers'}), '(advanced_api, files=files, headers=headers)\n', (1859, 1903), False, 'import requests\n'), ((1964, 1989), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1974, 1989), False, 'import json\n')]
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 20 08:40:22 2017
@author: fabio
"""
import ee
import ee.mapclient
ee.Initialize()
collection = ee.ImageCollection('MODIS/MCD43A4_NDVI')
lista = collection.toList(10)
#print lista.getInfo()
image = ee.Image('LC8_L1T/LC81910312016217LGN00')
#print image.getInfo()
bandNames = image.bandNames()
print('Band Names: ', bandNames.getInfo())
b1scale = image.select('B1').projection().nominalScale()
print('Band 1 scale: ', b1scale.getInfo())
b8scale = image.select('B8').projection().nominalScale()
print('Band 8 scale: ', b8scale.getInfo())
ndvi = image.normalizedDifference(['B5', 'B4'])
ee.mapclient.addToMap(ndvi,
{'min' : -1,
"max": 1},
"NDVI")
ee.mapclient.centerMap(12.3536,41.7686,9)
|
[
"ee.mapclient.addToMap",
"ee.Image",
"ee.ImageCollection",
"ee.Initialize",
"ee.mapclient.centerMap"
] |
[((139, 154), 'ee.Initialize', 'ee.Initialize', ([], {}), '()\n', (152, 154), False, 'import ee\n'), ((169, 209), 'ee.ImageCollection', 'ee.ImageCollection', (['"""MODIS/MCD43A4_NDVI"""'], {}), "('MODIS/MCD43A4_NDVI')\n", (187, 209), False, 'import ee\n'), ((273, 314), 'ee.Image', 'ee.Image', (['"""LC8_L1T/LC81910312016217LGN00"""'], {}), "('LC8_L1T/LC81910312016217LGN00')\n", (281, 314), False, 'import ee\n'), ((667, 725), 'ee.mapclient.addToMap', 'ee.mapclient.addToMap', (['ndvi', "{'min': -1, 'max': 1}", '"""NDVI"""'], {}), "(ndvi, {'min': -1, 'max': 1}, 'NDVI')\n", (688, 725), False, 'import ee\n'), ((794, 837), 'ee.mapclient.centerMap', 'ee.mapclient.centerMap', (['(12.3536)', '(41.7686)', '(9)'], {}), '(12.3536, 41.7686, 9)\n', (816, 837), False, 'import ee\n')]
|
from django.contrib import admin
from .models import Contato, Venda, FormaPagamento
admin.site.register(Contato)
admin.site.register(Venda)
admin.site.register(FormaPagamento)
|
[
"django.contrib.admin.site.register"
] |
[((85, 113), 'django.contrib.admin.site.register', 'admin.site.register', (['Contato'], {}), '(Contato)\n', (104, 113), False, 'from django.contrib import admin\n'), ((114, 140), 'django.contrib.admin.site.register', 'admin.site.register', (['Venda'], {}), '(Venda)\n', (133, 140), False, 'from django.contrib import admin\n'), ((141, 176), 'django.contrib.admin.site.register', 'admin.site.register', (['FormaPagamento'], {}), '(FormaPagamento)\n', (160, 176), False, 'from django.contrib import admin\n')]
|
from __future__ import unicode_literals
from __future__ import print_function
import moya
from moya.compat import text_type
from requests_oauthlib import OAuth1Session
def get_credentials(provider, credentials):
client_id = credentials.client_id or provider.get('client_id', None)
client_secret = credentials.client_secret or provider.get('client_secret', None)
return client_id, client_secret
@moya.expose.macro('get_oauth_resource_owner')
def get_oauth_resource_owner(app, provider, credentials):
client_id, client_secret = get_credentials(provider, credentials)
oauth = OAuth1Session(client_id, client_secret=client_secret)
request_token_url = provider['request_token_url']
response = oauth.fetch_request_token(request_token_url)
resource_owner_key = response.get('oauth_token')
resource_owner_secret = response.get('oauth_token_secret')
result = {
"key": resource_owner_key,
"secret": resource_owner_secret
}
return result
@moya.expose.macro('get_oauth_authorize_url')
def get_oauth_authorize_url(app, provider, credentials):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
oauth = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret)
authorization_url = oauth.authorization_url(provider['authorization_base_url'])
return authorization_url
@moya.expose.macro('get_oauth_access_token')
def get_oauth_access_token(app, provider, credentials, verifier):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
oauth = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
access_token_url = provider['access_token_url']
oauth_tokens = oauth.fetch_access_token(access_token_url)
return oauth_tokens
@moya.expose.macro('get_oauth_profile')
def get_oauth_profile(app, provider, credentials, verifier):
context = moya.pilot.context
client_id, client_secret = get_credentials(provider, credentials)
resource_owner_key = context['.session.oauth1.resource_owner.key']
resource_owner_secret = context['.session.oauth1.resource_owner.secret']
resources = provider.get('resources', {})
session = OAuth1Session(client_id,
client_secret=client_secret,
resource_owner_key=resource_owner_key,
resource_owner_secret=resource_owner_secret,
verifier=verifier)
access_token_url = provider['access_token_url']
try:
oauth_tokens = session.fetch_access_token(access_token_url)
except Exception as e:
app.throw('moya.logins.access-fail',
text_type(e))
info = {}
for scope, scope_url in sorted(resources.items()):
try:
response = session.get(scope_url)
except Exception as e:
app.throw('moya.logins.get-scope-fail',
text_type(e),
diagnosis="There may be a connectivity issue getting scope information.",
scope=scope,
scope_url=scope_url)
try:
info[scope] = scope_data = response.json()
#if(context['.debug']):
# context['.console'].obj(context, scope_data)
except:
pass
provider_profile = provider.get('profile', {})
profile = {}
context['_oauth_info'] = info
with context.frame('_oauth_info'):
for k, v in provider_profile.items():
try:
profile[k] = context.eval(v)
except:
pass
return {'profile': profile, 'info': info}
|
[
"requests_oauthlib.OAuth1Session",
"moya.compat.text_type",
"moya.expose.macro"
] |
[((413, 458), 'moya.expose.macro', 'moya.expose.macro', (['"""get_oauth_resource_owner"""'], {}), "('get_oauth_resource_owner')\n", (430, 458), False, 'import moya\n'), ((1003, 1047), 'moya.expose.macro', 'moya.expose.macro', (['"""get_oauth_authorize_url"""'], {}), "('get_oauth_authorize_url')\n", (1020, 1047), False, 'import moya\n'), ((1701, 1744), 'moya.expose.macro', 'moya.expose.macro', (['"""get_oauth_access_token"""'], {}), "('get_oauth_access_token')\n", (1718, 1744), False, 'import moya\n'), ((2477, 2515), 'moya.expose.macro', 'moya.expose.macro', (['"""get_oauth_profile"""'], {}), "('get_oauth_profile')\n", (2494, 2515), False, 'import moya\n'), ((599, 652), 'requests_oauthlib.OAuth1Session', 'OAuth1Session', (['client_id'], {'client_secret': 'client_secret'}), '(client_id, client_secret=client_secret)\n', (612, 652), False, 'from requests_oauthlib import OAuth1Session\n'), ((1368, 1510), 'requests_oauthlib.OAuth1Session', 'OAuth1Session', (['client_id'], {'client_secret': 'client_secret', 'resource_owner_key': 'resource_owner_key', 'resource_owner_secret': 'resource_owner_secret'}), '(client_id, client_secret=client_secret, resource_owner_key=\n resource_owner_key, resource_owner_secret=resource_owner_secret)\n', (1381, 1510), False, 'from requests_oauthlib import OAuth1Session\n'), ((2074, 2239), 'requests_oauthlib.OAuth1Session', 'OAuth1Session', (['client_id'], {'client_secret': 'client_secret', 'resource_owner_key': 'resource_owner_key', 'resource_owner_secret': 'resource_owner_secret', 'verifier': 'verifier'}), '(client_id, client_secret=client_secret, resource_owner_key=\n resource_owner_key, resource_owner_secret=resource_owner_secret,\n verifier=verifier)\n', (2087, 2239), False, 'from requests_oauthlib import OAuth1Session\n'), ((2888, 3053), 'requests_oauthlib.OAuth1Session', 'OAuth1Session', (['client_id'], {'client_secret': 'client_secret', 'resource_owner_key': 'resource_owner_key', 'resource_owner_secret': 'resource_owner_secret', 'verifier': 'verifier'}), '(client_id, client_secret=client_secret, resource_owner_key=\n resource_owner_key, resource_owner_secret=resource_owner_secret,\n verifier=verifier)\n', (2901, 3053), False, 'from requests_oauthlib import OAuth1Session\n'), ((3377, 3389), 'moya.compat.text_type', 'text_type', (['e'], {}), '(e)\n', (3386, 3389), False, 'from moya.compat import text_type\n'), ((3625, 3637), 'moya.compat.text_type', 'text_type', (['e'], {}), '(e)\n', (3634, 3637), False, 'from moya.compat import text_type\n')]
|
"""
Cartesian genetic programming
"""
import operator as op
import random
import copy
import math
from settings import VERBOSE, N_COLS, LEVEL_BACK
class Function:
"""
A general function
"""
def __init__(self, f, arity, name=None):
self.f = f
self.arity = arity
self.name = f.__name__ if name is None else name
def __call__(self, *args, **kwargs):
return self.f(*args, **kwargs)
class Node:
"""
A node in CGP graph
"""
def __init__(self, max_arity):
"""
Initialize this node randomly
"""
self.i_func = None
self.i_inputs = [None] * max_arity
self.weights = [None] * max_arity
self.i_output = None
self.output = None
self.active = False
class Individual:
"""
An individual (chromosome, genotype, etc.) in evolution
"""
function_set = None
weight_range = [-1, 1]
max_arity = 3
n_inputs = 3
n_outputs = 1
n_cols = N_COLS
level_back = LEVEL_BACK
def __init__(self):
self.nodes = []
for pos in range(self.n_cols):
self.nodes.append(self._create_random_node(pos))
for i in range(1, self.n_outputs + 1):
self.nodes[-i].active = True
self.fitness = None
self._active_determined = False
def _create_random_node(self, pos):
node = Node(self.max_arity)
node.i_func = random.randint(0, len(self.function_set) - 1)
for i in range(self.function_set[node.i_func].arity):
node.i_inputs[i] = random.randint(max(pos - self.level_back, -self.n_inputs), pos - 1)
node.weights[i] = random.uniform(self.weight_range[0], self.weight_range[1])
node.i_output = pos
return node
def _determine_active_nodes(self):
"""
Determine which nodes in the CGP graph are active
"""
# check each node in reverse order
n_active = 0
for node in reversed(self.nodes):
if node.active:
n_active += 1
for i in range(self.function_set[node.i_func].arity):
i_input = node.i_inputs[i]
if i_input >= 0: # a node (not an input)
self.nodes[i_input].active = True
if VERBOSE:
print("# active genes: ", n_active)
def eval(self, *args):
"""
Given inputs, evaluate the output of this CGP individual.
:return the final output value
"""
if not self._active_determined:
self._determine_active_nodes()
self._active_determined = True
# forward pass: evaluate
for node in self.nodes:
if node.active:
inputs = []
for i in range(self.function_set[node.i_func].arity):
i_input = node.i_inputs[i]
w = node.weights[i]
if i_input < 0:
inputs.append(args[-i_input - 1] * w)
else:
inputs.append(self.nodes[i_input].output * w)
node.output = self.function_set[node.i_func](*inputs)
return self.nodes[-1].output
def mutate(self, mut_rate=0.01):
"""
Mutate this individual. Each gene is varied with probability *mut_rate*.
:param mut_rate: mutation probability
:return a child after mutation
"""
child = copy.deepcopy(self)
for pos, node in enumerate(child.nodes):
# mutate the function gene
if random.random() < mut_rate:
node.i_func = random.choice(range(len(self.function_set)))
# mutate the input genes (connection genes)
arity = self.function_set[node.i_func].arity
for i in range(arity):
if node.i_inputs[i] is None or random.random() < mut_rate: # if the mutated function requires more arguments, then the last ones are None
node.i_inputs[i] = random.randint(max(pos - self.level_back, -self.n_inputs), pos - 1)
if node.weights[i] is None or random.random() < mut_rate:
node.weights[i] = random.uniform(self.weight_range[0], self.weight_range[1])
# initially an individual is not active except the last output node
node.active = False
for i in range(1, self.n_outputs + 1):
child.nodes[-i].active = True
child.fitness = None
child._active_determined = False
return child
def save(self):
file_object = open(r"SavedBrain", 'w+')
for pos, node in enumerate(self.nodes):
file_object.write(str(node.i_func))
file_object.write("\n")
file_object.write("\n")
if not self._active_determined:
self._determine_active_nodes()
self._active_determined = True
for pos, node in enumerate(self.nodes):
if node.active:
file_object.write(str(node.i_func))
file_object.write("\n")
file_object.write("\n")
activeNodes = []
for node in self.nodes:
if node.active:
activeNodes.append(node)
file_object.write(str(self.function_set[node.i_func].f))
file_object.close()
# function set
def protected_div(a, b):
if abs(b) < 1e-6:
return a
return a / b
fs = [Function(op.add, 2), Function(op.sub, 2), Function(op.mul, 2), Function(protected_div, 2), Function(op.neg, 1), Function(math.cos, 1), Function(math.sin, 1), Function(math.tan, 1), Function(math.atan2, 2)]
Individual.function_set = fs
Individual.max_arity = max(f.arity for f in fs)
def evolve(pop, mut_rate, mu, lambda_):
"""
Evolve the population *pop* using the mu + lambda evolutionary strategy
:param pop: a list of individuals, whose size is mu + lambda. The first mu ones are previous parents.
:param mut_rate: mutation rate
:return: a new generation of individuals of the same size
"""
pop = sorted(pop, key=lambda ind: ind.fitness) # stable sorting
parents = pop[-mu:]
# generate lambda new children via mutation
offspring = []
for _ in range(lambda_):
parent = random.choice(parents)
offspring.append(parent.mutate(mut_rate))
return parents + offspring
def create_population(n):
"""
Create a random population composed of n individuals.
"""
return [Individual() for _ in range(n)]
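# --- Hedged usage sketch (not part of the original module) ---
# A minimal (mu + lambda) loop built only from create_population() and evolve() above.
# The fitness function is hypothetical: it rewards approximating f(a, b, c) = a*b + c.
if __name__ == "__main__":
    MU, LAMBDA, N_GEN = 2, 8, 50

    def evaluate(ind):
        samples = [(1.0, 2.0, 3.0), (0.5, -1.0, 2.0), (2.0, 2.0, -1.0)]
        error = 0.0
        for a, b, c in samples:
            try:
                error += abs(ind.eval(a, b, c) - (a * b + c))
            except (OverflowError, ValueError):
                return float("-inf")  # penalize numerically unstable programs
        return -error  # higher fitness is better

    pop = create_population(MU + LAMBDA)
    for ind in pop:
        ind.fitness = evaluate(ind)
    for _ in range(N_GEN):
        pop = evolve(pop, mut_rate=0.015, mu=MU, lambda_=LAMBDA)
        for ind in pop:
            if ind.fitness is None:  # only offspring need re-evaluation
                ind.fitness = evaluate(ind)
    print("best fitness:", max(ind.fitness for ind in pop))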
|
[
"random.random",
"random.uniform",
"random.choice",
"copy.deepcopy"
] |
[((3494, 3513), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3507, 3513), False, 'import copy\n'), ((6349, 6371), 'random.choice', 'random.choice', (['parents'], {}), '(parents)\n', (6362, 6371), False, 'import random\n'), ((1672, 1730), 'random.uniform', 'random.uniform', (['self.weight_range[0]', 'self.weight_range[1]'], {}), '(self.weight_range[0], self.weight_range[1])\n', (1686, 1730), False, 'import random\n'), ((3617, 3632), 'random.random', 'random.random', ([], {}), '()\n', (3630, 3632), False, 'import random\n'), ((4243, 4301), 'random.uniform', 'random.uniform', (['self.weight_range[0]', 'self.weight_range[1]'], {}), '(self.weight_range[0], self.weight_range[1])\n', (4257, 4301), False, 'import random\n'), ((3915, 3930), 'random.random', 'random.random', ([], {}), '()\n', (3928, 3930), False, 'import random\n'), ((4177, 4192), 'random.random', 'random.random', ([], {}), '()\n', (4190, 4192), False, 'import random\n')]
|
import torch as t
import torch_geometric.utils as utils
def qw_score(graph):
"""
    qw_score is not implemented yet; node degree is used as a stand-in score.
    :param graph: input graph whose node degrees are used as scores
"""
score = utils.degree(graph.edge_index[0])
return score.sort()
def pre_processing(graph, m, score, trees):
score, indices = score
indices.squeeze_()
old_edges = graph.edge_index
trees[-1] = [-1] * m
def graft(root):
"""
        Find the highest-scoring 2-hop node and connect it to the source node.
        Slightly different from the paper: once a 2-hop node is added, it is
        treated as a 1-hop node from then on.
        :param root: source node (degree less than m)
"""
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index)
if nodes_1_hop.shape[0] > m:
return
nodes_2_hop, _, _, _ = utils.k_hop_subgraph(root, 2, graph.edge_index)
ma = 0
for node in nodes_2_hop:
if node not in nodes_1_hop:
node = int(node.item())
idx = t.nonzero(indices == node, as_tuple=False).item()
ma = max(ma, idx)
new_edge = t.tensor([[indices[ma], root], [root, indices[ma]]])
degree[root] += 1
graph.edge_index = t.cat((graph.edge_index, new_edge), dim=1)
if degree[root] < m:
graft(root)
elif degree[root] == m:
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index)
trees[root] = ([i.item() for i in nodes_1_hop if i != root])
graph.edge_index = old_edges
def prune(root):
"""
        Find the lowest-scoring 1-hop node and delete that connection.
        The graph is assumed to be a simple graph.
        :param root: source node
"""
nodes_1_hop, _, _, mask = utils.k_hop_subgraph(root, 1, graph.edge_index)
if nodes_1_hop.shape[0] == m + 1:
return
mi = graph.num_nodes + 1
for node in nodes_1_hop:
if node != root:
node = int(node.item())
idx = t.nonzero(indices == node, as_tuple=False).item()
mi = min(idx, mi)
mask = mask.nonzero(as_tuple=False)
edges = graph.edge_index
l, r = 0, 0
for i in mask:
i = i.item()
if edges[0][i] == indices[mi] and edges[1][i] == root:
l = i
elif edges[1][i] == indices[mi] and edges[0][i] == root:
r = i
l, r = sorted([l, r])
graph.edge_index = t.cat((edges[:, :l], edges[:, l + 1:r], edges[:, r + 1:]), dim=1)
degree[root] -= 1
if degree[root] > m:
prune(root)
elif degree[root] == m:
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(root, 1, graph.edge_index)
trees[root] = ([i.item() for i in nodes_1_hop if i != root])
graph.edge_index = old_edges
degree = utils.degree(graph.edge_index[0])
for node, d in enumerate(degree):
tmp = degree[node]
if d > m:
prune(node)
elif d < m:
graft(node)
else:
nodes_1_hop, _, _, _ = utils.k_hop_subgraph(node, 1, graph.edge_index)
trees[node] = ([i.item() for i in nodes_1_hop if i != node])
degree[node] = tmp
for tree in trees:
while len(trees[tree]) < m:
trees[tree].append(-1)
            # for isolated nodes, pad their subtree with dummy (-1) nodes
graph.edge_index = old_edges
return trees
def construct_node_tree(graph, node, trees, opt):
"""
    Build the K-level, m-ary tree rooted at the target node.
    :param graph: input graph
    :param node: target node index
    :param opt: options object providing m (branching factor) and K (depth)
"""
m = opt.m
K = opt.K
tree = [node]
now = 0
for i in range(K - 1):
for j in range(m ** i):
root = tree[now]
tree += trees[root]
now += 1
zero = t.zeros(graph.x[-1].shape)
x = graph.x
graph.x = t.cat([graph.x, zero[None, :]], dim=0)
tree = graph.x[tree]
graph.x = x
return tree
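# --- Hedged usage sketch (not part of the original module) ---
# Builds a tiny chain graph and runs the pipeline above. `Opt` is a hypothetical
# stand-in for the real options object; anything exposing `m` and `K` works.
if __name__ == "__main__":
    from torch_geometric.data import Data

    class Opt:
        m = 2  # children kept per tree node
        K = 3  # number of tree levels

    edge_index = t.tensor([[0, 1, 1, 2, 2, 3],
                          [1, 0, 2, 1, 3, 2]])
    graph = Data(x=t.rand(4, 8), edge_index=edge_index)  # 4 nodes, 8 features each

    trees = pre_processing(graph, Opt.m, qw_score(graph), trees={})
    node_tree = construct_node_tree(graph, 0, trees, Opt())
    print(node_tree.shape)  # (1 + m + m**2) rows of node features, i.e. (7, 8)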
|
[
"torch_geometric.utils.degree",
"torch.nonzero",
"torch.tensor",
"torch_geometric.utils.k_hop_subgraph",
"torch.zeros",
"torch.cat"
] |
[((148, 181), 'torch_geometric.utils.degree', 'utils.degree', (['graph.edge_index[0]'], {}), '(graph.edge_index[0])\n', (160, 181), True, 'import torch_geometric.utils as utils\n'), ((2664, 2697), 'torch_geometric.utils.degree', 'utils.degree', (['graph.edge_index[0]'], {}), '(graph.edge_index[0])\n', (2676, 2697), True, 'import torch_geometric.utils as utils\n'), ((3577, 3603), 'torch.zeros', 't.zeros', (['graph.x[-1].shape'], {}), '(graph.x[-1].shape)\n', (3584, 3603), True, 'import torch as t\n'), ((3634, 3672), 'torch.cat', 't.cat', (['[graph.x, zero[None, :]]'], {'dim': '(0)'}), '([graph.x, zero[None, :]], dim=0)\n', (3639, 3672), True, 'import torch as t\n'), ((531, 578), 'torch_geometric.utils.k_hop_subgraph', 'utils.k_hop_subgraph', (['root', '(1)', 'graph.edge_index'], {}), '(root, 1, graph.edge_index)\n', (551, 578), True, 'import torch_geometric.utils as utils\n'), ((666, 713), 'torch_geometric.utils.k_hop_subgraph', 'utils.k_hop_subgraph', (['root', '(2)', 'graph.edge_index'], {}), '(root, 2, graph.edge_index)\n', (686, 713), True, 'import torch_geometric.utils as utils\n'), ((967, 1019), 'torch.tensor', 't.tensor', (['[[indices[ma], root], [root, indices[ma]]]'], {}), '([[indices[ma], root], [root, indices[ma]]])\n', (975, 1019), True, 'import torch as t\n'), ((1073, 1115), 'torch.cat', 't.cat', (['(graph.edge_index, new_edge)'], {'dim': '(1)'}), '((graph.edge_index, new_edge), dim=1)\n', (1078, 1115), True, 'import torch as t\n'), ((1544, 1591), 'torch_geometric.utils.k_hop_subgraph', 'utils.k_hop_subgraph', (['root', '(1)', 'graph.edge_index'], {}), '(root, 1, graph.edge_index)\n', (1564, 1591), True, 'import torch_geometric.utils as utils\n'), ((2276, 2341), 'torch.cat', 't.cat', (['(edges[:, :l], edges[:, l + 1:r], edges[:, r + 1:])'], {'dim': '(1)'}), '((edges[:, :l], edges[:, l + 1:r], edges[:, r + 1:]), dim=1)\n', (2281, 2341), True, 'import torch as t\n'), ((1236, 1283), 'torch_geometric.utils.k_hop_subgraph', 'utils.k_hop_subgraph', (['root', '(1)', 'graph.edge_index'], {}), '(root, 1, graph.edge_index)\n', (1256, 1283), True, 'import torch_geometric.utils as utils\n'), ((2488, 2535), 'torch_geometric.utils.k_hop_subgraph', 'utils.k_hop_subgraph', (['root', '(1)', 'graph.edge_index'], {}), '(root, 1, graph.edge_index)\n', (2508, 2535), True, 'import torch_geometric.utils as utils\n'), ((2898, 2945), 'torch_geometric.utils.k_hop_subgraph', 'utils.k_hop_subgraph', (['node', '(1)', 'graph.edge_index'], {}), '(node, 1, graph.edge_index)\n', (2918, 2945), True, 'import torch_geometric.utils as utils\n'), ((864, 906), 'torch.nonzero', 't.nonzero', (['(indices == node)'], {'as_tuple': '(False)'}), '(indices == node, as_tuple=False)\n', (873, 906), True, 'import torch as t\n'), ((1810, 1852), 'torch.nonzero', 't.nonzero', (['(indices == node)'], {'as_tuple': '(False)'}), '(indices == node, as_tuple=False)\n', (1819, 1852), True, 'import torch as t\n')]
|
#/*
# * Copyright (C) 2017 - This file is part of libecc project
# *
# * Authors:
# * <NAME> <<EMAIL>>
# * <NAME> <<EMAIL>>
# * <NAME> <<EMAIL>>
# *
# * Contributors:
# * <NAME> <<EMAIL>>
# * <NAME> <<EMAIL>>
# *
# * This software is licensed under a dual BSD and GPL v2 license.
# * See LICENSE file at the root folder of the project.
# */
import struct
keccak_rc = [
0x0000000000000001, 0x0000000000008082, 0x800000000000808A, 0x8000000080008000,
0x000000000000808B, 0x0000000080000001, 0x8000000080008081, 0x8000000000008009,
0x000000000000008A, 0x0000000000000088, 0x0000000080008009, 0x000000008000000A,
0x000000008000808B, 0x800000000000008B, 0x8000000000008089, 0x8000000000008003,
0x8000000000008002, 0x8000000000000080, 0x000000000000800A, 0x800000008000000A,
0x8000000080008081, 0x8000000000008080, 0x0000000080000001, 0x8000000080008008
]
keccak_rot = [
[ 0, 36, 3, 41, 18 ],
[ 1, 44, 10, 45, 2 ],
[ 62, 6, 43, 15, 61 ],
[ 28, 55, 25, 21, 56 ],
[ 27, 20, 39, 8, 14 ],
]
# Keccak function
def keccak_rotl(x, l):
return (((x << l) ^ (x >> (64 - l))) & (2**64-1))
def keccakround(bytestate, rc):
# Import little endian state
state = [0] * 25
for i in range(0, 25):
(state[i],) = struct.unpack('<Q', ''.join(bytestate[(8*i):(8*i)+8]))
# Proceed with the KECCAK core
bcd = [0] * 25
# Theta
for i in range(0, 5):
bcd[i] = state[i] ^ state[i + (5*1)] ^ state[i + (5*2)] ^ state[i + (5*3)] ^ state[i + (5*4)]
for i in range(0, 5):
tmp = bcd[(i+4)%5] ^ keccak_rotl(bcd[(i+1)%5], 1)
for j in range(0, 5):
state[i + (5 * j)] = state[i + (5 * j)] ^ tmp
# Rho and Pi
for i in range(0, 5):
for j in range(0, 5):
bcd[j + (5*(((2*i)+(3*j)) % 5))] = keccak_rotl(state[i + (5*j)], keccak_rot[i][j])
# Chi
for i in range(0, 5):
for j in range(0, 5):
state[i + (5*j)] = bcd[i + (5*j)] ^ (~bcd[((i+1)%5) + (5*j)] & bcd[((i+2)%5) + (5*j)])
# Iota
state[0] = state[0] ^ keccak_rc[rc]
# Pack the output state
output = [0] * (25 * 8)
for i in range(0, 25):
        output[(8*i):(8*i)+8] = struct.pack('<Q', state[i])
return output
def keccakf(bytestate):
for rnd in range(0, 24):
bytestate = keccakround(bytestate, rnd)
return bytestate
# SHA-3 context class
class Sha3_ctx(object):
def __init__(self, digest_size):
self.digest_size = digest_size / 8
self.block_size = (25*8) - (2 * (digest_size / 8))
self.idx = 0
self.state = [chr(0)] * (25 * 8)
def digest_size(self):
return self.digest_size
def block_size(self):
return self.block_size
def update(self, message):
for i in range(0, len(message)):
self.state[self.idx] = chr(ord(self.state[self.idx]) ^ ord(message[i]))
self.idx = self.idx + 1
if (self.idx == self.block_size):
self.state = keccakf(self.state)
self.idx = 0
def digest(self):
self.state[self.idx] = chr(ord(self.state[self.idx]) ^ 0x06)
self.state[self.block_size - 1] = chr(ord(self.state[self.block_size - 1]) ^ 0x80)
self.state = keccakf(self.state)
return ''.join(self.state[:self.digest_size])
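# --- Hedged usage sketch (not part of the original module) ---
# The context stores its state as 1-character strings, so this assumes Python 2
# string semantics, matching the rest of the file.
if __name__ == "__main__":
    ctx = Sha3_ctx(256)  # SHA3-256: 32-byte digest, 136-byte block/rate
    ctx.update("abc")
    print(ctx.digest().encode('hex'))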
|
[
"struct.pack"
] |
[((2141, 2168), 'struct.pack', 'struct.pack', (['"""<Q"""', 'state[i]'], {}), "('<Q', state[i])\n", (2152, 2168), False, 'import struct\n')]
|
import torch
import torch.nn as nn
import optflow.compute_tvl1_energy as compute_tvl1_energy
def EPE(input_flow, target_flow, sparse=False, mean=True):
EPE_map = torch.norm(target_flow-input_flow,2,1)
if sparse:
EPE_map = EPE_map[target_flow != 0]
if mean:
return EPE_map.mean()
else:
return EPE_map.sum()
def multiscale_energy_loss(network_output_energy, target_flow,img1,img2, weights=None, sparse=False):
def one_scale_mod(output, target, sparse,img1,img2):
b, _, h, w = target.size()
down_sample_img1 =nn.functional.adaptive_avg_pool2d(img1, (h, w))
down_sample_img2 = nn.functional.adaptive_avg_pool2d(img2, (h, w))
target_energy = compute_tvl1_energy.compute_tvl1_energy_optimized_batch(down_sample_img1,
down_sample_img2,
target)
l1_loss = (output - target_energy).abs().sum() / target_energy.size(0)
return l1_loss
if type(network_output_energy) not in [tuple, list]:
network_output_energy = [network_output_energy]
if weights is None:
weights = [0.46,0.23,0.23,0.46] # more preference for starting layers
assert(len(weights) == len(network_output_energy))
loss = 0
flow_index = 0
for output, weight in zip(network_output_energy, weights):
loss += weight * one_scale_mod(output, target_flow[flow_index], sparse,img1,img2)
flow_index = flow_index + 1
return loss
def realEPE(output, target, sparse=False):
b, _, h, w = target.size()
upsampled_output = nn.functional.upsample(output, size=(h,w), mode='bilinear')
return EPE(upsampled_output, target, sparse, mean=True)
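# --- Hedged usage sketch (not part of the original module) ---
# Shapes below are illustrative: a coarse 2-channel flow prediction is upsampled
# to the ground-truth resolution before the mean endpoint error is computed.
if __name__ == "__main__":
    target_flow = torch.randn(4, 2, 64, 64)  # ground-truth optical flow
    coarse_flow = torch.randn(4, 2, 16, 16)  # network output at 1/4 resolution
    print(realEPE(coarse_flow, target_flow).item())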
|
[
"torch.nn.functional.upsample",
"torch.norm",
"torch.nn.functional.adaptive_avg_pool2d",
"optflow.compute_tvl1_energy.compute_tvl1_energy_optimized_batch"
] |
[((167, 209), 'torch.norm', 'torch.norm', (['(target_flow - input_flow)', '(2)', '(1)'], {}), '(target_flow - input_flow, 2, 1)\n', (177, 209), False, 'import torch\n'), ((1656, 1716), 'torch.nn.functional.upsample', 'nn.functional.upsample', (['output'], {'size': '(h, w)', 'mode': '"""bilinear"""'}), "(output, size=(h, w), mode='bilinear')\n", (1678, 1716), True, 'import torch.nn as nn\n'), ((570, 617), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['img1', '(h, w)'], {}), '(img1, (h, w))\n', (603, 617), True, 'import torch.nn as nn\n'), ((645, 692), 'torch.nn.functional.adaptive_avg_pool2d', 'nn.functional.adaptive_avg_pool2d', (['img2', '(h, w)'], {}), '(img2, (h, w))\n', (678, 692), True, 'import torch.nn as nn\n'), ((718, 821), 'optflow.compute_tvl1_energy.compute_tvl1_energy_optimized_batch', 'compute_tvl1_energy.compute_tvl1_energy_optimized_batch', (['down_sample_img1', 'down_sample_img2', 'target'], {}), '(down_sample_img1,\n down_sample_img2, target)\n', (773, 821), True, 'import optflow.compute_tvl1_energy as compute_tvl1_energy\n')]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import sys
from django.test import TestCase
from django.utils.module_loading import import_string
from pipeline.tests.mock import * # noqa
from pipeline.tests.mock_settings import * # noqa
class EngineDataAPITestCase(TestCase):
@classmethod
def setUpClass(cls):
cls.mock_settings = MagicMock()
cls.settings_patch = patch(ENGINE_DATA_API_SETTINGS, cls.mock_settings)
cls.import_backend_patch = patch(ENGINE_DATA_API_IMPORT_BACKEND, MagicMock())
cls.settings_patch.start()
cls.import_backend_patch.start()
cls.api = import_string("pipeline.engine.core.data.api")
cls.write_methods = ["set_object", "del_object", "expire_cache"]
cls.read_methods = ["get_object", "cache_for"]
cls.method_params = {
"set_object": ["key", "obj"],
"del_object": ["key"],
"expire_cache": ["key", "obj", "expires"],
"cache_for": ["key"],
"get_object": ["key"],
}
@classmethod
def tearDownClass(cls):
cls.settings_patch.stop()
cls.import_backend_patch.stop()
def setUp(self):
self.backend = MagicMock()
self.candidate_backend = MagicMock()
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = False
def test_write__without_candidate(self):
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_write__without_candidate test\n".format(method)
)
def test_write__without_candiate_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_write__without_candiate_raise_err test\n".format(method)
)
def test_write__with_candidate(self):
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write("{} pass test_write__with_candidate test\n".format(method))
def test_write__with_candidate_main_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_main_raise_err test\n".format(
method
)
)
def test_write__with_candidate_raise_err(self):
for method in self.write_methods:
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_raise_err test\n".format(method)
)
def test_write__with_candidate_both_raise_err(self):
for method in self.write_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_both_raise_err test\n".format(
method
)
)
def test_write__with_auto_expire(self):
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE = True
self.mock_settings.PIPELINE_DATA_BACKEND_AUTO_EXPIRE_SECONDS = 30
for method in self.write_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
getattr(self.api, method)(*self.method_params[method])
if method == "set_object":
getattr(self.backend, "expire_cache").assert_called_once_with(
*self.method_params[method], expires=30
)
self.backend.expire_cache.reset_mock()
else:
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_write__with_candidate_both_raise_err test\n".format(
method
)
)
def test_read__without_candidate(self):
for method in self.read_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__without_candidate test\n".format(method)
)
def test_read__without_candidate_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, None):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__without_candidate_raise_err test\n".format(method)
)
def test_read__with_candidate_not_use(self):
for method in self.read_methods:
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_not_called()
sys.stdout.write(
"{} pass test_read__with_candidate_not_use test\n".format(method)
)
def test_read__with_candidate_use(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(return_value=None))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_use test\n".format(method)
)
def test_read__with_candidate_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(return_value=None))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_err test\n".format(method)
)
def test_read__with_candidate_main_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = getattr(self.api, method)(*self.method_params[method])
self.assertIsNotNone(data)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_main_raise_err test\n".format(method)
)
def test_read__with_candidate_both_raise_err(self):
for method in self.read_methods:
setattr(self.backend, method, MagicMock(side_effect=Exception))
setattr(self.candidate_backend, method, MagicMock(side_effect=Exception))
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.assertRaises(
Exception,
getattr(self.api, method),
*self.method_params[method]
)
getattr(self.backend, method).assert_called_once_with(
*self.method_params[method]
)
getattr(self.candidate_backend, method).assert_called_once_with(
*self.method_params[method]
)
sys.stdout.write(
"{} pass test_read__with_candidate_both_raise_err test\n".format(method)
)
def test_set_schedule_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.api.set_schedule_data("key", "data")
self.backend.set_object.assert_called_once_with(
"key_schedule_parent_data", "data"
)
self.candidate_backend.set_object.assert_called_once_with(
"key_schedule_parent_data", "data"
)
def test_delete_parent_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
self.api.delete_parent_data("key")
self.backend.del_object.assert_called_once_with(
"key_schedule_parent_data"
)
self.candidate_backend.del_object.assert_called_once_with(
"key_schedule_parent_data"
)
def test_get_schedule_parent_data(self):
with patch(ENGINE_DATA_API_BACKEND, self.backend):
with patch(ENGINE_DATA_API_CANDIDATE_BACKEND, self.candidate_backend):
data = self.api.get_schedule_parent_data("key")
self.assertIsNotNone(data)
self.backend.get_object.assert_called_once_with(
"key_schedule_parent_data"
)
self.candidate_backend.get_object.assert_not_called()
|
[
"django.utils.module_loading.import_string"
] |
[((1310, 1356), 'django.utils.module_loading.import_string', 'import_string', (['"""pipeline.engine.core.data.api"""'], {}), "('pipeline.engine.core.data.api')\n", (1323, 1356), False, 'from django.utils.module_loading import import_string\n')]
|
import numpy as np
def _check_mne(name):
"""Helper to check if h5py is installed"""
try:
import mne
except ImportError:
raise ImportError('Please install MNE-python to use %s.' % name)
return mne
def raw_to_mask(raw, ixs, events=None, tmin=None, tmax=None):
"""
A function to transform MNE data into pactools input signals.
It select the one channel on which you to estimate PAC, or two channels
for cross-channel PAC. It also returns a mask generator, that mask the
data outside a given window around an event. The mask generator returns
a number of masks equal to the number of events times the number of
windows (i.e. the number of pairs (tmin, tmax)).
Warning: events is stored in indices, tmin and tmax are stored in seconds.
Parameters
----------
raw : an instance of Raw, containing data of shape (n_channels, n_times)
The data used to calculate PAC
ixs : int or couple of int
The indices for the low/high frequency channels. If only one is given,
the same channel is used for both low_sig and high_sig.
events : array, shape (n_events, 3) | array, shape (n_events,) | None
MNE events array. To be supplied if data is 2D and output should be
split by events. In this case, `tmin` and `tmax` must be provided. If
`ndim == 1`, it is assumed to be event indices, and all events will be
grouped together. Otherwise, events will be grouped along the third
dimension.
tmin : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the start time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include before
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `min(raw.times)`.
tmax : float | list of floats, shape (n_windows, ) | None
If `events` is not provided, it is the stop time to use in `raw`.
If `events` is provided, it is the time (in seconds) to include after
each event index. If a list of floats is given, then PAC is calculated
for each pair of `tmin` and `tmax`. Defaults to `max(raw.times)`.
Attributes
----------
low_sig : array, shape (1, n_points)
Input data for the phase signal
high_sig : array or None, shape (1, n_points)
Input data for the amplitude signal.
If None, we use low_sig for both signals.
mask : MaskIterator instance
Object that behaves like a list of mask, without storing them all.
The PAC will only be evaluated where the mask is False.
Examples
--------
>>> from pactools import raw_to_mask
>>> low_sig, high_sig, mask = raw_to_mask(raw, ixs, events, tmin, tmax)
>>> n_masks = len(mask)
>>> for one_mask in mask:
... pass
"""
mne = _check_mne('raw_to_mask')
if not isinstance(raw, mne.io.BaseRaw):
raise ValueError('Must supply Raw as input')
ixs = np.atleast_1d(ixs)
fs = raw.info['sfreq']
data = raw[:][0]
n_channels, n_points = data.shape
low_sig = data[ixs[0]][None, :]
if ixs.shape[0] > 1:
high_sig = data[ixs[1]][None, :]
else:
high_sig = None
mask = MaskIterator(events, tmin, tmax, n_points, fs)
return low_sig, high_sig, mask
class MaskIterator(object):
"""Iterator that creates the masks one at a time.
Examples
--------
>>> from pactools import MaskIterator
>>> all_masks = MaskIterator(events, tmin, tmax, n_points, fs)
>>> n_masks = len(all_masks)
>>> for one_mask in all_masks:
... pass
"""
def __init__(self, events, tmin, tmax, n_points, fs):
self.events = events
self.tmin = tmin
self.tmax = tmax
self.n_points = n_points
self.fs = float(fs)
self._init()
def _init(self):
self.tmin = np.atleast_1d(self.tmin)
self.tmax = np.atleast_1d(self.tmax)
if len(self.tmin) != len(self.tmax):
raise ValueError('tmin and tmax have differing lengths')
n_windows = len(self.tmin)
if self.events is None:
self.events = np.array([0.])
n_events = 1
if self.events.ndim == 1:
n_events = 1 # number of different event kinds
else:
n_events = np.unique(self.events[:, -1]).shape[0]
self._n_iter = n_windows * n_events
def __iter__(self):
return self.next()
def __len__(self):
return self._n_iter
def next(self):
if self.events.ndim == 1:
event_names = [None, ]
else:
event_names = np.unique(self.events[:, -1])
mask = np.empty((1, self.n_points), dtype=bool)
for event_name in event_names:
if self.events.ndim == 1:
# select all the events since their kind is not specified
these_events = self.events
else:
# select the event indices of one kind of event
these_events = self.events[self.events[:, -1] == event_name, 0]
for tmin, tmax in zip(self.tmin, self.tmax):
mask.fill(True) # it masks everything
for event in these_events:
start, stop = None, None
if tmin is not None:
start = int(event + tmin * self.fs)
if tmax is not None:
stop = int(event + tmax * self.fs)
mask[:, start:stop] = False
yield mask
|
[
"numpy.array",
"numpy.empty",
"numpy.unique",
"numpy.atleast_1d"
] |
[((3066, 3084), 'numpy.atleast_1d', 'np.atleast_1d', (['ixs'], {}), '(ixs)\n', (3079, 3084), True, 'import numpy as np\n'), ((3978, 4002), 'numpy.atleast_1d', 'np.atleast_1d', (['self.tmin'], {}), '(self.tmin)\n', (3991, 4002), True, 'import numpy as np\n'), ((4023, 4047), 'numpy.atleast_1d', 'np.atleast_1d', (['self.tmax'], {}), '(self.tmax)\n', (4036, 4047), True, 'import numpy as np\n'), ((4793, 4833), 'numpy.empty', 'np.empty', (['(1, self.n_points)'], {'dtype': 'bool'}), '((1, self.n_points), dtype=bool)\n', (4801, 4833), True, 'import numpy as np\n'), ((4257, 4272), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (4265, 4272), True, 'import numpy as np\n'), ((4747, 4776), 'numpy.unique', 'np.unique', (['self.events[:, -1]'], {}), '(self.events[:, -1])\n', (4756, 4776), True, 'import numpy as np\n'), ((4429, 4458), 'numpy.unique', 'np.unique', (['self.events[:, -1]'], {}), '(self.events[:, -1])\n', (4438, 4458), True, 'import numpy as np\n')]
|
import random
async def magicEightBall(ctx, message=True):
if message:
eightBall = random.randint(0, 19)
outlooks = [
"As I see it, yes.",
"Ask again later.",
"Better not tell you now.",
"Cannot predict now.",
"Concentrate and ask again.",
"Don’t count on it.",
"It is certain.",
"It is decidedly so.",
"Most likely.",
"My reply is no.",
"My sources say no.",
"Outlook not so good.",
"Outlook good.",
"Reply hazy, try again.",
"Signs point to yes.",
"Very doubtful.",
"Without a doubt.",
"Yes.",
"Yes – definitely.",
"You may rely on it.",
]
await ctx.send('Magic 8: ' + outlooks[eightBall])
|
[
"random.randint"
] |
[((97, 118), 'random.randint', 'random.randint', (['(0)', '(19)'], {}), '(0, 19)\n', (111, 118), False, 'import random\n')]
|
import time
import board
import busio
from digitalio import DigitalInOut
from adafruit_esp32spi import adafruit_esp32spi
from adafruit_esp32spi import adafruit_esp32spi_wifimanager
import neopixel
# Import Philips Hue Bridge
from adafruit_hue import Bridge
# Get wifi details and more from a secrets.py file
try:
from secrets import secrets
except ImportError:
print("WiFi and API secrets are kept in secrets.py, please add them there!")
raise
# ESP32 SPI
esp32_cs = DigitalInOut(board.ESP_CS)
esp32_ready = DigitalInOut(board.ESP_BUSY)
esp32_reset = DigitalInOut(board.ESP_RESET)
spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
esp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)
status_light = neopixel.NeoPixel(board.NEOPIXEL, 1, brightness=0.2)
wifi = adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager(esp, secrets, status_light)
# Attempt to load bridge username and IP address from secrets.py
try:
username = secrets['hue_username']
bridge_ip = secrets['bridge_ip']
my_bridge = Bridge(wifi, bridge_ip, username)
except:
# Perform first-time bridge setup
my_bridge = Bridge(wifi)
ip = my_bridge.discover_bridge()
username = my_bridge.register_username()
print('ADD THESE VALUES TO SECRETS.PY: \
\n\t"bridge_ip":"{0}", \
\n\t"hue_username":"{1}"'.format(ip, username))
raise
# Enumerate all lights on the bridge
my_bridge.get_lights()
# Turn on the light
my_bridge.set_light(1, on=True)
# RGB colors to Hue-Compatible HSL colors
hsl_y = my_bridge.rgb_to_hsb([255, 255, 0])
hsl_b = my_bridge.rgb_to_hsb([0, 0, 255])
hsl_w = my_bridge.rgb_to_hsb([255, 255, 255])
hsl_colors = [hsl_y, hsl_b, hsl_w]
# Set the light to Python colors!
for color in hsl_colors:
my_bridge.set_light(1, hue=int(color[0]), sat=int(color[1]), bri=int(color[2]))
time.sleep(5)
# Set a predefined scene
# my_bridge.set_group(1, scene='AB34EF5')
# Turn off the light
my_bridge.set_light(1, on=False)
|
[
"adafruit_esp32spi.adafruit_esp32spi.ESP_SPIcontrol",
"adafruit_hue.Bridge",
"busio.SPI",
"time.sleep",
"adafruit_esp32spi.adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager",
"neopixel.NeoPixel",
"digitalio.DigitalInOut"
] |
[((499, 525), 'digitalio.DigitalInOut', 'DigitalInOut', (['board.ESP_CS'], {}), '(board.ESP_CS)\n', (511, 525), False, 'from digitalio import DigitalInOut\n'), ((541, 569), 'digitalio.DigitalInOut', 'DigitalInOut', (['board.ESP_BUSY'], {}), '(board.ESP_BUSY)\n', (553, 569), False, 'from digitalio import DigitalInOut\n'), ((585, 614), 'digitalio.DigitalInOut', 'DigitalInOut', (['board.ESP_RESET'], {}), '(board.ESP_RESET)\n', (597, 614), False, 'from digitalio import DigitalInOut\n'), ((622, 666), 'busio.SPI', 'busio.SPI', (['board.SCK', 'board.MOSI', 'board.MISO'], {}), '(board.SCK, board.MOSI, board.MISO)\n', (631, 666), False, 'import busio\n'), ((674, 747), 'adafruit_esp32spi.adafruit_esp32spi.ESP_SPIcontrol', 'adafruit_esp32spi.ESP_SPIcontrol', (['spi', 'esp32_cs', 'esp32_ready', 'esp32_reset'], {}), '(spi, esp32_cs, esp32_ready, esp32_reset)\n', (706, 747), False, 'from adafruit_esp32spi import adafruit_esp32spi\n'), ((764, 816), 'neopixel.NeoPixel', 'neopixel.NeoPixel', (['board.NEOPIXEL', '(1)'], {'brightness': '(0.2)'}), '(board.NEOPIXEL, 1, brightness=0.2)\n', (781, 816), False, 'import neopixel\n'), ((825, 901), 'adafruit_esp32spi.adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager', 'adafruit_esp32spi_wifimanager.ESPSPI_WiFiManager', (['esp', 'secrets', 'status_light'], {}), '(esp, secrets, status_light)\n', (873, 901), False, 'from adafruit_esp32spi import adafruit_esp32spi_wifimanager\n'), ((1071, 1104), 'adafruit_hue.Bridge', 'Bridge', (['wifi', 'bridge_ip', 'username'], {}), '(wifi, bridge_ip, username)\n', (1077, 1104), False, 'from adafruit_hue import Bridge\n'), ((1944, 1957), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1954, 1957), False, 'import time\n'), ((1170, 1182), 'adafruit_hue.Bridge', 'Bridge', (['wifi'], {}), '(wifi)\n', (1176, 1182), False, 'from adafruit_hue import Bridge\n')]
|
#
# Copyright (c) 2013-2018 <NAME> <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import kore
import socket
class EchoServer:
# Setup socket + wrap it inside of a kore socket so we can use it.
def __init__(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setblocking(False)
sock.bind(("127.0.0.1", 6969))
sock.listen()
self.conn = kore.socket_wrap(sock)
# Wait for a new client to connect, then create a new task
    # that calls handle_client with the connected client as
# the argument.
async def run(self):
while True:
try:
client = await self.conn.accept()
kore.task_create(self.handle_client(client))
client = None
except Exception as e:
kore.fatal("exception %s" % e)
# Each client will run as this co-routine.
async def handle_client(self, client):
while True:
try:
data = await client.recv(1024)
if data is None:
break
await client.send(data)
except Exception as e:
print("client got exception %s" % e)
client.close()
# Setup the server object.
server = EchoServer()
# Create a task that will execute inside of Kore as a co-routine.
kore.task_create(server.run())
|
[
"kore.socket_wrap",
"kore.fatal",
"socket.socket"
] |
[((920, 969), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (933, 969), False, 'import socket\n'), ((1084, 1106), 'kore.socket_wrap', 'kore.socket_wrap', (['sock'], {}), '(sock)\n', (1100, 1106), False, 'import kore\n'), ((1505, 1535), 'kore.fatal', 'kore.fatal', (["('exception %s' % e)"], {}), "('exception %s' % e)\n", (1515, 1535), False, 'import kore\n')]
|
import asyncio
import logging
import traceback
import uuid
from typing import Optional, Tuple, Any, Callable
from pesto.ws.core.payload_parser import PayloadParser, PestoConfig
from pesto.ws.core.pesto_feature import PestoFeatures
from pesto.ws.core.utils import load_class, async_exec
from pesto.ws.features.algorithm_wrapper import AlgorithmWrapper
from pesto.ws.features.converter.image.image_roi import ImageROI, DummyImageROI
from pesto.ws.features.payload_converter import PayloadConverter
from pesto.ws.features.payload_debug import PayloadDebug
from pesto.ws.features.response_serializer import ResponseSerializer
from pesto.ws.features.schema_validation import SchemaValidation
from pesto.ws.features.stateful_response import StatefulResponse
from pesto.ws.features.stateless_response import StatelessResponse
from pesto.ws.service.describe import DescribeService
from pesto.ws.service.job_result import ResultType
log = logging.getLogger(__name__)
class ProcessService:
PROCESS_CLASS_NAME = 'algorithm.process.Process'
_algorithm: Optional[Callable] = None
_describe = None
@staticmethod
def init():
if ProcessService._algorithm is not None:
raise ValueError('Process Service already loaded !')
try:
log.info('ProcessService.init() ...')
ProcessService._algorithm = load_class(ProcessService.PROCESS_CLASS_NAME)()
if hasattr(ProcessService._algorithm, 'on_start'):
log.info('ProcessService.on_start() ...')
ProcessService._algorithm.on_start()
log.info('ProcessService.on_start() ... Done !')
log.info('ProcessService.init() ... Done !')
except:
traceback.print_exc()
log.warning('Algorithm {}.on_start() failure !'.format(ProcessService.PROCESS_CLASS_NAME))
def __init__(self, url_root: str):
self.url_root = url_root
@property
def service_description(self):
if ProcessService._describe is None:
ProcessService._describe = DescribeService(self.url_root).compute_describe()
return ProcessService._describe
def process(self, payload: dict) -> dict:
config = PayloadParser.parse(payload)
image_roi: Optional[ImageROI] = config.get(PestoConfig.roi) # if no ROI: None
active_roi: ImageROI = image_roi or DummyImageROI() # bypass compute crop info and remove margins in pipeline
job_id = str(uuid.uuid4().time_low)
is_stateful = self.service_description['asynchronous'] is True
input_schema = self.service_description['input']
output_schema = self.service_description['output']
common_pipeline = filter(None, [
SchemaValidation(schema=input_schema),
active_roi.compute_crop_infos(),
PayloadConverter(image_roi=image_roi, schema=input_schema),
PayloadDebug(schema=input_schema),
AlgorithmWrapper(ProcessService._algorithm),
active_roi.remove_margin(),
ResponseSerializer(schema=output_schema, job_id=job_id),
])
if is_stateful:
pipeline = [
*common_pipeline,
StatefulResponse(self.url_root, job_id)
]
else:
pipeline = [
*common_pipeline,
StatelessResponse(self.url_root, job_id, output_schema)
]
return PestoFeatures(pipeline).process(payload)
async def async_process(self, request_payload: dict) -> Tuple[Any, ResultType]:
return await asyncio.wait_for(
async_exec(lambda: self.process(request_payload)),
timeout=None
)
|
[
"logging.getLogger",
"pesto.ws.core.utils.load_class",
"pesto.ws.service.describe.DescribeService",
"pesto.ws.features.payload_debug.PayloadDebug",
"uuid.uuid4",
"pesto.ws.features.payload_converter.PayloadConverter",
"pesto.ws.features.converter.image.image_roi.DummyImageROI",
"pesto.ws.core.payload_parser.PayloadParser.parse",
"pesto.ws.features.algorithm_wrapper.AlgorithmWrapper",
"pesto.ws.features.response_serializer.ResponseSerializer",
"pesto.ws.features.stateful_response.StatefulResponse",
"pesto.ws.core.pesto_feature.PestoFeatures",
"traceback.print_exc",
"pesto.ws.features.stateless_response.StatelessResponse",
"pesto.ws.features.schema_validation.SchemaValidation"
] |
[((932, 959), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (949, 959), False, 'import logging\n'), ((2215, 2243), 'pesto.ws.core.payload_parser.PayloadParser.parse', 'PayloadParser.parse', (['payload'], {}), '(payload)\n', (2234, 2243), False, 'from pesto.ws.core.payload_parser import PayloadParser, PestoConfig\n'), ((2376, 2391), 'pesto.ws.features.converter.image.image_roi.DummyImageROI', 'DummyImageROI', ([], {}), '()\n', (2389, 2391), False, 'from pesto.ws.features.converter.image.image_roi import ImageROI, DummyImageROI\n'), ((1355, 1400), 'pesto.ws.core.utils.load_class', 'load_class', (['ProcessService.PROCESS_CLASS_NAME'], {}), '(ProcessService.PROCESS_CLASS_NAME)\n', (1365, 1400), False, 'from pesto.ws.core.utils import load_class, async_exec\n'), ((1729, 1750), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1748, 1750), False, 'import traceback\n'), ((2473, 2485), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2483, 2485), False, 'import uuid\n'), ((2738, 2775), 'pesto.ws.features.schema_validation.SchemaValidation', 'SchemaValidation', ([], {'schema': 'input_schema'}), '(schema=input_schema)\n', (2754, 2775), False, 'from pesto.ws.features.schema_validation import SchemaValidation\n'), ((2834, 2892), 'pesto.ws.features.payload_converter.PayloadConverter', 'PayloadConverter', ([], {'image_roi': 'image_roi', 'schema': 'input_schema'}), '(image_roi=image_roi, schema=input_schema)\n', (2850, 2892), False, 'from pesto.ws.features.payload_converter import PayloadConverter\n'), ((2906, 2939), 'pesto.ws.features.payload_debug.PayloadDebug', 'PayloadDebug', ([], {'schema': 'input_schema'}), '(schema=input_schema)\n', (2918, 2939), False, 'from pesto.ws.features.payload_debug import PayloadDebug\n'), ((2953, 2996), 'pesto.ws.features.algorithm_wrapper.AlgorithmWrapper', 'AlgorithmWrapper', (['ProcessService._algorithm'], {}), '(ProcessService._algorithm)\n', (2969, 2996), False, 'from pesto.ws.features.algorithm_wrapper import AlgorithmWrapper\n'), ((3050, 3105), 'pesto.ws.features.response_serializer.ResponseSerializer', 'ResponseSerializer', ([], {'schema': 'output_schema', 'job_id': 'job_id'}), '(schema=output_schema, job_id=job_id)\n', (3068, 3105), False, 'from pesto.ws.features.response_serializer import ResponseSerializer\n'), ((3218, 3257), 'pesto.ws.features.stateful_response.StatefulResponse', 'StatefulResponse', (['self.url_root', 'job_id'], {}), '(self.url_root, job_id)\n', (3234, 3257), False, 'from pesto.ws.features.stateful_response import StatefulResponse\n'), ((3361, 3416), 'pesto.ws.features.stateless_response.StatelessResponse', 'StatelessResponse', (['self.url_root', 'job_id', 'output_schema'], {}), '(self.url_root, job_id, output_schema)\n', (3378, 3416), False, 'from pesto.ws.features.stateless_response import StatelessResponse\n'), ((3447, 3470), 'pesto.ws.core.pesto_feature.PestoFeatures', 'PestoFeatures', (['pipeline'], {}), '(pipeline)\n', (3460, 3470), False, 'from pesto.ws.core.pesto_feature import PestoFeatures\n'), ((2061, 2091), 'pesto.ws.service.describe.DescribeService', 'DescribeService', (['self.url_root'], {}), '(self.url_root)\n', (2076, 2091), False, 'from pesto.ws.service.describe import DescribeService\n')]
|
# -*- coding: utf-8 -*-
import logging
import os
import re
import sys
class Helpers:
def __init__(self, logger=None):
if logger is None:
self.logger = logging.getLogger(__name__)
else:
self.logger = logger
@staticmethod
def extract_sequence_num(filename):
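        # The migration sequence number is the run of leading digits in the file's basename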
sequence_num = re.search(
'([0-9]+)[^0-9].+',
os.path.basename(filename)
).group(1)
return int(sequence_num)
def append_migration(self, migrations, filename):
try:
migrations.append((self.extract_sequence_num(filename), filename))
except AttributeError:
self.logger.error("Invalid filename found: {}".format(filename))
sys.exit(1)
def find_migrations(self, sql_directory):
migrations = []
for filename in os.listdir(sql_directory):
if filename.endswith(".sql"):
self.append_migration(
migrations,
str(os.path.join(sql_directory, filename))
)
return migrations
@staticmethod
def sort_migrations(migrations):
if (
all(isinstance(tup, tuple) for tup in migrations) and
all(isinstance(tup[0], int) for tup in migrations) and
all(isinstance(tup[1], str) for tup in migrations)
):
migrations.sort(key=lambda tup: tup[0])
else:
raise TypeError(
"Migrations list did not contain only tuple(int, str)")
def populate_migrations(self, sql_directory):
migrations = self.find_migrations(sql_directory)
self.sort_migrations(migrations)
return migrations
@staticmethod
def get_unprocessed_migrations(db_version, migrations):
return [tup for tup in migrations if tup[0] > int(db_version)]
|
[
"logging.getLogger",
"os.listdir",
"os.path.join",
"os.path.basename",
"sys.exit"
] |
[((843, 868), 'os.listdir', 'os.listdir', (['sql_directory'], {}), '(sql_directory)\n', (853, 868), False, 'import os\n'), ((177, 204), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'import logging\n'), ((736, 747), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (744, 747), False, 'import sys\n'), ((389, 415), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (405, 415), False, 'import os\n'), ((1007, 1044), 'os.path.join', 'os.path.join', (['sql_directory', 'filename'], {}), '(sql_directory, filename)\n', (1019, 1044), False, 'import os\n')]
|
import configparser
import os
import typing
from sitri.providers.base import ConfigProvider
class IniConfigProvider(ConfigProvider):
"""Config provider for Initialization file (Ini)."""
provider_code = "ini"
def __init__(
self,
ini_path: str = "./config.ini",
):
"""
:param ini_path: path to ini file
"""
self.configparser = configparser.ConfigParser()
with open(os.path.abspath(ini_path)) as f:
self.configparser.read_file(f)
self._sections = None
@property
def sections(self):
if not self._sections:
self._sections = list(self.configparser.keys())
return self._sections
def get(self, key: str, section: str, **kwargs) -> typing.Optional[typing.Any]: # type: ignore
"""Get value from ini file.
:param key: key or path for search
:param section: section of ini file
"""
if section not in self.sections:
return None
return self.configparser[section].get(key)
def keys(self, section: str, **kwargs) -> typing.List[str]: # type: ignore
"""Get keys of section.
:param section: section of ini file
"""
if section not in self.sections:
return []
return list(self.configparser[section].keys())
|
[
"os.path.abspath",
"configparser.ConfigParser"
] |
[((395, 422), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (420, 422), False, 'import configparser\n'), ((442, 467), 'os.path.abspath', 'os.path.abspath', (['ini_path'], {}), '(ini_path)\n', (457, 467), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
"""
import scrapy
from tennis_model.tennis_model_scraper.tennis_model_scraper import items
class TennisDataCoUkSpider(scrapy.Spider):
name = "tennis_data_co_uk"
allowed_domains = ["www.tennis-data.co.uk"]
start_urls = ["http://www.tennis-data.co.uk/alldata.php"]
custom_settings = {'ITEM_PIPELINES': {'tennis_model_scraper.pipelines.TennisDataCoUkPipeline': 1}}
def _correct_ext(self, link):
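        # Some hrefs omit the dot before the "zip" extension; re-insert it so the download URL is valid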
if ".zip" in link:
return link
elif "zip" in link:
return ".zip".join(link.split("zip"))
else:
raise Exception("Unknown file extension from url - {0}. 'zip' is expected".format(link))
def parse(self, response):
archive_links = response.xpath("/html/body/table[5]/tr[2]/td[3]/a/@href")
for link in archive_links:
short_file_url = self._correct_ext(link.extract())
is_man_archives = 'w' not in short_file_url.split("/")[0]
if is_man_archives:
full_file_url = response.urljoin(short_file_url)
item = items.TennisDataCoUkItem()
item["file_urls"] = [full_file_url]
yield item
if __name__ == '__main__':
pass
|
[
"tennis_model.tennis_model_scraper.tennis_model_scraper.items.TennisDataCoUkItem"
] |
[((1093, 1119), 'tennis_model.tennis_model_scraper.tennis_model_scraper.items.TennisDataCoUkItem', 'items.TennisDataCoUkItem', ([], {}), '()\n', (1117, 1119), False, 'from tennis_model.tennis_model_scraper.tennis_model_scraper import items\n')]
|
import json
import requests
from .exceptions import (
RequestsError,
RequestsTimeoutError,
RPCError
)
_default_endpoint = 'http://localhost:9500'
_default_timeout = 30
def base_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> str:
"""
Basic RPC request
Parameters
---------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
str
Raw output from the request
Raises
------
TypeError
If params is not a list or None
RequestsTimeoutError
If request timed out
RequestsError
        If another request error occurred
"""
if params is None:
params = []
elif not isinstance(params, list):
raise TypeError(f'invalid type {params.__class__}')
try:
payload = {
"id": "1",
"jsonrpc": "2.0",
"method": method,
"params": params
}
headers = {
'Content-Type': 'application/json'
}
resp = requests.request('POST', endpoint, headers=headers, data=json.dumps(payload),
timeout=timeout, allow_redirects=True)
return resp.content
except requests.exceptions.Timeout as err:
raise RequestsTimeoutError(endpoint) from err
except requests.exceptions.RequestException as err:
raise RequestsError(endpoint) from err
def rpc_request(method, params=None, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
"""
RPC request
Parameters
---------
method: str
RPC Method to call
params: :obj:`list`, optional
Parameters for the RPC method
endpoint: :obj:`str`, optional
Endpoint to send request to
timeout: :obj:`int`, optional
Timeout in seconds
Returns
-------
dict
Returns dictionary representation of RPC response
Example format:
{
"jsonrpc": "2.0",
"id": 1,
"result": ...
}
Raises
------
RPCError
If RPC response returned a blockchain error
See Also
--------
base_request
"""
raw_resp = base_request(method, params, endpoint, timeout)
try:
resp = json.loads(raw_resp)
if 'error' in resp:
raise RPCError(method, endpoint, str(resp['error']))
return resp
except json.decoder.JSONDecodeError as err:
raise RPCError(method, endpoint, raw_resp) from err
# TODO: Add GET requests
|
[
"json.loads",
"json.dumps"
] |
[((2470, 2490), 'json.loads', 'json.loads', (['raw_resp'], {}), '(raw_resp)\n', (2480, 2490), False, 'import json\n'), ((1302, 1321), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1312, 1321), False, 'import json\n')]
|
import sys
import numpy
import numpy as np
from snappy import Product
from snappy import ProductData
from snappy import ProductIO
from snappy import ProductUtils
from snappy import FlagCoding
##############
import csv
###############MSVR
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
########################
if len(sys.argv) != 2:
print("usage: %s <file>" % sys.argv[0])
sys.exit(1)
file = sys.argv[1]
print("Reading...")
product = ProductIO.readProduct(file)
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
description = product.getDescription()
band_names = product.getBandNames()
print("Product: %s, %s" % (name, description))
print("Raster size: %d x %d pixels" % (width, height))
print("Start time: " + str(product.getStartTime()))
print("End time: " + str(product.getEndTime()))
print("Bands: %s" % (list(band_names)))
##---------------------------------------------------------------------------------
with open('rice_LUT.csv','r') as dest_f:
data_iter = csv.reader(dest_f,
delimiter = ',',
quotechar = '"')
data = [data for data in data_iter]
data_array = np.asarray(data, dtype = np.float32)
VV = data_array[:,1]
VH = data_array[:,2]
PAI = data_array[:,0]
X=np.column_stack((VV,VH))
Y = PAI
#SVR training
pipeline = make_pipeline(StandardScaler(),
SVR(kernel='rbf', epsilon=0.105, C=250, gamma = 2.8),
)
SVRmodel=pipeline.fit(X,Y)
# Predict for validation data
valX = X
y_out = pipeline.predict(valX)
##---------------------------------------------------------------------------------
bandc11 = product.getBand('C11')
bandc22 = product.getBand('C22')
laiProduct = Product('LAI', 'LAI', width, height)
laiBand = laiProduct.addBand('lai', ProductData.TYPE_FLOAT32)
laiFlagsBand = laiProduct.addBand('lai_flags', ProductData.TYPE_UINT8)
writer = ProductIO.getProductWriter('BEAM-DIMAP')
ProductUtils.copyGeoCoding(product, laiProduct)
ProductUtils.copyMetadata(product, laiProduct)
ProductUtils.copyTiePointGrids(product, laiProduct)
laiFlagCoding = FlagCoding('lai_flags')
laiFlagCoding.addFlag("LAI_LOW", 1, "LAI below 0")
laiFlagCoding.addFlag("LAI_HIGH", 2, "LAI above 5")
group = laiProduct.getFlagCodingGroup()
#print(dir(group))
group.add(laiFlagCoding)
laiFlagsBand.setSampleCoding(laiFlagCoding)
laiProduct.setProductWriter(writer)
laiProduct.writeHeader('LAImap_output.dim')
c11 = numpy.zeros(width, dtype=numpy.float32)
c22 = numpy.zeros(width, dtype=numpy.float32)
print("Writing...")
for y in range(height):
print("processing line ", y, " of ", height)
c11 = bandc11.readPixels(0, y, width, 1, c11)
c22 = bandc22.readPixels(0, y, width, 1, c22)
Z=np.column_stack((c11,c22))
#ndvi = (r10 - r7) / (r10 + r7)
    lai = pipeline.predict(Z)
laiBand.writePixels(0, y, width, 1, lai)
laiLow = lai < 0.0
laiHigh = lai > 5.0
laiFlags = numpy.array(laiLow + 2 * laiHigh, dtype=numpy.int32)
laiFlagsBand.writePixels(0, y, width, 1, laiFlags)
laiProduct.closeIO()
print("Done.")
|
[
"snappy.ProductIO.readProduct",
"snappy.ProductUtils.copyGeoCoding",
"snappy.Product",
"snappy.ProductIO.getProductWriter",
"numpy.asarray",
"numpy.column_stack",
"snappy.FlagCoding",
"sklearn.preprocessing.StandardScaler",
"numpy.zeros",
"numpy.array",
"snappy.ProductUtils.copyMetadata",
"csv.reader",
"sys.exit",
"sklearn.svm.SVR",
"snappy.ProductUtils.copyTiePointGrids"
] |
[((520, 547), 'snappy.ProductIO.readProduct', 'ProductIO.readProduct', (['file'], {}), '(file)\n', (541, 547), False, 'from snappy import ProductIO\n'), ((1284, 1318), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (1294, 1318), True, 'import numpy as np\n'), ((1389, 1414), 'numpy.column_stack', 'np.column_stack', (['(VV, VH)'], {}), '((VV, VH))\n', (1404, 1414), True, 'import numpy as np\n'), ((1806, 1842), 'snappy.Product', 'Product', (['"""LAI"""', '"""LAI"""', 'width', 'height'], {}), "('LAI', 'LAI', width, height)\n", (1813, 1842), False, 'from snappy import Product\n'), ((1985, 2025), 'snappy.ProductIO.getProductWriter', 'ProductIO.getProductWriter', (['"""BEAM-DIMAP"""'], {}), "('BEAM-DIMAP')\n", (2011, 2025), False, 'from snappy import ProductIO\n'), ((2027, 2074), 'snappy.ProductUtils.copyGeoCoding', 'ProductUtils.copyGeoCoding', (['product', 'laiProduct'], {}), '(product, laiProduct)\n', (2053, 2074), False, 'from snappy import ProductUtils\n'), ((2075, 2121), 'snappy.ProductUtils.copyMetadata', 'ProductUtils.copyMetadata', (['product', 'laiProduct'], {}), '(product, laiProduct)\n', (2100, 2121), False, 'from snappy import ProductUtils\n'), ((2122, 2173), 'snappy.ProductUtils.copyTiePointGrids', 'ProductUtils.copyTiePointGrids', (['product', 'laiProduct'], {}), '(product, laiProduct)\n', (2152, 2173), False, 'from snappy import ProductUtils\n'), ((2191, 2214), 'snappy.FlagCoding', 'FlagCoding', (['"""lai_flags"""'], {}), "('lai_flags')\n", (2201, 2214), False, 'from snappy import FlagCoding\n'), ((2535, 2574), 'numpy.zeros', 'numpy.zeros', (['width'], {'dtype': 'numpy.float32'}), '(width, dtype=numpy.float32)\n', (2546, 2574), False, 'import numpy\n'), ((2581, 2620), 'numpy.zeros', 'numpy.zeros', (['width'], {'dtype': 'numpy.float32'}), '(width, dtype=numpy.float32)\n', (2592, 2620), False, 'import numpy\n'), ((457, 468), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (465, 468), False, 'import sys\n'), ((1124, 1172), 'csv.reader', 'csv.reader', (['dest_f'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(dest_f, delimiter=\',\', quotechar=\'"\')\n', (1134, 1172), False, 'import csv\n'), ((1462, 1478), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1476, 1478), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1484, 1534), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""', 'epsilon': '(0.105)', 'C': '(250)', 'gamma': '(2.8)'}), "(kernel='rbf', epsilon=0.105, C=250, gamma=2.8)\n", (1487, 1534), False, 'from sklearn.svm import SVR\n'), ((2827, 2854), 'numpy.column_stack', 'np.column_stack', (['(c11, c22)'], {}), '((c11, c22))\n', (2842, 2854), True, 'import numpy as np\n'), ((3034, 3086), 'numpy.array', 'numpy.array', (['(laiLow + 2 * laiHigh)'], {'dtype': 'numpy.int32'}), '(laiLow + 2 * laiHigh, dtype=numpy.int32)\n', (3045, 3086), False, 'import numpy\n')]
|
from __future__ import print_function
import argparse, sys
from .utils import is_textfile
def contains_crlf(filename):
with open(filename, mode='rb') as file_checked:
for line in file_checked.readlines():
if line.endswith(b'\r\n'):
return True
return False
def main(argv=None):
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*', help='filenames to check')
args = parser.parse_args(argv)
text_files = [f for f in args.filenames if is_textfile(f)]
files_with_crlf = [f for f in text_files if contains_crlf(f)]
return_code = 0
for file_with_crlf in files_with_crlf:
print('CRLF end-lines detected in file: {0}'.format(file_with_crlf))
return_code = 1
return return_code
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
[
"argparse.ArgumentParser"
] |
[((337, 362), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (360, 362), False, 'import argparse, sys\n')]
|
from dataclasses import dataclass
from typing import List
from typing import Union
from postmanparser.description import Description
from postmanparser.exceptions import InvalidObjectException
from postmanparser.exceptions import MissingRequiredFieldException
@dataclass
class FormParameter:
key: str
value: str = ""
src: Union[List, str, None] = None
disabled: bool = False
form_param_type: str = ""
content_type: str = "" # should override content-type in header
description: Union[Description, None, str] = None
@classmethod
def parse(cls, data: dict):
key = data.get("key")
if key is None:
raise MissingRequiredFieldException(
"'formparameter' object should have 'key' property"
)
value = data.get("value", "")
src = data.get("src")
if value and src is not None:
raise InvalidObjectException(
"'formparamter' object can eiher have src or value and not both."
)
description = data.get("description")
if isinstance(description, dict):
description = Description.parse(description)
return cls(
key,
value=value,
src=src,
disabled=data.get("disabled", False),
form_param_type=data.get("type", ""),
content_type=data.get("contentType", ""),
description=description,
)
|
[
"postmanparser.exceptions.MissingRequiredFieldException",
"postmanparser.exceptions.InvalidObjectException",
"postmanparser.description.Description.parse"
] |
[((669, 756), 'postmanparser.exceptions.MissingRequiredFieldException', 'MissingRequiredFieldException', (['"""\'formparameter\' object should have \'key\' property"""'], {}), '(\n "\'formparameter\' object should have \'key\' property")\n', (698, 756), False, 'from postmanparser.exceptions import MissingRequiredFieldException\n'), ((906, 1000), 'postmanparser.exceptions.InvalidObjectException', 'InvalidObjectException', (['"""\'formparamter\' object can eiher have src or value and not both."""'], {}), '(\n "\'formparamter\' object can eiher have src or value and not both.")\n', (928, 1000), False, 'from postmanparser.exceptions import InvalidObjectException\n'), ((1140, 1170), 'postmanparser.description.Description.parse', 'Description.parse', (['description'], {}), '(description)\n', (1157, 1170), False, 'from postmanparser.description import Description\n')]
|
from __future__ import absolute_import, division, print_function
import numpy as np
import wx
from dials.array_family import flex
from dials_viewer_ext import rgb_img
class wxbmp_from_np_array(object):
def __init__(
self, lst_data_in, show_nums=True, palette="black2white", lst_data_mask_in=None
):
self.wx_bmp_arr = rgb_img()
if lst_data_in is None and lst_data_mask_in is None:
self._ini_wx_bmp_lst = None
else:
self._ini_wx_bmp_lst = []
for lst_pos in range(len(lst_data_in)):
data_3d_in = lst_data_in[lst_pos]
xmax = data_3d_in.shape[1]
ymax = data_3d_in.shape[2]
                # TODO: assert that data_3d_in and data_3d_in_mask have the same shape
if lst_data_mask_in is not None:
data_3d_in_mask = lst_data_mask_in[lst_pos]
self.vl_max = float(np.amax(data_3d_in))
self.vl_min = float(np.amin(data_3d_in))
tmp_data2d = np.zeros((xmax, ymax), "double")
tmp_data2d_mask = np.zeros((xmax, ymax), "double")
z_dp = data_3d_in.shape[0]
single_block_lst_01 = []
for z in range(z_dp):
# print "z =", z
tmp_data2d[:, :] = data_3d_in[z : z + 1, :, :]
if lst_data_mask_in is not None:
tmp_data2d_mask[:, :] = data_3d_in_mask[z : z + 1, :, :]
else:
tmp_data2d_mask = None
data_sigle_img = self._wx_img_w_cpp(
tmp_data2d, show_nums, palette, tmp_data2d_mask
)
single_block_lst_01.append(data_sigle_img)
self._ini_wx_bmp_lst.append(single_block_lst_01)
def bmp_lst_scaled(self, scale=1.0):
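        # Return the cached per-slice images as wx.Bitmap objects scaled by 'scale'; a placeholder bitmap is used when no shoebox data was supplied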
if self._ini_wx_bmp_lst is None:
NewW = 350
wx_image = wx.Image(NewW, NewW)
wxBitmap = wx_image.ConvertToBitmap()
dc = wx.MemoryDC(wxBitmap)
text = "No Shoebox data"
w, h = dc.GetSize()
tw, th = dc.GetTextExtent(text)
dc.Clear()
dc.DrawText(text, (w - tw) / 2, (h - th) / 2) # display text in center
dc.SelectObject(wxBitmap)
del dc
wx_bmp_lst = [[wxBitmap]]
else:
wx_bmp_lst = []
for data_3d in self._ini_wx_bmp_lst:
single_block_lst = []
for sigle_img_data in data_3d:
single_block_lst.append(self._wx_bmp_scaled(sigle_img_data, scale))
wx_bmp_lst.append(single_block_lst)
return wx_bmp_lst
def _wx_img_w_cpp(self, np_2d_tmp, show_nums, palette, np_2d_mask=None):
xmax = np_2d_tmp.shape[1]
ymax = np_2d_tmp.shape[0]
if np_2d_mask is None:
np_2d_mask = np.zeros((ymax, xmax), "double")
transposed_data = np.zeros((ymax, xmax), "double")
transposed_mask = np.zeros((ymax, xmax), "double")
transposed_data[:, :] = np_2d_tmp
transposed_mask[:, :] = np_2d_mask
flex_data_in = flex.double(transposed_data)
flex_mask_in = flex.double(transposed_mask)
if palette == "black2white":
palette_num = 1
elif palette == "white2black":
palette_num = 2
elif palette == "hot ascend":
palette_num = 3
else: # assuming "hot descend"
palette_num = 4
img_array_tmp = self.wx_bmp_arr.gen_bmp(
flex_data_in, flex_mask_in, show_nums, palette_num
)
np_img_array = img_array_tmp.as_numpy_array()
height = np.size(np_img_array[:, 0:1, 0:1])
width = np.size(np_img_array[0:1, :, 0:1])
img_array = np.empty((height, width, 3), "uint8")
img_array[:, :, :] = np_img_array[:, :, :]
self._wx_image = wx.Image(width, height)
self._wx_image.SetData(img_array.tostring())
data_to_become_bmp = (self._wx_image, width, height)
return data_to_become_bmp
def _wx_bmp_scaled(self, data_to_become_bmp, scale):
to_become_bmp = data_to_become_bmp[0]
width = data_to_become_bmp[1]
height = data_to_become_bmp[2]
NewW = int(width * scale)
NewH = int(height * scale)
to_become_bmp = to_become_bmp.Scale(NewW, NewH, wx.IMAGE_QUALITY_NORMAL)
wxBitmap = to_become_bmp.ConvertToBitmap()
return wxBitmap
|
[
"numpy.amax",
"numpy.amin",
"dials_viewer_ext.rgb_img",
"wx.MemoryDC",
"numpy.size",
"wx.Image",
"numpy.zeros",
"numpy.empty",
"dials.array_family.flex.double"
] |
[((345, 354), 'dials_viewer_ext.rgb_img', 'rgb_img', ([], {}), '()\n', (352, 354), False, 'from dials_viewer_ext import rgb_img\n'), ((3055, 3087), 'numpy.zeros', 'np.zeros', (['(ymax, xmax)', '"""double"""'], {}), "((ymax, xmax), 'double')\n", (3063, 3087), True, 'import numpy as np\n'), ((3114, 3146), 'numpy.zeros', 'np.zeros', (['(ymax, xmax)', '"""double"""'], {}), "((ymax, xmax), 'double')\n", (3122, 3146), True, 'import numpy as np\n'), ((3257, 3285), 'dials.array_family.flex.double', 'flex.double', (['transposed_data'], {}), '(transposed_data)\n', (3268, 3285), False, 'from dials.array_family import flex\n'), ((3309, 3337), 'dials.array_family.flex.double', 'flex.double', (['transposed_mask'], {}), '(transposed_mask)\n', (3320, 3337), False, 'from dials.array_family import flex\n'), ((3801, 3835), 'numpy.size', 'np.size', (['np_img_array[:, 0:1, 0:1]'], {}), '(np_img_array[:, 0:1, 0:1])\n', (3808, 3835), True, 'import numpy as np\n'), ((3852, 3886), 'numpy.size', 'np.size', (['np_img_array[0:1, :, 0:1]'], {}), '(np_img_array[0:1, :, 0:1])\n', (3859, 3886), True, 'import numpy as np\n'), ((3907, 3944), 'numpy.empty', 'np.empty', (['(height, width, 3)', '"""uint8"""'], {}), "((height, width, 3), 'uint8')\n", (3915, 3944), True, 'import numpy as np\n'), ((4022, 4045), 'wx.Image', 'wx.Image', (['width', 'height'], {}), '(width, height)\n', (4030, 4045), False, 'import wx\n'), ((2020, 2040), 'wx.Image', 'wx.Image', (['NewW', 'NewW'], {}), '(NewW, NewW)\n', (2028, 2040), False, 'import wx\n'), ((2109, 2130), 'wx.MemoryDC', 'wx.MemoryDC', (['wxBitmap'], {}), '(wxBitmap)\n', (2120, 2130), False, 'import wx\n'), ((2995, 3027), 'numpy.zeros', 'np.zeros', (['(ymax, xmax)', '"""double"""'], {}), "((ymax, xmax), 'double')\n", (3003, 3027), True, 'import numpy as np\n'), ((1074, 1106), 'numpy.zeros', 'np.zeros', (['(xmax, ymax)', '"""double"""'], {}), "((xmax, ymax), 'double')\n", (1082, 1106), True, 'import numpy as np\n'), ((1141, 1173), 'numpy.zeros', 'np.zeros', (['(xmax, ymax)', '"""double"""'], {}), "((xmax, ymax), 'double')\n", (1149, 1173), True, 'import numpy as np\n'), ((967, 986), 'numpy.amax', 'np.amax', (['data_3d_in'], {}), '(data_3d_in)\n', (974, 986), True, 'import numpy as np\n'), ((1024, 1043), 'numpy.amin', 'np.amin', (['data_3d_in'], {}), '(data_3d_in)\n', (1031, 1043), True, 'import numpy as np\n')]
|
import numpy as np
def check_x_y(x, y):
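    # Sanity checks: both inputs must be numpy arrays (x at most 3-D, y at most 2-D) of equal length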
assert isinstance(x, np.ndarray) and isinstance(y, np.ndarray)
assert np.ndim(x) <= 3 and np.ndim(y) <= 2
assert len(x) == len(y)
|
[
"numpy.ndim"
] |
[((125, 135), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (132, 135), True, 'import numpy as np\n'), ((145, 155), 'numpy.ndim', 'np.ndim', (['y'], {}), '(y)\n', (152, 155), True, 'import numpy as np\n')]
|
"""
    CSharp (C#) domain for sphinx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sphinxsharp Pro (with custom styling)
:copyright: Copyright 2021 by MadTeddy
"""
import re
import warnings
from os import path
from collections import defaultdict, namedtuple
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from sphinx.locale import get_translation
from sphinx.domains import Domain, Index, ObjType
from sphinx.roles import XRefRole
from sphinx.directives import ObjectDescription
from sphinx.util.docfields import DocFieldTransformer
from sphinx.util.nodes import make_refnode
from sphinx import addnodes
from sphinx.util.fileutil import copy_asset
MODIFIERS = ('public', 'private', 'protected', 'internal',
'static', 'sealed', 'abstract', 'const', 'partial',
'readonly', 'virtual', 'extern', 'new', 'override',
'unsafe', 'async', 'event', 'delegate')
VALUE_KEYWORDS = ('char', 'ulong', 'byte', 'decimal',
'double', 'bool', 'int', 'null', 'sbyte',
'float', 'long', 'object', 'short', 'string',
'uint', 'ushort', 'void')
PARAM_MODIFIERS = ('ref', 'out', 'params')
MODIFIERS_RE = '|'.join(MODIFIERS)
PARAM_MODIFIERS_RE = '|'.join(PARAM_MODIFIERS)
TYPE_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?(\w+)\s([\w\.]+)(?:<(.+)>)?(?:\s?\:\s?(.+))?$')
REF_TYPE_RE = re.compile(r'^(?:(new)\s+)?([\w\.]+)\s*(?:<(.+)>)*(\[\])*\s?(?:\((.*)\))?$')
METHOD_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?([^\s=\(\)]+\s+)?([^\s=\(\)]+)\s?(?:\<(.+)\>)?\s?(?:\((.+)*\))$')
PARAM_SIG_RE = re.compile(r'^(?:(?:(' + PARAM_MODIFIERS_RE + r')\s)*)?([^=]+)\s+([^=]+)\s*(?:=\s?(.+))?$')
VAR_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?([^=]+)\s+([^\s=]+)\s*(?:=\s*(.+))?$')
PROP_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE
+ r')\s+)*)?(.+)\s+([^\s]+)\s*(?:{(\s*get;\s*)?((?:'
+ MODIFIERS_RE + r')?\s*set;\s*)?})$')
ENUM_SIG_RE = re.compile(r'^((?:(?:' + MODIFIERS_RE + r')\s+)*)?(?:enum)\s?(\w+)$')
_ = get_translation('sphinxsharp')
class CSharpObject(ObjectDescription):
PARENT_ATTR_NAME = 'sphinxsharp:parent'
PARENT_TYPE_NAME = 'sphinxsharp:type'
ParentType = namedtuple('ParentType', ['parent', 'name', 'type', 'override'])
option_spec = {
'noindex': directives.flag
}
def __init__(self, *args, **kwargs):
super(CSharpObject, self).__init__(*args, **kwargs)
self.parentname_set = None
self.parentname_saved = None
def run(self):
if ':' in self.name:
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
self.indexnode = addnodes.index(entries=[])
node = addnodes.desc()
node.document = self.state.document
node['domain'] = self.domain
node['classes'].append('csharp')
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = noindex = ('noindex' in self.options)
self.names = []
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
beforesignode = CSNodes.EmptyNode()
node.append(beforesignode)
signode = addnodes.desc_signature(sig, '')
signode['first'] = False
node.append(signode)
self.before_sig(beforesignode)
try:
name = self.handle_signature(sig, signode)
except ValueError:
signode.clear()
signode += addnodes.desc_name(sig, sig)
continue
if name not in self.names:
self.names.append(name)
if not noindex:
self.add_target_and_index(name, sig, signode)
aftersignode = CSNodes.EmptyNode()
node.append(aftersignode)
self.after_sig(aftersignode)
contentnode = addnodes.desc_content()
node.append(contentnode)
self.before_content_node(contentnode)
if self.names:
self.env.temp_data['object'] = self.names[0]
self.before_content()
self.state.nested_parse(self.content, self.content_offset, contentnode)
self.after_content_node(contentnode)
DocFieldTransformer(self).transform_all(contentnode)
self.env.temp_data['object'] = None
self.after_content()
return [self.indexnode, node]
def before_sig(self, signode):
"""
Called before main ``signode`` appends
"""
pass
def after_sig(self, signode):
"""
Called after main ``signode`` appends
"""
pass
def before_content_node(self, node):
"""
Get ``contentnode`` before main content will append
"""
pass
def after_content_node(self, node):
"""
Get ``contentnode`` after main content was appended
"""
pass
def before_content(self):
obj = self.env.temp_data['object']
if obj:
self.parentname_set = True
self.parentname_saved = self.env.ref_context.get(self.PARENT_ATTR_NAME)
self.env.ref_context[self.PARENT_ATTR_NAME] = obj
else:
self.parentname_set = False
def after_content(self):
if self.parentname_set:
self.env.ref_context[self.PARENT_ATTR_NAME] = self.parentname_saved
def has_parent(self):
return self._check_parent(self.PARENT_ATTR_NAME)
def has_parent_type(self):
return self._check_parent(self.PARENT_TYPE_NAME)
def _check_parent(self, attr):
return attr in self.env.ref_context and \
self.env.ref_context[attr] is not None
def get_parent(self):
return self.env.ref_context.get(self.PARENT_ATTR_NAME)
def get_type_parent(self):
return self.env.ref_context.get(self.PARENT_TYPE_NAME)
def get_index_text(self, sig, name, typ):
raise NotImplementedError('Must be implemented in subclass')
def parse_signature(self, sig):
raise NotImplementedError('Must be implemented in subclass')
def add_target_and_index(self, name, sig, signode):
objname, objtype = self.get_obj_name(sig)
type_parent = self.get_type_parent() if self.has_parent_type() else None
if self.objtype != 'type' and type_parent:
self.env.ref_context[self.PARENT_ATTR_NAME] = '{}{}'.format(type_parent.parent + '.' \
if type_parent.parent else '',
type_parent.name)
name = self.get_fullname(objname)
self.names.clear()
self.names.append(name)
anchor = '{}-{}'.format(self.objtype, name)
if anchor not in self.state.document.ids:
signode['names'].append(anchor)
signode['ids'].append(anchor)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
objects = self.env.domaindata['sphinxsharp']['objects']
key = (self.objtype, name)
if key in objects:
warnings.warn('duplicate description of {}, other instance in {}'.format(
key, self.env.doc2path(objects[key][0])), Warning)
objects[key] = (self.env.docname, 'delegate' if self.objtype == 'method' else objtype)
index_text = self.get_index_text(sig, objname, objtype)
if index_text:
parent = self.get_parent() if self.has_parent() else None
if type_parent and type_parent.override and type_parent.name != objname:
type_parent = self.ParentType(parent=type_parent.parent, name=type_parent.name, type=type_parent.type,
override=None)
index_format = '{parent} (C# {namespace});{text}' \
if (type_parent and type_parent.parent and (type_parent.name == objname and self.objtype == 'type') \
and not type_parent.override) or (parent and not type_parent) \
else '{name} (C# {type} {in_text} {parent});{text}' if type_parent and type_parent.name else '{text}'
self.indexnode['entries'].append(('single', index_format.format(
parent=type_parent.parent if type_parent else parent if parent else '',
namespace=_('namespace'),
text=index_text,
name=type_parent.override if type_parent and type_parent.override \
else type_parent.name if type_parent else '',
type=_(type_parent.type) if type_parent else '',
in_text=_('in')
), anchor, None, None))
def get_fullname(self, name):
fullname = '{parent}{name}'.format(
parent=self.get_parent() + '.' if self.has_parent() else '', name=name)
return fullname
def get_obj_name(self, sig):
raise NotImplementedError('Must be implemented in subclass')
def append_ref_signature(self, typname, signode, append_generic=True):
match = REF_TYPE_RE.match(typname.strip())
if not match:
raise Exception('Invalid reference type signature. Got: {}'.format(typname))
is_new, name, generic, is_array, constr = match.groups()
tnode = addnodes.desc_type()
if is_new:
tnode += CSNodes.Keyword(text='new')
tnode += CSNodes.TextNode(text=' ')
types = name.split('.')
explicit_path = []
i = 1
for t in types:
styp = t.strip()
refnode = None
if styp not in VALUE_KEYWORDS:
explicit_path.append(styp)
refnode = addnodes.pending_xref('', refdomain='sphinxsharp', reftype=None,
reftarget=styp, modname=None, classname=None)
if not self.has_parent():
refnode[self.PARENT_ATTR_NAME] = None
else:
refnode[self.PARENT_ATTR_NAME] = self.get_parent()
if len(explicit_path) > 1:
target_path = '.'.join(explicit_path[:-1])
type_par = self.get_type_parent() if self.has_parent_type() else None
refnode[self.PARENT_ATTR_NAME] = (type_par.parent + '.' \
if type_par and type_par.parent \
else '') + target_path
refnode += CSNodes.UnknownType(typ=None, text=styp)
else:
refnode = CSNodes.Keyword(text=styp)
tnode += refnode
if i < len(types):
tnode += CSNodes.TextNode(text='.')
i += 1
if append_generic and generic:
gnode = CSNodes.EmptyNode()
gnode += CSNodes.TextNode(text='<')
gen_groups = split_sig(generic)
i = 1
for g in gen_groups:
self.append_ref_signature(g, gnode, append_generic)
if i < len(gen_groups):
gnode += CSNodes.TextNode(text=', ')
i += 1
gnode += CSNodes.TextNode(text='>')
tnode += gnode
if is_array:
tnode += CSNodes.TextNode(text='[]')
if constr is not None:
tnode += CSNodes.TextNode(text='()')
signode += tnode
def append_generic(self, generic, signode):
gnode = CSNodes.EmptyNode()
gnode += CSNodes.TextNode(text='<')
generics = generic.split(',')
i = 1
for g in generics:
gnode += CSNodes.Generic(text=g)
if i < len(generics):
gnode += CSNodes.TextNode(text=', ')
i += 1
gnode += CSNodes.TextNode(text='>')
signode += gnode
class CSharpType(CSharpObject):
option_spec = {
**CSharpObject.option_spec,
'nonamespace': directives.flag,
'parent': directives.unchanged
}
def before_sig(self, signode):
if 'nonamespace' not in self.options and self.has_parent():
signode += CSNodes.Description(title=_('namespace'), desc=self.get_parent())
def handle_signature(self, sig, signode):
mod, typ, name, generic, inherits = self.parse_signature(sig)
tnode = CSNodes.EmptyNode()
tnode += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
tnode += CSNodes.TextNode(text=' ')
tnode += CSNodes.Keyword(text='{}'.format(typ))
tnode += CSNodes.TextNode(text=' ')
tnode += CSNodes.UnknownType(typ=typ, text=name)
if generic:
self.append_generic(generic, tnode)
if inherits:
inherits_node = CSNodes.EmptyNode()
inherits_node += CSNodes.TextNode(text=' : ')
inherit_types = split_sig(inherits)
i = 1
for t in inherit_types:
self.append_ref_signature(t, inherits_node)
if i < len(inherit_types):
inherits_node += CSNodes.TextNode(text=', ')
i += 1
tnode += inherits_node
signode += tnode
opt_parent = self.options['parent'] if 'parent' in self.options else None
form = '{}.{}' if self.has_parent() and opt_parent else '{}{}'
parent = form.format(self.get_parent() if self.has_parent() else '', opt_parent if opt_parent else '')
self.env.ref_context[CSharpObject.PARENT_TYPE_NAME] = self.ParentType(
parent=parent, name=name, type=typ, override=opt_parent)
if opt_parent:
self.env.ref_context[self.PARENT_ATTR_NAME] = parent
return self.get_fullname(name)
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})'.format(name, _(typ))
return rname
def parse_signature(self, sig):
match = TYPE_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid type signature. Got: {}'.format(sig))
mod, typ, names, generic, inherits = match.groups()
return mod, typ.strip(), names, generic, inherits
def get_obj_name(self, sig):
_, typ, name, _, _ = self.parse_signature(sig)
return name, typ
class CSharpEnum(CSharpObject):
option_spec = {**CSharpObject.option_spec, 'values': directives.unchanged_required,
**dict(zip([('val(' + str(i) + ')') for i in range(1, 21)],
[directives.unchanged] * 20))}
def handle_signature(self, sig, signode):
mod, name = self.parse_signature(sig)
node = CSNodes.EmptyNode()
if mod:
node += CSNodes.Modificator(text='{}'.format(mod.strip()))
node += CSNodes.TextNode(text=' ')
node += CSNodes.Keyword(text='enum')
node += CSNodes.TextNode(text=' ')
node += CSNodes.Enum(text='{}'.format(name.strip()))
signode += node
return self.get_fullname(name)
def after_content_node(self, node):
options = self.options['values'].split()
node += CSNodes.Description(title=_('values').title(), desc=', '.join(options))
options_values = list(value for key, value in self.options.items() \
if key not in ('noindex', 'values') and value)
if not options_values:
return
i = 0
for vname in options:
if i < len(options_values):
node += CSNodes.Description(title=vname, desc=options_values[i])
i += 1
def parse_signature(self, sig):
match = ENUM_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid enum signature. Got: {}'.format(sig))
mod, name = match.groups()
return mod, name.strip()
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})'.format(name, _('enum'))
return rname
def get_obj_name(self, sig):
_, name = self.parse_signature(sig)
return name, 'enum'
class CSharpVariable(CSharpObject):
_default = ''
def handle_signature(self, sig, signode):
mod, typ, name, self._default = self.parse_signature(sig)
node = CSNodes.EmptyNode()
node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
node += CSNodes.TextNode(text=' ')
self.append_ref_signature(typ, node)
node += CSNodes.TextNode(text=' ')
node += CSNodes.VariableName(text='{}'.format(name))
signode += node
return self.get_fullname(name)
def before_content_node(self, node):
if self._default:
node += CSNodes.Description(title=_('value').title(), desc=self._default)
def parse_signature(self, sig):
match = VAR_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid variable signature. Got: {}'.format(sig))
mod, typ, name, default = match.groups()
return mod, typ.strip(), name.strip(), default
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})->{}'.format(name, _('variable'), typ)
return rname
def get_obj_name(self, sig):
_, typ, name, _ = self.parse_signature(sig)
return name, typ
class CSharpProperty(CSharpObject):
def handle_signature(self, sig, signode):
mod, typ, name, getter, setter = self.parse_signature(sig)
node = CSNodes.EmptyNode()
node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
node += CSNodes.TextNode(text=' ')
self.append_ref_signature(typ, node)
node += CSNodes.TextNode(text=' ')
node += CSNodes.MethodName(text='{}'.format(name))
node += CSNodes.TextNode(text=' { ')
accessors = []
if getter:
accessors.append('get;')
if setter:
accessors.append(setter.strip())
node += CSNodes.Modificator(text=' '.join(accessors))
node += CSNodes.TextNode(text=' } ')
signode += node
return self.get_fullname(name)
def parse_signature(self, sig):
match = PROP_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid property signature. Got: {}'.format(sig))
mod, typ, name, getter, setter = match.groups()
return mod, typ.strip(), name.strip(), getter, setter
def get_index_text(self, sig, name, typ):
rname = '{} (C# {})->{}'.format(name, _('property'), typ)
return rname
def get_obj_name(self, sig):
_, typ, name, _, _ = self.parse_signature(sig)
return name, typ
class CSharpMethod(CSharpObject):
option_spec = {**CSharpObject.option_spec,
'returns': directives.unchanged,
**dict(zip([('param(' + str(i) + ')') for i in range(1, 8)],
[directives.unchanged] * 7))}
_params_list = ()
def handle_signature(self, sig, signode):
mod, typ, name, generic, params = self.parse_signature(sig)
node = CSNodes.EmptyNode()
node += CSNodes.Modificator(text='{}'.format(mod if mod else 'private'))
node += CSNodes.TextNode(text=' ')
self.append_ref_signature(typ if typ else name, node)
if typ:
node += CSNodes.TextNode(text=' ')
node += CSNodes.MethodName(text='{}'.format(name))
if generic:
self.append_generic(generic, node)
param_node = CSNodes.EmptyNode()
param_node += CSNodes.TextNode(text='(')
if params:
self._params_list = self._get_params(params)
i = 1
for (pmod, ptyp, pname, pvalue) in self._params_list:
pnode = CSNodes.EmptyNode()
if pmod:
pnode += CSNodes.Keyword(text='{}'.format(pmod))
pnode += CSNodes.TextNode(text=' ')
self.append_ref_signature(ptyp, pnode)
pnode += CSNodes.TextNode(text=' ')
pnode += CSNodes.TextNode(text='{}'.format(pname))
if pvalue:
pnode += CSNodes.TextNode(text=' = ')
self.append_ref_signature(pvalue, pnode)
param_node += pnode
if i < len(self._params_list):
param_node += CSNodes.TextNode(text=', ')
i += 1
param_node += CSNodes.TextNode(text=')')
node += param_node
signode += node
return self.get_fullname(name)
def before_content_node(self, node):
if 'returns' in self.options:
node += CSNodes.Description(title=_('returns').title(), desc=self.options['returns'])
def after_content_node(self, node):
options_values = list(value for key, value in self.options.items() if key != 'noindex')
i = 0
for (_, _, pname, _) in self._params_list:
if i < len(options_values):
node += CSNodes.Description(title=pname, desc=options_values[i], lower=True)
i += 1
def after_content(self):
super().after_content()
if self._params_list is not None and len(self._params_list) > 0:
del self._params_list
def parse_signature(self, sig):
match = METHOD_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid method signature. Got: {}'.format(sig))
mod, typ, name, generic, params = match.groups()
return mod, typ, name.strip(), generic, params
@staticmethod
def parse_param_signature(sig):
match = PARAM_SIG_RE.match(sig.strip())
if not match:
raise Exception('Invalid parameter signature. Got: {}'.format(sig))
mod, typ, name, value = match.groups()
return mod, typ.strip(), name.strip(), value
def _get_params(self, params):
if not params:
return None
result = []
params_group = split_sig(params)
for param in params_group:
pmod, ptyp, pname, pvalue = self.parse_param_signature(param)
result.append((pmod, ptyp, pname, pvalue))
return result
def get_index_text(self, sig, name, typ):
params_text = ''
if self._params_list:
names = [pname
for _, _, pname, _
in self._params_list]
params_text = '({})'.format(', '.join(names))
if typ:
rname = '{}{} (C# {})->{}'.format(name, params_text, _('method'), typ)
else:
rname = '{}{} (C# {})->{}'.format(name, params_text, _('constructor'), name)
return rname
def get_obj_name(self, sig):
_, typ, name, _, _ = self.parse_signature(sig)
return name, typ
class CSharpNamespace(Directive):
required_arguments = 1
def run(self):
env = self.state.document.settings.env
namespace = self.arguments[0].strip()
if namespace is None:
env.ref_context.pop(CSharpObject.PARENT_ATTR_NAME, None)
else:
env.ref_context[CSharpObject.PARENT_ATTR_NAME] = namespace
return []
class CSharpEndType(Directive):
required_arguments = 0
def run(self):
env = self.state.document.settings.env
if CSharpObject.PARENT_TYPE_NAME in env.ref_context:
env.ref_context.pop(CSharpObject.PARENT_TYPE_NAME, None)
return []
class CSharpXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
refnode[CSharpObject.PARENT_ATTR_NAME] = env.ref_context.get(
CSharpObject.PARENT_ATTR_NAME)
return super(CSharpXRefRole, self).process_link(env, refnode,
has_explicit_title, title, target)
class CSharpIndex(Index):
name = 'csharp'
localname = 'CSharp Index'
shortname = 'CSharp'
def generate(self, docnames=None):
content = defaultdict(list)
objects = self.domain.get_objects()
objects = sorted(objects, key=lambda obj: obj[0])
for name, dispname, objtype, docname, anchor, _ in objects:
content[dispname.split('.')[-1][0].lower()].append(
(dispname, 0, docname, anchor, docname, '', objtype))
content = sorted(content.items())
return content, True
class CSharpDomain(Domain):
name = 'sphinxsharp'
label = 'C#'
roles = {
'type': CSharpXRefRole(),
'var': CSharpXRefRole(),
'prop': CSharpXRefRole(),
'meth': CSharpXRefRole(),
'enum': CSharpXRefRole()
}
object_types = {
'type': ObjType(_('type'), 'type', 'obj'),
'variable': ObjType(_('variable'), 'var', 'obj'),
'property': ObjType(_('property'), 'prop', 'obj'),
'method': ObjType(_('method'), 'meth', 'obj'),
'enum': ObjType(_('enum'), 'enum', 'obj')
}
directives = {
'namespace': CSharpNamespace,
'end-type': CSharpEndType,
'type': CSharpType,
'variable': CSharpVariable,
'property': CSharpProperty,
'method': CSharpMethod,
'enum': CSharpEnum
}
indices = {
CSharpIndex
}
initial_data = {
'objects': {} # (objtype, name) -> (docname, objtype(class, struct etc.))
}
def clear_doc(self, docname):
for (objtype, name), (doc, _) in self.data['objects'].copy().items():
if doc == docname:
del self.data['objects'][(objtype, name)]
def get_objects(self):
for (objtype, name), (docname, _) in self.data['objects'].items():
yield (name, name, objtype, docname, '{}-{}'.format(objtype, name), 0)
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
targets = get_targets(target, node)
objects = self.data['objects']
roletypes = self.objtypes_for_role(typ)
types = ('type', 'enum', 'method') if typ is None else roletypes
for t in targets:
for objtyp in types:
key = (objtyp, t)
if key in objects:
obj = objects[key]
if typ is not None:
role = self.role_for_objtype(objtyp)
node['reftype'] = role
else:
contnode = CSNodes.UnknownType(typ=obj[1], text=target)
return make_refnode(builder, fromdocname, obj[0],
'{}-{}'.format(objtyp, t), contnode,
'{} {}'.format(obj[1], t))
if typ is None:
contnode = CSNodes.UnknownType(text=target)
return None
def merge_domaindata(self, docnames, otherdata):
for (objtype, name), (docname, typ) in otherdata['objects'].items():
if docname in docnames:
self.data['objects'][(objtype, name)] = (docname, typ)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
for typ in self.roles:
xref = self.resolve_xref(env, fromdocname, builder, typ,
target, node, contnode)
if xref:
return [('sphinxsharp:{}'.format(typ), xref)]
return []
class CSNodes:
_TYPES = ('class', 'struct', 'interface', 'enum', 'delegate')
class BaseNode(nodes.Element):
def __init__(self, rawsource='', *children, **attributes):
super().__init__(rawsource, *children, **attributes)
@staticmethod
def visit_html(self, node):
self.body.append(self.starttag(node, 'div'))
@staticmethod
def depart_html(self, node):
self.body.append('</div>')
class EmptyNode(BaseNode):
def __init__(self, rawsource='', *children, **attributes):
super().__init__(rawsource, *children, **attributes)
@staticmethod
def visit_html(self, node): pass
@staticmethod
def depart_html(self, node): pass
class InlineText(BaseNode):
def __init__(self, rawsource, type_class, text, *children, **attributes):
super().__init__(rawsource, *children, **attributes)
if type_class is None:
return
self['classes'].append(type_class)
if text:
self.append(nodes.raw(text=text, format='html'))
@staticmethod
def visit_html(self, node):
self.body.append(self.starttag(node, 'span').replace('\n', ''))
@staticmethod
def depart_html(self, node):
self.body.append('</span>')
class Description(BaseNode):
def __init__(self, rawsource='', title='', desc='', *children, **attributes):
super().__init__(rawsource, *children, **attributes)
self['classes'].append('desc')
if title and desc:
if 'lower' not in attributes:
title = title[0].upper() + title[1:]
node = nodes.raw(
text='<strong class="first">{}:</strong><span class="last">{}</span>'.format(title, desc),
format='html')
self.append(node)
else:
raise Exception('Title and description must be assigned.')
class Modificator(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'mod', text, *children, **attributes)
class UnknownType(InlineText):
def __init__(self, rawsource='', typ='', text='', *children, **attributes):
objclass = typ
if not text:
super().__init__(rawsource, None, text, *children, **attributes)
return
if typ not in CSNodes._TYPES:
objclass = 'kw'
if typ not in VALUE_KEYWORDS:
objclass = 'unknown'
super().__init__(rawsource, objclass, text, *children, **attributes)
class TextNode(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'text', text, *children, **attributes)
class MethodName(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'meth-name', text, *children, **attributes)
class VariableName(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'var-name', text, *children, **attributes)
class Keyword(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'kw', text, *children, **attributes)
class Enum(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'enum', text, *children, **attributes)
class Generic(InlineText):
def __init__(self, rawsource='', text='', *children, **attributes):
super().__init__(rawsource, 'generic', text, *children, **attributes)
@staticmethod
def add_nodes(app):
app.add_node(CSNodes.Description,
html=(CSNodes.Description.visit_html, CSNodes.Description.depart_html))
app.add_node(CSNodes.Modificator,
html=(CSNodes.Modificator.visit_html, CSNodes.Modificator.depart_html))
app.add_node(CSNodes.UnknownType,
html=(CSNodes.UnknownType.visit_html, CSNodes.UnknownType.depart_html))
app.add_node(CSNodes.TextNode,
html=(CSNodes.TextNode.visit_html, CSNodes.TextNode.depart_html))
app.add_node(CSNodes.Enum,
html=(CSNodes.Enum.visit_html, CSNodes.Enum.depart_html))
app.add_node(CSNodes.Keyword,
html=(CSNodes.Keyword.visit_html, CSNodes.Keyword.depart_html))
app.add_node(CSNodes.MethodName,
html=(CSNodes.MethodName.visit_html, CSNodes.MethodName.depart_html))
app.add_node(CSNodes.VariableName,
html=(CSNodes.VariableName.visit_html, CSNodes.VariableName.depart_html))
app.add_node(CSNodes.BaseNode,
html=(CSNodes.BaseNode.visit_html, CSNodes.BaseNode.depart_html))
app.add_node(CSNodes.EmptyNode,
html=(CSNodes.EmptyNode.visit_html, CSNodes.EmptyNode.depart_html))
app.add_node(CSNodes.Generic,
html=(CSNodes.Generic.visit_html, CSNodes.Generic.depart_html))
def split_sig(params):
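    # Split a comma-separated signature on top-level commas only, ignoring commas nested inside <>, {} or []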
if not params:
return None
result = []
current = ''
level = 0
for char in params:
if char in ('<', '{', '['):
level += 1
elif char in ('>', '}', ']'):
level -= 1
if char != ',' or level > 0:
current += char
elif char == ',' and level == 0:
result.append(current)
current = ''
if current.strip() != '':
result.append(current)
return result
def get_targets(target, node):
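    # Try the bare target first, then the target qualified by the parent path trimmed one component at a time from the right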
targets = [target]
if node[CSharpObject.PARENT_ATTR_NAME] is not None:
parts = node[CSharpObject.PARENT_ATTR_NAME].split('.')
while parts:
targets.append('{}.{}'.format('.'.join(parts), target))
parts = parts[:-1]
return targets
def copy_asset_files(app, exc):
package_dir = path.abspath(path.dirname(__file__))
asset_files = [path.join(package_dir, '_static/css/sphinxsharp.css')]
if exc is None: # build succeeded
for asset_path in asset_files:
copy_asset(asset_path, path.join(app.outdir, '_static'))
def setup(app):
app.connect('build-finished', copy_asset_files)
package_dir = path.abspath(path.dirname(__file__))
app.add_domain(CSharpDomain)
app.add_css_file('sphinxsharp.css')
override_file = path.join(app.confdir, '_static/sphinxsharp-override.css')
if path.exists(override_file):
app.add_css_file('sphinxsharp-override.css')
CSNodes.add_nodes(app)
locale_dir = path.join(package_dir, 'locales')
app.add_message_catalog('sphinxsharp', locale_dir)
return {
'version': '1.0.2',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
[
"sphinx.addnodes.desc_name",
"os.path.exists",
"sphinx.addnodes.index",
"collections.namedtuple",
"sphinx.addnodes.desc_type",
"re.compile",
"os.path.join",
"sphinx.addnodes.desc_content",
"os.path.dirname",
"sphinx.addnodes.pending_xref",
"sphinx.addnodes.desc",
"collections.defaultdict",
"sphinx.addnodes.desc_signature",
"sphinx.util.docfields.DocFieldTransformer",
"docutils.nodes.raw",
"sphinx.locale.get_translation"
] |
[((1293, 1400), 're.compile', 're.compile', (["('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?(\\\\w+)\\\\s([\\\\w\\\\.]+)(?:<(.+)>)?(?:\\\\s?\\\\:\\\\s?(.+))?$')"], {}), "('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?(\\\\w+)\\\\s([\\\\w\\\\.]+)(?:<(.+)>)?(?:\\\\s?\\\\:\\\\s?(.+))?$')\n", (1303, 1400), False, 'import re\n'), ((1430, 1519), 're.compile', 're.compile', (['"""^(?:(new)\\\\s+)?([\\\\w\\\\.]+)\\\\s*(?:<(.+)>)*(\\\\[\\\\])*\\\\s?(?:\\\\((.*)\\\\))?$"""'], {}), "(\n '^(?:(new)\\\\s+)?([\\\\w\\\\.]+)\\\\s*(?:<(.+)>)*(\\\\[\\\\])*\\\\s?(?:\\\\((.*)\\\\))?$')\n", (1440, 1519), False, 'import re\n'), ((1523, 1659), 're.compile', 're.compile', (["('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?([^\\\\s=\\\\(\\\\)]+\\\\s+)?([^\\\\s=\\\\(\\\\)]+)\\\\s?(?:\\\\<(.+)\\\\>)?\\\\s?(?:\\\\((.+)*\\\\))$'\n )"], {}), "('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?([^\\\\s=\\\\(\\\\)]+\\\\s+)?([^\\\\s=\\\\(\\\\)]+)\\\\s?(?:\\\\<(.+)\\\\>)?\\\\s?(?:\\\\((.+)*\\\\))$'\n )\n", (1533, 1659), False, 'import re\n'), ((1681, 1778), 're.compile', 're.compile', (["('^(?:(?:(' + PARAM_MODIFIERS_RE +\n ')\\\\s)*)?([^=]+)\\\\s+([^=]+)\\\\s*(?:=\\\\s?(.+))?$')"], {}), "('^(?:(?:(' + PARAM_MODIFIERS_RE +\n ')\\\\s)*)?([^=]+)\\\\s+([^=]+)\\\\s*(?:=\\\\s?(.+))?$')\n", (1691, 1778), False, 'import re\n'), ((1786, 1881), 're.compile', 're.compile', (["('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?([^=]+)\\\\s+([^\\\\s=]+)\\\\s*(?:=\\\\s*(.+))?$')"], {}), "('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?([^=]+)\\\\s+([^\\\\s=]+)\\\\s*(?:=\\\\s*(.+))?$')\n", (1796, 1881), False, 'import re\n'), ((1889, 2031), 're.compile', 're.compile', (["('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?(.+)\\\\s+([^\\\\s]+)\\\\s*(?:{(\\\\s*get;\\\\s*)?((?:' + MODIFIERS_RE +\n ')?\\\\s*set;\\\\s*)?})$')"], {}), "('^((?:(?:' + MODIFIERS_RE +\n ')\\\\s+)*)?(.+)\\\\s+([^\\\\s]+)\\\\s*(?:{(\\\\s*get;\\\\s*)?((?:' + MODIFIERS_RE +\n ')?\\\\s*set;\\\\s*)?})$')\n", (1899, 2031), False, 'import re\n'), ((2083, 2153), 're.compile', 're.compile', (["('^((?:(?:' + MODIFIERS_RE + ')\\\\s+)*)?(?:enum)\\\\s?(\\\\w+)$')"], {}), "('^((?:(?:' + MODIFIERS_RE + ')\\\\s+)*)?(?:enum)\\\\s?(\\\\w+)$')\n", (2093, 2153), False, 'import re\n'), ((2158, 2188), 'sphinx.locale.get_translation', 'get_translation', (['"""sphinxsharp"""'], {}), "('sphinxsharp')\n", (2173, 2188), False, 'from sphinx.locale import get_translation\n'), ((2334, 2398), 'collections.namedtuple', 'namedtuple', (['"""ParentType"""', "['parent', 'name', 'type', 'override']"], {}), "('ParentType', ['parent', 'name', 'type', 'override'])\n", (2344, 2398), False, 'from collections import defaultdict, namedtuple\n'), ((34371, 34429), 'os.path.join', 'path.join', (['app.confdir', '"""_static/sphinxsharp-override.css"""'], {}), "(app.confdir, '_static/sphinxsharp-override.css')\n", (34380, 34429), False, 'from os import path\n'), ((34437, 34463), 'os.path.exists', 'path.exists', (['override_file'], {}), '(override_file)\n', (34448, 34463), False, 'from os import path\n'), ((34563, 34596), 'os.path.join', 'path.join', (['package_dir', '"""locales"""'], {}), "(package_dir, 'locales')\n", (34572, 34596), False, 'from os import path\n'), ((2841, 2867), 'sphinx.addnodes.index', 'addnodes.index', ([], {'entries': '[]'}), '(entries=[])\n', (2855, 2867), False, 'from sphinx import addnodes\n'), ((2884, 2899), 'sphinx.addnodes.desc', 'addnodes.desc', ([], {}), '()\n', (2897, 2899), False, 'from sphinx import addnodes\n'), ((4062, 4085), 'sphinx.addnodes.desc_content', 'addnodes.desc_content', ([], {}), '()\n', (4083, 4085), 
False, 'from sphinx import addnodes\n'), ((9554, 9574), 'sphinx.addnodes.desc_type', 'addnodes.desc_type', ([], {}), '()\n', (9572, 9574), False, 'from sphinx import addnodes\n'), ((24286, 24303), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (24297, 24303), False, 'from collections import defaultdict, namedtuple\n'), ((33907, 33929), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (33919, 33929), False, 'from os import path\n'), ((33950, 34003), 'os.path.join', 'path.join', (['package_dir', '"""_static/css/sphinxsharp.css"""'], {}), "(package_dir, '_static/css/sphinxsharp.css')\n", (33959, 34003), False, 'from os import path\n'), ((34253, 34275), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (34265, 34275), False, 'from os import path\n'), ((3369, 3401), 'sphinx.addnodes.desc_signature', 'addnodes.desc_signature', (['sig', '""""""'], {}), "(sig, '')\n", (3392, 3401), False, 'from sphinx import addnodes\n'), ((4408, 4433), 'sphinx.util.docfields.DocFieldTransformer', 'DocFieldTransformer', (['self'], {}), '(self)\n', (4427, 4433), False, 'from sphinx.util.docfields import DocFieldTransformer\n'), ((9956, 10071), 'sphinx.addnodes.pending_xref', 'addnodes.pending_xref', (['""""""'], {'refdomain': '"""sphinxsharp"""', 'reftype': 'None', 'reftarget': 'styp', 'modname': 'None', 'classname': 'None'}), "('', refdomain='sphinxsharp', reftype=None, reftarget=\n styp, modname=None, classname=None)\n", (9977, 10071), False, 'from sphinx import addnodes\n'), ((34119, 34151), 'os.path.join', 'path.join', (['app.outdir', '"""_static"""'], {}), "(app.outdir, '_static')\n", (34128, 34151), False, 'from os import path\n'), ((3681, 3709), 'sphinx.addnodes.desc_name', 'addnodes.desc_name', (['sig', 'sig'], {}), '(sig, sig)\n', (3699, 3709), False, 'from sphinx import addnodes\n'), ((28775, 28810), 'docutils.nodes.raw', 'nodes.raw', ([], {'text': 'text', 'format': '"""html"""'}), "(text=text, format='html')\n", (28784, 28810), False, 'from docutils import nodes\n')]
|
from KratosMultiphysics import ParallelEnvironment, IsDistributedRun
if IsDistributedRun():
from KratosMultiphysics.mpi import DataCommunicatorFactory
import KratosMultiphysics.KratosUnittest as UnitTest
import math
class TestDataCommunicatorFactory(UnitTest.TestCase):
def setUp(self):
self.registered_comms = []
self.default_data_communicator = ParallelEnvironment.GetDefaultDataCommunicator()
self.original_default = ParallelEnvironment.GetDefaultDataCommunicatorName()
def tearDown(self):
if len(self.registered_comms) > 0:
ParallelEnvironment.SetDefaultDataCommunicator(self.original_default)
for comm_name in self.registered_comms:
ParallelEnvironment.UnregisterDataCommunicator(comm_name)
def markForCleanUp(self,comm_name):
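        # Record the communicator name so tearDown can unregister it and restore the default.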
self.registered_comms.append(comm_name)
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorDuplication(self):
duplicate_comm = DataCommunicatorFactory.DuplicateAndRegister(self.default_data_communicator, "Duplicate")
self.markForCleanUp("Duplicate") # to clean up during tearDown
self.assertEqual(duplicate_comm.Rank(), self.default_data_communicator.Rank())
self.assertEqual(duplicate_comm.Size(), self.default_data_communicator.Size())
@UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.")
def testDataCommunicatorSplit(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
split_comm = DataCommunicatorFactory.SplitAndRegister(self.default_data_communicator, rank % 2, 0, "EvenOdd")
self.markForCleanUp("EvenOdd") # to clean up during tearDown
expected_rank = rank // 2
if rank % 2 == 0:
expected_size = math.ceil(size/2)
else:
expected_size = math.floor(size/2)
self.assertEqual(split_comm.Rank(), expected_rank)
self.assertEqual(split_comm.Size(), expected_size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 1, "Test requires at least two ranks.")
def testDataCommunicatorCreateFromRange(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
ranks = [i for i in range(1,size)]
range_comm = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, ranks, "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
if rank == 0:
self.assertTrue(range_comm.IsNullOnThisRank())
self.assertFalse(range_comm.IsDefinedOnThisRank())
else:
self.assertEqual(range_comm.Rank(), rank-1)
self.assertEqual(range_comm.Size(), size-1)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateUnion(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
# Create union communicator (should contain all ranks)
union_comm = DataCommunicatorFactory.CreateUnionAndRegister(all_except_first, all_except_last, self.default_data_communicator, "Union")
self.markForCleanUp("Union") # to clean up during tearDown
self.assertFalse(union_comm.IsNullOnThisRank())
self.assertEqual(union_comm.Rank(), rank)
self.assertEqual(union_comm.Size(), size)
@UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.")
def testDataCommunicatorCreateIntersection(self):
rank = self.default_data_communicator.Rank()
size = self.default_data_communicator.Size()
# Create a communicator using all ranks except the first
all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst")
self.markForCleanUp("AllExceptFirst") # to clean up during tearDown
all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast")
self.markForCleanUp("AllExceptLast") # to clean up during tearDown
intersection_comm = DataCommunicatorFactory.CreateIntersectionAndRegister(
all_except_first, all_except_last, self.default_data_communicator, "Intersection")
self.markForCleanUp("Intersection") # to clean up during tearDown
if rank == 0 or rank == size - 1:
# The first and last ranks do not participate in the intersection communicator
self.assertTrue(intersection_comm.IsNullOnThisRank())
else:
self.assertEqual(intersection_comm.Rank(), rank - 1 )
self.assertEqual(intersection_comm.Size(), size - 2 )
if __name__ == "__main__":
UnitTest.main()
|
[
"KratosMultiphysics.ParallelEnvironment.GetDefaultDataCommunicator",
"KratosMultiphysics.mpi.DataCommunicatorFactory.CreateUnionAndRegister",
"math.ceil",
"KratosMultiphysics.mpi.DataCommunicatorFactory.SplitAndRegister",
"math.floor",
"KratosMultiphysics.KratosUnittest.main",
"KratosMultiphysics.mpi.DataCommunicatorFactory.DuplicateAndRegister",
"KratosMultiphysics.mpi.DataCommunicatorFactory.CreateFromRanksAndRegister",
"KratosMultiphysics.mpi.DataCommunicatorFactory.CreateIntersectionAndRegister",
"KratosMultiphysics.ParallelEnvironment.UnregisterDataCommunicator",
"KratosMultiphysics.ParallelEnvironment.GetDefaultSize",
"KratosMultiphysics.IsDistributedRun",
"KratosMultiphysics.ParallelEnvironment.SetDefaultDataCommunicator",
"KratosMultiphysics.ParallelEnvironment.GetDefaultDataCommunicatorName"
] |
[((72, 90), 'KratosMultiphysics.IsDistributedRun', 'IsDistributedRun', ([], {}), '()\n', (88, 90), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((5606, 5621), 'KratosMultiphysics.KratosUnittest.main', 'UnitTest.main', ([], {}), '()\n', (5619, 5621), True, 'import KratosMultiphysics.KratosUnittest as UnitTest\n'), ((374, 422), 'KratosMultiphysics.ParallelEnvironment.GetDefaultDataCommunicator', 'ParallelEnvironment.GetDefaultDataCommunicator', ([], {}), '()\n', (420, 422), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((455, 507), 'KratosMultiphysics.ParallelEnvironment.GetDefaultDataCommunicatorName', 'ParallelEnvironment.GetDefaultDataCommunicatorName', ([], {}), '()\n', (505, 507), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((1015, 1108), 'KratosMultiphysics.mpi.DataCommunicatorFactory.DuplicateAndRegister', 'DataCommunicatorFactory.DuplicateAndRegister', (['self.default_data_communicator', '"""Duplicate"""'], {}), "(self.default_data_communicator,\n 'Duplicate')\n", (1059, 1108), False, 'from KratosMultiphysics.mpi import DataCommunicatorFactory\n'), ((899, 917), 'KratosMultiphysics.IsDistributedRun', 'IsDistributedRun', ([], {}), '()\n', (915, 917), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((1589, 1690), 'KratosMultiphysics.mpi.DataCommunicatorFactory.SplitAndRegister', 'DataCommunicatorFactory.SplitAndRegister', (['self.default_data_communicator', '(rank % 2)', '(0)', '"""EvenOdd"""'], {}), "(self.default_data_communicator, \n rank % 2, 0, 'EvenOdd')\n", (1629, 1690), False, 'from KratosMultiphysics.mpi import DataCommunicatorFactory\n'), ((1377, 1395), 'KratosMultiphysics.IsDistributedRun', 'IsDistributedRun', ([], {}), '()\n', (1393, 1395), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((2457, 2569), 'KratosMultiphysics.mpi.DataCommunicatorFactory.CreateFromRanksAndRegister', 'DataCommunicatorFactory.CreateFromRanksAndRegister', (['self.default_data_communicator', 'ranks', '"""AllExceptFirst"""'], {}), "(self.\n default_data_communicator, ranks, 'AllExceptFirst')\n", (2507, 2569), False, 'from KratosMultiphysics.mpi import DataCommunicatorFactory\n'), ((3809, 3935), 'KratosMultiphysics.mpi.DataCommunicatorFactory.CreateUnionAndRegister', 'DataCommunicatorFactory.CreateUnionAndRegister', (['all_except_first', 'all_except_last', 'self.default_data_communicator', '"""Union"""'], {}), "(all_except_first,\n all_except_last, self.default_data_communicator, 'Union')\n", (3855, 3935), False, 'from KratosMultiphysics.mpi import DataCommunicatorFactory\n'), ((5004, 5144), 'KratosMultiphysics.mpi.DataCommunicatorFactory.CreateIntersectionAndRegister', 'DataCommunicatorFactory.CreateIntersectionAndRegister', (['all_except_first', 'all_except_last', 'self.default_data_communicator', '"""Intersection"""'], {}), "(all_except_first,\n all_except_last, self.default_data_communicator, 'Intersection')\n", (5057, 5144), False, 'from KratosMultiphysics.mpi import DataCommunicatorFactory\n'), ((588, 657), 'KratosMultiphysics.ParallelEnvironment.SetDefaultDataCommunicator', 'ParallelEnvironment.SetDefaultDataCommunicator', (['self.original_default'], {}), '(self.original_default)\n', (634, 657), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((1844, 1863), 'math.ceil', 'math.ceil', (['(size / 2)'], {}), '(size / 2)\n', (1853, 1863), False, 'import math\n'), ((1904, 1924), 'math.floor', 
'math.floor', (['(size / 2)'], {}), '(size / 2)\n', (1914, 1924), False, 'import math\n'), ((2068, 2086), 'KratosMultiphysics.IsDistributedRun', 'IsDistributedRun', ([], {}), '()\n', (2084, 2086), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((2938, 2956), 'KratosMultiphysics.IsDistributedRun', 'IsDistributedRun', ([], {}), '()\n', (2954, 2956), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((4182, 4200), 'KratosMultiphysics.IsDistributedRun', 'IsDistributedRun', ([], {}), '()\n', (4198, 4200), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((726, 783), 'KratosMultiphysics.ParallelEnvironment.UnregisterDataCommunicator', 'ParallelEnvironment.UnregisterDataCommunicator', (['comm_name'], {}), '(comm_name)\n', (772, 783), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((2091, 2127), 'KratosMultiphysics.ParallelEnvironment.GetDefaultSize', 'ParallelEnvironment.GetDefaultSize', ([], {}), '()\n', (2125, 2127), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((2961, 2997), 'KratosMultiphysics.ParallelEnvironment.GetDefaultSize', 'ParallelEnvironment.GetDefaultSize', ([], {}), '()\n', (2995, 2997), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n'), ((4205, 4241), 'KratosMultiphysics.ParallelEnvironment.GetDefaultSize', 'ParallelEnvironment.GetDefaultSize', ([], {}), '()\n', (4239, 4241), False, 'from KratosMultiphysics import ParallelEnvironment, IsDistributedRun\n')]
|
from __future__ import print_function
try:
import vkaudiotoken
except ImportError:
import path_hack
from vkaudiotoken import supported_clients
import sys
import requests
import json
token = sys.argv[1]
user_agent = supported_clients.KATE.user_agent
sess = requests.session()
sess.headers.update({'User-Agent': user_agent})
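# Decode a VK API response and pretty-print its JSON payload.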
def prettyprint(result):
print(json.dumps(json.loads(result.content.decode('utf-8')), indent=2))
prettyprint(sess.get(
"https://api.vk.com/method/audio.getById",
params=[('access_token', token),
('audios', '371745461_456289486,-41489995_202246189'),
('v', '5.95')]
))
|
[
"requests.session"
] |
[((268, 286), 'requests.session', 'requests.session', ([], {}), '()\n', (284, 286), False, 'import requests\n')]
|
# -*- coding: utf-8 -*-
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
exchange = ccxt.binance({
'apiKey': '<KEY>',
'secret': '<KEY>',
'enableRateLimit': True,
})
exchange.urls['api'] = exchange.urls['test'] # use the testnet
symbol = 'BTC/USDT'; type = 'market' # or limit
amount = 0.01; price = None; side = 'buy' # or sell
# extra params and overrides if needed
params = {
'test': True, # test if it's valid, but don't actually place it
}
order = exchange.create_order(symbol, type, side, amount, price, params)
pprint(order)
|
[
"os.path.abspath",
"ccxt.binance",
"sys.path.append",
"pprint.pprint"
] |
[((156, 189), 'sys.path.append', 'sys.path.append', (["(root + '/python')"], {}), "(root + '/python')\n", (171, 189), False, 'import sys\n'), ((230, 307), 'ccxt.binance', 'ccxt.binance', (["{'apiKey': '<KEY>', 'secret': '<KEY>', 'enableRateLimit': True}"], {}), "({'apiKey': '<KEY>', 'secret': '<KEY>', 'enableRateLimit': True})\n", (242, 307), False, 'import ccxt\n'), ((679, 692), 'pprint.pprint', 'pprint', (['order'], {}), '(order)\n', (685, 692), False, 'from pprint import pprint\n'), ((127, 152), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (142, 152), False, 'import os\n')]
|
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import os
import re
from . import base
class OutProc(base.ExpectedOutProc):
def __init__(self, expected_outcomes, basepath, expected_fail,
expected_filename, regenerate_expected_files):
super(OutProc, self).__init__(expected_outcomes, expected_filename,
regenerate_expected_files)
self._basepath = basepath
self._expected_fail = expected_fail
def _is_failure_output(self, output):
fail = output.exit_code != 0
if fail != self._expected_fail:
return True
expected_lines = []
# Can't use utils.ReadLinesFrom() here because it strips whitespace.
with open(self._basepath + '.out') as f:
for line in f:
if line.startswith("#") or not line.strip():
continue
expected_lines.append(line)
raw_lines = output.stdout.splitlines()
actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ]
if len(expected_lines) != len(actual_lines):
return True
# Try .js first, and fall back to .mjs.
# TODO(v8:9406): clean this up by never separating the path from
# the extension in the first place.
base_path = self._basepath + '.js'
if not os.path.exists(base_path):
base_path = self._basepath + '.mjs'
env = {
'basename': os.path.basename(base_path),
}
for (expected, actual) in itertools.izip_longest(
expected_lines, actual_lines, fillvalue=''):
pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?')
pattern = '^%s$' % pattern
if not re.match(pattern, actual):
return True
return False
def _ignore_line(self, string):
"""Ignore empty lines, valgrind output, Android output."""
return (
not string or
not string.strip() or
string.startswith("==") or
string.startswith("**") or
string.startswith("ANDROID") or
# Android linker warning.
string.startswith('WARNING: linker:')
)
|
[
"os.path.exists",
"itertools.izip_longest",
"re.match",
"os.path.basename"
] |
[((1542, 1608), 'itertools.izip_longest', 'itertools.izip_longest', (['expected_lines', 'actual_lines'], {'fillvalue': '""""""'}), "(expected_lines, actual_lines, fillvalue='')\n", (1564, 1608), False, 'import itertools\n'), ((1377, 1402), 'os.path.exists', 'os.path.exists', (['base_path'], {}), '(base_path)\n', (1391, 1402), False, 'import os\n'), ((1477, 1504), 'os.path.basename', 'os.path.basename', (['base_path'], {}), '(base_path)\n', (1493, 1504), False, 'import os\n'), ((1826, 1851), 're.match', 're.match', (['pattern', 'actual'], {}), '(pattern, actual)\n', (1834, 1851), False, 'import re\n')]
|
import pytest
from Thycotic import Client, \
secret_password_get_command, secret_username_get_command, \
secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, \
secret_delete_command, folder_create_command, folder_delete_command, folder_update_command
from test_data.context import GET_PASSWORD_BY_ID_CONTEXT, GET_USERNAME_BY_ID_CONTENT, \
SECRET_GET_CONTENT, SECRET_PASSWORD_UPDATE_CONTEXT, SECRET_CHECKOUT_CONTEXT, SECRET_CHECKIN_CONTEXT, \
SECRET_DELETE_CONTEXT, FOLDER_CREATE_CONTEXT, FOLDER_DELETE_CONTEXT, FOLDER_UPDATE_CONTEXT
from test_data.http_responses import GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_RAW_RESPONSE, \
SECRET_GET_RAW_RESPONSE, SECRET_PASSWORD_UPDATE_RAW_RESPONSE, SECRET_CHECKOUT_RAW_RESPONSE, \
SECRET_CHECKIN_RAW_RESPONSE, SECRET_DELETE_RAW_RESPONSE, FOLDER_CREATE_RAW_RESPONSE, FOLDER_DELETE_RAW_RESPONSE, \
FOLDER_UPDATE_RAW_RESPONSE
GET_PASSWORD_BY_ID_ARGS = {"secret_id": "4"}
GET_USERNAME_BY_ID_ARGS = {"secret_id": "4"}
SECRET_GET_ARGS = {"secret_id": "4"}
SECRET_PASSWORD_UPDATE_ARGS = {"secret_id": "4", "newpassword": "<PASSWORD>"}
SECRET_CHECKOUT_ARGS = {"secret_id": "4"}
SECRET_CHECKIN_ARGS = {"secret_id": "4"}
SECRET_DELETE_ARGS = {"id": "9"}
FOLDER_CREATE_ARGS = {"folderName": "xsoarFolderTest3", "folderTypeId": "1", "parentFolderId": "3"}
FOLDER_DELETE_ARGS = {"folder_id": "9"}
FOLDER_UPDATE_ARGS = {"id": "12", "folderName": "xsoarTF3New"}
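# Each case pairs a command with its arguments, the mocked HTTP response, and the expected entry context.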
@pytest.mark.parametrize('command, args, http_response, context', [
(secret_password_get_command, GET_PASSWORD_BY_ID_ARGS, GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_PASSWORD_BY_ID_CONTEXT),
(secret_username_get_command, GET_USERNAME_BY_ID_ARGS, GET_USERNAME_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_CONTENT),
(secret_get_command, SECRET_GET_ARGS, SECRET_GET_RAW_RESPONSE, SECRET_GET_CONTENT),
(secret_password_update_command, SECRET_PASSWORD_UPDATE_ARGS, SECRET_PASSWORD_UPDATE_RAW_RESPONSE,
SECRET_PASSWORD_UPDATE_CONTEXT),
(secret_checkout_command, SECRET_CHECKOUT_ARGS, SECRET_CHECKOUT_RAW_RESPONSE, SECRET_CHECKOUT_CONTEXT),
(secret_checkin_command, SECRET_CHECKIN_ARGS, SECRET_CHECKIN_RAW_RESPONSE, SECRET_CHECKIN_CONTEXT),
(secret_delete_command, SECRET_DELETE_ARGS, SECRET_DELETE_RAW_RESPONSE, SECRET_DELETE_CONTEXT),
(folder_create_command, FOLDER_CREATE_ARGS, FOLDER_CREATE_RAW_RESPONSE, FOLDER_CREATE_CONTEXT),
(folder_delete_command, FOLDER_DELETE_ARGS, FOLDER_DELETE_RAW_RESPONSE, FOLDER_DELETE_CONTEXT),
(folder_update_command, FOLDER_UPDATE_ARGS, FOLDER_UPDATE_RAW_RESPONSE, FOLDER_UPDATE_CONTEXT)
])
def test_thycotic_commands(command, args, http_response, context, mocker):
mocker.patch.object(Client, '_generate_token')
client = Client(server_url="https://thss.softwarium.net/SecretServer", username="xsoar1", password="<PASSWORD>",
proxy=False, verify=False)
mocker.patch.object(Client, '_http_request', return_value=http_response)
outputs = command(client, **args)
results = outputs.to_context()
assert results.get("EntryContext") == context
|
[
"pytest.mark.parametrize",
"Thycotic.Client"
] |
[((1483, 2660), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""command, args, http_response, context"""', '[(secret_password_get_command, GET_PASSWORD_BY_ID_ARGS,\n GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_PASSWORD_BY_ID_CONTEXT), (\n secret_username_get_command, GET_USERNAME_BY_ID_ARGS,\n GET_USERNAME_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_CONTENT), (\n secret_get_command, SECRET_GET_ARGS, SECRET_GET_RAW_RESPONSE,\n SECRET_GET_CONTENT), (secret_password_update_command,\n SECRET_PASSWORD_UPDATE_ARGS, SECRET_PASSWORD_UPDATE_RAW_RESPONSE,\n SECRET_PASSWORD_UPDATE_CONTEXT), (secret_checkout_command,\n SECRET_CHECKOUT_ARGS, SECRET_CHECKOUT_RAW_RESPONSE,\n SECRET_CHECKOUT_CONTEXT), (secret_checkin_command, SECRET_CHECKIN_ARGS,\n SECRET_CHECKIN_RAW_RESPONSE, SECRET_CHECKIN_CONTEXT), (\n secret_delete_command, SECRET_DELETE_ARGS, SECRET_DELETE_RAW_RESPONSE,\n SECRET_DELETE_CONTEXT), (folder_create_command, FOLDER_CREATE_ARGS,\n FOLDER_CREATE_RAW_RESPONSE, FOLDER_CREATE_CONTEXT), (\n folder_delete_command, FOLDER_DELETE_ARGS, FOLDER_DELETE_RAW_RESPONSE,\n FOLDER_DELETE_CONTEXT), (folder_update_command, FOLDER_UPDATE_ARGS,\n FOLDER_UPDATE_RAW_RESPONSE, FOLDER_UPDATE_CONTEXT)]'], {}), "('command, args, http_response, context', [(\n secret_password_get_command, GET_PASSWORD_BY_ID_ARGS,\n GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_PASSWORD_BY_ID_CONTEXT), (\n secret_username_get_command, GET_USERNAME_BY_ID_ARGS,\n GET_USERNAME_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_CONTENT), (\n secret_get_command, SECRET_GET_ARGS, SECRET_GET_RAW_RESPONSE,\n SECRET_GET_CONTENT), (secret_password_update_command,\n SECRET_PASSWORD_UPDATE_ARGS, SECRET_PASSWORD_UPDATE_RAW_RESPONSE,\n SECRET_PASSWORD_UPDATE_CONTEXT), (secret_checkout_command,\n SECRET_CHECKOUT_ARGS, SECRET_CHECKOUT_RAW_RESPONSE,\n SECRET_CHECKOUT_CONTEXT), (secret_checkin_command, SECRET_CHECKIN_ARGS,\n SECRET_CHECKIN_RAW_RESPONSE, SECRET_CHECKIN_CONTEXT), (\n secret_delete_command, SECRET_DELETE_ARGS, SECRET_DELETE_RAW_RESPONSE,\n SECRET_DELETE_CONTEXT), (folder_create_command, FOLDER_CREATE_ARGS,\n FOLDER_CREATE_RAW_RESPONSE, FOLDER_CREATE_CONTEXT), (\n folder_delete_command, FOLDER_DELETE_ARGS, FOLDER_DELETE_RAW_RESPONSE,\n FOLDER_DELETE_CONTEXT), (folder_update_command, FOLDER_UPDATE_ARGS,\n FOLDER_UPDATE_RAW_RESPONSE, FOLDER_UPDATE_CONTEXT)])\n", (1506, 2660), False, 'import pytest\n'), ((2775, 2910), 'Thycotic.Client', 'Client', ([], {'server_url': '"""https://thss.softwarium.net/SecretServer"""', 'username': '"""xsoar1"""', 'password': '"""<PASSWORD>"""', 'proxy': '(False)', 'verify': '(False)'}), "(server_url='https://thss.softwarium.net/SecretServer', username=\n 'xsoar1', password='<PASSWORD>', proxy=False, verify=False)\n", (2781, 2910), False, 'from Thycotic import Client, secret_password_get_command, secret_username_get_command, secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, secret_delete_command, folder_create_command, folder_delete_command, folder_update_command\n')]
|
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
def xml_to_csv(path):
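    # Parse every Pascal VOC-style XML annotation in the given directory and collect one row per labelled object.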
xml_list = []
for xml_file in glob.glob(path + '/*.xml'):
tree = ET.parse(xml_file)
root = tree.getroot()
for member in root.findall('object'):
value = (root.find('filename').text,
member[0].text,
int(member[4][0].text),
int(member[4][2].text),
int(member[4][1].text),
int(member[4][3].text)
)
xml_list.append(value)
column_name = ['image_names', 'cell_type', 'xmin', 'xmax', 'ymin', 'ymax']
xml_df = pd.DataFrame(xml_list, columns=column_name)
return xml_df
def main():
for folder in ['train','test']:
image_path = os.path.join(os.getcwd(), ('images/' + folder))
xml_df = xml_to_csv(image_path)
xml_df.to_csv(('images/' + folder + '_labels.csv'), index=None)
print('Successfully converted xml to csv.')
main()
|
[
"pandas.DataFrame",
"xml.etree.ElementTree.parse",
"glob.glob",
"os.getcwd"
] |
[((139, 165), 'glob.glob', 'glob.glob', (["(path + '/*.xml')"], {}), "(path + '/*.xml')\n", (148, 165), False, 'import glob\n'), ((692, 735), 'pandas.DataFrame', 'pd.DataFrame', (['xml_list'], {'columns': 'column_name'}), '(xml_list, columns=column_name)\n', (704, 735), True, 'import pandas as pd\n'), ((182, 200), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file'], {}), '(xml_file)\n', (190, 200), True, 'import xml.etree.ElementTree as ET\n'), ((838, 849), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (847, 849), False, 'import os\n')]
|
import sys, os, json, jinja2, redis
from jinja2 import Template
r_server = redis.StrictRedis('127.0.0.1', db=2)
i_key = "owner-info"
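# The owner metadata stored in Redis supplies the base hostname used to build the ingress FQDN.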
json_data = r_server.get(i_key)
if json_data is not None:
data = json.loads(json_data)
main_domain = data['Hostname']
fqdn = sys.argv[1] + ".ext." + main_domain
config_template = open('/opt/madcore/bin/templates/ingress.template').read()
template = Template(config_template)
config = (template.render(HOST=fqdn, SERVICE_NAME=sys.argv[2], SERVICE_PORT=sys.argv[3], NAMESPACE=sys.argv[4]))
open("/opt/ingress/" + sys.argv[2] + ".yaml", "w").write(config)
|
[
"json.loads",
"redis.StrictRedis",
"jinja2.Template"
] |
[((76, 112), 'redis.StrictRedis', 'redis.StrictRedis', (['"""127.0.0.1"""'], {'db': '(2)'}), "('127.0.0.1', db=2)\n", (93, 112), False, 'import sys, os, json, jinja2, redis\n'), ((393, 418), 'jinja2.Template', 'Template', (['config_template'], {}), '(config_template)\n', (401, 418), False, 'from jinja2 import Template\n'), ((203, 224), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (213, 224), False, 'import sys, os, json, jinja2, redis\n')]
|
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals
from snapshottest import Snapshot
snapshots = Snapshot()
snapshots['test_keywords 1'] = '[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'
|
[
"snapshottest.Snapshot"
] |
[((156, 166), 'snapshottest.Snapshot', 'Snapshot', ([], {}), '()\n', (164, 166), False, 'from snapshottest import Snapshot\n')]
|
import numpy as np
from radix import radixConvert
c = radixConvert()
a = np.load("../../data/5/layer4.npy")
print(a.shape)
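# Quantise the layer activations: scale by 128 and round to signed 16-bit integers.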
a = a*128
a = np.around(a).astype(np.int16)
print(a)
a = np.load('../../data/6.npy')
a = a*128
a = np.around(a).astype(np.int8)
print(a.shape)
for i in range(84):
print(i)
print(a[i])
'''
a = a*128
print(a)
for i in range(a.shape[0]):
for j in range(a.shape[1]):
if a[i][j] > 127:
a[i][j] = 127
a = np.around(a).astype(np.int8)
print(a)
print(a[4][17])
weight_file = open('f1_rom.coe', 'w')
weight_file.write('MEMORY_INITIALIZATION_RADIX=2;\n')
weight_file.write('MEMORY_INITIALIZATION_VECTOR=\n')
for i in range(32):
for j in range(32):
if(i < 2 or i > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
elif(j < 2 or j > 29):
weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
else:
weight_file.write(c.dec2Bincmpmt(str(a[i-2][j-2]), 8)+',\n')
'''
|
[
"numpy.load",
"numpy.around",
"radix.radixConvert"
] |
[((54, 68), 'radix.radixConvert', 'radixConvert', ([], {}), '()\n', (66, 68), False, 'from radix import radixConvert\n'), ((74, 108), 'numpy.load', 'np.load', (['"""../../data/5/layer4.npy"""'], {}), "('../../data/5/layer4.npy')\n", (81, 108), True, 'import numpy as np\n'), ((183, 210), 'numpy.load', 'np.load', (['"""../../data/6.npy"""'], {}), "('../../data/6.npy')\n", (190, 210), True, 'import numpy as np\n'), ((139, 151), 'numpy.around', 'np.around', (['a'], {}), '(a)\n', (148, 151), True, 'import numpy as np\n'), ((225, 237), 'numpy.around', 'np.around', (['a'], {}), '(a)\n', (234, 237), True, 'import numpy as np\n')]
|
from saifooler.classifiers.classifier import Classifier
import torch
import json
import os
class ImageNetClassifier(Classifier):
def __init__(self, model, *args, **kwargs):
super().__init__(model, *args, **kwargs)
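        # Per-channel ImageNet normalization statistics (RGB order), used by normalize_image.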
self.std = torch.tensor([0.229, 0.224, 0.225], device=self.device)
self.mean = torch.tensor([0.485, 0.456, 0.406], device=self.device)
class_index_path = os.path.join(
os.path.dirname(__file__),
"imagenet_class_index.json"
)
self.class_dict = {
int(key): val[1]
for key, val in json.load(open(class_index_path)).items()
}
def to(self, device):
super().to(device)
self.mean = self.mean.to(device)
self.std = self.std.to(device)
def get_class_label(self, class_id: int):
return self.class_dict[class_id]
def normalize_image(self, image):
"""
:param image: tensor of shape (N, W, H, C)
:return: image normalized for ImageNet and permuted in the shape (N, C, W, H) which is the shape
used by torchvision models
"""
image = (image - self.mean) / self.std
image = image.permute(0, 3, 1, 2)
return image
|
[
"os.path.dirname",
"torch.tensor"
] |
[((248, 303), 'torch.tensor', 'torch.tensor', (['[0.229, 0.224, 0.225]'], {'device': 'self.device'}), '([0.229, 0.224, 0.225], device=self.device)\n', (260, 303), False, 'import torch\n'), ((324, 379), 'torch.tensor', 'torch.tensor', (['[0.485, 0.456, 0.406]'], {'device': 'self.device'}), '([0.485, 0.456, 0.406], device=self.device)\n', (336, 379), False, 'import torch\n'), ((434, 459), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (449, 459), False, 'import os\n')]
|
#!/usr/bin/env python
# coding: utf-8
# conda install pytorch>=1.6 cudatoolkit=10.2 -c pytorch
# wandb login XXX
import json
import logging
import os
import re
import sklearn
import time
from itertools import product
import numpy as np
import pandas as pd
import wandb
#from IPython import get_ipython
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
from simpletransformers.classification import MultiLabelClassificationModel
from sklearn.model_selection import train_test_split
truthy_values = ("true", "1", "y", "yes")
TAG = os.environ.get("TAG", "bertsification")
LANGS = [lang.strip() for lang in os.environ.get("LANGS", "es,ge,en,multi").lower().split(",")]
MODELNAMES = os.environ.get("MODELNAMES")
EVAL = os.environ.get("EVAL", "True").lower() in truthy_values
OVERWRITE = os.environ.get("OVERWRITE", "False").lower() in truthy_values
logging.basicConfig(level=logging.INFO, filename=time.strftime("models/{}-%Y-%m-%dT%H%M%S.log".format(TAG)))
with open('pid', 'w') as pid:
pid.write(str(os.getpid()))
logging.info("Experiment '{}' on {}, (eval = {}, pid = {})".format(
TAG, LANGS, str(EVAL), str(os.getpid()),
))
# SimpleTransformers (based on HuggingFace/Transformers) for Multilingual Scansion
# We will be using `simpletransformers`, a wrapper of `huggingface/transformers` to fine-tune different BERT-based and other architecture models with support for Spanish.
# Utils
def clean_text(string):
output = string.strip()
# replacements = (("“", '"'), ("”", '"'), ("//", ""), ("«", '"'), ("»",'"'))
replacements = (
("“", ''), ("”", ''), ("//", ""), ("«", ''), ("»",''), (",", ''),
(";", ''), (".", ''),
# ("?", ''), ("¿", ''), ("¡", ''), ("!", ''), ("-", ' '),
)
for replacement in replacements:
output = output.replace(*replacement)
# Any sequence of two or more spaces should be converted into one space
output = re.sub(r'(?is)\s+', ' ', output)
return output.strip()
def metric2binary(meter, pad=11):
    return ([1 if syllable == "+" else 0 for syllable in meter] + [0] * (pad - len(meter)))[:pad]
def label2metric(label):
return "".join("+" if l else "-" for l in label)
def flat_accuracy(preds, labels):
pred_flat = np.argmax(preds, axis=1).flatten()
labels_flat = labels.flatten()
return np.sum(pred_flat == labels_flat) / len(labels_flat)
# Spanish
# if not os.path.isfile("adso100.json"):
# get_ipython().system("averell export adso100 --filename adso100.json")
# if not os.path.isfile("adso.json"):
# get_ipython().system("averell export adso --filename adso.json")
es_test = (pd
.read_json(open("adso100.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
length=lambda x: x["metrical_pattern"].str.len()
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
es_test = es_test[es_test["length"] == 11]
es = (pd
.read_json(open("adso.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
length=lambda x: x["metrical_pattern"].str.len()
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter"})
)
es = es[~es["text"].isin(es_test["text"])][es["length"] == 11]
es["labels"] = es.meter.apply(metric2binary)
es_train, es_eval = train_test_split(
es[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("Spanish")
logging.info("- Lines: {} train, {} eval, {} test".format(es_train.shape[0], es_eval.shape[0], es_test.shape[0]))
# English
en_test = (pd
.read_csv("4b4v_prosodic_meter.csv")
.assign(
text=lambda x: x["text"].apply(clean_text),
length=lambda x: x["meter"].str.len()
)
.drop_duplicates("text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
en_test = en_test.query("length in (5,6,7,8,9,10,11)")
# if not os.path.isfile("ecpa.json"):
# get_ipython().system("averell export ecpa --filename ecpa.json")
en = (pd
.read_json(open("ecpa.json"))
.query("manually_checked == True")[["line_text", "metrical_pattern"]]
.assign(
line_text=lambda x: x["line_text"].apply(clean_text),
metrical_pattern=lambda x: x["metrical_pattern"].str.replace("|", "").str.replace("(", "").str.replace(")", "")
)
.assign(
length=lambda x: x["metrical_pattern"].str.len(),
)
.drop_duplicates("line_text")
.rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"})
)
en = en[~en["text"].isin(en_test["text"])].query("length in (5,6,7,8,9,10,11)")
en["labels"] = en.meter.apply(metric2binary)
en_train, en_eval = train_test_split(
en[["text", "labels"]], test_size=0.25, random_state=42)
logging.info("English")
logging.info("- Lines: {} train, {} eval, {} test".format(en_train.shape[0], en_eval.shape[0], en_test.shape[0]))
# sota
en_sota = sum(en_test.meter == en_test.sota) / en_test.meter.size
# German
ge = (pd
.read_csv("po-emo-metricalizer.csv")
.rename(columns={"verse": "text", "annotated_pattern": "meter", "metricalizer_pattern": "sota"})
.assign(
text=lambda x: x["text"].apply(clean_text),
length=lambda x: x["meter"].str.len()
)
.drop_duplicates("text")
.query("length in (5, 6, 7, 8, 9, 10, 11)")
)
ge["labels"] = ge.meter.apply(metric2binary)
ge_train_eval, ge_test = train_test_split(ge, test_size=0.15, random_state=42)
ge_train, ge_eval = train_test_split(
ge_train_eval[["text", "labels"]], test_size=0.176, random_state=42)
logging.info("German")
logging.info("- Lines: {} train, {} eval, {} test".format(ge_train.shape[0], ge_eval.shape[0], ge_test.shape[0]))
# sota
ge_sota = sum(ge_test.meter == ge_test.sota) / ge_test.meter.size
# training
# Multilingual inputs
# - bert bert-base-multilingual-cased
# - distilbert distilbert-base-multilingual-cased
# - xlmroberta, xlm-roberta-base
# - xlmroberta, xlm-roberta-large
# Only English
# - roberta roberta-base
# - roberta roberta-large
# - albert albert-xxlarge-v2
# You can set class weights by using the optional weight argument
models = (
# ("xlnet", "xlnet-base-cased"),
("bert", "bert-base-multilingual-cased"),
("distilbert", "distilbert-base-multilingual-cased"),
("roberta", "roberta-base"),
("roberta", "roberta-large"),
("xlmroberta", "xlm-roberta-base"),
("xlmroberta", "xlm-roberta-large"),
("electra", "google/electra-base-discriminator"),
("albert", "albert-base-v2"),
("albert", "albert-large-v2"),
)
if MODELNAMES:
models = [list(map(str.strip, modelname.split(",")))
for modelname in MODELNAMES.split(";")]
langs = LANGS or ("es", "ge", "en", "multi")
for lang, (model_type, model_name) in product(langs, models):
model_output = 'models/{}-{}-{}-{}'.format(TAG, lang, model_type, model_name.replace("/", "-"))
if OVERWRITE is False and os.path.exists(model_output):
logging.info("Skipping training of {} for {}".format(model_name, lang))
continue
logging.info("Starting training of {} for {}".format(model_name, lang))
run = wandb.init(project=model_output.split("/")[-1], reinit=True)
model = MultiLabelClassificationModel(
model_type, model_name, num_labels=11, args={
'output_dir': model_output,
'best_model_dir': '{}/best'.format(model_output),
'reprocess_input_data': True,
'overwrite_output_dir': True,
'use_cached_eval_features': True,
'num_train_epochs': 100, # For BERT, 2, 3, 4
'save_steps': 10000,
'early_stopping_patience': 5,
'evaluate_during_training': EVAL,
#'early_stopping_metric': "accuracy_score",
'evaluate_during_training_steps': 1000,
'early_stopping_delta': 0.00001,
'manual_seed': 42,
# 'learning_rate': 2e-5, # For BERT, 5e-5, 3e-5, 2e-5
# For BERT 16, 32. It could be 128, but with gradient_acc_steps set to 2 is equivalent
'train_batch_size': 16 if "large" in model_name else 32,
'eval_batch_size': 16 if "large" in model_name else 32,
# Doubles train_batch_size, but gradients and wrights are calculated once every 2 steps
'gradient_accumulation_steps': 2 if "large" in model_name else 1,
'max_seq_length': 32,
'use_early_stopping': True,
'wandb_project': model_output.split("/")[-1],
#'wandb_kwargs': {'reinit': True},
# "adam_epsilon": 3e-5, # 1e-8
"silent": False,
"fp16": False,
"n_gpu": 2,
})
# train the model
if lang == "multi":
train_df = pd.concat([es_train, en_train, ge_train], ignore_index=True)
eval_df = pd.concat([es_eval, en_eval, ge_eval], ignore_index=True)
elif lang == "es":
train_df = es_train
eval_df = es_eval
elif lang == "en":
train_df = en_train
eval_df = en_eval
elif lang == "ge":
train_df = ge_train
eval_df = ge_eval
if EVAL:
model.train_model(train_df, eval_df=eval_df)
# evaluate the model
result, model_outputs, wrong_predictions = model.eval_model(eval_df)
logging.info(str(result))
#logging.info(str(model_outputs))
else:
train_eval_df = pd.concat([train_df, eval_df, ge_train], ignore_index=True)
model.train_model(train_eval_df)
if lang in ("es", "multi"):
es_test["predicted"], *_ = model.predict(es_test.text.values)
es_test["predicted"] = es_test["predicted"].apply(label2metric)
es_test["pred"] = es_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
es_bert = sum(es_test.meter == es_test.pred) / es_test.meter.size
logging.info("Accuracy [{}:es]: {} ({})".format(lang, es_bert, model_name))
wandb.log({"accuracy_es": es_bert})
if lang in ("en", "multi"):
en_test["predicted"], *_ = model.predict(en_test.text.values)
en_test["predicted"] = en_test["predicted"].apply(label2metric)
en_test["pred"] = en_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
en_bert = sum(en_test.meter == en_test.pred) / en_test.meter.size
logging.info("Accuracy [{}:en]: {} ({})".format(lang, en_bert, model_name))
wandb.log({"accuracy_en": en_bert})
if lang in ("ge", "multi"):
ge_test["predicted"], *_ = model.predict(ge_test.text.values)
ge_test["predicted"] = ge_test["predicted"].apply(label2metric)
ge_test["pred"] = ge_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
ge_bert = sum(ge_test.meter == ge_test.pred) / ge_test.meter.size
logging.info("Accuracy [{}:ge]: {} ({})".format(lang, ge_bert, model_name))
wandb.log({"accuracy_ge": ge_bert})
if lang in ("multi", ):
test_df = pd.concat([es_test, en_test, ge_test], ignore_index=True)
test_df["predicted"], *_ = model.predict(test_df.text.values)
test_df["predicted"] = test_df["predicted"].apply(label2metric)
test_df["pred"] = test_df.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1)
multi_bert = sum(test_df.meter == test_df.pred) / test_df.meter.size
logging.info("Accuracy [{}:multi]: {} ({})".format(lang, multi_bert, model_name))
wandb.log({"accuracy_multi": multi_bert})
run.finish()
logging.info("Done training '{}'".format(model_output))
# get_ipython().system("rm -rf `ls -dt models/{}-*/checkpoint*/ | awk 'NR>5'`".format(TAG))
logging.info("Done training")
|
[
"os.path.exists",
"wandb.log",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"itertools.product",
"os.environ.get",
"numpy.argmax",
"numpy.sum",
"pandas.concat",
"os.getpid",
"re.sub",
"logging.info"
] |
[((827, 866), 'os.environ.get', 'os.environ.get', (['"""TAG"""', '"""bertsification"""'], {}), "('TAG', 'bertsification')\n", (841, 866), False, 'import os\n'), ((976, 1004), 'os.environ.get', 'os.environ.get', (['"""MODELNAMES"""'], {}), "('MODELNAMES')\n", (990, 1004), False, 'import os\n'), ((3792, 3865), 'sklearn.model_selection.train_test_split', 'train_test_split', (["es[['text', 'labels']]"], {'test_size': '(0.25)', 'random_state': '(42)'}), "(es[['text', 'labels']], test_size=0.25, random_state=42)\n", (3808, 3865), False, 'from sklearn.model_selection import train_test_split\n'), ((3871, 3894), 'logging.info', 'logging.info', (['"""Spanish"""'], {}), "('Spanish')\n", (3883, 3894), False, 'import logging\n'), ((5159, 5232), 'sklearn.model_selection.train_test_split', 'train_test_split', (["en[['text', 'labels']]"], {'test_size': '(0.25)', 'random_state': '(42)'}), "(en[['text', 'labels']], test_size=0.25, random_state=42)\n", (5175, 5232), False, 'from sklearn.model_selection import train_test_split\n'), ((5238, 5261), 'logging.info', 'logging.info', (['"""English"""'], {}), "('English')\n", (5250, 5261), False, 'import logging\n'), ((5877, 5930), 'sklearn.model_selection.train_test_split', 'train_test_split', (['ge'], {'test_size': '(0.15)', 'random_state': '(42)'}), '(ge, test_size=0.15, random_state=42)\n', (5893, 5930), False, 'from sklearn.model_selection import train_test_split\n'), ((5951, 6040), 'sklearn.model_selection.train_test_split', 'train_test_split', (["ge_train_eval[['text', 'labels']]"], {'test_size': '(0.176)', 'random_state': '(42)'}), "(ge_train_eval[['text', 'labels']], test_size=0.176,\n random_state=42)\n", (5967, 6040), False, 'from sklearn.model_selection import train_test_split\n'), ((6042, 6064), 'logging.info', 'logging.info', (['"""German"""'], {}), "('German')\n", (6054, 6064), False, 'import logging\n'), ((7239, 7261), 'itertools.product', 'product', (['langs', 'models'], {}), '(langs, models)\n', (7246, 7261), False, 'from itertools import product\n'), ((12099, 12128), 'logging.info', 'logging.info', (['"""Done training"""'], {}), "('Done training')\n", (12111, 12128), False, 'import logging\n'), ((2189, 2221), 're.sub', 're.sub', (['"""(?is)\\\\s+"""', '""" """', 'output'], {}), "('(?is)\\\\s+', ' ', output)\n", (2195, 2221), False, 'import re\n'), ((2594, 2626), 'numpy.sum', 'np.sum', (['(pred_flat == labels_flat)'], {}), '(pred_flat == labels_flat)\n', (2600, 2626), True, 'import numpy as np\n'), ((7393, 7421), 'os.path.exists', 'os.path.exists', (['model_output'], {}), '(model_output)\n', (7407, 7421), False, 'import os\n'), ((9215, 9275), 'pandas.concat', 'pd.concat', (['[es_train, en_train, ge_train]'], {'ignore_index': '(True)'}), '([es_train, en_train, ge_train], ignore_index=True)\n', (9224, 9275), True, 'import pandas as pd\n'), ((9294, 9351), 'pandas.concat', 'pd.concat', (['[es_eval, en_eval, ge_eval]'], {'ignore_index': '(True)'}), '([es_eval, en_eval, ge_eval], ignore_index=True)\n', (9303, 9351), True, 'import pandas as pd\n'), ((9865, 9924), 'pandas.concat', 'pd.concat', (['[train_df, eval_df, ge_train]'], {'ignore_index': '(True)'}), '([train_df, eval_df, ge_train], ignore_index=True)\n', (9874, 9924), True, 'import pandas as pd\n'), ((10399, 10434), 'wandb.log', 'wandb.log', (["{'accuracy_es': es_bert}"], {}), "({'accuracy_es': es_bert})\n", (10408, 10434), False, 'import wandb\n'), ((10867, 10902), 'wandb.log', 'wandb.log', (["{'accuracy_en': en_bert}"], {}), "({'accuracy_en': en_bert})\n", (10876, 10902), False, 'import 
wandb\n'), ((11335, 11370), 'wandb.log', 'wandb.log', (["{'accuracy_ge': ge_bert}"], {}), "({'accuracy_ge': ge_bert})\n", (11344, 11370), False, 'import wandb\n'), ((11417, 11474), 'pandas.concat', 'pd.concat', (['[es_test, en_test, ge_test]'], {'ignore_index': '(True)'}), '([es_test, en_test, ge_test], ignore_index=True)\n', (11426, 11474), True, 'import pandas as pd\n'), ((11884, 11925), 'wandb.log', 'wandb.log', (["{'accuracy_multi': multi_bert}"], {}), "({'accuracy_multi': multi_bert})\n", (11893, 11925), False, 'import wandb\n'), ((1012, 1042), 'os.environ.get', 'os.environ.get', (['"""EVAL"""', '"""True"""'], {}), "('EVAL', 'True')\n", (1026, 1042), False, 'import os\n'), ((1080, 1116), 'os.environ.get', 'os.environ.get', (['"""OVERWRITE"""', '"""False"""'], {}), "('OVERWRITE', 'False')\n", (1094, 1116), False, 'import os\n'), ((1299, 1310), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1308, 1310), False, 'import os\n'), ((1412, 1423), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1421, 1423), False, 'import os\n'), ((2513, 2537), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (2522, 2537), True, 'import numpy as np\n'), ((901, 942), 'os.environ.get', 'os.environ.get', (['"""LANGS"""', '"""es,ge,en,multi"""'], {}), "('LANGS', 'es,ge,en,multi')\n", (915, 942), False, 'import os\n'), ((4031, 4069), 'pandas.read_csv', 'pd.read_csv', (['"""4b4v_prosodic_meter.csv"""'], {}), "('4b4v_prosodic_meter.csv')\n", (4042, 4069), True, 'import pandas as pd\n'), ((5465, 5503), 'pandas.read_csv', 'pd.read_csv', (['"""po-emo-metricalizer.csv"""'], {}), "('po-emo-metricalizer.csv')\n", (5476, 5503), True, 'import pandas as pd\n')]
|
if not __name__ == "__main__":
print("Started <Pycraft_StartupAnimation>")
class GenerateStartupScreen:
def __init__(self):
pass
def Start(self):
try:
self.Display.fill(self.BackgroundCol)
self.mod_Pygame__.display.flip()
self.mod_Pygame__.display.set_caption(f"Pycraft: v{self.version}: Welcome")
PresentsFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 35)
PycraftFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 60)
NameFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 45)
NameText = NameFont.render("<NAME>", True, self.FontCol)
NameTextWidth = NameText.get_width()
NameTextHeight = NameText.get_height()
PresentsText = PresentsFont.render("presents", True, self.FontCol)
PycraftText = PycraftFont.render("Pycraft", True, self.FontCol)
PycraftTextWidth = PycraftText.get_width()
PycraftTextHeight = PycraftText.get_height()
iteration = 0
clock = self.mod_Pygame__.time.Clock()
if self.RunFullStartup == True:
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
iteration = 0
while iteration <= (60*2):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2))
self.Display.blit(PresentsText, ((((self.realWidth-NameTextWidth)/2)+120), ((self.realHeight-NameTextHeight)/2)+30))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
iteration = 0
while iteration <= (60*3):
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, (self.realHeight-PycraftTextHeight)/2))
iteration += 1
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
y = 0
while True:
self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size()
self.Display.fill(self.BackgroundCol)
self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, ((self.realHeight-PycraftTextHeight)/2)-y))
y += 2
if self.realWidth < 1280:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight)
if self.realHeight < 720:
self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720)
self.mod_Pygame__.display.flip()
clock.tick(60)
for event in self.mod_Pygame__.event.get():
if event.type == self.mod_Pygame__.QUIT:
self.Stop_Thread_Event.set()
self.Thread_StartLongThread.join()
self.Thread_AdaptiveMode.join()
self.Thread_StartLongThread.join()
self.mod_Pygame__.quit()
self.mod_Sys__.exit("Thanks for playing")
quit()
if ((self.realHeight-PycraftTextHeight)/2)-y <= 0:
self.RunFullStartup = False
return None
except Exception as Message:
self.RunFullStartup = False
return Message
else:
print("You need to run this as part of Pycraft")
import tkinter as tk
from tkinter import messagebox
root = tk.Tk()
root.withdraw()
messagebox.showerror("Startup Fail", "You need to run this as part of Pycraft, please run the 'main.py' file")
quit()
|
[
"tkinter.messagebox.showerror",
"tkinter.Tk"
] |
[((7525, 7532), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (7530, 7532), True, 'import tkinter as tk\n'), ((7559, 7673), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Startup Fail"""', '"""You need to run this as part of Pycraft, please run the \'main.py\' file"""'], {}), '(\'Startup Fail\',\n "You need to run this as part of Pycraft, please run the \'main.py\' file")\n', (7579, 7673), False, 'from tkinter import messagebox\n')]
|
#!/usr/bin/env python3
"""Script to do basic health checks of the system and turn on an LED on
BCM pin 12 (pin 32 on header) if they pass, turn Off otherwise.
"""
import time
import RPi.GPIO as GPIO
import subprocess
# The BCM pin number that the LED is wired to. When the pin
# is at 3.3V the LED is On.
LED_PIN = 12
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LED_PIN, GPIO.OUT)
# ----- Test for Internet availability.
# Try to ping for a minute before declaring that the Internet
# is not available
internet_available = False
for i in range(12):
if subprocess.call('/bin/ping -q -c1 8.8.8.8', shell=True) == 0:
internet_available = True
break
time.sleep(5)
# Set LED according to results of test
GPIO.output(LED_PIN, internet_available)
|
[
"RPi.GPIO.setup",
"RPi.GPIO.output",
"RPi.GPIO.setwarnings",
"time.sleep",
"subprocess.call",
"RPi.GPIO.setmode"
] |
[((322, 344), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (334, 344), True, 'import RPi.GPIO as GPIO\n'), ((345, 368), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (361, 368), True, 'import RPi.GPIO as GPIO\n'), ((369, 398), 'RPi.GPIO.setup', 'GPIO.setup', (['LED_PIN', 'GPIO.OUT'], {}), '(LED_PIN, GPIO.OUT)\n', (379, 398), True, 'import RPi.GPIO as GPIO\n'), ((744, 784), 'RPi.GPIO.output', 'GPIO.output', (['LED_PIN', 'internet_available'], {}), '(LED_PIN, internet_available)\n', (755, 784), True, 'import RPi.GPIO as GPIO\n'), ((690, 703), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (700, 703), False, 'import time\n'), ((576, 631), 'subprocess.call', 'subprocess.call', (['"""/bin/ping -q -c1 8.8.8.8"""'], {'shell': '(True)'}), "('/bin/ping -q -c1 8.8.8.8', shell=True)\n", (591, 631), False, 'import subprocess\n')]
|
import json
import numpy as np
from tqdm import tqdm
# Change these based on experiment
#exp_dataset = 'mask_char_oov_test_set.db'
#exp_name = 'results_test_mask_char'
#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_mask_2'
#exp_dataset = 'mask_2_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_ensemble_all_5'
#exp_dataset = 'synonyms_mask_char_l03_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_l03'
#exp_dataset = 'synonyms_mask_char_03m_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_char_03m'
#exp_dataset = 'synonyms_mask_2_03l_oov_test_set.db'
#exp_name = 'results_test_synonyms_mask_2_03l'
exp_dataset = 'mask_2_oov_test_set.db'
exp_name = 'results_test_synonyms_mask_2_fixed'
q_list_file = '/scratch/cluster/billyang/vqa_dataset/txt_db/oov_datasets/{}/questions_changed.json'.format(exp_dataset)
exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_trained/{}/results_3000_all.json'.format(exp_name)
#exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/{}/results_3000_all.json'.format(exp_name)
q_list = json.load(open(q_list_file))
exp_ans_list = json.load(open(exp_ans_file))
baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_trained/results_test_normal_test/results_3000_all.json'))
#baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/results_test_normal_test_fixed/results_3000_all.json'))
exp_ans = {o['question_id']: o['answer'] for o in exp_ans_list}
baseline_ans = {o['question_id']: o['answer'] for o in baseline_ans_list}
gt_ans = json.load(open('oov_test_full_answers.json'))
results = {}
results['num_questions'] = len(q_list)
exp_tot_score = 0
bl_tot_score = 0
rtw = []
wtr = []
def getscore(answer, answers, scores):
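    # Return the VQA soft-accuracy score for the answer, or 0 if it is not among the ground-truth answers.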
if answer in answers:
return scores[answers.index(answer)]
return 0
for qid in tqdm(q_list):
exp_score = getscore(exp_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
exp_tot_score += exp_score
bl_score = getscore(baseline_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores'])
bl_tot_score += bl_score
if exp_score > 0 and bl_score == 0:
wtr.append(qid)
if bl_score > 0 and exp_score == 0:
rtw.append(qid)
results['exp_score'] = exp_tot_score / len(q_list)
results['bl_score'] = bl_tot_score / len(q_list)
results['rtw'] = rtw
results['wtr'] = wtr
results['rtw_count'] = len(rtw)
results['wtr_count'] = len(wtr)
print("dumping")
json.dump(results, open('{}.json'.format(exp_name), 'w'))
# get new scores
# find answers wrong to right
# find answers right to wrong
|
[
"tqdm.tqdm"
] |
[((1923, 1935), 'tqdm.tqdm', 'tqdm', (['q_list'], {}), '(q_list)\n', (1927, 1935), False, 'from tqdm import tqdm\n')]
|
import bts.model as model
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 6
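# Number of feature maps at each encoder/decoder level of the U-Net.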
FILTER_LIST = [16,32,64,128,256]
unet_model = model.DynamicUNet(FILTER_LIST)
unet_model.summary(batch_size=BATCH_SIZE, device=device)
|
[
"bts.model.DynamicUNet",
"torch.cuda.is_available"
] |
[((171, 201), 'bts.model.DynamicUNet', 'model.DynamicUNet', (['FILTER_LIST'], {}), '(FILTER_LIST)\n', (188, 201), True, 'import bts.model as model\n'), ((72, 97), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (95, 97), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import sphinx_rtd_theme
# -- General configuration -----------------------------------------------
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bravado'
copyright = u'2013, Digium, Inc.; 2014-2015, Yelp, Inc'
exclude_patterns = []
pygments_style = 'sphinx'
autoclass_content = 'both'
# -- Options for HTML output ---------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_static_path = ['_static']
htmlhelp_basename = 'bravado-pydoc'
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'bravado-core': ('https://bravado-core.readthedocs.io/en/latest/', None),
}
|
[
"sphinx_rtd_theme.get_html_theme_path"
] |
[((767, 805), 'sphinx_rtd_theme.get_html_theme_path', 'sphinx_rtd_theme.get_html_theme_path', ([], {}), '()\n', (803, 805), False, 'import sphinx_rtd_theme\n')]
|
import importlib
import os
import pytest
from helpers import running_on_ci
import janitor.biology # noqa: F403, F401
# Skip all tests if Biopython not installed
pytestmark = pytest.mark.skipif(
(importlib.util.find_spec("Bio") is None) & ~running_on_ci(),
reason="Biology tests relying on Biopython only required for CI",
)
@pytest.mark.biology
def test_join_fasta(biodf):
"""Test adding sequence from FASTA file in ``sequence`` column."""
df = biodf.join_fasta(
filename=os.path.join(pytest.TEST_DATA_DIR, "sequences.fasta"),
id_col="sequence_accession",
column_name="sequence",
)
assert "sequence" in df.columns
|
[
"importlib.util.find_spec",
"helpers.running_on_ci",
"os.path.join"
] |
[((203, 234), 'importlib.util.find_spec', 'importlib.util.find_spec', (['"""Bio"""'], {}), "('Bio')\n", (227, 234), False, 'import importlib\n'), ((247, 262), 'helpers.running_on_ci', 'running_on_ci', ([], {}), '()\n', (260, 262), False, 'from helpers import running_on_ci\n'), ((502, 555), 'os.path.join', 'os.path.join', (['pytest.TEST_DATA_DIR', '"""sequences.fasta"""'], {}), "(pytest.TEST_DATA_DIR, 'sequences.fasta')\n", (514, 555), False, 'import os\n')]
|
"""TilePyramid creation."""
import pytest
from shapely.geometry import Point
from shapely.ops import unary_union
from types import GeneratorType
from tilematrix import TilePyramid, snap_bounds
def test_init():
"""Initialize TilePyramids."""
for tptype in ["geodetic", "mercator"]:
assert TilePyramid(tptype)
with pytest.raises(ValueError):
TilePyramid("invalid")
with pytest.raises(ValueError):
TilePyramid()
assert hash(TilePyramid(tptype))
def test_metatiling():
"""Metatiling setting."""
for metatiling in [1, 2, 4, 8, 16]:
assert TilePyramid("geodetic", metatiling=metatiling)
try:
TilePyramid("geodetic", metatiling=5)
raise Exception()
except ValueError:
pass
def test_tile_size():
"""Tile sizes."""
for tile_size in [128, 256, 512, 1024]:
tp = TilePyramid("geodetic", tile_size=tile_size)
assert tp.tile_size == tile_size
def test_intersect():
"""Get intersecting Tiles."""
# same metatiling
tp = TilePyramid("geodetic")
intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
control = {(5, 1, 1)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
# smaller metatiling
tp = TilePyramid("geodetic")
intersect_tile = TilePyramid("geodetic", metatiling=2).tile(5, 1, 1)
control = {(5, 2, 2), (5, 2, 3), (5, 3, 3), (5, 3, 2)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
# bigger metatiling
tp = TilePyramid("geodetic", metatiling=2)
intersect_tile = TilePyramid("geodetic").tile(5, 1, 1)
control = {(5, 0, 0)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
intersect_tile = TilePyramid("geodetic").tile(4, 12, 31)
control = {(4, 6, 15)}
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
assert control == test_tiles
# different CRSes
tp = TilePyramid("geodetic")
intersect_tile = TilePyramid("mercator").tile(5, 1, 1)
try:
test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)}
raise Exception()
except ValueError:
pass
def test_tilepyramid_compare(grid_definition_proj, grid_definition_epsg):
"""Comparison operators."""
gproj, gepsg = grid_definition_proj, grid_definition_epsg
# predefined
assert TilePyramid("geodetic") == TilePyramid("geodetic")
assert TilePyramid("geodetic") != TilePyramid("geodetic", metatiling=2)
assert TilePyramid("geodetic") != TilePyramid("geodetic", tile_size=512)
assert TilePyramid("mercator") == TilePyramid("mercator")
assert TilePyramid("mercator") != TilePyramid("mercator", metatiling=2)
assert TilePyramid("mercator") != TilePyramid("mercator", tile_size=512)
# epsg based
assert TilePyramid(gepsg) == TilePyramid(gepsg)
assert TilePyramid(gepsg) != TilePyramid(gepsg, metatiling=2)
assert TilePyramid(gepsg) != TilePyramid(gepsg, tile_size=512)
# proj based
assert TilePyramid(gproj) == TilePyramid(gproj)
assert TilePyramid(gproj) != TilePyramid(gproj, metatiling=2)
assert TilePyramid(gproj) != TilePyramid(gproj, tile_size=512)
# altered bounds
abounds = dict(**gproj)
abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
assert TilePyramid(abounds) == TilePyramid(abounds)
assert TilePyramid(gproj) != TilePyramid(abounds)
# other type
assert TilePyramid("geodetic") != "string"
def test_grid_compare(grid_definition_proj, grid_definition_epsg):
"""Comparison operators."""
gproj, gepsg = grid_definition_proj, grid_definition_epsg
# predefined
assert TilePyramid("geodetic").grid == TilePyramid("geodetic").grid
assert TilePyramid("geodetic").grid == TilePyramid("geodetic", metatiling=2).grid
assert TilePyramid("geodetic").grid == TilePyramid("geodetic", tile_size=512).grid
assert TilePyramid("mercator").grid == TilePyramid("mercator").grid
assert TilePyramid("mercator").grid == TilePyramid("mercator", metatiling=2).grid
assert TilePyramid("mercator").grid == TilePyramid("mercator", tile_size=512).grid
# epsg based
assert TilePyramid(gepsg).grid == TilePyramid(gepsg).grid
assert TilePyramid(gepsg).grid == TilePyramid(gepsg, metatiling=2).grid
assert TilePyramid(gepsg).grid == TilePyramid(gepsg, tile_size=512).grid
# proj based
assert TilePyramid(gproj).grid == TilePyramid(gproj).grid
assert TilePyramid(gproj).grid == TilePyramid(gproj, metatiling=2).grid
assert TilePyramid(gproj).grid == TilePyramid(gproj, tile_size=512).grid
# altered bounds
abounds = dict(**gproj)
abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0))
assert TilePyramid(abounds).grid == TilePyramid(abounds).grid
assert TilePyramid(gproj).grid != TilePyramid(abounds).grid
def test_tile_from_xy():
tp = TilePyramid("geodetic")
zoom = 5
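    # on_edge_use chooses which neighbouring tile is returned when a point lies exactly on a tile edge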
# point inside tile
p_in = (0.5, 0.5, zoom)
control_in = [
((5, 15, 32), "rb"),
((5, 15, 32), "lb"),
((5, 15, 32), "rt"),
((5, 15, 32), "lt"),
]
for tile_id, on_edge_use in control_in:
tile = tp.tile_from_xy(*p_in, on_edge_use=on_edge_use)
assert tile.id == tile_id
assert Point(p_in[0], p_in[1]).within(tile.bbox())
# point is on tile edge
p_edge = (0, 0, zoom)
control_edge = [
((5, 16, 32), "rb"),
((5, 16, 31), "lb"),
((5, 15, 32), "rt"),
((5, 15, 31), "lt"),
]
for tile_id, on_edge_use in control_edge:
tile = tp.tile_from_xy(*p_edge, on_edge_use=on_edge_use)
assert tile.id == tile_id
assert Point(p_edge[0], p_edge[1]).touches(tile.bbox())
with pytest.raises(ValueError):
tp.tile_from_xy(180, -90, zoom, on_edge_use="rb")
with pytest.raises(ValueError):
tp.tile_from_xy(180, -90, zoom, on_edge_use="lb")
tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="rt")
assert tile.id == (5, 31, 0)
tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="lt")
assert tile.id == (5, 31, 63)
with pytest.raises(TypeError):
tp.tile_from_xy(-180, 90, zoom, on_edge_use="lt")
with pytest.raises(TypeError):
tp.tile_from_xy(-180, 90, zoom, on_edge_use="rt")
tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="rb")
assert tile.id == (5, 0, 0)
tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="lb")
assert tile.id == (5, 0, 63)
with pytest.raises(ValueError):
tp.tile_from_xy(-180, 90, zoom, on_edge_use="invalid")
def test_tiles_from_bounds(grid_definition_proj):
# global pyramids
tp = TilePyramid("geodetic")
parent = tp.tile(8, 5, 5)
from_bounds = set([t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)])
children = set([t.id for t in parent.get_children()])
assert from_bounds == children
# non-global pyramids
tp = TilePyramid(grid_definition_proj)
parent = tp.tile(8, 0, 0)
from_bounds = set([t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)])
children = set([t.id for t in parent.get_children()])
assert from_bounds == children
def test_tiles_from_bounds_batch_by_row():
tp = TilePyramid("geodetic")
bounds = (0, 0, 90, 90)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_row = None
tiles = 0
for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
assert isinstance(tile_row, GeneratorType)
previous_tile = None
for tile in tile_row:
tiles += 1
if previous_row is None:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
else:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
assert tile.row == previous_tile.row
assert tile.row == previous_row + 1
previous_tile = tile
previous_row = tile.row
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_column():
tp = TilePyramid("geodetic")
bounds = (0, 0, 90, 90)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="column")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_column = None
tiles = 0
for tile_column in tp.tiles_from_bounds(bounds, zoom, batch_by="column"):
assert isinstance(tile_column, GeneratorType)
previous_tile = None
for tile in tile_column:
tiles += 1
if previous_column is None:
if previous_tile is not None:
assert tile.row == previous_tile.row + 1
else:
if previous_tile is not None:
assert tile.row == previous_tile.row + 1
assert tile.col == previous_tile.col
assert tile.col == previous_column + 1
previous_tile = tile
previous_column = tile.col
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_row_antimeridian_bounds():
tp = TilePyramid("geodetic")
bounds = (0, 0, 185, 95)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_row = None
tiles = 0
for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
assert isinstance(tile_row, GeneratorType)
previous_tile = None
for tile in tile_row:
tiles += 1
if previous_row is None:
if previous_tile is not None:
assert tile.col > previous_tile.col
else:
if previous_tile is not None:
assert tile.col > previous_tile.col
assert tile.row == previous_tile.row
assert tile.row > previous_row
previous_tile = tile
previous_row = tile.row
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_tiles_from_bounds_batch_by_row_both_antimeridian_bounds():
tp = TilePyramid("geodetic")
bounds = (-185, 0, 185, 95)
zoom = 8
tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row")
assert isinstance(tiles, GeneratorType)
assert list(tiles)
previous_row = None
tiles = 0
for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"):
assert isinstance(tile_row, GeneratorType)
previous_tile = None
for tile in tile_row:
tiles += 1
if previous_row is None:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
else:
if previous_tile is not None:
assert tile.col == previous_tile.col + 1
assert tile.row == previous_tile.row
assert tile.row == previous_row + 1
previous_tile = tile
previous_row = tile.row
assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom)))
def test_snap_bounds():
bounds = (0, 1, 2, 3)
tp = TilePyramid("geodetic")
zoom = 8
snapped = snap_bounds(bounds=bounds, tile_pyramid=tp, zoom=zoom)
control = unary_union(
[tile.bbox() for tile in tp.tiles_from_bounds(bounds, zoom)]
).bounds
assert snapped == control
pixelbuffer = 10
snapped = snap_bounds(
bounds=bounds, tile_pyramid=tp, zoom=zoom, pixelbuffer=pixelbuffer
)
control = unary_union(
[tile.bbox(pixelbuffer) for tile in tp.tiles_from_bounds(bounds, zoom)]
).bounds
assert snapped == control
def test_deprecated():
tp = TilePyramid("geodetic")
assert tp.type
assert tp.srid
assert tp.tile_x_size(0)
assert tp.tile_y_size(0)
assert tp.tile_height(0)
assert tp.tile_width(0)
|
[
"shapely.geometry.Point",
"tilematrix.TilePyramid",
"tilematrix.snap_bounds",
"pytest.raises"
] |
[((1042, 1065), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (1053, 1065), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((1290, 1313), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (1301, 1313), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((1584, 1621), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'metatiling': '(2)'}), "('geodetic', metatiling=2)\n", (1595, 1621), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2035, 2058), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (2046, 2058), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((5005, 5028), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (5016, 5028), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((6780, 6803), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (6791, 6803), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((7042, 7075), 'tilematrix.TilePyramid', 'TilePyramid', (['grid_definition_proj'], {}), '(grid_definition_proj)\n', (7053, 7075), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((7333, 7356), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (7344, 7356), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((8346, 8369), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (8357, 8369), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((9403, 9426), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (9414, 9426), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((10424, 10447), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (10435, 10447), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((11445, 11468), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (11456, 11468), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((11497, 11551), 'tilematrix.snap_bounds', 'snap_bounds', ([], {'bounds': 'bounds', 'tile_pyramid': 'tp', 'zoom': 'zoom'}), '(bounds=bounds, tile_pyramid=tp, zoom=zoom)\n', (11508, 11551), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((11727, 11806), 'tilematrix.snap_bounds', 'snap_bounds', ([], {'bounds': 'bounds', 'tile_pyramid': 'tp', 'zoom': 'zoom', 'pixelbuffer': 'pixelbuffer'}), '(bounds=bounds, tile_pyramid=tp, zoom=zoom, pixelbuffer=pixelbuffer)\n', (11738, 11806), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((12005, 12028), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (12016, 12028), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((308, 327), 'tilematrix.TilePyramid', 'TilePyramid', (['tptype'], {}), '(tptype)\n', (319, 327), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((337, 362), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (350, 362), False, 'import pytest\n'), ((372, 394), 'tilematrix.TilePyramid', 'TilePyramid', (['"""invalid"""'], {}), "('invalid')\n", (383, 394), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((404, 429), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (417, 429), False, 'import pytest\n'), ((439, 452), 'tilematrix.TilePyramid', 'TilePyramid', ([], {}), '()\n', (450, 
452), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((469, 488), 'tilematrix.TilePyramid', 'TilePyramid', (['tptype'], {}), '(tptype)\n', (480, 488), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((600, 646), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'metatiling': 'metatiling'}), "('geodetic', metatiling=metatiling)\n", (611, 646), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((664, 701), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'metatiling': '(5)'}), "('geodetic', metatiling=5)\n", (675, 701), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((867, 911), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'tile_size': 'tile_size'}), "('geodetic', tile_size=tile_size)\n", (878, 911), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2462, 2485), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (2473, 2485), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2489, 2512), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (2500, 2512), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2524, 2547), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (2535, 2547), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2551, 2588), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'metatiling': '(2)'}), "('geodetic', metatiling=2)\n", (2562, 2588), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2600, 2623), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (2611, 2623), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2627, 2665), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'tile_size': '(512)'}), "('geodetic', tile_size=512)\n", (2638, 2665), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2677, 2700), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (2688, 2700), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2704, 2727), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (2715, 2727), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2739, 2762), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (2750, 2762), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2766, 2803), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {'metatiling': '(2)'}), "('mercator', metatiling=2)\n", (2777, 2803), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2815, 2838), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (2826, 2838), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2842, 2880), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {'tile_size': '(512)'}), "('mercator', tile_size=512)\n", (2853, 2880), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2909, 2927), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (2920, 2927), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2931, 2949), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (2942, 2949), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2961, 2979), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (2972, 2979), False, 'from tilematrix import 
TilePyramid, snap_bounds\n'), ((2983, 3015), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {'metatiling': '(2)'}), '(gepsg, metatiling=2)\n', (2994, 3015), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3027, 3045), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (3038, 3045), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3049, 3082), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {'tile_size': '(512)'}), '(gepsg, tile_size=512)\n', (3060, 3082), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3111, 3129), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (3122, 3129), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3133, 3151), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (3144, 3151), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3163, 3181), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (3174, 3181), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3185, 3217), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {'metatiling': '(2)'}), '(gproj, metatiling=2)\n', (3196, 3217), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3229, 3247), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (3240, 3247), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3251, 3284), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {'tile_size': '(512)'}), '(gproj, tile_size=512)\n', (3262, 3284), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3419, 3439), 'tilematrix.TilePyramid', 'TilePyramid', (['abounds'], {}), '(abounds)\n', (3430, 3439), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3443, 3463), 'tilematrix.TilePyramid', 'TilePyramid', (['abounds'], {}), '(abounds)\n', (3454, 3463), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3475, 3493), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (3486, 3493), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3497, 3517), 'tilematrix.TilePyramid', 'TilePyramid', (['abounds'], {}), '(abounds)\n', (3508, 3517), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3546, 3569), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (3557, 3569), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((5853, 5878), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5866, 5878), False, 'import pytest\n'), ((5947, 5972), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5960, 5972), False, 'import pytest\n'), ((6232, 6256), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6245, 6256), False, 'import pytest\n'), ((6325, 6349), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6338, 6349), False, 'import pytest\n'), ((6607, 6632), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6620, 6632), False, 'import pytest\n'), ((1087, 1110), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (1098, 1110), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((1335, 1372), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'metatiling': '(2)'}), "('geodetic', metatiling=2)\n", (1346, 1372), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((1643, 1666), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], 
{}), "('geodetic')\n", (1654, 1666), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((1832, 1855), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (1843, 1855), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((2080, 2103), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (2091, 2103), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3773, 3796), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (3784, 3796), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3805, 3828), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (3816, 3828), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3845, 3868), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (3856, 3868), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3877, 3914), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'metatiling': '(2)'}), "('geodetic', metatiling=2)\n", (3888, 3914), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3931, 3954), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {}), "('geodetic')\n", (3942, 3954), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((3963, 4001), 'tilematrix.TilePyramid', 'TilePyramid', (['"""geodetic"""'], {'tile_size': '(512)'}), "('geodetic', tile_size=512)\n", (3974, 4001), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4018, 4041), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (4029, 4041), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4050, 4073), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (4061, 4073), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4090, 4113), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (4101, 4113), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4122, 4159), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {'metatiling': '(2)'}), "('mercator', metatiling=2)\n", (4133, 4159), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4176, 4199), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {}), "('mercator')\n", (4187, 4199), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4208, 4246), 'tilematrix.TilePyramid', 'TilePyramid', (['"""mercator"""'], {'tile_size': '(512)'}), "('mercator', tile_size=512)\n", (4219, 4246), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4280, 4298), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (4291, 4298), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4307, 4325), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (4318, 4325), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4342, 4360), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (4353, 4360), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4369, 4401), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {'metatiling': '(2)'}), '(gepsg, metatiling=2)\n', (4380, 4401), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4418, 4436), 'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {}), '(gepsg)\n', (4429, 4436), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4445, 4478), 
'tilematrix.TilePyramid', 'TilePyramid', (['gepsg'], {'tile_size': '(512)'}), '(gepsg, tile_size=512)\n', (4456, 4478), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4512, 4530), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (4523, 4530), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4539, 4557), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (4550, 4557), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4574, 4592), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (4585, 4592), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4601, 4633), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {'metatiling': '(2)'}), '(gproj, metatiling=2)\n', (4612, 4633), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4650, 4668), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (4661, 4668), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4677, 4710), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {'tile_size': '(512)'}), '(gproj, tile_size=512)\n', (4688, 4710), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4850, 4870), 'tilematrix.TilePyramid', 'TilePyramid', (['abounds'], {}), '(abounds)\n', (4861, 4870), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4879, 4899), 'tilematrix.TilePyramid', 'TilePyramid', (['abounds'], {}), '(abounds)\n', (4890, 4899), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4916, 4934), 'tilematrix.TilePyramid', 'TilePyramid', (['gproj'], {}), '(gproj)\n', (4927, 4934), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((4943, 4963), 'tilematrix.TilePyramid', 'TilePyramid', (['abounds'], {}), '(abounds)\n', (4954, 4963), False, 'from tilematrix import TilePyramid, snap_bounds\n'), ((5392, 5415), 'shapely.geometry.Point', 'Point', (['p_in[0]', 'p_in[1]'], {}), '(p_in[0], p_in[1])\n', (5397, 5415), False, 'from shapely.geometry import Point\n'), ((5794, 5821), 'shapely.geometry.Point', 'Point', (['p_edge[0]', 'p_edge[1]'], {}), '(p_edge[0], p_edge[1])\n', (5799, 5821), False, 'from shapely.geometry import Point\n')]
|
# -*- coding: utf-8 -*-
"""
setup.py script
"""
import io
from collections import OrderedDict
from setuptools import setup, find_packages
with io.open('README.md', 'rt', encoding='utf8') as f:
README = f.read()
setup(
name='reportbuilder',
version='0.0.1',
url='http://github.com/giovannicuriel/report-builder',
project_urls=OrderedDict((
('Code', 'https://github.com/giovannicuriel/report-builder.git'),
('Issue tracker', 'https://github.com/giovannicuriel/report-builder/issues'),
)),
license='BSD-2-Clause',
author='<NAME>',
author_email='<EMAIL>',
description='Sample package for Python training courses',
long_description=README,
packages=["reportbuilder"],
include_package_data=True,
zip_safe=False,
    platforms=['any'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
install_requires=[
'flask==1.1.1'
],
entry_points={
'console_scripts': [
'report-builder = reportbuilder.app:main'
]
}
)
|
[
"collections.OrderedDict",
"io.open"
] |
[((145, 188), 'io.open', 'io.open', (['"""README.md"""', '"""rt"""'], {'encoding': '"""utf8"""'}), "('README.md', 'rt', encoding='utf8')\n", (152, 188), False, 'import io\n'), ((348, 518), 'collections.OrderedDict', 'OrderedDict', (["(('Code', 'https://github.com/giovannicuriel/report-builder.git'), (\n 'Issue tracker', 'https://github.com/giovannicuriel/report-builder/issues')\n )"], {}), "((('Code',\n 'https://github.com/giovannicuriel/report-builder.git'), (\n 'Issue tracker',\n 'https://github.com/giovannicuriel/report-builder/issues')))\n", (359, 518), False, 'from collections import OrderedDict\n')]
|
from django.urls import path
from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, \
LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, \
LocationUpdateView
app_name = "locations"
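# list/create/edit/delete/detail routes for locations, plus nested routes for their addresses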
urlpatterns = [
path("", LocationListView.as_view(), name="list"),
path("new/", LocationCreateView.as_view(), name="new"),
path("edit/<int:pk>/", LocationUpdateView.as_view(), name="edit"),
path("delete/<int:pk>/", LocationDeleteView.as_view(), name="delete"),
path("<int:pk>/", LocationDetailView.as_view(), name="view"),
path('<int:pk>/address-new/', LocationAddressCreateView.as_view(), name='address-new'),
path('address/<int:pk>', LocationAddressDetailView.as_view(), name='address-view'),
path('address/edit/<int:pk>', LocationAddressUpdateView.as_view(), name='address-edit'),
]
|
[
"itembase.core.views.location_views.LocationAddressUpdateView.as_view",
"itembase.core.views.location_views.LocationDeleteView.as_view",
"itembase.core.views.location_views.LocationListView.as_view",
"itembase.core.views.location_views.LocationAddressDetailView.as_view",
"itembase.core.views.location_views.LocationCreateView.as_view",
"itembase.core.views.location_views.LocationDetailView.as_view",
"itembase.core.views.location_views.LocationAddressCreateView.as_view",
"itembase.core.views.location_views.LocationUpdateView.as_view"
] |
[((320, 346), 'itembase.core.views.location_views.LocationListView.as_view', 'LocationListView.as_view', ([], {}), '()\n', (344, 346), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n'), ((379, 407), 'itembase.core.views.location_views.LocationCreateView.as_view', 'LocationCreateView.as_view', ([], {}), '()\n', (405, 407), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n'), ((449, 477), 'itembase.core.views.location_views.LocationUpdateView.as_view', 'LocationUpdateView.as_view', ([], {}), '()\n', (475, 477), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n'), ((522, 550), 'itembase.core.views.location_views.LocationDeleteView.as_view', 'LocationDeleteView.as_view', ([], {}), '()\n', (548, 550), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n'), ((590, 618), 'itembase.core.views.location_views.LocationDetailView.as_view', 'LocationDetailView.as_view', ([], {}), '()\n', (616, 618), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n'), ((668, 703), 'itembase.core.views.location_views.LocationAddressCreateView.as_view', 'LocationAddressCreateView.as_view', ([], {}), '()\n', (701, 703), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n'), ((755, 790), 'itembase.core.views.location_views.LocationAddressDetailView.as_view', 'LocationAddressDetailView.as_view', ([], {}), '()\n', (788, 790), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n'), ((848, 883), 'itembase.core.views.location_views.LocationAddressUpdateView.as_view', 'LocationAddressUpdateView.as_view', ([], {}), '()\n', (881, 883), False, 'from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, LocationUpdateView\n')]
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class product_margin(osv.osv_memory):
_name = 'product.margin'
_description = 'Product Margin'
_columns = {
'from_date': fields.date('From'),
'to_date': fields.date('To'),
'invoice_state': fields.selection([
('paid', 'Paid'),
('open_paid', 'Open and Paid'),
('draft_open_paid', 'Draft, Open and Paid'),
], 'Invoice State', select=True, required=True),
}
_defaults = {
'from_date': time.strftime('%Y-01-01'),
'to_date': time.strftime('%Y-12-31'),
'invoice_state': "open_paid",
}
def action_open_window(self, cr, uid, ids, context=None):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: the ID or list of IDs if we want more than one
        @return: an ir.actions.act_window dictionary that opens the product margins view
"""
context = dict(context or {})
def ref(module, xml_id):
proxy = self.pool.get('ir.model.data')
return proxy.get_object_reference(cr, uid, module, xml_id)
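        # resolve the XML ids of the search, graph, form and tree views used by the action below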
model, search_view_id = ref('product', 'product_search_form_view')
model, graph_view_id = ref('product_margin', 'view_product_margin_graph')
model, form_view_id = ref('product_margin', 'view_product_margin_form')
model, tree_view_id = ref('product_margin', 'view_product_margin_tree')
        # get the current product.margin object to obtain the values from it
records = self.browse(cr, uid, ids, context=context)
record = records[0]
context.update(invoice_state=record.invoice_state)
if record.from_date:
context.update(date_from=record.from_date)
if record.to_date:
context.update(date_to=record.to_date)
views = [
(tree_view_id, 'tree'),
(form_view_id, 'form'),
(graph_view_id, 'graph')
]
return {
'name': _('Product Margins'),
'context': context,
'view_type': 'form',
"view_mode": 'tree,form,graph',
'res_model': 'product.product',
'type': 'ir.actions.act_window',
'views': views,
'view_id': False,
'search_view_id': search_view_id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"openerp.osv.fields.selection",
"time.strftime",
"openerp.osv.fields.date",
"openerp.tools.translate._"
] |
[((1209, 1228), 'openerp.osv.fields.date', 'fields.date', (['"""From"""'], {}), "('From')\n", (1220, 1228), False, 'from openerp.osv import fields, osv\n'), ((1249, 1266), 'openerp.osv.fields.date', 'fields.date', (['"""To"""'], {}), "('To')\n", (1260, 1266), False, 'from openerp.osv import fields, osv\n'), ((1293, 1461), 'openerp.osv.fields.selection', 'fields.selection', (["[('paid', 'Paid'), ('open_paid', 'Open and Paid'), ('draft_open_paid',\n 'Draft, Open and Paid')]", '"""Invoice State"""'], {'select': '(True)', 'required': '(True)'}), "([('paid', 'Paid'), ('open_paid', 'Open and Paid'), (\n 'draft_open_paid', 'Draft, Open and Paid')], 'Invoice State', select=\n True, required=True)\n", (1309, 1461), False, 'from openerp.osv import fields, osv\n'), ((1546, 1571), 'time.strftime', 'time.strftime', (['"""%Y-01-01"""'], {}), "('%Y-01-01')\n", (1559, 1571), False, 'import time\n'), ((1592, 1617), 'time.strftime', 'time.strftime', (['"""%Y-12-31"""'], {}), "('%Y-12-31')\n", (1605, 1617), False, 'import time\n'), ((3053, 3073), 'openerp.tools.translate._', '_', (['"""Product Margins"""'], {}), "('Product Margins')\n", (3054, 3073), False, 'from openerp.tools.translate import _\n')]
|
"""Search-related testing utilities."""
import tempfile
import time
from contextlib import contextmanager
import haystack
from django.conf import settings
from django.core.management import call_command
from djblets.siteconfig.models import SiteConfiguration
from reviewboard.admin.siteconfig import load_site_config
def reindex_search():
"""Rebuild the search index."""
call_command('rebuild_index', interactive=False)
# On Whoosh, the above is asynchronous, and we can end up trying to read
# before we end up writing, occasionally breaking tests. We need to
# introduce just a bit of a delay.
#
# Yeah, this is still sketchy, but we can't turn off the async behavior
# or receive notification that the write has completed.
time.sleep(0.1)
@contextmanager
def search_enabled(on_the_fly_indexing=False, backend_id='whoosh'):
"""Temporarily enable indexed search.
Args:
on_the_fly_indexing (bool, optional):
Whether or not to enable on-the-fly indexing.
backend_id (unicode, optional):
The search backend to enable. Valid options are "whoosh" (default)
and "elasticsearch".
"""
siteconfig = SiteConfiguration.objects.get_current()
old_backend_id = siteconfig.get('search_backend_id')
old_backend_settings = siteconfig.get('search_backend_settings')
if backend_id == 'whoosh':
backend_settings = {
'PATH': tempfile.mkdtemp(suffix='search-index',
dir=settings.SITE_DATA_DIR),
'STORAGE': 'file',
}
elif backend_id == 'elasticsearch':
backend_settings = {
'INDEX_NAME': 'reviewboard-tests',
'URL': 'http://es.example.com:9200/',
}
else:
raise NotImplementedError('Unexpected backend ID "%s"' % backend_id)
siteconfig.settings.update({
'search_enable': True,
'search_backend_id': backend_id,
'search_backend_settings': {
backend_id: backend_settings,
},
'search_on_the_fly_indexing': on_the_fly_indexing,
})
siteconfig.save(update_fields=('settings',))
load_site_config()
try:
yield
haystack.connections['default'].reset_sessions()
finally:
siteconfig.settings.update({
'search_enable': False,
'search_backend_id': old_backend_id,
'search_backend_settings': old_backend_settings,
'search_on_the_fly_indexing': False,
})
siteconfig.save(update_fields=('settings',))
load_site_config()
|
[
"django.core.management.call_command",
"reviewboard.admin.siteconfig.load_site_config",
"time.sleep",
"tempfile.mkdtemp",
"djblets.siteconfig.models.SiteConfiguration.objects.get_current"
] |
[((384, 432), 'django.core.management.call_command', 'call_command', (['"""rebuild_index"""'], {'interactive': '(False)'}), "('rebuild_index', interactive=False)\n", (396, 432), False, 'from django.core.management import call_command\n'), ((768, 783), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (778, 783), False, 'import time\n'), ((1205, 1244), 'djblets.siteconfig.models.SiteConfiguration.objects.get_current', 'SiteConfiguration.objects.get_current', ([], {}), '()\n', (1242, 1244), False, 'from djblets.siteconfig.models import SiteConfiguration\n'), ((2179, 2197), 'reviewboard.admin.siteconfig.load_site_config', 'load_site_config', ([], {}), '()\n', (2195, 2197), False, 'from reviewboard.admin.siteconfig import load_site_config\n'), ((2598, 2616), 'reviewboard.admin.siteconfig.load_site_config', 'load_site_config', ([], {}), '()\n', (2614, 2616), False, 'from reviewboard.admin.siteconfig import load_site_config\n'), ((1453, 1520), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'suffix': '"""search-index"""', 'dir': 'settings.SITE_DATA_DIR'}), "(suffix='search-index', dir=settings.SITE_DATA_DIR)\n", (1469, 1520), False, 'import tempfile\n')]
|
# Copyright (C) 2015 <NAME>
import json
import os.path
from remoot import pythonstarter, smartstarter
import anycall
from pydron.backend import worker
from pydron.interpreter import scheduler, strategies
from twisted.internet import defer
preload_packages = []
def load_config(configfile=None):
if not configfile:
candidates = []
if "PYDRON_CONF" in os.environ:
candidates.append(os.environ["PYDRON_CONF"])
candidates.append(os.path.abspath("pydron.conf"))
candidates.append(os.path.expanduser("~/pydron.conf"))
candidates.append("/etc/pydron.conf")
for candidate in candidates:
if os.path.exists(candidate):
configfile = candidate
break
else:
raise ValueError("Config file could not be found. Looked for %s" % repr(candidates))
with open(configfile, 'r') as f:
cfg = json.load(f)
def convert(obj):
if isinstance(obj, dict):
return {k:convert(v) for k,v in obj.iteritems()}
elif isinstance(obj, list):
return [convert(v) for v in obj]
elif isinstance(obj, unicode):
return str(obj)
else:
return obj
cfg = convert(cfg)
return cfg
def create_scheduler(config, pool):
if "scheduler" not in config:
strategy_name = "trivial"
else:
strategy_name = config["scheduler"]
if strategy_name == "trivial":
strategy = strategies.TrivialSchedulingStrategy(pool)
strategy = strategies.VerifySchedulingStrategy(strategy)
else:
raise ValueError("Unsupported scheduler: %s" % strategy_name)
return scheduler.Scheduler(pool, strategy)
def create_pool(config, rpcsystem, error_handler):
"""
    Starts workers and returns a pool of them.
    Returns a deferred that calls back with the pool once all
    workers have been started and errbacks with the first failure
    if any worker fails to start. The deferred can be cancelled.
The given `error_handler` is invoked for every failed start.
"""
starters = []
for starter_conf in config["workers"]:
starters.extend(_create_starters(starter_conf, rpcsystem))
pool = worker.Pool()
ds = []
for i, starter in enumerate(starters):
d = starter.start()
def success(worker, i, starter):
worker.nicename = "#%s" % i
pool.add_worker(worker)
def fail(failure):
error_handler(failure)
return failure
d.addCallback(success, i, starter)
ds.append(d)
d = defer.DeferredList(ds, fireOnOneErrback=True, consumeErrors=True)
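    # fireOnOneErrback=True makes the aggregate deferred fail as soon as any single worker start fails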
def on_success(result):
return pool
def on_fail(firsterror):
return firsterror.value.subFailure
d.addCallbacks(on_success, on_fail)
return d
def create_rpc_system(conf):
port_range = _parse_port_range(conf.get("data_ports", 0))
return anycall.create_tcp_rpc_system(port_range = port_range)
def _create_starters(conf, rpcsystem):
global preload_packages
import pydron
data_ports = _parse_port_range(conf.get("data_ports", 0))
preconnect = conf.get("preconnect", True)
if 0 in data_ports:
# use automatically selected ports. this is not compatible
# with preconnect
preconnect = False
data_ports = [0]
if data_ports != [0] and len(data_ports) <= conf["cores"]:
if 0 not in data_ports:
raise ValueError("Not enough ports configured for %r" % conf)
starters = []
for i in range(conf["cores"]):
starter_type = conf["type"]
if starter_type == "multicore":
starter = _multicore_starter(conf, rpcsystem)
elif starter_type == "ssh":
starter = _ssh_starter(conf, rpcsystem)
elif starter_type == "cloud":
starter = _ec2_starter(conf, rpcsystem)
else:
raise ValueError("Not supported worker type %s" % repr(starter_type))
if data_ports == [0]:
port = 0
else:
port = data_ports[i]
smart = smartstarter.SmartStarter(starter,
rpcsystem,
anycall.create_tcp_rpc_system,
list(preload_packages)+[pydron],
preconnect = preconnect,
data_port = port)
starters.append(worker.WorkerStarter(smart))
return starters
def _multicore_starter(conf, rpcsystem):
return pythonstarter.LocalStarter()
def _ssh_starter(conf, rpcsystem):
starter = pythonstarter.SSHStarter(conf["hostname"],
username=conf["username"],
password=conf.get("password", None),
private_key_files=conf.get("private_key_files", []),
private_keys=conf.get("private_keys", []),
tmp_dir=conf.get("tmp_dir", "/tmp"))
return starter
def _ec2_starter(conf, rpcsystem):
starter = pythonstarter.EC2Starter(username=conf["username"],
provider=conf["provider"],
provider_keyid=conf["accesskeyid"],
provider_key=conf["accesskey"],
image_id=conf["imageid"],
size_id=conf["sizeid"],
public_key_file=conf["publickey"],
private_key_file=conf["privatekey"],
tmp_dir=conf.get("tmp_dir", "/tmp"))
return starter
def _parse_port_range(ports):
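    # accepts a single port, a list of ports, or a "min-max" range string and normalizes it to a sequence of ints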
try:
return [int(ports)]
except ValueError:
pass
if isinstance(ports, list):
return [int(x) for x in ports]
min_port, max_port = str(ports).split('-', 1)
min_port = int(min_port)
max_port = int(max_port)
return range(min_port, max_port + 1)
|
[
"anycall.create_tcp_rpc_system",
"pydron.backend.worker.Pool",
"pydron.interpreter.scheduler.Scheduler",
"remoot.pythonstarter.LocalStarter",
"twisted.internet.defer.DeferredList",
"pydron.backend.worker.WorkerStarter",
"json.load",
"pydron.interpreter.strategies.TrivialSchedulingStrategy",
"pydron.interpreter.strategies.VerifySchedulingStrategy"
] |
[((1718, 1753), 'pydron.interpreter.scheduler.Scheduler', 'scheduler.Scheduler', (['pool', 'strategy'], {}), '(pool, strategy)\n', (1737, 1753), False, 'from pydron.interpreter import scheduler, strategies\n'), ((2372, 2385), 'pydron.backend.worker.Pool', 'worker.Pool', ([], {}), '()\n', (2383, 2385), False, 'from pydron.backend import worker\n'), ((2776, 2841), 'twisted.internet.defer.DeferredList', 'defer.DeferredList', (['ds'], {'fireOnOneErrback': '(True)', 'consumeErrors': '(True)'}), '(ds, fireOnOneErrback=True, consumeErrors=True)\n', (2794, 2841), False, 'from twisted.internet import defer\n'), ((3129, 3181), 'anycall.create_tcp_rpc_system', 'anycall.create_tcp_rpc_system', ([], {'port_range': 'port_range'}), '(port_range=port_range)\n', (3158, 3181), False, 'import anycall\n'), ((4850, 4878), 'remoot.pythonstarter.LocalStarter', 'pythonstarter.LocalStarter', ([], {}), '()\n', (4876, 4878), False, 'from remoot import pythonstarter, smartstarter\n'), ((925, 937), 'json.load', 'json.load', (['f'], {}), '(f)\n', (934, 937), False, 'import json\n'), ((1514, 1556), 'pydron.interpreter.strategies.TrivialSchedulingStrategy', 'strategies.TrivialSchedulingStrategy', (['pool'], {}), '(pool)\n', (1550, 1556), False, 'from pydron.interpreter import scheduler, strategies\n'), ((1576, 1621), 'pydron.interpreter.strategies.VerifySchedulingStrategy', 'strategies.VerifySchedulingStrategy', (['strategy'], {}), '(strategy)\n', (1611, 1621), False, 'from pydron.interpreter import scheduler, strategies\n'), ((4735, 4762), 'pydron.backend.worker.WorkerStarter', 'worker.WorkerStarter', (['smart'], {}), '(smart)\n', (4755, 4762), False, 'from pydron.backend import worker\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This plugin provides customization of the header displayed by pytest for
reporting purposes.
"""
import os
import sys
import datetime
import locale
import math
from collections import OrderedDict
from astropy.tests.helper import ignore_warnings
from astropy.utils.introspection import resolve_name
PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'),
('Scipy', 'scipy'),
('Matplotlib', 'matplotlib'),
('h5py', 'h5py'),
('Pandas', 'pandas')])
# This always returns with Astropy's version
from astropy import __version__
TESTED_VERSIONS = OrderedDict([('Astropy', __version__)])
def pytest_report_header(config):
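    # assemble a multi-line header describing platform, interpreter, encodings and dependency versions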
try:
stdoutencoding = sys.stdout.encoding or 'ascii'
except AttributeError:
stdoutencoding = 'ascii'
args = config.args
# TESTED_VERSIONS can contain the affiliated package version, too
if len(TESTED_VERSIONS) > 1:
for pkg, version in TESTED_VERSIONS.items():
if pkg not in ['Astropy', 'astropy_helpers']:
s = "\nRunning tests with {} version {}.\n".format(
pkg, version)
else:
s = "\nRunning tests with Astropy version {}.\n".format(
TESTED_VERSIONS['Astropy'])
# Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from
# each directory argument
if hasattr(config, 'rootdir'):
rootdir = str(config.rootdir)
if not rootdir.endswith(os.sep):
rootdir += os.sep
dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg
for arg in args]
else:
dirs = args
s += "Running tests in {}.\n\n".format(" ".join(dirs))
s += "Date: {}\n\n".format(datetime.datetime.now().isoformat()[:19])
from platform import platform
plat = platform()
if isinstance(plat, bytes):
plat = plat.decode(stdoutencoding, 'replace')
s += f"Platform: {plat}\n\n"
s += f"Executable: {sys.executable}\n\n"
s += f"Full Python Version: \n{sys.version}\n\n"
s += "encodings: sys: {}, locale: {}, filesystem: {}".format(
sys.getdefaultencoding(),
locale.getpreferredencoding(),
sys.getfilesystemencoding())
s += '\n'
s += f"byteorder: {sys.byteorder}\n"
s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format(
sys.float_info)
for module_display, module_name in PYTEST_HEADER_MODULES.items():
try:
with ignore_warnings(DeprecationWarning):
module = resolve_name(module_name)
except ImportError:
s += f"{module_display}: not available\n"
else:
try:
version = module.__version__
except AttributeError:
version = 'unknown (no __version__ attribute)'
s += f"{module_display}: {version}\n"
# Helpers version
if 'astropy_helpers' in TESTED_VERSIONS:
astropy_helpers_version = TESTED_VERSIONS['astropy_helpers']
else:
try:
from astropy.version import astropy_helpers_version
except ImportError:
astropy_helpers_version = None
if astropy_helpers_version:
s += f"astropy_helpers: {astropy_helpers_version}\n"
special_opts = ["remote_data", "pep8"]
opts = []
for op in special_opts:
op_value = getattr(config.option, op, None)
if op_value:
if isinstance(op_value, str):
op = ': '.join((op, op_value))
opts.append(op)
if opts:
s += "Using Astropy options: {}.\n".format(", ".join(opts))
return s
def pytest_terminal_summary(terminalreporter):
"""Output a warning to IPython users in case any tests failed."""
try:
get_ipython()
except NameError:
return
if not terminalreporter.stats.get('failed'):
# Only issue the warning when there are actually failures
return
terminalreporter.ensure_newline()
terminalreporter.write_line(
'Some tests are known to fail when run from the IPython prompt; '
'especially, but not limited to tests involving logging and warning '
'handling. Unless you are certain as to the cause of the failure, '
'please check that the failure occurs outside IPython as well. See '
'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-'
'tests-when-running-the-tests-in-ipython for more information.',
yellow=True, bold=True)
|
[
"collections.OrderedDict",
"sys.getfilesystemencoding",
"sys.getdefaultencoding",
"platform.platform",
"locale.getpreferredencoding",
"datetime.datetime.now",
"astropy.tests.helper.ignore_warnings",
"astropy.utils.introspection.resolve_name"
] |
[((394, 521), 'collections.OrderedDict', 'OrderedDict', (["[('Numpy', 'numpy'), ('Scipy', 'scipy'), ('Matplotlib', 'matplotlib'), (\n 'h5py', 'h5py'), ('Pandas', 'pandas')]"], {}), "([('Numpy', 'numpy'), ('Scipy', 'scipy'), ('Matplotlib',\n 'matplotlib'), ('h5py', 'h5py'), ('Pandas', 'pandas')])\n", (405, 521), False, 'from collections import OrderedDict\n'), ((762, 801), 'collections.OrderedDict', 'OrderedDict', (["[('Astropy', __version__)]"], {}), "([('Astropy', __version__)])\n", (773, 801), False, 'from collections import OrderedDict\n'), ((1989, 1999), 'platform.platform', 'platform', ([], {}), '()\n', (1997, 1999), False, 'from platform import platform\n'), ((2292, 2316), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (2314, 2316), False, 'import sys\n'), ((2326, 2355), 'locale.getpreferredencoding', 'locale.getpreferredencoding', ([], {}), '()\n', (2353, 2355), False, 'import locale\n'), ((2365, 2392), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (2390, 2392), False, 'import sys\n'), ((2642, 2677), 'astropy.tests.helper.ignore_warnings', 'ignore_warnings', (['DeprecationWarning'], {}), '(DeprecationWarning)\n', (2657, 2677), False, 'from astropy.tests.helper import ignore_warnings\n'), ((2704, 2729), 'astropy.utils.introspection.resolve_name', 'resolve_name', (['module_name'], {}), '(module_name)\n', (2716, 2729), False, 'from astropy.utils.introspection import resolve_name\n'), ((1901, 1924), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1922, 1924), False, 'import datetime\n')]
|
#!/usr/local/bin/python3.6
"""
Copyright (c) 2015-2019 <NAME> <<EMAIL>>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
query suricata alert log
"""
import sys
import os.path
import re
import sre_constants
import shlex
import ujson
sys.path.insert(0, "/usr/local/opnsense/site-python")
from log_helper import reverse_log_reader
from params import update_params
from lib import suricata_alert_log
if __name__ == '__main__':
# handle parameters
parameters = {'limit': '0', 'offset': '0', 'filter': '', 'fileid': ''}
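    # defaults: no row limit (0 = unlimited), offset 0, no filter, primary alert log file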
update_params(parameters)
# choose logfile by number
if parameters['fileid'].isdigit():
suricata_log = '%s.%d' % (suricata_alert_log, int(parameters['fileid']))
else:
suricata_log = suricata_alert_log
if parameters['limit'].isdigit():
limit = int(parameters['limit'])
else:
limit = 0
if parameters['offset'].isdigit():
offset = int(parameters['offset'])
else:
offset = 0
data_filters = {}
data_filters_comp = {}
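    # each filter token has the form "field/value"; '*' in the value is a wildcard and is rewritten to '.*' below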
for filter_txt in shlex.split(parameters['filter']):
filterField = filter_txt.split('/')[0]
if filter_txt.find('/') > -1:
data_filters[filterField] = '/'.join(filter_txt.split('/')[1:])
filter_regexp = data_filters[filterField]
filter_regexp = filter_regexp.replace('*', '.*')
filter_regexp = filter_regexp.lower()
try:
data_filters_comp[filterField] = re.compile(filter_regexp)
except sre_constants.error:
                # illegal filter expression: fall back to matching everything
# del data_filters[filterField]
data_filters_comp[filterField] = re.compile('.*')
# filter one specific log line
if 'filepos' in data_filters and data_filters['filepos'].isdigit():
log_start_pos = int(data_filters['filepos'])
else:
log_start_pos = None
# query suricata eve log
result = {'filters': data_filters, 'rows': [], 'total_rows': 0, 'origin': suricata_log.split('/')[-1]}
if os.path.exists(suricata_log):
for line in reverse_log_reader(filename=suricata_log, start_pos=log_start_pos):
try:
record = ujson.loads(line['line'])
except ValueError:
# can not handle line
record = {}
# only process valid alert items
if 'alert' in record:
# add position in file
record['filepos'] = line['pos']
record['fileid'] = parameters['fileid']
# flatten structure
record['alert_sid'] = record['alert']['signature_id']
record['alert_action'] = record['alert']['action']
record['alert'] = record['alert']['signature']
# use filters on data (using regular expressions)
do_output = True
for filterKeys in data_filters:
filter_hit = False
for filterKey in filterKeys.split(','):
if filterKey in record and data_filters_comp[filterKeys].match(
('%s' % record[filterKey]).lower()):
filter_hit = True
if not filter_hit:
do_output = False
if do_output:
result['total_rows'] += 1
if (len(result['rows']) < limit or limit == 0) and result['total_rows'] >= offset:
result['rows'].append(record)
elif result['total_rows'] > offset + limit:
# do not fetch data until end of file...
break
# only try to fetch one line when filepos is given
if log_start_pos is not None:
break
# output results
print(ujson.dumps(result))
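# Illustrative sketch (not part of the original script): shows how one
# "<field>/<expression>" filter term is reduced to the case-insensitive regular
# expression used in the loop above. The sample term and log text are invented.
def _demo_filter_regexp(term='alert/ET SCAN*'):
    field, expr = term.split('/', 1)
    compiled = re.compile(expr.replace('*', '.*').lower())
    return field, bool(compiled.match('ET SCAN Suspicious inbound'.lower()))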
|
[
"sys.path.insert",
"re.compile",
"shlex.split",
"ujson.dumps",
"params.update_params",
"ujson.loads",
"log_helper.reverse_log_reader"
] |
[((1554, 1607), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/usr/local/opnsense/site-python"""'], {}), "(0, '/usr/local/opnsense/site-python')\n", (1569, 1607), False, 'import sys\n'), ((1849, 1874), 'params.update_params', 'update_params', (['parameters'], {}), '(parameters)\n', (1862, 1874), False, 'from params import update_params\n'), ((2371, 2404), 'shlex.split', 'shlex.split', (["parameters['filter']"], {}), "(parameters['filter'])\n", (2382, 2404), False, 'import shlex\n'), ((3416, 3482), 'log_helper.reverse_log_reader', 'reverse_log_reader', ([], {'filename': 'suricata_log', 'start_pos': 'log_start_pos'}), '(filename=suricata_log, start_pos=log_start_pos)\n', (3434, 3482), False, 'from log_helper import reverse_log_reader\n'), ((5192, 5211), 'ujson.dumps', 'ujson.dumps', (['result'], {}), '(result)\n', (5203, 5211), False, 'import ujson\n'), ((2798, 2823), 're.compile', 're.compile', (['filter_regexp'], {}), '(filter_regexp)\n', (2808, 2823), False, 'import re\n'), ((3526, 3551), 'ujson.loads', 'ujson.loads', (["line['line']"], {}), "(line['line'])\n", (3537, 3551), False, 'import ujson\n'), ((3005, 3021), 're.compile', 're.compile', (['""".*"""'], {}), "('.*')\n", (3015, 3021), False, 'import re\n')]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from DictionaryEntryDialog import DictionaryEntryDialog
class DictionaryListCtrl(wx.ListCtrl):
def __init__(self,parent):
wx.ListCtrl.__init__(self,parent,PROJECTSETTINGS_LISTDICTIONARY_ID,size=wx.DefaultSize,style=wx.LC_REPORT | wx.LC_SORT_ASCENDING)
self.keys = []
self.InsertColumn(0,'Name')
self.SetColumnWidth(0,150)
self.InsertColumn(1,'Definition')
self.SetColumnWidth(1,300)
self.theSelectedIdx = -1
self.theMenu = wx.Menu()
self.theMenu.Append(DICTIONARYLISTCTRL_MENUADD_ID,'Add')
self.theMenu.Append(DICTIONARYLISTCTRL_MENUDELETE_ID,'Delete')
self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK,self.OnRightDown)
self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onEntryActivated)
wx.EVT_MENU(self.theMenu,DICTIONARYLISTCTRL_MENUADD_ID,self.onAddEntry)
wx.EVT_MENU(self.theMenu,DICTIONARYLISTCTRL_MENUDELETE_ID,self.onDeleteEntry)
def OnItemSelected(self,evt):
self.theSelectedIdx = evt.GetIndex()
def OnItemDeselected(self,evt):
self.theSelectedIdx = -1
def OnRightDown(self,evt):
self.PopupMenu(self.theMenu)
def onAddEntry(self,evt):
dlg = DictionaryEntryDialog(self)
if (dlg.ShowModal() == DICTIONARYENTRY_BUTTONCOMMIT_ID):
name = dlg.name()
definition = dlg.definition()
idx = self.GetItemCount()
self.InsertStringItem(idx,name)
self.SetStringItem(idx,1,definition)
def onDeleteEntry(self,evt):
if (self.theSelectedIdx == -1):
errorText = 'No entry selected'
errorLabel = 'Delete definition'
dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
else:
selectedValue = self.GetItemText(self.theSelectedIdx)
self.DeleteItem(self.theSelectedIdx)
def onEntryActivated(self,evt):
self.theSelectedIdx = evt.GetIndex()
name = self.GetItemText(self.theSelectedIdx)
definition = self.GetItem(self.theSelectedIdx,1)
dlg = DictionaryEntryDialog(self,name,definition.GetText())
if (dlg.ShowModal() == DICTIONARYENTRY_BUTTONCOMMIT_ID):
self.SetStringItem(self.theSelectedIdx,0,dlg.name())
self.SetStringItem(self.theSelectedIdx,1,dlg.definition())
def load(self,entries):
self.keys = entries.keys()
self.keys.sort()
for name in self.keys:
idx = self.GetItemCount()
self.InsertStringItem(idx,name)
self.SetStringItem(idx,1,entries[name])
def dimensions(self):
entries = []
for x in range(self.GetItemCount()):
name = self.GetItemText(x)
definition = self.GetItem(x,1)
entries.append((name,definition.GetText()))
return entries
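# Illustrative sketch (not part of the original widget): minimal use of the control,
# assuming a running wx.App, a parent window and the CAIRIS id constants imported
# above. The sample entries are invented; load() expects a name -> definition dict.
def _demoDictionaryListCtrl(parent):
  ctrl = DictionaryListCtrl(parent)
  ctrl.load({'Asset': 'Something of value', 'Threat': 'A potential cause of harm'})
  return ctrl.dimensions()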
|
[
"wx.ListCtrl.__init__",
"DictionaryEntryDialog.DictionaryEntryDialog",
"wx.Menu",
"wx.MessageDialog",
"wx.EVT_MENU"
] |
[((970, 1108), 'wx.ListCtrl.__init__', 'wx.ListCtrl.__init__', (['self', 'parent', 'PROJECTSETTINGS_LISTDICTIONARY_ID'], {'size': 'wx.DefaultSize', 'style': '(wx.LC_REPORT | wx.LC_SORT_ASCENDING)'}), '(self, parent, PROJECTSETTINGS_LISTDICTIONARY_ID, size=\n wx.DefaultSize, style=wx.LC_REPORT | wx.LC_SORT_ASCENDING)\n', (990, 1108), False, 'import wx\n'), ((1299, 1308), 'wx.Menu', 'wx.Menu', ([], {}), '()\n', (1306, 1308), False, 'import wx\n'), ((1692, 1765), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self.theMenu', 'DICTIONARYLISTCTRL_MENUADD_ID', 'self.onAddEntry'], {}), '(self.theMenu, DICTIONARYLISTCTRL_MENUADD_ID, self.onAddEntry)\n', (1703, 1765), False, 'import wx\n'), ((1768, 1847), 'wx.EVT_MENU', 'wx.EVT_MENU', (['self.theMenu', 'DICTIONARYLISTCTRL_MENUDELETE_ID', 'self.onDeleteEntry'], {}), '(self.theMenu, DICTIONARYLISTCTRL_MENUDELETE_ID, self.onDeleteEntry)\n', (1779, 1847), False, 'import wx\n'), ((2086, 2113), 'DictionaryEntryDialog.DictionaryEntryDialog', 'DictionaryEntryDialog', (['self'], {}), '(self)\n', (2107, 2113), False, 'from DictionaryEntryDialog import DictionaryEntryDialog\n'), ((2505, 2557), 'wx.MessageDialog', 'wx.MessageDialog', (['self', 'errorText', 'errorLabel', 'wx.OK'], {}), '(self, errorText, errorLabel, wx.OK)\n', (2521, 2557), False, 'import wx\n')]
|
from __future__ import print_function, division
import numpy as np
import Nio
import time, os
#
# Creating a file
#
init_time = time.clock()
ncfile = 'test-large.nc'
if (os.path.exists(ncfile)):
os.system("/bin/rm -f " + ncfile)
opt = Nio.options()
opt.Format = "LargeFile"
opt.PreFill = False
file = Nio.open_file(ncfile, 'w', options=opt)
file.title = "Testing large files and dimensions"
file.create_dimension('big', 2500000000)
bigvar = file.create_variable('bigvar', "b", ('big',))
print("created bigvar")
# note it is incredibly slow to write a scalar to a large file variable
# so create a temporary variable x that will get assigned in steps
x = np.empty(1000000,dtype = 'int8')
#print x
x[:] = 42
t = list(range(0,2500000000,1000000))
ii = 0
for i in t:
if (i == 0):
continue
print(t[ii],i)
bigvar[t[ii]:i] = x[:]
ii += 1
x[:] = 84
bigvar[2499000000:2500000000] = x[:]
bigvar[-1] = 84
bigvar.units = "big var units"
#print bigvar[-1]
print(bigvar.dimensions)
# check unlimited status
for dim in list(file.dimensions.keys()):
print(dim, " unlimited: ",file.unlimited(dim))
print(file)
print("closing file")
print('elapsed time: ',time.clock() - init_time)
file.close()
#quit()
#
# Reading a file
#
print('opening file for read')
print('elapsed time: ',time.clock() - init_time)
file = Nio.open_file(ncfile, 'r')
print('file is open')
print('elapsed time: ',time.clock() - init_time)
print(file.dimensions)
print(list(file.variables.keys()))
print(file)
print("reading variable")
print('elapsed time: ',time.clock() - init_time)
x = file.variables['bigvar']
print(x[0],x[1000000],x[249000000],x[2499999999])
print("max and min")
min = x[:].min()
max = x[:].max()
print(min, max)
print('elapsed time: ',time.clock() - init_time)
# check unlimited status
for dim in list(file.dimensions.keys()):
print(dim, " unlimited: ",file.unlimited(dim))
print("closing file")
print('elapsed time: ',time.clock() - init_time)
file.close()
|
[
"os.path.exists",
"time.clock",
"numpy.empty",
"Nio.options",
"Nio.open_file",
"os.system"
] |
[((129, 141), 'time.clock', 'time.clock', ([], {}), '()\n', (139, 141), False, 'import time, os\n'), ((171, 193), 'os.path.exists', 'os.path.exists', (['ncfile'], {}), '(ncfile)\n', (185, 193), False, 'import time, os\n'), ((238, 251), 'Nio.options', 'Nio.options', ([], {}), '()\n', (249, 251), False, 'import Nio\n'), ((304, 343), 'Nio.open_file', 'Nio.open_file', (['ncfile', '"""w"""'], {'options': 'opt'}), "(ncfile, 'w', options=opt)\n", (317, 343), False, 'import Nio\n'), ((662, 693), 'numpy.empty', 'np.empty', (['(1000000)'], {'dtype': '"""int8"""'}), "(1000000, dtype='int8')\n", (670, 693), True, 'import numpy as np\n'), ((1321, 1347), 'Nio.open_file', 'Nio.open_file', (['ncfile', '"""r"""'], {}), "(ncfile, 'r')\n", (1334, 1347), False, 'import Nio\n'), ((198, 231), 'os.system', 'os.system', (["('/bin/rm -f ' + ncfile)"], {}), "('/bin/rm -f ' + ncfile)\n", (207, 231), False, 'import time, os\n'), ((1166, 1178), 'time.clock', 'time.clock', ([], {}), '()\n', (1176, 1178), False, 'import time, os\n'), ((1288, 1300), 'time.clock', 'time.clock', ([], {}), '()\n', (1298, 1300), False, 'import time, os\n'), ((1394, 1406), 'time.clock', 'time.clock', ([], {}), '()\n', (1404, 1406), False, 'import time, os\n'), ((1539, 1551), 'time.clock', 'time.clock', ([], {}), '()\n', (1549, 1551), False, 'import time, os\n'), ((1738, 1750), 'time.clock', 'time.clock', ([], {}), '()\n', (1748, 1750), False, 'import time, os\n'), ((1926, 1938), 'time.clock', 'time.clock', ([], {}), '()\n', (1936, 1938), False, 'import time, os\n')]
|
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# import boto3 clients
ecs = boto3.client("ecs")
# loop through ECS Clusters
def list_clusters(cache):
response = cache.get("list_clusters")
if response:
return response
cache["list_clusters"] = ecs.list_clusters()
return cache["list_clusters"]
@registry.register_check("ecs")
def ecs_cluster_container_insights_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.1] ECS clusters should have container insights enabled"""
response = list_clusters(cache)
myEcsClusters = response["clusterArns"]
for clusters in myEcsClusters:
clusterArn = str(clusters)
try:
response = ecs.describe_clusters(clusters=[clusterArn])
for clusterinfo in response["clusters"]:
clusterName = str(clusterinfo["clusterName"])
ecsClusterArn = str(clusterinfo["clusterArn"])
for settings in clusterinfo["settings"]:
contInsightsCheck = str(settings["value"])
# ISO Time
iso8601Time = (
datetime.datetime.utcnow()
.replace(tzinfo=datetime.timezone.utc)
.isoformat()
)
if contInsightsCheck == "disabled":
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-container-insights-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "LOW"},
"Confidence": 99,
"Title": "[ECS.1] ECS clusters should have container insights enabled",
"Description": "ECS cluster "
+ clusterName
+ " does not have container insights enabled. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-container-insights-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.1] ECS clusters should have container insights enabled",
"Description": "ECS cluster "
+ clusterName
+ " has container insights enabled.",
"Remediation": {
"Recommendation": {
"Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide",
"Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF DE.AE-3",
"NIST SP 800-53 AU-6",
"NIST SP 800-53 CA-7",
"NIST SP 800-53 IR-4",
"NIST SP 800-53 IR-5",
"NIST SP 800-53 IR-8",
"NIST SP 800-53 SI-4",
"AICPA TSC CC7.2",
"ISO 27001:2013 A.12.4.1",
"ISO 27001:2013 A.16.1.7",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_cluster_default_provider_strategy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured"""
response = list_clusters(cache)
myEcsClusters = response["clusterArns"]
for clusters in myEcsClusters:
clusterArn = str(clusters)
try:
response = ecs.describe_clusters(clusters=[clusterArn])
for clusterinfo in response["clusters"]:
clusterName = str(clusterinfo["clusterName"])
ecsClusterArn = str(clusterinfo["clusterArn"])
defaultProviderStratCheck = str(clusterinfo["defaultCapacityProviderStrategy"])
# ISO Time
iso8601Time = (
datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
)
if defaultProviderStratCheck == "[]":
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-default-provider-strategy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured",
"Description": "ECS cluster "
+ clusterName
+ " does not have a default provider strategy configured. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": ecsClusterArn + "/ecs-cluster-default-provider-strategy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": ecsClusterArn,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured",
"Description": "ECS cluster "
+ clusterName
+ " has a default provider strategy configured.",
"Remediation": {
"Recommendation": {
"Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsCluster",
"Id": ecsClusterArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"ClusterName": clusterName}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF ID.AM-2",
"NIST SP 800-53 CM-8",
"NIST SP 800-53 PM-5",
"AICPA TSC CC3.2",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.1.1",
"ISO 27001:2013 A.8.1.2",
"ISO 27001:2013 A.12.5.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_task_definition_privileged_container_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.3] ECS Task Definitions should not run privileged containers if not required"""
for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']:
try:
response = ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]
taskDefinitionArn = str(response['taskDefinitionArn'])
tdefFamily = str(response["family"])
# Loop container definitions
for cdef in response["containerDefinitions"]:
# ISO Time
iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
cdefName = str(cdef["name"])
# We are going to assume that if there is not a privileged flag...that it is ;)
try:
privCheck = str(cdef["privileged"])
except:
privCheck = 'UNKNOWN'
if privCheck != 'False':
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Privilege Escalation"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has defined a Privileged container, which should be avoided unless absolutely necessary. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"TTPs/Privilege Escalation"
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has not defined a Privileged container.",
"Remediation": {
"Recommendation": {
"Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.AC-1",
"NIST SP 800-53 AC-1",
"NIST SP 800-53 AC-2",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-3",
"NIST SP 800-53 IA-4",
"NIST SP 800-53 IA-5",
"NIST SP 800-53 IA-6",
"NIST SP 800-53 IA-7",
"NIST SP 800-53 IA-8",
"NIST SP 800-53 IA-9",
"NIST SP 800-53 IA-10",
"NIST SP 800-53 IA-11",
"AICPA TSC CC6.1",
"AICPA TSC CC6.2",
"ISO 27001:2013 A.9.2.1",
"ISO 27001:2013 A.9.2.2",
"ISO 27001:2013 A.9.2.3",
"ISO 27001:2013 A.9.2.4",
"ISO 27001:2013 A.9.2.6",
"ISO 27001:2013 A.9.3.1",
"ISO 27001:2013 A.9.4.2",
"ISO 27001:2013 A.9.4.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
except Exception as e:
print(e)
@registry.register_check("ecs")
def ecs_task_definition_security_labels_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured"""
for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']:
try:
response = ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"]
taskDefinitionArn = str(response["taskDefinitionArn"])
tdefFamily = str(response["family"])
# If there is a network mode of "awsvpc" it is likely a Fargate task - even though EC2 compute can run with that...
# time for some funky edge cases, keep that in mind before you yeet an issue at me, please ;)
if str(response["networkMode"]) == 'awsvpc':
continue
else:
# Loop container definitions
for cdef in response["containerDefinitions"]:
# ISO Time
iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat())
cdefName = str(cdef["name"])
try:
# This is a passing check
secOpts = str(cdef["dockerSecurityOptions"])
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " has Docker Security Options configured.",
"Remediation": {
"Recommendation": {
"Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName,
'DockerSecurityOptions': secOpts
}
}
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED"
}
yield finding
except:
secOpts = str('["NO_OPTIONS"]')
finding = {
"SchemaVersion": "2018-10-08",
"Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": taskDefinitionArn + "/" + cdefName,
"AwsAccountId": awsAccountId,
"Types": ["Software and Configuration Checks/AWS Security Best Practices"],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured",
"Description": "ECS Container Definition "
+ cdefName
+ " in Task Definition "
+ taskDefinitionArn
+ " does not have any Docker Security Options configured. Refer to the remediation instructions to remediate this behavior",
"Remediation": {
"Recommendation": {
"Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide",
"Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions"
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsEcsTaskDefinition",
"Id": taskDefinitionArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {
"Other": {
"Family": tdefFamily,
"ContainerDefinitionName": cdefName,
'DockerSecurityOptions': secOpts
}
}
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.IP-1",
"NIST SP 800-53 CM-2",
"NIST SP 800-53 CM-3",
"NIST SP 800-53 CM-4",
"NIST SP 800-53 CM-5",
"NIST SP 800-53 CM-6",
"NIST SP 800-53 CM-7",
"NIST SP 800-53 CM-9",
"NIST SP 800-53 SA-10",
"AICPA TSC A1.3",
"AICPA TSC CC1.4",
"AICPA TSC CC5.3",
"AICPA TSC CC6.2",
"AICPA TSC CC7.1",
"AICPA TSC CC7.3",
"AICPA TSC CC7.4",
"ISO 27001:2013 A.12.1.2",
"ISO 27001:2013 A.12.5.1",
"ISO 27001:2013 A.12.6.2",
"ISO 27001:2013 A.14.2.2",
"ISO 27001:2013 A.14.2.3",
"ISO 27001:2013 A.14.2.4",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE"
}
yield finding
except Exception as e:
print(e)
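# Illustrative sketch (not part of the original checks): every registered check above
# is a generator of Security Hub (ASFF) finding dicts, so a caller simply iterates it.
# The account id and region are placeholders, and iterating triggers the boto3 calls
# above, so valid AWS credentials are assumed.
def _demo_run_container_insights_check():
    findings = ecs_cluster_container_insights_check(
        cache={}, awsAccountId="111111111111", awsRegion="us-east-1", awsPartition="aws")
    for finding in findings:
        print(finding["Id"], finding["Compliance"]["Status"])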
|
[
"check_register.CheckRegister",
"boto3.client",
"datetime.datetime.utcnow"
] |
[((923, 938), 'check_register.CheckRegister', 'CheckRegister', ([], {}), '()\n', (936, 938), False, 'from check_register import CheckRegister\n'), ((969, 988), 'boto3.client', 'boto3.client', (['"""ecs"""'], {}), "('ecs')\n", (981, 988), False, 'import boto3\n'), ((9747, 9773), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (9771, 9773), False, 'import datetime\n'), ((16546, 16572), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (16570, 16572), False, 'import datetime\n'), ((2050, 2076), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (2074, 2076), False, 'import datetime\n'), ((26839, 26865), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (26863, 26865), False, 'import datetime\n')]
|
import unittest
from .mocks import BotoSessionMock
from push_notification import apns
class APNSTestCase(unittest.TestCase):
def_apns_category = 'MGDailyDealCategory'
# def setUp(self):
# def tearDown(self):
# push_notification
# push_background
# make_new_deal_message
# make_delta_message
def test_make_delta_comment_1(self):
deal_id = 'a6k5A000000kP9LQAU'
delta_type = 'commentCount'
delta_value = 5
message = {
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
expected = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "commentCount", '
'"delta-value": 5}'
)
result = apns.make_delta_message(message)
self.assertEqual(result, expected)
def test_make_delta_status_1(self):
deal_id = 'a6k5A000000kP9LQAU'
delta_type = 'launchStatus'
delta_value = 'launch'
message = {
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
expected = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
result = apns.make_delta_message(message)
self.assertEqual(result, expected)
# publish_message
def test_publish_delta_status_prod(self):
message = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
# deal_id = 'a6k5A000000kP9LQAU'
# delta_type = 'launchStatus'
# delta_value = 'launch'
# message = (
# '{"aps": {"content-available": 1}, '
# f'"deal-id": "{deal_id}", '
# f'"delta-type": "{delta_type}", '
# f'"delta-value": "{delta_value}"'
# '}'
# )
session = BotoSessionMock()
default_message='default message'
apns_server = 'prod'
apns.publish_message(session,
topic_arn='fake_topic_arn',
apns_server=apns_server,
apns_message=message,
default_message=default_message)
expected = (
'{'
'"default": "default message", '
'"APNS": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}"'
'}'
)
result = session.client.message
self.assertEqual(result, expected)
def test_publish_delta_status_dev(self):
message = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
session = BotoSessionMock()
default_message='default message'
apns_server = 'dev'
apns.publish_message(session,
topic_arn='fake_topic_arn',
apns_server=apns_server,
apns_message=message,
default_message=default_message)
expected = (
'{'
'"default": "default message", '
'"APNS_SANDBOX": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}"'
'}'
)
result = session.client.message
self.assertEqual(result, expected)
def test_publish_delta_status_both(self):
message = (
'{"aps": {"content-available": 1}, '
'"deal-id": "a6k5A000000kP9LQAU", '
'"delta-type": "launchStatus", '
'"delta-value": "launch"}'
)
session = BotoSessionMock()
default_message='default message'
apns_server = 'both'
apns.publish_message(session,
topic_arn='fake_topic_arn',
apns_server=apns_server,
apns_message=message,
default_message=default_message)
expected = (
'{'
'"default": "default message", '
'"APNS": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}", '
'"APNS_SANDBOX": "{'
'\\"aps\\": {'
'\\"content-available\\": 1'
'}, '
'\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", '
'\\"delta-type\\": \\"launchStatus\\", '
'\\"delta-value\\": \\"launch\\"'
'}"'
'}'
)
result = session.client.message
self.assertEqual(result, expected)
def test_publish_invalid_server(self):
session = BotoSessionMock()
topic_arn='fake_topic_arn'
apns_server = 'meh'
apns_message ='{"aps": {"content-available": 1}'
default_message='default message'
self.assertRaises(
ValueError, apns.publish_message, session, topic_arn, apns_server, apns_message, default_message)
# _make_background_notification
def test_make_background_notification_no_additional(self):
additional = None
expected = {
'aps': {
'content-available': 1
}
}
result = apns._make_background_notification(additional)
self.assertEqual(result, expected)
def test_make_background_notification_with_additional(self):
deal_id = 'a6k5A000000kP9LQAU'
delta_type = 'commentCount'
delta_value = 5
additional = {
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
expected = {
'aps': {
'content-available': 1
},
'id': deal_id,
'delta_type': delta_type,
'delta_value': delta_value
}
result = apns._make_background_notification(additional)
self.assertDictEqual(result, expected)
# _make_notification
# def test_make_notification_1(self):
# raise_for_status
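# Illustrative sketch (not part of the original tests): the expected strings above are
# doubly encoded JSON; the APNS payload is itself a JSON string embedded in the outer
# SNS envelope, which is what produces the \" escapes.
def _demo_sns_apns_envelope():
    import json
    apns_payload = {'aps': {'content-available': 1}, 'deal-id': 'a6k5A000000kP9LQAU'}
    return json.dumps({'default': 'default message', 'APNS': json.dumps(apns_payload)})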
|
[
"push_notification.apns.publish_message",
"push_notification.apns._make_background_notification",
"push_notification.apns.make_delta_message"
] |
[((836, 868), 'push_notification.apns.make_delta_message', 'apns.make_delta_message', (['message'], {}), '(message)\n', (859, 868), False, 'from push_notification import apns\n'), ((1442, 1474), 'push_notification.apns.make_delta_message', 'apns.make_delta_message', (['message'], {}), '(message)\n', (1465, 1474), False, 'from push_notification import apns\n'), ((2270, 2412), 'push_notification.apns.publish_message', 'apns.publish_message', (['session'], {'topic_arn': '"""fake_topic_arn"""', 'apns_server': 'apns_server', 'apns_message': 'message', 'default_message': 'default_message'}), "(session, topic_arn='fake_topic_arn', apns_server=\n apns_server, apns_message=message, default_message=default_message)\n", (2290, 2412), False, 'from push_notification import apns\n'), ((3373, 3515), 'push_notification.apns.publish_message', 'apns.publish_message', (['session'], {'topic_arn': '"""fake_topic_arn"""', 'apns_server': 'apns_server', 'apns_message': 'message', 'default_message': 'default_message'}), "(session, topic_arn='fake_topic_arn', apns_server=\n apns_server, apns_message=message, default_message=default_message)\n", (3393, 3515), False, 'from push_notification import apns\n'), ((4486, 4628), 'push_notification.apns.publish_message', 'apns.publish_message', (['session'], {'topic_arn': '"""fake_topic_arn"""', 'apns_server': 'apns_server', 'apns_message': 'message', 'default_message': 'default_message'}), "(session, topic_arn='fake_topic_arn', apns_server=\n apns_server, apns_message=message, default_message=default_message)\n", (4506, 4628), False, 'from push_notification import apns\n'), ((6141, 6187), 'push_notification.apns._make_background_notification', 'apns._make_background_notification', (['additional'], {}), '(additional)\n', (6175, 6187), False, 'from push_notification import apns\n'), ((6763, 6809), 'push_notification.apns._make_background_notification', 'apns._make_background_notification', (['additional'], {}), '(additional)\n', (6797, 6809), False, 'from push_notification import apns\n')]
|
import analyseGithub
def test_containsGithubURL_empty():
assert not analyseGithub.containsGitHubURL("")
def test_containsGithubURL_noUrl():
assert not analyseGithub.containsGitHubURL("Some test tweet")
def test_containsGithubURL_url():
repo = "https://github.com/git/git"
assert analyseGithub.containsGitHubURL(repo)
def test_extractGitHubLink():
repo = "https://github.com/git/git"
assert analyseGithub.extractGitHubLink(f"{repo} more tweet") == "git/git"
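# Illustrative sketch (not part of the original tests): one plausible regex-based
# behaviour that satisfies the expectations above; it is not necessarily how
# analyseGithub itself implements the two helpers.
def _demo_extract_github_repo(text):
    import re
    match = re.search(r"https://github\.com/([\w.-]+/[\w.-]+)", text)
    return match.group(1) if match else None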
|
[
"analyseGithub.containsGitHubURL",
"analyseGithub.extractGitHubLink"
] |
[((298, 335), 'analyseGithub.containsGitHubURL', 'analyseGithub.containsGitHubURL', (['repo'], {}), '(repo)\n', (329, 335), False, 'import analyseGithub\n'), ((73, 108), 'analyseGithub.containsGitHubURL', 'analyseGithub.containsGitHubURL', (['""""""'], {}), "('')\n", (104, 108), False, 'import analyseGithub\n'), ((161, 211), 'analyseGithub.containsGitHubURL', 'analyseGithub.containsGitHubURL', (['"""Some test tweet"""'], {}), "('Some test tweet')\n", (192, 211), False, 'import analyseGithub\n'), ((418, 471), 'analyseGithub.extractGitHubLink', 'analyseGithub.extractGitHubLink', (['f"""{repo} more tweet"""'], {}), "(f'{repo} more tweet')\n", (449, 471), False, 'import analyseGithub\n')]
|
###############################################################################
# Author: <NAME>
# Project: ARC-II: Convolutional Matching Model
# Date Created: 7/18/2017
#
# File Description: This script contains ranking evaluation functions.
###############################################################################
import torch, numpy
def mean_average_precision(logits, target):
"""
Compute mean average precision.
:param logits: 2d tensor [batch_size x num_clicks_per_query]
:param target: 2d tensor [batch_size x num_clicks_per_query]
:return: mean average precision [a float value]
"""
assert logits.size() == target.size()
sorted, indices = torch.sort(logits, 1, descending=True)
map = 0
for i in range(indices.size(0)):
average_precision = 0
num_rel = 0
for j in range(indices.size(1)):
if target[i, indices[i, j].data[0]].data[0] == 1:
num_rel += 1
average_precision += num_rel / (j + 1)
average_precision = average_precision / num_rel
map += average_precision
return map / indices.size(0)
def NDCG(logits, target, k):
"""
Compute normalized discounted cumulative gain.
:param logits: 2d tensor [batch_size x rel_docs_per_query]
:param target: 2d tensor [batch_size x rel_docs_per_query]
:return: mean average precision [a float value]
"""
assert logits.size() == target.size()
assert logits.size(1) >= k, 'NDCG@K cannot be computed, invalid value of K.'
sorted, indices = torch.sort(logits, 1, descending=True)
NDCG = 0
for i in range(indices.size(0)):
DCG_ref = 0
num_rel_docs = torch.nonzero(target[i].data).size(0)
for j in range(indices.size(1)):
if j == k:
break
if target[i, indices[i, j].data[0]].data[0] == 1:
DCG_ref += 1 / numpy.log2(j + 2)
DCG_gt = 0
for j in range(num_rel_docs):
if j == k:
break
DCG_gt += 1 / numpy.log2(j + 2)
NDCG += DCG_ref / DCG_gt
return NDCG / indices.size(0)
def MRR(logits, target):
"""
Compute mean reciprocal rank.
:param logits: 2d tensor [batch_size x rel_docs_per_query]
:param target: 2d tensor [batch_size x rel_docs_per_query]
:return: mean reciprocal rank [a float value]
"""
assert logits.size() == target.size()
sorted, indices = torch.sort(logits, 1, descending=True)
total_reciprocal_rank = 0
for i in range(indices.size(0)):
for j in range(indices.size(1)):
if target[i, indices[i, j].data[0]].data[0] == 1:
total_reciprocal_rank += 1.0 / (j + 1)
break
return total_reciprocal_rank / logits.size(0)
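# Illustrative sketch (not part of the original module): a hand-worked mean reciprocal
# rank on plain relevance lists, kept separate from the torch code above (which relies
# on the old `.data[0]` indexing style). For two queries whose first relevant documents
# sit at ranks 1 and 3, MRR = (1/1 + 1/3) / 2 = 2/3.
def _demo_mrr(ranked_relevance):
    reciprocals = []
    for labels in ranked_relevance:
        rank = next((i + 1 for i, rel in enumerate(labels) if rel), None)
        reciprocals.append(1.0 / rank if rank else 0.0)
    return sum(reciprocals) / len(ranked_relevance)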
|
[
"torch.sort",
"numpy.log2",
"torch.nonzero"
] |
[((708, 746), 'torch.sort', 'torch.sort', (['logits', '(1)'], {'descending': '(True)'}), '(logits, 1, descending=True)\n', (718, 746), False, 'import torch, numpy\n'), ((1603, 1641), 'torch.sort', 'torch.sort', (['logits', '(1)'], {'descending': '(True)'}), '(logits, 1, descending=True)\n', (1613, 1641), False, 'import torch, numpy\n'), ((2531, 2569), 'torch.sort', 'torch.sort', (['logits', '(1)'], {'descending': '(True)'}), '(logits, 1, descending=True)\n', (2541, 2569), False, 'import torch, numpy\n'), ((1739, 1768), 'torch.nonzero', 'torch.nonzero', (['target[i].data'], {}), '(target[i].data)\n', (1752, 1768), False, 'import torch, numpy\n'), ((2112, 2129), 'numpy.log2', 'numpy.log2', (['(j + 2)'], {}), '(j + 2)\n', (2122, 2129), False, 'import torch, numpy\n'), ((1961, 1978), 'numpy.log2', 'numpy.log2', (['(j + 2)'], {}), '(j + 2)\n', (1971, 1978), False, 'import torch, numpy\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import cv2
import math
import numpy as np
import paddle
import yaml
from det_keypoint_unite_utils import argsparser
from preprocess import decode_image
from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log
from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint
from visualize import visualize_pose
from benchmark_utils import PaddleInferBenchmark
from utils import get_current_memory_mb
from keypoint_postprocess import translate_to_ori_images
KEYPOINT_SUPPORT_MODELS = {
'HigherHRNet': 'keypoint_bottomup',
'HRNet': 'keypoint_topdown'
}
def predict_with_given_det(image, det_res, keypoint_detector,
keypoint_batch_size, run_benchmark):
rec_images, records, det_rects = keypoint_detector.get_person_from_rect(
image, det_res)
keypoint_vector = []
score_vector = []
rect_vector = det_rects
keypoint_results = keypoint_detector.predict_image(
rec_images, run_benchmark, repeats=10, visual=False)
keypoint_vector, score_vector = translate_to_ori_images(keypoint_results,
np.array(records))
keypoint_res = {}
keypoint_res['keypoint'] = [
keypoint_vector.tolist(), score_vector.tolist()
] if len(keypoint_vector) > 0 else [[], []]
keypoint_res['bbox'] = rect_vector
return keypoint_res
def topdown_unite_predict(detector,
topdown_keypoint_detector,
image_list,
keypoint_batch_size=1,
save_res=False):
det_timer = detector.get_timer()
store_res = []
for i, img_file in enumerate(image_list):
# Decode image in advance in det + pose prediction
det_timer.preprocess_time_s.start()
image, _ = decode_image(img_file, {})
det_timer.preprocess_time_s.end()
if FLAGS.run_benchmark:
results = detector.predict_image(
[image], run_benchmark=True, repeats=10)
cm, gm, gu = get_current_memory_mb()
detector.cpu_mem += cm
detector.gpu_mem += gm
detector.gpu_util += gu
else:
results = detector.predict_image([image], visual=False)
results = detector.filter_box(results, FLAGS.det_threshold)
if results['boxes_num'] > 0:
keypoint_res = predict_with_given_det(
image, results, topdown_keypoint_detector, keypoint_batch_size,
FLAGS.run_benchmark)
if save_res:
save_name = img_file if isinstance(img_file, str) else i
store_res.append([
save_name, keypoint_res['bbox'],
[keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
])
else:
results["keypoint"] = [[], []]
keypoint_res = results
if FLAGS.run_benchmark:
cm, gm, gu = get_current_memory_mb()
topdown_keypoint_detector.cpu_mem += cm
topdown_keypoint_detector.gpu_mem += gm
topdown_keypoint_detector.gpu_util += gu
else:
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
visualize_pose(
img_file,
keypoint_res,
visual_thresh=FLAGS.keypoint_threshold,
save_dir=FLAGS.output_dir)
if save_res:
"""
1) store_res: a list of image_data
2) image_data: [imageid, rects, [keypoints, scores]]
3) rects: list of rect [xmin, ymin, xmax, ymax]
4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list
5) scores: mean of all joint conf
"""
with open("det_keypoint_unite_image_results.json", 'w') as wf:
json.dump(store_res, wf, indent=4)
def topdown_unite_predict_video(detector,
topdown_keypoint_detector,
camera_id,
keypoint_batch_size=1,
save_res=False):
video_name = 'output.mp4'
if camera_id != -1:
capture = cv2.VideoCapture(camera_id)
else:
capture = cv2.VideoCapture(FLAGS.video_file)
video_name = os.path.split(FLAGS.video_file)[-1]
# Get Video info : resolution, fps, frame count
width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(capture.get(cv2.CAP_PROP_FPS))
frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
print("fps: %d, frame_count: %d" % (fps, frame_count))
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
out_path = os.path.join(FLAGS.output_dir, video_name)
fourcc = cv2.VideoWriter_fourcc(* 'mp4v')
writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height))
index = 0
store_res = []
while (1):
ret, frame = capture.read()
if not ret:
break
index += 1
print('detect frame: %d' % (index))
frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
results = detector.predict_image([frame2], visual=False)
results = detector.filter_box(results, FLAGS.det_threshold)
if results['boxes_num'] == 0:
writer.write(frame)
continue
keypoint_res = predict_with_given_det(
frame2, results, topdown_keypoint_detector, keypoint_batch_size,
FLAGS.run_benchmark)
im = visualize_pose(
frame,
keypoint_res,
visual_thresh=FLAGS.keypoint_threshold,
returnimg=True)
if save_res:
store_res.append([
index, keypoint_res['bbox'],
[keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]]
])
writer.write(im)
if camera_id != -1:
cv2.imshow('Mask Detection', im)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
writer.release()
print('output_video saved to: {}'.format(out_path))
if save_res:
"""
1) store_res: a list of frame_data
2) frame_data: [frameid, rects, [keypoints, scores]]
3) rects: list of rect [xmin, ymin, xmax, ymax]
4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list
5) scores: mean of all joint conf
"""
with open("det_keypoint_unite_video_results.json", 'w') as wf:
json.dump(store_res, wf, indent=4)
def main():
deploy_file = os.path.join(FLAGS.det_model_dir, 'infer_cfg.yml')
with open(deploy_file) as f:
yml_conf = yaml.safe_load(f)
arch = yml_conf['arch']
detector_func = 'Detector'
if arch == 'PicoDet':
detector_func = 'DetectorPicoDet'
detector = eval(detector_func)(FLAGS.det_model_dir,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
threshold=FLAGS.det_threshold)
topdown_keypoint_detector = KeyPointDetector(
FLAGS.keypoint_model_dir,
device=FLAGS.device,
run_mode=FLAGS.run_mode,
batch_size=FLAGS.keypoint_batch_size,
trt_min_shape=FLAGS.trt_min_shape,
trt_max_shape=FLAGS.trt_max_shape,
trt_opt_shape=FLAGS.trt_opt_shape,
trt_calib_mode=FLAGS.trt_calib_mode,
cpu_threads=FLAGS.cpu_threads,
enable_mkldnn=FLAGS.enable_mkldnn,
use_dark=FLAGS.use_dark)
keypoint_arch = topdown_keypoint_detector.pred_config.arch
assert KEYPOINT_SUPPORT_MODELS[
keypoint_arch] == 'keypoint_topdown', 'Detection-Keypoint unite inference only supports topdown models.'
# predict from video file or camera video stream
if FLAGS.video_file is not None or FLAGS.camera_id != -1:
topdown_unite_predict_video(detector, topdown_keypoint_detector,
FLAGS.camera_id, FLAGS.keypoint_batch_size,
FLAGS.save_res)
else:
# predict from image
img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file)
topdown_unite_predict(detector, topdown_keypoint_detector, img_list,
FLAGS.keypoint_batch_size, FLAGS.save_res)
if not FLAGS.run_benchmark:
detector.det_times.info(average=True)
topdown_keypoint_detector.det_times.info(average=True)
else:
mode = FLAGS.run_mode
det_model_dir = FLAGS.det_model_dir
det_model_info = {
'model_name': det_model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
bench_log(detector, img_list, det_model_info, name='Det')
keypoint_model_dir = FLAGS.keypoint_model_dir
keypoint_model_info = {
'model_name': keypoint_model_dir.strip('/').split('/')[-1],
'precision': mode.split('_')[-1]
}
bench_log(topdown_keypoint_detector, img_list, keypoint_model_info,
FLAGS.keypoint_batch_size, 'KeyPoint')
if __name__ == '__main__':
paddle.enable_static()
parser = argsparser()
FLAGS = parser.parse_args()
print_arguments(FLAGS)
FLAGS.device = FLAGS.device.upper()
assert FLAGS.device in ['CPU', 'GPU', 'XPU'
], "device should be CPU, GPU or XPU"
main()
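# Illustrative sketch (not part of the original script): reads back the JSON written
# when FLAGS.save_res is set; the layout follows the store_res comments above, i.e.
# [image_or_frame_id, rects, [keypoints, scores]]. The default path below is the file
# name used for image inputs.
def _demo_load_saved_results(path="det_keypoint_unite_image_results.json"):
    with open(path) as f:
        results = json.load(f)
    for image_id, rects, (keypoints, scores) in results:
        print(image_id, len(rects), "detected person(s)")
    return results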
|
[
"visualize.visualize_pose",
"cv2.imshow",
"numpy.array",
"infer.bench_log",
"os.path.exists",
"infer.get_test_images",
"cv2.VideoWriter",
"paddle.enable_static",
"os.path.split",
"cv2.VideoWriter_fourcc",
"cv2.waitKey",
"utils.get_current_memory_mb",
"cv2.cvtColor",
"det_keypoint_unite_utils.argsparser",
"infer.print_arguments",
"os.makedirs",
"keypoint_infer.KeyPointDetector",
"os.path.join",
"preprocess.decode_image",
"yaml.safe_load",
"cv2.VideoCapture",
"json.dump"
] |
[((5452, 5494), 'os.path.join', 'os.path.join', (['FLAGS.output_dir', 'video_name'], {}), '(FLAGS.output_dir, video_name)\n', (5464, 5494), False, 'import os\n'), ((5508, 5539), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (5530, 5539), False, 'import cv2\n'), ((5554, 5609), 'cv2.VideoWriter', 'cv2.VideoWriter', (['out_path', 'fourcc', 'fps', '(width, height)'], {}), '(out_path, fourcc, fps, (width, height))\n', (5569, 5609), False, 'import cv2\n'), ((7294, 7344), 'os.path.join', 'os.path.join', (['FLAGS.det_model_dir', '"""infer_cfg.yml"""'], {}), "(FLAGS.det_model_dir, 'infer_cfg.yml')\n", (7306, 7344), False, 'import os\n'), ((8232, 8616), 'keypoint_infer.KeyPointDetector', 'KeyPointDetector', (['FLAGS.keypoint_model_dir'], {'device': 'FLAGS.device', 'run_mode': 'FLAGS.run_mode', 'batch_size': 'FLAGS.keypoint_batch_size', 'trt_min_shape': 'FLAGS.trt_min_shape', 'trt_max_shape': 'FLAGS.trt_max_shape', 'trt_opt_shape': 'FLAGS.trt_opt_shape', 'trt_calib_mode': 'FLAGS.trt_calib_mode', 'cpu_threads': 'FLAGS.cpu_threads', 'enable_mkldnn': 'FLAGS.enable_mkldnn', 'use_dark': 'FLAGS.use_dark'}), '(FLAGS.keypoint_model_dir, device=FLAGS.device, run_mode=\n FLAGS.run_mode, batch_size=FLAGS.keypoint_batch_size, trt_min_shape=\n FLAGS.trt_min_shape, trt_max_shape=FLAGS.trt_max_shape, trt_opt_shape=\n FLAGS.trt_opt_shape, trt_calib_mode=FLAGS.trt_calib_mode, cpu_threads=\n FLAGS.cpu_threads, enable_mkldnn=FLAGS.enable_mkldnn, use_dark=FLAGS.\n use_dark)\n', (8248, 8616), False, 'from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint\n'), ((10364, 10386), 'paddle.enable_static', 'paddle.enable_static', ([], {}), '()\n', (10384, 10386), False, 'import paddle\n'), ((10400, 10412), 'det_keypoint_unite_utils.argsparser', 'argsparser', ([], {}), '()\n', (10410, 10412), False, 'from det_keypoint_unite_utils import argsparser\n'), ((10449, 10471), 'infer.print_arguments', 'print_arguments', (['FLAGS'], {}), '(FLAGS)\n', (10464, 10471), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((1804, 1821), 'numpy.array', 'np.array', (['records'], {}), '(records)\n', (1812, 1821), True, 'import numpy as np\n'), ((2490, 2516), 'preprocess.decode_image', 'decode_image', (['img_file', '{}'], {}), '(img_file, {})\n', (2502, 2516), False, 'from preprocess import decode_image\n'), ((4876, 4903), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (4892, 4903), False, 'import cv2\n'), ((4932, 4966), 'cv2.VideoCapture', 'cv2.VideoCapture', (['FLAGS.video_file'], {}), '(FLAGS.video_file)\n', (4948, 4966), False, 'import cv2\n'), ((5365, 5397), 'os.path.exists', 'os.path.exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (5379, 5397), False, 'import os\n'), ((5407, 5436), 'os.makedirs', 'os.makedirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (5418, 5436), False, 'import os\n'), ((5813, 5851), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (5825, 5851), False, 'import cv2\n'), ((6249, 6344), 'visualize.visualize_pose', 'visualize_pose', (['frame', 'keypoint_res'], {'visual_thresh': 'FLAGS.keypoint_threshold', 'returnimg': '(True)'}), '(frame, keypoint_res, visual_thresh=FLAGS.keypoint_threshold,\n returnimg=True)\n', (6263, 6344), False, 'from visualize import visualize_pose\n'), ((7397, 7414), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (7411, 7414), False, 'import yaml\n'), ((9272, 9322), 
'infer.get_test_images', 'get_test_images', (['FLAGS.image_dir', 'FLAGS.image_file'], {}), '(FLAGS.image_dir, FLAGS.image_file)\n', (9287, 9322), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((2721, 2744), 'utils.get_current_memory_mb', 'get_current_memory_mb', ([], {}), '()\n', (2742, 2744), False, 'from utils import get_current_memory_mb\n'), ((3640, 3663), 'utils.get_current_memory_mb', 'get_current_memory_mb', ([], {}), '()\n', (3661, 3663), False, 'from utils import get_current_memory_mb\n'), ((3946, 4056), 'visualize.visualize_pose', 'visualize_pose', (['img_file', 'keypoint_res'], {'visual_thresh': 'FLAGS.keypoint_threshold', 'save_dir': 'FLAGS.output_dir'}), '(img_file, keypoint_res, visual_thresh=FLAGS.\n keypoint_threshold, save_dir=FLAGS.output_dir)\n', (3960, 4056), False, 'from visualize import visualize_pose\n'), ((4519, 4553), 'json.dump', 'json.dump', (['store_res', 'wf'], {'indent': '(4)'}), '(store_res, wf, indent=4)\n', (4528, 4553), False, 'import json\n'), ((4988, 5019), 'os.path.split', 'os.path.split', (['FLAGS.video_file'], {}), '(FLAGS.video_file)\n', (5001, 5019), False, 'import os\n'), ((6643, 6675), 'cv2.imshow', 'cv2.imshow', (['"""Mask Detection"""', 'im'], {}), "('Mask Detection', im)\n", (6653, 6675), False, 'import cv2\n'), ((7227, 7261), 'json.dump', 'json.dump', (['store_res', 'wf'], {'indent': '(4)'}), '(store_res, wf, indent=4)\n', (7236, 7261), False, 'import json\n'), ((9899, 9956), 'infer.bench_log', 'bench_log', (['detector', 'img_list', 'det_model_info'], {'name': '"""Det"""'}), "(detector, img_list, det_model_info, name='Det')\n", (9908, 9956), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((10202, 10313), 'infer.bench_log', 'bench_log', (['topdown_keypoint_detector', 'img_list', 'keypoint_model_info', 'FLAGS.keypoint_batch_size', '"""KeyPoint"""'], {}), "(topdown_keypoint_detector, img_list, keypoint_model_info, FLAGS.\n keypoint_batch_size, 'KeyPoint')\n", (10211, 10313), False, 'from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log\n'), ((3854, 3886), 'os.path.exists', 'os.path.exists', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (3868, 3886), False, 'import os\n'), ((3904, 3933), 'os.makedirs', 'os.makedirs', (['FLAGS.output_dir'], {}), '(FLAGS.output_dir)\n', (3915, 3933), False, 'import os\n'), ((6691, 6705), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (6702, 6705), False, 'import cv2\n')]
|
import random
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from mla.base import BaseEstimator
from mla.metrics.distance import euclidean_distance
random.seed(1111)
class KMeans(BaseEstimator):
"""Partition a dataset into K clusters.
Finds clusters by repeatedly assigning each data point to the cluster with
the nearest centroid and iterating until the assignments converge (meaning
they don't change during an iteration) or the maximum number of iterations
is reached.
Parameters
----------
K : int
The number of clusters into which the dataset is partitioned.
max_iters: int
The maximum iterations of assigning points to the nearest cluster.
Short-circuited by the assignments converging on their own.
init: str, default 'random'
The name of the method used to initialize the first clustering.
'random' - Randomly select values from the dataset as the K centroids.
'++' - Select a random first centroid from the dataset, then select
K - 1 more centroids by choosing values from the dataset with a
probability distribution proportional to the squared distance
from each point's closest existing cluster. Attempts to create
larger distances between initial clusters to improve convergence
rates and avoid degenerate cases.
"""
y_required = False
def __init__(self, K=5, max_iters=100, init='random'):
self.K = K
self.max_iters = max_iters
self.clusters = [[] for _ in range(self.K)]
self.centroids = []
self.init = init
    def _initialize_centroids(self, init):
"""Set the initial centroids."""
if init == 'random':
self.centroids = [self.X[x] for x in
random.sample(range(self.n_samples), self.K)]
elif init == '++':
self.centroids = [random.choice(self.X)]
while len(self.centroids) < self.K:
self.centroids.append(self._choose_next_center())
else:
raise ValueError('Unknown type of init parameter')
def _predict(self, X=None):
"""Perform the clustering on the dataset."""
        self._initialize_centroids(self.init)
centroids = self.centroids
for _ in range(self.max_iters):
self._assign(centroids)
centroids_old = centroids
centroids = [self._get_centroid(cluster) for cluster in self.clusters]
if self._is_converged(centroids_old, centroids):
break
self.centroids = centroids
return self._get_predictions()
def _get_predictions(self):
predictions = np.empty(self.n_samples)
for i, cluster in enumerate(self.clusters):
for index in cluster:
predictions[index] = i
return predictions
def _assign(self, centroids):
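        # Remove each point from its current cluster, then append it to the cluster of its nearest centroid.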
for row in range(self.n_samples):
for i, cluster in enumerate(self.clusters):
if row in cluster:
self.clusters[i].remove(row)
break
closest = self._closest(row, centroids)
self.clusters[closest].append(row)
def _closest(self, fpoint, centroids):
closest_index = None
closest_distance = None
for i, point in enumerate(centroids):
dist = euclidean_distance(self.X[fpoint], point)
if closest_index is None or dist < closest_distance:
closest_index = i
closest_distance = dist
return closest_index
def _get_centroid(self, cluster):
"""Get values by indices and take the mean."""
return [np.mean(np.take(self.X[:, i], cluster)) for i in range(self.n_features)]
def _dist_from_centers(self):
return np.array([min([euclidean_distance(x, c) for c in self.centroids]) for x in self.X])
def _choose_next_center(self):
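        # '++' seeding: sample the next center with probability proportional to each point's distance
        # from its nearest existing centroid (inverse-transform sampling over the cumulative probabilities).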
distances = self._dist_from_centers()
probs = distances / distances.sum()
cumprobs = probs.cumsum()
r = random.random()
ind = np.where(cumprobs >= r)[0][0]
return self.X[ind]
def _is_converged(self, centroids_old, centroids):
        return sum([euclidean_distance(centroids_old[i], centroids[i]) for i in range(self.K)]) == 0
def plot(self, data=None):
sns.set(style="white")
if data is None:
data = self.X
for i, index in enumerate(self.clusters):
point = np.array(data[index]).T
plt.scatter(*point, c=sns.color_palette("hls", self.K + 1)[i])
for point in self.centroids:
plt.scatter(*point, marker='x', linewidths=10)
plt.show()
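# Example usage (sketch; assumes mla.base.BaseEstimator exposes fit()/predict() wrappers around _predict()):
# X = np.random.rand(200, 2)
# km = KMeans(K=3, init='++')
# km.fit(X)
# labels = km.predict()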
|
[
"seaborn.set",
"random.choice",
"seaborn.color_palette",
"mla.metrics.distance.euclidean_distance",
"numpy.where",
"random.seed",
"numpy.take",
"numpy.array",
"numpy.empty",
"matplotlib.pyplot.scatter",
"random.random",
"matplotlib.pyplot.show"
] |
[((177, 194), 'random.seed', 'random.seed', (['(1111)'], {}), '(1111)\n', (188, 194), False, 'import random\n'), ((2761, 2785), 'numpy.empty', 'np.empty', (['self.n_samples'], {}), '(self.n_samples)\n', (2769, 2785), True, 'import numpy as np\n'), ((4152, 4167), 'random.random', 'random.random', ([], {}), '()\n', (4165, 4167), False, 'import random\n'), ((4455, 4477), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (4462, 4477), True, 'import seaborn as sns\n'), ((4806, 4816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4814, 4816), True, 'import matplotlib.pyplot as plt\n'), ((3453, 3494), 'mla.metrics.distance.euclidean_distance', 'euclidean_distance', (['self.X[fpoint]', 'point'], {}), '(self.X[fpoint], point)\n', (3471, 3494), False, 'from mla.metrics.distance import euclidean_distance\n'), ((4750, 4796), 'matplotlib.pyplot.scatter', 'plt.scatter', (['*point'], {'marker': '"""x"""', 'linewidths': '(10)'}), "(*point, marker='x', linewidths=10)\n", (4761, 4796), True, 'import matplotlib.pyplot as plt\n'), ((3781, 3811), 'numpy.take', 'np.take', (['self.X[:, i]', 'cluster'], {}), '(self.X[:, i], cluster)\n', (3788, 3811), True, 'import numpy as np\n'), ((4182, 4205), 'numpy.where', 'np.where', (['(cumprobs >= r)'], {}), '(cumprobs >= r)\n', (4190, 4205), True, 'import numpy as np\n'), ((4601, 4622), 'numpy.array', 'np.array', (['data[index]'], {}), '(data[index])\n', (4609, 4622), True, 'import numpy as np\n'), ((1969, 1990), 'random.choice', 'random.choice', (['self.X'], {}), '(self.X)\n', (1982, 1990), False, 'import random\n'), ((3911, 3935), 'mla.metrics.distance.euclidean_distance', 'euclidean_distance', (['x', 'c'], {}), '(x, c)\n', (3929, 3935), False, 'from mla.metrics.distance import euclidean_distance\n'), ((4323, 4373), 'mla.metrics.distance.euclidean_distance', 'euclidean_distance', (['centroids_old[i]', 'centroids[i]'], {}), '(centroids_old[i], centroids[i])\n', (4341, 4373), False, 'from mla.metrics.distance import euclidean_distance\n'), ((4659, 4695), 'seaborn.color_palette', 'sns.color_palette', (['"""hls"""', '(self.K + 1)'], {}), "('hls', self.K + 1)\n", (4676, 4695), True, 'import seaborn as sns\n')]
|
import os
import sys
import argparse
import time
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# from sru import *
import dataloader
import modules
class Model(nn.Module):
def __init__(self, embedding, hidden_size=150, depth=1, dropout=0.3, cnn=False, nclasses=2, args=None):
super(Model, self).__init__()
self.cnn = cnn
self.drop = nn.Dropout(dropout)
self.args = args
self.emb_layer = modules.EmbeddingLayer(
embs = dataloader.load_embedding(embedding), dist_embeds = self.args.dist_embeds
)
self.word2id = self.emb_layer.word2id
if cnn:
self.encoder = modules.CNN_Text(
self.emb_layer.n_d,
widths = [3,4,5],
filters=hidden_size
)
d_out = 3*hidden_size
else:
self.encoder = nn.LSTM(
self.emb_layer.n_d,
hidden_size//2,
depth,
dropout = dropout,
# batch_first=True,
bidirectional=True
)
d_out = hidden_size
# else:
# self.encoder = SRU(
# emb_layer.n_d,
# args.d,
# args.depth,
# dropout = args.dropout,
# )
# d_out = args.d
self.out = nn.Linear(d_out, nclasses)
def forward(self, input):
if self.cnn:
input = input.t()
if self.args.dist_embeds:
emb, kl_loss = self.emb_layer(input)
else:
emb = self.emb_layer(input)
emb = self.drop(emb)
if self.cnn:
output = self.encoder(emb)
else:
output, hidden = self.encoder(emb)
# output = output[-1]
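            # Max-pool the encoder outputs over the time dimension instead of taking only the last hidden state.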
output = torch.max(output, dim=0)[0].squeeze()
output = self.drop(output)
if self.args.dist_embeds:
return self.out(output), kl_loss
else:
return self.out(output)
def text_pred(self, text, batch_size=32):
batches_x = dataloader.create_batches_x(
text,
batch_size, ##TODO
self.word2id
)
outs = []
with torch.no_grad():
for x in batches_x:
x = Variable(x)
if self.cnn:
x = x.t()
emb = self.emb_layer(x)
if self.cnn:
output = self.encoder(emb)
else:
output, hidden = self.encoder(emb)
# output = output[-1]
output = torch.max(output, dim=0)[0]
outs.append(F.softmax(self.out(output), dim=-1))
return torch.cat(outs, dim=0)
def eval_model(niter, model, input_x, input_y):
model.eval()
# N = len(valid_x)
# criterion = nn.CrossEntropyLoss()
correct = 0.0
cnt = 0.
# total_loss = 0.0
with torch.no_grad():
for x, y in zip(input_x, input_y):
x, y = Variable(x, volatile=True), Variable(y)
if model.args.dist_embeds:
output, kl_loss = model(x)
else:
output = model(x)
# loss = criterion(output, y)
# total_loss += loss.item()*x.size(1)
pred = output.data.max(1)[1]
correct += pred.eq(y.data).cpu().sum()
cnt += y.numel()
model.train()
return correct.item()/cnt
def train_model(epoch, model, optimizer,
train_x, train_y,
test_x, test_y,
best_test, save_path):
model.train()
niter = epoch*len(train_x)
criterion = nn.CrossEntropyLoss()
cnt = 0
for x, y in zip(train_x, train_y):
niter += 1
cnt += 1
model.zero_grad()
x, y = Variable(x), Variable(y)
if model.args.dist_embeds:
output, kl_loss = model(x)
ce_loss = criterion(output, y)
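            # Distributional embeddings add a KL term, weighted by --kl_weight, on top of the cross-entropy loss.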
loss = ce_loss + model.args.kl_weight*kl_loss
else:
output = model(x)
loss = criterion(output, y)
loss.backward()
optimizer.step()
test_acc = eval_model(niter, model, test_x, test_y)
if model.args.dist_embeds:
sys.stdout.write("Epoch={} iter={} lr={:.6f} train_loss_class={:.6f} train_loss_kl={:.6f} train_loss_ovr = {:.6f} test_err={:.6f}\n".format(
epoch, niter,
optimizer.param_groups[0]['lr'],
ce_loss.item(), kl_loss.item(), loss.item(),
test_acc
))
else:
sys.stdout.write("Epoch={} iter={} lr={:.6f} train_loss = {:.6f} test_err={:.6f}\n".format(
epoch, niter,
optimizer.param_groups[0]['lr'],
loss.item(),
test_acc
))
if test_acc > best_test:
best_test = test_acc
if save_path:
torch.save(model.state_dict(), save_path)
# test_err = eval_model(niter, model, test_x, test_y)
sys.stdout.write("\n")
return best_test
def save_data(data, labels, path, type='train'):
with open(os.path.join(path, type+'.txt'), 'w') as ofile:
for text, label in zip(data, labels):
ofile.write('{} {}\n'.format(label, ' '.join(text)))
def main(args):
if args.dataset == 'mr':
# data, label = dataloader.read_MR(args.path)
# train_x, train_y, test_x, test_y = dataloader.cv_split2(
# data, label,
# nfold=10,
# valid_id=args.cv
# )
#
# if args.save_data_split:
# save_data(train_x, train_y, args.path, 'train')
# save_data(test_x, test_y, args.path, 'test')
train_x, train_y = dataloader.read_corpus('data/mr/train.txt')
test_x, test_y = dataloader.read_corpus('data/mr/test.txt')
elif args.dataset == 'imdb':
train_x, train_y = dataloader.read_corpus(os.path.join('/data/medg/misc/jindi/nlp/datasets/imdb',
'train_tok.csv'),
clean=False, MR=True, shuffle=True)
test_x, test_y = dataloader.read_corpus(os.path.join('/data/medg/misc/jindi/nlp/datasets/imdb',
'test_tok.csv'),
clean=False, MR=True, shuffle=True)
else:
train_x, train_y = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/'
'train_tok.csv'.format(args.dataset),
clean=False, MR=False, shuffle=True)
test_x, test_y = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/'
'test_tok.csv'.format(args.dataset),
clean=False, MR=False, shuffle=True)
nclasses = max(train_y) + 1
# elif args.dataset == 'subj':
# data, label = dataloader.read_SUBJ(args.path)
# elif args.dataset == 'cr':
# data, label = dataloader.read_CR(args.path)
# elif args.dataset == 'mpqa':
# data, label = dataloader.read_MPQA(args.path)
# elif args.dataset == 'trec':
# train_x, train_y, test_x, test_y = dataloader.read_TREC(args.path)
# data = train_x + test_x
# label = None
# elif args.dataset == 'sst':
# train_x, train_y, valid_x, valid_y, test_x, test_y = dataloader.read_SST(args.path)
# data = train_x + valid_x + test_x
# label = None
# else:
# raise Exception("unknown dataset: {}".format(args.dataset))
# if args.dataset == 'trec':
# elif args.dataset != 'sst':
# train_x, train_y, valid_x, valid_y, test_x, test_y = dataloader.cv_split(
# data, label,
# nfold = 10,
# test_id = args.cv
# )
model = Model(args.embedding, args.d, args.depth, args.dropout, args.cnn, nclasses, args=args).cuda()
need_grad = lambda x: x.requires_grad
optimizer = optim.Adam(
filter(need_grad, model.parameters()),
lr = args.lr
)
train_x, train_y = dataloader.create_batches(
train_x, train_y,
args.batch_size,
model.word2id,
)
# valid_x, valid_y = dataloader.create_batches(
# valid_x, valid_y,
# args.batch_size,
# emb_layer.word2id,
# )
test_x, test_y = dataloader.create_batches(
test_x, test_y,
args.batch_size,
model.word2id,
)
best_test = 0
# test_err = 1e+8
for epoch in range(args.max_epoch):
best_test = train_model(epoch, model, optimizer,
train_x, train_y,
# valid_x, valid_y,
test_x, test_y,
best_test, args.save_path
)
if args.lr_decay>0:
optimizer.param_groups[0]['lr'] *= args.lr_decay
# sys.stdout.write("best_valid: {:.6f}\n".format(
# best_valid
# ))
sys.stdout.write("test_err: {:.6f}\n".format(
best_test
))
if __name__ == "__main__":
argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')
argparser.add_argument("--cnn", action='store_true', help="whether to use cnn")
argparser.add_argument("--lstm", action='store_true', help="whether to use lstm")
argparser.add_argument("--dataset", type=str, default="mr", help="which dataset")
argparser.add_argument("--embedding", type=str, required=True, help="word vectors")
argparser.add_argument("--batch_size", "--batch", type=int, default=32)
argparser.add_argument("--max_epoch", type=int, default=70)
argparser.add_argument("--d", type=int, default=150)
argparser.add_argument("--dropout", type=float, default=0.3)
argparser.add_argument("--depth", type=int, default=1)
argparser.add_argument("--lr", type=float, default=0.001)
argparser.add_argument("--lr_decay", type=float, default=0)
argparser.add_argument("--cv", type=int, default=0)
argparser.add_argument("--save_path", type=str, default='')
argparser.add_argument("--save_data_split", action='store_true', help="whether to save train/test split")
argparser.add_argument("--gpu_id", type=int, default=0)
argparser.add_argument("--kl_weight", type=float, default = 0.001)
argparser.add_argument("--dist_embeds", action='store_true')
args = argparser.parse_args()
# args.save_path = os.path.join(args.save_path, args.dataset)
print (args)
torch.cuda.set_device(args.gpu_id)
main(args)
|
[
"dataloader.create_batches_x",
"torch.nn.Dropout",
"dataloader.load_embedding",
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"torch.nn.LSTM",
"torch.max",
"modules.CNN_Text",
"os.path.join",
"dataloader.create_batches",
"dataloader.read_corpus",
"torch.nn.Linear",
"torch.no_grad",
"torch.autograd.Variable",
"torch.cuda.set_device",
"torch.cat",
"sys.stdout.write"
] |
[((3781, 3802), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3800, 3802), True, 'import torch.nn as nn\n'), ((5099, 5121), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (5115, 5121), False, 'import sys\n'), ((8341, 8416), 'dataloader.create_batches', 'dataloader.create_batches', (['train_x', 'train_y', 'args.batch_size', 'model.word2id'], {}), '(train_x, train_y, args.batch_size, model.word2id)\n', (8366, 8416), False, 'import dataloader\n'), ((8613, 8686), 'dataloader.create_batches', 'dataloader.create_batches', (['test_x', 'test_y', 'args.batch_size', 'model.word2id'], {}), '(test_x, test_y, args.batch_size, model.word2id)\n', (8638, 8686), False, 'import dataloader\n'), ((9287, 9351), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['sys.argv[0]'], {'conflict_handler': '"""resolve"""'}), "(sys.argv[0], conflict_handler='resolve')\n", (9310, 9351), False, 'import argparse\n'), ((10691, 10725), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu_id'], {}), '(args.gpu_id)\n', (10712, 10725), False, 'import torch\n'), ((482, 501), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (492, 501), True, 'import torch.nn as nn\n'), ((1479, 1505), 'torch.nn.Linear', 'nn.Linear', (['d_out', 'nclasses'], {}), '(d_out, nclasses)\n', (1488, 1505), True, 'import torch.nn as nn\n'), ((2203, 2262), 'dataloader.create_batches_x', 'dataloader.create_batches_x', (['text', 'batch_size', 'self.word2id'], {}), '(text, batch_size, self.word2id)\n', (2230, 2262), False, 'import dataloader\n'), ((2862, 2884), 'torch.cat', 'torch.cat', (['outs'], {'dim': '(0)'}), '(outs, dim=0)\n', (2871, 2884), False, 'import torch\n'), ((3078, 3093), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3091, 3093), False, 'import torch\n'), ((5816, 5859), 'dataloader.read_corpus', 'dataloader.read_corpus', (['"""data/mr/train.txt"""'], {}), "('data/mr/train.txt')\n", (5838, 5859), False, 'import dataloader\n'), ((5885, 5927), 'dataloader.read_corpus', 'dataloader.read_corpus', (['"""data/mr/test.txt"""'], {}), "('data/mr/test.txt')\n", (5907, 5927), False, 'import dataloader\n'), ((769, 844), 'modules.CNN_Text', 'modules.CNN_Text', (['self.emb_layer.n_d'], {'widths': '[3, 4, 5]', 'filters': 'hidden_size'}), '(self.emb_layer.n_d, widths=[3, 4, 5], filters=hidden_size)\n', (785, 844), False, 'import modules\n'), ((982, 1075), 'torch.nn.LSTM', 'nn.LSTM', (['self.emb_layer.n_d', '(hidden_size // 2)', 'depth'], {'dropout': 'dropout', 'bidirectional': '(True)'}), '(self.emb_layer.n_d, hidden_size // 2, depth, dropout=dropout,\n bidirectional=True)\n', (989, 1075), True, 'import torch.nn as nn\n'), ((2347, 2362), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2360, 2362), False, 'import torch\n'), ((3932, 3943), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (3940, 3943), False, 'from torch.autograd import Variable\n'), ((3945, 3956), 'torch.autograd.Variable', 'Variable', (['y'], {}), '(y)\n', (3953, 3956), False, 'from torch.autograd import Variable\n'), ((5207, 5240), 'os.path.join', 'os.path.join', (['path', "(type + '.txt')"], {}), "(path, type + '.txt')\n", (5219, 5240), False, 'import os\n'), ((595, 631), 'dataloader.load_embedding', 'dataloader.load_embedding', (['embedding'], {}), '(embedding)\n', (620, 631), False, 'import dataloader\n'), ((2416, 2427), 'torch.autograd.Variable', 'Variable', (['x'], {}), '(x)\n', (2424, 2427), False, 'from torch.autograd import Variable\n'), ((3157, 3183), 'torch.autograd.Variable', 
'Variable', (['x'], {'volatile': '(True)'}), '(x, volatile=True)\n', (3165, 3183), False, 'from torch.autograd import Variable\n'), ((3185, 3196), 'torch.autograd.Variable', 'Variable', (['y'], {}), '(y)\n', (3193, 3196), False, 'from torch.autograd import Variable\n'), ((6011, 6083), 'os.path.join', 'os.path.join', (['"""/data/medg/misc/jindi/nlp/datasets/imdb"""', '"""train_tok.csv"""'], {}), "('/data/medg/misc/jindi/nlp/datasets/imdb', 'train_tok.csv')\n", (6023, 6083), False, 'import os\n'), ((6282, 6353), 'os.path.join', 'os.path.join', (['"""/data/medg/misc/jindi/nlp/datasets/imdb"""', '"""test_tok.csv"""'], {}), "('/data/medg/misc/jindi/nlp/datasets/imdb', 'test_tok.csv')\n", (6294, 6353), False, 'import os\n'), ((1932, 1956), 'torch.max', 'torch.max', (['output'], {'dim': '(0)'}), '(output, dim=0)\n', (1941, 1956), False, 'import torch\n'), ((2752, 2776), 'torch.max', 'torch.max', (['output'], {'dim': '(0)'}), '(output, dim=0)\n', (2761, 2776), False, 'import torch\n')]
|
import datetime
import io
import os
import tweepy
from dotenv import load_dotenv
from PIL import Image, ImageDraw, ImageFont
class Twitter:
"""
A class used to manage the connection with the Twitter API
...
Methods
-------
post_tweet(solver_answers, nyt_answers, pangrams)
        Creates the tweet text and posts a picture with today's answers
"""
def __init__(self):
load_dotenv()
api_key = os.environ.get('TWITTER_API')
api_key_secret = os.environ.get('TWITTER_API_SECRET')
access_token = os.environ.get('TWITTER_ACCESS')
access_token_secret = os.environ.get('TWITTER_ACCESS_SECRET')
auth = tweepy.OAuthHandler(api_key, api_key_secret)
auth.set_access_token(access_token, access_token_secret)
self.api = tweepy.API(auth)
def post_tweet(self, solver_answers, nyt_answers, pangrams):
"""Composes the tweet text and posts a picture with todays answers
marked as NSFW to avoid spoilers
Parameters
----------
solver_answers: list, required
The answers returned by the solver
nyt_answers: list, required
            The answers of today's New York Times Spelling Bee
pangrams: list, required
            The pangrams in the answers of today's New York Times Spelling Bee
"""
pangrams.sort()
nyt_answers.sort()
text = ("Pangram(s):\n"
+ self.__make_rows(pangrams)
+ '\n\nAnswers:\n'
+ self.__make_rows(nyt_answers))
pic = self.__create_pic(text)
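        # Upload the image flagged as possibly sensitive so the answers stay hidden until revealed (no spoilers).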
media = self.api.media_upload(
filename=str(datetime.date.today()),
file=pic,
possibly_sensitive=True)
if len(solver_answers) == len(nyt_answers):
tweet = "Cheating Bee got all {} answers on todays #SpellingBee!🐝🎓"
tweet = tweet + "\n\nNeed help with todays puzzle? Click the image below!"
tweet = tweet.format(len(nyt_answers))
else:
tweet = "Cheating Bee got {}/{} answers on todays #SpellingBee!🐝"
tweet = tweet + "\n\nNeed help with todays puzzle? Click the image below!"
tweet = tweet.format(len(solver_answers), len(nyt_answers))
self.api.update_status(status=tweet, media_ids=[media.media_id])
def __make_rows(self, word_list):
"""Formats a list of words into a string with rows five words long
Parameters
----------
word_list: list, required
A list of words
Returns
-------
str
The word list composed to a string with rows of five words
"""
text = ''
for i in range(0, len(word_list), 5):
if i + 5 < len(word_list):
text = text + ', '.join(word_list[i:i+5]) + ',\n'
else:
text = text + ', '.join(word_list[i:len(word_list)])
return text
def __create_pic(self, text):
"""Creates an image with and fills it with the text provided
Parameters
----------
text: str, required
The text string to be drawn on the picture
Returns
-------
file
The picture as a file object
"""
font_size = 20
# number of lines plus 3 for padding
height = (text.count('\n') + 3) * font_size
        # width: length of the longest line times the font size, scaled by 0.65
width = int(
max([len(x) for x in text.splitlines()]) * font_size * 0.65)
pic = Image.new("RGB", (width, height), (255, 255, 255))
font = ImageFont.truetype("Pillow/Tests/fonts/FreeMono.ttf", font_size)
drawing = ImageDraw.Draw(pic)
drawing.multiline_text((10, 10), text, font=font, fill=(0, 0, 0))
b = io.BytesIO()
pic.save(b, 'png')
b.seek(0)
return b
|
[
"PIL.Image.new",
"os.environ.get",
"PIL.ImageFont.truetype",
"io.BytesIO",
"dotenv.load_dotenv",
"tweepy.API",
"PIL.ImageDraw.Draw",
"datetime.date.today",
"tweepy.OAuthHandler"
] |
[((413, 426), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (424, 426), False, 'from dotenv import load_dotenv\n'), ((445, 474), 'os.environ.get', 'os.environ.get', (['"""TWITTER_API"""'], {}), "('TWITTER_API')\n", (459, 474), False, 'import os\n'), ((500, 536), 'os.environ.get', 'os.environ.get', (['"""TWITTER_API_SECRET"""'], {}), "('TWITTER_API_SECRET')\n", (514, 536), False, 'import os\n'), ((560, 592), 'os.environ.get', 'os.environ.get', (['"""TWITTER_ACCESS"""'], {}), "('TWITTER_ACCESS')\n", (574, 592), False, 'import os\n'), ((623, 662), 'os.environ.get', 'os.environ.get', (['"""TWITTER_ACCESS_SECRET"""'], {}), "('TWITTER_ACCESS_SECRET')\n", (637, 662), False, 'import os\n'), ((678, 722), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['api_key', 'api_key_secret'], {}), '(api_key, api_key_secret)\n', (697, 722), False, 'import tweepy\n'), ((807, 823), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (817, 823), False, 'import tweepy\n'), ((3601, 3651), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)', '(255, 255, 255)'], {}), "('RGB', (width, height), (255, 255, 255))\n", (3610, 3651), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3667, 3731), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""Pillow/Tests/fonts/FreeMono.ttf"""', 'font_size'], {}), "('Pillow/Tests/fonts/FreeMono.ttf', font_size)\n", (3685, 3731), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3750, 3769), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['pic'], {}), '(pic)\n', (3764, 3769), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3856, 3868), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3866, 3868), False, 'import io\n'), ((1670, 1691), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1689, 1691), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
"""
import logging
import numpy as np
import scipy as sp
import collections
import itertools
from model.modelTemplate import Model
class BPE(Model):
"""The Bayesian predictor model
Attributes
----------
Name : string
The name of the class used when recording what has been used.
Parameters
----------
alpha : float, optional
Learning rate parameter
epsilon : float, optional
        Noise parameter. The larger it is, the less likely the model is to choose the highest expected reward
number_actions : integer, optional
The maximum number of valid actions the model can expect to receive.
Default 2.
number_cues : integer, optional
The initial maximum number of stimuli the model can expect to receive.
Default 1.
number_critics : integer, optional
The number of different reaction learning sets.
Default number_actions*number_cues
validRewards : list,np.ndarray, optional
The different reward values that can occur in the task. Default ``array([0, 1])``
action_codes : dict with string or int as keys and int values, optional
A dictionary used to convert between the action references used by the
task or dataset and references used in the models to describe the order
in which the action information is stored.
dirichletInit : float, optional
The initial values for values of the dirichlet distribution.
Normally 0, 1/2 or 1. Default 1
prior : array of floats in ``[0, 1]``, optional
Ignored in this case
stimFunc : function, optional
The function that transforms the stimulus into a form the model can
understand and a string to identify it later. Default is blankStim
rewFunc : function, optional
The function that transforms the reward into a form the model can
understand. Default is blankRew
decFunc : function, optional
The function that takes the internal values of the model and turns them
in to a decision. Default is model.decision.discrete.weightProb
See Also
--------
model.BP : This model is heavily based on that one
"""
def __init__(self, alpha=0.3, epsilon=0.1, dirichletInit=1, validRewards=np.array([0, 1]), **kwargs):
super(BPE, self).__init__(**kwargs)
self.alpha = alpha
self.epsilon = epsilon
self.validRew = validRewards
        self.rewLoc = collections.OrderedDict(((k, v) for k, v in zip(self.validRew, range(len(self.validRew)))))  # itertools.izip is Python 2 only; zip works on both
self.dirichletVals = np.ones((self.number_actions, self.number_cues, len(self.validRew))) * dirichletInit
self.expectations = self.updateExpectations(self.dirichletVals)
self.parameters["epsilon"] = self.epsilon
self.parameters["alpha"] = self.alpha
self.parameters["dirichletInit"] = dirichletInit
# Recorded information
self.recDirichletVals = []
def returnTaskState(self):
""" Returns all the relevant data for this model
Returns
-------
results : dict
The dictionary contains a series of keys including Name,
Probabilities, Actions and Events.
"""
results = self.standardResultOutput()
results["dirichletVals"] = np.array(self.recDirichletVals)
return results
def storeState(self):
"""
Stores the state of all the important variables so that they can be
accessed later
"""
self.storeStandardResults()
self.recDirichletVals.append(self.dirichletVals.copy())
def rewardExpectation(self, observation):
"""Calculate the estimated reward based on the action and stimuli
This contains parts that are task dependent
Parameters
----------
observation : {int | float | tuple}
The set of stimuli
Returns
-------
actionExpectations : array of floats
The expected rewards for each action
stimuli : list of floats
The processed observations
activeStimuli : list of [0, 1] mapping to [False, True]
A list of the stimuli that were or were not present
"""
activeStimuli, stimuli = self.stimulus_shaper.processStimulus(observation)
actionExpectations = self._actExpectations(self.dirichletVals, stimuli)
return actionExpectations, stimuli, activeStimuli
def delta(self, reward, expectation, action, stimuli):
"""
Calculates the comparison between the reward and the expectation
Parameters
----------
reward : float
The reward value
expectation : float
The expected reward value
action : int
The chosen action
stimuli : {int | float | tuple | None}
The stimuli received
Returns
-------
delta
"""
modReward = self.reward_shaper.processFeedback(reward, action, stimuli)
return modReward
def updateModel(self, delta, action, stimuli, stimuliFilter):
"""
Parameters
----------
delta : float
The difference between the reward and the expected reward
action : int
The action chosen by the model in this trialstep
stimuli : list of float
The weights of the different stimuli in this trialstep
stimuliFilter : list of bool
A list describing if a stimulus cue is present in this trialstep
"""
# Find the new activities
self._newExpect(action, delta, stimuli)
# Calculate the new probabilities
# We need to combine the expectations before calculating the probabilities
actionExpectations = self._actExpectations(self.dirichletVals, stimuli)
self.probabilities = self.calcProbabilities(actionExpectations)
def _newExpect(self, action, delta, stimuli):
self.dirichletVals[action, :, self.rewLoc[delta]] += self.alpha * stimuli/np.sum(stimuli)
self.expectations = self.updateExpectations(self.dirichletVals)
def _actExpectations(self, dirichletVals, stimuli):
# If there are multiple possible stimuli, filter by active stimuli and calculate
# calculate the expectations associated with each action.
if self.number_cues > 1:
actionExpectations = self.calcActExpectations(self.actStimMerge(dirichletVals, stimuli))
else:
actionExpectations = self.calcActExpectations(dirichletVals[:, 0, :])
return actionExpectations
def calcProbabilities(self, actionValues):
# type: (np.ndarray) -> np.ndarray
"""
Calculate the probabilities associated with the actions
Parameters
----------
actionValues : 1D ndArray of floats
Returns
-------
probArray : 1D ndArray of floats
The probabilities associated with the actionValues
"""
cbest = actionValues == max(actionValues)
deltaEpsilon = self.epsilon * (1 / self.number_actions)
bestEpsilon = (1 - self.epsilon) / np.sum(cbest) + deltaEpsilon
probArray = bestEpsilon * cbest + deltaEpsilon * (1 - cbest)
return probArray
def actorStimulusProbs(self):
"""
Calculates in the model-appropriate way the probability of each action.
Returns
-------
probabilities : 1D ndArray of floats
The probabilities associated with the action choices
"""
probabilities = self.calcProbabilities(self.expectedRewards)
return probabilities
def actStimMerge(self, dirichletVals, stimuli):
dirVals = dirichletVals * np.expand_dims(np.repeat([stimuli], self.number_actions, axis=0), 2)
actDirVals = np.sum(dirVals, 1)
return actDirVals
def calcActExpectations(self, dirichletVals):
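        # Expected reward per action: the Dirichlet mean probabilities dotted with the possible reward values.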
actExpect = np.fromiter((np.sum(sp.stats.dirichlet(d).mean() * self.validRew) for d in dirichletVals), float, count=self.number_actions)
return actExpect
def updateExpectations(self, dirichletVals):
def meanFunc(p, r=[]):
return np.sum(sp.stats.dirichlet(p).mean() * r)
expectations = np.apply_along_axis(meanFunc, 2, dirichletVals, r=self.validRew)
return expectations
|
[
"numpy.repeat",
"scipy.stats.dirichlet",
"numpy.array",
"numpy.sum",
"numpy.apply_along_axis"
] |
[((2391, 2407), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2399, 2407), True, 'import numpy as np\n'), ((3481, 3512), 'numpy.array', 'np.array', (['self.recDirichletVals'], {}), '(self.recDirichletVals)\n', (3489, 3512), True, 'import numpy as np\n'), ((8204, 8222), 'numpy.sum', 'np.sum', (['dirVals', '(1)'], {}), '(dirVals, 1)\n', (8210, 8222), True, 'import numpy as np\n'), ((8654, 8718), 'numpy.apply_along_axis', 'np.apply_along_axis', (['meanFunc', '(2)', 'dirichletVals'], {'r': 'self.validRew'}), '(meanFunc, 2, dirichletVals, r=self.validRew)\n', (8673, 8718), True, 'import numpy as np\n'), ((6338, 6353), 'numpy.sum', 'np.sum', (['stimuli'], {}), '(stimuli)\n', (6344, 6353), True, 'import numpy as np\n'), ((7494, 7507), 'numpy.sum', 'np.sum', (['cbest'], {}), '(cbest)\n', (7500, 7507), True, 'import numpy as np\n'), ((8126, 8175), 'numpy.repeat', 'np.repeat', (['[stimuli]', 'self.number_actions'], {'axis': '(0)'}), '([stimuli], self.number_actions, axis=0)\n', (8135, 8175), True, 'import numpy as np\n'), ((8594, 8615), 'scipy.stats.dirichlet', 'sp.stats.dirichlet', (['p'], {}), '(p)\n', (8612, 8615), True, 'import scipy as sp\n'), ((8348, 8369), 'scipy.stats.dirichlet', 'sp.stats.dirichlet', (['d'], {}), '(d)\n', (8366, 8369), True, 'import scipy as sp\n')]
|
from django.db.models.signals import post_init
from factory import DjangoModelFactory, Sequence, SubFactory
from factory.django import mute_signals
from affiliates.banners import models
class CategoryFactory(DjangoModelFactory):
FACTORY_FOR = models.Category
name = Sequence(lambda n: 'test{0}'.format(n))
class BannerFactory(DjangoModelFactory):
ABSTRACT_FACTORY = True
category = SubFactory(CategoryFactory)
name = Sequence(lambda n: 'test{0}'.format(n))
destination = 'https://mozilla.org/'
visible = True
class ImageBannerFactory(BannerFactory):
FACTORY_FOR = models.ImageBanner
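# post_init is muted so model signal handlers don't fire for factory-built instances (the image paths are placeholders).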
@mute_signals(post_init)
class ImageVariationFactory(DjangoModelFactory):
ABSTRACT_FACTORY = True
color = 'Blue'
locale = 'en-us'
image = 'uploads/image_banners/test.png'
class ImageBannerVariationFactory(ImageVariationFactory):
FACTORY_FOR = models.ImageBannerVariation
banner = SubFactory(ImageBannerFactory)
class TextBannerFactory(BannerFactory):
FACTORY_FOR = models.TextBanner
class TextBannerVariationFactory(DjangoModelFactory):
FACTORY_FOR = models.TextBannerVariation
banner = SubFactory(TextBannerFactory)
locale = 'en-us'
text = Sequence(lambda n: 'test{0}'.format(n))
class FirefoxUpgradeBannerFactory(BannerFactory):
FACTORY_FOR = models.FirefoxUpgradeBanner
@mute_signals(post_init)
class FirefoxUpgradeBannerVariationFactory(ImageVariationFactory):
FACTORY_FOR = models.FirefoxUpgradeBannerVariation
banner = SubFactory(FirefoxUpgradeBannerFactory)
image = 'uploads/firefox_upgrade_banners/test.png'
upgrade_image = 'uploads/firefox_upgrade_banners/test_upgrade.png'
|
[
"factory.django.mute_signals",
"factory.SubFactory"
] |
[((628, 651), 'factory.django.mute_signals', 'mute_signals', (['post_init'], {}), '(post_init)\n', (640, 651), False, 'from factory.django import mute_signals\n'), ((1362, 1385), 'factory.django.mute_signals', 'mute_signals', (['post_init'], {}), '(post_init)\n', (1374, 1385), False, 'from factory.django import mute_signals\n'), ((406, 433), 'factory.SubFactory', 'SubFactory', (['CategoryFactory'], {}), '(CategoryFactory)\n', (416, 433), False, 'from factory import DjangoModelFactory, Sequence, SubFactory\n'), ((935, 965), 'factory.SubFactory', 'SubFactory', (['ImageBannerFactory'], {}), '(ImageBannerFactory)\n', (945, 965), False, 'from factory import DjangoModelFactory, Sequence, SubFactory\n'), ((1159, 1188), 'factory.SubFactory', 'SubFactory', (['TextBannerFactory'], {}), '(TextBannerFactory)\n', (1169, 1188), False, 'from factory import DjangoModelFactory, Sequence, SubFactory\n'), ((1522, 1561), 'factory.SubFactory', 'SubFactory', (['FirefoxUpgradeBannerFactory'], {}), '(FirefoxUpgradeBannerFactory)\n', (1532, 1561), False, 'from factory import DjangoModelFactory, Sequence, SubFactory\n')]
|
#! /usr/bin/env python
"""Toolbox for unbalanced dataset in machine learning."""
from setuptools import setup, find_packages
import os
import sys
import setuptools
from distutils.command.build_py import build_py
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
descr = """Toolbox for unbalanced dataset in machine learning."""
DISTNAME = 'unbalanced_dataset'
DESCRIPTION = 'Toolbox for unbalanced dataset in machine learning.'
LONG_DESCRIPTION = descr
MAINTAINER = '<NAME>, <NAME>'
MAINTAINER_EMAIL = '<EMAIL>, <EMAIL>'
URL = 'https://github.com/fmfn/UnbalancedDataset'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset'
# This is a bit (!) hackish: we are setting a global variable so that the main
# skimage __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by UnbalancedDataset to
# recursively build the compiled extensions in sub-packages is based on
# the Python import machinery.
builtins.__UNBALANCED_DATASET_SETUP__ = True
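# Read __version__ straight from the package source so setup can run before the package is importable.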
with open('unbalanced_dataset/__init__.py') as fid:
for line in fid:
if line.startswith('__version__'):
VERSION = line.strip().split()[-1][1:-1]
break
with open('requirements.txt') as fid:
INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l]
# requirements for those browsing PyPI
REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES]
REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES]
REQUIRES = [r.replace('[array]', '') for r in REQUIRES]
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(
ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('unbalanced_dataset')
return config
if __name__ == "__main__":
try:
from numpy.distutils.core import setup
extra = {'configuration': configuration}
# Do not try and upgrade larger dependencies
for lib in ['scipy', 'numpy', 'matplotlib']:
try:
__import__(lib)
INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES
if lib not in i]
except ImportError:
pass
except ImportError:
if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'--version',
'clean')):
# For these actions, NumPy is not required.
#
# They are required to succeed without Numpy for example when
# pip is used to install UnbalancedDataset when Numpy is not yet
# present in the system.
from setuptools import setup
extra = {}
else:
            print('To install UnbalancedDataset from source, you need numpy. ' +
'Install numpy with pip:\n' +
'pip install numpy\n'
'Or use your operating system package manager.')
sys.exit(1)
setup(
name=DISTNAME,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
license=LICENSE,
download_url=DOWNLOAD_URL,
version=VERSION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
install_requires=INSTALL_REQUIRES,
requires=REQUIRES,
packages=setuptools.find_packages(exclude=['doc']),
include_package_data=True,
zip_safe=False, # the package can run out of an .egg file
cmdclass={'build_py': build_py},
**extra
)
|
[
"os.path.exists",
"setuptools.find_packages",
"numpy.distutils.misc_util.Configuration",
"sys.exit",
"os.remove"
] |
[((1708, 1734), 'os.path.exists', 'os.path.exists', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (1722, 1734), False, 'import os\n'), ((1836, 1881), 'numpy.distutils.misc_util.Configuration', 'Configuration', (['None', 'parent_package', 'top_path'], {}), '(None, parent_package, top_path)\n', (1849, 1881), False, 'from numpy.distutils.misc_util import Configuration\n'), ((1744, 1765), 'os.remove', 'os.remove', (['"""MANIFEST"""'], {}), "('MANIFEST')\n", (1753, 1765), False, 'import os\n'), ((4762, 4803), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'exclude': "['doc']"}), "(exclude=['doc'])\n", (4786, 4803), False, 'import setuptools\n'), ((3442, 3453), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3450, 3453), False, 'import sys\n')]
|
#!/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "<EMAIL>"
from mongo_schema import schema_engine
import os
def get_schema_files(schemas_dirpath):
""" get list of js / json files resided in dirpath param. """
res = []
for fname in os.listdir(schemas_dirpath):
if fname.endswith('json') or fname.endswith('js'):
res.append(fname)
res.sort()
return res
def get_schema_engines_as_dict(schemas_dirpath):
""" Load schema engines into dict.
    The basename of each schema file should be the name of its collection."""
js_schema_files = get_schema_files(schemas_dirpath)
schemas = {}
for fname in js_schema_files:
collection_name = os.path.splitext(os.path.basename(fname))[0]
schema_path = os.path.join(schemas_dirpath, fname)
schemas[collection_name] = \
schema_engine.create_schema_engine(collection_name, schema_path)
return schemas
|
[
"mongo_schema.schema_engine.create_schema_engine",
"os.listdir",
"os.path.join",
"os.path.basename"
] |
[((302, 329), 'os.listdir', 'os.listdir', (['schemas_dirpath'], {}), '(schemas_dirpath)\n', (312, 329), False, 'import os\n'), ((803, 839), 'os.path.join', 'os.path.join', (['schemas_dirpath', 'fname'], {}), '(schemas_dirpath, fname)\n', (815, 839), False, 'import os\n'), ((889, 953), 'mongo_schema.schema_engine.create_schema_engine', 'schema_engine.create_schema_engine', (['collection_name', 'schema_path'], {}), '(collection_name, schema_path)\n', (923, 953), False, 'from mongo_schema import schema_engine\n'), ((753, 776), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (769, 776), False, 'import os\n')]
|
from random import randint
from django.core.management.base import BaseCommand
from django.db import transaction
from faker import Faker
from hn_users.models import HNUser, User
from links.models import Link, Vote
faker = Faker()
class Command(BaseCommand):
help = "Generate Links from a small user subset"
def add_arguments(self, parser):
parser.add_argument("no_of_users", type=int, nargs="?", default=4)
parser.add_argument("no_of_links", type=int, nargs="?", default=20)
@transaction.atomic()
def handle(self, *args, **options):
no_of_users = options.get("no_of_users")
no_of_links = options.get("no_of_links")
for user in range(no_of_users):
user = self._create_user()
hn_user = self._create_hn_user(django_user=user)
for link in range(no_of_links):
generated_link = self._create_link()
generated_link.refresh_from_db()
self.stdout.write(
self.style.SUCCESS(
f"Link {generated_link.url} generated with {generated_link.link_votes.count()} votes"
)
)
def _create_link(self):
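        # Post the link as a random user and have a random subset of users vote on it.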
all_users_count = HNUser.objects.count()
number_of_users_who_voted = randint(1, all_users_count) # nosec
randomly_ordered_users = HNUser.objects.all().order_by("?") # nosec
random_users = randomly_ordered_users[:number_of_users_who_voted]
hn_user = HNUser.objects.all().order_by("?").first()
link = Link()
link.posted_by = hn_user
link.url = faker.url()
link.description = faker.text()
link.save()
for random_user in random_users:
vote = Vote()
vote.link = link
vote.user = random_user
vote.save()
return link
def _create_user(self):
simple_profile = faker.simple_profile()
user = User()
user.email = simple_profile["mail"]
user.username = simple_profile["username"]
user.first_name = simple_profile["name"].split(" ")[0]
user.last_name = simple_profile["name"].split(" ")[-1]
user.set_password(faker.password())
user.save()
return user
def _create_hn_user(self, django_user):
hn_user = HNUser()
hn_user.bio = faker.text()
hn_user.django_user = django_user
        hn_user.save()
        return hn_user
|
[
"django.db.transaction.atomic",
"hn_users.models.HNUser",
"links.models.Link",
"links.models.Vote",
"hn_users.models.HNUser.objects.count",
"hn_users.models.User",
"faker.Faker",
"hn_users.models.HNUser.objects.all",
"random.randint"
] |
[((225, 232), 'faker.Faker', 'Faker', ([], {}), '()\n', (230, 232), False, 'from faker import Faker\n'), ((511, 531), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (529, 531), False, 'from django.db import transaction\n'), ((1207, 1229), 'hn_users.models.HNUser.objects.count', 'HNUser.objects.count', ([], {}), '()\n', (1227, 1229), False, 'from hn_users.models import HNUser, User\n'), ((1266, 1293), 'random.randint', 'randint', (['(1)', 'all_users_count'], {}), '(1, all_users_count)\n', (1273, 1293), False, 'from random import randint\n'), ((1531, 1537), 'links.models.Link', 'Link', ([], {}), '()\n', (1535, 1537), False, 'from links.models import Link, Vote\n'), ((1932, 1938), 'hn_users.models.User', 'User', ([], {}), '()\n', (1936, 1938), False, 'from hn_users.models import HNUser, User\n'), ((2307, 2315), 'hn_users.models.HNUser', 'HNUser', ([], {}), '()\n', (2313, 2315), False, 'from hn_users.models import HNUser, User\n'), ((1723, 1729), 'links.models.Vote', 'Vote', ([], {}), '()\n', (1727, 1729), False, 'from links.models import Link, Vote\n'), ((1336, 1356), 'hn_users.models.HNUser.objects.all', 'HNUser.objects.all', ([], {}), '()\n', (1354, 1356), False, 'from hn_users.models import HNUser, User\n'), ((1473, 1493), 'hn_users.models.HNUser.objects.all', 'HNUser.objects.all', ([], {}), '()\n', (1491, 1493), False, 'from hn_users.models import HNUser, User\n')]
|
# -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class CenexSpider(scrapy.Spider):
name = "cenex"
item_attributes = {"brand": "Cenex", "brand_wikidata": "Q5011381"}
allowed_domains = ["www.cenex.com"]
def start_requests(self):
yield scrapy.http.JsonRequest(
"https://www.cenex.com/Common/Services/InteractiveMap.svc/GetLocations",
method="POST",
data={
"SearchRequest": {
"Metadata": {"MapId": "", "Categories": []},
"Query": {
"SearchLat": 0,
"SearchLong": 0,
"LocationTypes": [1, 16, 15],
"Amenities": [],
"Organizations": ["28e93e82-edfa-418e-90aa-7ded057a0c68"],
"NELat": 90,
"NELong": 180,
"SWLat": -90,
"SWLong": -180,
},
},
"MapItemId": "40381d43-1c05-43e0-8477-78737b9974df",
"AllOrganizationIds": [
"b4ed9d2c-cc3b-4ce0-b642-79d75eac11fa",
"cb27078e-9b6a-4f4d-ac81-eb1d163a5ff6",
"68be9e56-ff49-4724-baf0-90fc833fb459",
"28e93e82-edfa-418e-90aa-7ded057a0c68",
],
"ServiceUrl": "https://locatorservice.chsinc.ds/api/search",
},
)
def parse(self, response):
result = json.loads(response.body_as_unicode())
for store in result["SearchResponse"]["Locations"]:
amenities = "|".join([a["Name"] for a in store["Amenities"]])
yield GeojsonPointItem(
lon=store["Long"],
lat=store["Lat"],
ref=store["LocationId"],
name=store["Name"],
addr_full=" ".join([store["Address1"], store["Address2"]]).strip(),
city=store["City"],
state=store["State"],
postcode=store["Zip"],
country="US",
phone=store["Phone"],
website=store["WebsiteUrl"],
opening_hours="24/7" if "24-Hour" in amenities else None,
extras={
"amenity:fuel": True,
"atm": "ATM" in amenities,
"car_wash": "Car Wash" in amenities,
"fuel:biodiesel": "Biodiesel" in amenities or None,
"fuel:diesel": "Diesel" in amenities or None,
"fuel:e85": "Flex Fuels" in amenities or None,
"fuel:HGV_diesel": "Truck Stop" in amenities or None,
"fuel:propane": "Propane" in amenities or None,
"hgv": "Truck Stop" in amenities or None,
"shop": "convenience" if "Convenience Store" in amenities else None,
},
)
|
[
"scrapy.http.JsonRequest"
] |
[((306, 1045), 'scrapy.http.JsonRequest', 'scrapy.http.JsonRequest', (['"""https://www.cenex.com/Common/Services/InteractiveMap.svc/GetLocations"""'], {'method': '"""POST"""', 'data': "{'SearchRequest': {'Metadata': {'MapId': '', 'Categories': []}, 'Query': {\n 'SearchLat': 0, 'SearchLong': 0, 'LocationTypes': [1, 16, 15],\n 'Amenities': [], 'Organizations': [\n '28e93e82-edfa-418e-90aa-7ded057a0c68'], 'NELat': 90, 'NELong': 180,\n 'SWLat': -90, 'SWLong': -180}}, 'MapItemId':\n '40381d43-1c05-43e0-8477-78737b9974df', 'AllOrganizationIds': [\n 'b4ed9d2c-cc3b-4ce0-b642-79d75eac11fa',\n 'cb27078e-9b6a-4f4d-ac81-eb1d163a5ff6',\n '68be9e56-ff49-4724-baf0-90fc833fb459',\n '28e93e82-edfa-418e-90aa-7ded057a0c68'], 'ServiceUrl':\n 'https://locatorservice.chsinc.ds/api/search'}"}), "(\n 'https://www.cenex.com/Common/Services/InteractiveMap.svc/GetLocations',\n method='POST', data={'SearchRequest': {'Metadata': {'MapId': '',\n 'Categories': []}, 'Query': {'SearchLat': 0, 'SearchLong': 0,\n 'LocationTypes': [1, 16, 15], 'Amenities': [], 'Organizations': [\n '28e93e82-edfa-418e-90aa-7ded057a0c68'], 'NELat': 90, 'NELong': 180,\n 'SWLat': -90, 'SWLong': -180}}, 'MapItemId':\n '40381d43-1c05-43e0-8477-78737b9974df', 'AllOrganizationIds': [\n 'b4ed9d2c-cc3b-4ce0-b642-79d75eac11fa',\n 'cb27078e-9b6a-4f4d-ac81-eb1d163a5ff6',\n '68be9e56-ff49-4724-baf0-90fc833fb459',\n '28e93e82-edfa-418e-90aa-7ded057a0c68'], 'ServiceUrl':\n 'https://locatorservice.chsinc.ds/api/search'})\n", (329, 1045), False, 'import scrapy\n')]
|
import os
import multiprocessing
if os.name != "nt":
# https://bugs.python.org/issue41567
import multiprocessing.popen_spawn_posix # type: ignore
from pathlib import Path
from typing import Optional
# PROFILES_DIR must be set before the other flags
# It also gets set in main.py and in set_from_args because the rpc server
# doesn't go through exactly the same main arg processing.
DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt")
PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR))
STRICT_MODE = False # Only here for backwards compatibility
FULL_REFRESH = False # subcommand
STORE_FAILURES = False # subcommand
# Global CLI commands
USE_EXPERIMENTAL_PARSER = None
STATIC_PARSER = None
WARN_ERROR = None
WRITE_JSON = None
PARTIAL_PARSE = None
USE_COLORS = None
DEBUG = None
LOG_FORMAT = None
VERSION_CHECK = None
FAIL_FAST = None
SEND_ANONYMOUS_USAGE_STATS = None
PRINTER_WIDTH = 80
WHICH = None
INDIRECT_SELECTION = None
LOG_CACHE_EVENTS = None
EVENT_BUFFER_SIZE = 100000
QUIET = None
# Global CLI defaults. These flags are set from three places:
# CLI args, environment variables, and user_config (profiles.yml).
# Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR
flag_defaults = {
"USE_EXPERIMENTAL_PARSER": False,
"STATIC_PARSER": True,
"WARN_ERROR": False,
"WRITE_JSON": True,
"PARTIAL_PARSE": True,
"USE_COLORS": True,
"PROFILES_DIR": DEFAULT_PROFILES_DIR,
"DEBUG": False,
"LOG_FORMAT": None,
"VERSION_CHECK": True,
"FAIL_FAST": False,
"SEND_ANONYMOUS_USAGE_STATS": True,
"PRINTER_WIDTH": 80,
"INDIRECT_SELECTION": "eager",
"LOG_CACHE_EVENTS": False,
"EVENT_BUFFER_SIZE": 100000,
"QUIET": False,
}
def env_set_truthy(key: str) -> Optional[str]:
"""Return the value if it was set to a "truthy" string value, or None
otherwise.
"""
value = os.getenv(key)
if not value or value.lower() in ("0", "false", "f"):
return None
return value
def env_set_bool(env_value):
if env_value in ("1", "t", "true", "y", "yes"):
return True
return False
def env_set_path(key: str) -> Optional[Path]:
value = os.getenv(key)
if value is None:
return value
else:
return Path(value)
MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING")
DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE")
ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH")
ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER")
def _get_context():
# TODO: change this back to use fork() on linux when we have made that safe
return multiprocessing.get_context("spawn")
# This is not a flag, it's a place to store the lock
MP_CONTEXT = _get_context()
def set_from_args(args, user_config):
# N.B. Multiple `globals` are purely for line length.
# Because `global` is a parser directive (as opposed to a language construct)
    # black insists on putting them all on one line
global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER
global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT
global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS
global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, EVENT_BUFFER_SIZE, QUIET
STRICT_MODE = False # backwards compatibility
# cli args without user_config or env var option
FULL_REFRESH = getattr(args, "full_refresh", FULL_REFRESH)
STORE_FAILURES = getattr(args, "store_failures", STORE_FAILURES)
WHICH = getattr(args, "which", WHICH)
# global cli flags with env var and user_config alternatives
USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config)
STATIC_PARSER = get_flag_value("STATIC_PARSER", args, user_config)
WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config)
WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config)
PARTIAL_PARSE = get_flag_value("PARTIAL_PARSE", args, user_config)
USE_COLORS = get_flag_value("USE_COLORS", args, user_config)
PROFILES_DIR = get_flag_value("PROFILES_DIR", args, user_config)
DEBUG = get_flag_value("DEBUG", args, user_config)
LOG_FORMAT = get_flag_value("LOG_FORMAT", args, user_config)
VERSION_CHECK = get_flag_value("VERSION_CHECK", args, user_config)
FAIL_FAST = get_flag_value("FAIL_FAST", args, user_config)
SEND_ANONYMOUS_USAGE_STATS = get_flag_value("SEND_ANONYMOUS_USAGE_STATS", args, user_config)
PRINTER_WIDTH = get_flag_value("PRINTER_WIDTH", args, user_config)
INDIRECT_SELECTION = get_flag_value("INDIRECT_SELECTION", args, user_config)
LOG_CACHE_EVENTS = get_flag_value("LOG_CACHE_EVENTS", args, user_config)
EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config)
QUIET = get_flag_value("QUIET", args, user_config)
def get_flag_value(flag, args, user_config):
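    # Resolution order: CLI argument, then the DBT_<flag> environment variable, then user_config, then the default.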
lc_flag = flag.lower()
flag_value = getattr(args, lc_flag, None)
if flag_value is None:
# Environment variables use pattern 'DBT_{flag name}'
env_flag = f"DBT_{flag}"
env_value = os.getenv(env_flag)
if env_value is not None and env_value != "":
env_value = env_value.lower()
# non Boolean values
if flag in [
"LOG_FORMAT",
"PRINTER_WIDTH",
"PROFILES_DIR",
"INDIRECT_SELECTION",
"EVENT_BUFFER_SIZE",
]:
flag_value = env_value
else:
flag_value = env_set_bool(env_value)
elif user_config is not None and getattr(user_config, lc_flag, None) is not None:
flag_value = getattr(user_config, lc_flag)
else:
flag_value = flag_defaults[flag]
if flag in ["PRINTER_WIDTH", "EVENT_BUFFER_SIZE"]: # must be ints
flag_value = int(flag_value)
if flag == "PROFILES_DIR":
flag_value = os.path.abspath(flag_value)
return flag_value
def get_flag_dict():
return {
"use_experimental_parser": USE_EXPERIMENTAL_PARSER,
"static_parser": STATIC_PARSER,
"warn_error": WARN_ERROR,
"write_json": WRITE_JSON,
"partial_parse": PARTIAL_PARSE,
"use_colors": USE_COLORS,
"profiles_dir": PROFILES_DIR,
"debug": DEBUG,
"log_format": LOG_FORMAT,
"version_check": VERSION_CHECK,
"fail_fast": FAIL_FAST,
"send_anonymous_usage_stats": SEND_ANONYMOUS_USAGE_STATS,
"printer_width": PRINTER_WIDTH,
"indirect_selection": INDIRECT_SELECTION,
"log_cache_events": LOG_CACHE_EVENTS,
"event_buffer_size": EVENT_BUFFER_SIZE,
"quiet": QUIET,
}
|
[
"os.getenv",
"pathlib.Path",
"multiprocessing.get_context",
"os.path.abspath",
"os.path.expanduser"
] |
[((429, 452), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (447, 452), False, 'import os\n'), ((496, 547), 'os.getenv', 'os.getenv', (['"""DBT_PROFILES_DIR"""', 'DEFAULT_PROFILES_DIR'], {}), "('DBT_PROFILES_DIR', DEFAULT_PROFILES_DIR)\n", (505, 547), False, 'import os\n'), ((1933, 1947), 'os.getenv', 'os.getenv', (['key'], {}), '(key)\n', (1942, 1947), False, 'import os\n'), ((2223, 2237), 'os.getenv', 'os.getenv', (['key'], {}), '(key)\n', (2232, 2237), False, 'import os\n'), ((2667, 2703), 'multiprocessing.get_context', 'multiprocessing.get_context', (['"""spawn"""'], {}), "('spawn')\n", (2694, 2703), False, 'import multiprocessing\n'), ((2306, 2317), 'pathlib.Path', 'Path', (['value'], {}), '(value)\n', (2310, 2317), False, 'from pathlib import Path\n'), ((5184, 5203), 'os.getenv', 'os.getenv', (['env_flag'], {}), '(env_flag)\n', (5193, 5203), False, 'import os\n'), ((6017, 6044), 'os.path.abspath', 'os.path.abspath', (['flag_value'], {}), '(flag_value)\n', (6032, 6044), False, 'import os\n')]
|
import os
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from src.python.baselines import *
from pymongo import MongoClient
from tqdm import tqdm
import tensorflow as tf
### Keras
from keras import optimizers
from keras.models import Model
from keras.layers import Input, Dense, Embedding, Activation
from keras.layers import Conv2D, Conv1D
from keras.layers import Dropout, BatchNormalization
from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D
from keras.layers import Concatenate, Flatten, Reshape
from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler
# from keras.losses import hinge, binary_crossentropy
from keras import backend as K
from sklearn.metrics import log_loss
import math
import argparse
sess = tf.Session()
K.set_session(sess)
LR = 0.001
BATCH_SIZE = 32
LONG_EXPOSURE = True
t0 = datetime(2014, 1, 1, 0, 0)
t1 = datetime(2014, 9, 1, 0, 0)
MAX_LENGTH = 2000
MIN_LENGTH = 30
def get_classes(db, onto, start=t0, end=t1):
q1 = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$lte": start},
'Aspect': ASPECT}
q2 = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$gt": start, "$lte": end},
'Aspect': ASPECT}
def helper(q):
seq2go, _ = GoAnnotationCollectionLoader(
db.goa_uniprot.find(q), db.goa_uniprot.count(q), ASPECT).load()
for i, (k, v) in enumerate(seq2go.items()):
sys.stdout.write("\r{0:.0f}%".format(100.0 * i / len(seq2go)))
seq2go[k] = onto.propagate(v)
return reduce(lambda x, y: set(x) | set(y), seq2go.values(), set())
return onto.sort(helper(q1) | helper(q2))
def get_training_and_validation_streams(db, limit=None):
q_train = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$lte": t0},
'Aspect': ASPECT}
seq2go_trn, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_train), db.goa_uniprot.count(q_train), ASPECT).load()
query = {"_id": {"$in": unique(list(seq2go_trn.keys())).tolist()}}
count = limit if limit else db.uniprot.count(query)
source = db.uniprot.find(query).batch_size(10)
if limit: source = source.limit(limit)
stream_trn = DataStream(source, count, seq2go_trn)
q_valid = {'DB': 'UniProtKB',
'Evidence': {'$in': exp_codes},
'Date': {"$gt": t0, "$lte": t1},
'Aspect': ASPECT}
seq2go_tst, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_valid), db.goa_uniprot.count(q_valid), ASPECT).load()
query = {"_id": {"$in": unique(list(seq2go_tst.keys())).tolist()}}
count = limit if limit else db.uniprot.count(query)
source = db.uniprot.find(query).batch_size(10)
if limit: source = source.limit(limit)
stream_tst = DataStream(source, count, seq2go_tst)
return stream_trn, stream_tst
class DataStream(object):
def __init__(self, source, count, seq2go):
self._count = count
self._source = source
self._seq2go = seq2go
def __iter__(self):
count = self._count
source = self._source
seq2go = self._seq2go
for k, seq in UniprotCollectionLoader(source, count):
if not MIN_LENGTH <= len(seq) <= MAX_LENGTH:
continue
x = [AA.aa2index[aa] for aa in seq]
yield k, x, seq2go[k]
def __len__(self):
return self._count
def step_decay(epoch):
initial_lrate = LR
drop = 0.5
epochs_drop = 1.0
lrate = max(0.0001, initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop)))
return lrate
def OriginalIception(inpt, num_channels=64):
# tower_0 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 3, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_2)
# tower_3 = MaxPooling1D(3, padding='same')(inpt)
# tower_3 = Conv1D(num_channels, 1, padding='same')(tower_3)
return Concatenate(axis=2)([tower_1, tower_2,])
def LargeInception(inpt, num_channels=64):
tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(inpt)
tower_1 = BatchNormalization()(tower_1)
tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(inpt)
tower_2 = BatchNormalization()(tower_2)
tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(tower_2)
return Concatenate(axis=2)([tower_1, tower_2])
def SmallInception(inpt, num_channels=150):
tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_1)
# tower_1 = BatchNormalization()(tower_1)
tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(num_channels, 15, padding='same', activation='relu')(tower_2)
# tower_2 = BatchNormalization()(tower_2)
return Concatenate(axis=2)([tower_1, tower_2])
def Classifier(inp1d, classes):
out = Dense(len(classes))(inp1d)
out = BatchNormalization()(out)
out = Activation('sigmoid')(out)
return out
def MotifNet(classes, opt):
inpt = Input(shape=(None,))
out = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
out = Conv1D(250, 15, activation='relu', padding='valid')(out)
out = Dropout(0.2)(out)
out = Conv1D(100, 15, activation='relu', padding='valid')(out)
out = SmallInception(out)
out = Dropout(0.2)(out)
out = SmallInception(out)
out = Dropout(0.2)(out)
out = Conv1D(250, 5, activation='relu', padding='valid')(out)
out = Dropout(0.2)(out)
out = Classifier(GlobalMaxPooling1D()(out), classes)
model = Model(inputs=[inpt], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def Inception(inpt, tower1=6, tower2=10):
tower_1 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
tower_1 = Conv1D(64, tower1, padding='same', activation='relu')(tower_1)
tower_2 = Conv1D(64, 1, padding='same', activation='relu')(inpt)
tower_2 = Conv1D(64, tower2, padding='same', activation='relu')(tower_2)
# tower_3 = MaxPooling1D(3, strides=1, padding='same')(inpt)
# tower_3 = Conv1D(64, 1, padding='same', activation='relu')(tower_3)
return Concatenate(axis=2)([tower_1, tower_2])
def ProteinInception(classes, opt):
inpt = Input(shape=(None,))
img = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
feats = Inception(Inception(img))
out = Classifier(GlobalMaxPooling1D()(feats), classes)
model = Model(inputs=[inpt], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def Features(inpt):
feats = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt)
feats = Conv1D(250, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(100, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(100, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = Conv1D(250, 15, activation='relu', padding='valid')(feats)
feats = Dropout(0.3)(feats)
feats = GlobalMaxPooling1D()(feats)
return feats
def DeeperSeq(classes, opt):
inp = Input(shape=(None,))
out = Classifier(Features(inp), classes)
model = Model(inputs=[inp], outputs=[out])
model.compile(loss='binary_crossentropy', optimizer=opt)
return model
def batch_generator(stream, onto, classes):
s_cls = set(classes)
data = dict()
def labels2vec(lbl):
y = np.zeros(len(classes))
for go in onto.propagate(lbl, include_root=False):
if go not in s_cls:
continue
y[classes.index(go)] = 1
return y
def pad_seq(seq, max_length=MAX_LENGTH):
delta = max_length - len(seq)
left = [PAD for _ in range(delta // 2)]
right = [PAD for _ in range(delta - delta // 2)]
seq = left + seq + right
return np.asarray(seq)
def prepare_batch(sequences, labels):
b = max(map(len, sequences)) + 100
Y = np.asarray([labels2vec(lbl) for lbl in labels])
X = np.asarray([pad_seq(seq, b) for seq in sequences])
return X, Y
for k, x, y in stream:
lx = len(x)
if lx in data:
data[lx].append([k, x, y])
ids, seqs, lbls = zip(*data[lx])
if len(seqs) == BATCH_SIZE:
yield ids, prepare_batch(seqs, lbls)
del data[lx]
else:
data[lx] = [[k, x, y]]
for packet in data.values():
ids, seqs, lbls = zip(*packet)
yield ids, prepare_batch(seqs, lbls)
class LossHistory(Callback):
def __init__(self):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
def train(model, gen_xy, length_xy, epoch, num_epochs,
history=LossHistory(), lrate=LearningRateScheduler(step_decay)):
pbar = tqdm(total=length_xy)
for _, (X, Y) in gen_xy:
model.fit(x=X, y=Y,
batch_size=BATCH_SIZE,
epochs=num_epochs if LONG_EXPOSURE else epoch + 1,
verbose=0,
validation_data=None,
initial_epoch=epoch,
callbacks=[history])
pbar.set_description("Training Loss:%.5f" % np.mean(history.losses))
pbar.update(len(Y))
pbar.close()
def zeroone2oneminusone(vec):
return np.add(np.multiply(np.array(vec), 2), -1)
def oneminusone2zeroone(vec):
return np.divide(np.add(np.array(vec), 1), 2)
def calc_loss(y_true, y_pred):
return np.mean([log_loss(y, y_hat) for y, y_hat in zip(y_true, y_pred) if np.any(y)])
def predict(model, gen_xy, length_xy, classes):
pbar = tqdm(total=length_xy, desc="Predicting...")
i, m, n = 0, length_xy, len(classes)
ids = list()
y_pred, y_true = np.zeros((m, n)), np.zeros((m, n))
for i, (keys, (X, Y)) in enumerate(gen_xy):
k = len(Y)
ids.extend(keys)
y_hat, y = model.predict(X), Y
y_pred[i:i + k, ], y_true[i:i + k, ] = y_hat, y
pbar.update(k)
pbar.close()
return ids, y_true, y_pred
def evaluate(y_true, y_pred, classes):
y_pred = y_pred[~np.all(y_pred == 0, axis=1)]
y_true = y_true[~np.all(y_true == 0, axis=1)]
prs, rcs, f1s = performance(y_pred, y_true, classes)
return calc_loss(y_true, y_pred), prs, rcs, f1s
def add_arguments(parser):
parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/',
help="Supply the URL of MongoDB"),
parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'],
default="F", help="Specify the ontology aspect.")
parser.add_argument("--init_epoch", type=int, default=0,
help="Which epoch to start training the model?")
parser.add_argument("--arch", type=str, choices=['deepseq', 'motifnet', 'inception'],
default="deepseq", help="Specify the model arch.")
parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
ASPECT = args.aspect # default: Molecular Function
client = MongoClient(args.mongo_url)
db = client['prot2vec']
print("Loading Ontology...")
onto = get_ontology(ASPECT)
# classes = get_classes(db, onto)
classes = onto.classes
classes.remove(onto.root)
assert onto.root not in classes
opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
if args.arch == 'inception':
model = ProteinInception(classes, opt)
LONG_EXPOSURE = False
num_epochs = 200
elif args.arch == 'deepseq':
model = DeeperSeq(classes, opt)
LONG_EXPOSURE = True
num_epochs = 20
elif args.arch == 'motifnet':
model = MotifNet(classes, opt)
LONG_EXPOSURE = False
num_epochs = 200
else:
print('Unknown model arch')
exit(0)
if args.resume:
model.load_weights(args.resume)
print("Loaded model from disk")
model.summary()
for epoch in range(args.init_epoch, num_epochs):
trn_stream, tst_stream = get_training_and_validation_streams(db)
train(model, batch_generator(trn_stream, onto, classes), len(trn_stream), epoch, num_epochs)
_, y_true, y_pred = predict(model, batch_generator(tst_stream, onto, classes), len(tst_stream), classes)
loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes)
i = np.argmax(f1s)
f_max = f1s[i]
print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)"
% (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i]))
model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max)
model.save_weights("checkpoints/%s.hdf5" % model_str)
with open("checkpoints/%s.json" % model_str, "w+") as f:
f.write(model.to_json())
np.save("checkpoints/%s.npy" % model_str, np.asarray(classes))
|
[
"keras.optimizers.Adam",
"keras.callbacks.LearningRateScheduler",
"argparse.ArgumentParser",
"math.floor",
"keras.layers.Concatenate",
"tensorflow.Session",
"tqdm.tqdm",
"keras.backend.set_session",
"keras.layers.GlobalMaxPooling1D",
"keras.layers.Input",
"sklearn.metrics.log_loss",
"keras.models.Model",
"keras.layers.Activation",
"pymongo.MongoClient",
"keras.layers.BatchNormalization",
"keras.layers.Embedding",
"keras.layers.Dropout",
"keras.layers.Conv1D"
] |
[((865, 877), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (875, 877), True, 'import tensorflow as tf\n'), ((878, 897), 'keras.backend.set_session', 'K.set_session', (['sess'], {}), '(sess)\n', (891, 897), True, 'from keras import backend as K\n'), ((5645, 5665), 'keras.layers.Input', 'Input', ([], {'shape': '(None,)'}), '(shape=(None,))\n', (5650, 5665), False, 'from keras.layers import Input, Dense, Embedding, Activation\n'), ((6196, 6231), 'keras.models.Model', 'Model', ([], {'inputs': '[inpt]', 'outputs': '[out]'}), '(inputs=[inpt], outputs=[out])\n', (6201, 6231), False, 'from keras.models import Model\n'), ((6889, 6909), 'keras.layers.Input', 'Input', ([], {'shape': '(None,)'}), '(shape=(None,))\n', (6894, 6909), False, 'from keras.layers import Input, Dense, Embedding, Activation\n'), ((7108, 7143), 'keras.models.Model', 'Model', ([], {'inputs': '[inpt]', 'outputs': '[out]'}), '(inputs=[inpt], outputs=[out])\n', (7113, 7143), False, 'from keras.models import Model\n'), ((7845, 7865), 'keras.layers.Input', 'Input', ([], {'shape': '(None,)'}), '(shape=(None,))\n', (7850, 7865), False, 'from keras.layers import Input, Dense, Embedding, Activation\n'), ((7923, 7957), 'keras.models.Model', 'Model', ([], {'inputs': '[inp]', 'outputs': '[out]'}), '(inputs=[inp], outputs=[out])\n', (7928, 7957), False, 'from keras.models import Model\n'), ((9549, 9582), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['step_decay'], {}), '(step_decay)\n', (9570, 9582), False, 'from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler\n'), ((9597, 9618), 'tqdm.tqdm', 'tqdm', ([], {'total': 'length_xy'}), '(total=length_xy)\n', (9601, 9618), False, 'from tqdm import tqdm\n'), ((10409, 10452), 'tqdm.tqdm', 'tqdm', ([], {'total': 'length_xy', 'desc': '"""Predicting..."""'}), "(total=length_xy, desc='Predicting...')\n", (10413, 10452), False, 'from tqdm import tqdm\n'), ((11891, 11916), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (11914, 11916), False, 'import argparse\n'), ((12045, 12072), 'pymongo.MongoClient', 'MongoClient', (['args.mongo_url'], {}), '(args.mongo_url)\n', (12056, 12072), False, 'from pymongo import MongoClient\n'), ((12311, 12374), 'keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': 'LR', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)'}), '(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n', (12326, 12374), False, 'from keras import optimizers\n'), ((3937, 3995), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 1, padding='same', activation='relu')\n", (3943, 3995), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4016, 4074), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 3, padding='same', activation='relu')\n", (4022, 4074), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4099, 4157), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 1, padding='same', activation='relu')\n", (4105, 4157), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4178, 4236), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(5)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 5, padding='same', activation='relu')\n", (4184, 4236), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4378, 4397), 
'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(2)'}), '(axis=2)\n', (4389, 4397), False, 'from keras.layers import Concatenate, Flatten, Reshape\n'), ((4479, 4537), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(6)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 6, padding='same', activation='relu')\n", (4485, 4537), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4558, 4578), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4576, 4578), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((4602, 4660), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(6)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 6, padding='same', activation='relu')\n", (4608, 4660), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4685, 4744), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(10)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 10, padding='same', activation='relu')\n", (4691, 4744), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4765, 4785), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4783, 4785), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((4809, 4868), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(10)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 10, padding='same', activation='relu')\n", (4815, 4868), False, 'from keras.layers import Conv2D, Conv1D\n'), ((4890, 4909), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(2)'}), '(axis=2)\n', (4901, 4909), False, 'from keras.layers import Concatenate, Flatten, Reshape\n'), ((4991, 5049), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 1, padding='same', activation='relu')\n", (4997, 5049), False, 'from keras.layers import Conv2D, Conv1D\n'), ((5070, 5128), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(5)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 5, padding='same', activation='relu')\n", (5076, 5128), False, 'from keras.layers import Conv2D, Conv1D\n'), ((5199, 5257), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 1, padding='same', activation='relu')\n", (5205, 5257), False, 'from keras.layers import Conv2D, Conv1D\n'), ((5278, 5337), 'keras.layers.Conv1D', 'Conv1D', (['num_channels', '(15)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(num_channels, 15, padding='same', activation='relu')\n", (5284, 5337), False, 'from keras.layers import Conv2D, Conv1D\n'), ((5405, 5424), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(2)'}), '(axis=2)\n', (5416, 5424), False, 'from keras.layers import Concatenate, Flatten, Reshape\n'), ((5526, 5546), 'keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (5544, 5546), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((5562, 5583), 'keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (5572, 5583), False, 'from keras.layers import Input, Dense, Embedding, Activation\n'), ((5676, 5748), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': '(26)', 'output_dim': '(23)', 'embeddings_initializer': '"""uniform"""'}), "(input_dim=26, output_dim=23, embeddings_initializer='uniform')\n", (5685, 5748), False, 'from keras.layers import Input, Dense, Embedding, 
Activation\n'), ((5765, 5816), 'keras.layers.Conv1D', 'Conv1D', (['(250)', '(15)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(250, 15, activation='relu', padding='valid')\n", (5771, 5816), False, 'from keras.layers import Conv2D, Conv1D\n'), ((5832, 5844), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (5839, 5844), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((5860, 5911), 'keras.layers.Conv1D', 'Conv1D', (['(100)', '(15)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(100, 15, activation='relu', padding='valid')\n", (5866, 5911), False, 'from keras.layers import Conv2D, Conv1D\n'), ((5957, 5969), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (5964, 5969), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((6015, 6027), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6022, 6027), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((6043, 6093), 'keras.layers.Conv1D', 'Conv1D', (['(250)', '(5)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(250, 5, activation='relu', padding='valid')\n", (6049, 6093), False, 'from keras.layers import Conv2D, Conv1D\n'), ((6109, 6121), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (6116, 6121), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((6369, 6417), 'keras.layers.Conv1D', 'Conv1D', (['(64)', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, 1, padding='same', activation='relu')\n", (6375, 6417), False, 'from keras.layers import Conv2D, Conv1D\n'), ((6438, 6491), 'keras.layers.Conv1D', 'Conv1D', (['(64)', 'tower1'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, tower1, padding='same', activation='relu')\n", (6444, 6491), False, 'from keras.layers import Conv2D, Conv1D\n'), ((6516, 6564), 'keras.layers.Conv1D', 'Conv1D', (['(64)', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, 1, padding='same', activation='relu')\n", (6522, 6564), False, 'from keras.layers import Conv2D, Conv1D\n'), ((6585, 6638), 'keras.layers.Conv1D', 'Conv1D', (['(64)', 'tower2'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, tower2, padding='same', activation='relu')\n", (6591, 6638), False, 'from keras.layers import Conv2D, Conv1D\n'), ((6800, 6819), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(2)'}), '(axis=2)\n', (6811, 6819), False, 'from keras.layers import Concatenate, Flatten, Reshape\n'), ((6920, 6992), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': '(26)', 'output_dim': '(23)', 'embeddings_initializer': '"""uniform"""'}), "(input_dim=26, output_dim=23, embeddings_initializer='uniform')\n", (6929, 6992), False, 'from keras.layers import Input, Dense, Embedding, Activation\n'), ((7256, 7328), 'keras.layers.Embedding', 'Embedding', ([], {'input_dim': '(26)', 'output_dim': '(23)', 'embeddings_initializer': '"""uniform"""'}), "(input_dim=26, output_dim=23, embeddings_initializer='uniform')\n", (7265, 7328), False, 'from keras.layers import Input, Dense, Embedding, Activation\n'), ((7347, 7398), 'keras.layers.Conv1D', 'Conv1D', (['(250)', '(15)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(250, 15, activation='relu', padding='valid')\n", (7353, 7398), False, 'from keras.layers import Conv2D, Conv1D\n'), ((7418, 7430), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (7425, 7430), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((7450, 7501), 
'keras.layers.Conv1D', 'Conv1D', (['(100)', '(15)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(100, 15, activation='relu', padding='valid')\n", (7456, 7501), False, 'from keras.layers import Conv2D, Conv1D\n'), ((7521, 7533), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (7528, 7533), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((7553, 7604), 'keras.layers.Conv1D', 'Conv1D', (['(100)', '(15)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(100, 15, activation='relu', padding='valid')\n", (7559, 7604), False, 'from keras.layers import Conv2D, Conv1D\n'), ((7624, 7636), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (7631, 7636), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((7656, 7707), 'keras.layers.Conv1D', 'Conv1D', (['(250)', '(15)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(250, 15, activation='relu', padding='valid')\n", (7662, 7707), False, 'from keras.layers import Conv2D, Conv1D\n'), ((7727, 7739), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (7734, 7739), False, 'from keras.layers import Dropout, BatchNormalization\n'), ((7759, 7779), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (7777, 7779), False, 'from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D\n'), ((6148, 6168), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (6166, 6168), False, 'from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D\n'), ((7058, 7078), 'keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), '()\n', (7076, 7078), False, 'from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D\n'), ((10278, 10296), 'sklearn.metrics.log_loss', 'log_loss', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10286, 10296), False, 'from sklearn.metrics import log_loss\n'), ((3736, 3773), 'math.floor', 'math.floor', (['((1 + epoch) / epochs_drop)'], {}), '((1 + epoch) / epochs_drop)\n', (3746, 3773), False, 'import math\n')]
|
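step_decay above implements a halve-every-epoch learning-rate schedule with a 1e-4 floor. A quick standalone check of that formula, reusing only the constants visible in the snippet:

import math

LR = 0.001  # initial learning rate, as in the snippet

def step_decay(epoch, drop=0.5, epochs_drop=1.0):
    # halve the rate every epochs_drop epochs, never going below 1e-4
    return max(0.0001, LR * math.pow(drop, math.floor((1 + epoch) / epochs_drop)))

for epoch in range(5):
    print(epoch, step_decay(epoch))
# 0.0005, 0.00025, 0.000125, then clamped to 0.0001 from epoch 3 onwards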
import matplotlib.pyplot as plt
import openpyxl
import sys
from fs import FS
from journals import Journals
from utils import load_sheet
from utils import log
from word import Word
YEARS = [2017, 2018, 2019, 2020, 2021]
class InBiMa():
def __init__(self, is_new_folder=False):
self.fs = FS(is_new_folder)
if is_new_folder: return
self.wb = openpyxl.load_workbook(self.fs.get_path('cait.xlsx'))
log('Excel file is opened', 'res')
self.team = load_sheet(self.wb['team'])
self.grants = load_sheet(self.wb['grants'])
self.papers = load_sheet(self.wb['papers'])
self.journals = Journals(load_sheet(self.wb['journals']))
# self.journals_ref = self.load_journals_ref()
self.task = {
'authors': ['#cichocki'],
'grants': ['#megagrant1'],
}
log('Excel file is parsed', 'res')
# log('Journal info is loaded', 'res')
for uid in self.team.keys():
self.task['authors'] = [uid]
self.export_word_cv()
self.export_grant_papers()
self.export_stat()
def export_word_cv(self):
if len(self.task.get('authors', [])) != 1:
text = 'export_word_cv (task should contain only one author)'
log(text, 'err')
return
person = self.team.get(self.task['authors'][0])
if person is None:
text = 'export_word_cv (invalid team member uid in task)'
log(text, 'err')
return
uid = person['id']
stat = self.get_papers_stat(uid, YEARS)
photo_logo = self.fs.download_photo_logo()
photo_person = self.fs.download_photo(uid[1:], person.get('photo'))
self.word = Word(YEARS, self.get_papers)
self.word.add_person_info(person, photo_person, photo_logo)
self.word.add_person_stat(stat)
self.word.add_note(is_grant=True)
self.word.add_break()
self.word.add_paper_list(stat, author=person['id'])
fname = 'CAIT_' + person['surname'] + '_' + person['name'] + '.docx'
fpath = self.fs.get_path(fname)
self.word.save(fpath)
log(f'Document "{fpath}" is saved', 'res')
def export_grant_papers(self):
if len(self.task.get('grants', [])) != 1:
text = 'export_grant_papers (task should contain only one grant)'
log(text, 'err')
return
grant = self.grants.get(self.task['grants'][0])
if grant is None:
text = 'export_grant_papers (invalid grant uid in task)'
log(text, 'err')
return
uid = grant['id']
stat = self.get_papers_stat(years=YEARS, grant=uid)
photo_logo = self.fs.download_photo_logo()
head = grant.get('head', '')
head = self.team[head]
self.word = Word(YEARS, self.get_papers)
self.word.add_grant_info(grant, head, photo_logo)
self.word.add_note(is_grant=True)
self.word.add_break()
self.word.add_paper_list(stat, grant=uid, with_links=True)
fname = 'CAIT_' + uid[1:] + '.docx'
fpath = self.fs.get_path(fname)
self.word.save(fpath)
log(f'Document "{fpath}" is saved', 'res')
def export_stat(self):
stats = {}
for uid in self.team.keys():
if self.team[uid].get('active') != 'Yes':
continue
if self.team[uid].get('lead') != 'Yes':
continue
stats[uid] = self.get_papers_stat(uid, YEARS)
for uid, stat in stats.items():
x = YEARS
y = [stat[y]['total'] for y in YEARS]
plt.plot(x, y, marker='o', label=uid)
plt.legend(loc='best')
fpath = self.fs.get_path('plot.png')
plt.savefig(fpath)
log(f'Figure "{fpath}" is saved', 'res')
def get_papers(self, author=None, year=None, q=None, grant=None):
res = {}
for title, paper in self.papers.items():
if year and int(year) != int(paper['year']):
continue
if author and not author in paper['authors_parsed']:
continue
if grant and not grant in paper.get('grant', ''):
continue
if q is not None:
journal = self.journals.data[paper['journal']]
q1 = journal.get('sjr_q1', '')
q2 = journal.get('sjr_q2', '')
if q == 1 and len(q1) < 2:
continue
if q == 2 and (len(q1) >= 2 or len(q2) < 2):
continue
if q == 0 and (len(q1) >= 2 or len(q2) >= 2):
continue
res[title] = paper
res[title]['journal_object'] = self.journals.data[paper['journal']]
return res
def get_papers_stat(self, author=None, years=[], grant=None):
res = {}
for year in years:
res[year] = {
'q1': len(self.get_papers(author, year, q=1, grant=grant)),
'q2': len(self.get_papers(author, year, q=2, grant=grant)),
'q0': len(self.get_papers(author, year, q=0, grant=grant)),
'total': len(self.get_papers(author, year, grant=grant))
}
res['total'] = {
'q1': sum(res[year]['q1'] for year in years),
'q2': sum(res[year]['q2'] for year in years),
'q0': sum(res[year]['q0'] for year in years),
'total': sum(res[year]['total'] for year in years),
}
return res
if __name__ == '__main__':
args = sys.argv[1:]
if len(args) == 0:
ibm = InBiMa()
elif len(args) == 1 and args[0] == '-f':
ibm = InBiMa(is_new_folder=True)
elif len(args) == 2 and args[0] == '-j':
journals = Journals()
journals.load_ref()
journals.log_ref(title=args[1])
else:
raise ValueError('Invalid arguments for script')
|
[
"utils.log",
"word.Word",
"matplotlib.pyplot.savefig",
"utils.load_sheet",
"matplotlib.pyplot.plot",
"fs.FS",
"journals.Journals",
"matplotlib.pyplot.legend"
] |
[((304, 321), 'fs.FS', 'FS', (['is_new_folder'], {}), '(is_new_folder)\n', (306, 321), False, 'from fs import FS\n'), ((436, 470), 'utils.log', 'log', (['"""Excel file is opened"""', '"""res"""'], {}), "('Excel file is opened', 'res')\n", (439, 470), False, 'from utils import log\n'), ((492, 519), 'utils.load_sheet', 'load_sheet', (["self.wb['team']"], {}), "(self.wb['team'])\n", (502, 519), False, 'from utils import load_sheet\n'), ((542, 571), 'utils.load_sheet', 'load_sheet', (["self.wb['grants']"], {}), "(self.wb['grants'])\n", (552, 571), False, 'from utils import load_sheet\n'), ((594, 623), 'utils.load_sheet', 'load_sheet', (["self.wb['papers']"], {}), "(self.wb['papers'])\n", (604, 623), False, 'from utils import load_sheet\n'), ((862, 896), 'utils.log', 'log', (['"""Excel file is parsed"""', '"""res"""'], {}), "('Excel file is parsed', 'res')\n", (865, 896), False, 'from utils import log\n'), ((1750, 1778), 'word.Word', 'Word', (['YEARS', 'self.get_papers'], {}), '(YEARS, self.get_papers)\n', (1754, 1778), False, 'from word import Word\n'), ((2175, 2217), 'utils.log', 'log', (['f"""Document "{fpath}" is saved"""', '"""res"""'], {}), '(f\'Document "{fpath}" is saved\', \'res\')\n', (2178, 2217), False, 'from utils import log\n'), ((2858, 2886), 'word.Word', 'Word', (['YEARS', 'self.get_papers'], {}), '(YEARS, self.get_papers)\n', (2862, 2886), False, 'from word import Word\n'), ((3207, 3249), 'utils.log', 'log', (['f"""Document "{fpath}" is saved"""', '"""res"""'], {}), '(f\'Document "{fpath}" is saved\', \'res\')\n', (3210, 3249), False, 'from utils import log\n'), ((3720, 3742), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (3730, 3742), True, 'import matplotlib.pyplot as plt\n'), ((3797, 3815), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fpath'], {}), '(fpath)\n', (3808, 3815), True, 'import matplotlib.pyplot as plt\n'), ((3824, 3864), 'utils.log', 'log', (['f"""Figure "{fpath}" is saved"""', '"""res"""'], {}), '(f\'Figure "{fpath}" is saved\', \'res\')\n', (3827, 3864), False, 'from utils import log\n'), ((657, 688), 'utils.load_sheet', 'load_sheet', (["self.wb['journals']"], {}), "(self.wb['journals'])\n", (667, 688), False, 'from utils import load_sheet\n'), ((1288, 1304), 'utils.log', 'log', (['text', '"""err"""'], {}), "(text, 'err')\n", (1291, 1304), False, 'from utils import log\n'), ((1490, 1506), 'utils.log', 'log', (['text', '"""err"""'], {}), "(text, 'err')\n", (1493, 1506), False, 'from utils import log\n'), ((2394, 2410), 'utils.log', 'log', (['text', '"""err"""'], {}), "(text, 'err')\n", (2397, 2410), False, 'from utils import log\n'), ((2594, 2610), 'utils.log', 'log', (['text', '"""err"""'], {}), "(text, 'err')\n", (2597, 2610), False, 'from utils import log\n'), ((3673, 3710), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'marker': '"""o"""', 'label': 'uid'}), "(x, y, marker='o', label=uid)\n", (3681, 3710), True, 'import matplotlib.pyplot as plt\n'), ((5831, 5841), 'journals.Journals', 'Journals', ([], {}), '()\n', (5839, 5841), False, 'from journals import Journals\n')]
|
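get_papers and get_papers_stat above reduce to filtering a dict of papers by author, year and grant, then counting the matches. A toy version of that filter-and-count step; the records and field values below are invented purely for illustration:

papers = {
    "Paper A": {"year": 2020, "authors_parsed": ["#cichocki"], "grant": "#megagrant1"},
    "Paper B": {"year": 2021, "authors_parsed": ["#someone"], "grant": ""},
}

def count_papers(papers, author=None, year=None, grant=None):
    total = 0
    for paper in papers.values():
        if year and int(year) != int(paper["year"]):
            continue
        if author and author not in paper["authors_parsed"]:
            continue
        if grant and grant not in paper.get("grant", ""):
            continue
        total += 1
    return total

print(count_papers(papers, author="#cichocki", year=2020))  # 1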
from django.forms import (
Form, CharField, Textarea, PasswordInput, ChoiceField, DateField,
ImageField, BooleanField, IntegerField, MultipleChoiceField
)
from django import forms
from fb.models import UserProfile
class UserPostForm(Form):
text = CharField(widget=Textarea(
attrs={'rows': 1, 'cols': 40, 'class': 'form-control','placeholder': "What's on your mind?"}))
class UserPostCommentForm(Form):
text = CharField(widget=Textarea(
attrs={'rows': 1, 'cols': 50, 'class': 'form-control','placeholder': "Write a comment..."}))
class UserLogin(Form):
username = CharField(max_length=30)
password = CharField(widget=PasswordInput)
class UserProfileForm(Form):
first_name = CharField(max_length=100, required=False)
last_name = CharField(max_length=100, required=False)
gender = ChoiceField(choices=UserProfile.GENDERS, required=False)
date_of_birth = DateField(required=False)
avatar = ImageField(required=False)
OPTIONS = (
("Cars", "Cars"),
("Dogs", "Dogs"),
("Sports", "Sports"),
)
interests = MultipleChoiceField(widget=forms.CheckboxSelectMultiple,
choices=OPTIONS, required=False)
class QuestionFrom(Form):
question_description = CharField(max_length=300)
points = IntegerField()
class AddAnswerForm(Form):
answer_description = CharField(max_length=30)
correct_answer = BooleanField(required=False)
|
[
"django.forms.BooleanField",
"django.forms.CharField",
"django.forms.DateField",
"django.forms.ChoiceField",
"django.forms.ImageField",
"django.forms.IntegerField",
"django.forms.Textarea",
"django.forms.MultipleChoiceField"
] |
[((606, 630), 'django.forms.CharField', 'CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (615, 630), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((646, 677), 'django.forms.CharField', 'CharField', ([], {'widget': 'PasswordInput'}), '(widget=PasswordInput)\n', (655, 677), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((726, 767), 'django.forms.CharField', 'CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (735, 767), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((784, 825), 'django.forms.CharField', 'CharField', ([], {'max_length': '(100)', 'required': '(False)'}), '(max_length=100, required=False)\n', (793, 825), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((839, 895), 'django.forms.ChoiceField', 'ChoiceField', ([], {'choices': 'UserProfile.GENDERS', 'required': '(False)'}), '(choices=UserProfile.GENDERS, required=False)\n', (850, 895), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((916, 941), 'django.forms.DateField', 'DateField', ([], {'required': '(False)'}), '(required=False)\n', (925, 941), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((955, 981), 'django.forms.ImageField', 'ImageField', ([], {'required': '(False)'}), '(required=False)\n', (965, 981), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((1123, 1216), 'django.forms.MultipleChoiceField', 'MultipleChoiceField', ([], {'widget': 'forms.CheckboxSelectMultiple', 'choices': 'OPTIONS', 'required': '(False)'}), '(widget=forms.CheckboxSelectMultiple, choices=OPTIONS,\n required=False)\n', (1142, 1216), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((1303, 1328), 'django.forms.CharField', 'CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (1312, 1328), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((1342, 1356), 'django.forms.IntegerField', 'IntegerField', ([], {}), '()\n', (1354, 1356), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((1410, 1434), 'django.forms.CharField', 'CharField', ([], {'max_length': '(30)'}), '(max_length=30)\n', (1419, 1434), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((1456, 1484), 'django.forms.BooleanField', 'BooleanField', ([], {'required': '(False)'}), '(required=False)\n', (1468, 1484), False, 'from django.forms import Form, CharField, 
Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((279, 386), 'django.forms.Textarea', 'Textarea', ([], {'attrs': '{\'rows\': 1, \'cols\': 40, \'class\': \'form-control\', \'placeholder\':\n "What\'s on your mind?"}'}), '(attrs={\'rows\': 1, \'cols\': 40, \'class\': \'form-control\',\n \'placeholder\': "What\'s on your mind?"})\n', (287, 386), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n'), ((455, 560), 'django.forms.Textarea', 'Textarea', ([], {'attrs': "{'rows': 1, 'cols': 50, 'class': 'form-control', 'placeholder':\n 'Write a comment...'}"}), "(attrs={'rows': 1, 'cols': 50, 'class': 'form-control',\n 'placeholder': 'Write a comment...'})\n", (463, 560), False, 'from django.forms import Form, CharField, Textarea, PasswordInput, ChoiceField, DateField, ImageField, BooleanField, IntegerField, MultipleChoiceField\n')]
|
from flask import Flask, jsonify, request, render_template, redirect
from flask_pymongo import PyMongo
from werkzeug import secure_filename
import base64
app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'restdb'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'
mongo = PyMongo(app)
@app.route('/')
def index():
return render_template("index.html")
@app.route('/w')
def webcam():
return render_template("webcam.html")
@app.route('/img')
def img():
i = request.query_string
    # query_string is bytes; str.decode('base64') is a Python 2 idiom, so use the base64 module instead
    with open('a.png', 'wb') as f:
        f.write(base64.b64decode(i))
    return "success <img src='" + i.decode() + "'>"
@app.route('/hello')
def hello():
return "hello world"
@app.route('/star', methods=['GET'])
def get_all_stars():
star = mongo.db.stars
output = []
for s in star.find():
output.append({'name' : s['name'], 'distance' : s['distance']})
return jsonify(output)
@app.route('/star/<name>', methods=['GET'])
def get_one_star(name):
star = mongo.db.stars
s = star.find_one({'name' : name})
if s:
output = {'name': s['name'], 'distance': s['distance']}
else:
output = "No such name"
return jsonify(output)
@app.route('/star', methods=['POST'])
def add_star():
star = mongo.db.stars
name = request.json['name']
distance = request.json['distance']
star_id = star.insert({'name': name, 'distance': distance})
new_star = star.find_one({'_id': star_id})
output = {'name' : new_star['name'], 'distance' : new_star['distance']}
return jsonify(output)
@app.route('/uploader', methods=['POST'])
def upload_file():
f = request.files['file']
f.save(secure_filename('1'))
return "uploaded"
if __name__ == '__main__':
app.run(debug=True)
|
[
"flask.render_template",
"flask.Flask",
"flask_pymongo.PyMongo",
"werkzeug.secure_filename",
"flask.jsonify"
] |
[((161, 176), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (166, 176), False, 'from flask import Flask, jsonify, request, render_template, redirect\n'), ((286, 298), 'flask_pymongo.PyMongo', 'PyMongo', (['app'], {}), '(app)\n', (293, 298), False, 'from flask_pymongo import PyMongo\n'), ((338, 367), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (353, 367), False, 'from flask import Flask, jsonify, request, render_template, redirect\n'), ((409, 439), 'flask.render_template', 'render_template', (['"""webcam.html"""'], {}), "('webcam.html')\n", (424, 439), False, 'from flask import Flask, jsonify, request, render_template, redirect\n'), ((850, 865), 'flask.jsonify', 'jsonify', (['output'], {}), '(output)\n', (857, 865), False, 'from flask import Flask, jsonify, request, render_template, redirect\n'), ((1103, 1118), 'flask.jsonify', 'jsonify', (['output'], {}), '(output)\n', (1110, 1118), False, 'from flask import Flask, jsonify, request, render_template, redirect\n'), ((1456, 1471), 'flask.jsonify', 'jsonify', (['output'], {}), '(output)\n', (1463, 1471), False, 'from flask import Flask, jsonify, request, render_template, redirect\n'), ((1571, 1591), 'werkzeug.secure_filename', 'secure_filename', (['"""1"""'], {}), "('1')\n", (1586, 1591), False, 'from werkzeug import secure_filename\n')]
|
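The /img route above expects a base64 payload in the query string and writes the decoded bytes to disk. A stdlib-only round trip of the encode/decode pair it relies on; the payload bytes are arbitrary stand-ins:

import base64

payload = b"\x89PNG fake image bytes"      # stand-in for real image data
encoded = base64.b64encode(payload)        # what a client would put in the query string
decoded = base64.b64decode(encoded)        # what the route writes to a.png
assert decoded == payload
print(encoded.decode())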
""" Utilities
:Author: <NAME> <<EMAIL>>
:Date: 2016-11-10
:Copyright: 2016, Karr Lab
:License: MIT
"""
from obj_tables import get_models as base_get_models
from wc_lang import core
from wc_lang import io
from wc_utils.util import git
def get_model_size(model):
""" Get numbers of model components
Args:
model (:obj:`core.Model`): model
Returns:
:obj:`dict`: dictionary with numbers of each type of model component
"""
return {
"submodels": len(model.get_submodels()),
"compartments": len(model.get_compartments()),
"species_types": len(model.get_species_types()),
"species": len(model.get_species()),
"parameters": len(model.get_parameters()),
"references": len(model.get_references()),
"reactions": len(model.get_reactions()),
}
def get_model_summary(model):
""" Get textual summary of a model
Args:
model (:obj:`core.Model`): model
Returns:
:obj:`str`: textual summary of the model
"""
return "Model with:" \
+ "\n{:d} submodels".format(len(model.get_submodels())) \
+ "\n{:d} compartments".format(len(model.get_compartments())) \
+ "\n{:d} species types".format(len(model.get_species_types())) \
+ "\n{:d} species".format(len(model.get_species())) \
+ "\n{:d} parameters".format(len(model.get_parameters())) \
+ "\n{:d} references".format(len(model.get_references())) \
+ "\n{:d} dFBA objective reactions".format(len(model.get_dfba_obj_reactions())) \
+ "\n{:d} reactions".format(len(model.get_reactions())) \
+ "\n{:d} rate laws".format(len(model.get_rate_laws()))
def get_models(inline=True):
""" Get list of models
Args:
inline (:obj:`bool`, optional): if true, return inline models
Returns:
:obj:`list` of :obj:`class`: list of models
"""
return base_get_models(module=core, inline=inline)
def gen_ids(model):
""" Generate ids for model objects
Args:
model (:obj:`core.Model`): model
"""
for obj in model.get_related():
if hasattr(obj, 'gen_id'):
obj.id = obj.gen_id()
|
[
"obj_tables.get_models"
] |
[((1907, 1950), 'obj_tables.get_models', 'base_get_models', ([], {'module': 'core', 'inline': 'inline'}), '(module=core, inline=inline)\n', (1922, 1950), True, 'from obj_tables import get_models as base_get_models\n')]
|
import synapse.common as s_common
import synapse.tests.utils as s_t_utils
import synapse.tools.autodoc as s_autodoc
class TestAutoDoc(s_t_utils.SynTest):
async def test_tools_autodoc_docmodel(self):
with self.getTestDir() as path:
argv = ['--doc-model', '--savedir', path]
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'datamodel_types.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('Base types are defined via Python classes.', s)
self.isin('synapse.models.inet.Addr', s)
self.isin('Regular types are derived from BaseTypes.', s)
self.isin(r'inet\:server', s)
with s_common.genfile(path, 'datamodel_forms.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('Forms are derived from types, or base types. Forms represent node types in the graph.', s)
self.isin(r'inet\:ipv4', s)
self.notin(r'file\:bytes:.created', s)
self.isin('Universal props are system level properties which may be present on every node.', s)
self.isin('.created', s)
self.notin('..created\n', s)
self.isin('An example of ``inet:dns:a``\\:', s)
async def test_tools_autodoc_confdefs(self):
with self.getTestDir() as path:
argv = ['--savedir', path, '--doc-conf',
'synapse.tests.test_lib_stormsvc.StormvarServiceCell']
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('autodoc-stormvarservicecell-conf', s)
self.isin('StormvarServiceCell Configuration Options', s)
self.isin('See :ref:`devops-cell-config` for', s)
self.isin('auth\\:passwd', s)
self.isin('Environment Variable\n ``SYN_STORMVARSERVICECELL_AUTH_PASSWD``', s)
self.isin('``--auth-passwd``', s)
argv.append('--doc-conf-reflink')
argv.append('`Configuring a Cell Service <https://synapse.docs.vertex.link/en/latest/synapse/devguides/devops_cell.html>`_')
# truncate the current file
with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
fd.truncate()
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('StormvarServiceCell Configuration Options', s)
self.isin('See `Configuring a Cell Service <https://synapse', s)
async def test_tools_autodoc_stormsvc(self):
with self.getTestDir() as path:
argv = ['--savedir', path, '--doc-storm',
'synapse.tests.test_lib_stormsvc.StormvarServiceCell']
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'stormsvc_stormvarservicecell.rst') as fd:
buf = fd.read()
s = buf.decode()
self.isin('StormvarServiceCell Storm Service', s)
self.isin('This documentation is generated for version 0.0.1 of the service.', s)
self.isin('Storm Package\\: stormvar', s)
self.isin('.. _stormcmd-stormvar-magic:\n', s)
self.isin('magic\n-----', s)
self.isin('Test stormvar support', s)
self.isin('forms as input nodes', s)
self.isin('``test:str``', s)
self.isin('nodes in the graph', s)
self.isin('``test:comp``', s)
self.isin('nodedata with the following keys', s)
self.isin('``foo`` on ``inet:ipv4``', s)
async def test_tools_autodoc_stormtypes(self):
with self.getTestDir() as path:
argv = ['--savedir', path, '--doc-stormtypes']
outp = self.getTestOutp()
self.eq(await s_autodoc.main(argv, outp=outp), 0)
with s_common.genfile(path, 'stormtypes_libs.rst') as fd:
libbuf = fd.read()
libtext = libbuf.decode()
self.isin('.. _stormlibs-lib-print:\n\n$lib.print(mesg, \\*\\*kwargs)\n============================',
libtext)
self.isin('Print a message to the runtime.', libtext)
self.isin('\\*\\*kwargs (any): Keyword arguments to substitute into the mesg.', libtext)
self.isin('.. _stormlibs-lib-time:\n\n*********\n$lib.time\n*********', libtext)
self.isin('A Storm Library for interacting with timestamps.', libtext)
with s_common.genfile(path, 'stormtypes_prims.rst') as fd:
primbuf = fd.read()
primstext = primbuf.decode()
self.isin('.. _stormprims-storm-auth-user:\n\n*****************\nstorm\\:auth\\:user\n*****************', primstext)
self.isin('iden\n====\n\nThe User iden.', primstext)
|
[
"synapse.tools.autodoc.main",
"synapse.common.genfile"
] |
[((422, 467), 'synapse.common.genfile', 's_common.genfile', (['path', '"""datamodel_types.rst"""'], {}), "(path, 'datamodel_types.rst')\n", (438, 467), True, 'import synapse.common as s_common\n'), ((791, 836), 'synapse.common.genfile', 's_common.genfile', (['path', '"""datamodel_forms.rst"""'], {}), "(path, 'datamodel_forms.rst')\n", (807, 836), True, 'import synapse.common as s_common\n'), ((1696, 1750), 'synapse.common.genfile', 's_common.genfile', (['path', '"""conf_stormvarservicecell.rst"""'], {}), "(path, 'conf_stormvarservicecell.rst')\n", (1712, 1750), True, 'import synapse.common as s_common\n'), ((2437, 2491), 'synapse.common.genfile', 's_common.genfile', (['path', '"""conf_stormvarservicecell.rst"""'], {}), "(path, 'conf_stormvarservicecell.rst')\n", (2453, 2491), True, 'import synapse.common as s_common\n'), ((2647, 2701), 'synapse.common.genfile', 's_common.genfile', (['path', '"""conf_stormvarservicecell.rst"""'], {}), "(path, 'conf_stormvarservicecell.rst')\n", (2663, 2701), True, 'import synapse.common as s_common\n'), ((3258, 3316), 'synapse.common.genfile', 's_common.genfile', (['path', '"""stormsvc_stormvarservicecell.rst"""'], {}), "(path, 'stormsvc_stormvarservicecell.rst')\n", (3274, 3316), True, 'import synapse.common as s_common\n'), ((4309, 4354), 'synapse.common.genfile', 's_common.genfile', (['path', '"""stormtypes_libs.rst"""'], {}), "(path, 'stormtypes_libs.rst')\n", (4325, 4354), True, 'import synapse.common as s_common\n'), ((4942, 4988), 'synapse.common.genfile', 's_common.genfile', (['path', '"""stormtypes_prims.rst"""'], {}), "(path, 'stormtypes_prims.rst')\n", (4958, 4988), True, 'import synapse.common as s_common\n'), ((368, 399), 'synapse.tools.autodoc.main', 's_autodoc.main', (['argv'], {'outp': 'outp'}), '(argv, outp=outp)\n', (382, 399), True, 'import synapse.tools.autodoc as s_autodoc\n'), ((1642, 1673), 'synapse.tools.autodoc.main', 's_autodoc.main', (['argv'], {'outp': 'outp'}), '(argv, outp=outp)\n', (1656, 1673), True, 'import synapse.tools.autodoc as s_autodoc\n'), ((2594, 2625), 'synapse.tools.autodoc.main', 's_autodoc.main', (['argv'], {'outp': 'outp'}), '(argv, outp=outp)\n', (2608, 2625), True, 'import synapse.tools.autodoc as s_autodoc\n'), ((3204, 3235), 'synapse.tools.autodoc.main', 's_autodoc.main', (['argv'], {'outp': 'outp'}), '(argv, outp=outp)\n', (3218, 3235), True, 'import synapse.tools.autodoc as s_autodoc\n'), ((4255, 4286), 'synapse.tools.autodoc.main', 's_autodoc.main', (['argv'], {'outp': 'outp'}), '(argv, outp=outp)\n', (4269, 4286), True, 'import synapse.tools.autodoc as s_autodoc\n')]
|
from abc import ABC, abstractmethod
from django.test import TestCase
from rest_framework.generics import GenericAPIView
from rest_framework.test import APIRequestFactory
from apps.cars.factory import UserFactory
class AbstractBaseTest(object):
class AbstractBaseApiTestCase(TestCase, ABC):
"""
Abstract Base TestCase class.
"""
def setUp(self) -> None:
"""Base setup"""
self.user = UserFactory.create()
self.request_factory = APIRequestFactory()
self.view = self._view()
self.endpoint = self._endpoint()
@abstractmethod
def _view(self) -> GenericAPIView.as_view():
"""Abstract method that returns YourApiToTest.as_view()"""
pass
@abstractmethod
def _endpoint(self) -> str:
"""Abstract method that return endpoint string E.g /cars/"""
pass
@abstractmethod
def test_anonymous_request(self, *args, **kwargs) -> None:
"""test if anonymous user cannot access endpoint"""
pass
|
[
"rest_framework.test.APIRequestFactory",
"apps.cars.factory.UserFactory.create",
"rest_framework.generics.GenericAPIView.as_view"
] |
[((657, 681), 'rest_framework.generics.GenericAPIView.as_view', 'GenericAPIView.as_view', ([], {}), '()\n', (679, 681), False, 'from rest_framework.generics import GenericAPIView\n'), ((447, 467), 'apps.cars.factory.UserFactory.create', 'UserFactory.create', ([], {}), '()\n', (465, 467), False, 'from apps.cars.factory import UserFactory\n'), ((503, 522), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (520, 522), False, 'from rest_framework.test import APIRequestFactory\n')]
|
"""
A simple templating tool for Dockerfiles
"""
import sys
import os
import click
import jinja2
import yaml
@click.group()
def cli():
""" @Unimplemented """
pass
@cli.command()
@click.argument("template", required=True, type=str)
@click.option("-y", "--yaml_file", required=True,
help="Yaml file with keys for template",
type=str)
def from_yaml(template, yaml_file):
"""
Fills in template file fields using the
yaml_file
"""
temp_path = os.path.expanduser(
os.path.expandvars(template))
yml_path = os.path.expanduser(
os.path.expandvars(yaml_file))
with open(temp_path, 'r') as tfile:
temp_jin = jinja2.Template(tfile.read())
with open(yml_path, 'r') as yfile:
yml_loaded = yaml.load(yfile, Loader=yaml.BaseLoader)
temp_rend = temp_jin.render(**yml_loaded)
sys.stdout.write(temp_rend)
sys.stdout.flush()
cli.add_command(from_yaml)
if __name__ == '__main__':
cli()
|
[
"click.argument",
"click.group",
"click.option",
"os.path.expandvars",
"yaml.load",
"sys.stdout.flush",
"sys.stdout.write"
] |
[((114, 127), 'click.group', 'click.group', ([], {}), '()\n', (125, 127), False, 'import click\n'), ((193, 244), 'click.argument', 'click.argument', (['"""template"""'], {'required': '(True)', 'type': 'str'}), "('template', required=True, type=str)\n", (207, 244), False, 'import click\n'), ((246, 350), 'click.option', 'click.option', (['"""-y"""', '"""--yaml_file"""'], {'required': '(True)', 'help': '"""Yaml file with keys for template"""', 'type': 'str'}), "('-y', '--yaml_file', required=True, help=\n 'Yaml file with keys for template', type=str)\n", (258, 350), False, 'import click\n'), ((877, 904), 'sys.stdout.write', 'sys.stdout.write', (['temp_rend'], {}), '(temp_rend)\n', (893, 904), False, 'import sys\n'), ((909, 927), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (925, 927), False, 'import sys\n'), ((529, 557), 'os.path.expandvars', 'os.path.expandvars', (['template'], {}), '(template)\n', (547, 557), False, 'import os\n'), ((602, 631), 'os.path.expandvars', 'os.path.expandvars', (['yaml_file'], {}), '(yaml_file)\n', (620, 631), False, 'import os\n'), ((784, 824), 'yaml.load', 'yaml.load', (['yfile'], {'Loader': 'yaml.BaseLoader'}), '(yfile, Loader=yaml.BaseLoader)\n', (793, 824), False, 'import yaml\n')]
|
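from_yaml above boils down to rendering a jinja2.Template with keyword arguments taken from a YAML mapping. A minimal in-memory version of that render step; the template text and values are made up for illustration:

import jinja2
import yaml

template = jinja2.Template("FROM {{ base_image }}\nLABEL maintainer={{ maintainer }}")
values = yaml.load("base_image: python:3.10-slim\nmaintainer: dev@example.com",
                  Loader=yaml.BaseLoader)
print(template.render(**values))
# FROM python:3.10-slim
# LABEL maintainer=dev@example.com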
#!/usr/bin/env python
# coding: utf-8
# In[18]:
# this definition exposes all python module imports that should be available in all subsequent commands
import json
import numpy as np
import pandas as pd
from causalnex.structure import DAGRegressor
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
# ...
# global constants
MODEL_DIRECTORY = "/srv/app/model/data/"
# In[22]:
# this cell is not executed from MLTK and should only be used for staging data into the notebook environment
def stage(name):
with open("data/"+name+".csv", 'r') as f:
df = pd.read_csv(f)
with open("data/"+name+".json", 'r') as f:
param = json.load(f)
return df, param
# In[24]:
# initialize your model
# available inputs: data and parameters
# returns the model object which will be used as a reference to call fit, apply and summary subsequently
def init(df,param):
model = DAGRegressor(
alpha=0.1,
beta=0.9,
fit_intercept=True,
hidden_layer_units=None,
dependent_target=True,
enforce_dag=True,
)
return model
# In[26]:
# train your model
# returns a fit info json object and may modify the model object
def fit(model,df,param):
target=param['target_variables'][0]
#Data prep for processing
y_p = df[target]
y = y_p.values
X_p = df[param['feature_variables']]
X = X_p.to_numpy()
X_col = list(X_p.columns)
#Scale the data
ss = StandardScaler()
X_ss = ss.fit_transform(X)
y_ss = (y - y.mean()) / y.std()
scores = cross_val_score(model, X_ss, y_ss, cv=KFold(shuffle=True, random_state=42))
print(f'MEAN R2: {np.mean(scores).mean():.3f}')
X_pd = pd.DataFrame(X_ss, columns=X_col)
y_pd = pd.Series(y_ss, name=target)
model.fit(X_pd, y_pd)
info = pd.Series(model.coef_, index=X_col)
#info = pd.Series(model.coef_, index=list(df.drop(['_time'],axis=1).columns))
return info
# In[28]:
# apply your model
# returns the calculated results
def apply(model,df,param):
data = []
for col in list(df.columns):
s = model.get_edges_to_node(col)
for i in s.index:
data.append([i,col,s[i]]);
graph = pd.DataFrame(data, columns=['src','dest','weight'])
#results to send back to Splunk
graph_output=graph[graph['weight']>0]
return graph_output
# In[ ]:
# save model to name in expected convention "<algo_name>_<model_name>"
def save(model,name):
#with open(MODEL_DIRECTORY + name + ".json", 'w') as file:
# json.dump(model, file)
return model
# In[ ]:
# load model from name in expected convention "<algo_name>_<model_name>"
def load(name):
model = {}
#with open(MODEL_DIRECTORY + name + ".json", 'r') as file:
# model = json.load(file)
return model
# In[ ]:
# return a model summary
def summary(model=None):
returns = {"version": {"numpy": np.__version__, "pandas": pd.__version__} }
return returns
|
[
"pandas.Series",
"numpy.mean",
"pandas.read_csv",
"sklearn.preprocessing.StandardScaler",
"json.load",
"pandas.DataFrame",
"causalnex.structure.DAGRegressor",
"sklearn.model_selection.KFold"
] |
[((1022, 1146), 'causalnex.structure.DAGRegressor', 'DAGRegressor', ([], {'alpha': '(0.1)', 'beta': '(0.9)', 'fit_intercept': '(True)', 'hidden_layer_units': 'None', 'dependent_target': '(True)', 'enforce_dag': '(True)'}), '(alpha=0.1, beta=0.9, fit_intercept=True, hidden_layer_units=\n None, dependent_target=True, enforce_dag=True)\n', (1034, 1146), False, 'from causalnex.structure import DAGRegressor\n'), ((1653, 1669), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1667, 1669), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1895, 1928), 'pandas.DataFrame', 'pd.DataFrame', (['X_ss'], {'columns': 'X_col'}), '(X_ss, columns=X_col)\n', (1907, 1928), True, 'import pandas as pd\n'), ((1940, 1968), 'pandas.Series', 'pd.Series', (['y_ss'], {'name': 'target'}), '(y_ss, name=target)\n', (1949, 1968), True, 'import pandas as pd\n'), ((2012, 2047), 'pandas.Series', 'pd.Series', (['model.coef_'], {'index': 'X_col'}), '(model.coef_, index=X_col)\n', (2021, 2047), True, 'import pandas as pd\n'), ((2416, 2469), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': "['src', 'dest', 'weight']"}), "(data, columns=['src', 'dest', 'weight'])\n", (2428, 2469), True, 'import pandas as pd\n'), ((675, 689), 'pandas.read_csv', 'pd.read_csv', (['f'], {}), '(f)\n', (686, 689), True, 'import pandas as pd\n'), ((753, 765), 'json.load', 'json.load', (['f'], {}), '(f)\n', (762, 765), False, 'import json\n'), ((1793, 1829), 'sklearn.model_selection.KFold', 'KFold', ([], {'shuffle': '(True)', 'random_state': '(42)'}), '(shuffle=True, random_state=42)\n', (1798, 1829), False, 'from sklearn.model_selection import KFold\n'), ((1853, 1868), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1860, 1868), True, 'import numpy as np\n')]
|
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.animation as animation
matplotlib.use('Agg')
import math
import numpy as np
import sys
from os.path import join, isfile
import warnings
warnings.filterwarnings("ignore")
def gda(x, y):
x = x.T
y = y.T
# phi = P(y = 1)
# mu[i] = mean of the feature vectors of the ith class
# sigma = common co-variance matrix
# M[i] = number of data points of class i
phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0])
m = y.shape[0]
M[1] = np.sum(y)
M[0] = m - M[1]
phi = M[1] / m
mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)])
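    # shared covariance: average the outer products of each point's deviation from its own class mean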
sigma = np.sum(np.array([np.outer(x[i] - mu[y[i]], x[i] - mu[y[i]]) for i in range(m)]), axis=0).astype(float) / m
return phi, mu, sigma
def gda_general(x, y):
x = x.T
y = y.T
# phi = P(y = 1)
# mu[i] = mean of the feature vectors of the ith class
# sigma[i] = co-variance matrix for the ith class
# M[i] = number of data points of class i
phi, mu, sigma, M = 0, np.array([0., 0.]), 0, np.array([0, 0])
m = y.shape[0]
M[1] = np.sum(y)
M[0] = m - M[1]
phi = M[1] / m
mu = np.array([np.sum(np.array([x[j] for j in range(m) if y[j] == i]), axis=0) / M[i] for i in range(2)])
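    # per-class covariance: average the outer products of deviations within each class separately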
sigma = np.array([np.sum(np.array([np.outer(x[i] - mu[k], x[i] - mu[k]) for i in range(m) if y[i] == k]), axis=0) / M[k] for k in range(2)]).astype(float)
return phi, mu, sigma
def main():
# read command-line arguments
data_dir = sys.argv[1]
out_dir = sys.argv[2]
part = sys.argv[3]
# check for existence of input files
for c in ['x', 'y']:
if not isfile(join(data_dir, 'q4' + c + '.dat')):
raise Exception('q4' + c + '.dat not found')
# read from csv file
x = np.array(np.genfromtxt(join(data_dir, 'q4x.dat'))).T
y = np.array([0 if yi == 'Alaska' else 1 for yi in np.loadtxt(join(data_dir, 'q4y.dat'), dtype=str)])
# normalisation
x_mean = np.array([0.0] * 2)
x_stddev = np.array([0.0] * 2)
for i in range(2):
x_mean[i] = np.mean(x[i])
x[i] -= np.full_like(x[i], np.mean(x[i]))
x_stddev[i] = np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0])
x[i] /= np.sqrt(np.sum(x[i] ** 2) / x[i].shape[0])
# part A
# running GDA with common co-variance matrix
phi, mu, sigma = gda(x, y)
if part == 'a':
output_file = open(join(out_dir, '4aoutput.txt'), mode='w')
output_file.write('phi = ' + str(phi) + '\n')
output_file.write('mu[0] = ' + str(mu[0]) + '\n')
output_file.write('mu[1] = ' + str(mu[1]) + '\n')
output_file.write('sigma = \n' + str(sigma) + '\n')
output_file.close()
print('phi = ' + str(phi))
print('mu[0] = ' + str(mu[0]))
print('mu[1] = ' + str(mu[1]))
print('sigma = \n' + str(sigma))
return 0
# part B, C
fig4b, ax4b = plt.subplots()
# filter by y-values
x0, x1 = [], []
for i in range(y.shape[0]):
if y[i] == 0:
x0.append([x[0][i], x[1][i]])
else:
x1.append([x[0][i], x[1][i]])
x0 = np.array(x0).T
x1 = np.array(x1).T
# plot classes
alaska = ax4b.scatter(x0[0] * x_stddev[0] + x_mean[0], x0[1] * x_stddev[1] + x_mean[1], c='red', s=6)
canada = ax4b.scatter(x1[0] * x_stddev[0] + x_mean[0], x1[1] * x_stddev[1] + x_mean[1], c='blue', s=6)
ax4b.set_xlabel('Fresh water ring dia.')
ax4b.set_ylabel('Marine water ring dia.')
fig4b.legend((alaska, canada), ('Alaska', 'Canada'))
if part == 'b':
fig4b.savefig(join(out_dir, '1b_plot.png'))
plt.show()
return 0
# linear boundary computation - equation in report
sigma_inverse = np.linalg.inv(sigma)
theta = np.array([0., 0., 0.])
theta[0] = np.log(phi / (1 - phi))
for i in range(2):
mui = np.array([mu[i]])
        theta[0] += ((-1) ** i) * np.matmul(np.matmul(mui, sigma_inverse), mui.T) / 2  # 1/2 factor from the Gaussian exponent
theta[1:] = np.matmul(np.array([mu[1] - mu[0]]), sigma_inverse)
# plotting the boundary
rx = np.arange(-3, 4)
ry = (-theta[0] - theta[1] * rx) / theta[2]
ax4b.plot(rx * x_stddev[0] + x_mean[0], ry * x_stddev[1] + x_mean[1])
#plt.show()
if part == 'c':
fig4b.savefig(join(out_dir, '1c_plot.png'))
plt.show()
return 0
# part D
# running generalised GDA
phi, mu, sigma = gda_general(x, y)
if part == 'd':
output_file = open(join(out_dir, '4doutput.txt'), mode='w')
output_file.write('phi = ' + str(phi) + '\n')
output_file.write('mu[0] = ' + str(mu[0]) + '\n')
output_file.write('mu[1] = ' + str(mu[1]) + '\n')
output_file.write('sigma[0] = \n' + str(sigma[0]) + '\n')
output_file.write('sigma[1] = \n' + str(sigma[1]) + '\n')
output_file.close()
print('phi = ' + str(phi))
print('mu[0] = ' + str(mu[0]))
print('mu[1] = ' + str(mu[1]))
print('sigma[0] = \n' + str(sigma[0]))
print('sigma[1] = \n' + str(sigma[1]))
return 0
# part E
# quadratic boundary computation - equation in report
constant = np.log(phi / (1 - phi)) + np.log(np.linalg.det(sigma[0]) / np.linalg.det(sigma[1])) / 2
linear = 0
quadratic = 0
for i in range(2):
sigma_inverse = np.linalg.inv(sigma[i])
mui = np.array([mu[i]])
prod = np.matmul(mui, sigma_inverse)
constant += ((-1) ** i) * np.matmul(prod, mui.T) / 2
linear += ((-1) ** (i + 1)) * prod
quadratic += ((-1) ** i) * sigma_inverse / 2
constant = constant[0][0]
linear = linear[0]
# note that here x transposed is the feature vector (as x is a row vector)
# and similarly mu[i] is also a row vector, which explains the equations above
# equation is x * quadratic * x.T + linear * x.T + constant = 0
# plotting the quadratic boundary
Z = 0
X, Y = np.meshgrid(np.linspace(-4, 4, 100), np.linspace(-4, 4, 100))
Z += quadratic[0, 0] * (X ** 2) + (quadratic[0, 1] + quadratic[1, 0]) * X * Y + (quadratic[1, 1]) * (Y ** 2)
Z += linear[0] * X + linear[1] * Y
Z += constant
ax4b.contour(X * x_stddev[0] + x_mean[0], Y * x_stddev[1] + x_mean[1], Z, 0)
if part == 'e':
fig4b.savefig(join(out_dir, '1e_plot.png'))
plt.show()
# part F - in the report
return 0
if __name__ == '__main__':
main()
|
[
"warnings.filterwarnings",
"numpy.mean",
"matplotlib.use",
"numpy.log",
"os.path.join",
"numpy.linalg.det",
"numpy.sum",
"numpy.array",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.linspace",
"numpy.outer",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((91, 112), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (105, 112), False, 'import matplotlib\n'), ((206, 239), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (229, 239), False, 'import warnings\n'), ((547, 556), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (553, 556), True, 'import numpy as np\n'), ((1184, 1193), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (1190, 1193), True, 'import numpy as np\n'), ((2065, 2084), 'numpy.array', 'np.array', (['([0.0] * 2)'], {}), '([0.0] * 2)\n', (2073, 2084), True, 'import numpy as np\n'), ((2100, 2119), 'numpy.array', 'np.array', (['([0.0] * 2)'], {}), '([0.0] * 2)\n', (2108, 2119), True, 'import numpy as np\n'), ((3001, 3015), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3013, 3015), True, 'import matplotlib.pyplot as plt\n'), ((3827, 3847), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma'], {}), '(sigma)\n', (3840, 3847), True, 'import numpy as np\n'), ((3860, 3885), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (3868, 3885), True, 'import numpy as np\n'), ((3898, 3921), 'numpy.log', 'np.log', (['(phi / (1 - phi))'], {}), '(phi / (1 - phi))\n', (3904, 3921), True, 'import numpy as np\n'), ((4165, 4181), 'numpy.arange', 'np.arange', (['(-3)', '(4)'], {}), '(-3, 4)\n', (4174, 4181), True, 'import numpy as np\n'), ((475, 495), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (483, 495), True, 'import numpy as np\n'), ((498, 514), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (506, 514), True, 'import numpy as np\n'), ((1112, 1132), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1120, 1132), True, 'import numpy as np\n'), ((1135, 1151), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1143, 1151), True, 'import numpy as np\n'), ((2163, 2176), 'numpy.mean', 'np.mean', (['x[i]'], {}), '(x[i])\n', (2170, 2176), True, 'import numpy as np\n'), ((3223, 3235), 'numpy.array', 'np.array', (['x0'], {}), '(x0)\n', (3231, 3235), True, 'import numpy as np\n'), ((3247, 3259), 'numpy.array', 'np.array', (['x1'], {}), '(x1)\n', (3255, 3259), True, 'import numpy as np\n'), ((3723, 3733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3731, 3733), True, 'import matplotlib.pyplot as plt\n'), ((3959, 3976), 'numpy.array', 'np.array', (['[mu[i]]'], {}), '([mu[i]])\n', (3967, 3976), True, 'import numpy as np\n'), ((4085, 4110), 'numpy.array', 'np.array', (['[mu[1] - mu[0]]'], {}), '([mu[1] - mu[0]])\n', (4093, 4110), True, 'import numpy as np\n'), ((4401, 4411), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4409, 4411), True, 'import matplotlib.pyplot as plt\n'), ((5244, 5267), 'numpy.log', 'np.log', (['(phi / (1 - phi))'], {}), '(phi / (1 - phi))\n', (5250, 5267), True, 'import numpy as np\n'), ((5412, 5435), 'numpy.linalg.inv', 'np.linalg.inv', (['sigma[i]'], {}), '(sigma[i])\n', (5425, 5435), True, 'import numpy as np\n'), ((5450, 5467), 'numpy.array', 'np.array', (['[mu[i]]'], {}), '([mu[i]])\n', (5458, 5467), True, 'import numpy as np\n'), ((5483, 5512), 'numpy.matmul', 'np.matmul', (['mui', 'sigma_inverse'], {}), '(mui, sigma_inverse)\n', (5492, 5512), True, 'import numpy as np\n'), ((6025, 6048), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(100)'], {}), '(-4, 4, 100)\n', (6036, 6048), True, 'import numpy as np\n'), ((6050, 6073), 'numpy.linspace', 'np.linspace', (['(-4)', '(4)', '(100)'], {}), '(-4, 4, 100)\n', (6061, 6073), True, 'import numpy as np\n'), 
((6406, 6416), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6414, 6416), True, 'import matplotlib.pyplot as plt\n'), ((2212, 2225), 'numpy.mean', 'np.mean', (['x[i]'], {}), '(x[i])\n', (2219, 2225), True, 'import numpy as np\n'), ((2495, 2524), 'os.path.join', 'join', (['out_dir', '"""4aoutput.txt"""'], {}), "(out_dir, '4aoutput.txt')\n", (2499, 2524), False, 'from os.path import join, isfile\n'), ((3685, 3713), 'os.path.join', 'join', (['out_dir', '"""1b_plot.png"""'], {}), "(out_dir, '1b_plot.png')\n", (3689, 3713), False, 'from os.path import join, isfile\n'), ((4363, 4391), 'os.path.join', 'join', (['out_dir', '"""1c_plot.png"""'], {}), "(out_dir, '1c_plot.png')\n", (4367, 4391), False, 'from os.path import join, isfile\n'), ((4561, 4590), 'os.path.join', 'join', (['out_dir', '"""4doutput.txt"""'], {}), "(out_dir, '4doutput.txt')\n", (4565, 4590), False, 'from os.path import join, isfile\n'), ((6368, 6396), 'os.path.join', 'join', (['out_dir', '"""1e_plot.png"""'], {}), "(out_dir, '1e_plot.png')\n", (6372, 6396), False, 'from os.path import join, isfile\n'), ((1745, 1778), 'os.path.join', 'join', (['data_dir', "('q4' + c + '.dat')"], {}), "(data_dir, 'q4' + c + '.dat')\n", (1749, 1778), False, 'from os.path import join, isfile\n'), ((1895, 1920), 'os.path.join', 'join', (['data_dir', '"""q4x.dat"""'], {}), "(data_dir, 'q4x.dat')\n", (1899, 1920), False, 'from os.path import join, isfile\n'), ((2257, 2274), 'numpy.sum', 'np.sum', (['(x[i] ** 2)'], {}), '(x[i] ** 2)\n', (2263, 2274), True, 'import numpy as np\n'), ((2316, 2333), 'numpy.sum', 'np.sum', (['(x[i] ** 2)'], {}), '(x[i] ** 2)\n', (2322, 2333), True, 'import numpy as np\n'), ((4021, 4050), 'numpy.matmul', 'np.matmul', (['mui', 'sigma_inverse'], {}), '(mui, sigma_inverse)\n', (4030, 4050), True, 'import numpy as np\n'), ((5547, 5569), 'numpy.matmul', 'np.matmul', (['prod', 'mui.T'], {}), '(prod, mui.T)\n', (5556, 5569), True, 'import numpy as np\n'), ((1991, 2016), 'os.path.join', 'join', (['data_dir', '"""q4y.dat"""'], {}), "(data_dir, 'q4y.dat')\n", (1995, 2016), False, 'from os.path import join, isfile\n'), ((5277, 5300), 'numpy.linalg.det', 'np.linalg.det', (['sigma[0]'], {}), '(sigma[0])\n', (5290, 5300), True, 'import numpy as np\n'), ((5303, 5326), 'numpy.linalg.det', 'np.linalg.det', (['sigma[1]'], {}), '(sigma[1])\n', (5316, 5326), True, 'import numpy as np\n'), ((738, 780), 'numpy.outer', 'np.outer', (['(x[i] - mu[y[i]])', '(x[i] - mu[y[i]])'], {}), '(x[i] - mu[y[i]], x[i] - mu[y[i]])\n', (746, 780), True, 'import numpy as np\n'), ((1385, 1421), 'numpy.outer', 'np.outer', (['(x[i] - mu[k])', '(x[i] - mu[k])'], {}), '(x[i] - mu[k], x[i] - mu[k])\n', (1393, 1421), True, 'import numpy as np\n')]
|
import bcrypt
salt = bcrypt.gensalt()
def generate_hash(passwd, salt=salt):
return str(bcrypt.hashpw(passwd, salt))
def match_password(req_pwd, db_pwd):
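    # the stored hash is the str() of a bytes value, so strip the b'...' wrapper and re-encode before comparing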
db_pwd = db_pwd.replace('b\'','').replace('\'','').encode('utf-8')
return db_pwd == bcrypt.hashpw(req_pwd, db_pwd)
|
[
"bcrypt.gensalt",
"bcrypt.hashpw"
] |
[((22, 38), 'bcrypt.gensalt', 'bcrypt.gensalt', ([], {}), '()\n', (36, 38), False, 'import bcrypt\n'), ((93, 120), 'bcrypt.hashpw', 'bcrypt.hashpw', (['passwd', 'salt'], {}), '(passwd, salt)\n', (106, 120), False, 'import bcrypt\n'), ((253, 283), 'bcrypt.hashpw', 'bcrypt.hashpw', (['req_pwd', 'db_pwd'], {}), '(req_pwd, db_pwd)\n', (266, 283), False, 'import bcrypt\n')]
|
"""
Module for Serialization and Deserialization of a KNX Disconnect Request information.
Disconnect requests are used to disconnect a tunnel from a KNX/IP device.
"""
from __future__ import annotations
from typing import TYPE_CHECKING
from xknx.exceptions import CouldNotParseKNXIP
from .body import KNXIPBody
from .hpai import HPAI
from .knxip_enum import KNXIPServiceType
if TYPE_CHECKING:
from xknx.xknx import XKNX
class DisconnectRequest(KNXIPBody):
"""Representation of a KNX Disconnect Request."""
SERVICE_TYPE = KNXIPServiceType.DISCONNECT_REQUEST
def __init__(
self,
xknx: XKNX,
communication_channel_id: int = 1,
control_endpoint: HPAI = HPAI(),
):
"""Initialize DisconnectRequest object."""
super().__init__(xknx)
self.communication_channel_id = communication_channel_id
self.control_endpoint = control_endpoint
def calculated_length(self) -> int:
"""Get length of KNX/IP body."""
return 2 + HPAI.LENGTH
def from_knx(self, raw: bytes) -> int:
"""Parse/deserialize from KNX/IP raw data."""
if len(raw) < 2:
raise CouldNotParseKNXIP("Disconnect info has wrong length")
self.communication_channel_id = raw[0]
# raw[1] is reserved
return self.control_endpoint.from_knx(raw[2:]) + 2
def to_knx(self) -> bytes:
"""Serialize to KNX/IP raw data."""
return (
bytes((self.communication_channel_id, 0x00)) # 2nd byte is reserved
+ self.control_endpoint.to_knx()
)
def __str__(self) -> str:
"""Return object as readable string."""
return (
"<DisconnectRequest "
f'CommunicationChannelID="{self.communication_channel_id}" '
f'control_endpoint="{self.control_endpoint}" />'
)
|
[
"xknx.exceptions.CouldNotParseKNXIP"
] |
[((1172, 1226), 'xknx.exceptions.CouldNotParseKNXIP', 'CouldNotParseKNXIP', (['"""Disconnect info has wrong length"""'], {}), "('Disconnect info has wrong length')\n", (1190, 1226), False, 'from xknx.exceptions import CouldNotParseKNXIP\n')]
|
from src.sqlite_helper import create_message_table, drop_message_table
"""
This script creates a SQLite table for you and is intended as a one-time setup.
The table is named "message" and stores all of the Post messages.
"""
create_message_table()
"""
If you need to drop the message table, un-comment the following code by removing the leading # signs
"""
#
# drop_message_table()
#
|
[
"src.sqlite_helper.create_message_table"
] |
[((222, 244), 'src.sqlite_helper.create_message_table', 'create_message_table', ([], {}), '()\n', (242, 244), False, 'from src.sqlite_helper import create_message_table, drop_message_table\n')]
|
import typing as t
from typing import TYPE_CHECKING
import numpy as np
import torch
import pytest
import imageio
from detectron2 import model_zoo
from detectron2.data import transforms as T
from detectron2.config import get_cfg
from detectron2.modeling import build_model
import bentoml
if TYPE_CHECKING:
from detectron2.config import CfgNode
from bentoml._internal.types import Tag
from bentoml._internal.models import ModelStore
IMAGE_URL: str = "./tests/utils/_static/detectron2_sample.jpg"
def extract_result(raw_result: t.Dict[str, t.Any]) -> t.Dict[str, t.Any]:
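    # detach the predicted boxes, scores and class ids and move them to the CPU as plain numpy arrays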
pred_instances = raw_result["instances"]
boxes = pred_instances.pred_boxes.to("cpu").tensor.detach().numpy()
scores = pred_instances.scores.to("cpu").detach().numpy()
pred_classes = pred_instances.pred_classes.to("cpu").detach().numpy()
result = {
"boxes": boxes,
"scores": scores,
"classes": pred_classes,
}
return result
def prepare_image(
original_image: "np.ndarray[t.Any, np.dtype[t.Any]]",
) -> "np.ndarray[t.Any, np.dtype[t.Any]]":
"""Mainly to test on COCO dataset"""
_aug = T.ResizeShortestEdge([800, 800], 1333)
image = _aug.get_transform(original_image).apply_image(original_image)
return image.transpose(2, 0, 1)
def detectron_model_and_config() -> t.Tuple[torch.nn.Module, "CfgNode"]:
model_url: str = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
cfg: "CfgNode" = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(model_url))
# set threshold for this model
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_url)
cloned = cfg.clone()
cloned.MODEL.DEVICE = "cpu" # running on CI
model: torch.nn.Module = build_model(cloned)
model.eval()
return model, cfg
@pytest.fixture(scope="module", name="image_array")
def fixture_image_array() -> "np.ndarray[t.Any, np.dtype[t.Any]]":
return np.asarray(imageio.imread(IMAGE_URL))
def save_procedure(metadata: t.Dict[str, t.Any], _modelstore: "ModelStore") -> "Tag":
model, config = detectron_model_and_config()
tag_info = bentoml.detectron.save(
"test_detectron2_model",
model,
model_config=config,
metadata=metadata,
model_store=_modelstore,
)
return tag_info
@pytest.mark.parametrize("metadata", [{"acc": 0.876}])
def test_detectron2_save_load(
metadata: t.Dict[str, t.Any],
image_array: "np.ndarray[t.Any, np.dtype[t.Any]]",
modelstore: "ModelStore",
) -> None:
tag = save_procedure(metadata, _modelstore=modelstore)
_model = bentoml.models.get(tag, _model_store=modelstore)
assert _model.info.metadata is not None
detectron_loaded = bentoml.detectron.load(
_model.tag,
device="cpu",
model_store=modelstore,
)
assert next(detectron_loaded.parameters()).device.type == "cpu"
image = prepare_image(image_array)
image = torch.as_tensor(image)
input_data = [{"image": image}]
raw_result = detectron_loaded(input_data)
result = extract_result(raw_result[0])
assert result["scores"][0] > 0.9
def test_detectron2_setup_run_batch(
image_array: "np.ndarray[t.Any, np.dtype[t.Any]]", modelstore: "ModelStore"
) -> None:
tag = save_procedure({}, _modelstore=modelstore)
runner = bentoml.detectron.load_runner(tag, model_store=modelstore)
assert tag in runner.required_models
assert runner.num_concurrency_per_replica == 1
assert runner.num_replica == 1
image = torch.as_tensor(prepare_image(image_array))
res = runner.run_batch(image)
result = extract_result(res[0])
assert result["boxes"] is not None
|
[
"bentoml.detectron.save",
"detectron2.modeling.build_model",
"torch.as_tensor",
"detectron2.config.get_cfg",
"bentoml.models.get",
"bentoml.detectron.load_runner",
"imageio.imread",
"pytest.mark.parametrize",
"detectron2.model_zoo.get_checkpoint_url",
"detectron2.model_zoo.get_config_file",
"bentoml.detectron.load",
"pytest.fixture",
"detectron2.data.transforms.ResizeShortestEdge"
] |
[((1848, 1898), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'name': '"""image_array"""'}), "(scope='module', name='image_array')\n", (1862, 1898), False, 'import pytest\n'), ((2357, 2410), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""metadata"""', "[{'acc': 0.876}]"], {}), "('metadata', [{'acc': 0.876}])\n", (2380, 2410), False, 'import pytest\n'), ((1138, 1176), 'detectron2.data.transforms.ResizeShortestEdge', 'T.ResizeShortestEdge', (['[800, 800]', '(1333)'], {}), '([800, 800], 1333)\n', (1158, 1176), True, 'from detectron2.data import transforms as T\n'), ((1462, 1471), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (1469, 1471), False, 'from detectron2.config import get_cfg\n'), ((1641, 1680), 'detectron2.model_zoo.get_checkpoint_url', 'model_zoo.get_checkpoint_url', (['model_url'], {}), '(model_url)\n', (1669, 1680), False, 'from detectron2 import model_zoo\n'), ((1785, 1804), 'detectron2.modeling.build_model', 'build_model', (['cloned'], {}), '(cloned)\n', (1796, 1804), False, 'from detectron2.modeling import build_model\n'), ((2167, 2290), 'bentoml.detectron.save', 'bentoml.detectron.save', (['"""test_detectron2_model"""', 'model'], {'model_config': 'config', 'metadata': 'metadata', 'model_store': '_modelstore'}), "('test_detectron2_model', model, model_config=config,\n metadata=metadata, model_store=_modelstore)\n", (2189, 2290), False, 'import bentoml\n'), ((2644, 2692), 'bentoml.models.get', 'bentoml.models.get', (['tag'], {'_model_store': 'modelstore'}), '(tag, _model_store=modelstore)\n', (2662, 2692), False, 'import bentoml\n'), ((2762, 2834), 'bentoml.detectron.load', 'bentoml.detectron.load', (['_model.tag'], {'device': '"""cpu"""', 'model_store': 'modelstore'}), "(_model.tag, device='cpu', model_store=modelstore)\n", (2784, 2834), False, 'import bentoml\n'), ((2986, 3008), 'torch.as_tensor', 'torch.as_tensor', (['image'], {}), '(image)\n', (3001, 3008), False, 'import torch\n'), ((3368, 3426), 'bentoml.detectron.load_runner', 'bentoml.detectron.load_runner', (['tag'], {'model_store': 'modelstore'}), '(tag, model_store=modelstore)\n', (3397, 3426), False, 'import bentoml\n'), ((1496, 1532), 'detectron2.model_zoo.get_config_file', 'model_zoo.get_config_file', (['model_url'], {}), '(model_url)\n', (1521, 1532), False, 'from detectron2 import model_zoo\n'), ((1988, 2013), 'imageio.imread', 'imageio.imread', (['IMAGE_URL'], {}), '(IMAGE_URL)\n', (2002, 2013), False, 'import imageio\n')]
|
import pytest
from pypospack.potential import EamPotential
symbols = ['Al']
func_pair_name = "bornmayer"
func_density_name = "eam_dens_exp"
func_embedding_name = "fs"
expected_parameter_names_pair_potential = []
expected_parameter_names_density_function = []
expected_parameter_names_embedding_function = []
expected_parameter_names = [
'p_AlAl_phi0', 'p_AlAl_gamma', 'p_AlAl_r0',
'd_Al_rho0', 'd_Al_beta', 'd_Al_r0',
'e_Al_F0', 'e_Al_p', 'e_Al_q', 'e_Al_F1', 'e_Al_rho0']
print(80*'-')
print("func_pair_name={}".format(func_pair_name))
print("func_density_name={}".format(func_density_name))
print("func_embedding_name={}".format(func_density_name))
print(80*'-')
def test____init__():
obj_pot = EamPotential(
symbols=symbols,
func_pair=func_pair_name,
func_density=func_density_name,
func_embedding=func_embedding_name)
assert type(obj_pot) is EamPotential
assert obj_pot.potential_type == 'eam'
assert type(obj_pot.symbols) is list
assert len(obj_pot.symbols) == len(symbols)
for i,v in enumerate(symbols):
        assert obj_pot.symbols[i] == v
assert obj_pot.is_charge is False
assert type(obj_pot.parameter_names) is list
assert len(obj_pot.parameter_names) == len(expected_parameter_names)
for i,v in enumerate(expected_parameter_names):
        assert obj_pot.parameter_names[i] == v
if __name__ == "__main__":
# CONSTRUCTOR TEST
pot = EamPotential(symbols=symbols,
func_pair=func_pair_name,
func_density=func_density_name,
func_embedding=func_embedding_name)
print('pot.potential_type == {}'.format(\
pot.potential_type))
print('pot.symbols == {}'.format(\
pot.symbols))
print('pot.parameter_names == {}'.format(\
pot.parameter_names))
print('pot.is_charge == {}'.format(\
pot.is_charge))
|
[
"pypospack.potential.EamPotential"
] |
[((733, 861), 'pypospack.potential.EamPotential', 'EamPotential', ([], {'symbols': 'symbols', 'func_pair': 'func_pair_name', 'func_density': 'func_density_name', 'func_embedding': 'func_embedding_name'}), '(symbols=symbols, func_pair=func_pair_name, func_density=\n func_density_name, func_embedding=func_embedding_name)\n', (745, 861), False, 'from pypospack.potential import EamPotential\n'), ((1459, 1587), 'pypospack.potential.EamPotential', 'EamPotential', ([], {'symbols': 'symbols', 'func_pair': 'func_pair_name', 'func_density': 'func_density_name', 'func_embedding': 'func_embedding_name'}), '(symbols=symbols, func_pair=func_pair_name, func_density=\n func_density_name, func_embedding=func_embedding_name)\n', (1471, 1587), False, 'from pypospack.potential import EamPotential\n')]
|
from typing import Dict
from main.helpers.print_helper import PrintHelper
class Enricher(object):
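    # base enricher: concrete subclasses are expected to override get_information to populate information_dict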
def __init__(self, enricher_type: str, header: str) -> None:
self.enricher_type = enricher_type
self.header = header
def get_information(self, packet: Dict[str, str], information_dict) -> None:
pass
def print(self) -> None:
PrintHelper.print_nothing(self.enricher_type)
|
[
"main.helpers.print_helper.PrintHelper.print_nothing"
] |
[((371, 416), 'main.helpers.print_helper.PrintHelper.print_nothing', 'PrintHelper.print_nothing', (['self.enricher_type'], {}), '(self.enricher_type)\n', (396, 416), False, 'from main.helpers.print_helper import PrintHelper\n')]
|
import logging
import coloredlogs
# colorize output from the root logger with default settings
coloredlogs.install()
# attach INFO-level colored output to a dedicated logger (assuming the usual module-name logger)
custom_logger = logging.getLogger(__name__)
coloredlogs.install(level="INFO", logger=custom_logger)
|
[
"coloredlogs.install"
] |
[((20, 41), 'coloredlogs.install', 'coloredlogs.install', ([], {}), '()\n', (39, 41), False, 'import coloredlogs\n'), ((82, 137), 'coloredlogs.install', 'coloredlogs.install', ([], {'level': '"""INFO"""', 'logger': 'custom_logger'}), "(level='INFO', logger=custom_logger)\n", (101, 137), False, 'import coloredlogs\n')]
|