code (stringlengths 20-1.05M) | apis (sequence) | extract_api (stringlengths 75-5.24M)
---|---|---|
import pytest
from tartiflette.language.ast import FragmentSpreadNode
def test_fragmentspreadnode__init__():
fragment_spread_node = FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
)
assert fragment_spread_node.name == "fragmentSpreadName"
assert fragment_spread_node.directives == "fragmentSpreadDirectives"
assert fragment_spread_node.location == "fragmentSpreadLocation"
@pytest.mark.parametrize(
"fragment_spread_node,other,expected",
[
(
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
Ellipsis,
False,
),
(
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
FragmentSpreadNode(
name="fragmentSpreadNameBis",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
False,
),
(
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectivesBis",
location="fragmentSpreadLocation",
),
False,
),
(
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocationBis",
),
False,
),
(
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
True,
),
],
)
def test_fragmentspreadnode__eq__(fragment_spread_node, other, expected):
assert (fragment_spread_node == other) is expected
@pytest.mark.parametrize(
"fragment_spread_node,expected",
[
(
FragmentSpreadNode(
name="fragmentSpreadName",
directives="fragmentSpreadDirectives",
location="fragmentSpreadLocation",
),
"FragmentSpreadNode("
"name='fragmentSpreadName', "
"directives='fragmentSpreadDirectives', "
"location='fragmentSpreadLocation')",
)
],
)
def test_fragmentspreadnode__repr__(fragment_spread_node, expected):
assert fragment_spread_node.__repr__() == expected
| [
"tartiflette.language.ast.FragmentSpreadNode"
] | [((139, 263), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (157, 263), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((592, 716), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (610, 716), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((850, 974), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (868, 974), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((1046, 1173), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadNameBis"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadNameBis', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (1064, 1173), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((1285, 1409), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (1303, 1409), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((1481, 1608), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectivesBis"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectivesBis', location='fragmentSpreadLocation')\n", (1499, 1608), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((1720, 1844), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (1738, 1844), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((1916, 2043), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocationBis"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocationBis')\n", (1934, 2043), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((2155, 2279), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), 
"(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (2173, 2279), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((2351, 2475), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (2369, 2475), False, 'from tartiflette.language.ast import FragmentSpreadNode\n'), ((2795, 2919), 'tartiflette.language.ast.FragmentSpreadNode', 'FragmentSpreadNode', ([], {'name': '"""fragmentSpreadName"""', 'directives': '"""fragmentSpreadDirectives"""', 'location': '"""fragmentSpreadLocation"""'}), "(name='fragmentSpreadName', directives=\n 'fragmentSpreadDirectives', location='fragmentSpreadLocation')\n", (2813, 2919), False, 'from tartiflette.language.ast import FragmentSpreadNode\n')] |
from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implements
class PrivateMode(ModuleData, Mode):
implements(IPlugin, IModuleData, IMode)
name = "PrivateMode"
core = True
affectedActions = { "displaychannel": 10,
"showchannel-whois": 10 }
def channelModes(self):
return [ ("p", ModeType.NoParam, self) ]
def actions(self):
return [ ("modeactioncheck-channel-p-displaychannel", 1, self.chanIsPrivateList),
("modeactioncheck-channel-p-showchannel-whois", 1, self.chanIsPrivateWhois) ]
def chanIsPrivateList(self, channel, displayData, sameChannel, user, usedSearchMask):
if "p" in channel.modes:
return True
return None
def chanIsPrivateWhois(self, channel, sameChannel, queryUser, targetUser):
if "p" in channel.modes:
return True
return None
def apply(self, actionName, channel, param, *params):
if actionName == "displaychannel":
displayData, sameChannel, user, usedSearchMask = params
if usedSearchMask:
displayData.clear()
elif user not in channel.users:
displayData["name"] = "*"
displayData["modestopic"] = "[]"
return
if actionName == "showchannel-whois":
sameChannel, queryUser, targetUser = params
if queryUser not in channel.users:
return False
return None
privateMode = PrivateMode() | [
"zope.interface.implements"
] | [((219, 258), 'zope.interface.implements', 'implements', (['IPlugin', 'IModuleData', 'IMode'], {}), '(IPlugin, IModuleData, IMode)\n', (229, 258), False, 'from zope.interface import implements\n')] |
import pytest
from app.app import HttpHtml
#FIXME: add mocks for the WEB resource. It is a bad idea to test against an uncontrollable resource like a live website.
def test_cdn_recognition():
url = 'https://www.funnygames.at'
#It seems that I cannot use Packaging in Python :( (we need moar apps)
app_service = HttpHtml()
cdns = app_service.recognize_cdns(url)
expected = {
'assets.funnygames.at': 'OptimiCDN',
'www.funnygames.at': 'Cloudflare',
'cdnjs.cloudflare.com': 'Cloudflare',
's7.addthis.com': None
}
assert cdns == expected | [
"app.app.HttpHtml"
] | [((314, 324), 'app.app.HttpHtml', 'HttpHtml', ([], {}), '()\n', (322, 324), False, 'from app.app import HttpHtml\n')] |
# Title : Generate 6 digit number
# Author : <NAME>.
# Date : 28:10:2020
import secrets
secretsGenerator = secrets.SystemRandom()
num = secretsGenerator.randrange(100000, 999999)
print(f"The generated number is {num}")
| [
"secrets.SystemRandom"
] | [((112, 134), 'secrets.SystemRandom', 'secrets.SystemRandom', ([], {}), '()\n', (132, 134), False, 'import secrets\n')] |
"""Command line interface to create or display Icestupa class
"""
# External modules
import os, sys
import logging, coloredlogs
import matplotlib.pyplot as plt
import pandas as pd
from datetime import datetime, timedelta
from matplotlib.offsetbox import AnchoredText
import matplotlib.dates as mdates
import seaborn as sns
from matplotlib.lines import Line2D
from operator import truediv
import numpy as np
# Locals
dirname = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(dirname)
from src.models.icestupaClass import Icestupa
from src.utils.settings import config
from src.utils import setup_logger
from src.models.methods.metadata import get_parameter_metadata
if __name__ == "__main__":
# Main logger
logger = logging.getLogger(__name__)
coloredlogs.install(
fmt="%(funcName)s %(levelname)s %(message)s",
# level=logging.WARNING,
level=logging.INFO,
logger=logger,
)
locations = ['gangles21', 'guttannen21', 'guttannen20']
# locations = ['gangles21', 'guttannen21']
# locations = ['guttannen20', 'guttannen21']
fig, ax = plt.subplots()
custom_colors = sns.color_palette("Set1", len(locations))
for i,location in enumerate(locations):
SITE, FOLDER = config(location)
icestupa = Icestupa(location)
icestupa.self_attributes()
icestupa.read_output()
if location == 'guttannen20':
SITE["start_date"] +=pd.offsets.DateOffset(year=2023)
if location == 'guttannen21':
SITE["start_date"] +=pd.offsets.DateOffset(year=2022)
if location == 'gangles21':
SITE["start_date"] +=pd.offsets.DateOffset(year=2023)
days = pd.date_range(
start=SITE["start_date"],
end=SITE["start_date"]+ timedelta(hours=icestupa.total_hours - 1),
freq="1H",
)
days2 = pd.date_range(
start=SITE["start_date"]+ timedelta(hours= 1),
end=SITE["start_date"]+ timedelta(hours=icestupa.total_hours - 1),
freq="1H",
)
df = icestupa.df[["When","SA", "iceV", "mb"]]
if location == 'guttannen20':
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2019,
icestupa.df['When'] + pd.offsets.DateOffset(year=2022))
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2020,
icestupa.df['When'] + pd.offsets.DateOffset(year=2023))
if location == 'guttannen21':
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2020,
icestupa.df['When'] + pd.offsets.DateOffset(year=2022))
df['When'] = df['When'].mask(icestupa.df['When'].dt.year == 2021,
icestupa.df['When'] + pd.offsets.DateOffset(year=2023))
df= df.reset_index()
df['mb'] *= 1000
# df['mb'] = df['mb'].cumsum(axis=0)
df = df.set_index("When").resample("D").mean().reset_index()
x = df.When[1:-1]
y1 = df.mb[1:-1]
y2 = df.SA[1:-1]
v = get_parameter_metadata(location)
ax.plot(
x,
y1,
linewidth=1,
color=custom_colors[i],
zorder=1,
label = v['shortname']
)
# ax.plot(
# x,
# y2,
# linewidth=1,
# color='k',
# )
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_color('grey')
ax.spines['bottom'].set_color('grey')
# Only show ticks on the left and bottom spines
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter("%b"))
ax.legend()
fig.autofmt_xdate()
plt.savefig(
"data/paper/mb_cumulative.jpg",
dpi=300,
bbox_inches="tight",
)
| [
"logging.getLogger",
"src.utils.settings.config",
"src.models.methods.metadata.get_parameter_metadata",
"matplotlib.pyplot.savefig",
"coloredlogs.install",
"matplotlib.dates.MonthLocator",
"matplotlib.dates.DateFormatter",
"src.models.icestupaClass.Icestupa",
"os.path.realpath",
"pandas.offsets.DateOffset",
"datetime.timedelta",
"sys.path.append",
"matplotlib.pyplot.subplots"
] | [((505, 529), 'sys.path.append', 'sys.path.append', (['dirname'], {}), '(dirname)\n', (520, 529), False, 'import os, sys\n'), ((772, 799), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (789, 799), False, 'import logging, coloredlogs\n'), ((804, 909), 'coloredlogs.install', 'coloredlogs.install', ([], {'fmt': '"""%(funcName)s %(levelname)s %(message)s"""', 'level': 'logging.INFO', 'logger': 'logger'}), "(fmt='%(funcName)s %(levelname)s %(message)s', level=\n logging.INFO, logger=logger)\n", (823, 909), False, 'import logging, coloredlogs\n'), ((1141, 1155), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1153, 1155), True, 'import matplotlib.pyplot as plt\n'), ((3966, 4039), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""data/paper/mb_cumulative.jpg"""'], {'dpi': '(300)', 'bbox_inches': '"""tight"""'}), "('data/paper/mb_cumulative.jpg', dpi=300, bbox_inches='tight')\n", (3977, 4039), True, 'import matplotlib.pyplot as plt\n'), ((1286, 1302), 'src.utils.settings.config', 'config', (['location'], {}), '(location)\n', (1292, 1302), False, 'from src.utils.settings import config\n'), ((1323, 1341), 'src.models.icestupaClass.Icestupa', 'Icestupa', (['location'], {}), '(location)\n', (1331, 1341), False, 'from src.models.icestupaClass import Icestupa\n'), ((3187, 3219), 'src.models.methods.metadata.get_parameter_metadata', 'get_parameter_metadata', (['location'], {}), '(location)\n', (3209, 3219), False, 'from src.models.methods.metadata import get_parameter_metadata\n'), ((475, 501), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (491, 501), False, 'import os, sys\n'), ((1480, 1512), 'pandas.offsets.DateOffset', 'pd.offsets.DateOffset', ([], {'year': '(2023)'}), '(year=2023)\n', (1501, 1512), True, 'import pandas as pd\n'), ((1584, 1616), 'pandas.offsets.DateOffset', 'pd.offsets.DateOffset', ([], {'year': '(2022)'}), '(year=2022)\n', (1605, 1616), True, 'import pandas as pd\n'), ((1686, 1718), 'pandas.offsets.DateOffset', 'pd.offsets.DateOffset', ([], {'year': '(2023)'}), '(year=2023)\n', (1707, 1718), True, 'import pandas as pd\n'), ((3825, 3846), 'matplotlib.dates.MonthLocator', 'mdates.MonthLocator', ([], {}), '()\n', (3844, 3846), True, 'import matplotlib.dates as mdates\n'), ((3885, 3911), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%b"""'], {}), "('%b')\n", (3905, 3911), True, 'import matplotlib.dates as mdates\n'), ((2895, 2927), 'pandas.offsets.DateOffset', 'pd.offsets.DateOffset', ([], {'year': '(2023)'}), '(year=2023)\n', (2916, 2927), True, 'import pandas as pd\n'), ((1824, 1865), 'datetime.timedelta', 'timedelta', ([], {'hours': '(icestupa.total_hours - 1)'}), '(hours=icestupa.total_hours - 1)\n', (1833, 1865), False, 'from datetime import datetime, timedelta\n'), ((1969, 1987), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (1978, 1987), False, 'from datetime import datetime, timedelta\n'), ((2026, 2067), 'datetime.timedelta', 'timedelta', ([], {'hours': '(icestupa.total_hours - 1)'}), '(hours=icestupa.total_hours - 1)\n', (2035, 2067), False, 'from datetime import datetime, timedelta\n'), ((2337, 2369), 'pandas.offsets.DateOffset', 'pd.offsets.DateOffset', ([], {'year': '(2022)'}), '(year=2022)\n', (2358, 2369), True, 'import pandas as pd\n'), ((2513, 2545), 'pandas.offsets.DateOffset', 'pd.offsets.DateOffset', ([], {'year': '(2023)'}), '(year=2023)\n', (2534, 2545), True, 'import pandas as pd\n'), ((2727, 2759), 'pandas.offsets.DateOffset', 
'pd.offsets.DateOffset', ([], {'year': '(2022)'}), '(year=2022)\n', (2748, 2759), True, 'import pandas as pd\n')] |
# coding: utf-8
import os
import pandas as pd
endpoint = os.environ["AZURE_LANGUAGE_ENDPOINT"]
key = os.environ["AZURE_LANGUAGE_KEY"]
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
# Authenticate the client using your key and endpoint
def authenticate_client():
ta_credential = AzureKeyCredential(key)
text_analytics_client = TextAnalyticsClient(
endpoint=endpoint,
credential=ta_credential)
return text_analytics_client
client = authenticate_client()
# Example method for summarizing text
def extractive_summarization(client, document):
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import (
TextAnalyticsClient,
ExtractSummaryAction
)
poller = client.begin_analyze_actions(
document,
actions=[
ExtractSummaryAction(MaxSentenceCount=4)
],
)
document_results = poller.result()
for result in document_results:
extract_summary_result = result[0] # first document, first result
if extract_summary_result.is_error:
print("...Is an error with code '{}' and message '{}'".format(
extract_summary_result.code, extract_summary_result.message
))
else:
print("Summary extracted: \n{}".format(
" ".join([sentence.text for sentence in extract_summary_result.sentences]))
)
if __name__ == "__main__":
document = [
"The extractive summarization feature uses natural language processing techniques to locate key sentences in an unstructured text document. "
"These sentences collectively convey the main idea of the document. This feature is provided as an API for developers. "
"They can use it to build intelligent solutions based on the relevant information extracted to support various use cases. "
"In the public preview, extractive summarization supports several languages. It is based on pretrained multilingual transformer models, part of our quest for holistic representations. "
"It draws its strength from transfer learning across monolingual and harness the shared nature of languages to produce models of improved quality and efficiency. "
]
document = [
"""GROSS:
A: Left simple mastectomy specimen. There is a well-defined but irregularly-shaped tumour mass in the
upper inner quadrant of the breast. The tumour, measuring 1.8cm in its greatest dimension, is located about
2.0cm below the skin. A piece of the muscles beneath the tumour is excised en bloc. There is no obvious
tumour invasion of the muscles. The tumour tissue is tan-white and firm to hard in consistency. There is a
large but poorly-outlined area of indurated retroareolar breast tissue. Patchy yellowish specks are found in
the tissue.
Blocks 1-3 : Representative sections of the breast tumour with deep surgical margin (muscles).
Block 4 : Representative section of the nipple.
Block 5 : Representative sections of the retroareolar breast tissue.
B: Left axillary lymph node dissection specimen. A few hard nodes are identified in the fat, the largest of
which measures 1.0cm. The nodes are submitted in 4 blocks.
C: Specimen consists of a lymph node, 0.7cm, covered by fat labelled lateral node. Bissected and submitted
entirely in one block.
HISTOLOGY:
A: Sections of the tumour show infiltration in the fibrous stroma by irregularly-shaped clusters of cells
arranged in sheets. Duct formation is scanty (<10%). Nuclear atypia of the tumour cells is severe. The
nuclei are large, vesicular and markedly pleomorphic. Nucleoli are prominent. Cytoplasm is moderately
abundant. Mitotic count is >20 per 10hpf. Necrosis is noted in some clusters of tumour cells. Foci of high-
grade DCIS are seen at the periphery of the tumour. Perineural and lymphovasular tumour cell invasion is
observed. No evidence of invasion of the muscles at the deep surgical margin. Sections of the indurated
retroareolar breast tissue show extensive and widespread foci of high-grade DCIS (comedo-type). An area
of invasive carcinoma, about 1.0cm in its greatest dimension, is identified among the DCIS. The histology
of the invasive malignancy is similar to the main tumour described above. No Paget's disease of nipple.
Breast Cancer Profile
Estrogen receptor: Negative expression.
Progesterone receptor: Negative expression.
c-erbB-2: Positive expression (70%; 2+ - 3+; IHC).
Ki67 index: 60%.
B: Sections show two of 20 lymph nodes examined are infiltrated by tumour cells. Focal perinodal invasion
is observed.
C: Sections of the lymph node do not show metastatic malignancy.
INTERPRETATION:
A, B: Left simple mastectomy specimen and left axillary lymph node dissection specimen:
"""
]
extractive_summarization(client, document)
| [
"azure.ai.textanalytics.TextAnalyticsClient",
"azure.core.credentials.AzureKeyCredential",
"azure.ai.textanalytics.ExtractSummaryAction"
] | [((349, 372), 'azure.core.credentials.AzureKeyCredential', 'AzureKeyCredential', (['key'], {}), '(key)\n', (367, 372), False, 'from azure.core.credentials import AzureKeyCredential\n'), ((401, 465), 'azure.ai.textanalytics.TextAnalyticsClient', 'TextAnalyticsClient', ([], {'endpoint': 'endpoint', 'credential': 'ta_credential'}), '(endpoint=endpoint, credential=ta_credential)\n', (420, 465), False, 'from azure.ai.textanalytics import TextAnalyticsClient, ExtractSummaryAction\n'), ((900, 940), 'azure.ai.textanalytics.ExtractSummaryAction', 'ExtractSummaryAction', ([], {'MaxSentenceCount': '(4)'}), '(MaxSentenceCount=4)\n', (920, 940), False, 'from azure.ai.textanalytics import TextAnalyticsClient, ExtractSummaryAction\n')] |
""""Test cases for StructuredMock"""
import pytest
from dimodmock import StructuredMock
EXAMPLE_CONFIGURATIONS = [
([0, 1, 2, 3], [(0, 2), (1, 3)], {}, {}),
(list(range(1000)), [(0, 1), (1, 2), (0, 100)], {}, {}),
(
list(range(1000)),
[(0, 1000), (10, 20)],
{"max_num_reads": 100},
{"num_reads": ["max_num_reads"]},
),
(
["a", "b", "c", "d"],
[("a", "b"), ("c", "a")],
{"max_num_reads": 100},
{"num_reads": ["max_num_reads"]},
),
]
@pytest.mark.parametrize("nodelist,edgelist,properties,parameters", EXAMPLE_CONFIGURATIONS)
def test_propagates_nodelist(nodelist, edgelist, properties, parameters):
mock = StructuredMock(nodelist, edgelist, properties, parameters)
assert mock.nodelist == nodelist
@pytest.mark.parametrize("nodelist,edgelist,properties,parameters", EXAMPLE_CONFIGURATIONS)
def test_propagates_edgelist(nodelist, edgelist, properties, parameters):
mock = StructuredMock(nodelist, edgelist, properties, parameters)
assert mock.edgelist == edgelist
@pytest.mark.parametrize("nodelist,edgelist,properties,parameters", EXAMPLE_CONFIGURATIONS)
def test_propagates_properties(nodelist, edgelist, properties, parameters):
mock = StructuredMock(nodelist, edgelist, properties, parameters)
assert mock.properties == properties
@pytest.mark.parametrize("nodelist,edgelist,properties,parameters", EXAMPLE_CONFIGURATIONS)
def test_propagates_parameters(nodelist, edgelist, properties, parameters):
mock = StructuredMock(nodelist, edgelist, properties, parameters)
assert mock.parameters == parameters
| [
"dimodmock.StructuredMock",
"pytest.mark.parametrize"
] | [((524, 618), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nodelist,edgelist,properties,parameters"""', 'EXAMPLE_CONFIGURATIONS'], {}), "('nodelist,edgelist,properties,parameters',\n EXAMPLE_CONFIGURATIONS)\n", (547, 618), False, 'import pytest\n'), ((799, 893), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nodelist,edgelist,properties,parameters"""', 'EXAMPLE_CONFIGURATIONS'], {}), "('nodelist,edgelist,properties,parameters',\n EXAMPLE_CONFIGURATIONS)\n", (822, 893), False, 'import pytest\n'), ((1074, 1168), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nodelist,edgelist,properties,parameters"""', 'EXAMPLE_CONFIGURATIONS'], {}), "('nodelist,edgelist,properties,parameters',\n EXAMPLE_CONFIGURATIONS)\n", (1097, 1168), False, 'import pytest\n'), ((1355, 1449), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""nodelist,edgelist,properties,parameters"""', 'EXAMPLE_CONFIGURATIONS'], {}), "('nodelist,edgelist,properties,parameters',\n EXAMPLE_CONFIGURATIONS)\n", (1378, 1449), False, 'import pytest\n'), ((700, 758), 'dimodmock.StructuredMock', 'StructuredMock', (['nodelist', 'edgelist', 'properties', 'parameters'], {}), '(nodelist, edgelist, properties, parameters)\n', (714, 758), False, 'from dimodmock import StructuredMock\n'), ((975, 1033), 'dimodmock.StructuredMock', 'StructuredMock', (['nodelist', 'edgelist', 'properties', 'parameters'], {}), '(nodelist, edgelist, properties, parameters)\n', (989, 1033), False, 'from dimodmock import StructuredMock\n'), ((1252, 1310), 'dimodmock.StructuredMock', 'StructuredMock', (['nodelist', 'edgelist', 'properties', 'parameters'], {}), '(nodelist, edgelist, properties, parameters)\n', (1266, 1310), False, 'from dimodmock import StructuredMock\n'), ((1533, 1591), 'dimodmock.StructuredMock', 'StructuredMock', (['nodelist', 'edgelist', 'properties', 'parameters'], {}), '(nodelist, edgelist, properties, parameters)\n', (1547, 1591), False, 'from dimodmock import StructuredMock\n')] |
from django.db.models import Max
from rest_framework import serializers
import json
from base.models import (
UserProfile,
)
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
"""
A ModelSerializer that takes an additional `fields` argument that
controls which fields should be displayed.
See: http://tomchristie.github.io/rest-framework-2-docs/api-guide/serializers
"""
def __init__(self, *args, **kwargs):
# Don't pass the 'fields' arg up to the superclass
fields = kwargs.pop('fields', None)
# Instantiate the superclass normally
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
if fields:
# Drop any fields that are not specified in the `fields` argument.
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
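# Illustrative usage (an assumption added for clarity, not part of the original module):
# passing `fields` keeps only the named keys in the serialized output, e.g.
#     UserSerializer(user, fields=("id", "name")).data  ->  {"id": ..., "name": ...}
# while omitting `fields` serializes every field declared in Meta.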
class UserSerializer(DynamicFieldsModelSerializer):
name = serializers.SerializerMethodField(read_only=True)
first_last_name = serializers.SerializerMethodField(read_only=True)
def get_name(self, obj):
return str(obj.last_name + ', ' + obj.first_name)
def get_first_last_name(self, obj):
return str(obj.first_name + ' ' + obj.last_name)
class Meta:
model = UserProfile
fields = ('id', 'name', 'first_last_name', 'first_name', 'last_name', 'email', 'phone1', 'phone2', 'other_info')
| [
"rest_framework.serializers.SerializerMethodField"
] | [((1021, 1070), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1054, 1070), False, 'from rest_framework import serializers\n'), ((1093, 1142), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (1126, 1142), False, 'from rest_framework import serializers\n')] |
import string
import re
from nltk.tokenize import TweetTokenizer
from emoji import UNICODE_EMOJI
import csv
import numpy as np
import sys
from collections import Counter
from string import punctuation
from nltk.corpus import wordnet as wn # used to compute the OOV ratio
# check the command-line arguments
if len(sys.argv) != 3:
print('USAGE: features_extractor.py path_to_dataset.csv output_file.csv')
sys.exit(2)
# path to the dataset the features will be extracted from
input_file = sys.argv[1]
# csv file where the output matrix will be saved
output_file = sys.argv[2]
# checks whether the tweet contains a url
# returns True if it does, False otherwise
def hasURL(row):
    result = False
    if row[35]:
        result = True
    return result
# checks whether the tweet is a reply
# returns True if it is, False otherwise
def isReply(row):
    reply = row[36] # holds the ID of the tweet being replied to
    # contains None if the tweet is not a reply
result = False
if reply:
result = True
return result
# identifies a retweet and returns the screen_name of the user
# who was retweeted, otherwise returns an empty string
def isRT(tweet):
    rt_user = ''
    ed_tweet = ''
    potential_rt = ''
    rt = None  # initialised here so the check after the 'if rts:' block is safe when no 'rt' marker matches
    # set of punctuation that will later be stripped from the words being analysed
    my_punctuation = (':', '“', '\"', '@', '.', ',')
    # look for the 'rt' marker in all of its possible forms
    rts = re.search(r'^(RT |rt |RT|rt| RT| rt| RT | rt )(@\w*| \w*)?[: ]', tweet)
    if rts:
        # normalise the 'rt' marker to 'RT'
        # if one of the forms is not present, replace() does nothing
forma1 = tweet
forma2 = forma1.replace('RT ', "RT", 1)
forma3 = forma1.replace('rt ', "RT", 1)
forma4 = forma1.replace(' RT ', "RT", 1)
forma5 = forma1.replace(' rt ', "RT", 1)
forma6 = forma1.replace('rt', "RT", 1)
        # figure out which of the replacements actually took effect
if forma1 != forma2:
ed_tweet = forma2
elif forma1 != forma3:
ed_tweet = forma3
elif forma1 != forma4:
ed_tweet = forma4
elif forma1 != forma5:
ed_tweet = forma5
elif forma1 != forma6:
ed_tweet = forma6
else:
ed_tweet = tweet
        # look for the various contexts in which 'RT' might appear
        rt = re.search(r'(RT\w+|RT@\w+|RT: \w+|RT.\w+|RT.\w+:|RT: @\w+:|RT: @\w+ |RT\( |RT\“@\w+:)', ed_tweet) # this should cover all remaining cases (although it also pulls in some that are not real RTs)
    if rt:
        # perform a first clean-up of the string
ed_tweet = ed_tweet.replace('RT', '', 1)
ed_tweet = ed_tweet.replace(':', ' :')
ed_tweet = ed_tweet.replace('\"', ' \"')
potential_rt = ed_tweet
    # analyse the string word by word
if potential_rt != '':
tmp = potential_rt.split(' ')
tmp_first_word = ' '
for word in tmp:
if len(word) > 1 and word.strip() != 'via' and word[0] != '#' and word != 'from':
tmp_first_word = word.lower()
for char in my_punctuation:
if char in tmp_first_word:
tmp_first_word = tmp_first_word.replace(char, '')
if len(tmp_first_word) > 1:
break
if tmp_first_word in users:
rt_user = tmp_first_word
else:
            # extra checks needed in case the tweet in question is in English
if 'via' in tmp:
check = tmp[tmp.index('via') + 1].lower()
check = ''.join(ch for ch in check if ch not in my_punctuation)
if check in users:
rt_user = check
elif 'Via' in tmp:
check = tmp[tmp.index('Via') + 1].lower()
check = ''.join(ch for ch in check if ch not in my_punctuation)
if check in users:
rt_user = check
return rt_user
# updates the occurrence count of each url
def url_count(row):
    urls = row[35]
    # in case there is more than one url in a tweet
    urls = urls.split()
    for url in urls:
        if url in url_counter:
            # the url has already appeared at least once
            url_counter[url] += 1
        else:
            # first appearance of the url
            url_counter[url] = 1
# returns the sum of the url counts for the urls contained in a tweet
def get_sum_url_count(row):
result = 0
urls = row[35]
urls = urls.split()
for url in urls:
result += url_counter[url]
return result
# updates the dictionary of user.screen_name values belonging to the dataset
def getUsers(row):
    # lower() is used because twitter is not case sensitive
user = row[23].lower()
users[user] = []
return user
# first pass over the dataset, gathering the preliminary info needed to compute the various features
def get_initial_info():
    counter = 0 # used to count n_righe (the number of rows)
with open(input_file, newline= '') as csvfile:
csv_reader = csv.reader(csvfile, delimiter = ',', quotechar = '"')
        header = next(csv_reader) # skips the header row of the dataset
for row in csv_reader:
tweet = row[0]
user = getUsers(row)
            PScores[user] = round(1.0 - e, 4) # every 'popularity score' is initialised to 0.2
mentionScores[user] = 0
getOVVratio(tweet)
getHashtags(row)
url_count(row)
counter += 1
    return counter # holds the number of rows needed to build the final matrix
# updates the occurrence counts of the hashtags found in a tweet
def getHashtags(row):
htgs = row[34]
htgs = htgs.split()
for htg in htgs:
        htg = htg.lower() # lower() so the same hashtag written with different casing is not counted separately
        if htg in hashtags:
            # the hashtag has already appeared at least once
            hashtags[htg] += 1
        else:
            # first appearance of the hashtag
            hashtags[htg] = 1
# computes the hashtagScore of a tweet
def getHashtagScore(row):
result = 0.0
htgs = row[34]
htgs = htgs.split()
for htg in htgs:
htg = htg.lower()
result += hashtags[htg]
if result > 0:
        result = result / len(htgs) # divide the summed occurrence counts by the number of hashtags contained in the tweet
return result
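# Worked example of the score above (hypothetical counts): if a tweet carries #a and #b,
# and across the whole dataset #a appears 3 times and #b once, the tweet's hashtag
# score is (3 + 1) / 2 = 2.0.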
# returns the number of times the author of a tweet has been retweeted by a given user
def getRN_ij(matrix, author, user):
    RN_ij = 0 # number of times author has been retweeted by user
RN_ij = matrix[row_per_user[user]][column_per_user[author]]
return RN_ij
# takes as input the author of a tweet that has been retweeted and
# returns that author's Popularity Score
def PScoreCalculator(author):
    newScore = 0.0
    calculatedScores = 0.0
    changed = True
    initial_score = PScores[author] # initially equal to 0.2
    not_first_iteration = False
    # keep recomputing the score for as long as it keeps changing
    while changed:
        scores_to_sum = [] # for the summation
        for RTuser in users_inverse_senzadoppioni[author]: # the RTusers are the users who retweeted author
            RN_ij = getRN_ij(rel_matrix, author, RTuser) # number of times author has been retweeted by RTuser
            N_j = len(users_senzadoppioni[RTuser]) # number of distinct users RTuser has retweeted
score = PScores[RTuser]
newScore = (score * RN_ij) / N_j
tmp_diff = abs(newScore - initial_score)
rounded_tmp_diff = round(tmp_diff, 4)
rounded_score = round(score, 4)
rounded_newScore = round(newScore, 4)
scores_to_sum.append(rounded_newScore)
calculated_scores = sum(scores_to_sum)
old_score = PScores[author]
score_holder = round(1 - e + (e * calculated_scores), 4)
PScores[author] = score_holder
        if not_first_iteration: # from the second iteration onwards we can compare against the previous score
difference = abs(score_holder - old_score)
rounded_difference = round(difference, 4)
            if rounded_difference == 0.0: # if the score did not change, we can leave the loop
changed = False
not_first_iteration = True
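# Restated as a formula (a summary of the loop above, not new behaviour): with the
# damping constant e = 0.8, an author i retweeted by users j converges to
#     PScore(i) = (1 - e) + e * sum_j( PScore(j) * RN_ij / N_j )
# where RN_ij is how many times j retweeted i and N_j is how many distinct users j
# retweeted. Example with assumed numbers: an author retweeted once by a single user
# whose score is 0.2 and who retweeted 2 distinct users in total gets
#     0.2 + 0.8 * (0.2 * 1 / 2) = 0.28.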
# computes the ratio of words not belonging to a dictionary of the following supported languages
# {'en', 'fa', 'no', 'tr', 'pl', 'et', 'ht', 'nl', 'fr', 'pt', 'de', 'es', 'fi', 'ca', 'it', 'in'}
def getOVVratio(tweet):
result = 0.0
counterOOV = 0
tweet_lenght = len(tweet)
special_words = 0
    tokenizer = TweetTokenizer() # splits into the lexical units typical of tweets (hashtags, ...)
tt = tokenizer.tokenize(tweet)
for token in tt:
        # if the token is not punctuation, its first character is not '#' or '@' and it is not an emoji
token = token.lower()
if token not in string.punctuation and token[0] != '#' and token[0] != '@' and token not in UNICODE_EMOJI:
if not wn.synsets(token):
counterOOV += 1
else:
            special_words += 1 # tokens not considered in the analysis are subtracted from the string length
result = counterOOV / (tweet_lenght - special_words)
return result
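# Worked example of the ratio above (assumed numbers, no claim about specific WordNet
# lookups): for a 20-character tweet whose tokens include one word WordNet does not
# recognise plus two skipped tokens (say a hashtag and an emoji), the result is
#     1 / (20 - 2) ≈ 0.056.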
# stores the number of followers of a user
def get_followerScores(followerScore, username):
    followerScores[username] = followerScore
# updates the count of how many times
# a user has been mentioned in a tweet
def get_mentionScores(row):
mentions = row[37]
mentions = mentions.split()
for user in mentions:
user = user.lower()
        # the 'user in users' check was removed because we also need the score of whoever posted a tweet that was later retweeted
if user in mentionScores and mentionScores[user] > 0:
mentionScores[user] = mentionScores[user] + 1
else:
mentionScores[user] = 1
# stores the number of lists a user has been added to
def get_listScores(row):
user = row[23].lower()
listScore = int(row[15])
listScores[user] = listScore
# extracts the features related to a user's authority and completes
# the vector representing the tweet being processed
def authority_related_features_extractor():
line_count = 0
with open(input_file, newline= '') as csvfile:
csv_reader = csv.reader(csvfile, delimiter = ',', quotechar = '"')
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
tweet = row[0]
actual_user = row[23].lower()
rt_user = isRT(tweet)
followerScore = int(row[16])
                # if the tweet has no particular relations with other users (no RT) there is nothing extra to compute
                # and the three values are all the same
first_follower = followerScores[actual_user]
first_popularity = PScores[actual_user]
first_mention = mentionScores[actual_user]
first_list = listScores[actual_user]
sum_follower = first_follower
sum_popularity = first_popularity
sum_mention = first_mention
sum_list = first_list
important_follower = first_follower
important_popularity = first_popularity
important_mention = first_mention
important_list = first_list
                # if instead there were retweets: rt_user is the user who originally posted the tweet, while
                # the users in users_inverse_senzadoppioni are the users who retweeted rt_user
if rt_user != '':
sum_follower = followerScores[rt_user]
tmp_list = [followerScores[rt_user]]
for user in users_inverse_senzadoppioni[rt_user]:
sum_follower += followerScores[user]
tmp_list.append(followerScores[user])
important_follower = max(tmp_list)
sum_popularity = PScores[rt_user]
tmp_list = [PScores[rt_user]]
for user in users_inverse_senzadoppioni[rt_user]:
sum_popularity += PScores[user]
tmp_list.append(PScores[user])
important_popularity = max(tmp_list)
sum_mention = mentionScores[rt_user]
tmp_list = [mentionScores[rt_user]]
for user in users_inverse_senzadoppioni[rt_user]:
sum_mention += mentionScores[user]
tmp_list.append(mentionScores[rt_user])
important_mention = max(tmp_list)
sum_list = listScores[rt_user]
tmp_list = [listScores[rt_user]]
for user in users_inverse_senzadoppioni[rt_user]:
sum_list += listScores[user]
tmp_list.append(listScores[user])
important_list = max(tmp_list)
                # complete the vector representing the tweet being processed
tweet_vectors[line_count].append(first_follower)
tweet_vectors[line_count].append(first_popularity)
tweet_vectors[line_count].append(first_mention)
tweet_vectors[line_count].append(first_list)
tweet_vectors[line_count].append(sum_follower)
tweet_vectors[line_count].append(sum_popularity)
tweet_vectors[line_count].append(sum_mention)
tweet_vectors[line_count].append(sum_list)
tweet_vectors[line_count].append(important_follower)
tweet_vectors[line_count].append(important_popularity)
tweet_vectors[line_count].append(important_mention)
tweet_vectors[line_count].append(important_list)
line_count += 1
# extracts the features related to the tweet being processed and prepares the extraction of those
# related to the user's authority
def tweet_related_features_extractor():
with open(input_file, newline= '') as csvfile:
csv_reader = csv.reader(csvfile, delimiter = ',', quotechar = '"')
line_count = 0
for row in csv_reader:
            # header row
if line_count == 0:
line_count += 1
else:
tweet_vectors[line_count] = []
tweet = row[0]
rt_user = isRT(tweet)
actual_user = row[23].lower()
followerScore = int(row[16])
if rt_user != '':
                    users[actual_user].append(rt_user) # by the end of the loop the "users" dictionary holds the complete lists of relations
containsURL = hasURL(row)
URLcount = get_sum_url_count(row)
RTcount = row[33]
reply = isReply(row)
OOVratio = round(getOVVratio(tweet), 4)
htgScore = round(getHashtagScore(row), 4)
                # the functions that compute the authority-related features are called here
                # so that those features can be extracted afterwards
get_followerScores(followerScore, actual_user)
get_mentionScores(row)
get_listScores(row)
                # start building the vector that will represent the tweet being processed
tweet_vectors[line_count].append(containsURL)
tweet_vectors[line_count].append(URLcount)
tweet_vectors[line_count].append(RTcount)
tweet_vectors[line_count].append(htgScore)
tweet_vectors[line_count].append(reply)
tweet_vectors[line_count].append(OOVratio)
line_count += 1
# manipulates the dictionaries populated so far to obtain different representations
# of the same data that are useful for computing the features
def data_manipolation():
    # populate users_list, users_senzadoppioni, RTusers_list, users_inverse, users_inverse_senzadoppioni
for user in users:
if users[user]:
users_list.append(user)
if user not in users_senzadoppioni:
users_senzadoppioni[user] = []
for u in users[user]:
if u not in users_inverse:
users_inverse[u] = [user]
else:
users_inverse[u].append(user)
if u not in users_inverse_senzadoppioni:
users_inverse_senzadoppioni[u] = []
users_inverse_senzadoppioni[u].append(user)
if u not in users_senzadoppioni[user]:
users_senzadoppioni[user].append(u)
# indexes the matrix of retweet relations between authors and the users who retweet them
def matrix_maker():
    n_rows = 0
    n_columns = 0
    # each row corresponds to a user present in the dataset
    for user in users:
        row_per_user[user] = n_rows
        n_rows += 1
    # the columns are reserved for authors whose tweets have been retweeted
    for user in users_inverse:
        column_per_user[user] = n_columns
        n_columns += 1
    # initialise a matrix of zeros with shape n_rows x n_columns
    matrix = np.zeros((n_rows, n_columns))
    # cell i, j is non-zero only if the i-th user has
    # retweeted the j-th user
for user in users:
for rt_user, count in Counter(users[user]).items():
matrix[row_per_user[user]][column_per_user[rt_user]] = count
return matrix
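# Illustrative example (hypothetical users): if 'alice' retweeted 'bob' twice, then after
# matrix_maker() runs,
#     matrix[row_per_user['alice']][column_per_user['bob']] == 2
# and every cell for a pair with no retweet relation stays 0.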
    # may not be needed, kept only in case it becomes necessary
    #for i in range(0, n_rows):
    #    for j in range(0, n_columns):
# CORRECT
def get_final_matrix(d_list, n_righe):
vector = np.array(d_list)
final_matrix = np.reshape(vector, (n_righe, 18))
return final_matrix
#************************************************************************************************************************************
#********************************************************* "MAIN" *******************************************************************
#************************************************************************************************************************************
# global variables
PScores = {} # maps a dataset user -> score, initially set to 0.2 and updated afterwards
# (note: for most users it will stay at 0.2)
e = 0.8 # constant used in the formula that computes the PScore
# for the features from table 2
followerScores = {} # maps a dataset user -> score
mentionScores = {} # maps a dataset user -> score
listScores = {} # maps a dataset user -> score
# (note: users who were retweeted but are not present in the dataset have their value set to 0
#  for the purpose of computing the sum_* and important_* features)
hashtags = {} # maps a hashtag -> number of times it appears in the whole dataset
url_counter = {} # maps a url -> number of times it appears in the whole dataset
tweet_vectors = {} # maps a tweet number (row in the dataset) -> feature vector associated with it
mega_list = [] # will hold the single vector containing all of our vectors, on which numpy.reshape() is applied to obtain the final matrix
users = {} # maps a user who retweeted -> list of users that the key user has retweeted
users_senzadoppioni = {} # same mapping as above, with duplicates removed
users_list = [] # users who retweeted the ones above
users_inverse = {} # maps a user who was retweeted -> list of users who retweeted the key user
users_inverse_senzadoppioni = {} # same mapping as above, with duplicates removed
# used to index the matrix of retweet relations
row_per_user = {}
column_per_user = {}
n_righe = get_initial_info() # gather the initial info and the number of rows the matrix will have
tweet_related_features_extractor()
data_manipolation()
rel_matrix = matrix_maker()
# now that we have the relation matrix we can compute the Popularity Score
for author in users_inverse:
PScoreCalculator(author)
authority_related_features_extractor()
# every computed vector is appended to a single vector that will later be reshaped into the final matrix
for l in tweet_vectors:
mega_list.extend(tweet_vectors[l])
# reshape the single vector
final_matrix = get_final_matrix(mega_list, n_righe)
# save the computed matrix
with open(output_file, 'w', newline= '') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=',', quoting = csv.QUOTE_NONNUMERIC)
csv_writer.writerow(('containsURL', 'URLcount', 'RTcount', 'htgScore', 'reply', 'OOVratio', 'first_follower', 'first_popularity', 'first_mention', 'first_list', 'sum_follower', 'sum_popularity', 'sum_mention', 'sum_list', 'important_follower', 'important_popularity', 'important_mention', 'important_list'))
for row in final_matrix:
csv_writer.writerow(row)
print('Matrix created in', sys.argv[2], '!')
| [
"nltk.tokenize.TweetTokenizer",
"numpy.reshape",
"csv.writer",
"collections.Counter",
"numpy.array",
"numpy.zeros",
"sys.exit",
"nltk.corpus.wordnet.synsets",
"csv.reader",
"re.search"
] | [((391, 402), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (399, 402), False, 'import sys\n'), ((1549, 1621), 're.search', 're.search', (['"""^(RT |rt |RT|rt| RT| rt| RT | rt )(@\\\\w*| \\\\w*)?[: ]"""', 'tweet'], {}), "('^(RT |rt |RT|rt| RT| rt| RT | rt )(@\\\\w*| \\\\w*)?[: ]', tweet)\n", (1558, 1621), False, 'import re\n'), ((2516, 2632), 're.search', 're.search', (['"""(RT\\\\w+|RT@\\\\w+|RT: \\\\w+|RT.\\\\w+|RT.\\\\w+:|RT: @\\\\w+:|RT: @\\\\w+ |RT\\\\( |RT\\\\“@\\\\w+:)"""', 'ed_tweet'], {}), "(\n '(RT\\\\w+|RT@\\\\w+|RT: \\\\w+|RT.\\\\w+|RT.\\\\w+:|RT: @\\\\w+:|RT: @\\\\w+ |RT\\\\( |RT\\\\“@\\\\w+:)'\n , ed_tweet)\n", (2525, 2632), False, 'import re\n'), ((9130, 9146), 'nltk.tokenize.TweetTokenizer', 'TweetTokenizer', ([], {}), '()\n', (9144, 9146), False, 'from nltk.tokenize import TweetTokenizer\n'), ((18163, 18192), 'numpy.zeros', 'np.zeros', (['(n_rows, n_columns)'], {}), '((n_rows, n_columns))\n', (18171, 18192), True, 'import numpy as np\n'), ((18678, 18694), 'numpy.array', 'np.array', (['d_list'], {}), '(d_list)\n', (18686, 18694), True, 'import numpy as np\n'), ((18714, 18747), 'numpy.reshape', 'np.reshape', (['vector', '(n_righe, 18)'], {}), '(vector, (n_righe, 18))\n', (18724, 18747), True, 'import numpy as np\n'), ((21858, 21923), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_NONNUMERIC'}), "(csv_file, delimiter=',', quoting=csv.QUOTE_NONNUMERIC)\n", (21868, 21923), False, 'import csv\n'), ((5337, 5386), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\',\', quotechar=\'"\')\n', (5347, 5386), False, 'import csv\n'), ((10989, 11038), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\',\', quotechar=\'"\')\n', (10999, 11038), False, 'import csv\n'), ((14972, 15021), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""', 'quotechar': '"""\\""""'}), '(csvfile, delimiter=\',\', quotechar=\'"\')\n', (14982, 15021), False, 'import csv\n'), ((9549, 9566), 'nltk.corpus.wordnet.synsets', 'wn.synsets', (['token'], {}), '(token)\n', (9559, 9566), True, 'from nltk.corpus import wordnet as wn\n'), ((18359, 18379), 'collections.Counter', 'Counter', (['users[user]'], {}), '(users[user])\n', (18366, 18379), False, 'from collections import Counter\n')] |
import time
import argparse
import pathlib
import os
import json
from concurrent.futures import ThreadPoolExecutor
from requests_futures.sessions import FuturesSession
# To load test, I ran this script while simultaneously interacting with the application
# by hand to see how it was responding.
# Tests tried:
# * from 15 to 150 concurrent users (no autoscaling)
# * from 100 to 300 concurrent users (autoscales to two pods)
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--max-concurrent-users', dest='max_concurrent_users', type=int, default=150)
parser.add_argument('--min-concurrent-users', dest='min_concurrent_users', type=int, default=15)
ENDPOINT = "http://spell-org.spell-org.spell.services/spell-org/paint_with_ml/predict"
with open(pathlib.Path(os.getcwd()).parent / 'test_payload.json', 'r') as fp:
PAYLOAD = json.load(fp)
args = parser.parse_args()
max_requests_per_second = args.max_concurrent_users // 15
min_requests_per_second = args.min_concurrent_users // 15
curr_requests_per_second = min_requests_per_second
already_peeked = False
t = 0
# assuming a maximum of 8 seconds of latency (serving on GPU averages 4 seconds)
session = FuturesSession(executor=ThreadPoolExecutor(max_workers=8 * max_requests_per_second))
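# Worked sizing example using the defaults above: 150 max concurrent users ->
# 150 // 15 = 10 requests per second; with up to ~8 s of latency that is at most
# 8 * 10 = 80 requests in flight, hence max_workers = 80 threads.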
while True:
t += 1
# we can't inspect the response for errors because .result() is a blocking function so
# we're relying on model server metrics to tell us how we're doing
for _ in range(curr_requests_per_second):
_ = session.get(ENDPOINT)
if t % 15 == 0:
if not already_peeked:
curr_requests_per_second += 1
if curr_requests_per_second == max_requests_per_second:
already_peeked = True
else:
curr_requests_per_second -= 1
if curr_requests_per_second == 0:
break
print(f"Sent {curr_requests_per_second} requests at time {t}. Sleeping for 1 second...")
# this assumes that making the request is instantaneous, which, for small enough volumes, it
# basically is
time.sleep(1)
| [
"argparse.ArgumentParser",
"concurrent.futures.ThreadPoolExecutor",
"time.sleep",
"os.getcwd",
"json.load"
] | [((443, 504), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some integers."""'}), "(description='Process some integers.')\n", (466, 504), False, 'import argparse\n'), ((880, 893), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (889, 893), False, 'import json\n'), ((2100, 2113), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2110, 2113), False, 'import time\n'), ((1234, 1293), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(8 * max_requests_per_second)'}), '(max_workers=8 * max_requests_per_second)\n', (1252, 1293), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((811, 822), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (820, 822), False, 'import os\n')] |
from django.conf.urls import url
from django.conf.urls import include
from django.urls import path
from rest_framework.routers import DefaultRouter
from django.views.generic import TemplateView
from rest_framework.authtoken import views as authviews
from . import views
from .core import collective as collective_views
from .core import channel as channel_views
from .core import assistant as assistant_views
app_name = 'chat_assistant_api'
router = DefaultRouter()
router.register('token', views.TokenViewSet, basename='token-authentication')
router.register('session', views.SessionViewSet, basename='session-authentication')
urlpatterns = [
path('assistant/', assistant_views.Assistant.as_view()),
path('assistant/<int:pk>/', assistant_views.UpdateAssistant.as_view()),
path('ask-question/', assistant_views.AskQuestion.as_view()),
path('train-assistant/', assistant_views.TrainAssistant.as_view()),
path('channel/', channel_views.Channel.as_view()),
path('channel/<int:pk>/', channel_views.UpdateChannel.as_view()),
path('collective/', collective_views.Collective.as_view()),
path('collective/<int:pk>/', collective_views.UpdateCollective.as_view()),
path('', include(router.urls))
] | [
"django.conf.urls.include",
"rest_framework.routers.DefaultRouter"
] | [((451, 466), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (464, 466), False, 'from rest_framework.routers import DefaultRouter\n'), ((1203, 1223), 'django.conf.urls.include', 'include', (['router.urls'], {}), '(router.urls)\n', (1210, 1223), False, 'from django.conf.urls import include\n')] |
from .scene import Scene, Widget
try:
from math import tau
except ImportError:
from math import pi
tau = 2 * pi
import wx
from meerk40t.svgelements import Path, Point, Rect
MILS_IN_MM = 39.3701
HITCHAIN_HIT = 0
HITCHAIN_DELEGATE = 1
HITCHAIN_HIT_AND_DELEGATE = 2
HITCHAIN_DELEGATE_AND_HIT = 3
RESPONSE_CONSUME = 0
RESPONSE_ABORT = 1
RESPONSE_CHAIN = 2
RESPONSE_DROP = 3
ORIENTATION_MODE_MASK = 0b00001111110000
ORIENTATION_DIM_MASK = 0b00000000001111
ORIENTATION_MASK = ORIENTATION_MODE_MASK | ORIENTATION_DIM_MASK
ORIENTATION_RELATIVE = 0b00000000000000
ORIENTATION_ABSOLUTE = 0b00000000010000
ORIENTATION_CENTERED = 0b00000000100000
ORIENTATION_HORIZONTAL = 0b00000001000000
ORIENTATION_VERTICAL = 0b00000010000000
ORIENTATION_GRID = 0b00000100000000
ORIENTATION_NO_BUFFER = 0b00001000000000
BUFFER = 10.0
class ToolContainer(Widget):
"""
Widget used to contain particular tools within the scene.
"""
def __init__(self, scene):
Widget.__init__(self, scene, all=False)
def signal(self, signal, *args, **kwargs):
if signal == "tool":
tool = args[0]
self.set_tool(tool)
def set_tool(self, tool):
self.remove_all_widgets()
if tool is None:
return
new_tool = self.scene.context.registered["tool/%s" % tool]
self.add_widget(0, new_tool(self.scene))
class CircleBrush:
"""
Circular Brush to be drawn for area-based tools.
"""
def __init__(self):
self.tool_size = 100
self.pos = 0 + 0j
self.scale = 1.0
self.range = self.tool_size * self.scale
self.brush_fill = wx.Brush(wx.Colour(alpha=64, red=0, green=255, blue=0))
self.using = False
def set_location(self, x: float, y: float):
self.pos = complex(x, y)
def contains(self, x: float, y: float) -> bool:
c = complex(x, y)
return abs(self.pos - c) < self.range
def draw(self, gc: wx.GraphicsContext):
if self.using:
self.draw_brush(gc)
def draw_brush(self, gc: wx.GraphicsContext):
gc.SetBrush(self.brush_fill)
gc.DrawEllipse(
self.pos.real - self.tool_size / 2.0,
self.pos.imag - self.tool_size / 2.0,
self.tool_size,
self.tool_size,
)
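# Quick numeric check of CircleBrush.contains() (illustrative values): with the defaults
# (tool_size=100, scale=1.0, hence range=100) and the brush at (0, 0),
# contains(30, 40) is True because abs(0j - (30+40j)) == 50.0 < 100.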
class ToolWidget(Widget):
"""
AbstractClass for the ToolWidgets
"""
def __init__(self, scene: Scene):
Widget.__init__(self, scene, all=True)
self.brush = CircleBrush()
def hit(self):
return HITCHAIN_HIT
def process_draw(self, gc):
self.brush.draw(gc)
class DrawTool(ToolWidget):
"""
Draw Tool adds paths that are clicked and drawn within the scene.
"""
def __init__(self, scene):
ToolWidget.__init__(self, scene)
self.preferred_length = 50
self.series = None
self.last_position = None
def process_draw(self, gc: wx.GraphicsContext):
if self.series is not None and len(self.series) > 1:
gc.StrokeLines(self.series)
def add_point(self, point):
if len(self.series):
last = self.series[-1]
if Point.distance(last, point) < self.preferred_length:
return
self.scene.gui.Update()
self.series.append(point)
def event(self, window_pos=None, space_pos=None, event_type=None):
if self.series is None:
self.series = []
if event_type == "leftdown":
self.add_point(space_pos[:2])
elif event_type == "move":
if self.series is None:
return RESPONSE_DROP
self.add_point(space_pos[:2])
elif event_type == "lost":
self.series = None
return RESPONSE_DROP
elif event_type == "leftup":
try:
t = Path(stroke="blue")
t.move(self.series[0])
for m in self.series:
t.line(m)
self.scene.context.root.elements.add_elem(t, classify=True)
except IndexError:
pass
self.series = None
class RectTool(ToolWidget):
"""
Rectangle Drawing Tool.
Adds Rectangles with click and drag.
"""
def __init__(self, scene):
ToolWidget.__init__(self, scene)
self.start_position = None
self.p1 = None
self.p2 = None
def process_draw(self, gc: wx.GraphicsContext):
if self.p1 is not None and self.p2 is not None:
x0 = min(self.p1.real, self.p2.real)
y0 = min(self.p1.imag, self.p2.imag)
x1 = max(self.p1.real, self.p2.real)
y1 = max(self.p1.imag, self.p2.imag)
gc.SetPen(wx.BLUE_PEN)
gc.SetBrush(wx.TRANSPARENT_BRUSH)
            gc.DrawRectangle(x0, y0, x1 - x0, y1 - y0)  # width/height must be non-negative (x0/y0 are the minima)
def event(self, window_pos=None, space_pos=None, event_type=None):
if event_type == "leftdown":
self.p1 = complex(space_pos[0], space_pos[1])
elif event_type == "move":
self.p2 = complex(space_pos[0], space_pos[1])
self.scene.gui.signal("refresh_scene")
elif event_type == "leftup":
try:
if self.p1 is None:
return
self.p2 = complex(space_pos[0], space_pos[1])
x0 = min(self.p1.real, self.p2.real)
y0 = min(self.p1.imag, self.p2.imag)
x1 = max(self.p1.real, self.p2.real)
y1 = max(self.p1.imag, self.p2.imag)
rect = Rect(x0, y0, x1 - x0, y1 - y0, stroke="blue")
t = Path(rect)
if len(t) != 0:
self.scene.context.root.elements.add_elem(t, classify=True)
self.p1 = None
self.p2 = None
except IndexError:
pass
| [
"meerk40t.svgelements.Rect",
"wx.Colour",
"meerk40t.svgelements.Path",
"meerk40t.svgelements.Point.distance"
] | [((1657, 1702), 'wx.Colour', 'wx.Colour', ([], {'alpha': '(64)', 'red': '(0)', 'green': '(255)', 'blue': '(0)'}), '(alpha=64, red=0, green=255, blue=0)\n', (1666, 1702), False, 'import wx\n'), ((3179, 3206), 'meerk40t.svgelements.Point.distance', 'Point.distance', (['last', 'point'], {}), '(last, point)\n', (3193, 3206), False, 'from meerk40t.svgelements import Path, Point, Rect\n'), ((5582, 5627), 'meerk40t.svgelements.Rect', 'Rect', (['x0', 'y0', '(x1 - x0)', '(y1 - y0)'], {'stroke': '"""blue"""'}), "(x0, y0, x1 - x0, y1 - y0, stroke='blue')\n", (5586, 5627), False, 'from meerk40t.svgelements import Path, Point, Rect\n'), ((5648, 5658), 'meerk40t.svgelements.Path', 'Path', (['rect'], {}), '(rect)\n', (5652, 5658), False, 'from meerk40t.svgelements import Path, Point, Rect\n'), ((3860, 3879), 'meerk40t.svgelements.Path', 'Path', ([], {'stroke': '"""blue"""'}), "(stroke='blue')\n", (3864, 3879), False, 'from meerk40t.svgelements import Path, Point, Rect\n')] |
import os
from datetime import timedelta
import pytest
from flask.cli import load_dotenv
load_dotenv()
from benwaonline_auth import create_app
from benwaonline_auth.cache import cache as _cache
from benwaonline_auth.database import db as _db
from benwaonline_auth import models
def pytest_addoption(parser):
parser.addoption(
"--db", action="store", default="sqlite", help="my option: mysql or sqlite"
)
@pytest.fixture(scope="session")
def dbopt(request):
return request.config.getoption("--db")
@pytest.fixture(scope="session")
def app(dbopt):
app = create_app("testing")
if dbopt == "sqlite":
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
with app.app_context():
yield app
@pytest.fixture(scope="function")
def cache():
yield _cache
_cache.clear()
@pytest.fixture(scope="session")
def db(app):
_db.app = app
_db.drop_all()
_db.create_all()
init_clients(_db.session)
init_users(_db.session)
init_tokens(_db.session)
yield _db
_db.drop_all()
@pytest.fixture(scope="function")
def session(db):
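    # Run every test inside its own transaction and roll it back afterwards so tests stay isolated.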
connection = db.engine.connect()
transaction = connection.begin()
options = dict(bind=connection, binds={})
session = db.create_scoped_session(options=options)
db.session = session
yield session
transaction.rollback()
connection.close()
session.remove()
def init_clients(session):
client = models.Client(
name="test",
client_id="test_id",
client_secret="test_secret",
is_confidential=True,
blacklisted=False,
grant_type="authorization_code",
response_type="code",
_redirect_uris="http://test/callback",
allowed_scopes="ham test thanks",
default_scopes="ham test",
)
session.add(client)
session.commit()
return
def init_tokens(session):
token = models.Token(
code="testtoken",
expires_in=timedelta(days=14),
user_id="6969",
client_id="test_id",
scopes="ham test",
)
expired = models.Token(
code="expired",
expires_in=timedelta(microseconds=1),
is_expired=True,
user_id="420",
client_id="test_id",
scopes="ham test",
)
session.add(token)
session.add(expired)
session.commit()
return
def init_users(session):
user = models.User(user_id="6969")
session.add(user)
user = models.User(user_id="420")
session.add(user)
user = models.User(user_id="666")
session.add(user)
session.commit()
return
| [
"flask.cli.load_dotenv",
"benwaonline_auth.models.Client",
"benwaonline_auth.create_app",
"datetime.timedelta",
"benwaonline_auth.database.db.drop_all",
"benwaonline_auth.cache.cache.clear",
"benwaonline_auth.database.db.create_all",
"pytest.fixture",
"benwaonline_auth.models.User"
] | [((91, 104), 'flask.cli.load_dotenv', 'load_dotenv', ([], {}), '()\n', (102, 104), False, 'from flask.cli import load_dotenv\n'), ((428, 459), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (442, 459), False, 'import pytest\n'), ((527, 558), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (541, 558), False, 'import pytest\n'), ((745, 777), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (759, 777), False, 'import pytest\n'), ((830, 861), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (844, 861), False, 'import pytest\n'), ((1059, 1091), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (1073, 1091), False, 'import pytest\n'), ((586, 607), 'benwaonline_auth.create_app', 'create_app', (['"""testing"""'], {}), "('testing')\n", (596, 607), False, 'from benwaonline_auth import create_app\n'), ((812, 826), 'benwaonline_auth.cache.cache.clear', '_cache.clear', ([], {}), '()\n', (824, 826), True, 'from benwaonline_auth.cache import cache as _cache\n'), ((897, 911), 'benwaonline_auth.database.db.drop_all', '_db.drop_all', ([], {}), '()\n', (909, 911), True, 'from benwaonline_auth.database import db as _db\n'), ((916, 932), 'benwaonline_auth.database.db.create_all', '_db.create_all', ([], {}), '()\n', (930, 932), True, 'from benwaonline_auth.database import db as _db\n'), ((1041, 1055), 'benwaonline_auth.database.db.drop_all', '_db.drop_all', ([], {}), '()\n', (1053, 1055), True, 'from benwaonline_auth.database import db as _db\n'), ((1444, 1734), 'benwaonline_auth.models.Client', 'models.Client', ([], {'name': '"""test"""', 'client_id': '"""test_id"""', 'client_secret': '"""test_secret"""', 'is_confidential': '(True)', 'blacklisted': '(False)', 'grant_type': '"""authorization_code"""', 'response_type': '"""code"""', '_redirect_uris': '"""http://test/callback"""', 'allowed_scopes': '"""ham test thanks"""', 'default_scopes': '"""ham test"""'}), "(name='test', client_id='test_id', client_secret='test_secret',\n is_confidential=True, blacklisted=False, grant_type=\n 'authorization_code', response_type='code', _redirect_uris=\n 'http://test/callback', allowed_scopes='ham test thanks',\n default_scopes='ham test')\n", (1457, 1734), False, 'from benwaonline_auth import models\n'), ((2395, 2422), 'benwaonline_auth.models.User', 'models.User', ([], {'user_id': '"""6969"""'}), "(user_id='6969')\n", (2406, 2422), False, 'from benwaonline_auth import models\n'), ((2457, 2483), 'benwaonline_auth.models.User', 'models.User', ([], {'user_id': '"""420"""'}), "(user_id='420')\n", (2468, 2483), False, 'from benwaonline_auth import models\n'), ((2518, 2544), 'benwaonline_auth.models.User', 'models.User', ([], {'user_id': '"""666"""'}), "(user_id='666')\n", (2529, 2544), False, 'from benwaonline_auth import models\n'), ((1960, 1978), 'datetime.timedelta', 'timedelta', ([], {'days': '(14)'}), '(days=14)\n', (1969, 1978), False, 'from datetime import timedelta\n'), ((2138, 2163), 'datetime.timedelta', 'timedelta', ([], {'microseconds': '(1)'}), '(microseconds=1)\n', (2147, 2163), False, 'from datetime import timedelta\n')] |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm, _Reasons
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives.ciphers import BlockCipherAlgorithm, modes
def is_cipher_supported(backend, cipher, mode):
"""
Is the cipher and mode supported?
"""
return (isinstance(cipher, BlockCipherAlgorithm) and
isinstance(mode, modes.Mode) and
# strangeness with go cast5 require 16 byte keys - skip for now
cipher.name != "CAST5" and
# ----
backend._lib.IsCipherSupported(
cipher.name.lower(), mode.name.lower()) == 1)
@utils.register_interface(ciphers.CipherContext)
class _CipherContext(object):
def __init__(self, backend, cipher, mode, operation):
self._backend = backend
self._cipher = cipher
self._mode = mode
self._operation = operation
if not is_cipher_supported(backend, cipher, mode):
raise UnsupportedAlgorithm(
"cipher {0} in mode {1} is not supported by this backend"
.format(cipher.name if cipher else cipher,
mode.name if mode else mode),
_Reasons.UNSUPPORTED_CIPHER
)
iv_or_nonce = ""
if isinstance(mode, modes.ModeWithInitializationVector):
iv_or_nonce = mode.initialization_vector
elif isinstance(mode, modes.ModeWithNonce):
iv_or_nonce = mode.nonce
ctx = self._backend._lib.CreateCipher(
cipher.name.lower(), mode.name.lower(), operation,
iv_or_nonce, len(iv_or_nonce),
cipher.key, len(cipher.key))
if ctx == -1:
raise ValueError(
"cipher {0} in {1} mode errored with the provided parameters."
.format(cipher.name, mode.name)
)
self._ctx = ctx
self._buffer = []
def update(self, data):
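        # Buffer incoming data and only hand complete cipher blocks to the backend; the remainder waits for the next call.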
self._buffer += data
block_size = self._cipher.block_size // 8
to_update = self._buffer[
:(len(self._buffer) // block_size) * block_size]
if to_update:
dst = self._backend._ffi.new("char []", len(to_update))
self._backend._lib.UpdateCipher(
self._ctx, dst, to_update, len(to_update))
result = self._backend._ffi.buffer(dst, len(to_update))[:]
self._buffer = self._buffer[len(to_update):]
return result
return b""
def finalize(self):
if self._buffer:
raise ValueError("The length of the provided data is not a "
"multiple of the block length.")
return b""
| [
"cryptography.utils.register_interface"
] | [((923, 970), 'cryptography.utils.register_interface', 'utils.register_interface', (['ciphers.CipherContext'], {}), '(ciphers.CipherContext)\n', (947, 970), False, 'from cryptography import utils\n')] |
from streampy_classes import Stream
from streampy_classes import Agent
from streampy_classes import Operators
from indegree_CountMin import *
import networkx as nx
#from Misra_Gries_list import Misra_Gries_list
def test_MG_Class():
input_stream = Stream.Stream('input')
a = indegree_CountMin(input_stream, 1, 5, 10, 4, 4)
input_stream.extend([(3,1),(4,1)])
test1 = a.query(1)
assert test1 == 0
input_stream.extend([(7,2),(2,1)])
test2 = a.query(1)
assert int(test2) == 3
input_stream.extend([(6,2), (1,2), (3,4), (6,7)])
test3 = a.query(6)
test4 = a.query(7)
assert test3 == 0
assert test4 == 1
| [
"streampy_classes.Stream.Stream"
] | [((254, 276), 'streampy_classes.Stream.Stream', 'Stream.Stream', (['"""input"""'], {}), "('input')\n", (267, 276), False, 'from streampy_classes import Stream\n')] |
import unittest
from has_unique_chars import has_unique_chars
class Test_Case_Is_Unique_Chars(unittest.TestCase):
def test_is_unique_chars(self):
self.assertTrue(has_unique_chars(''))
self.assertTrue(has_unique_chars('taco'))
self.assertFalse(has_unique_chars('ttaco'))
self.assertFalse(has_unique_chars('taaco'))
self.assertFalse(has_unique_chars('tacoo'))
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"has_unique_chars.has_unique_chars"
] | [((436, 451), 'unittest.main', 'unittest.main', ([], {}), '()\n', (449, 451), False, 'import unittest\n'), ((176, 196), 'has_unique_chars.has_unique_chars', 'has_unique_chars', (['""""""'], {}), "('')\n", (192, 196), False, 'from has_unique_chars import has_unique_chars\n'), ((222, 246), 'has_unique_chars.has_unique_chars', 'has_unique_chars', (['"""taco"""'], {}), "('taco')\n", (238, 246), False, 'from has_unique_chars import has_unique_chars\n'), ((273, 298), 'has_unique_chars.has_unique_chars', 'has_unique_chars', (['"""ttaco"""'], {}), "('ttaco')\n", (289, 298), False, 'from has_unique_chars import has_unique_chars\n'), ((325, 350), 'has_unique_chars.has_unique_chars', 'has_unique_chars', (['"""taaco"""'], {}), "('taaco')\n", (341, 350), False, 'from has_unique_chars import has_unique_chars\n'), ((377, 402), 'has_unique_chars.has_unique_chars', 'has_unique_chars', (['"""tacoo"""'], {}), "('tacoo')\n", (393, 402), False, 'from has_unique_chars import has_unique_chars\n')] |
__author__ = "<NAME>"
__copyright__ = "Copyright 2021"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from collections import defaultdict
from datetime import datetime
from json import load, dump, dumps
import re
import sys
import traceback
from openpyxl import load_workbook
from requests import Session
from config_handler import ConfigHandler
config = ConfigHandler('../../conf/harvester.ini')
NLP_URL = config.get_config_option('parr', 'nlp_url')
ALL_HDR = config.get_eval_option('parr', 'hdr_map')
REX_TEL = re.compile('\d{3}(\-|\.)\d{3}(\-|\.)\d{4}(\s+Ext\.\d+)?')
def to_years(value):
if isinstance(value, int):
return value
if '-' in value:
splits = value.split('-')
start = int(splits[0])
end = int('20' + splits[1]) if len(splits[1]) < 4 else int(splits[1])
return [start, end]
return int(value)
def to_contact_list(value):
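    # Split a comma-separated contact string into dicts holding a name plus optional tel and email fields.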
contact_list = []
for c in value.split(','):
contact = dict()
splits = [s.strip() for s in c.strip().split()]
if '@' in splits[-1]:
contact['email'] = splits[-1]
remain = ' '.join(splits[:-1]).strip()
match = REX_TEL.search(remain)
if match:
contact['tel'] = remain[match.start(): match.end()]
contact['name'] = remain[:match.start()].strip()
else:
contact['name'] = remain
contact_list.append(contact)
return contact_list
def to_yn(value):
if value in ['Yes', 'Y']:
return True
    elif value in ['No', 'N']:
return False
return None
def to_str_list(value):
if not isinstance(value, str):
return None
return [s.strip("and ").strip() for s in value.split(',')]
def to_number(value):
if isinstance(value, int):
return int(value)
elif isinstance(value, float):
return float(value)
elif isinstance(value, str):
try:
return float(value) if "." in value else int(value)
except:
return None
return None
def transform(case, cell):
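    # Normalise a spreadsheet cell according to the column type declared for it in the header map.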
if cell.value in [None, '', 'NA', 'Unknown']:
return None
value = cell.value.strip() if isinstance(cell.value, str) else cell.value
if case == 'number':
return to_number(value)
if case == 'integer':
return int(value) if isinstance(value, int) else None
elif case == 'year':
return to_years(value)
elif case == 'yn':
return to_yn(value)
elif case == 'category_list':
return [s.strip() for s in value.split(',') if s.strip()]
elif case == 'contact_list':
return to_contact_list(value)
elif case in ['ne_org', 'ne_loc']:
return value if value != 'No specific location available' else None
elif case == 'ne_org_list':
return to_str_list(value)
elif case in ['loc_lat', 'loc_lng']:
if isinstance(value, float):
if case == 'loc_lat':
return value if value >= -90 and value <= 90 else None
return value
return None
return value
def xlsx_to_json(file_name, sheet, data_area):
session = Session()
ws = load_workbook(filename=file_name)[sheet]
hdr_map = {i: [h[1], h[2]] for i, h in enumerate(ALL_HDR)}
# all_e_set, all_k_set = set(), set()
# all_v_set = {
# h[1]: set() for _, h in enumerate(ALL_HDR)
# if h[2] in ['category', 'category_list', 'ne_org', 'ne_org_list', 'ne_loc']
# }
row_list, row_no = [], 1
for row in ws[data_area[0]: data_area[1]]:
selected = False
for i, cell in enumerate(row):
if hdr_map[i][0] != 'selected_project' or cell.value is None:
continue
selected = cell.value == 'y'
print(row_no, hdr_map[i][0], cell.value, selected, len(row_list))
break
row_no += 1
if not selected:
continue
# if row[59] != 'y':
# continue
row_map = dict()
for i, cell in enumerate(row):
d = transform(hdr_map[i][1], cell)
if d:
# row_map[hdr_map[i][0]] = [d, hdr_map[i][1]]
row_map[hdr_map[i][0]] = d
row_list.append(row_map)
# for k, v in row_map.items():
# print(k, v)
# if k in all_v_set:
# if v[1] in ['category_list', 'ne_org_list']:
# all_v_set[k].update(v[0])
# else:
# all_v_set[k].add(v[0])
# payload = [
# {'u': k, 'c': v[0]} for k, v in row_map.items()
# if v[1] == 'nlp'
# ]
# print('----------')
# r = session.post(NLP_URL, data=dumps(payload))
# r.encoding = 'utf-8'
# for doc in r.json():
# for sent in doc['p']:
# print(sent['c'])
# e_set = set(e['c'] + ' ' + e['t'] for e in sent['e'])
# k_set = set(k['c'] for k in sent['k'])
# print('\t NE\t', ' | '.join(e_set))
# print('\t KP\t', ' | '.join(k_set))
# all_e_set.update(e_set)
# all_k_set.update(k_set)
# print('---------------%s---------------' % count)
# print('------------------------------')
# for e in sorted(all_e_set):
# print(e)
# print('------------------------------')
# for k in sorted(all_k_set):
# print(k)
# print('------------------------------')
# for k, v in all_v_set.items():
# print(k, v)
return row_list
if __name__ == '__main__':
row_list = xlsx_to_json(sys.argv[1], 'SalmonRestorationActivityList', ['A2', 'BI588'])
# with open('../data/parr_projects.json', 'wt') as f:
# dump(row_list, f)
# with open('../data/parr_projects.json', 'rt') as f:
# o_row_list = load(f)
#
# hdr_map = {h[1]: h[2] for h in ALL_HDR}
# for r1, r2 in zip(row_list, o_row_list):
# for k, v in r1.items():
# if hdr_map[k] in ['integer', 'number']:
# if v and k not in r2 or not r2[k]:
# r2[k] = v
with open('../../import/parr_projects_2.0.json', 'wt') as f:
dump(row_list, f)
| [
"requests.Session",
"re.compile",
"openpyxl.load_workbook",
"config_handler.ConfigHandler",
"json.dump"
] | [((411, 452), 'config_handler.ConfigHandler', 'ConfigHandler', (['"""../../conf/harvester.ini"""'], {}), "('../../conf/harvester.ini')\n", (424, 452), False, 'from config_handler import ConfigHandler\n'), ((569, 636), 're.compile', 're.compile', (['"""\\\\d{3}(\\\\-|\\\\.)\\\\d{3}(\\\\-|\\\\.)\\\\d{4}(\\\\s+Ext\\\\.\\\\d+)?"""'], {}), "('\\\\d{3}(\\\\-|\\\\.)\\\\d{3}(\\\\-|\\\\.)\\\\d{4}(\\\\s+Ext\\\\.\\\\d+)?')\n", (579, 636), False, 'import re\n'), ((3178, 3187), 'requests.Session', 'Session', ([], {}), '()\n', (3185, 3187), False, 'from requests import Session\n'), ((3198, 3231), 'openpyxl.load_workbook', 'load_workbook', ([], {'filename': 'file_name'}), '(filename=file_name)\n', (3211, 3231), False, 'from openpyxl import load_workbook\n'), ((6249, 6266), 'json.dump', 'dump', (['row_list', 'f'], {}), '(row_list, f)\n', (6253, 6266), False, 'from json import load, dump, dumps\n')] |
import unittest
from chapter_02.src.answer_05 import sum_lists
from chapter_02.src.LinkedList import LinkedList
class TestSumLists(unittest.TestCase):
def test_empty_list(self):
self.assertIs(sum_lists(None, None), None)
self.assertEqual(sum_lists(LinkedList(), LinkedList()), LinkedList())
def test_sum_lists(self):
self.assertEqual(sum_lists(LinkedList([7, 1, 6]), LinkedList([5, 9, 2])),
LinkedList([2, 1, 9]))
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"chapter_02.src.answer_05.sum_lists",
"chapter_02.src.LinkedList.LinkedList"
] | [((507, 522), 'unittest.main', 'unittest.main', ([], {}), '()\n', (520, 522), False, 'import unittest\n'), ((207, 228), 'chapter_02.src.answer_05.sum_lists', 'sum_lists', (['None', 'None'], {}), '(None, None)\n', (216, 228), False, 'from chapter_02.src.answer_05 import sum_lists\n'), ((300, 312), 'chapter_02.src.LinkedList.LinkedList', 'LinkedList', ([], {}), '()\n', (310, 312), False, 'from chapter_02.src.LinkedList import LinkedList\n'), ((452, 473), 'chapter_02.src.LinkedList.LinkedList', 'LinkedList', (['[2, 1, 9]'], {}), '([2, 1, 9])\n', (462, 473), False, 'from chapter_02.src.LinkedList import LinkedList\n'), ((271, 283), 'chapter_02.src.LinkedList.LinkedList', 'LinkedList', ([], {}), '()\n', (281, 283), False, 'from chapter_02.src.LinkedList import LinkedList\n'), ((285, 297), 'chapter_02.src.LinkedList.LinkedList', 'LinkedList', ([], {}), '()\n', (295, 297), False, 'from chapter_02.src.LinkedList import LinkedList\n'), ((380, 401), 'chapter_02.src.LinkedList.LinkedList', 'LinkedList', (['[7, 1, 6]'], {}), '([7, 1, 6])\n', (390, 401), False, 'from chapter_02.src.LinkedList import LinkedList\n'), ((403, 424), 'chapter_02.src.LinkedList.LinkedList', 'LinkedList', (['[5, 9, 2]'], {}), '([5, 9, 2])\n', (413, 424), False, 'from chapter_02.src.LinkedList import LinkedList\n')] |
from fuf import DispatchDict
def test_basic():
d1 = {}
d = DispatchDict()
d.dispatch(d1)
d1["foo"] = 5
d["bar"] = 7
assert "foo" in d, "Dispatch lookup failed"
assert "bar" in d, "Normal lookup failed"
assert "baz" not in d, "False positive lookup"
assert d["foo"] == 5, "Value from dispatch"
def test_nodispatch():
d1 = {}
d = DispatchDict()
d1["foo"] = 5
d["bar"] = 7
assert "foo" not in d, "Dispatch lookup failed"
assert "bar" in d, "Normal lookup failed"
assert "baz" not in d, "False positive lookup"
assert d["bar"] == 7, "Value from dispatch"
| [
"fuf.DispatchDict"
] | [((69, 83), 'fuf.DispatchDict', 'DispatchDict', ([], {}), '()\n', (81, 83), False, 'from fuf import DispatchDict\n'), ((376, 390), 'fuf.DispatchDict', 'DispatchDict', ([], {}), '()\n', (388, 390), False, 'from fuf import DispatchDict\n')] |
import logging
import sys
from pathlib import Path
import fiona
import numpy as np
from affine import Affine
from shapely.affinity import affine_transform
from shapely.errors import TopologicalError
from shapely.geometry import Polygon, shape
from shapely.geometry import box
logging.basicConfig(stream=sys.stdout, format="[GeoShpFile] %(message)s")
class GeoShpFile:
def __init__(self, path: Path, transform: Affine):
assert path.is_dir()
assert type(transform) == Affine
# https://gis.stackexchange.com/questions/380357/affine-tranformation-matrix-shapely-asks-6-coefficients-but-rasterio-delivers
self.transform = [element for array in transform.column_vectors for element in array]
self.shp_file = fiona.open(path)
self.logger = logging.getLogger()
def save_clip(self, window: tuple[int, int, int, int], saving_folder: Path) -> None:
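        # Transform the pixel window into geographic coordinates, intersect it with every polygon and write the clipped geometry to a new shapefile.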
x, y, w, h = window
window_box: Polygon = box(x, y, x + w, y + h)
window_geo = affine_transform(window_box, self.transform)
geopolygons = self.read_geopolygons()
filtered_polygons = []
for geopolygon in geopolygons:
if geopolygon.intersects(window_geo):
try:
intersection = geopolygon.intersection(window_geo)
except TopologicalError:
self.logger.info("topological error, skipping...")
continue
geom_type = intersection.geom_type
assert geom_type in ['Polygon', 'MultiPolygon', 'GeometryCollection', 'LineString']
if geom_type == 'Polygon':
filtered_polygons.append(intersection)
elif geom_type == 'MultiPolygon':
for polygon in intersection.geoms:
filtered_polygons.append(polygon)
elif geom_type == 'GeometryCollection':
for obj in intersection.geoms:
if obj.geom_type == 'Polygon':
filtered_polygons.append(obj)
elif geom_type == 'LineString':
pass
else:
self.logger.info(f"Geometry: {geom_type} is not supported! Skipping...")
# Converting polygons into shp file
schema = {
'geometry': 'Polygon',
'properties': [('Name', "str")]
}
with fiona.open(
saving_folder,
mode='w',
driver="ESRI Shapefile",
schema=schema
) as file:
for polygon in filtered_polygons:
assert polygon.geom_type != 'MultiPolygon'
if polygon.geom_type == 'GeometryCollection':
for obj in polygon.geoms:
print(obj)
file.write(self.generate_row_dictionary(polygon))
def read_geopolygons(self) -> list[Polygon]:
polygons = []
for el in self.shp_file:
geopolygon = shape(el['geometry'])
polygons.append(geopolygon)
return polygons
def close(self):
self.shp_file.close()
@staticmethod
def generate_row_dictionary(polygon: Polygon):
array_polygon = np.array(polygon.exterior.coords)
row_dict = {
"geometry": {
"type": "Polygon",
'coordinates': [array_polygon]
},
'properties': {
"Name": "segment"
}
}
return row_dict
| [
"logging.basicConfig",
"logging.getLogger",
"shapely.geometry.box",
"numpy.array",
"fiona.open",
"shapely.geometry.shape",
"shapely.affinity.affine_transform"
] | [((278, 351), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'format': '"""[GeoShpFile] %(message)s"""'}), "(stream=sys.stdout, format='[GeoShpFile] %(message)s')\n", (297, 351), False, 'import logging\n'), ((752, 768), 'fiona.open', 'fiona.open', (['path'], {}), '(path)\n', (762, 768), False, 'import fiona\n'), ((791, 810), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (808, 810), False, 'import logging\n'), ((959, 982), 'shapely.geometry.box', 'box', (['x', 'y', '(x + w)', '(y + h)'], {}), '(x, y, x + w, y + h)\n', (962, 982), False, 'from shapely.geometry import box\n'), ((1004, 1048), 'shapely.affinity.affine_transform', 'affine_transform', (['window_box', 'self.transform'], {}), '(window_box, self.transform)\n', (1020, 1048), False, 'from shapely.affinity import affine_transform\n'), ((3283, 3316), 'numpy.array', 'np.array', (['polygon.exterior.coords'], {}), '(polygon.exterior.coords)\n', (3291, 3316), True, 'import numpy as np\n'), ((2446, 2521), 'fiona.open', 'fiona.open', (['saving_folder'], {'mode': '"""w"""', 'driver': '"""ESRI Shapefile"""', 'schema': 'schema'}), "(saving_folder, mode='w', driver='ESRI Shapefile', schema=schema)\n", (2456, 2521), False, 'import fiona\n'), ((3050, 3071), 'shapely.geometry.shape', 'shape', (["el['geometry']"], {}), "(el['geometry'])\n", (3055, 3071), False, 'from shapely.geometry import Polygon, shape\n')] |
from test.base_test import BaseTest
from app.helpers.helper import Helper
class TestAuthMiddleware(BaseTest):
def test_verify_token(self):
token = Helper.token(1, '<PASSWORD>') # not user with token created using 'newuser'
headers = dict(Token=token)
data = dict(amount=10)
response = self.client.post('/wallets/deposit', data=data, headers=headers)
self.assertEqual(response.status_code, 401)
self.assertTrue('error' in response.json)
self.assertEqual(response.json['error'], 'Expects a valid token to be provided')
| [
"app.helpers.helper.Helper.token"
] | [((161, 190), 'app.helpers.helper.Helper.token', 'Helper.token', (['(1)', '"""<PASSWORD>"""'], {}), "(1, '<PASSWORD>')\n", (173, 190), False, 'from app.helpers.helper import Helper\n')] |
# coding=utf-8
import argparse
import datetime as dt
import logging
import numpy as np
import os
import sys
from typing import Any, Dict, Tuple, Optional
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from section2_1 import calc_E_T
def main():
    # Parse command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"filename",
type=str,
        help="Equipment conditions JSON file"
)
args = parser.parse_args()
import json
with open(args.filename, 'r') as fin:
d = json.load(fin)
E_T, E_H, E_C, E_V, E_L, E_W, E_S, E_M, UPL, E_gen, E_E_gen, E_E_PV_h_d_t, E_E, E_G, E_K = calc_E_T(d)
result = {
"E_T": E_T,
"E_H": E_H,
"E_C": E_C,
"E_V": E_V,
"E_L": E_L,
"E_W": E_W,
"E_S": E_S,
"E_M": E_M,
"UPL": str(UPL),
"E_gen": E_gen,
"E_E_gen": E_E_gen,
"E_E": str(E_E),
"E_G": str(E_G),
"E_K": str(E_K),
}
print(json.dumps(result, indent=2))
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"json.dumps",
"os.path.dirname",
"json.load",
"section2_1.calc_E_T"
] | [((297, 322), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (320, 322), False, 'import argparse\n'), ((190, 215), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (205, 215), False, 'import os\n'), ((522, 536), 'json.load', 'json.load', (['fin'], {}), '(fin)\n', (531, 536), False, 'import json\n'), ((636, 647), 'section2_1.calc_E_T', 'calc_E_T', (['d'], {}), '(d)\n', (644, 647), False, 'from section2_1 import calc_E_T\n'), ((1059, 1087), 'json.dumps', 'json.dumps', (['result'], {'indent': '(2)'}), '(result, indent=2)\n', (1069, 1087), False, 'import json\n')] |
"""Add last_notified_message_id to User
Revision ID: 065886328b03
Revises: c4445e08ea86
Create Date: 2021-02-22 12:49:32.776035
"""
import geoalchemy2
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "065886328b03"
down_revision = "c4445e08ea86"
branch_labels = None
depends_on = None
def upgrade():
op.add_column("users", sa.Column("last_notified_message_id", sa.BigInteger(), nullable=False, server_default="0"))
# drop default
op.alter_column("users", "last_notified_message_id", server_default=None)
op.execute("ALTER TYPE backgroundjobtype ADD VALUE 'send_message_notifications'")
def downgrade():
raise Exception("Can't downgrade.")
| [
"alembic.op.alter_column",
"alembic.op.execute",
"sqlalchemy.BigInteger"
] | [((497, 570), 'alembic.op.alter_column', 'op.alter_column', (['"""users"""', '"""last_notified_message_id"""'], {'server_default': 'None'}), "('users', 'last_notified_message_id', server_default=None)\n", (512, 570), False, 'from alembic import op\n'), ((575, 661), 'alembic.op.execute', 'op.execute', (['"""ALTER TYPE backgroundjobtype ADD VALUE \'send_message_notifications\'"""'], {}), '(\n "ALTER TYPE backgroundjobtype ADD VALUE \'send_message_notifications\'")\n', (585, 661), False, 'from alembic import op\n'), ((420, 435), 'sqlalchemy.BigInteger', 'sa.BigInteger', ([], {}), '()\n', (433, 435), True, 'import sqlalchemy as sa\n')] |
# https://www.acmicpc.net/problem/1993
import sys
sys.setrecursionlimit(999999999)
def dfs(name, cur, acc, score):
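    # Explore every route to the finish, pruning branches whose accumulated distance exceeds the runner's remaining energy.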
if cur == N + 1:
res[name] = max(res[name], score)
return
for nxt, dist, s in adj[cur]:
if people[name] < acc + dist:
continue
dfs(name, nxt, acc + dist, score + s)
if __name__ == '__main__':
input = __import__('sys').stdin.readline
T = 0
while True:
T += 1
N = int(input())
if N == 0:
break
data = [[0, 0, 0]] + [list(map(int,input().split())) for idx in range(N)]
adj = [list() for _ in range(N + 1)]
for cur in range(N + 1):
for nxt, value in enumerate(data):
if cur >= nxt:
continue
nx, ny, s = value
x, y, _ = data[cur]
adj[cur].append((nxt, ((nx - x) ** 2 + (ny - y) ** 2) ** (1 / 2), s))
if cur == 0:
continue
adj[cur].append((N + 1, (data[cur][0] ** 2 + data[cur][1] ** 2) ** (1 / 2), 0))
res = dict()
people = dict()
order = list()
while True:
name, health = input().split()
health = int(health)
if name == '#' and health == 0:
break
people[name] = health
order.append((name, health))
res[name] = 0
for name in people.keys():
dfs(name, 0, 0, 0)
print('Race {}'.format(T))
for name, health in order:
print('{}: {}'.format(name, res[name])) | [
"sys.setrecursionlimit"
] | [((51, 83), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(999999999)'], {}), '(999999999)\n', (72, 83), False, 'import sys\n')] |
from rest_framework import viewsets
from api.serializers import BookSerializer, AuthorSerializer
from books.models import Book, Author
class BookViewSet(viewsets.ModelViewSet):
model = Book
serializer_class = BookSerializer
queryset = Book.objects.all()
class AuthorViewSet(viewsets.ModelViewSet):
model = Author
serializer_class = AuthorSerializer
queryset = Author.objects.all() | [
"books.models.Author.objects.all",
"books.models.Book.objects.all"
] | [((250, 268), 'books.models.Book.objects.all', 'Book.objects.all', ([], {}), '()\n', (266, 268), False, 'from books.models import Book, Author\n'), ((388, 408), 'books.models.Author.objects.all', 'Author.objects.all', ([], {}), '()\n', (406, 408), False, 'from books.models import Book, Author\n')] |
##
#
# cmap_parse.py
# An attempt to parse concept maps, exported from cmap tools...take one
#
# Copyright 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and limitations under the
# License.
#
##
import glob
import re
import os
import itertools
import networkx as nx
def CxlConversion (file):
# get the concepts, linking phrases, and connections
concepts = {}
linking_phrases = {}
connections = []
concepts_linked = []
for line in f:
if "concept id=" in line:
concept = re.findall (r'"([^"]*)"', line)
concepts[concept[0]] = concept[1]
# get the linking phrases
if "linking-phrase id=" in line:
linking_phrase = re.findall (r'"([^"]*)"', line)
linking_phrases[linking_phrase[0]] = linking_phrase[1]
# get the connections
if "connection id=" in line:
connections.append (re.findall (r'"([^"]*)"', line))
# cycle through the linking phrase list, find all lines matching from-id and to-id
# edges are combinations of from-id and to-id
for key in linking_phrases:
from_links = []
to_links = []
for connection in connections:
# if linking phrase is in the from-id (linking phrase at beginning)
if key == connection[1]:
from_links.append ([linking_phrases[key],concepts[connection[2]]])
# if linking phrase is in the to-id (linking phrase at the end)
if key == connection[2]:
to_links.append ([concepts[connection[1]], linking_phrases[key]])
#print to_links
#print from_links
#print "--"
# now combine the lists, to_links to from_links
for to_link in to_links:
for from_link in from_links:
concepts_linked.append ([to_link[0], to_link[1], from_link[1]])
return concepts_linked
def CmapParse (cmap_files, result, filenames, root_concept, export_concepts):
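    # For each concept map: count concepts, first-level hierarchies, the longest path from the root and crosslinks between hierarchies, writing one result row per map.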
# store all concepts to print later
all_concepts = []
# open the result file to write output
rfile = open(result, 'w')
rfile.write('Filename\t Num Concepts\t Num Hierarchies\t Highest Hierarchy\t Num Crosslinks\t\n\n')
# iterate over all the files and start doing stuffs
for index, cmap_file in enumerate(cmap_files):
# create an empty Multi-directed graph
G = nx.MultiDiGraph ()
# open a cmap text file and begin writing results
global f
f = open (cmap_file)
rfile.write(filenames[index] + '\t')
# if file extension cxl, do this fun conversion
textFormatCorrect = True
if os.path.splitext(filenames[index])[1][1:] == "cxl":
concepts_linked = CxlConversion(f)
for edge in concepts_linked:
G.add_edge (edge[0].lower(), edge[2].lower(), link=edge[1].lower())
else:
# split the lines in to a list
lines = ((f.read ()).splitlines ())
# iterate over the list and split each line
# in to individual lists, delimited by tab
for line in lines:
edge = line.split ('\t')
# break if not 3 items per line
if len(edge) != 3:
rfile.write('>> Text file not formatted correctly.\n')
textFormatCorrect = False
break
G.add_edge (edge[0].lower(), edge[2].lower(), link=edge[1].lower())
# if the file had a line without 3 items, break completely
if not textFormatCorrect:
continue
# if 'sustainability' isn't a concept, fail
if root_concept.lower() not in G:
rfile.write('>> ' + root_concept.lower() + ' not a concept in the map.\n')
continue
# store first-level hierarchy concepts
hierarchy_list = G.successors (root_concept.lower())
# iterate through the main graph and set hierarchy to zero for now
for x in G:
G.node[x]['hier'] = 0
# iterate through the top hierarchy in the main graph and set these first-level hierarchy
# concepts to an incrementing integer
hierIter = 1
for x in hierarchy_list:
G.node[x]['hier'] = hierIter
hierIter += 1
# number of concepts is the number of nodes
# minus the root node
num_concepts = G.order () - 1
# hierarchy is the out degree of the root node
# we assume the root is 'sustainabiliy'
hierarchy = G.out_degree (root_concept.lower())
# look at all paths from sustainability to all
# other nodes. no repeated nodes (cycles)
paths_list = []
for n in G.nodes ():
for path in nx.all_simple_paths (G, source=root_concept.lower(), target=n):
paths_list.append (path)
# highest hierarchy defined here as the max path length
# this is a bit different than how it's done manually
# discuss later
highest_hier = max (len (x) for x in paths_list) - 1
# let's make subgraphs of all hierarchies
# we can use these subgraphs to do some
# operations and check out cross links
subgraph_list = []
for x in hierarchy_list:
subgraph = nx.MultiDiGraph ()
connected_nodes = []
for y in G.nodes ():
if nx.has_path (G, x, y):
connected_nodes.append (y)
subgraph = G.subgraph(connected_nodes).copy ()
subgraph.graph['name'] = x
subgraph_list.append (subgraph)
# for node not in first-level hierarchy, check which
# of the first-level concepts is closest (shortest path)
# and then label it with that hierarchy
fail = False
for n in G.nodes ():
shortest_path = 0
assoc_hier = ''
if n not in (hierarchy_list, root_concept.lower ()):
path_list = []
for y in hierarchy_list:
if nx.has_path (G, y, n):
path_list = nx.shortest_path (G, y, n)
if shortest_path == 0:
assoc_hier = y
shortest_path = len (path_list)
else:
if (len (path_list) < shortest_path):
assoc_hier = y
shortest_path = len (path_list)
if assoc_hier:
G.node[n]['hier'] = G.node[assoc_hier]['hier']
#print G.node[n]['hier']
else:
fail = True
rfile.write('>> One or more concepts not connected to first-level hierarchy. \n')
break
# if exporting concepts, store the concepts
if export_concepts:
all_concepts.append(G.nodes())
# a concept was not connected to a first-level hierarchy
# move on to the next concept map
if fail:
continue
# now i need to find all edges that have
# two hier node attributes that don't match.
# these are crosslinks
total_crosslinks = 0
for x in G.edges():
if ((G.node[x[0]]['hier']) != 0) and ((G.node[x[1]]['hier']) != 0):
if G.node[x[0]]['hier'] != G.node[x[1]]['hier']:
#print (str (x[0]) + ' ---- ' + str (x[1]) + ' hier: ' + str (G.node[x[0]]['hier']) + ' ---- ' + str (G.node[x[1]]['hier']))
total_crosslinks += 1
# print out the stuffs
rfile.write(str (num_concepts) + '\t')
rfile.write(str (hierarchy) + '\t')
rfile.write(str (highest_hier) + '\t')
rfile.write(str (total_crosslinks) + '\t')
# make it pretty
rfile.write('\n')
# show me cycles
#print ('>> Cycles: ' + str (nx.simple_cycles (G)))
# close up the cmap file
f.close()
# if exporting concepts, print them out
rfile.write('\n')
if export_concepts:
rfile.write('Filename\t')
for filename in filenames:
rfile.write(filename + '\t')
rfile.write('\n')
rfile.write('Concepts')
# transpose to columns and write
transposed_all_concepts = map(lambda *row: list(row), *all_concepts)
for x, concepts in enumerate(transposed_all_concepts):
rfile.write('\t')
for concept in transposed_all_concepts[x]:
if concept:
                #stripping these 
 characters, some cxl files seem to have for some reason
                rfile.write(concept.replace('
', ' ') + '\t')
else:
rfile.write('\t')
rfile.write('\n')
# close the result file
rfile.close()
# eof.zomg
| [
"networkx.MultiDiGraph",
"os.path.splitext",
"networkx.shortest_path",
"re.findall",
"networkx.has_path"
] | [((3195, 3212), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (3210, 3212), True, 'import networkx as nx\n'), ((1023, 1052), 're.findall', 're.findall', (['""""([^"]*)\\""""', 'line'], {}), '(\'"([^"]*)"\', line)\n', (1033, 1052), False, 'import re\n'), ((1238, 1267), 're.findall', 're.findall', (['""""([^"]*)\\""""', 'line'], {}), '(\'"([^"]*)"\', line)\n', (1248, 1267), False, 'import re\n'), ((6826, 6843), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {}), '()\n', (6841, 6843), True, 'import networkx as nx\n'), ((1469, 1498), 're.findall', 're.findall', (['""""([^"]*)\\""""', 'line'], {}), '(\'"([^"]*)"\', line)\n', (1479, 1498), False, 'import re\n'), ((6970, 6990), 'networkx.has_path', 'nx.has_path', (['G', 'x', 'y'], {}), '(G, x, y)\n', (6981, 6990), True, 'import networkx as nx\n'), ((3521, 3555), 'os.path.splitext', 'os.path.splitext', (['filenames[index]'], {}), '(filenames[index])\n', (3537, 3555), False, 'import os\n'), ((7853, 7873), 'networkx.has_path', 'nx.has_path', (['G', 'y', 'n'], {}), '(G, y, n)\n', (7864, 7873), True, 'import networkx as nx\n'), ((7936, 7961), 'networkx.shortest_path', 'nx.shortest_path', (['G', 'y', 'n'], {}), '(G, y, n)\n', (7952, 7961), True, 'import networkx as nx\n')] |
#!/usr/bin/python
# PiFmAdv - Advanced featured FM transmitter for the Raspberry Pi
# Copyright (C) 2017 Miegl
#
# See https://github.com/Miegl/PiFmAdv
import scipy.io.wavfile as wavfile
import numpy
sample_rate = 228000
samples = numpy.zeros(15 * sample_rate, dtype=numpy.dtype('>i2'))
# 10-second 440 Hz tone (written into a 15-second buffer)
samples[:10*sample_rate] = (numpy.sin(2*numpy.pi*440*numpy.arange(10*sample_rate)/sample_rate)
* 20000).astype(numpy.dtype('>i2'))
wavfile.write("pulses.wav", sample_rate, samples)
| [
"numpy.dtype",
"scipy.io.wavfile.write",
"numpy.arange"
] | [((454, 503), 'scipy.io.wavfile.write', 'wavfile.write', (['"""pulses.wav"""', 'sample_rate', 'samples'], {}), "('pulses.wav', sample_rate, samples)\n", (467, 503), True, 'import scipy.io.wavfile as wavfile\n'), ((433, 451), 'numpy.dtype', 'numpy.dtype', (['""">i2"""'], {}), "('>i2')\n", (444, 451), False, 'import numpy\n'), ((277, 295), 'numpy.dtype', 'numpy.dtype', (['""">i2"""'], {}), "('>i2')\n", (288, 295), False, 'import numpy\n'), ((367, 397), 'numpy.arange', 'numpy.arange', (['(10 * sample_rate)'], {}), '(10 * sample_rate)\n', (379, 397), False, 'import numpy\n')] |
import numpy as np
from force_bdss.api import BaseDataSource, DataValue, Slot
class ViscosityDataSource(BaseDataSource):
"""Class that calculates viscosity from Gromacs
simulation results"""
def run(self, model, parameters):
values = [p.value for p in parameters]
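        # Gaussian-shaped response: viscosity is highest when the first input parameter equals 1.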
viscosity = np.exp(-0.5 * (values[0] - 1) ** 2 / (2 * np.pi))
pass_mark = viscosity > model.threshold
model.notify_pass_mark(pass_mark)
return [DataValue(type="VISCOSITY", value=viscosity)]
def slots(self, model):
return (
(Slot(description="Simulation results", type="RESULTS"),),
(
Slot(
description="Calculated viscosity of surfactant",
type="VISCOSITY",
),
),
)
| [
"numpy.exp",
"force_bdss.api.DataValue",
"force_bdss.api.Slot"
] | [((310, 359), 'numpy.exp', 'np.exp', (['(-0.5 * (values[0] - 1) ** 2 / (2 * np.pi))'], {}), '(-0.5 * (values[0] - 1) ** 2 / (2 * np.pi))\n', (316, 359), True, 'import numpy as np\n'), ((469, 513), 'force_bdss.api.DataValue', 'DataValue', ([], {'type': '"""VISCOSITY"""', 'value': 'viscosity'}), "(type='VISCOSITY', value=viscosity)\n", (478, 513), False, 'from force_bdss.api import BaseDataSource, DataValue, Slot\n'), ((575, 629), 'force_bdss.api.Slot', 'Slot', ([], {'description': '"""Simulation results"""', 'type': '"""RESULTS"""'}), "(description='Simulation results', type='RESULTS')\n", (579, 629), False, 'from force_bdss.api import BaseDataSource, DataValue, Slot\n'), ((663, 735), 'force_bdss.api.Slot', 'Slot', ([], {'description': '"""Calculated viscosity of surfactant"""', 'type': '"""VISCOSITY"""'}), "(description='Calculated viscosity of surfactant', type='VISCOSITY')\n", (667, 735), False, 'from force_bdss.api import BaseDataSource, DataValue, Slot\n')] |
import logging
import requests
from config import CFG
class SlackHandler(logging.StreamHandler):
def __init__(self):
super(SlackHandler, self).__init__()
self.url = CFG.WEBHOOK_URL
def emit(self, record):
msg = self.format(record)
self.send_message(msg)
def send_message(self, text):
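        # Wrap error messages in a Slack code block, prefix everything else with an emoji, then POST to the webhook.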
if "[!] Error:" in text:
text = "```" + text + " ```"
else:
text = ":sparkles: " + text
message = {"text": text}
requests.post(self.url, json=message)
| [
"requests.post"
] | [((503, 540), 'requests.post', 'requests.post', (['self.url'], {'json': 'message'}), '(self.url, json=message)\n', (516, 540), False, 'import requests\n')] |
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
import torch.nn.functional as F
from MaskFormer.demo.demo import get_maskformer
from retrieval_head import Head
class CNNLSTM(nn.Module):
def __init__(self, num_cam, num_ego_class, num_actor_class):
super(CNNLSTM, self).__init__()
self.num_cam = num_cam
self.backbone = get_maskformer().backbone
self.conv1 = nn.Sequential(
nn.Conv2d(2048, 1024, kernel_size=3, stride=1, padding=1,),
nn.BatchNorm2d(1024),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(1024*self.num_cam, 256, kernel_size=1, stride=1, padding=1),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True)
)
self.en_lstm = nn.LSTM(input_size=256, hidden_size=256, num_layers=1, batch_first=True)
self.pool = nn.AdaptiveAvgPool2d((1,1))
self.head = Head(256, num_ego_class, num_actor_class)
def train_forward(self, inputs, tops=False, front_only=True):
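        # Encode each timestep with the MaskFormer backbone, pool to a 256-d feature and run the sequence through the LSTM; the final hidden state feeds the classification head.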
hidden = None
seq_len = len(inputs)//self.num_cam
batch_size = inputs[0].shape[0]
w, h = inputs[0].shape[2], inputs[0].shape[3]
for t in range(seq_len):
x = inputs[t]
if isinstance(x, list):
x = torch.stack(x, dim=0)
            x = x.view(batch_size*self.num_cam, 3, w, h)
x = normalize_imagenet(x)
x = self.backbone(x)['res5']
x = self.conv1(x)
x = self.conv2(x)
x = self.pool(x)
x = torch.flatten(x, 1)
out, hidden = self.en_lstm(x.view(batch_size, 1, 256), hidden)
ego, actor = self.head(out[:, -1, :])
return ego, actor
def forward(self, fronts, lefts, rights, tops=False):
hidden = None
if not tops:
batch_size = fronts[0].shape[0]
seq_len = len(fronts)
w, h = fronts[0].shape[2], fronts[0].shape[3]
else:
batch_size = tops[0].shape[0]
seq_len = len(tops)
w, h = tops[0].shape[2], tops[0].shape[3]
for t in range(seq_len):
x = []
if not tops:
x.append(fronts[t])
x.append(lefts[t])
x.append(rights[t])
else:
x.append(tops[t])
x = torch.stack(x, dim=0).view(batch_size*self.num_cam, 3, w, h)
x = self.backbone(x)['res5']
x = self.conv1(x)
x = self.conv2(x)
x = self.pool(x)
x = torch.flatten(x, 1)
out, hidden = self.en_lstm(x.view(batch_size, 1, 256), hidden)
ego, actor = self.head(out[:, -1, :])
return ego, actor
def normalize_imagenet(x):
""" Normalize input images according to ImageNet standards.
Args:
x (tensor): input images
"""
x = x.clone()
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
return x
| [
"torch.nn.BatchNorm2d",
"torch.nn.ReLU",
"torch.nn.LSTM",
"torch.stack",
"retrieval_head.Head",
"torch.nn.Conv2d",
"MaskFormer.demo.demo.get_maskformer",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten"
] | [((918, 990), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': '(256)', 'hidden_size': '(256)', 'num_layers': '(1)', 'batch_first': '(True)'}), '(input_size=256, hidden_size=256, num_layers=1, batch_first=True)\n', (925, 990), True, 'import torch.nn as nn\n'), ((1020, 1048), 'torch.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (1040, 1048), True, 'import torch.nn as nn\n'), ((1069, 1110), 'retrieval_head.Head', 'Head', (['(256)', 'num_ego_class', 'num_actor_class'], {}), '(256, num_ego_class, num_actor_class)\n', (1073, 1110), False, 'from retrieval_head import Head\n'), ((424, 440), 'MaskFormer.demo.demo.get_maskformer', 'get_maskformer', ([], {}), '()\n', (438, 440), False, 'from MaskFormer.demo.demo import get_maskformer\n'), ((505, 562), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(1024)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(2048, 1024, kernel_size=3, stride=1, padding=1)\n', (514, 562), True, 'import torch.nn as nn\n'), ((583, 603), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (597, 603), True, 'import torch.nn as nn\n'), ((623, 644), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (630, 644), True, 'import torch.nn as nn\n'), ((722, 793), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024 * self.num_cam)', '(256)'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(1)'}), '(1024 * self.num_cam, 256, kernel_size=1, stride=1, padding=1)\n', (731, 793), True, 'import torch.nn as nn\n'), ((811, 830), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (825, 830), True, 'import torch.nn as nn\n'), ((850, 871), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (857, 871), True, 'import torch.nn as nn\n'), ((1715, 1734), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1728, 1734), False, 'import torch\n'), ((2725, 2744), 'torch.flatten', 'torch.flatten', (['x', '(1)'], {}), '(x, 1)\n', (2738, 2744), False, 'import torch\n'), ((1456, 1477), 'torch.stack', 'torch.stack', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (1467, 1477), False, 'import torch\n'), ((2518, 2539), 'torch.stack', 'torch.stack', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (2529, 2539), False, 'import torch\n')] |
import os
import sys
import logging
import time
import shutil
import argparse
import numpy as np
import pandas as pd
from PIL import Image
from tqdm import tqdm
from collections import defaultdict
import torch
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import dataloading as dl
import model as mdl
logger_py = logging.getLogger(__name__)
# Fix seeds
np.random.seed(42)
torch.manual_seed(42)
# Arguments
parser = argparse.ArgumentParser(
description='Training of UNISURF model'
)
parser.add_argument('config', type=str, help='Path to config file.')
parser.add_argument('--no-cuda', action='store_true', help='Do not use cuda.')
parser.add_argument('--exit-after', type=int, default=-1,
help='Checkpoint and exit after specified number of '
'seconds with exit code 2.')
args = parser.parse_args()
cfg = dl.load_config(args.config, 'configs/default.yaml')
is_cuda = (torch.cuda.is_available() and not args.no_cuda)
device = torch.device("cuda" if is_cuda else "cpu")
# params
out_dir = cfg['training']['out_dir']
backup_every = cfg['training']['backup_every']
exit_after = args.exit_after
batch_size = cfg['training']['batch_size']
n_workers = cfg['dataloading']['n_workers']
lr = cfg['training']['learning_rate']
# init dataloader
train_loader = dl.get_dataloader(cfg, mode='train')
test_loader = dl.get_dataloader(cfg, mode='test')
iter_test = iter(test_loader)
data_test = next(iter_test)
# init network
model_cfg = cfg['model']
model = mdl.NeuralNetwork(model_cfg)
print(model)
# init renderer
rendering_cfg = cfg['rendering']
renderer = mdl.Renderer(model, rendering_cfg, device=device)
# init optimizer
weight_decay = cfg['training']['weight_decay']
optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
# init training
training_cfg = cfg['training']
trainer = mdl.Trainer(renderer, optimizer, training_cfg, device=device)
# init checkpoints and load
checkpoint_io = mdl.CheckpointIO(out_dir, model=model, optimizer=optimizer)
try:
load_dict = checkpoint_io.load('model.pt')
except FileExistsError:
load_dict = dict()
epoch_it = load_dict.get('epoch_it', -1)
it = load_dict.get('it', -1)
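# Recreate the LR scheduler so its milestone decays line up with the restored epoch counter.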
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, cfg['training']['scheduler_milestones'],
gamma=cfg['training']['scheduler_gamma'], last_epoch=epoch_it)
logger = SummaryWriter(os.path.join(out_dir, 'logs'))
# init training output
print_every = cfg['training']['print_every']
checkpoint_every = cfg['training']['checkpoint_every']
visualize_every = cfg['training']['visualize_every']
render_path = os.path.join(out_dir, 'rendering')
if visualize_every > 0:
visualize_skip = cfg['training']['visualize_skip']
visualize_path = os.path.join(out_dir, 'visualize')
if not os.path.exists(visualize_path):
os.makedirs(visualize_path)
# Print model
nparameters = sum(p.numel() for p in model.parameters())
logger_py.info(model)
logger_py.info('Total number of parameters: %d' % nparameters)
t0b = time.time()
while True:
epoch_it += 1
for batch in train_loader:
it += 1
loss_dict = trainer.train_step(batch, it)
loss = loss_dict['loss']
metric_val_best = loss
# Print output
if print_every > 0 and (it % print_every) == 0:
print('[Epoch %02d] it=%03d, loss=%.4f, time=%.4f'
% (epoch_it, it, loss, time.time() - t0b))
logger_py.info('[Epoch %02d] it=%03d, loss=%.4f, time=%.4f'
% (epoch_it, it, loss, time.time() - t0b))
t0b = time.time()
for l, num in loss_dict.items():
logger.add_scalar('train/'+l, num.detach().cpu(), it)
if visualize_every > 0 and (it % visualize_every)==0:
logger_py.info("Rendering")
out_render_path = os.path.join(render_path, '%04d_vis' % it)
if not os.path.exists(out_render_path):
os.makedirs(out_render_path)
val_rgb = trainer.render_visdata(
data_test,
cfg['training']['vis_resolution'],
it, out_render_path)
#logger.add_image('rgb', val_rgb, it)
# Save checkpoint
if (checkpoint_every > 0 and (it % checkpoint_every) == 0):
logger_py.info('Saving checkpoint')
print('Saving checkpoint')
checkpoint_io.save('model.pt', epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
# Backup if necessary
if (backup_every > 0 and (it % backup_every) == 0):
logger_py.info('Backup checkpoint')
checkpoint_io.save('model_%d.pt' % it, epoch_it=epoch_it, it=it,
loss_val_best=metric_val_best)
scheduler.step() | [
"logging.getLogger",
"torch.manual_seed",
"os.path.exists",
"model.Renderer",
"torch.optim.lr_scheduler.MultiStepLR",
"argparse.ArgumentParser",
"os.makedirs",
"dataloading.get_dataloader",
"os.path.join",
"torch.cuda.is_available",
"numpy.random.seed",
"model.NeuralNetwork",
"model.Trainer",
"dataloading.load_config",
"model.CheckpointIO",
"time.time",
"torch.device"
] | [((348, 375), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (365, 375), False, 'import logging\n'), ((389, 407), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (403, 407), True, 'import numpy as np\n'), ((408, 429), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (425, 429), False, 'import torch\n'), ((452, 516), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training of UNISURF model"""'}), "(description='Training of UNISURF model')\n", (475, 516), False, 'import argparse\n'), ((891, 942), 'dataloading.load_config', 'dl.load_config', (['args.config', '"""configs/default.yaml"""'], {}), "(args.config, 'configs/default.yaml')\n", (905, 942), True, 'import dataloading as dl\n'), ((1011, 1053), 'torch.device', 'torch.device', (["('cuda' if is_cuda else 'cpu')"], {}), "('cuda' if is_cuda else 'cpu')\n", (1023, 1053), False, 'import torch\n'), ((1336, 1372), 'dataloading.get_dataloader', 'dl.get_dataloader', (['cfg'], {'mode': '"""train"""'}), "(cfg, mode='train')\n", (1353, 1372), True, 'import dataloading as dl\n'), ((1387, 1422), 'dataloading.get_dataloader', 'dl.get_dataloader', (['cfg'], {'mode': '"""test"""'}), "(cfg, mode='test')\n", (1404, 1422), True, 'import dataloading as dl\n'), ((1530, 1558), 'model.NeuralNetwork', 'mdl.NeuralNetwork', (['model_cfg'], {}), '(model_cfg)\n', (1547, 1558), True, 'import model as mdl\n'), ((1633, 1682), 'model.Renderer', 'mdl.Renderer', (['model', 'rendering_cfg'], {'device': 'device'}), '(model, rendering_cfg, device=device)\n', (1645, 1682), True, 'import model as mdl\n'), ((1883, 1944), 'model.Trainer', 'mdl.Trainer', (['renderer', 'optimizer', 'training_cfg'], {'device': 'device'}), '(renderer, optimizer, training_cfg, device=device)\n', (1894, 1944), True, 'import model as mdl\n'), ((1990, 2049), 'model.CheckpointIO', 'mdl.CheckpointIO', (['out_dir'], {'model': 'model', 'optimizer': 'optimizer'}), '(out_dir, model=model, optimizer=optimizer)\n', (2006, 2049), True, 'import model as mdl\n'), ((2233, 2387), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer', "cfg['training']['scheduler_milestones']"], {'gamma': "cfg['training']['scheduler_gamma']", 'last_epoch': 'epoch_it'}), "(optimizer, cfg['training'][\n 'scheduler_milestones'], gamma=cfg['training']['scheduler_gamma'],\n last_epoch=epoch_it)\n", (2263, 2387), True, 'import torch.optim as optim\n'), ((2638, 2672), 'os.path.join', 'os.path.join', (['out_dir', '"""rendering"""'], {}), "(out_dir, 'rendering')\n", (2650, 2672), False, 'import os\n'), ((3051, 3062), 'time.time', 'time.time', ([], {}), '()\n', (3060, 3062), False, 'import time\n'), ((954, 979), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (977, 979), False, 'import torch\n'), ((2412, 2441), 'os.path.join', 'os.path.join', (['out_dir', '"""logs"""'], {}), "(out_dir, 'logs')\n", (2424, 2441), False, 'import os\n'), ((2773, 2807), 'os.path.join', 'os.path.join', (['out_dir', '"""visualize"""'], {}), "(out_dir, 'visualize')\n", (2785, 2807), False, 'import os\n'), ((2819, 2849), 'os.path.exists', 'os.path.exists', (['visualize_path'], {}), '(visualize_path)\n', (2833, 2849), False, 'import os\n'), ((2859, 2886), 'os.makedirs', 'os.makedirs', (['visualize_path'], {}), '(visualize_path)\n', (2870, 2886), False, 'import os\n'), ((3629, 3640), 'time.time', 'time.time', ([], {}), '()\n', (3638, 3640), False, 'import time\n'), ((3897, 3939), 'os.path.join', 'os.path.join', 
(['render_path', "('%04d_vis' % it)"], {}), "(render_path, '%04d_vis' % it)\n", (3909, 3939), False, 'import os\n'), ((3959, 3990), 'os.path.exists', 'os.path.exists', (['out_render_path'], {}), '(out_render_path)\n', (3973, 3990), False, 'import os\n'), ((4008, 4036), 'os.makedirs', 'os.makedirs', (['out_render_path'], {}), '(out_render_path)\n', (4019, 4036), False, 'import os\n'), ((3449, 3460), 'time.time', 'time.time', ([], {}), '()\n', (3458, 3460), False, 'import time\n'), ((3591, 3602), 'time.time', 'time.time', ([], {}), '()\n', (3600, 3602), False, 'import time\n')] |
import asyncio
from datetime import datetime
from typing import List, Dict, Any
import aiohttp
import colorama
from bs4 import BeautifulSoup
HOSTS = [
"198.18.1.101",
"198.18.1.102",
"198.18.1.103",
"198.18.1.104",
"198.18.1.105",
"198.18.1.106",
"198.18.1.107",
"198.18.1.108",
"198.18.1.109",
"198.18.1.110",
]
OTHER_PARAMS = {
"username": "cisco",
"password": "<PASSWORD>",
}
HEADERS = {
"Accept": "application/yang-data+json",
"Content-Type": "application/yang-data+json",
}
async def get_hostname(host: str) -> str:
username = OTHER_PARAMS["username"]
password = OTHER_PARAMS["password"]
async with aiohttp.ClientSession() as session:
url = f"https://{host}/restconf/data/native/hostname"
async with session.get(url, auth=aiohttp.BasicAuth(username, password), headers=HEADERS, verify_ssl=False) as response:
response.raise_for_status()
json = await response.json()
return json["Cisco-IOS-XE-native:hostname"]
def process_interfaces_json(data: Dict[str, Any]) -> List[Dict[str, Any]]:
result = []
for interface_type, interface_info in data["Cisco-IOS-XE-native:interface"].items():
for interface in interface_info:
interface_num = interface["name"]
interface_name = f"{interface_type}{interface_num}"
ip_address = interface.get("ip", {}).get("address", {}).get("primary", {}).get("address")
int_dict = {
"name": interface_name,
"ip_address": ip_address
}
result.append(int_dict)
return result
async def get_interfaces(host: str) -> List[Dict[str, Any]]:
username = OTHER_PARAMS["username"]
password = OTHER_PARAMS["password"]
async with aiohttp.ClientSession() as session:
url = f"https://{host}/restconf/data/native/interface"
async with session.get(url, auth=aiohttp.BasicAuth(username, password), headers=HEADERS, verify_ssl=False) as response:
response.raise_for_status()
data = await response.json()
return process_interfaces_json(data)
def main() -> None:
colorama.init()
start_time = datetime.now()
loop = asyncio.get_event_loop()
hostname_tasks = [
loop.create_task(get_hostname(host))
for host in HOSTS
]
interface_tasks = [
loop.create_task(get_interfaces(host))
for host in HOSTS
]
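    # Both task lists are built in HOSTS order, so gather() runs them all
    # concurrently and the zip() below pairs each host with its own results.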
loop.run_until_complete(asyncio.gather(*hostname_tasks, *interface_tasks))
for host, hostname_task, interface_task in zip(HOSTS, hostname_tasks, interface_tasks):
hostname = hostname_task.result()
interfaces = interface_task.result()
print(f"Device {host}")
print(f"Hostname: {hostname}")
print(f"Has interfaces:")
for interface in interfaces:
print(f" Interface {interface['name']} has ip address: {interface['ip_address']}")
print(f"=" * 80)
# print(f"Device {host} has hostname: {hostname}")
exec_time = (datetime.now() - start_time).total_seconds()
print(colorama.Fore.GREEN + f"Summary: it took {exec_time:,.2f} seconds to run")
if __name__ == '__main__':
main()
| [
"aiohttp.ClientSession",
"datetime.datetime.now",
"asyncio.gather",
"aiohttp.BasicAuth",
"asyncio.get_event_loop",
"colorama.init"
] | [((2191, 2206), 'colorama.init', 'colorama.init', ([], {}), '()\n', (2204, 2206), False, 'import colorama\n'), ((2224, 2238), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2236, 2238), False, 'from datetime import datetime\n'), ((2250, 2274), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2272, 2274), False, 'import asyncio\n'), ((678, 701), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (699, 701), False, 'import aiohttp\n'), ((1808, 1831), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (1829, 1831), False, 'import aiohttp\n'), ((2509, 2558), 'asyncio.gather', 'asyncio.gather', (['*hostname_tasks', '*interface_tasks'], {}), '(*hostname_tasks, *interface_tasks)\n', (2523, 2558), False, 'import asyncio\n'), ((3080, 3094), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3092, 3094), False, 'from datetime import datetime\n'), ((817, 854), 'aiohttp.BasicAuth', 'aiohttp.BasicAuth', (['username', 'password'], {}), '(username, password)\n', (834, 854), False, 'import aiohttp\n'), ((1948, 1985), 'aiohttp.BasicAuth', 'aiohttp.BasicAuth', (['username', 'password'], {}), '(username, password)\n', (1965, 1985), False, 'import aiohttp\n')] |
#
# Copyright (c) 2017 Juniper Networks, Inc.
#
import setuptools
setuptools.setup(
name='contrail-vrouter-provisioning',
version='0.1dev',
install_requires=[
'future',
],
packages=setuptools.find_packages(),
# metadata
author="OpenContrail",
author_email="<EMAIL>",
license="Apache Software License",
url="http://www.opencontrail.org/",
long_description="Contrail compute provisioning module",
entry_points={
'console_scripts': [
'contrail-compute-setup = contrail_vrouter_provisioning.setup:main',
'contrail-toragent-setup = contrail_vrouter_provisioning.toragent.setup:main',
'contrail-toragent-cleanup = contrail_vrouter_provisioning.toragent.cleanup:main',
],
},
)
| [
"setuptools.find_packages"
] | [((211, 237), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (235, 237), False, 'import setuptools\n')] |
import unittest
from prosodia.base.bnfrange import create_bnfrange
from prosodia.base.bnfrange._text import text
from prosodia.base.bnfrange.example import create_example_bnfrange
from prosodia.base.bnfrange.example._text import example_text
from ._helpers import validate_recursive_grammar
class TestBNFRange(unittest.TestCase):
def test_bnf_range_parser_works(self) -> None:
validate_recursive_grammar(self, create_bnfrange(), text)
def test_bnf_range_example_parser_works(self) -> None:
validate_recursive_grammar(
self,
create_example_bnfrange(),
example_text
)
| [
"prosodia.base.bnfrange.example.create_example_bnfrange",
"prosodia.base.bnfrange.create_bnfrange"
] | [((426, 443), 'prosodia.base.bnfrange.create_bnfrange', 'create_bnfrange', ([], {}), '()\n', (441, 443), False, 'from prosodia.base.bnfrange import create_bnfrange\n'), ((577, 602), 'prosodia.base.bnfrange.example.create_example_bnfrange', 'create_example_bnfrange', ([], {}), '()\n', (600, 602), False, 'from prosodia.base.bnfrange.example import create_example_bnfrange\n')] |
import sys
import os
from PyQt5.QtCore import QCoreApplication
from qgis.core import (QgsProcessing, QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterVectorDestination,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsProcessingParameterDistance,
QgsProcessingParameterEnum,
QgsProcessingParameterString)
from pylusat import interpolate
class InverseDistanceWeighting(QgsProcessingAlgorithm):
INPUT = "INPUT"
INTERPOLATE = "INTERPOLATE"
INTERPOLATE_FIELD = "INTERPOLATE_FIELD"
POWER = "POWER"
NUMBER_NEIGHBOR = "NUMBER_NEIGHBOR"
SEARCH_RADIUS = "SEARCH_RADIUS"
DATA_TYPE = "DATA_TYPE"
OUTPUT_FIELD = "OUTPUT_FIELD"
OUTPUT = "IDW_output"
def tr(self, string, context=''):
if context == '':
context = self.__class__.__name__
return QCoreApplication.translate(context, string)
def group(self):
return self.tr("LUCIS-OPEN for QGIS")
def groupId(self):
return "lucisopen"
def name(self):
return "idw"
def displayName(self):
return self.tr("Inverse Distance Weighting")
def shortHelpString(self):
html_doc = '''
        <p>This function implements an `IDW interpolation`. The power parameter \
dictates how quickly the influence of nearby objects on a given location decays. `idw_cv`, a k-fold cross-validation method, is offered to determine the most appropriate value of the `power` parameter.</p>
<h3>Input Layer</h3>
<p>Input vector layer, to which will be assigned the interpolated value.</p>
<h3>Interpolation Layer</h3>
<p>The input features containing the values to be interpolated.</p>
<h3>Power Parameter for Interpolation</h3>
<p>The exponent of distance.</p>
<p>Controls the significance of surrounding points on the interpolated value. \
A higher power results in less influence from distant points. It can be any real \
number greater than 0, but the most reasonable results will be obtained using \
values from 0.5 to 3. The default is 2.</p>
<h3>Number of Neighbors</h3>
<p>An integer value specifying the number of nearest input sample points to be \
used to perform interpolation. The default is 12 points.</p>
<h3>Search Radius</h3>
<p>Maximum distance used to find neighbors. If not provided, the function will \
search for all neighbors specified by Number of Neighbors.</p>
<h3>Output Data Type</h3>
<p>Choose between <i>integer</i> or <i>float</i> (default) output value.</p>
<h3>Output Field Name</h3>
<p>Name of the column storing distances in the output layer.</p>
<h3>Output Layer</h3>
<p>Output vector layer.</p>
'''
return html_doc
def createInstance(self):
return InverseDistanceWeighting()
def __init__(self):
super().__init__()
self.data_type = (
('Integer', self.tr('Integer')),
('Float', self.tr('Float'))
)
def initAlgorithm(self, config=None):
self.addParameter(
QgsProcessingParameterFeatureSource(
self.INPUT,
self.tr('Input layer'),
types=[QgsProcessing.TypeVectorAnyGeometry]
)
)
self.addParameter(
QgsProcessingParameterFeatureSource(
self.INTERPOLATE,
self.tr('Interpolation layer'),
types=[QgsProcessing.TypeVectorAnyGeometry]
)
)
self.addParameter(
QgsProcessingParameterField(
self.INTERPOLATE_FIELD,
self.tr('Interpolating field'),
parentLayerParameterName=self.INTERPOLATE,
type=QgsProcessingParameterField.Numeric
)
)
power = QgsProcessingParameterNumber(
self.POWER,
self.tr('Power parameter for interpolation'),
type=QgsProcessingParameterNumber.Double,
defaultValue=2,
optional=True
)
power.setMetadata({
'widget_wrapper': {
'decimals': 2
}
})
self.addParameter(power)
self.addParameter(
QgsProcessingParameterNumber(
self.NUMBER_NEIGHBOR,
self.tr('Number of neighbors'),
type=QgsProcessingParameterNumber.Integer,
defaultValue=12,
optional=True
)
)
search_radius = QgsProcessingParameterDistance(
self.SEARCH_RADIUS,
self.tr('Search radius'),
defaultValue=None,
parentParameterName=self.INTERPOLATE,
optional=True
)
search_radius.setMetadata({
'widget_wrapper': {
'decimals': 2
}
})
self.addParameter(search_radius)
self.addParameter(
QgsProcessingParameterEnum(
self.DATA_TYPE,
self.tr('Output data type'),
options=[dtype[1] for dtype in self.data_type],
defaultValue=1,
optional=True
)
)
self.addParameter(
QgsProcessingParameterString(
name=self.OUTPUT_FIELD,
description=self.tr('Output field name'),
)
)
self.addParameter(
QgsProcessingParameterVectorDestination(
self.OUTPUT,
self.tr('Output layer')
)
)
def processAlgorithm(self, parameters, context, feedback):
input_lyr = self.parameterAsVectorLayer(parameters, self.INPUT, context)
interpolate_lyr = self.parameterAsVectorLayer(parameters,
self.INTERPOLATE,
context)
interpolate_clm = self.parameterAsString(parameters,
self.INTERPOLATE_FIELD,
context)
power = self.parameterAsDouble(parameters, self.POWER, context)
n_neighbor = self.parameterAsInt(parameters, self.NUMBER_NEIGHBOR,
context)
search_radius = self.parameterAsDouble(parameters, self.SEARCH_RADIUS,
context)
data_type = self.parameterAsEnum(parameters, self.DATA_TYPE, context)
output_clm = self.parameterAsString(parameters, self.OUTPUT_FIELD, context)
output_file = self.parameterAsOutputLayer(parameters, self.OUTPUT,
context)
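        # The loqlib helper is expected to sit next to this script, so its directory
        # is put on sys.path here to make the deferred import below resolvable.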
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)))
from loqlib import LUCISOpenQGISUtils
feedback.pushInfo(str(search_radius))
input_gdf = LUCISOpenQGISUtils.vector_to_gdf(input_lyr)
interpolate_gdf = LUCISOpenQGISUtils.vector_to_gdf(interpolate_lyr)
data_type = int if data_type == 0 else float
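        # Standard IDW: each input feature gets a distance-decay weighted average
        # (weights ~ 1/d**power) of the interpolating field over its nearest neighbors.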
input_gdf[output_clm] = interpolate.idw(input_gdf, interpolate_gdf,
interpolate_clm, power,
n_neighbor, search_radius,
dtype=data_type)
input_gdf.to_file(output_file)
return {self.OUTPUT: output_file}
| [
"os.path.realpath",
"pylusat.interpolate.idw",
"PyQt5.QtCore.QCoreApplication.translate",
"loqlib.LUCISOpenQGISUtils.vector_to_gdf"
] | [((1009, 1052), 'PyQt5.QtCore.QCoreApplication.translate', 'QCoreApplication.translate', (['context', 'string'], {}), '(context, string)\n', (1035, 1052), False, 'from PyQt5.QtCore import QCoreApplication\n'), ((7184, 7227), 'loqlib.LUCISOpenQGISUtils.vector_to_gdf', 'LUCISOpenQGISUtils.vector_to_gdf', (['input_lyr'], {}), '(input_lyr)\n', (7216, 7227), False, 'from loqlib import LUCISOpenQGISUtils\n'), ((7254, 7303), 'loqlib.LUCISOpenQGISUtils.vector_to_gdf', 'LUCISOpenQGISUtils.vector_to_gdf', (['interpolate_lyr'], {}), '(interpolate_lyr)\n', (7286, 7303), False, 'from loqlib import LUCISOpenQGISUtils\n'), ((7389, 7504), 'pylusat.interpolate.idw', 'interpolate.idw', (['input_gdf', 'interpolate_gdf', 'interpolate_clm', 'power', 'n_neighbor', 'search_radius'], {'dtype': 'data_type'}), '(input_gdf, interpolate_gdf, interpolate_clm, power,\n n_neighbor, search_radius, dtype=data_type)\n', (7404, 7504), False, 'from pylusat import interpolate\n'), ((7041, 7067), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (7057, 7067), False, 'import os\n')] |
#!/usr/bin/python
import sys
sys.dont_write_bytecode = True # No '*.pyc' precompiled files
import os
import re
import platform
import natsort
from lib_main import ListOps
from lib_main import string_vs_regs
from lib_main import append_if_not_in
from lib_parameters import sounds_main_dir
from lib_parameters import path_to_ffmpeg
from lib_parameters import path_to_ffmpeg_on_pc
from lib_parameters import local_audio_folder
if 'Linux' not in platform.system():
sounds_main_dir = local_audio_folder
jump_line_start = '^jump='
jump_line_end = '[1-9][0-9]?$' # So 'jump' value must be 1-99
# Common audio file formats
common_audio_file_exten = '(m4a|mp3|wav)'
# Format/type of file, that can be played
output_end = 'ulaw'
list_extensions = []
list_audiosubdirs = []
def bckgrds_file_patts(ext_name, re_end):
'''
RETURNS:
patterns (list):
Can be used for:
- patterns for filenames
- calculating order for sorting another list
'''
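    # Pattern order doubles as a priority: _custom_sort_aux_files uses the index
    # of the first matching regex as its sort key, so more specific names rank first.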
patterns = []
patterns.append('^' + 'Background' + ext_name + '\.' + re_end + '$')
patterns.append('^' + 'Background' + ext_name + '\.' + re_end + '$')
patterns.append('^' + 'background' + ext_name + '\.' + re_end + '$')
patterns.append('^' + 'Background' + ext_name + '.*' + '\.' + re_end + '$')
patterns.append('^' + 'background' + ext_name + '.*' + '\.' + re_end + '$')
patterns.append('^' + 'Background' + '.*' + '\.' + re_end + '$')
patterns.append('^' + 'background' + '.*' + '\.' + re_end + '$')
patterns.append('^' + 'Background' + '\.' + re_end + '$')
patterns.append('^' + 'background' + '\.' + re_end + '$')
return patterns
def settings_file_patterns(ext_name):
'''
RETURNS:
patterns (list):
Can be used for:
- patterns for filenames
- calculating order for sorting another list
'''
patterns = []
patterns.append('^' + 'Settings' + ext_name + '\.txt' + '$')
patterns.append('^' + 'Settings' + ext_name.lower() + '\.txt' + '$')
patterns.append('^' + 'settings' + ext_name + '\.txt' + '$')
patterns.append('^' + 'settings' + ext_name.lower() + '\.txt' + '$')
patterns.append('^' + 'Settings' + ext_name + '.*' + '\.txt' + '$')
patterns.append('^' + 'Settings' + ext_name.lower() + '.*' + '\.txt' + '$')
patterns.append('^' + 'Settings' + ext_name + '.*' + '\.txt' + '$')
patterns.append('^' + 'Settings' + ext_name.lower() + '.*' + '\.txt' + '$')
patterns.append('^' + 'Settings' + '.*' + '\.txt' + '$')
patterns.append('^' + 'settings' + '.*' + '\.txt' + '$')
patterns.append('^' + 'Settings' + '\.txt' + '$')
patterns.append('^' + 'settings' + '\.txt' + '$')
return patterns
class AudioSubDir:
'''
This represents a sub dir of the dir with custom extension.
It may/does contain:
- audio files in common format
- ulaw files (special audio files used by asterisk)
- text files containing settings
    These correspond to a specific custom extension.
    Common audio files in such a folder will be converted
    to files of type 'ulaw' that are playable by asterisk.
    These will be played when a custom extension is called.
    Also, objects/values calculated for this class will be put
    into Asterisk conf files and dbs.
ARGS:
settings_files_with_jumps (list of strs): Text files with settings.
input_audios (list of strs): Common audio files,
that will be converted to dialog 'ulaw' files.
output_audios (list of strs): Files of type 'ulaw'
that are already present in the audio subdir.
input_backgrounds (list of strs): Common audio files,
that will be converted to background 'ulaw' files.
        output_backgrounds (list of strs): Files of type 'ulaw',
where one of them will be the background sound.
jumps (list): jumps in files
jump (int): Number of audio file,
that should be played again after playing the last audio.
'''
def __init__(
self,
dir_name,
common_audio_dialog_files=None,
common_audio_background_files=None,
ulaw_audio_dialog_files=None,
ulaw_audio_background_files=None,
settings_files=None,
settings_with_jumps=None,
final_ulaw_background_file=None,
final_jump_file=None,
final_jump_value=None,
# valid=True
):
if re.findall(' ', dir_name):
new_dirname = dir_name.replace(' ', '_')
src = os.path.join(sounds_main_dir, dir_name + os.sep)
dst = os.path.join(sounds_main_dir, dir_name.replace(' ', '_') + os.sep)
print('The dir name:', src, 'will be renamed to:', dst, '\n'
'No spaces allowed in the dir name.')
os.rename(src=src, dst=dst)
dir_name = new_dirname
self.dir_name = dir_name
self.common_audio_dialog_files = common_audio_dialog_files
self.common_audio_background_files = common_audio_background_files
self.ulaw_audio_dialog_files = ulaw_audio_dialog_files
self.ulaw_audio_background_files = ulaw_audio_background_files
self.settings_files = settings_files
self.settings_with_jumps = settings_with_jumps
self.final_ulaw_background_file = final_ulaw_background_file
self.final_jump_file = final_jump_file
self.final_jump_value = final_jump_value
self.valid = True
if self.common_audio_dialog_files is None:
self.common_audio_dialog_files = []
if self.common_audio_background_files is None:
self.common_audio_background_files = []
if self.ulaw_audio_dialog_files is None:
self.ulaw_audio_dialog_files = []
if self.ulaw_audio_background_files is None:
self.ulaw_audio_background_files = []
if self.settings_files is None:
self.settings_files = []
if self.settings_with_jumps is None:
self.settings_with_jumps = []
self.current_dir = os.path.join(sounds_main_dir, self.dir_name)
def _find_jump_in_file(self, filepath):
'''
Auxiliary function.
The purpose is to find the 'jump' value in the settings file.
RETURN:
jump: (integer/None):
tells which recording the program jumps to,
after playing the last one.
If a correct value cannot be retrieved, None is returned
'''
# jump = None
obj_setts = ListOps()
list_setts = obj_setts.file_to_list(filepath)
# Number of line where a valid "jump=n" phrase is found.
no_of_line = ListOps(list_setts).regs_vs_list(
regs=[jump_line_start + jump_line_end]
)
if no_of_line == None:
return None
else:
jump = int(re.sub(jump_line_start, '', list_setts[no_of_line]))
return jump
def _custom_sort_aux_files(self, input_list, ext_name, what_files):
''' Auxiliary function.
        It sorts the list of files by an arbitrary, predefined priority.
        Only for files of kind: 'settings', 'common_audio_bckgrs', 'ulaw_audio_bckgrs'.
        For lists of other files, a normal natsort is utilized.
ARGS:
what_files (str): List with which filenames should be sorted.
The options are:
'settings'
'common_audio_bckgrs'
'ulaw_audio_bckgrs'
'''
if what_files == 'settings':
patterns = settings_file_patterns(ext_name)
elif what_files == 'common_audio_bckgrs':
patterns = bckgrds_file_patts(ext_name, re_end=common_audio_file_exten)
elif what_files == 'ulaw_audio_bckgrs':
patterns = bckgrds_file_patts(ext_name, re_end=output_end)
templist = []
for file in input_list:
for reg_index, regex in enumerate(patterns):
if re.findall(patterns[reg_index], file):
templist.append((file, reg_index))
break
templist.sort(key=lambda elem: elem[1])
output_list = [item[0] for item in templist]
return output_list
def _check_order(self, ext_name, list_of_files):
''' Checks if the file names are consecutive.
'''
result = True
for index, filename in enumerate(list_of_files):
if os.path.splitext(filename)[0] != ext_name + str(index+1):
result = False
break
return result
def final_check(self):
''' Function checks if all files and settings are in place
and the audio subfolder may serve as a custom extension.
RETURNS:
self.valid (bool):
'''
self.valid = True
if len(self.ulaw_audio_dialog_files) == 0:
self.valid = False
if self._check_order(
ext_name=self.dir_name,
list_of_files=self.ulaw_audio_dialog_files
) is False:
self.valid = False
if self.final_ulaw_background_file is None:
self.valid = False
return self.valid
def find_final_jump(self):
'''
From the list of all settings files, one will be selected and the jump val taken from it.
If no valid settings file exists or the jump value is incorrect,
a default value is returned.
'''
jump = None
how_many_dialogs = len(self.ulaw_audio_dialog_files)
def _defult_jump_val():
            ''' If no valid jump value can be calculated from settings files,
            or no valid settings file exists,
calculate a default jump according to the number of dialogs.
'''
jump = None
if how_many_dialogs == 0:
jump = None
elif how_many_dialogs == 1:
jump = 1
elif how_many_dialogs == 2:
jump = 1
elif how_many_dialogs == 3:
jump = 2
else:
jump = 3
return jump
if how_many_dialogs in (0, 1) or len(self.settings_with_jumps) == 0:
jump = _defult_jump_val()
else:
for item in self.settings_with_jumps:
if item['jump'] is not None:
if item['jump'] <= how_many_dialogs:
jump = item['jump']
break
jump = _defult_jump_val()
return jump
def evaluate(self):
''' Function operates on real dirs and files on disc,
therefore no arguments are taken or returned.
ARGS: None
RETURNS: None
'''
for sub_obj in os.scandir(self.current_dir):
if sub_obj.is_file():
# obj is a txt file with settings
# which means: its name is suitable for a settings file
if (string_vs_regs(
regs=settings_file_patterns(
self.dir_name),
string=str(sub_obj.name))
) != []:
append_if_not_in(
seq=self.settings_files,
item=sub_obj.name
)
elif re.findall(common_audio_file_exten, sub_obj.name):
regs = bckgrds_file_patts(
ext_name=self.dir_name,
re_end=common_audio_file_exten
)
if (
string_vs_regs(
regs=regs, string=str(sub_obj.name)) != []
):
append_if_not_in(
seq=self.common_audio_background_files,
item=sub_obj.name)
else:
append_if_not_in(
seq=self.common_audio_dialog_files,
item=sub_obj.name
)
elif sub_obj.name.endswith('.ulaw'):
if os.path.isfile(
os.path.join(
sounds_main_dir,
self.dir_name,
sub_obj.name.replace(' ', '_')
)
                ) is False:
os.rename(
src=os.path.join(self.current_dir,
sub_obj.name),
dst=os.path.join(self.current_dir,
sub_obj.name.replace(' ', '_'))
)
if string_vs_regs(
regs=bckgrds_file_patts(
ext_name=str(self.dir_name),
re_end=output_end),
string=str(sub_obj.name)
) != []:
append_if_not_in(
seq=self.ulaw_audio_background_files,
item=sub_obj.name
)
else:
append_if_not_in(
seq=self.ulaw_audio_dialog_files,
item=sub_obj.name
)
append_if_not_in(
seq=self.ulaw_audio_dialog_files,
item=sub_obj.name
)
# Sorting section
templist = natsort.natsorted(
self.common_audio_dialog_files, key=lambda elem: os.path.splitext(elem)[0]
)
self.common_audio_dialog_files = templist
templist = natsort.natsorted(
self.ulaw_audio_dialog_files, key=lambda elem: os.path.splitext(elem)[0]
)
self.ulaw_audio_dialog_files = templist
# for common audio background files
templist = self._custom_sort_aux_files(
input_list=self.common_audio_background_files,
ext_name=self.dir_name,
what_files='common_audio_bckgrs')
self.common_audio_background_files = templist
# for ulaw audio background files
templist = self._custom_sort_aux_files(
input_list=self.ulaw_audio_background_files,
ext_name=self.dir_name,
what_files='ulaw_audio_bckgrs')
self.ulaw_audio_background_files = templist
# for settings files
templist = self._custom_sort_aux_files(
input_list=self.settings_files,
ext_name=self.dir_name,
what_files='settings'
)
self.settings_files = templist
del templist
# Create the dictionary with settings files and their corresponding jump values
for item in self.settings_files:
append_if_not_in(
seq=self.settings_with_jumps,
item={
'file': item,
'jump': self._find_jump_in_file(
filepath=self.current_dir + os.sep + item
)
}
)
self.final_jump_value = self.find_final_jump()
if len(self.ulaw_audio_background_files) > 0:
self.final_ulaw_background_file = self.ulaw_audio_background_files[0].rstrip(
'.ulaw')
self.final_check()
def createulaw(self, common_audio_file, ulaw_audio_file):
''' Makes a ulaw file from a common format audio file.
'''
print("Converting file:", common_audio_file, "to ulaw format:", ulaw_audio_file)
os.system(path_to_ffmpeg + ' -y -i ' + common_audio_file +
' -af "highpass=f=300, lowpass=f=3400" -ar 8000 -ac 1 -ab 64k -f mulaw '
+ ulaw_audio_file)
def del_dialog_ulaws(self):
self.evaluate()
for filename in self.ulaw_audio_dialog_files:
current_file = os.path.join(self.current_dir, filename)
print('A ulaw dialog file:', str(filename), 'is being deleted!')
if os.path.isfile(current_file):
os.remove(current_file)
self.evaluate()
def del_background_ulaws(self):
self.evaluate()
for filename in self.ulaw_audio_background_files:
current_file = os.path.join(self.current_dir, filename)
print('A ulaw background file:', str(filename), 'is being deleted!')
if os.path.isfile(current_file):
os.remove(current_file)
self.evaluate()
def makeulaws(self):
'''
        Function makes ulaw files from common audio files
        (both dialog and background).
        Before creating new ulaw files, existing ulaw dialogs may be deleted
        (existing ulaw background files are always left intact).
        That will happen only for dirs where there are common audio dialog files;
        otherwise, no action will be performed.
        So if there are only ulaws (and maybe a settings txt file),
        these files are assumed to remain and serve as the final dialog files.
        This covers the case when you put in a folder
        containing only already prepared ulaws
        (e.g. the original Lenny files).
        It would make no sense to delete those ulaws
        in order to start converting non-existent common audios.
'''
self.evaluate()
if len(self.common_audio_dialog_files) > 0:
self.del_dialog_ulaws()
self.ulaw_audio_dialog_files = []
for index, filename in enumerate(self.common_audio_dialog_files):
newfilename = self.dir_name + str(index + 1) + '.ulaw'
# '"' is to embrace filenames with spaces
self.createulaw(
common_audio_file='"' + os.path.join(self.current_dir, filename) + '"',
ulaw_audio_file=os.path.join(self.current_dir, newfilename))
append_if_not_in(
seq=self.ulaw_audio_dialog_files,
item=newfilename
)
else:
print('No common format audio files present in the dir:', self.dir_name,
'no dialog ulaws will be deleted or created.')
for filename in self.common_audio_background_files:
# Replace spaces in the output filenames with "_"s.
newfilename = os.path.splitext(
filename.replace(' ', '_'))[0] + '.ulaw'
            if os.path.isfile(os.path.join(self.current_dir, newfilename)) is False:
self.createulaw(
# '"' is to embrace filenames with spaces
common_audio_file='"' + os.path.join(self.current_dir, filename) + '"',
ulaw_audio_file=os.path.join(self.current_dir, newfilename)
)
append_if_not_in(
seq=self.ulaw_audio_background_files,
item=newfilename
)
self.evaluate()
class AudioMainDir:
'''
    Class to represent the directory
    with subdirs where the final custom audios will be made
    out of common audio files and stored.
These are audio files used during dialogs.
So, finally these folders will contain final audio files
and their corresponding setting files.
Eg.: the dir 'Lenny' will contain files like:
'Lenny1.ulaw', 'Lenny2.ulaw', etc.
and one file named like:
    'Settings_Lenny.txt' or 'Settings.txt' or 'settings.txt'.
'''
def __init__(self):
self.sounds_main_dir = sounds_main_dir
self.list_audiosubdirs = self.enumerate_subdirs()
self.list_final_extensions = []
def enumerate_subdirs(self):
for obj in os.scandir(sounds_main_dir):
if obj.is_dir():
klass = AudioSubDir(obj.name)
klass.evaluate()
if str(klass.dir_name) not in [item.dir_name for item in list_audiosubdirs]:
list_audiosubdirs.append(klass)
return list_audiosubdirs
def del_ulaws(self):
for item in list_audiosubdirs:
item.del_dialog_ulaws()
item.del_background_ulaws()
def make_ulaws(self):
for item in list_audiosubdirs:
item.makeulaws()
def final_extensions(self):
'''
        Function operates on elements of the list from evaluation,
not on real dirs and files.
It calculates/validates the settings retrieved
from each dir with custom audio files.
        Also it validates that the 'ulaw' files are in order.
        E.g. if the checked folder contains only the 3 'ulaw' files
        'Lenny1.ulaw', 'Lenny2.ulaw', 'Lenny4.ulaw',
        they should not be valid as 'Lenny3.ulaw' is missing.
RETURNS:
final_settings (list): List of classes type 'FinalExtension'.
'''
# list_audiosubdirs = self.enumerate_subdirs()
# Add a valid extension to the final class
for item in self.list_audiosubdirs:
if item.valid is True:
self.list_final_extensions.append(
FinalExtension(
ext_name=item.dir_name,
how_many_dialogs=len(item.ulaw_audio_dialog_files),
jump=item.final_jump_value,
background=item.final_ulaw_background_file
))
return self.list_final_extensions
class FinalExtension:
''' This class will be passed to the script,
that writes settings to asterisk config files and dbs.
PROPERTIES:
ext_name (str):
how_many_dialogs (int): Should be less than 99
        jump (int): To which recording the call jumps after the last one is played.
background (str):
Name (without extension and path) of the background file.
As of now it is suggested to use a silence-like background recording.
Example: below are listed properties of 2 instances:
ext_name = 'Lenny'
how_many_dialogs = 16
jump = 3
        background = 'backgroundnoise'
ext_name = 'Lenka'
how_many_dialogs = 12
jump = 4
        background = 'backgrounLenka'
'''
def __init__(
self,
ext_name,
how_many_dialogs,
jump,
background
):
self.ext_name = ext_name
self.how_many_dialogs = how_many_dialogs
self.jump = jump
self.background = background
def main():
print('Action on custom exten. conf. files.')
if __name__ == '__main__':
main()
| [
"os.rename",
"lib_main.append_if_not_in",
"os.scandir",
"os.path.join",
"os.path.splitext",
"os.path.isfile",
"platform.system",
"re.sub",
"lib_main.ListOps",
"os.system",
"re.findall",
"os.remove"
] | [((459, 476), 'platform.system', 'platform.system', ([], {}), '()\n', (474, 476), False, 'import platform\n'), ((4862, 4887), 're.findall', 're.findall', (['""" """', 'dir_name'], {}), "(' ', dir_name)\n", (4872, 4887), False, 'import re\n'), ((6526, 6570), 'os.path.join', 'os.path.join', (['sounds_main_dir', 'self.dir_name'], {}), '(sounds_main_dir, self.dir_name)\n', (6538, 6570), False, 'import os\n'), ((7030, 7039), 'lib_main.ListOps', 'ListOps', ([], {}), '()\n', (7037, 7039), False, 'from lib_main import ListOps\n'), ((11402, 11430), 'os.scandir', 'os.scandir', (['self.current_dir'], {}), '(self.current_dir)\n', (11412, 11430), False, 'import os\n'), ((16777, 16935), 'os.system', 'os.system', (['(path_to_ffmpeg + \' -y -i \' + common_audio_file +\n \' -af "highpass=f=300, lowpass=f=3400" -ar 8000 -ac 1 -ab 64k -f mulaw \' +\n ulaw_audio_file)'], {}), '(path_to_ffmpeg + \' -y -i \' + common_audio_file +\n \' -af "highpass=f=300, lowpass=f=3400" -ar 8000 -ac 1 -ab 64k -f mulaw \' +\n ulaw_audio_file)\n', (16786, 16935), False, 'import os\n'), ((21001, 21028), 'os.scandir', 'os.scandir', (['sounds_main_dir'], {}), '(sounds_main_dir)\n', (21011, 21028), False, 'import os\n'), ((4962, 5010), 'os.path.join', 'os.path.join', (['sounds_main_dir', '(dir_name + os.sep)'], {}), '(sounds_main_dir, dir_name + os.sep)\n', (4974, 5010), False, 'import os\n'), ((5239, 5266), 'os.rename', 'os.rename', ([], {'src': 'src', 'dst': 'dst'}), '(src=src, dst=dst)\n', (5248, 5266), False, 'import os\n'), ((17099, 17139), 'os.path.join', 'os.path.join', (['self.current_dir', 'filename'], {}), '(self.current_dir, filename)\n', (17111, 17139), False, 'import os\n'), ((17234, 17262), 'os.path.isfile', 'os.path.isfile', (['current_file'], {}), '(current_file)\n', (17248, 17262), False, 'import os\n'), ((17483, 17523), 'os.path.join', 'os.path.join', (['self.current_dir', 'filename'], {}), '(self.current_dir, filename)\n', (17495, 17523), False, 'import os\n'), ((17622, 17650), 'os.path.isfile', 'os.path.isfile', (['current_file'], {}), '(current_file)\n', (17636, 17650), False, 'import os\n'), ((7185, 7204), 'lib_main.ListOps', 'ListOps', (['list_setts'], {}), '(list_setts)\n', (7192, 7204), False, 'from lib_main import ListOps\n'), ((7378, 7429), 're.sub', 're.sub', (['jump_line_start', '""""""', 'list_setts[no_of_line]'], {}), "(jump_line_start, '', list_setts[no_of_line])\n", (7384, 7429), False, 'import re\n'), ((8500, 8537), 're.findall', 're.findall', (['patterns[reg_index]', 'file'], {}), '(patterns[reg_index], file)\n', (8510, 8537), False, 'import re\n'), ((17281, 17304), 'os.remove', 'os.remove', (['current_file'], {}), '(current_file)\n', (17290, 17304), False, 'import os\n'), ((17669, 17692), 'os.remove', 'os.remove', (['current_file'], {}), '(current_file)\n', (17678, 17692), False, 'import os\n'), ((19142, 19210), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.ulaw_audio_dialog_files', 'item': 'newfilename'}), '(seq=self.ulaw_audio_dialog_files, item=newfilename)\n', (19158, 19210), False, 'from lib_main import append_if_not_in\n'), ((19690, 19717), 'os.path.isfile', 'os.path.isfile', (['newfilename'], {}), '(newfilename)\n', (19704, 19717), False, 'import os\n'), ((20035, 20107), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.ulaw_audio_background_files', 'item': 'newfilename'}), '(seq=self.ulaw_audio_background_files, item=newfilename)\n', (20051, 20107), False, 'from lib_main import append_if_not_in\n'), ((8974, 9000), 'os.path.splitext', 
'os.path.splitext', (['filename'], {}), '(filename)\n', (8990, 9000), False, 'import os\n'), ((11871, 11931), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.settings_files', 'item': 'sub_obj.name'}), '(seq=self.settings_files, item=sub_obj.name)\n', (11887, 11931), False, 'from lib_main import append_if_not_in\n'), ((12047, 12096), 're.findall', 're.findall', (['common_audio_file_exten', 'sub_obj.name'], {}), '(common_audio_file_exten, sub_obj.name)\n', (12057, 12096), False, 'import re\n'), ((14678, 14700), 'os.path.splitext', 'os.path.splitext', (['elem'], {}), '(elem)\n', (14694, 14700), False, 'import os\n'), ((14871, 14893), 'os.path.splitext', 'os.path.splitext', (['elem'], {}), '(elem)\n', (14887, 14893), False, 'import os\n'), ((19080, 19123), 'os.path.join', 'os.path.join', (['self.current_dir', 'newfilename'], {}), '(self.current_dir, newfilename)\n', (19092, 19123), False, 'import os\n'), ((19955, 19998), 'os.path.join', 'os.path.join', (['self.current_dir', 'newfilename'], {}), '(self.current_dir, newfilename)\n', (19967, 19998), False, 'import os\n'), ((12500, 12575), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.common_audio_background_files', 'item': 'sub_obj.name'}), '(seq=self.common_audio_background_files, item=sub_obj.name)\n', (12516, 12575), False, 'from lib_main import append_if_not_in\n'), ((12705, 12776), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.common_audio_dialog_files', 'item': 'sub_obj.name'}), '(seq=self.common_audio_dialog_files, item=sub_obj.name)\n', (12721, 12776), False, 'from lib_main import append_if_not_in\n'), ((13948, 14021), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.ulaw_audio_background_files', 'item': 'sub_obj.name'}), '(seq=self.ulaw_audio_background_files, item=sub_obj.name)\n', (13964, 14021), False, 'from lib_main import append_if_not_in\n'), ((14185, 14254), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.ulaw_audio_dialog_files', 'item': 'sub_obj.name'}), '(seq=self.ulaw_audio_dialog_files, item=sub_obj.name)\n', (14201, 14254), False, 'from lib_main import append_if_not_in\n'), ((14381, 14450), 'lib_main.append_if_not_in', 'append_if_not_in', ([], {'seq': 'self.ulaw_audio_dialog_files', 'item': 'sub_obj.name'}), '(seq=self.ulaw_audio_dialog_files, item=sub_obj.name)\n', (14397, 14450), False, 'from lib_main import append_if_not_in\n'), ((18995, 19035), 'os.path.join', 'os.path.join', (['self.current_dir', 'filename'], {}), '(self.current_dir, filename)\n', (19007, 19035), False, 'import os\n'), ((19870, 19910), 'os.path.join', 'os.path.join', (['self.current_dir', 'filename'], {}), '(self.current_dir, filename)\n', (19882, 19910), False, 'import os\n'), ((13349, 13393), 'os.path.join', 'os.path.join', (['self.current_dir', 'sub_obj.name'], {}), '(self.current_dir, sub_obj.name)\n', (13361, 13393), False, 'import os\n')] |
import time, logging
from concurrent.futures import ThreadPoolExecutor
from django.db import models
from django.core.exceptions import ValidationError
from core.models import TrackerModel
from .exceptions import TrackingActivityException
from .tasks import import_data
logger = logging.getLogger(__name__)
class RegisterUploaded(TrackerModel):
CREATED = 'created'
PROCESSING = 'processing'
SUCCESS = 'success'
ERROR = 'error'
CANCELED = 'canceled'
STATUS_CHOICES = (
(CREATED, 'Creado'),
(PROCESSING, 'Procesando'),
(SUCCESS, 'Finalizado'),
(ERROR, 'Error'),
(CANCELED, 'Cancelado')
)
file = models.FileField(upload_to='uploads/registers/%Y/%m/%d/', verbose_name='Archivo')
status = models.CharField(max_length=15, default=CREATED, choices=STATUS_CHOICES, verbose_name='Estado')
result = models.TextField(null=True, verbose_name='Resultado')
class Meta:
verbose_name = "Registro subido"
verbose_name_plural = "Registros subidos"
def __str__(self):
return 'Archivo: %s' % self.file.name.split('/')[-1]
def clean(self):
if self.status == RegisterUploaded.PROCESSING:
raise ValidationError('Actualmente ya se esta procesando el archivo')
def save(self, *args, **kwargs):
logger.info('Guardando registro subido...')
super().save(*args, **kwargs)
def processing_data(self, *args, **kwargs):
# slow operation
self.status = self.PROCESSING
self.save()
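        # The heavy import itself is handed off to an async worker queue
        # (import_data.delay is a Celery-style call); marking the row PROCESSING
        # first lets clean() reject a concurrent re-run of the same file.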
import_data.delay(self.id)
| [
"logging.getLogger",
"django.db.models.TextField",
"django.core.exceptions.ValidationError",
"django.db.models.FileField",
"django.db.models.CharField"
] | [((281, 308), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (298, 308), False, 'import time, logging\n'), ((672, 758), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': '"""uploads/registers/%Y/%m/%d/"""', 'verbose_name': '"""Archivo"""'}), "(upload_to='uploads/registers/%Y/%m/%d/', verbose_name=\n 'Archivo')\n", (688, 758), False, 'from django.db import models\n'), ((767, 866), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'default': 'CREATED', 'choices': 'STATUS_CHOICES', 'verbose_name': '"""Estado"""'}), "(max_length=15, default=CREATED, choices=STATUS_CHOICES,\n verbose_name='Estado')\n", (783, 866), False, 'from django.db import models\n'), ((876, 929), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)', 'verbose_name': '"""Resultado"""'}), "(null=True, verbose_name='Resultado')\n", (892, 929), False, 'from django.db import models\n'), ((1222, 1285), 'django.core.exceptions.ValidationError', 'ValidationError', (['"""Actualmente ya se esta procesando el archivo"""'], {}), "('Actualmente ya se esta procesando el archivo')\n", (1237, 1285), False, 'from django.core.exceptions import ValidationError\n')] |
from django.utils.timezone import now
from rest_framework import serializers
from rest_framework.serializers import CreateOnlyDefault
from rest_framework.fields import CurrentUserDefault
from pfb_network_connectivity.models import PFBModel
class PFBModelSerializer(serializers.ModelSerializer):
"""Base serializer for PFBModel
This base serializer should be used for any serializer that needs
to serialize a model that inherites from ``PFBModel``. It automatically
handles setting ``created_by`` and ``modified_by``
"""
def __init__(self, *args, **kwargs):
super(PFBModelSerializer, self).__init__(*args, **kwargs)
self.request = self.context.get('request')
uuid = serializers.UUIDField(read_only=True)
createdAt = serializers.DateTimeField(default=CreateOnlyDefault(now), read_only=True,
source='created_at')
modifiedAt = serializers.DateTimeField(default=now, read_only=True,
source='modified_at')
createdBy = serializers.HiddenField(default=CreateOnlyDefault(CurrentUserDefault()),
source='created_by')
modifiedBy = serializers.HiddenField(default=CurrentUserDefault(), source='modified_by')
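    # HiddenField keeps these audit fields out of the serialized payload;
    # CreateOnlyDefault applies its default only on create, so created_by is
    # preserved on updates while modified_by is refreshed on every save.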
class Meta:
model = PFBModel
| [
"rest_framework.serializers.UUIDField",
"rest_framework.serializers.CreateOnlyDefault",
"rest_framework.fields.CurrentUserDefault",
"rest_framework.serializers.DateTimeField"
] | [((716, 753), 'rest_framework.serializers.UUIDField', 'serializers.UUIDField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (737, 753), False, 'from rest_framework import serializers\n'), ((924, 1000), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'default': 'now', 'read_only': '(True)', 'source': '"""modified_at"""'}), "(default=now, read_only=True, source='modified_at')\n", (949, 1000), False, 'from rest_framework import serializers\n'), ((804, 826), 'rest_framework.serializers.CreateOnlyDefault', 'CreateOnlyDefault', (['now'], {}), '(now)\n', (821, 826), False, 'from rest_framework.serializers import CreateOnlyDefault\n'), ((1244, 1264), 'rest_framework.fields.CurrentUserDefault', 'CurrentUserDefault', ([], {}), '()\n', (1262, 1264), False, 'from rest_framework.fields import CurrentUserDefault\n'), ((1111, 1131), 'rest_framework.fields.CurrentUserDefault', 'CurrentUserDefault', ([], {}), '()\n', (1129, 1131), False, 'from rest_framework.fields import CurrentUserDefault\n')] |
from clustaar.schemas.v1 import MATCH_INTENT_CONDITION
from clustaar.schemas.models import MatchIntentCondition, MatchIntentConditionIntent
import pytest
@pytest.fixture
def data():
return {
"type": "match_intent",
"intent": {"type": "intent", "id": "a1" * 12, "name": "an intent"},
}
@pytest.fixture
def condition():
condition = MatchIntentCondition(intent_id="a1" * 12)
condition.intent = MatchIntentConditionIntent(id="a1" * 12, name="an intent")
return condition
class TestLoad(object):
def test_must_return_a_condition(self, data, mapper):
result = mapper.load(data, MATCH_INTENT_CONDITION)
assert isinstance(result, MatchIntentCondition)
assert result.intent_id == "a1" * 12
class TestDump(object):
def test_returns_a_dict(self, condition, data, mapper):
result = mapper.dump(condition, MATCH_INTENT_CONDITION)
assert result == data
| [
"clustaar.schemas.models.MatchIntentConditionIntent",
"clustaar.schemas.models.MatchIntentCondition"
] | [((362, 403), 'clustaar.schemas.models.MatchIntentCondition', 'MatchIntentCondition', ([], {'intent_id': "('a1' * 12)"}), "(intent_id='a1' * 12)\n", (382, 403), False, 'from clustaar.schemas.models import MatchIntentCondition, MatchIntentConditionIntent\n'), ((427, 485), 'clustaar.schemas.models.MatchIntentConditionIntent', 'MatchIntentConditionIntent', ([], {'id': "('a1' * 12)", 'name': '"""an intent"""'}), "(id='a1' * 12, name='an intent')\n", (453, 485), False, 'from clustaar.schemas.models import MatchIntentCondition, MatchIntentConditionIntent\n')] |
#!/usr/bin/env python2
import copy
import moveit_commander
import numpy as np
import pickle
import rospy
import sys
from threading import Thread
import time
from geometry_msgs.msg import Pose, PoseStamped
from moveit_msgs.msg import RobotTrajectory, DisplayTrajectory
from sensor_msgs.msg import JointState
from std_msgs.msg import Float64, Int32
from trajectory_msgs.msg import JointTrajectory
class MoveitGazeboInterface():
def __init__(self, loop_rate=10, pose_target = None):
self._robot = moveit_commander.RobotCommander()
self._scene = moveit_commander.PlanningSceneInterface()
self._group = moveit_commander.MoveGroupCommander("manipulator")
self._group.set_planner_id("SBL")
self._planners = ['SBL', 'EST','LBKPIECE','BKPIECE','KPIECE',
'RRT','RRTConnect','RRTstar','TRRT','PRM',
'PRMstar','FMT','BFMT','PDST','STRIDE','BiTRRT',
'LBTRRT','BiEST','ProjEST','LazyPRM','LazyPRMstar',
'SPARS','SPARStwo']
self._group.set_planning_time(10.0)
self._group.set_goal_tolerance(0.1)
self._joint_names = ['dorna_base_to_dorna_arm_0', 'dorna_arm_0_to_dorna_arm_1', 'dorna_arm_1_to_dorna_arm_2', 'dorna_arm_2_to_dorna_arm_3', 'dorna_arm_3_to_dorna_arm_4']
self._rate = rospy.Rate(loop_rate)
self._n_joints = 5
self._target_names = ["allZeros", "home", "doorPose", "ElbowDown"]
self._display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path', DisplayTrajectory, queue_size = 10)
#TODO: add input agrs when running this node
# self.pub = rospy.Publisher('/dorna/dorna_effort_controller/command', JointTrajectory, queue_size=10)
self.plan = None
self.pub = rospy.Publisher('/dorna/dorna_position_controller/command', JointTrajectory, queue_size=10)
def run_human_interface(self):
try:
while not rospy.is_shutdown():
self._move = str(raw_input('Enter r for random valid position\n\rEnter d for open door beahvior\n\rEnter h to draw heart\n\rHere: '))
if self._move == "r":
pass
# self.random_pose()
elif self._move == "d":
self.open_door()
elif self._move == "h":
# self.draw_heart()
pass
else:
rospy.loginfo('Invalid Input')
pass
except KeyboardInterrupt:
pass
def run_testing_methods(self):
try:
rospy.loginfo(self._group.has_end_effector_link())
self.go_home()
time.sleep(2.0)
self.go_named_target(name='ElbowDown')
time.sleep(2.0)
rospy.loginfo("TESTING ORIENTATION_TARGET: roll by 90 deg")
des_pose = self._group.get_current_pose()
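            # Quaternion (x, y, z, w) = (sqrt(0.5), 0, 0, sqrt(0.5)) is a 90 degree
            # rotation about X, i.e. a pure roll of the current end-effector pose.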
des_pose.pose.orientation.x = np.sqrt(0.5)
des_pose.pose.orientation.w = np.sqrt(0.5)
response = str(raw_input("Did test work? "))
self.go_pose_target(des_pose)
if response == '':
pass
else:
raise KeyboardInterrupt
# self.go_home()
# rospy.loginfo("TESTING ORIENTATION_TARGET: roll by 90 deg")
# self.go_orientation_target([np.sqrt(0.5), 0, 0, np.sqrt(0.5)])
# rospy.loginfo("TESTING ORIENTATION_TARGET: pitch by 90 deg")
# self.go_orientation_target([0, np.sqrt(0.5), 0, np.sqrt(0.5)])
# response = str(raw_input("Did ORIENTATION_TARGET test work? "))
# if response == '':
# pass
# else:
# raise KeyboardInterrupt
except KeyboardInterrupt:
raise
# def draw_heart(self):
# thetas = pickle.load(open('/home/rit/catkin_ws/src/dorna_robot/draw_heart_thetas.p','rb'))
# for theta in thetas:
# if theta[0] > np.pi:
# theta[0] -= 2*np.pi
# if theta[1] > np.pi:
# theta[1] -= 2*np.pi
# if theta[2] > np.pi:
# theta[2]-= np.pi
# # theta = [theta[0]-2.*np.pi, theta[1], theta[2]-np.pi, theta[3], theta[4]]
# rospy.loginfo(theta)
# traj = JointState()
# traj.name = self._joint_names
# traj.position = theta
# self._group.plan(joints=traj)
# self._group.go(wait=True)
def open_door(self):
traj = JointState()
traj.name = self._joint_names
positions = [[0, 1.57, 1.57, 0, 0], [0, -1, 0, 0, 0]]
for position in positions:
traj.position = position
self._group.plan(joints=traj)
self._group.go(wait=True)
def visualize_plan_rviz(self):
display_trajectory = DisplayTrajectory()
display_trajectory.trajectory_start = self._robot.get_current_state()
display_trajectory.trajectory.append(self.plan)
self._display_trajectory_publisher.publish(display_trajectory)
rospy.sleep(5)
#################################
#### Set movegroup_commander ####
#################################
# def set_joint_value_target(self, arg1, arg2 = None, arg3 = None):
def go(self):
plan = self._group.plan()
print(plan)
self._group.go(wait=True)
def go_home(self):
self.go_named_target('home')
def go_rpy_target(self, rpy):
"""rpy should be a list"""
try:
if type(rpy) == list and len(rpy) == 3:
self._group.set_rpy_target(rpy)
self.go()
else:
raise TypeError
except TypeError:
raise
def go_orientation_target(self, q):
"""q should be a list"""
try:
if type(q) == list and len(q) == 4:
self._group.set_orientation_target(q)
self.go()
else:
raise TypeError
except TypeError:
raise
def go_position_target(self, xyz):
"""xyz should be a list"""
try:
if type(xyz) == list and len(xyz) == 3:
self._group.set_position_target(xyz)
self.go()
else:
raise TypeError
except TypeError:
raise
def go_pose_target(self, pose):
""" Set the pose of the end-effector, if one is available. The expected input is a Pose message, a PoseStamped message or a list of 6 floats:"""
""" [x, y, z, rot_x, rot_y, rot_z] or a list of 7 floats [x, y, z, qx, qy, qz, qw] """
try:
if type(pose) == Pose or type(pose) == list or type(pose) == PoseStamped:
self._group.set_pose_target(pose)
self.go()
else:
raise TypeError
except TypeError:
raise
def go_pose_targets(self, poses):
try:
if type(poses) == list and type(type(poses[0])) == Pose:
self._group.set_pose_targets(poses)
self.go()
else:
raise TypeError
except TypeError:
raise
def go_shift_pose_target(self, axis, value):
""" Get the current pose of the end effector, add value to the corresponding axis (0..5: X, Y, Z, R, P, Y) and set the new pose as the pose target """
try:
if type(axis) == int:
self._group.shift_pose_target(axis, value)
self.go()
else:
raise TypeError
except TypeError:
raise
def go_random_target(self):
self._group.set_random_target()
self.go()
def go_named_target(self, name):
rospy.loginfo("Called go {}".format(name))
if name in self._target_names:
self._group.set_named_target(name)
self.go()
if __name__=="__main__":
try:
rospy.init_node('moveit_gazebo_wrapper', anonymous = True)
rospy.loginfo('Starting node "moveit_gazebo_wrapper"')
moveit_commander.roscpp_initialize(sys.argv)
m_g_w = MoveitGazeboInterface()
m_g_w.run_testing_methods()
# m_g_w.run()
# runner = Thread(target = m_g_w.run_human_interface)
# runner.start()
# rospy.spin()
# runner.join()
except rospy.ROSInterruptException: pass
| [
"moveit_commander.RobotCommander",
"numpy.sqrt",
"rospy.is_shutdown",
"sensor_msgs.msg.JointState",
"rospy.init_node",
"moveit_commander.PlanningSceneInterface",
"moveit_msgs.msg.DisplayTrajectory",
"time.sleep",
"moveit_commander.MoveGroupCommander",
"rospy.Rate",
"moveit_commander.roscpp_initialize",
"rospy.sleep",
"rospy.Publisher",
"rospy.loginfo"
] | [((500, 533), 'moveit_commander.RobotCommander', 'moveit_commander.RobotCommander', ([], {}), '()\n', (531, 533), False, 'import moveit_commander\n'), ((550, 591), 'moveit_commander.PlanningSceneInterface', 'moveit_commander.PlanningSceneInterface', ([], {}), '()\n', (589, 591), False, 'import moveit_commander\n'), ((608, 658), 'moveit_commander.MoveGroupCommander', 'moveit_commander.MoveGroupCommander', (['"""manipulator"""'], {}), "('manipulator')\n", (643, 658), False, 'import moveit_commander\n'), ((1285, 1306), 'rospy.Rate', 'rospy.Rate', (['loop_rate'], {}), '(loop_rate)\n', (1295, 1306), False, 'import rospy\n'), ((1437, 1526), 'rospy.Publisher', 'rospy.Publisher', (['"""/move_group/display_planned_path"""', 'DisplayTrajectory'], {'queue_size': '(10)'}), "('/move_group/display_planned_path', DisplayTrajectory,\n queue_size=10)\n", (1452, 1526), False, 'import rospy\n'), ((1712, 1807), 'rospy.Publisher', 'rospy.Publisher', (['"""/dorna/dorna_position_controller/command"""', 'JointTrajectory'], {'queue_size': '(10)'}), "('/dorna/dorna_position_controller/command', JointTrajectory,\n queue_size=10)\n", (1727, 1807), False, 'import rospy\n'), ((3901, 3913), 'sensor_msgs.msg.JointState', 'JointState', ([], {}), '()\n', (3911, 3913), False, 'from sensor_msgs.msg import JointState\n'), ((4181, 4200), 'moveit_msgs.msg.DisplayTrajectory', 'DisplayTrajectory', ([], {}), '()\n', (4198, 4200), False, 'from moveit_msgs.msg import RobotTrajectory, DisplayTrajectory\n'), ((4390, 4404), 'rospy.sleep', 'rospy.sleep', (['(5)'], {}), '(5)\n', (4401, 4404), False, 'import rospy\n'), ((6701, 6757), 'rospy.init_node', 'rospy.init_node', (['"""moveit_gazebo_wrapper"""'], {'anonymous': '(True)'}), "('moveit_gazebo_wrapper', anonymous=True)\n", (6716, 6757), False, 'import rospy\n'), ((6762, 6816), 'rospy.loginfo', 'rospy.loginfo', (['"""Starting node "moveit_gazebo_wrapper\\""""'], {}), '(\'Starting node "moveit_gazebo_wrapper"\')\n', (6775, 6816), False, 'import rospy\n'), ((6819, 6863), 'moveit_commander.roscpp_initialize', 'moveit_commander.roscpp_initialize', (['sys.argv'], {}), '(sys.argv)\n', (6853, 6863), False, 'import moveit_commander\n'), ((2399, 2414), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (2409, 2414), False, 'import time\n'), ((2460, 2475), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (2470, 2475), False, 'import time\n'), ((2479, 2538), 'rospy.loginfo', 'rospy.loginfo', (['"""TESTING ORIENTATION_TARGET: roll by 90 deg"""'], {}), "('TESTING ORIENTATION_TARGET: roll by 90 deg')\n", (2492, 2538), False, 'import rospy\n'), ((2617, 2629), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2624, 2629), True, 'import numpy as np\n'), ((2663, 2675), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (2670, 2675), True, 'import numpy as np\n'), ((1857, 1876), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1874, 1876), False, 'import rospy\n'), ((2207, 2237), 'rospy.loginfo', 'rospy.loginfo', (['"""Invalid Input"""'], {}), "('Invalid Input')\n", (2220, 2237), False, 'import rospy\n')] |
import unittest
from Cheetah import SettingsManager
class SettingsManagerTests(unittest.TestCase):
def test_mergeDictionaries(self):
left = {'foo': 'bar', 'abc': {'a': 1, 'b': 2, 'c': (3,)}}
right = {'xyz': (10, 9)}
expect = {
'xyz': (10, 9), 'foo': 'bar', 'abc': {'a': 1, 'c': (3,), 'b': 2}
}
result = SettingsManager.mergeNestedDictionaries(left, right)
self.assertEqual(result, expect)
| [
"Cheetah.SettingsManager.mergeNestedDictionaries"
] | [((362, 414), 'Cheetah.SettingsManager.mergeNestedDictionaries', 'SettingsManager.mergeNestedDictionaries', (['left', 'right'], {}), '(left, right)\n', (401, 414), False, 'from Cheetah import SettingsManager\n')] |
from math import exp, log
from pyfluka.base import IllegalArgumentError
from pyfluka.plugins.BasePlugin import BasePlugin
from pyfluka.reader import _dh
from pyfluka.utils import PhysicsQuantities as PQ
class TimeEvolution(BasePlugin):
"""
Plugin for time evolution of production yields
"""
def __init__(self, **kwargs):
if "irr_time" not in kwargs or "cool_time" not in kwargs:
raise IllegalArgumentError("Unable to instantiate TimeEvolution. Either irr_time or cool_time missing.")
self.cool_time = self._parse_config(kwargs.pop('cool_time'))
self.irr_time = self._parse_config(kwargs.pop('irr_time'))
def invoke(self, data):
"""
Call method. Initiates calculation
:param dict data: dictionary holding data to be processed
:return:
"""
for det in data.keys():
self._apply_coefficient(data[det])
def _apply_coefficient(self, data):
"""
Application of time evolution coefficients on production yield
:param dict data: scoring region specific dictionaries with isotopes as key
:return:
"""
for k, val in data.iteritems():
val["ProductionYield"] *= self._calculate_evolution_coeff(k)
def _calculate_evolution_coeff(self, isotope):
"""
Simple time evolution based on exp. decay and build up model.
:param Isotope isotope: isotope
:return: time evolution factor
:rtype: float
"""
half_life = _dh._hl[isotope]
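        # Saturation build-up during irradiation, then exponential decay while
        # cooling: N ~ (1 - exp(-ln2*t_irr/T_half)) * exp(-ln2*t_cool/T_half)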
return max(0., (1. - exp(-log(2.) * self.irr_time / half_life)) * exp(-log(2.) * self.cool_time / half_life))
@staticmethod
def _parse_config(time):
ts = time.split(" ")
return PQ.Time(float(ts[0]), 0., PQ.Time._get_conversion_dict()[ts[1]])
| [
"pyfluka.base.IllegalArgumentError",
"pyfluka.utils.PhysicsQuantities.Time._get_conversion_dict",
"math.log"
] | [((424, 532), 'pyfluka.base.IllegalArgumentError', 'IllegalArgumentError', (['"""Unable to instantiate TimeEvolution. Either irr_time or cool_time missing."""'], {}), "(\n 'Unable to instantiate TimeEvolution. Either irr_time or cool_time missing.'\n )\n", (444, 532), False, 'from pyfluka.base import IllegalArgumentError\n'), ((1790, 1820), 'pyfluka.utils.PhysicsQuantities.Time._get_conversion_dict', 'PQ.Time._get_conversion_dict', ([], {}), '()\n', (1818, 1820), True, 'from pyfluka.utils import PhysicsQuantities as PQ\n'), ((1633, 1641), 'math.log', 'log', (['(2.0)'], {}), '(2.0)\n', (1636, 1641), False, 'from math import exp, log\n'), ((1588, 1596), 'math.log', 'log', (['(2.0)'], {}), '(2.0)\n', (1591, 1596), False, 'from math import exp, log\n')] |
"""
FFMpeg To Thumbnails
====================
This module defines the FFMpeg job that transforms a video file into a sequence of thumbnail images.
Those thumbnails are more suitable for analysis.
.. autosummary::
FFMpegThumbnailsJob
NumberOfFilesJob
"""
import os
import subprocess
import time
from ..job import Job
def extract_thumbnails(video_file_name, output_width, output_folder):
"""Extract the thumbnails using FFMpeg.
:param video_file_name: name of the video file to process
:param output_width: width of the resized images
:param output_folder: folder where the thumbnails are stored
"""
args = ['ffmpeg',
'-i', os.path.abspath(video_file_name),
'-r', '1',
'-vf', 'scale=%d:-1' % output_width,
'-f', 'image2', '%s/frame-%%05d.png' % os.path.abspath(output_folder)]
proc = subprocess.Popen(args)
return_code = proc.poll()
while return_code is None:
time.sleep(1)
return_code = proc.poll()
return
class FFMpegThumbnailsJob(Job):
"""
Job for extracting thumbnails from a video. This job may be the root of a
workflow as it does not expect any *workflow input*.
.. rubric:: Runtime parameters
* See :py:func:`FFMpegThumbnailsJob.__init__` for details.
.. rubric:: Workflow output
* A list of absolute filenames that specify the generated thumbnails. This list is sorted.
.. note::
The Job is designed to have a relocatable set of files. The input files are governed by the ``video_location``
parameter, which is not cached (but the video filename is). The generated files are also relative to the
thumbnail root, which is not cached either.
"""
name = "ffmpeg_thumbnails"
#: Cached inputs:
#:
#: * ``video_filename`` base name of the video file
#: * ``video_width`` width of the generated thumbnails
#: * ``video_fps`` framerate of the thumbnails
#: * ``thumbnails_location`` location of the thumbnails relative to the thumbnail root.
attributes_to_serialize = ['video_filename',
'video_fps',
'video_width',
'thumbnails_location']
#: Cached outputs:
#:
#: * ``thumbnail_files`` list of generated files, relative to the thumbnail root
outputs_to_cache = ['thumbnail_files']
def get_thumbnail_root(self):
"""Indicates the root where files are stored. Currently in the parent folder of the json files"""
return os.path.abspath(os.path.join(os.path.dirname(self.json_filename), os.pardir, 'generated_thumbnails'))
def get_thumbnail_location(self):
"""Returns the location where the thumbnails will/are stored, relative to the thumbnail root directory."""
return os.path.splitext(os.path.basename(self.video_filename))[0]
def __init__(self,
*args,
**kwargs):
"""
The class instantiation accepts the following arguments.
:param str video_filename: the name of the video file to process without the folder. This parameter is
mandatory.
:param str video_location: the location of the video file. This parameter is
mandatory, but is not cached.
:param str thumbnail_root: absolute location of the root folder containing the thumbnails. This is not
cached and defaults to :py:func:`get_thumbnail_root`.
:param str thumbnails_location: location of the generated thumbnails relative to the ``thumbnail_root``.
Defaults to the value given by :py:func:`get_thumbnail_location`.
:param int video_width: the width of the generated thumbnails. Defaults to `640`.
:param int video_fps: how many frames per second to extract. Defaults to `1`.
"""
super(FFMpegThumbnailsJob, self).__init__(*args, **kwargs)
if self.video_filename is None:
raise RuntimeError("The video file name cannot be empty")
assert('video_location' in kwargs)
# the cast to `unicode` is necessary because the json files are stored in unicode, and the
# comparison of the list of files should work consistently (path operations should
# stay in unicode)
self.video_filename = unicode(self.video_filename)
if not os.path.exists(os.path.join(self.video_location, self.video_filename)):
raise RuntimeError("The video file %s does not exist" %
os.path.abspath(os.path.join(self.video_location, self.video_filename)))
# Put in default values if they are not passed in the kwargs
self.video_width = kwargs.get('video_width', 640)
self.video_fps = kwargs.get('video_fps', 1)
self.thumbnail_root = kwargs.get('thumbnails_root',
self.get_thumbnail_root())
self.thumbnails_location = kwargs.get('thumbnails_location',
self.get_thumbnail_location())
self.thumbnails_location = unicode(self.thumbnails_location) # same issue as for the video filename
def run(self, *args, **kwargs):
if self.is_up_to_date():
return True
thumb_final_directory = os.path.join(self.thumbnail_root, self.thumbnails_location)
if not os.path.exists(thumb_final_directory):
os.makedirs(thumb_final_directory)
extract_thumbnails(video_file_name=os.path.abspath(os.path.join(self.video_location, self.video_filename)),
output_width=self.video_width,
output_folder=thumb_final_directory)
# save the output files
self.thumbnail_files = self._get_files()
def _get_files(self):
"""Returns the list of thumbnails, relative to the thumbnail root"""
thumb_final_directory = os.path.join(self.thumbnail_root, self.thumbnails_location)
if not os.path.exists(thumb_final_directory):
return []
possible_output = [os.path.join(self.thumbnails_location, i) for i in os.listdir(thumb_final_directory) if i.find('frame-') != -1]
possible_output.sort()
return possible_output
def get_outputs(self):
"""Returns the list of thumbnail files (absolute paths)"""
super(FFMpegThumbnailsJob, self).get_outputs()
return [os.path.abspath(os.path.join(self.thumbnail_root, i)) for i in self._get_files()]
class NumberOfFilesJob(Job):
"""Indicates how many thumbnails were generated by the :py:class:`FFMpegThumbnailsJob`.
This job is dependent on :py:class:`FFMpegThumbnailsJob`.
.. rubric:: Workflow input
The output of :py:class:`FFMpegThumbnailsJob`
.. rubric:: Workflow output
One unique number indicating the number of thumbnails.
"""
name = 'number_of_files'
parents = [FFMpegThumbnailsJob]
outputs_to_cache = ['nb_files']
def __init__(self, *args, **kwargs):
super(NumberOfFilesJob, self).__init__(*args, **kwargs)
def run(self, *args, **kwargs):
filelist = args[0]
self.nb_files = len(filelist)
def get_outputs(self):
super(NumberOfFilesJob, self).get_outputs()
return self.nb_files
| [
"os.path.exists",
"os.listdir",
"os.makedirs",
"subprocess.Popen",
"os.path.join",
"time.sleep",
"os.path.dirname",
"os.path.basename",
"os.path.abspath"
] | [((872, 894), 'subprocess.Popen', 'subprocess.Popen', (['args'], {}), '(args)\n', (888, 894), False, 'import subprocess\n'), ((672, 704), 'os.path.abspath', 'os.path.abspath', (['video_file_name'], {}), '(video_file_name)\n', (687, 704), False, 'import os\n'), ((965, 978), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (975, 978), False, 'import time\n'), ((5212, 5271), 'os.path.join', 'os.path.join', (['self.thumbnail_root', 'self.thumbnails_location'], {}), '(self.thumbnail_root, self.thumbnails_location)\n', (5224, 5271), False, 'import os\n'), ((5830, 5889), 'os.path.join', 'os.path.join', (['self.thumbnail_root', 'self.thumbnails_location'], {}), '(self.thumbnail_root, self.thumbnails_location)\n', (5842, 5889), False, 'import os\n'), ((829, 859), 'os.path.abspath', 'os.path.abspath', (['output_folder'], {}), '(output_folder)\n', (844, 859), False, 'import os\n'), ((5287, 5324), 'os.path.exists', 'os.path.exists', (['thumb_final_directory'], {}), '(thumb_final_directory)\n', (5301, 5324), False, 'import os\n'), ((5338, 5372), 'os.makedirs', 'os.makedirs', (['thumb_final_directory'], {}), '(thumb_final_directory)\n', (5349, 5372), False, 'import os\n'), ((5906, 5943), 'os.path.exists', 'os.path.exists', (['thumb_final_directory'], {}), '(thumb_final_directory)\n', (5920, 5943), False, 'import os\n'), ((5995, 6036), 'os.path.join', 'os.path.join', (['self.thumbnails_location', 'i'], {}), '(self.thumbnails_location, i)\n', (6007, 6036), False, 'import os\n'), ((2538, 2573), 'os.path.dirname', 'os.path.dirname', (['self.json_filename'], {}), '(self.json_filename)\n', (2553, 2573), False, 'import os\n'), ((2797, 2834), 'os.path.basename', 'os.path.basename', (['self.video_filename'], {}), '(self.video_filename)\n', (2813, 2834), False, 'import os\n'), ((4289, 4343), 'os.path.join', 'os.path.join', (['self.video_location', 'self.video_filename'], {}), '(self.video_location, self.video_filename)\n', (4301, 4343), False, 'import os\n'), ((6046, 6079), 'os.listdir', 'os.listdir', (['thumb_final_directory'], {}), '(thumb_final_directory)\n', (6056, 6079), False, 'import os\n'), ((6352, 6388), 'os.path.join', 'os.path.join', (['self.thumbnail_root', 'i'], {}), '(self.thumbnail_root, i)\n', (6364, 6388), False, 'import os\n'), ((5433, 5487), 'os.path.join', 'os.path.join', (['self.video_location', 'self.video_filename'], {}), '(self.video_location, self.video_filename)\n', (5445, 5487), False, 'import os\n'), ((4461, 4515), 'os.path.join', 'os.path.join', (['self.video_location', 'self.video_filename'], {}), '(self.video_location, self.video_filename)\n', (4473, 4515), False, 'import os\n')] |
#!/usr/bin/python
import sys #for arguments
import MiniNero
import mnemonic
import PaperWallet
import Ecdh
import ASNL
import MLSAG
import MLSAG2
import LLW_Sigs
import RingCT
import Crypto.Random.random as rand
import Translator
import binascii
import RingCT2
#Schnorr NonLinkable true one and false one
x, P1 = PaperWallet.skpkGen()
P2 = PaperWallet.pkGen()
P3 = PaperWallet.pkGen()
L1, s1, s2 = ASNL.GenSchnorrNonLinkable(x, P1, P2, 0)
print("Testing Schnorr Non-linkable!")
print("This one should verify!")
print(ASNL.VerSchnorrNonLinkable(P1, P2, L1, s1, s2))
print("")
print("This one should NOT verify!")
print(ASNL.VerSchnorrNonLinkable(P1, P3, L1, s1, s2))
#ASNL true one, false one, C != sum Ci, and one out of the range..
print("\n\n\nTesting ASNL")
N = 10
x = [None] * N
P1 = [None] * N
P2 = [None] * N
indi = [None] * N
for j in range(0, N):
indi[j] = rand.getrandbits(1)
x[j] = PaperWallet.skGen()
if indi[j] == 0:
P1[j] = MiniNero.scalarmultBase(x[j])
P2[j] = PaperWallet.pkGen()
else:
P2[j] = MiniNero.scalarmultBase(x[j])
P1[j] = PaperWallet.pkGen()
L1, s2, s = ASNL.GenASNL(x, P1, P2, indi)
#true one
print("This one should verify!")
ASNL.VerASNL(P1, P2, L1, s2, s)
#false one
indi[3] = (indi[3] + 1) % 2
print("")
print("This one should NOT verify!")
L1, s2, s = ASNL.GenASNL(x, P1, P2, indi)
ASNL.VerASNL(P1, P2, L1, s2, s)
#MG sig: true one
print("\n\n\nTesting MG Sig: this one should verify!")
N = 3 #cols
R = 3 #rows
x = [None] * N #just used to generate test public keys
sk = [None]* R #vector of secret keys
P = [None]*N #stores the public keys
ind = 2
for j in range(0, N):
x[j] = [None] * R
P[j] = [None] * R
for i in range(0, R):
x[j][i] = PaperWallet.skGen()
P[j][i] = MiniNero.scalarmultBase(x[j][i])
for j in range(0, R):
sk[j] = x[j][ind]
print("x", x)
II, cc, ss = MLSAG.MLSAG_Sign(P, sk, ind)
print("Sig verified?", MLSAG.MLSAG_Ver(P, II, cc, ss) )
#MG sig: false one
print("\n\nMG Sig: this one should NOT verify!")
N = 3 #cols
R = 3 #rows
x = [None]*N #just used to generate test public keys
sk = [None] * R #vector of secret keys
P = [None]*N #stores the public keys
ind = 2
for j in range(0, N):
x[j] = [None] * R
P[j] = [None] * R
for i in range(0, R):
x[j][i] = PaperWallet.skGen()
P[j][i] = MiniNero.scalarmultBase(x[j][i])
for j in range(0, R):
sk[j] = x[j][ind]
sk[2] = PaperWallet.skGen() #assume we don't know one of the secret keys
print("x", x)
II, cc, ss = MLSAG.MLSAG_Sign(P, sk, ind)
print("Sig verified?", MLSAG.MLSAG_Ver(P, II, cc, ss) )
#rct Sig: range proof true / false, sum Ci true / false, MG sig true / false,
print("\n\n\nTesting Ring CT")
sc = []
pc = []
sctmp, pctmp = RingCT2.ctskpkGen(60)
sc.append(sctmp)
pc.append(pctmp)
sctmp, pctmp = RingCT2.ctskpkGen(70)
sc.append(sctmp)
pc.append(pctmp)
#add output 5
amounts = []
amounts.append(5)
destinations = []
Sk, Pk = PaperWallet.skpkGen()
destinations.append(Pk)
#add output for 125
amounts.append(125)
Sk, Pk = PaperWallet.skpkGen()
destinations.append(Pk)
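# inputs (60 + 70) and outputs (5 + 125) both sum to 130, so the value commitments should balance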
s = RingCT2.genRct(sc, pc, destinations, amounts, 2)
print("attempting to verify")
print(RingCT2.verRct(s))
#decode received amount
print("decode amounts working?")
print(RingCT2.decodeRct(s, Sk, 0))
print("decode amounts working?")
print(RingCT2.decodeRct(s, Sk, 1))
| [
"MLSAG.MLSAG_Ver",
"ASNL.GenSchnorrNonLinkable",
"RingCT2.decodeRct",
"RingCT2.genRct",
"ASNL.GenASNL",
"PaperWallet.skGen",
"ASNL.VerSchnorrNonLinkable",
"PaperWallet.pkGen",
"MiniNero.scalarmultBase",
"MLSAG.MLSAG_Sign",
"PaperWallet.skpkGen",
"RingCT2.ctskpkGen",
"ASNL.VerASNL",
"Crypto.Random.random.getrandbits",
"RingCT2.verRct"
] | [((314, 335), 'PaperWallet.skpkGen', 'PaperWallet.skpkGen', ([], {}), '()\n', (333, 335), False, 'import PaperWallet\n'), ((341, 360), 'PaperWallet.pkGen', 'PaperWallet.pkGen', ([], {}), '()\n', (358, 360), False, 'import PaperWallet\n'), ((366, 385), 'PaperWallet.pkGen', 'PaperWallet.pkGen', ([], {}), '()\n', (383, 385), False, 'import PaperWallet\n'), ((400, 440), 'ASNL.GenSchnorrNonLinkable', 'ASNL.GenSchnorrNonLinkable', (['x', 'P1', 'P2', '(0)'], {}), '(x, P1, P2, 0)\n', (426, 440), False, 'import ASNL\n'), ((1132, 1161), 'ASNL.GenASNL', 'ASNL.GenASNL', (['x', 'P1', 'P2', 'indi'], {}), '(x, P1, P2, indi)\n', (1144, 1161), False, 'import ASNL\n'), ((1206, 1237), 'ASNL.VerASNL', 'ASNL.VerASNL', (['P1', 'P2', 'L1', 's2', 's'], {}), '(P1, P2, L1, s2, s)\n', (1218, 1237), False, 'import ASNL\n'), ((1337, 1366), 'ASNL.GenASNL', 'ASNL.GenASNL', (['x', 'P1', 'P2', 'indi'], {}), '(x, P1, P2, indi)\n', (1349, 1366), False, 'import ASNL\n'), ((1367, 1398), 'ASNL.VerASNL', 'ASNL.VerASNL', (['P1', 'P2', 'L1', 's2', 's'], {}), '(P1, P2, L1, s2, s)\n', (1379, 1398), False, 'import ASNL\n'), ((1890, 1918), 'MLSAG.MLSAG_Sign', 'MLSAG.MLSAG_Sign', (['P', 'sk', 'ind'], {}), '(P, sk, ind)\n', (1906, 1918), False, 'import MLSAG\n'), ((2441, 2460), 'PaperWallet.skGen', 'PaperWallet.skGen', ([], {}), '()\n', (2458, 2460), False, 'import PaperWallet\n'), ((2533, 2561), 'MLSAG.MLSAG_Sign', 'MLSAG.MLSAG_Sign', (['P', 'sk', 'ind'], {}), '(P, sk, ind)\n', (2549, 2561), False, 'import MLSAG\n'), ((2767, 2788), 'RingCT2.ctskpkGen', 'RingCT2.ctskpkGen', (['(60)'], {}), '(60)\n', (2784, 2788), False, 'import RingCT2\n'), ((2839, 2860), 'RingCT2.ctskpkGen', 'RingCT2.ctskpkGen', (['(70)'], {}), '(70)\n', (2856, 2860), False, 'import RingCT2\n'), ((2970, 2991), 'PaperWallet.skpkGen', 'PaperWallet.skpkGen', ([], {}), '()\n', (2989, 2991), False, 'import PaperWallet\n'), ((3069, 3090), 'PaperWallet.skpkGen', 'PaperWallet.skpkGen', ([], {}), '()\n', (3088, 3090), False, 'import PaperWallet\n'), ((3120, 3168), 'RingCT2.genRct', 'RingCT2.genRct', (['sc', 'pc', 'destinations', 'amounts', '(2)'], {}), '(sc, pc, destinations, amounts, 2)\n', (3134, 3168), False, 'import RingCT2\n'), ((520, 566), 'ASNL.VerSchnorrNonLinkable', 'ASNL.VerSchnorrNonLinkable', (['P1', 'P2', 'L1', 's1', 's2'], {}), '(P1, P2, L1, s1, s2)\n', (546, 566), False, 'import ASNL\n'), ((621, 667), 'ASNL.VerSchnorrNonLinkable', 'ASNL.VerSchnorrNonLinkable', (['P1', 'P3', 'L1', 's1', 's2'], {}), '(P1, P3, L1, s1, s2)\n', (647, 667), False, 'import ASNL\n'), ((874, 893), 'Crypto.Random.random.getrandbits', 'rand.getrandbits', (['(1)'], {}), '(1)\n', (890, 893), True, 'import Crypto.Random.random as rand\n'), ((905, 924), 'PaperWallet.skGen', 'PaperWallet.skGen', ([], {}), '()\n', (922, 924), False, 'import PaperWallet\n'), ((1943, 1973), 'MLSAG.MLSAG_Ver', 'MLSAG.MLSAG_Ver', (['P', 'II', 'cc', 'ss'], {}), '(P, II, cc, ss)\n', (1958, 1973), False, 'import MLSAG\n'), ((2586, 2616), 'MLSAG.MLSAG_Ver', 'MLSAG.MLSAG_Ver', (['P', 'II', 'cc', 'ss'], {}), '(P, II, cc, ss)\n', (2601, 2616), False, 'import MLSAG\n'), ((3206, 3223), 'RingCT2.verRct', 'RingCT2.verRct', (['s'], {}), '(s)\n', (3220, 3223), False, 'import RingCT2\n'), ((3289, 3316), 'RingCT2.decodeRct', 'RingCT2.decodeRct', (['s', 'Sk', '(0)'], {}), '(s, Sk, 0)\n', (3306, 3316), False, 'import RingCT2\n'), ((3357, 3384), 'RingCT2.decodeRct', 'RingCT2.decodeRct', (['s', 'Sk', '(1)'], {}), '(s, Sk, 1)\n', (3374, 3384), False, 'import RingCT2\n'), ((962, 991), 'MiniNero.scalarmultBase', 
'MiniNero.scalarmultBase', (['x[j]'], {}), '(x[j])\n', (985, 991), False, 'import MiniNero\n'), ((1008, 1027), 'PaperWallet.pkGen', 'PaperWallet.pkGen', ([], {}), '()\n', (1025, 1027), False, 'import PaperWallet\n'), ((1054, 1083), 'MiniNero.scalarmultBase', 'MiniNero.scalarmultBase', (['x[j]'], {}), '(x[j])\n', (1077, 1083), False, 'import MiniNero\n'), ((1100, 1119), 'PaperWallet.pkGen', 'PaperWallet.pkGen', ([], {}), '()\n', (1117, 1119), False, 'import PaperWallet\n'), ((1747, 1766), 'PaperWallet.skGen', 'PaperWallet.skGen', ([], {}), '()\n', (1764, 1766), False, 'import PaperWallet\n'), ((1785, 1817), 'MiniNero.scalarmultBase', 'MiniNero.scalarmultBase', (['x[j][i]'], {}), '(x[j][i])\n', (1808, 1817), False, 'import MiniNero\n'), ((2317, 2336), 'PaperWallet.skGen', 'PaperWallet.skGen', ([], {}), '()\n', (2334, 2336), False, 'import PaperWallet\n'), ((2355, 2387), 'MiniNero.scalarmultBase', 'MiniNero.scalarmultBase', (['x[j][i]'], {}), '(x[j][i])\n', (2378, 2387), False, 'import MiniNero\n')] |
import unittest
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
from haascli import __version__
class UnitTest(TestCommand):
def initialize_options(self):
TestCommand.initialize_options(self)
def finalize_options(self):
TestCommand.finalize_options(self)
def run_tests(self):
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests')
runner = unittest.runner.TextTestRunner()
runner.run(test_suite)
setup_options = dict(
name='haascli',
version=__version__,
description='Universal Command Line Environment for HAAS.',
long_description=open('README.rst').read(),
author='NCSU Operating Research Lab',
url='https://github.com/vin0110/haas',
scripts=['bin/haas'],
packages=find_packages(exclude=['tests*']),
package_data={'haascli': ['examples/*/*.rst']},
cmdclass={'test': UnitTest},
install_requires=[
'boto3',
'click',
'executor',
'troposphere',
'awacs',
],
extras_require={
':python_version=="3.4"': [
'click>=6.7',
]
},
license="Apache License 2.0",
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
),
)
setup(**setup_options)
| [
"setuptools.find_packages",
"setuptools.setup",
"setuptools.command.test.test.finalize_options",
"setuptools.command.test.test.initialize_options",
"unittest.runner.TextTestRunner",
"unittest.TestLoader"
] | [((1885, 1907), 'setuptools.setup', 'setup', ([], {}), '(**setup_options)\n', (1890, 1907), False, 'from setuptools import setup, find_packages\n'), ((224, 260), 'setuptools.command.test.test.initialize_options', 'TestCommand.initialize_options', (['self'], {}), '(self)\n', (254, 260), True, 'from setuptools.command.test import test as TestCommand\n'), ((302, 336), 'setuptools.command.test.test.finalize_options', 'TestCommand.finalize_options', (['self'], {}), '(self)\n', (330, 336), True, 'from setuptools.command.test import test as TestCommand\n'), ((385, 406), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (404, 406), False, 'import unittest\n'), ((475, 507), 'unittest.runner.TextTestRunner', 'unittest.runner.TextTestRunner', ([], {}), '()\n', (505, 507), False, 'import unittest\n'), ((843, 876), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests*']"}), "(exclude=['tests*'])\n", (856, 876), False, 'from setuptools import setup, find_packages\n')] |
from sage.misc.lazy_import import lazy_import
lazy_import('sage.schemes.product_projective.space', 'ProductProjectiveSpaces')
lazy_import('sage.schemes.product_projective.space', 'is_ProductProjectiveSpaces')
lazy_import('sage.schemes.product_projective.wehlerK3', 'WehlerK3Surface')
lazy_import('sage.schemes.product_projective.wehlerK3', 'random_WehlerK3Surface')
| [
"sage.misc.lazy_import.lazy_import"
] | [((47, 126), 'sage.misc.lazy_import.lazy_import', 'lazy_import', (['"""sage.schemes.product_projective.space"""', '"""ProductProjectiveSpaces"""'], {}), "('sage.schemes.product_projective.space', 'ProductProjectiveSpaces')\n", (58, 126), False, 'from sage.misc.lazy_import import lazy_import\n'), ((127, 213), 'sage.misc.lazy_import.lazy_import', 'lazy_import', (['"""sage.schemes.product_projective.space"""', '"""is_ProductProjectiveSpaces"""'], {}), "('sage.schemes.product_projective.space',\n 'is_ProductProjectiveSpaces')\n", (138, 213), False, 'from sage.misc.lazy_import import lazy_import\n'), ((210, 284), 'sage.misc.lazy_import.lazy_import', 'lazy_import', (['"""sage.schemes.product_projective.wehlerK3"""', '"""WehlerK3Surface"""'], {}), "('sage.schemes.product_projective.wehlerK3', 'WehlerK3Surface')\n", (221, 284), False, 'from sage.misc.lazy_import import lazy_import\n'), ((285, 370), 'sage.misc.lazy_import.lazy_import', 'lazy_import', (['"""sage.schemes.product_projective.wehlerK3"""', '"""random_WehlerK3Surface"""'], {}), "('sage.schemes.product_projective.wehlerK3',\n 'random_WehlerK3Surface')\n", (296, 370), False, 'from sage.misc.lazy_import import lazy_import\n')] |
import numpy as np
from typing import List
class UnaryOperator:
def kernel(self, data: np.ndarray):
raise NotImplementedError()
def name(self, operand):
raise NotImplementedError()
class RadialBasis(UnaryOperator):
def __init__(self, gamma: float):
self._gamma = gamma
def kernel(self, data: np.ndarray):
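# Gaussian (RBF) kernel applied in place: data becomes exp(-gamma * data**2)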
data[:, :] = np.exp(-self._gamma * np.power(data, 2))
def name(self, operand):
return f'radialbasis({operand}, {self._gamma})'
class DistanceToSimilarity(UnaryOperator):
def kernel(self, data: np.ndarray):
data[:, :] = np.maximum(0, 1 - data)
def name(self, operand):
return f'(1 - {operand})'
class Bias(UnaryOperator):
def __init__(self, bias: float):
self._bias = bias
def kernel(self, data: np.ndarray):
data += self._bias
def name(self, operand):
return f'({operand} + {self._bias})'
class Scale(UnaryOperator):
def __init__(self, scale: float):
self._scale = scale
def kernel(self, data: np.ndarray):
data *= self._scale
def name(self, operand):
return f'({operand} * {self._scale})'
class Power(UnaryOperator):
def __init__(self, exp: float):
self._exp = exp
def kernel(self, data: np.ndarray):
data[:, :] = np.power(np.maximum(data, 0), self._exp)
def name(self, operand):
return f'({operand} ** {self._exp})'
class Threshold(UnaryOperator):
def __init__(self, threshold: float):
self._threshold = threshold
def kernel(self, data: np.ndarray):
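# values at or below the threshold are zeroed; values above it pass through unchanged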
x = np.maximum(0, data - self._threshold)
x[x > 0] += self._threshold
data[:, :] = x
def name(self, operand):
return f'threshold({operand}, {self._threshold})'
class Kernel:
def __init__(self, operators: List[UnaryOperator]):
self._operators = operators
# might JIT in the future.
def chain(data):
for x in operators:
x.kernel(data)
self._kernel = chain
def __call__(self, data: np.ndarray):
self._kernel(data)
def name(self, operand):
name = operand
for x in self._operators:
name = x.name(name)
return name
| [
"numpy.maximum",
"numpy.power"
] | [((552, 575), 'numpy.maximum', 'np.maximum', (['(0)', '(1 - data)'], {}), '(0, 1 - data)\n', (562, 575), True, 'import numpy as np\n'), ((1441, 1478), 'numpy.maximum', 'np.maximum', (['(0)', '(data - self._threshold)'], {}), '(0, data - self._threshold)\n', (1451, 1478), True, 'import numpy as np\n'), ((1196, 1215), 'numpy.maximum', 'np.maximum', (['data', '(0)'], {}), '(data, 0)\n', (1206, 1215), True, 'import numpy as np\n'), ((359, 376), 'numpy.power', 'np.power', (['data', '(2)'], {}), '(data, 2)\n', (367, 376), True, 'import numpy as np\n')] |
import zipfile
from netCDF4 import Dataset
import pytest
from tests.utils import execute_process, mock_local_datasets, wps_literal_input
@pytest.fixture
def mock_datasets(monkeypatch):
filenames = ["tasmin_inmcm4_subset.nc"]
mock_local_datasets(monkeypatch, filenames=filenames)
def test_bccaqv2_subset_point(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat", "46.0"),
wps_literal_input("lon", "-72.8"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
assert len(outputs) == 1
zf = zipfile.ZipFile(outputs[0])
assert len(zf.namelist()) == 1
ds = Dataset("inmemory.nc", memory=zf.read(zf.namelist()[0]))
dims = {d.name: d.size for d in ds.dimensions.values()}
assert dims == {"region": 1, "time": 100}
def test_bccaqv2_subset_point_csv(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat", "46.0"),
wps_literal_input("lon", "-72.8"),
wps_literal_input("output_format", "csv"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
assert len(outputs) == 1
zf = zipfile.ZipFile(outputs[0])
assert len(zf.namelist()) == 2 # metadata + data
data_filename = [n for n in zf.namelist() if "metadata" not in n]
csv = zf.read(data_filename[0])
n_lines = csv.count(b"\n") - 1
assert n_lines == 100
def test_bccaqv2_subset_point_multiple(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat", "46.0, 46.1, 46.1"),
wps_literal_input("lon", "-72.8, -72.7, -72.9"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
assert len(outputs) == 1
zf = zipfile.ZipFile(outputs[0])
assert len(zf.namelist()) == 1
ds = Dataset("inmemory.nc", memory=zf.read(zf.namelist()[0]))
dims = {d.name: d.size for d in ds.dimensions.values()}
assert dims == {"region": 3, "time": 100}
def test_bccaqv2_subset_point_multiple_csv(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat", "46.0, 46.1, 46.1"),
wps_literal_input("lon", "-72.8, -72.7, -72.9"),
wps_literal_input("output_format", "csv"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
assert len(outputs) == 1
zf = zipfile.ZipFile(outputs[0])
assert len(zf.namelist()) == 2 # metadata + data
data_filename = [n for n in zf.namelist() if "metadata" not in n]
csv = zf.read(data_filename[0])
n_lines = csv.count(b"\n") - 1
assert n_lines == 300
def test_bccaqv2_subset_point_multiple_same_cell(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat", "46.0, 46.0"), # The coordinates pairs are the same
wps_literal_input("lon", "-72.8, -72.8"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
assert len(outputs) == 1
zf = zipfile.ZipFile(outputs[0])
assert len(zf.namelist()) == 1
ds = Dataset("inmemory.nc", memory=zf.read(zf.namelist()[0]))
dims = {d.name: d.size for d in ds.dimensions.values()}
# Todo: the cells are concatenated: is this the desired behaviour?
assert dims == {"region": 2, "time": 100}
def test_bccaqv2_subset_point_lat0_lon0_deprecation(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat0", "46.0"),
wps_literal_input("lon0", "-72.8"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
zf = zipfile.ZipFile(outputs[0])
ds = Dataset("inmemory.nc", memory=zf.read(zf.namelist()[0]))
dims = {d.name: d.size for d in ds.dimensions.values()}
assert dims == {"region": 1, "time": 100}
def test_bccaqv2_subset_bbox_process(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_bbox_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat0", "46.0"),
wps_literal_input("lat1", "46.2"),
wps_literal_input("lon0", "-73.0"),
wps_literal_input("lon1", "-72.8"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
assert len(outputs) == 1
zf = zipfile.ZipFile(outputs[0])
assert len(zf.namelist()) == 1
ds = Dataset("inmemory.nc", memory=zf.read(zf.namelist()[0]))
dims = {d.name: d.size for d in ds.dimensions.values()}
assert dims == {"lat": 2, "lon": 2, "time": 100}
def test_bccaqv2_subset_bbox_process_csv(mock_datasets, client):
# --- given ---
identifier = "subset_ensemble_bbox_BCCAQv2"
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lat0", "46.0"),
wps_literal_input("lat1", "46.2"),
wps_literal_input("lon0", "-73.0"),
wps_literal_input("lon1", "-72.8"),
wps_literal_input("output_format", "csv"),
]
# --- when ---
outputs = execute_process(client, identifier, inputs, output_names=["output"])
# --- then ---
assert len(outputs) == 1
zf = zipfile.ZipFile(outputs[0])
assert len(zf.namelist()) == 2 # metadata + data
data_filename = [n for n in zf.namelist() if "metadata" not in n]
csv = zf.read(data_filename[0])
n_lines = csv.count(b"\n") - 1
assert n_lines == 400
@pytest.mark.skip("Skipping: subset using real data is too long.")
def test_bccaqv2_subset_online(client):
identifier = "subset_ensemble_BCCAQv2"
up_right = 45.507485, -73.541295
bottom_left = 45.385644, -73.691963
inputs = [
wps_literal_input("variable", "tasmin"),
wps_literal_input("rcp", "rcp26"),
wps_literal_input("lon0", str(bottom_left[1])),
wps_literal_input("lon1", str(up_right[1])),
wps_literal_input("lat0", str(bottom_left[0])),
wps_literal_input("lat1", str(up_right[0])),
wps_literal_input("y0", "2010"),
wps_literal_input("y1", "2011"),
]
outputs = execute_process(client, identifier, inputs, output_names=["output"])
print(outputs)
| [
"tests.utils.execute_process",
"zipfile.ZipFile",
"pytest.mark.skip",
"tests.utils.mock_local_datasets",
"tests.utils.wps_literal_input"
] | [((6586, 6651), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Skipping: subset using real data is too long."""'], {}), "('Skipping: subset using real data is too long.')\n", (6602, 6651), False, 'import pytest\n'), ((237, 290), 'tests.utils.mock_local_datasets', 'mock_local_datasets', (['monkeypatch'], {'filenames': 'filenames'}), '(monkeypatch, filenames=filenames)\n', (256, 290), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((642, 710), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (657, 710), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((769, 796), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (784, 796), False, 'import zipfile\n'), ((1410, 1478), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (1425, 1478), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1537, 1564), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (1552, 1564), False, 'import zipfile\n'), ((2172, 2240), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (2187, 2240), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((2299, 2326), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (2314, 2326), False, 'import zipfile\n'), ((2975, 3043), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (2990, 3043), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((3102, 3129), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (3117, 3129), False, 'import zipfile\n'), ((3772, 3840), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (3787, 3840), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((3899, 3926), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (3914, 3926), False, 'import zipfile\n'), ((4581, 4649), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (4596, 4649), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((4679, 4706), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (4694, 4706), False, 'import zipfile\n'), ((5331, 5399), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (5346, 5399), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5458, 5485), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (5473, 5485), False, 'import zipfile\n'), ((6207, 6275), 
'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (6222, 6275), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((6334, 6361), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outputs[0]'], {}), '(outputs[0])\n', (6349, 6361), False, 'import zipfile\n'), ((7241, 7309), 'tests.utils.execute_process', 'execute_process', (['client', 'identifier', 'inputs'], {'output_names': "['output']"}), "(client, identifier, inputs, output_names=['output'])\n", (7256, 7309), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((433, 472), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (450, 472), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((482, 515), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (499, 515), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((525, 557), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat"""', '"""46.0"""'], {}), "('lat', '46.0')\n", (542, 557), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((567, 600), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon"""', '"""-72.8"""'], {}), "('lon', '-72.8')\n", (584, 600), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1150, 1189), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (1167, 1189), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1199, 1232), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (1216, 1232), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1242, 1274), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat"""', '"""46.0"""'], {}), "('lat', '46.0')\n", (1259, 1274), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1284, 1317), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon"""', '"""-72.8"""'], {}), "('lon', '-72.8')\n", (1301, 1317), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1327, 1368), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""output_format"""', '"""csv"""'], {}), "('output_format', 'csv')\n", (1344, 1368), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1937, 1976), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (1954, 1976), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((1986, 2019), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (2003, 2019), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((2029, 2073), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat"""', '"""46.0, 46.1, 46.1"""'], {}), "('lat', '46.0, 46.1, 46.1')\n", (2046, 2073), False, 'from tests.utils import execute_process, 
mock_local_datasets, wps_literal_input\n'), ((2083, 2130), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon"""', '"""-72.8, -72.7, -72.9"""'], {}), "('lon', '-72.8, -72.7, -72.9')\n", (2100, 2130), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((2689, 2728), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (2706, 2728), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((2738, 2771), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (2755, 2771), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((2781, 2825), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat"""', '"""46.0, 46.1, 46.1"""'], {}), "('lat', '46.0, 46.1, 46.1')\n", (2798, 2825), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((2835, 2882), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon"""', '"""-72.8, -72.7, -72.9"""'], {}), "('lon', '-72.8, -72.7, -72.9')\n", (2852, 2882), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((2892, 2933), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""output_format"""', '"""csv"""'], {}), "('output_format', 'csv')\n", (2909, 2933), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((3512, 3551), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (3529, 3551), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((3561, 3594), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (3578, 3594), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((3604, 3642), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat"""', '"""46.0, 46.0"""'], {}), "('lat', '46.0, 46.0')\n", (3621, 3642), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((3690, 3730), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon"""', '"""-72.8, -72.8"""'], {}), "('lon', '-72.8, -72.8')\n", (3707, 3730), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((4370, 4409), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (4387, 4409), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((4419, 4452), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (4436, 4452), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((4462, 4495), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat0"""', '"""46.0"""'], {}), "('lat0', '46.0')\n", (4479, 4495), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((4505, 4539), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon0"""', '"""-72.8"""'], {}), "('lon0', '-72.8')\n", (4522, 4539), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5033, 5072), 'tests.utils.wps_literal_input', 'wps_literal_input', 
(['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (5050, 5072), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5082, 5115), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (5099, 5115), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5125, 5158), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat0"""', '"""46.0"""'], {}), "('lat0', '46.0')\n", (5142, 5158), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5168, 5201), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat1"""', '"""46.2"""'], {}), "('lat1', '46.2')\n", (5185, 5201), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5211, 5245), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon0"""', '"""-73.0"""'], {}), "('lon0', '-73.0')\n", (5228, 5245), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5255, 5289), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon1"""', '"""-72.8"""'], {}), "('lon1', '-72.8')\n", (5272, 5289), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5858, 5897), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (5875, 5897), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5907, 5940), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (5924, 5940), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5950, 5983), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat0"""', '"""46.0"""'], {}), "('lat0', '46.0')\n", (5967, 5983), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((5993, 6026), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lat1"""', '"""46.2"""'], {}), "('lat1', '46.2')\n", (6010, 6026), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((6036, 6070), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon0"""', '"""-73.0"""'], {}), "('lon0', '-73.0')\n", (6053, 6070), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((6080, 6114), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""lon1"""', '"""-72.8"""'], {}), "('lon1', '-72.8')\n", (6097, 6114), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((6124, 6165), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""output_format"""', '"""csv"""'], {}), "('output_format', 'csv')\n", (6141, 6165), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((6836, 6875), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""variable"""', '"""tasmin"""'], {}), "('variable', 'tasmin')\n", (6853, 6875), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((6885, 6918), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""rcp"""', '"""rcp26"""'], {}), "('rcp', 'rcp26')\n", (6902, 6918), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((7146, 7177), 'tests.utils.wps_literal_input', 
'wps_literal_input', (['"""y0"""', '"""2010"""'], {}), "('y0', '2010')\n", (7163, 7177), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n'), ((7187, 7218), 'tests.utils.wps_literal_input', 'wps_literal_input', (['"""y1"""', '"""2011"""'], {}), "('y1', '2011')\n", (7204, 7218), False, 'from tests.utils import execute_process, mock_local_datasets, wps_literal_input\n')] |
import pygments
from pygments.styles import get_all_styles
from wandbox.colorschemes import schemes
def init_lexers():
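# looking each lexer up by name forces pygments to import the corresponding lexer modules, so the attribute references in lexers_dict below resolve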
pygments.lexers.find_lexer_class_by_name("D")
pygments.lexers.find_lexer_class_by_name("ruby")
pygments.lexers.find_lexer_class_by_name("rust")
pygments.lexers.find_lexer_class_by_name("sql")
pygments.lexers.find_lexer_class_by_name("lisp")
pygments.lexers.find_lexer_class_by_name("go")
pygments.lexers.find_lexer_class_by_name("f#")
pygments.lexers.find_lexer_class_by_name("scala")
pygments.lexers.find_lexer_class_by_name("swift")
pygments.lexers.find_lexer_class_by_name("typescript")
pygments.lexers.find_lexer_class_by_name("vim")
pygments.lexers.find_lexer_class_by_name("lua")
pygments.lexers.find_lexer_class_by_name("nim")
pygments.lexers.find_lexer_class_by_name("php")
pygments.lexers.find_lexer_class_by_name("elixir")
pygments.lexers.find_lexer_class_by_name("python")
pygments.lexers.find_lexer_class_by_name("cpp")
pygments.lexers.find_lexer_class_by_name("c")
pygments.lexers.find_lexer_class_by_name("javascript")
pygments.lexers.find_lexer_class_by_name("coffeescript")
pygments.lexers.find_lexer_class_by_name("java")
pygments.lexers.find_lexer_class_by_name("haskell")
pygments.lexers.find_lexer_class_by_name("bash")
pygments.lexers.find_lexer_class_by_name("cmake")
pygments.lexers.find_lexer_class_by_name("crystal")
pygments.lexers.find_lexer_class_by_name("perl")
pygments.lexers.find_lexer_class_by_name("pony")
pygments.lexers.find_lexer_class_by_name("c#")
init_lexers()
lexers_dict = {
"python": pygments.lexers.python.PythonLexer,
"c++": pygments.lexers.c_cpp.CppLexer,
"c#": pygments.lexers.dotnet.CSharpLexer,
"cpp": pygments.lexers.c_cpp.CppLexer,
"c": pygments.lexers.c_cpp.CLexer,
"javascript": pygments.lexers.javascript.JavascriptLexer,
"js": pygments.lexers.javascript.JavascriptLexer,
"coffeescript": pygments.lexers.javascript.CoffeeScriptLexer,
"cs": pygments.lexers.javascript.CoffeeScriptLexer,
"java": pygments.lexers.jvm.JavaLexer,
"haskell": pygments.lexers.haskell.HaskellLexer,
"bash": pygments.lexers.shell.BashLexer,
"cmake": pygments.lexers.make.CMakeLexer,
"crystal": pygments.lexers.crystal.CrystalLexer,
"elixir": pygments.lexers.erlang.ElixirLexer,
"d": pygments.lexers.d.DLexer,
"ruby": pygments.lexers.ruby.RubyLexer,
"rust": pygments.lexers.rust.RustLexer,
"sql": pygments.lexers.sql.SqlLexer,
"sqlite": pygments.lexers.sql.SqlLexer,
"lisp": pygments.lexers.lisp.CommonLispLexer,
"go": pygments.lexers.go.GoLexer,
"f#": pygments.lexers.dotnet.FSharpLexer,
"scala": pygments.lexers.jvm.ScalaLexer,
"swift": pygments.lexers.objective.SwiftLexer,
"typescript": pygments.lexers.javascript.TypeScriptLexer,
"ts": pygments.lexers.javascript.TypeScriptLexer,
"vim": pygments.lexers.textedit.VimLexer,
"lua": pygments.lexers.scripting.LuaLexer,
"nim": pygments.lexers.nimrod.NimrodLexer,
"php": pygments.lexers.php.PhpLexer,
"perl": pygments.lexers.perl.PerlLexer,
"pony": pygments.lexers.pony.PonyLexer,
}
spinners = [
"point",
"dots",
"dots12",
"dots9",
"dots2",
"simpleDotsScrolling",
"bouncingBall",
]
themes = list(get_all_styles()) + schemes
themes = [list(themes[style : style + 2]) for style in range(0, len(themes), 2)]
languages_table = [
("python", "f#"),
("c++", "scala"),
("c", "swift"),
("javascript", "typescript"),
("java", "vim"),
("haskell", "lua"),
("bash", "nim"),
("crystal", "php"),
("elixir", "perl"),
("d", "pony"),
("sqlite", "go"),
("lisp", "ruby"),
("c#", "coffeescript"),
("rust", )
]
| [
"pygments.lexers.find_lexer_class_by_name",
"pygments.styles.get_all_styles"
] | [((125, 170), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""D"""'], {}), "('D')\n", (165, 170), False, 'import pygments\n'), ((175, 223), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""ruby"""'], {}), "('ruby')\n", (215, 223), False, 'import pygments\n'), ((228, 276), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""rust"""'], {}), "('rust')\n", (268, 276), False, 'import pygments\n'), ((281, 328), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""sql"""'], {}), "('sql')\n", (321, 328), False, 'import pygments\n'), ((333, 381), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""lisp"""'], {}), "('lisp')\n", (373, 381), False, 'import pygments\n'), ((386, 432), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""go"""'], {}), "('go')\n", (426, 432), False, 'import pygments\n'), ((437, 483), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""f#"""'], {}), "('f#')\n", (477, 483), False, 'import pygments\n'), ((488, 537), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""scala"""'], {}), "('scala')\n", (528, 537), False, 'import pygments\n'), ((542, 591), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""swift"""'], {}), "('swift')\n", (582, 591), False, 'import pygments\n'), ((596, 650), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""typescript"""'], {}), "('typescript')\n", (636, 650), False, 'import pygments\n'), ((655, 702), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""vim"""'], {}), "('vim')\n", (695, 702), False, 'import pygments\n'), ((707, 754), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""lua"""'], {}), "('lua')\n", (747, 754), False, 'import pygments\n'), ((759, 806), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""nim"""'], {}), "('nim')\n", (799, 806), False, 'import pygments\n'), ((811, 858), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""php"""'], {}), "('php')\n", (851, 858), False, 'import pygments\n'), ((863, 913), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""elixir"""'], {}), "('elixir')\n", (903, 913), False, 'import pygments\n'), ((918, 968), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""python"""'], {}), "('python')\n", (958, 968), False, 'import pygments\n'), ((973, 1020), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""cpp"""'], {}), "('cpp')\n", (1013, 1020), False, 'import pygments\n'), ((1025, 1070), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""c"""'], {}), "('c')\n", (1065, 1070), False, 'import pygments\n'), ((1075, 1129), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""javascript"""'], {}), "('javascript')\n", (1115, 1129), False, 'import pygments\n'), ((1134, 1190), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""coffeescript"""'], {}), 
"('coffeescript')\n", (1174, 1190), False, 'import pygments\n'), ((1195, 1243), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""java"""'], {}), "('java')\n", (1235, 1243), False, 'import pygments\n'), ((1248, 1299), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""haskell"""'], {}), "('haskell')\n", (1288, 1299), False, 'import pygments\n'), ((1304, 1352), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""bash"""'], {}), "('bash')\n", (1344, 1352), False, 'import pygments\n'), ((1357, 1406), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""cmake"""'], {}), "('cmake')\n", (1397, 1406), False, 'import pygments\n'), ((1411, 1462), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""crystal"""'], {}), "('crystal')\n", (1451, 1462), False, 'import pygments\n'), ((1467, 1515), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""perl"""'], {}), "('perl')\n", (1507, 1515), False, 'import pygments\n'), ((1520, 1568), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""pony"""'], {}), "('pony')\n", (1560, 1568), False, 'import pygments\n'), ((1573, 1619), 'pygments.lexers.find_lexer_class_by_name', 'pygments.lexers.find_lexer_class_by_name', (['"""c#"""'], {}), "('c#')\n", (1613, 1619), False, 'import pygments\n'), ((3370, 3386), 'pygments.styles.get_all_styles', 'get_all_styles', ([], {}), '()\n', (3384, 3386), False, 'from pygments.styles import get_all_styles\n')] |
# Standard library
import os
# 3rd party packages
import pytest
# Local source
from parametrization_clean.infrastructure.utils.reax_reader import ReaxReader, write_ffield, read_array
from parametrization_clean.domain.utils.helpers import get_param
@pytest.fixture(autouse=True)
@pytest.mark.usefixtures("reax_output_dir_path")
def module_setup_teardown(reax_output_dir_path):
# Setup
yield
# Teardown
if os.path.isfile(os.path.join(reax_output_dir_path, 'ffield')):
os.remove(os.path.join(reax_output_dir_path, 'ffield'))
@pytest.fixture()
@pytest.mark.usefixtures("training_set_dir_path")
def reax_io_obj(training_set_dir_path):
return ReaxReader(training_set_dir_path)
@pytest.mark.usefixtures("training_set_dir_path")
def test_reax_io_init(reax_io_obj, training_set_dir_path):
assert reax_io_obj.dir_path == training_set_dir_path
def test_reax_io_read_ffield(reax_io_obj):
ffield, atom_types = reax_io_obj.read_ffield()
assert isinstance(ffield, dict)
assert isinstance(atom_types, dict)
assert get_param([2, 14, 1], ffield) == 1.8862
assert get_param([2, 14, 25], ffield) == -3.0614
assert get_param([3, 38, 4], ffield) == -0.6944
assert get_param([4, 19, 1], ffield) == 0.0987
assert get_param([5, 94, 5], ffield) == 2.0000
assert atom_types[3][37 - 1][0] == '3'
assert atom_types[3][37 - 1][1] == '14'
def test_reax_io_read_fort99(reax_io_obj):
fort99_results = reax_io_obj.read_fort99()
assert fort99_results[0][1] == 1.0950
assert fort99_results[62][1] == 107.2990
assert fort99_results[65][2] == 0.0500
assert fort99_results[-1][0] == -0.7802
def test_reax_io_read_params(reax_io_obj):
param_keys, param_increments, param_bounds = reax_io_obj.read_params()
assert param_keys[0] == [2, 14, 1]
assert param_increments[0] == 0.001
assert param_bounds[0] == [1.8, 2.5]
assert param_keys[22] == [4, 18, 1]
assert param_increments[22] == 0.010
assert param_bounds[22] == [0.02, 0.30]
assert param_keys[-1] == [5, 94, 7]
assert param_increments[-1] == 0.050
assert param_bounds[-1] == [1.1, 4.0]
@pytest.mark.usefixtures("training_set_dir_path", "reax_output_dir_path")
def test_write_ffield(reax_io_obj, training_set_dir_path, reax_output_dir_path):
input_path = os.path.join(training_set_dir_path, "ffield")
output_path = os.path.join(reax_output_dir_path, "ffield")
ffield, atom_types = reax_io_obj.read_ffield()
write_ffield(input_path, output_path, ffield, atom_types)
new_reax_io_obj = ReaxReader(reax_output_dir_path)
new_ffield, new_atom_types = new_reax_io_obj.read_ffield()
assert get_param([2, 14, 1], new_ffield) == 1.8862
assert get_param([2, 14, 25], new_ffield) == -3.0614
assert get_param([3, 38, 4], new_ffield) == -0.6944
assert get_param([4, 19, 1], new_ffield) == 0.0987
assert get_param([5, 94, 5], new_ffield) == 2.0000
assert new_atom_types[3][37 - 1][0] == '3'
assert new_atom_types[3][37 - 1][1] == '14'
| [
"os.path.join",
"parametrization_clean.domain.utils.helpers.get_param",
"parametrization_clean.infrastructure.utils.reax_reader.write_ffield",
"pytest.mark.usefixtures",
"pytest.fixture",
"parametrization_clean.infrastructure.utils.reax_reader.ReaxReader"
] | [((254, 282), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (268, 282), False, 'import pytest\n'), ((284, 331), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""reax_output_dir_path"""'], {}), "('reax_output_dir_path')\n", (307, 331), False, 'import pytest\n'), ((554, 570), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (568, 570), False, 'import pytest\n'), ((572, 620), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""training_set_dir_path"""'], {}), "('training_set_dir_path')\n", (595, 620), False, 'import pytest\n'), ((709, 757), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""training_set_dir_path"""'], {}), "('training_set_dir_path')\n", (732, 757), False, 'import pytest\n'), ((2148, 2220), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""training_set_dir_path"""', '"""reax_output_dir_path"""'], {}), "('training_set_dir_path', 'reax_output_dir_path')\n", (2171, 2220), False, 'import pytest\n'), ((672, 705), 'parametrization_clean.infrastructure.utils.reax_reader.ReaxReader', 'ReaxReader', (['training_set_dir_path'], {}), '(training_set_dir_path)\n', (682, 705), False, 'from parametrization_clean.infrastructure.utils.reax_reader import ReaxReader, write_ffield, read_array\n'), ((2319, 2364), 'os.path.join', 'os.path.join', (['training_set_dir_path', '"""ffield"""'], {}), "(training_set_dir_path, 'ffield')\n", (2331, 2364), False, 'import os\n'), ((2383, 2427), 'os.path.join', 'os.path.join', (['reax_output_dir_path', '"""ffield"""'], {}), "(reax_output_dir_path, 'ffield')\n", (2395, 2427), False, 'import os\n'), ((2483, 2540), 'parametrization_clean.infrastructure.utils.reax_reader.write_ffield', 'write_ffield', (['input_path', 'output_path', 'ffield', 'atom_types'], {}), '(input_path, output_path, ffield, atom_types)\n', (2495, 2540), False, 'from parametrization_clean.infrastructure.utils.reax_reader import ReaxReader, write_ffield, read_array\n'), ((2563, 2595), 'parametrization_clean.infrastructure.utils.reax_reader.ReaxReader', 'ReaxReader', (['reax_output_dir_path'], {}), '(reax_output_dir_path)\n', (2573, 2595), False, 'from parametrization_clean.infrastructure.utils.reax_reader import ReaxReader, write_ffield, read_array\n'), ((440, 484), 'os.path.join', 'os.path.join', (['reax_output_dir_path', '"""ffield"""'], {}), "(reax_output_dir_path, 'ffield')\n", (452, 484), False, 'import os\n'), ((1057, 1086), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[2, 14, 1]', 'ffield'], {}), '([2, 14, 1], ffield)\n', (1066, 1086), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((1108, 1138), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[2, 14, 25]', 'ffield'], {}), '([2, 14, 25], ffield)\n', (1117, 1138), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((1161, 1190), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[3, 38, 4]', 'ffield'], {}), '([3, 38, 4], ffield)\n', (1170, 1190), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((1213, 1242), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[4, 19, 1]', 'ffield'], {}), '([4, 19, 1], ffield)\n', (1222, 1242), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((1264, 1293), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[5, 94, 5]', 'ffield'], {}), '([5, 94, 5], ffield)\n', (1273, 1293), False, 'from 
parametrization_clean.domain.utils.helpers import get_param\n'), ((2670, 2703), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[2, 14, 1]', 'new_ffield'], {}), '([2, 14, 1], new_ffield)\n', (2679, 2703), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((2725, 2759), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[2, 14, 25]', 'new_ffield'], {}), '([2, 14, 25], new_ffield)\n', (2734, 2759), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((2782, 2815), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[3, 38, 4]', 'new_ffield'], {}), '([3, 38, 4], new_ffield)\n', (2791, 2815), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((2838, 2871), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[4, 19, 1]', 'new_ffield'], {}), '([4, 19, 1], new_ffield)\n', (2847, 2871), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((2893, 2926), 'parametrization_clean.domain.utils.helpers.get_param', 'get_param', (['[5, 94, 5]', 'new_ffield'], {}), '([5, 94, 5], new_ffield)\n', (2902, 2926), False, 'from parametrization_clean.domain.utils.helpers import get_param\n'), ((505, 549), 'os.path.join', 'os.path.join', (['reax_output_dir_path', '"""ffield"""'], {}), "(reax_output_dir_path, 'ffield')\n", (517, 549), False, 'import os\n')] |
"""
Copyright 2017 <NAME> <<EMAIL>>
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
from pycam.Flow.history import merge_history_and_block_events
import pycam.Plugins
import pycam.workspace.data_models
from pycam.workspace.data_models import ToolpathFilter
class ExportSettings(pycam.Plugins.ListPluginBase):
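    """List plugin for managing toolpath export settings.

    Each export setting stores a set of GCode parameters; the plugin provides
    the list UI, the per-setting parameter controls and the GCode profile
    selection used when exporting toolpaths.
    """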
UI_FILE = "export_settings.ui"
DEPENDS = ["Toolpaths", "ParameterGroupManager"]
CATEGORIES = ["Toolpath", "Export"]
COLLECTION_ITEM_TYPE = pycam.workspace.data_models.ExportSettings
def setup(self):
if self.gui:
list_box = self.gui.get_object("ExportSettingsBox")
list_box.unparent()
self.core.register_ui("main", "Export Settings", list_box, weight=50)
self._gtk_handlers = []
modelview = self.gui.get_object("ExportSettingTable")
self.set_gtk_modelview(modelview)
self.register_model_update(
lambda: self.core.emit_event("export-settings-list-changed"))
for action, obj_name in ((self.ACTION_UP, "ExportSettingMoveUp"),
(self.ACTION_DOWN, "ExportSettingMoveDown"),
(self.ACTION_DELETE, "ExportSettingDelete"),
(self.ACTION_CLEAR, "ExportSettingDeleteAll")):
self.register_list_action_button(action, self.gui.get_object(obj_name))
self._gtk_handlers.append((self.gui.get_object("ExportSettingNew"), "clicked",
self._export_setting_new))
# details of export settings
self.item_details_container = self.gui.get_object("ExportSettingHandlingNotebook")
def clear_item_details_container():
for index in range(self.item_details_container.get_n_pages()):
self.item_details_container.remove_page(0)
def add_item_details_container(item, name):
self.item_details_container.append_page(item, self._gtk.Label(name))
self.core.register_ui_section("export_settings_handling", add_item_details_container,
clear_item_details_container)
# register UI sections for GCode settings
self.core.register_ui_section(
"gcode_preferences",
lambda item, name: self.core.register_ui("export_settings_handling", name, item),
lambda: self.core.clear_ui_section("export_settings_handling"))
general_widget = pycam.Gui.ControlsGTK.ParameterSection()
general_widget.get_widget().show()
self.core.register_ui_section("gcode_general_parameters", general_widget.add_widget,
general_widget.clear_widgets)
self.core.register_ui("gcode_preferences", "General", general_widget.get_widget())
self._profile_selector = pycam.Gui.ControlsGTK.InputChoice(
[], change_handler=lambda widget=None: self.core.emit_event(
"toolpath-profiles-selection-changed"))
profile_widget = self._profile_selector.get_widget()
profile_widget.show()
self.core.register_ui("gcode_general_parameters", "GCode Profile", profile_widget)
self.core.get("register_parameter_group")(
"toolpath_profile", changed_set_event="toolpath-profiles-selection-changed",
changed_set_list_event="toolpath-profiles-list-changed",
get_related_parameter_names=self._get_selected_profile_parameter_names)
# handle table changes
self._gtk_handlers.extend((
(modelview, "row-activated", "export-settings-changed"),
(self.gui.get_object("ExportSettingNameCell"), "edited", self.edit_item_name)))
# handle selection changes
selection = modelview.get_selection()
self._gtk_handlers.append((selection, "changed", "export-settings-selection-changed"))
# define cell renderers
self.gui.get_object("ExportSettingNameColumn").set_cell_data_func(
self.gui.get_object("ExportSettingNameCell"), self.render_item_name)
self._event_handlers = (
("toolpath-profiles-list-changed", self._update_profiles),
("export-settings-selection-changed", self._transfer_settings_to_controls),
("export-settings-selection-changed", "visual-item-updated"),
("export-settings-changed", self._transfer_settings_to_controls),
("export-settings-changed", self.force_gtk_modelview_refresh),
("export-settings-changed", "visual-item-updated"),
("export-settings-list-changed", self.force_gtk_modelview_refresh),
("export-settings-list-changed", "visual-item-updated"),
("export-settings-control-changed", self._transfer_controls_to_settings))
self.register_gtk_handlers(self._gtk_handlers)
self.register_event_handlers(self._event_handlers)
self._transfer_settings_to_controls()
self.core.set("export_settings", self)
return True
def teardown(self):
if self.gui and self._gtk:
self.unregister_event_handlers(self._event_handlers)
self.unregister_gtk_handlers(self._gtk_handlers)
self.core.unregister_ui("main", self.gui.get_object("ExportSettingsBox"))
self.core.get("unregister_parameter_group")("toolpath_profile")
self.core.set("export_settings", None)
def _export_setting_new(self, widget=None):
with merge_history_and_block_events(self.core):
params = {"gcode": self.core.get("get_default_parameter_values")("toolpath_profile")}
new_item = pycam.workspace.data_models.ExportSettings(None, data=params)
new_item.set_application_value("name", self.get_non_conflicting_name("Settings #%d"))
self.select(new_item)
def _transfer_settings_to_controls(self, widget=None):
"""transfer the content of the currently selected setting item to the related widgets"""
settings = self.get_selected()
if settings is None:
self.item_details_container.hide()
else:
with self.core.blocked_events({"export-settings-control-changed"}):
gcode_settings = settings.get_settings_by_type("gcode")
if not gcode_settings or (ToolpathFilter.SAFETY_HEIGHT.value in gcode_settings):
# it looks like a "milling" profile
profile = "milling"
else:
profile = "laser"
self.select_profile(profile)
self.core.get("set_parameter_values")("toolpath_profile", gcode_settings)
self.item_details_container.show()
def _transfer_controls_to_settings(self):
"""the value of a control related to export settings was changed by by the user
The changed value needs to be transferred to the currently selected export settings.
"""
settings = self.get_selected()
profile = self.get_selected_profile()
if settings and profile:
gcode_settings = settings.get_settings_by_type("gcode")
for key, value in self.core.get("get_parameter_values")("toolpath_profile").items():
gcode_settings[key] = value
settings.set_settings_by_type("gcode", gcode_settings)
def _update_profiles(self):
selected = self.get_selected_profile()
profiles = list(self.core.get("get_parameter_sets")("toolpath_profile").values())
choices = []
for profile in sorted(profiles, key=lambda item: item["weight"]):
choices.append((profile["label"], profile["name"]))
self._profile_selector.update_choices(choices)
if selected:
self.select_profile(selected)
elif profiles:
self.select_profile(None)
else:
pass
def _get_selected_profile_parameter_names(self):
profile = self.get_selected_profile()
return set() if profile is None else set(profile["parameters"].keys())
def get_selected_profile(self):
all_profiles = self.core.get("get_parameter_sets")("toolpath_profile")
current_name = self._profile_selector.get_value()
return all_profiles.get(current_name, None)
def select_profile(self, item=None):
if isinstance(item, str):
profile_name = item
elif item is None:
profile_name = None
else:
profile_name = item["name"]
self._profile_selector.set_value(profile_name)
| [
"pycam.Flow.history.merge_history_and_block_events"
] | [((6279, 6320), 'pycam.Flow.history.merge_history_and_block_events', 'merge_history_and_block_events', (['self.core'], {}), '(self.core)\n', (6309, 6320), False, 'from pycam.Flow.history import merge_history_and_block_events\n')] |
from typing import Any, Dict, List
from unittest import mock
import determined as det
from determined.exec import launch
@mock.patch("subprocess.Popen")
def do_test_launch(config: Dict[str, Any], cmd: List[str], mock_popen: mock.MagicMock) -> None:
mock_proc = mock.MagicMock()
mock_proc.wait.return_value = 99
mock_popen.return_value = mock_proc
assert launch.launch(det.ExperimentConfig(config)) == 99
mock_popen.assert_called_once_with(cmd)
def test_launch_trial() -> None:
entrypoint = "model_def:TrialClass"
config = {"entrypoint": entrypoint}
cmd = ["python3", "-m", "determined.launch.horovod", "--autohorovod", "--trial", entrypoint]
do_test_launch(config, cmd)
def test_launch_string() -> None:
entrypoint = "a b c"
config = {"entrypoint": entrypoint}
cmd = ["sh", "-c", entrypoint]
do_test_launch(config, cmd)
def test_launch_list() -> None:
entrypoint = ["a", "b", "c"]
config = {"entrypoint": entrypoint}
cmd = [*entrypoint]
do_test_launch(config, cmd)
| [
"determined.ExperimentConfig",
"unittest.mock.MagicMock",
"unittest.mock.patch"
] | [((125, 155), 'unittest.mock.patch', 'mock.patch', (['"""subprocess.Popen"""'], {}), "('subprocess.Popen')\n", (135, 155), False, 'from unittest import mock\n'), ((268, 284), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (282, 284), False, 'from unittest import mock\n'), ((387, 415), 'determined.ExperimentConfig', 'det.ExperimentConfig', (['config'], {}), '(config)\n', (407, 415), True, 'import determined as det\n')] |
import win32com.client as win32
import numpy as np
import gensim
def intersect(a, b):
return list(set(a) & set(b))
def remove_punctuation(word):
return word.lower().replace('.', '').replace(',', '').replace('\"', '').replace('(', '').replace(')', '').replace('-', '').replace('–', '')
def word():
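    """Suggest replacements for each word in the active Word selection.

    For every word in the selection (or the whole document when nothing is
    selected), candidate replacements are collected from Word2Vec neighbours,
    context-based predictions and MS Word's synonym lists; the intersections
    are typed back into Word as a {original|alternative|...} choice list.
    """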
msword = win32.gencache.EnsureDispatch('Word.Application')
selection = msword.Selection.Words
document = msword.Documents(1).Words
context = list(selection if len(selection) > 1 else document)
model = gensim.models.Word2Vec.load('korpus_finansowy')
text = ''
words_array = np.asarray(context)
    words_array_length = len(words_array)
for word_i, word in enumerate(words_array):
word_s = str(word)
word_string = word_s.rstrip()
word_string_lowercase = word_string.lower()
try:
            # gensim exposes the trained word vectors on model.wv
            similar = model.wv.similar_by_vector(word_string_lowercase, topn=100, restrict_vocab=None)
        except Exception:
            similar = []
similar_list = [remove_punctuation(x[0]) for x in similar if x[1] > 0.60]
        if word_i != 0 and word_i != words_array_length - 1:
local_context = [str(words_array[word_i-1]).rstrip(), str(words_array[word_i+1]).rstrip()]
predicted = model.predict_output_word(local_context, topn=100) or []
from_context = [remove_punctuation(w[0]) for w in predicted if w[1] > 0.000]
else:
from_context = []
replacement = ''
synonym_list = []
for i in range(1, word.SynonymInfo.MeaningCount + 1):
if i == 1:
synonym_list.append(word_string_lowercase)
for synonym in word.SynonymInfo.SynonymList(i):
synonym_list.append(str(synonym))
intersection_one = intersect(similar_list, synonym_list)
intersection_two = intersect(from_context, synonym_list)
intersection_three = intersect(similar_list[:10], from_context[:10])
joined = intersection_one + intersection_two + intersection_three
merged = [x for x in set(joined) if str(x).rstrip() != word_string_lowercase]
for n, w in enumerate(merged):
if n == 0:
replacement += '{' + word_string
w_cased = w.title() if word_string[0].isupper() else w
replacement += '|' + w_cased
if n == len(merged) - 1:
replacement += '}'
        if replacement and ' ' in word_s:
            replacement += ' '
text += replacement or word_s
msword.Selection.TypeText(text)
if __name__ == '__main__':
word()
| [
"gensim.models.Word2Vec.load",
"win32com.client.gencache.EnsureDispatch",
"numpy.asarray"
] | [((321, 370), 'win32com.client.gencache.EnsureDispatch', 'win32.gencache.EnsureDispatch', (['"""Word.Application"""'], {}), "('Word.Application')\n", (350, 370), True, 'import win32com.client as win32\n'), ((529, 576), 'gensim.models.Word2Vec.load', 'gensim.models.Word2Vec.load', (['"""korpus_finansowy"""'], {}), "('korpus_finansowy')\n", (556, 576), False, 'import gensim\n'), ((610, 629), 'numpy.asarray', 'np.asarray', (['context'], {}), '(context)\n', (620, 629), True, 'import numpy as np\n')] |
"""Views for the game moderator."""
from aiohttp_session import get_session
from planningpoker.routing import route
from planningpoker.random_id import get_random_id
from planningpoker.json import json_response, loads_or_empty
from planningpoker.cards import coerce_cards
from planningpoker.persistence.exceptions import (
RoundExists, NoSuchRound, RoundFinalized, NoActivePoll
)
from planningpoker.views.identity import client_owns_game, get_or_assign_id
@route('POST', '/new_game')
async def add_game(request, persistence):
"""
Create a new game.
The user will become the moderator of the game.
"""
json = await request.json(loads=loads_or_empty)
try:
available_cards = json['cards']
except KeyError:
return json_response({'error': 'No card set provided.'}, status=400)
moderator_name = json.get('moderator_name', '')
if moderator_name == '':
return json_response({'error': 'Moderator name not provided.'}, status=400)
if len(available_cards) < 2:
return json_response({'error': 'Cannot play with less than 2 cards.'}, status=400)
moderator_session = await get_session(request)
# Get or assign the moderator id:
moderator_id = get_or_assign_id(moderator_session)
game_id = get_random_id()
persistence.add_game(game_id, moderator_id, moderator_name, coerce_cards(available_cards))
return json_response({'game_id': game_id, 'game': persistence.serialize_game(game_id)})
@route('POST', '/game/{game_id}/new_round')
async def add_round(request, persistence):
"""Add a round to the game."""
game_id = request.match_info['game_id']
json = await request.json(loads=loads_or_empty)
try:
round_name = json['round_name']
except KeyError:
return json_response({'error': 'Must specify the name.'}, status=400)
if len(round_name) < 1:
return json_response({'error': 'The name must not be empty.'}, status=400)
user_session = await get_session(request)
if not client_owns_game(game_id, user_session, persistence):
return json_response({'error': 'The user is not the moderator of this game.'}, status=403)
try:
persistence.add_round(game_id, round_name)
except RoundExists:
return json_response({'error': 'Round with this name already exists.'}, status=409)
    # No point in catching NoSuchGame: we cannot sensibly handle a game that exists
    # in a session but not in the storage, so it is better to let this turn into a 500.
return json_response({'game': persistence.serialize_game(game_id)})
@route('POST', '/game/{game_id}/round/{round_name}/new_poll')
async def add_poll(request, persistence):
"""Add a poll to a round."""
game_id = request.match_info['game_id']
round_name = request.match_info['round_name']
user_session = await get_session(request)
if not client_owns_game(game_id, user_session, persistence):
return json_response({'error': 'The user is not the moderator of this game.'}, status=403)
try:
persistence.add_poll(game_id, round_name)
except NoSuchRound:
return json_response({'error': 'Round does not exist.'}, status=404)
except RoundFinalized:
return json_response({'error': 'This round is finalized.'}, status=409)
return json_response({'game': persistence.serialize_game(game_id)})
@route('POST', '/game/{game_id}/round/{round_name}/finalize')
async def finalize_round(request, persistence):
"""Finalize an owned round."""
game_id = request.match_info['game_id']
round_name = request.match_info['round_name']
user_session = await get_session(request)
if not client_owns_game(game_id, user_session, persistence):
return json_response({'error': 'The user is not the moderator of this game.'}, status=403)
try:
persistence.finalize_round(game_id, round_name)
except NoSuchRound:
return json_response({'error': 'Round does not exist.'}, status=404)
except NoActivePoll:
return json_response({'error': 'There is no active poll in this round.'}, status=404)
except RoundFinalized:
return json_response({'error': 'This round has already been finalized.'}, status=409)
return json_response({'game': persistence.serialize_game(game_id)})
| [
"planningpoker.json.json_response",
"planningpoker.routing.route",
"aiohttp_session.get_session",
"planningpoker.random_id.get_random_id",
"planningpoker.views.identity.client_owns_game",
"planningpoker.views.identity.get_or_assign_id",
"planningpoker.cards.coerce_cards"
] | [((464, 490), 'planningpoker.routing.route', 'route', (['"""POST"""', '"""/new_game"""'], {}), "('POST', '/new_game')\n", (469, 490), False, 'from planningpoker.routing import route\n'), ((1482, 1524), 'planningpoker.routing.route', 'route', (['"""POST"""', '"""/game/{game_id}/new_round"""'], {}), "('POST', '/game/{game_id}/new_round')\n", (1487, 1524), False, 'from planningpoker.routing import route\n'), ((2585, 2645), 'planningpoker.routing.route', 'route', (['"""POST"""', '"""/game/{game_id}/round/{round_name}/new_poll"""'], {}), "('POST', '/game/{game_id}/round/{round_name}/new_poll')\n", (2590, 2645), False, 'from planningpoker.routing import route\n'), ((3370, 3430), 'planningpoker.routing.route', 'route', (['"""POST"""', '"""/game/{game_id}/round/{round_name}/finalize"""'], {}), "('POST', '/game/{game_id}/round/{round_name}/finalize')\n", (3375, 3430), False, 'from planningpoker.routing import route\n'), ((1225, 1260), 'planningpoker.views.identity.get_or_assign_id', 'get_or_assign_id', (['moderator_session'], {}), '(moderator_session)\n', (1241, 1260), False, 'from planningpoker.views.identity import client_owns_game, get_or_assign_id\n'), ((1275, 1290), 'planningpoker.random_id.get_random_id', 'get_random_id', ([], {}), '()\n', (1288, 1290), False, 'from planningpoker.random_id import get_random_id\n'), ((922, 990), 'planningpoker.json.json_response', 'json_response', (["{'error': 'Moderator name not provided.'}"], {'status': '(400)'}), "({'error': 'Moderator name not provided.'}, status=400)\n", (935, 990), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((1040, 1115), 'planningpoker.json.json_response', 'json_response', (["{'error': 'Cannot play with less than 2 cards.'}"], {'status': '(400)'}), "({'error': 'Cannot play with less than 2 cards.'}, status=400)\n", (1053, 1115), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((1147, 1167), 'aiohttp_session.get_session', 'get_session', (['request'], {}), '(request)\n', (1158, 1167), False, 'from aiohttp_session import get_session\n'), ((1355, 1384), 'planningpoker.cards.coerce_cards', 'coerce_cards', (['available_cards'], {}), '(available_cards)\n', (1367, 1384), False, 'from planningpoker.cards import coerce_cards\n'), ((1892, 1959), 'planningpoker.json.json_response', 'json_response', (["{'error': 'The name must not be empty.'}"], {'status': '(400)'}), "({'error': 'The name must not be empty.'}, status=400)\n", (1905, 1959), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((1986, 2006), 'aiohttp_session.get_session', 'get_session', (['request'], {}), '(request)\n', (1997, 2006), False, 'from aiohttp_session import get_session\n'), ((2018, 2070), 'planningpoker.views.identity.client_owns_game', 'client_owns_game', (['game_id', 'user_session', 'persistence'], {}), '(game_id, user_session, persistence)\n', (2034, 2070), False, 'from planningpoker.views.identity import client_owns_game, get_or_assign_id\n'), ((2087, 2174), 'planningpoker.json.json_response', 'json_response', (["{'error': 'The user is not the moderator of this game.'}"], {'status': '(403)'}), "({'error': 'The user is not the moderator of this game.'},\n status=403)\n", (2100, 2174), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((2840, 2860), 'aiohttp_session.get_session', 'get_session', (['request'], {}), '(request)\n', (2851, 2860), False, 'from aiohttp_session import get_session\n'), ((2873, 2925), 'planningpoker.views.identity.client_owns_game', 
'client_owns_game', (['game_id', 'user_session', 'persistence'], {}), '(game_id, user_session, persistence)\n', (2889, 2925), False, 'from planningpoker.views.identity import client_owns_game, get_or_assign_id\n'), ((2942, 3029), 'planningpoker.json.json_response', 'json_response', (["{'error': 'The user is not the moderator of this game.'}"], {'status': '(403)'}), "({'error': 'The user is not the moderator of this game.'},\n status=403)\n", (2955, 3029), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((3633, 3653), 'aiohttp_session.get_session', 'get_session', (['request'], {}), '(request)\n', (3644, 3653), False, 'from aiohttp_session import get_session\n'), ((3666, 3718), 'planningpoker.views.identity.client_owns_game', 'client_owns_game', (['game_id', 'user_session', 'persistence'], {}), '(game_id, user_session, persistence)\n', (3682, 3718), False, 'from planningpoker.views.identity import client_owns_game, get_or_assign_id\n'), ((3735, 3822), 'planningpoker.json.json_response', 'json_response', (["{'error': 'The user is not the moderator of this game.'}"], {'status': '(403)'}), "({'error': 'The user is not the moderator of this game.'},\n status=403)\n", (3748, 3822), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((763, 824), 'planningpoker.json.json_response', 'json_response', (["{'error': 'No card set provided.'}"], {'status': '(400)'}), "({'error': 'No card set provided.'}, status=400)\n", (776, 824), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((1785, 1847), 'planningpoker.json.json_response', 'json_response', (["{'error': 'Must specify the name.'}"], {'status': '(400)'}), "({'error': 'Must specify the name.'}, status=400)\n", (1798, 1847), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((2271, 2347), 'planningpoker.json.json_response', 'json_response', (["{'error': 'Round with this name already exists.'}"], {'status': '(409)'}), "({'error': 'Round with this name already exists.'}, status=409)\n", (2284, 2347), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((3125, 3186), 'planningpoker.json.json_response', 'json_response', (["{'error': 'Round does not exist.'}"], {'status': '(404)'}), "({'error': 'Round does not exist.'}, status=404)\n", (3138, 3186), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((3229, 3293), 'planningpoker.json.json_response', 'json_response', (["{'error': 'This round is finalized.'}"], {'status': '(409)'}), "({'error': 'This round is finalized.'}, status=409)\n", (3242, 3293), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((3924, 3985), 'planningpoker.json.json_response', 'json_response', (["{'error': 'Round does not exist.'}"], {'status': '(404)'}), "({'error': 'Round does not exist.'}, status=404)\n", (3937, 3985), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((4026, 4104), 'planningpoker.json.json_response', 'json_response', (["{'error': 'There is no active poll in this round.'}"], {'status': '(404)'}), "({'error': 'There is no active poll in this round.'}, status=404)\n", (4039, 4104), False, 'from planningpoker.json import json_response, loads_or_empty\n'), ((4147, 4225), 'planningpoker.json.json_response', 'json_response', (["{'error': 'This round has already been finalized.'}"], {'status': '(409)'}), "({'error': 'This round has already been finalized.'}, status=409)\n", (4160, 4225), False, 'from planningpoker.json import 
json_response, loads_or_empty\n')] |
"""Opencv keypoint matching with different algorithm for detector and descriptors.
Not all combinations of detectors and descriptors work well together.
"""
import cv2
import matplotlib.pyplot as plt
import time
img1 = cv2.imread(r'imgs/1.png', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread(r'imgs/8.png', cv2.IMREAD_GRAYSCALE)
start1 = time.time()
kp_detector = cv2.BRISK_create()
#kp_detector = cv2.AgastFeatureDetector_create()
#kp_detector = cv2.KAZE_create()
#kp_detector = cv2.AKAZE_create()
#kp_detector = cv2.ORB_create()
#kp_detector = cv2.FastFeatureDetector_create()
#kp_detector = cv2.GFTTDetector_create()
descriptor_extractor = cv2.SIFT_create()
#descriptor_extractor = cv2.KAZE_create()
#descriptor_extractor = cv2.AKAZE_create()
keypoints1 = kp_detector.detect(img1)
keypoints2 = kp_detector.detect(img2)
# print(keypoints1)
# print(keypoints2)
keypoints1, descriptors1 = descriptor_extractor.compute(img1, keypoints1)
keypoints2, descriptors2 = descriptor_extractor.compute(img2, keypoints2)
# print(img1_kp)
# print(img1_desc)
# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE,
trees = 5)
search_params = dict(checks = 50)
# Create FLANN object
FLANN = cv2.FlannBasedMatcher(indexParams = index_params,
searchParams = search_params)
# Matching descriptor vectors using FLANN Matcher
matches = FLANN.knnMatch(queryDescriptors = descriptors1,
trainDescriptors = descriptors2,
k = 2)
# Lowe's ratio test
ratio_thresh = 0.7
# "Good" matches
good_matches = []
# Filter matches
for m, n in matches:
if m.distance < ratio_thresh * n.distance:
good_matches.append(m)
print("GOOD MATCHES", len(good_matches))
end1 = time.time()
print("FPS:", (1. / float(end1 - start1)))
#img3 = cv2.drawMatchesKnn(img1,img1_kp,img2,img2_kp,matches,None,**draw_params)
output = cv2.drawMatches(img1 = img1,
keypoints1 = keypoints1,
img2 = img2,
keypoints2 = keypoints2,
matches1to2 = good_matches,
outImg = None,
flags = cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(output)
plt.show()
| [
"matplotlib.pyplot.imshow",
"cv2.SIFT_create",
"cv2.FlannBasedMatcher",
"time.time",
"cv2.drawMatches",
"cv2.BRISK_create",
"cv2.imread",
"matplotlib.pyplot.show"
] | [((222, 268), 'cv2.imread', 'cv2.imread', (['"""imgs/1.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('imgs/1.png', cv2.IMREAD_GRAYSCALE)\n", (232, 268), False, 'import cv2\n'), ((277, 323), 'cv2.imread', 'cv2.imread', (['"""imgs/8.png"""', 'cv2.IMREAD_GRAYSCALE'], {}), "('imgs/8.png', cv2.IMREAD_GRAYSCALE)\n", (287, 323), False, 'import cv2\n'), ((335, 346), 'time.time', 'time.time', ([], {}), '()\n', (344, 346), False, 'import time\n'), ((362, 380), 'cv2.BRISK_create', 'cv2.BRISK_create', ([], {}), '()\n', (378, 380), False, 'import cv2\n'), ((642, 659), 'cv2.SIFT_create', 'cv2.SIFT_create', ([], {}), '()\n', (657, 659), False, 'import cv2\n'), ((1243, 1318), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', ([], {'indexParams': 'index_params', 'searchParams': 'search_params'}), '(indexParams=index_params, searchParams=search_params)\n', (1264, 1318), False, 'import cv2\n'), ((1796, 1807), 'time.time', 'time.time', ([], {}), '()\n', (1805, 1807), False, 'import time\n'), ((1943, 2126), 'cv2.drawMatches', 'cv2.drawMatches', ([], {'img1': 'img1', 'keypoints1': 'keypoints1', 'img2': 'img2', 'keypoints2': 'keypoints2', 'matches1to2': 'good_matches', 'outImg': 'None', 'flags': 'cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS'}), '(img1=img1, keypoints1=keypoints1, img2=img2, keypoints2=\n keypoints2, matches1to2=good_matches, outImg=None, flags=cv2.\n DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n', (1958, 2126), False, 'import cv2\n'), ((2281, 2299), 'matplotlib.pyplot.imshow', 'plt.imshow', (['output'], {}), '(output)\n', (2291, 2299), True, 'import matplotlib.pyplot as plt\n'), ((2301, 2311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2309, 2311), True, 'import matplotlib.pyplot as plt\n')] |
import unittest
import rcbu.client.restore_report as restore_report
import tests.mock.report as mock_report
from rcbu.common.exceptions import RestoreFailed
class TestRestoreReport(unittest.TestCase):
def setUp(self):
mock = mock_report.restore_report(errors=['explosions'])
self.report = restore_report.from_dict(1, mock)
def test_repr_matches_expected(self):
form = ('<RestoreReport id:{0} state:{1} ok:{2} started:{3} '
'duration:{4} #errors:{5} bytes:{6}>')
self.assertEqual(repr(self.report),
form.format(self.report.id, self.report.state,
self.report.ok,
self.report.started.isoformat(),
'0:00:00', len(self.report.errors),
self.report.bytes_restored))
def test_id_matches_expected(self):
self.assertEqual(self.report.id, 1)
def test_errors_match_expected(self):
self.assertEqual(len(self.report.errors), 1)
self.assertEqual(self.report.errors[0], 'explosions')
def test_ok_matches_expected(self):
self.assertEqual(self.report.ok, False)
def test_raises_if_not_restorable(self):
with self.assertRaises(RestoreFailed):
self.report.raise_if_not_ok()
def test_restored_matches_expected(self):
self.assertEqual(self.report.files_restored, 0)
self.assertEqual(self.report.bytes_restored, 2 * 2**30)
def test_destination_matches_expected(self):
self.assertEqual(self.report.destination, '/mock 1')
| [
"tests.mock.report.restore_report",
"rcbu.client.restore_report.from_dict"
] | [((240, 289), 'tests.mock.report.restore_report', 'mock_report.restore_report', ([], {'errors': "['explosions']"}), "(errors=['explosions'])\n", (266, 289), True, 'import tests.mock.report as mock_report\n'), ((312, 345), 'rcbu.client.restore_report.from_dict', 'restore_report.from_dict', (['(1)', 'mock'], {}), '(1, mock)\n', (336, 345), True, 'import rcbu.client.restore_report as restore_report\n')] |
import unittest
import os
from pathlib import Path
import shutil
import labinform.datasafe.datasafe as datasafe
class TestDatasafe(unittest.TestCase):
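    """Interface smoke tests for Datasafe.

    Checks that the public methods exist, are callable and return the expected
    basic types; behavioural tests live in the more specific test classes below.
    """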
def setUp(self):
self.datasafe = datasafe.Datasafe()
def test_instantiate_class(self):
pass
def test_has_generate_method(self):
self.assertTrue(hasattr(self.datasafe, 'generate'))
self.assertTrue(callable(self.datasafe.generate))
@unittest.skip
def test_call_generate_with_parameters(self):
self.datasafe.generate("cwepr", "sa42")
@unittest.skip
def test_generate_returns_loi(self):
loi = self.datasafe.generate("cwepr", "sa42")
self.assertEqual("42.1001/ds/cwepr/sa42/2/data/raw", loi)
def test_has_push_method(self):
self.assertTrue(hasattr(self.datasafe, 'push'))
self.assertTrue(callable(self.datasafe.push))
@unittest.skip
def test_call_push_with_parameters(self):
self.datasafe.push("", "42.1001/ds/cwepr/sa42/01/data/raw")
def test_has_pull_method(self):
self.assertTrue(hasattr(self.datasafe, 'pull'))
self.assertTrue(callable(self.datasafe.pull))
def test_call_pull_with_parameters(self):
self.datasafe.pull("42.1001/ds/cwepr/sa42/01/data/raw")
def test_pull_returns_data(self):
data = self.datasafe.pull("42.1001/ds/cwepr/sa42/01/data/raw")
self.assertEqual(str, type(data))
def test_has_index_method(self):
self.assertTrue(hasattr(self.datasafe, 'index'))
self.assertTrue(callable(self.datasafe.index))
def test_call_index_with_parameters(self):
self.datasafe.index("42.1001/ds/cwepr/sa42/01/data/raw")
def test_index_returns_dict(self):
returnvalue = self.datasafe.index()
self.assertTrue(type(returnvalue) == dict)
def test_has_checksum_method(self):
self.assertTrue(hasattr(self.datasafe, 'checksum'))
self.assertTrue(callable(self.datasafe.checksum))
def test_call_checksum_with_parameters(self):
self.datasafe.checksum("42.1001/ds/cwepr/sa42/01/data/raw")
def test_checksum_returns_str(self):
checksum = self.datasafe.checksum("42.1001/ds/cwepr/sa42/01/data/raw")
self.assertEqual(str, type(checksum))
def test_has_moveto_method(self):
self.assertTrue(hasattr(self.datasafe, 'moveto'))
self.assertTrue(callable(self.datasafe.moveto))
def test_call_moveto_with_parameters(self):
self.datasafe.moveto("", "cwepr", "sa42")
def test_moveto_returns_bool(self):
worked = self.datasafe.moveto("", "cwepr", "sa42")
self.assertEqual(bool, type(worked))
def test_has_set_path_method(self):
self.assertTrue(hasattr(self.datasafe, 'set_path'))
self.assertTrue(callable(self.datasafe.set_path))
def test_call_set_path_with_parameters(self):
try:
self.datasafe.set_path("")
except datasafe.NoSuchDirectoryError:
pass
def test_has_verify_path_method(self):
self.assertTrue(hasattr(self.datasafe, 'verify_path'))
self.assertTrue(callable(self.datasafe.verify_path))
def test_call_verify_path_with_parameters(self):
self.datasafe.verify_path("")
def test_verify_path_returns_bool(self):
path_okay = self.datasafe.verify_path("")
self.assertEqual(bool, type(path_okay))
def test_has_verify_own_path(self):
self.assertTrue(hasattr(self.datasafe, 'verify_own_path'))
self.assertTrue(callable(self.datasafe.verify_own_path))
def test_verify_own_path_returns_bool(self):
path_okay = self.datasafe.verify_own_path()
self.assertEqual(bool, type(path_okay))
def test_has_loi_to_path_method(self):
self.assertTrue(hasattr(self.datasafe, 'loi_to_path'))
self.assertTrue(callable(self.datasafe.loi_to_path))
def test_call_loi_to_path_with_parameters(self):
self.datasafe.loi_to_path("42.1001/ds/cwepr/sa42/01/data/raw")
def test_has_add_directory_method(self):
self.assertTrue(hasattr(self.datasafe, 'add_directory'))
self.assertTrue(callable(self.datasafe.add_directory))
@unittest.skip
def test_call_add_directory_with_parameters(self):
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/ds"
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
self.datasafe.set_path(self.target_directory)
self.datasafe.add_directory(self.datasafe.path + "cwepr")
def test_has_dir_empty_method(self):
self.assertTrue(hasattr(self.datasafe, 'dir_empty'))
self.assertTrue(callable(self.datasafe.dir_empty))
def test_call_dir_empty_with_parameters(self):
self.datasafe.dir_empty(os.path.dirname(__file__))
def test_dir_empty_returns_bool(self):
dir_empty = self.datasafe.dir_empty(os.path.dirname(__file__))
self.assertEqual(bool, type(dir_empty))
def test_has_increment_method(self):
self.assertTrue(hasattr(self.datasafe, 'increment'))
self.assertTrue(callable(self.datasafe.increment))
def test_call_increment_with_parameters(self):
self.datasafe.increment(1)
def test_increment_returns_int(self):
incremented = self.datasafe.increment(1)
self.assertEqual(int, type(incremented))
def test_has_find_highest_method(self):
self.assertTrue(hasattr(self.datasafe, 'find_highest'))
self.assertTrue(callable(self.datasafe.find_highest))
def test_call_find_highest_with_parameters(self):
try:
self.datasafe.find_highest("")
except datasafe.NoSuchDirectoryError:
pass
#def test_find_highest_returns_int(self):
# highest = self.datasafe.find_highest("")
# self.assertEqual(int, type(highest))
def test_has_has_dir_method(self):
self.assertTrue(hasattr(self.datasafe, 'has_dir'))
self.assertTrue(callable(self.datasafe.has_dir))
def test_call_has_dir_with_parameters(self):
self.datasafe.has_dir("")
def test_has_dir_returns_bool(self):
hasdir = self.datasafe.has_dir("")
self.assertEqual(bool, type(hasdir))
def test_has_make_checksum_for_path_method(self):
self.assertTrue(hasattr(self.datasafe, 'make_checksum_for_path'))
self.assertTrue(callable(self.datasafe.make_checksum_for_path))
@unittest.skip
def test_call_make_checksum_for_path_with_parameters(self):
self.datasafe.make_checksum_for_path("")
@unittest.skip
def test_make_checksum_for_path_returns_str(self):
checksum = self.datasafe.make_checksum_for_path("")
self.assertEqual(str, type(checksum))
class TestEmptyDir(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
def tearDown(self):
pass
def test_dir_empty_on_non_empty_dir(self):
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
dir_empty = self.datasafe.dir_empty(path)
self.assertEqual(False, dir_empty)
def test_has_dir_on_real_dir(self):
path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
hasdir = self.datasafe.has_dir(path)
self.assertEqual(True, hasdir)
def test_has_dir_fail(self):
hasdir = self.datasafe.has_dir("")
self.assertEqual(False, hasdir)
class TestGenerate(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/ds"
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
self.datasafe.set_path(self.target_directory)
def tearDown(self):
shutil.rmtree(self.target_directory)
def test_increment(self):
incremented = self.datasafe.increment(1)
self.assertEqual(2, incremented)
def test_generate(self):
expected_loi = "42.1001/ds/cwepr/sa42/1/data/raw"
real_loi = self.datasafe.generate(experiment="cwepr", sample_id="sa42")
self.assertEqual(expected_loi, real_loi)
path_complete = os.path.join(self.datasafe.path, "cwepr/sa42/1/data/raw")
self.assertTrue(os.path.isdir(path_complete))
class TestSetPath(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/ds"
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
def tearDown(self):
shutil.rmtree(self.target_directory)
def test_set_path(self):
self.datasafe.set_path(self.target_directory)
self.assertEqual(True, hasattr(self.datasafe, 'path'))
self.assertEqual(str, type(self.datasafe.path))
def test_set_path_raises_error_for_incorrect_path(self):
with self.assertRaises(datasafe.NoSuchDirectoryError):
self.datasafe.set_path("")
class TestPathManipulation(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/ds"
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
def tearDown(self):
shutil.rmtree(self.target_directory)
def test_verify_path_returns_false_for_incorrect_path(self):
path_okay = self.datasafe.verify_path("")
self.assertEqual(False, path_okay)
def test_verify_path_returns_true_for_correct_path(self):
datasafe_path = self.target_directory
path_okay = self.datasafe.verify_path(datasafe_path)
self.assertEqual(True, path_okay)
def test_verify_own_path_returns_false_when_path_not_set(self):
path_okay = self.datasafe.verify_own_path()
self.assertEqual(False, path_okay)
def test_verify_own_path_returns_true_when_path_is_set(self):
self.datasafe.set_path(self.target_directory)
path_okay = self.datasafe.verify_own_path()
self.assertEqual(True, path_okay)
class TestLoiToPath(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/ds"
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
self.datasafe.set_path(self.target_directory)
def tearDown(self):
shutil.rmtree(self.target_directory)
def test_correct_path_from_loi(self):
path_correct = self.target_directory + "/cwepr/sa42/01/data/raw"
path_experimental = self.datasafe.loi_to_path(
"42.1001/ds/cwepr/sa42/01/data/raw")
self.assertEqual(path_correct, path_experimental)
def test_loi_to_path_raises_error_for_incorrect_loi(self):
with self.assertRaises(datasafe.IncorrectLoiError):
self.datasafe.loi_to_path("42.1001//raw")
class TestAddDirectory(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/ds"
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
self.datasafe.set_path(self.target_directory)
def tearDown(self):
shutil.rmtree(self.target_directory)
def test_directory_is_added(self):
path_complete = self.datasafe.path + "/cwepr"
self.datasafe.add_directory(path_complete)
self.assertTrue(os.path.isdir(path_complete))
class TestFindHighest(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/ds/1"
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
self.datasafe.set_path(self.target_directory)
def tearDown(self):
shutil.rmtree(self.target_directory[:-1])
def test_find_highest(self):
highest = self.datasafe.find_highest(self.target_directory[:-1])
self.assertEqual(1, highest)
class TestChecksumPath(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.target_directory = top_level_directory + "/datasafe-test/cwepr/sa571/1"
self.target_file = self.target_directory + "/data/raw/Manifest.yaml"
def test_correct_checksum_for_yaml(self):
md5 = self.datasafe.make_checksum_for_path(self.target_file)
self.assertEqual("48aa739357f70bd7694fcf0ebc3a2e24", md5)
class TestChecksum(unittest.TestCase):
pass
class TestPush(unittest.TestCase):
def setUp(self):
self.datasafe = datasafe.Datasafe()
top_level_directory = os.path.dirname(os.path.dirname(os.path.dirname(
os.path.dirname(__file__))))
self.common_directory = top_level_directory + "/datasafe-test"
self.target_directory = top_level_directory + "/datasafe-test/ds"
print(self.target_directory)
if not os.path.exists(self.target_directory):
os.makedirs(self.target_directory)
self.datasafe.set_path(self.target_directory)
self.target_file = self.common_directory + "/cwepr/sa571/1/data/raw/Manifest.yaml"
def tearDown(self):
shutil.rmtree(self.target_directory)
def test_push(self):
loi = self.datasafe.generate(experiment="cwepr", sample_id="sa571")
self.assertEqual(True, self.datasafe.has_dir(self.datasafe.path + "/cwepr/sa571/1/data/raw"))
self.datasafe.push(self.target_file, loi)
final_path = self.datasafe.path + "/cwepr/sa571/1/data/raw/Manifest.yaml"
self.assertEqual(True, os.path.isfile(final_path))
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"labinform.datasafe.datasafe.Datasafe",
"os.path.isfile",
"os.path.dirname",
"os.path.isdir",
"shutil.rmtree"
] | [((199, 218), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (216, 218), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((6954, 6973), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (6971, 6973), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((7631, 7650), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (7648, 7650), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((8033, 8069), 'shutil.rmtree', 'shutil.rmtree', (['self.target_directory'], {}), '(self.target_directory)\n', (8046, 8069), False, 'import shutil\n'), ((8432, 8489), 'os.path.join', 'os.path.join', (['self.datasafe.path', '"""cwepr/sa42/1/data/raw"""'], {}), "(self.datasafe.path, 'cwepr/sa42/1/data/raw')\n", (8444, 8489), False, 'import os\n'), ((8629, 8648), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (8646, 8648), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((8977, 9013), 'shutil.rmtree', 'shutil.rmtree', (['self.target_directory'], {}), '(self.target_directory)\n', (8990, 9013), False, 'import shutil\n'), ((9475, 9494), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (9492, 9494), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((9823, 9859), 'shutil.rmtree', 'shutil.rmtree', (['self.target_directory'], {}), '(self.target_directory)\n', (9836, 9859), False, 'import shutil\n'), ((10697, 10716), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (10714, 10716), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((11099, 11135), 'shutil.rmtree', 'shutil.rmtree', (['self.target_directory'], {}), '(self.target_directory)\n', (11112, 11135), False, 'import shutil\n'), ((11682, 11701), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (11699, 11701), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((12084, 12120), 'shutil.rmtree', 'shutil.rmtree', (['self.target_directory'], {}), '(self.target_directory)\n', (12097, 12120), False, 'import shutil\n'), ((12409, 12428), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (12426, 12428), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((12813, 12854), 'shutil.rmtree', 'shutil.rmtree', (['self.target_directory[:-1]'], {}), '(self.target_directory[:-1])\n', (12826, 12854), False, 'import shutil\n'), ((13089, 13108), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (13106, 13108), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((13705, 13724), 'labinform.datasafe.datasafe.Datasafe', 'datasafe.Datasafe', ([], {}), '()\n', (13722, 13724), True, 'import labinform.datasafe.datasafe as datasafe\n'), ((14306, 14342), 'shutil.rmtree', 'shutil.rmtree', (['self.target_directory'], {}), '(self.target_directory)\n', (14319, 14342), False, 'import shutil\n'), ((4463, 4500), 'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (4477, 4500), False, 'import os\n'), ((4514, 4548), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (4525, 4548), False, 'import os\n'), ((4915, 4940), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4930, 4940), False, 'import os\n'), ((5030, 5055), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5045, 5055), False, 'import os\n'), ((7860, 7897), 
'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (7874, 7897), False, 'import os\n'), ((7911, 7945), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (7922, 7945), False, 'import os\n'), ((8514, 8542), 'os.path.isdir', 'os.path.isdir', (['path_complete'], {}), '(path_complete)\n', (8527, 8542), False, 'import os\n'), ((8858, 8895), 'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (8872, 8895), False, 'import os\n'), ((8909, 8943), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (8920, 8943), False, 'import os\n'), ((9704, 9741), 'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (9718, 9741), False, 'import os\n'), ((9755, 9789), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (9766, 9789), False, 'import os\n'), ((10926, 10963), 'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (10940, 10963), False, 'import os\n'), ((10977, 11011), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (10988, 11011), False, 'import os\n'), ((11911, 11948), 'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (11925, 11948), False, 'import os\n'), ((11962, 11996), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (11973, 11996), False, 'import os\n'), ((12290, 12318), 'os.path.isdir', 'os.path.isdir', (['path_complete'], {}), '(path_complete)\n', (12303, 12318), False, 'import os\n'), ((12640, 12677), 'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (12654, 12677), False, 'import os\n'), ((12691, 12725), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (12702, 12725), False, 'import os\n'), ((14042, 14079), 'os.path.exists', 'os.path.exists', (['self.target_directory'], {}), '(self.target_directory)\n', (14056, 14079), False, 'import os\n'), ((14093, 14127), 'os.makedirs', 'os.makedirs', (['self.target_directory'], {}), '(self.target_directory)\n', (14104, 14127), False, 'import os\n'), ((14710, 14736), 'os.path.isfile', 'os.path.isfile', (['final_path'], {}), '(final_path)\n', (14724, 14736), False, 'import os\n'), ((7107, 7132), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7122, 7132), False, 'import os\n'), ((7316, 7341), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7331, 7341), False, 'import os\n'), ((4345, 4370), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (4360, 4370), False, 'import os\n'), ((7742, 7767), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7757, 7767), False, 'import os\n'), ((8740, 8765), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8755, 8765), False, 'import os\n'), ((9586, 9611), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (9601, 9611), False, 'import os\n'), ((10808, 10833), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (10823, 10833), False, 'import os\n'), ((11793, 11818), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (11808, 11818), False, 'import os\n'), ((12520, 12545), 'os.path.dirname', 'os.path.dirname', 
(['__file__'], {}), '(__file__)\n', (12535, 12545), False, 'import os\n'), ((13200, 13225), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13215, 13225), False, 'import os\n'), ((13816, 13841), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (13831, 13841), False, 'import os\n')] |
import jax.numpy as np
import numpy as onp
from jaxkern.kernels import rbf_kernel, covariance_matrix, gram
from jaxkern.utils import centering
from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn
from sklearn.preprocessing import KernelCenterer
onp.random.seed(123)
def test_rbf_kernel_gram_1d():
rng = onp.random.RandomState(123)
n_samples = 100
X = rng.rand(n_samples)
# X
K_sk = rbf_sklearn(X[:, np.newaxis], X[:, np.newaxis], gamma=1.0)
K = gram(rbf_kernel, {"gamma": 1.0}, X, X)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K))
Y = 10 * X + 0.1 * rng.randn(n_samples)
# Y
K_sk = rbf_sklearn(Y[:, np.newaxis], Y[:, np.newaxis], gamma=1.0)
K = gram(rbf_kernel, {"gamma": 1.0}, Y, Y)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K))
# X AND Y
K_sk = rbf_sklearn(X[:, np.newaxis], Y[:, np.newaxis], gamma=1.0)
K = gram(rbf_kernel, {"gamma": 1.0}, X, Y)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K))
def test_rbf_kernel_gram_2d():
rng = onp.random.RandomState(123)
n_samples, n_features = 100, 2
X = onp.random.rand(n_samples, n_features)
# sklearn rbf_kernel
K_sk = rbf_sklearn(X, X, gamma=1.0)
K = covariance_matrix(rbf_kernel, {"gamma": 1.0}, X, X)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K))
Y = 10 * X + 0.1 * rng.randn(n_samples, n_features)
# sklearn rbf_kernel
K_sk = rbf_sklearn(Y, Y, gamma=1.0)
K = gram(rbf_kernel, {"gamma": 1.0}, Y, Y)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K))
def test_rbf_kernel_cov_1d():
X = onp.random.rand(100)
# sklearn rbf_kernel
K_sk = rbf_sklearn(X[:, np.newaxis], X[:, np.newaxis], gamma=1.0)
K = covariance_matrix(rbf_kernel, {"gamma": 1.0}, X, X)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K))
def test_rbf_kernel_cov_2d():
X = onp.random.rand(100, 2)
# sklearn rbf_kernel
K_sk = rbf_sklearn(X, X, gamma=1.0)
K = gram(rbf_kernel, {"gamma": 1.0}, X, X)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K))
def test_centering():
n_samples = 100
X = onp.random.rand(n_samples)
# sklearn rbf_kernel
K_sk = rbf_sklearn(X[:, np.newaxis], X[:, np.newaxis], gamma=1.0)
K_sk = KernelCenterer().fit_transform(K_sk)
K = gram(rbf_kernel, {"gamma": 1.0}, X, X)
# H = np.eye(n_samples) - (1.0 / n_samples) * np.ones((n_samples, n_samples))
# K = np.einsum("ij,jk,kl->il", H, K, H)
# K = np.dot(H, np.dot(K, H))
K = centering(K)
onp.testing.assert_array_almost_equal(K_sk, onp.array(K)) | [
"numpy.random.rand",
"sklearn.metrics.pairwise.rbf_kernel",
"sklearn.preprocessing.KernelCenterer",
"numpy.array",
"numpy.random.seed",
"jaxkern.kernels.gram",
"jaxkern.utils.centering",
"jaxkern.kernels.covariance_matrix",
"numpy.random.RandomState"
] | [((257, 277), 'numpy.random.seed', 'onp.random.seed', (['(123)'], {}), '(123)\n', (272, 277), True, 'import numpy as onp\n'), ((322, 349), 'numpy.random.RandomState', 'onp.random.RandomState', (['(123)'], {}), '(123)\n', (344, 349), True, 'import numpy as onp\n'), ((419, 477), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['X[:, np.newaxis]', 'X[:, np.newaxis]'], {'gamma': '(1.0)'}), '(X[:, np.newaxis], X[:, np.newaxis], gamma=1.0)\n', (430, 477), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((487, 525), 'jaxkern.kernels.gram', 'gram', (['rbf_kernel', "{'gamma': 1.0}", 'X', 'X'], {}), "(rbf_kernel, {'gamma': 1.0}, X, X)\n", (491, 525), False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((654, 712), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['Y[:, np.newaxis]', 'Y[:, np.newaxis]'], {'gamma': '(1.0)'}), '(Y[:, np.newaxis], Y[:, np.newaxis], gamma=1.0)\n', (665, 712), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((722, 760), 'jaxkern.kernels.gram', 'gram', (['rbf_kernel', "{'gamma': 1.0}", 'Y', 'Y'], {}), "(rbf_kernel, {'gamma': 1.0}, Y, Y)\n", (726, 760), False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((850, 908), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['X[:, np.newaxis]', 'Y[:, np.newaxis]'], {'gamma': '(1.0)'}), '(X[:, np.newaxis], Y[:, np.newaxis], gamma=1.0)\n', (861, 908), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((918, 956), 'jaxkern.kernels.gram', 'gram', (['rbf_kernel', "{'gamma': 1.0}", 'X', 'Y'], {}), "(rbf_kernel, {'gamma': 1.0}, X, Y)\n", (922, 956), False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((1064, 1091), 'numpy.random.RandomState', 'onp.random.RandomState', (['(123)'], {}), '(123)\n', (1086, 1091), True, 'import numpy as onp\n'), ((1135, 1173), 'numpy.random.rand', 'onp.random.rand', (['n_samples', 'n_features'], {}), '(n_samples, n_features)\n', (1150, 1173), True, 'import numpy as onp\n'), ((1211, 1239), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['X', 'X'], {'gamma': '(1.0)'}), '(X, X, gamma=1.0)\n', (1222, 1239), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((1249, 1300), 'jaxkern.kernels.covariance_matrix', 'covariance_matrix', (['rbf_kernel', "{'gamma': 1.0}", 'X', 'X'], {}), "(rbf_kernel, {'gamma': 1.0}, X, X)\n", (1266, 1300), False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((1458, 1486), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['Y', 'Y'], {'gamma': '(1.0)'}), '(Y, Y, gamma=1.0)\n', (1469, 1486), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((1496, 1534), 'jaxkern.kernels.gram', 'gram', (['rbf_kernel', "{'gamma': 1.0}", 'Y', 'Y'], {}), "(rbf_kernel, {'gamma': 1.0}, Y, Y)\n", (1500, 1534), False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((1639, 1659), 'numpy.random.rand', 'onp.random.rand', (['(100)'], {}), '(100)\n', (1654, 1659), True, 'import numpy as onp\n'), ((1697, 1755), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['X[:, np.newaxis]', 'X[:, np.newaxis]'], {'gamma': '(1.0)'}), '(X[:, np.newaxis], X[:, np.newaxis], gamma=1.0)\n', (1708, 1755), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((1765, 1816), 'jaxkern.kernels.covariance_matrix', 'covariance_matrix', (['rbf_kernel', "{'gamma': 1.0}", 'X', 'X'], {}), "(rbf_kernel, {'gamma': 1.0}, X, X)\n", (1782, 1816), 
False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((1921, 1944), 'numpy.random.rand', 'onp.random.rand', (['(100)', '(2)'], {}), '(100, 2)\n', (1936, 1944), True, 'import numpy as onp\n'), ((1982, 2010), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['X', 'X'], {'gamma': '(1.0)'}), '(X, X, gamma=1.0)\n', (1993, 2010), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((2020, 2058), 'jaxkern.kernels.gram', 'gram', (['rbf_kernel', "{'gamma': 1.0}", 'X', 'X'], {}), "(rbf_kernel, {'gamma': 1.0}, X, X)\n", (2024, 2058), False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((2176, 2202), 'numpy.random.rand', 'onp.random.rand', (['n_samples'], {}), '(n_samples)\n', (2191, 2202), True, 'import numpy as onp\n'), ((2240, 2298), 'sklearn.metrics.pairwise.rbf_kernel', 'rbf_sklearn', (['X[:, np.newaxis]', 'X[:, np.newaxis]'], {'gamma': '(1.0)'}), '(X[:, np.newaxis], X[:, np.newaxis], gamma=1.0)\n', (2251, 2298), True, 'from sklearn.metrics.pairwise import rbf_kernel as rbf_sklearn\n'), ((2357, 2395), 'jaxkern.kernels.gram', 'gram', (['rbf_kernel', "{'gamma': 1.0}", 'X', 'X'], {}), "(rbf_kernel, {'gamma': 1.0}, X, X)\n", (2361, 2395), False, 'from jaxkern.kernels import rbf_kernel, covariance_matrix, gram\n'), ((2565, 2577), 'jaxkern.utils.centering', 'centering', (['K'], {}), '(K)\n', (2574, 2577), False, 'from jaxkern.utils import centering\n'), ((575, 587), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (584, 587), True, 'import numpy as onp\n'), ((810, 822), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (819, 822), True, 'import numpy as onp\n'), ((1006, 1018), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (1015, 1018), True, 'import numpy as onp\n'), ((1350, 1362), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (1359, 1362), True, 'import numpy as onp\n'), ((1584, 1596), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (1593, 1596), True, 'import numpy as onp\n'), ((1866, 1878), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (1875, 1878), True, 'import numpy as onp\n'), ((2108, 2120), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (2117, 2120), True, 'import numpy as onp\n'), ((2627, 2639), 'numpy.array', 'onp.array', (['K'], {}), '(K)\n', (2636, 2639), True, 'import numpy as onp\n'), ((2311, 2327), 'sklearn.preprocessing.KernelCenterer', 'KernelCenterer', ([], {}), '()\n', (2325, 2327), False, 'from sklearn.preprocessing import KernelCenterer\n')] |
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, LSTM
import os
import pickle
class Critic:
def __init__(self, state_dim, steps, lr, agent_name, model_path=None):
self.state_dim = state_dim
self.steps = steps
self.agent_name = agent_name
self.lr = lr
self.opt = tf.keras.optimizers.Adam(lr)
        if model_path is None:
self.model = self.create_model()
else:
self.model = tf.keras.models.load_model(model_path)
# if os.path.exists(model_path+"/critic_optimizer.pkl"):
# with open(model_path+"/critic_optimizer.pkl", 'rb') as f:
# self.opt.set_weights(pickle.load(f))
# self.global_opt_weight = None
def create_model(self):
return tf.keras.Sequential([
Input((self.steps, self.state_dim)),
Dense(2048, activation='relu'),
Dense(1024, activation='relu'),
Dense(512, activation='relu'),
LSTM(512, return_sequences=False),
Dense(1, activation='linear')
])
def compute_loss(self, v_pred, td_targets):
mse = tf.keras.losses.MeanSquaredError()
return mse(td_targets, v_pred)
def train(self, states, td_targets):
with tf.GradientTape() as tape:
v_pred = self.model(states, training=True)
assert v_pred.shape == td_targets.shape
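            # stop_gradient treats the TD targets as constants, so the loss only backpropagates through v_pred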
loss = self.compute_loss(v_pred, tf.stop_gradient(td_targets))
grads = tape.gradient(loss, self.model.trainable_variables)
self.opt.apply_gradients(
zip(grads, self.model.trainable_variables))
return loss
def save_model(self, name=None, step=None):
        if name is None:
            name = self.agent_name
        if step is None:
            step = "final"
else:
step = str(step)
directory = "saved_models/" + name + "/" + step + "/critic/"
if not os.path.exists(directory):
os.makedirs(directory)
self.model.save(directory)
# with open(directory + "critic_optimizer.pkl", 'wb') as f:
# pickle.dump(self.global_opt_weight, f)
| [
"os.path.exists",
"tensorflow.keras.layers.Input",
"os.makedirs",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.optimizers.Adam",
"tensorflow.GradientTape",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.models.load_model",
"tensorflow.stop_gradient"
] | [((333, 361), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr'], {}), '(lr)\n', (357, 361), True, 'import tensorflow as tf\n'), ((1169, 1203), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (1201, 1203), True, 'import tensorflow as tf\n'), ((477, 515), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_path'], {}), '(model_path)\n', (503, 515), True, 'import tensorflow as tf\n'), ((1298, 1315), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (1313, 1315), True, 'import tensorflow as tf\n'), ((1986, 2011), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (2000, 2011), False, 'import os\n'), ((2025, 2047), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (2036, 2047), False, 'import os\n'), ((838, 873), 'tensorflow.keras.layers.Input', 'Input', (['(self.steps, self.state_dim)'], {}), '((self.steps, self.state_dim))\n', (843, 873), False, 'from tensorflow.keras.layers import Input, Dense, LSTM\n'), ((887, 917), 'tensorflow.keras.layers.Dense', 'Dense', (['(2048)'], {'activation': '"""relu"""'}), "(2048, activation='relu')\n", (892, 917), False, 'from tensorflow.keras.layers import Input, Dense, LSTM\n'), ((931, 961), 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (936, 961), False, 'from tensorflow.keras.layers import Input, Dense, LSTM\n'), ((975, 1004), 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (980, 1004), False, 'from tensorflow.keras.layers import Input, Dense, LSTM\n'), ((1018, 1051), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(512)'], {'return_sequences': '(False)'}), '(512, return_sequences=False)\n', (1022, 1051), False, 'from tensorflow.keras.layers import Input, Dense, LSTM\n'), ((1065, 1094), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (1070, 1094), False, 'from tensorflow.keras.layers import Input, Dense, LSTM\n'), ((1477, 1505), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['td_targets'], {}), '(td_targets)\n', (1493, 1505), True, 'import tensorflow as tf\n')] |
# Various functions to expose components version
import ctypes
import re
def read_distinfo(): # pragma: nocover
with open('/etc/os-release') as fo:
distinfos = parse_lsb_release(fo)
return distinfos
def parse_lsb_release(lines):
_assignement_re = re.compile(
r"""(?P<variable>[A-Z_]+)="(?P<value>[^"]+)"$"""
)
infos = dict()
for line in lines:
m = _assignement_re.match(line)
if not m:
continue
infos[m.group('variable')] = m.group('value')
return infos
def read_libpq_version():
# Search libpq version bound to this process.
try:
# For psycopg2 2.7+
from psycopg2.extensions import libpq_version
return libpq_version()
except ImportError:
__import__('psycopg2')
# Search for libpq.so path in loaded libraries.
with open('/proc/self/maps') as fo:
for line in fo:
values = line.split()
path = values[-1]
if '/libpq' in path:
break
else: # pragma: nocover
raise Exception("libpq.so not loaded")
libpq = ctypes.cdll.LoadLibrary(path)
return libpq.PQlibVersion()
def format_pq_version(version):
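    # PQlibVersion encodes the version as major*10000 + minor*100 + patch for
    # PostgreSQL < 10 (e.g. 90624 -> 9.6.24) and as major*10000 + minor for 10+
    # (e.g. 120005 -> 12.5).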
pqnums = [
version / 10000,
version % 100,
]
if version <= 100000:
pqnums[1:1] = [(version % 10000) / 100]
return '.'.join(str(int(n)) for n in pqnums)
| [
"ctypes.cdll.LoadLibrary",
"psycopg2.extensions.libpq_version",
"re.compile"
] | [((273, 328), 're.compile', 're.compile', (['"""(?P<variable>[A-Z_]+)="(?P<value>[^"]+)"$"""'], {}), '(\'(?P<variable>[A-Z_]+)="(?P<value>[^"]+)"$\')\n', (283, 328), False, 'import re\n'), ((725, 740), 'psycopg2.extensions.libpq_version', 'libpq_version', ([], {}), '()\n', (738, 740), False, 'from psycopg2.extensions import libpq_version\n'), ((1169, 1198), 'ctypes.cdll.LoadLibrary', 'ctypes.cdll.LoadLibrary', (['path'], {}), '(path)\n', (1192, 1198), False, 'import ctypes\n')] |
import os, time
import tensorflow as tf
import util
# Params: (5 * 5 * 3 * 32 + 32) + (5 * 5 * 32 * 32 + 32) * 2 + 32 * 8 * 8 * 100 + 100 * 2
dir = os.path.dirname(os.path.realpath(__file__))
class Bee_simple(object):
def __init__(self):
self.result_folder = dir + '/../results/' + str(int(time.time()))
self.graph = tf.Graph()
print('Building bee graph')
with self.graph.as_default():
with tf.variable_scope("placeholder"):
self.x_plh = tf.placeholder(tf.float32, shape=[None, 64, 64, 3])
self.y_plh = tf.placeholder(tf.int32, shape=[None, 1])
y_true_reshaped = tf.reshape(self.y_plh, [-1])
# First compresse channel wise
with tf.variable_scope("bee_conv"):
W1 = tf.get_variable('W1', shape=[5, 5, 3, 32], initializer=tf.random_normal_initializer(stddev=1e-1))
b1 = tf.get_variable('b1', shape=[32], initializer=tf.constant_initializer(0.1))
z = tf.nn.conv2d(self.x_plh, W1, strides=[1, 2, 2, 1], padding='SAME') + b1
a = tf.nn.relu(z)
W2 = tf.get_variable('W2', shape=[5, 5, 32, 32], initializer=tf.random_normal_initializer(stddev=1e-1))
b2 = tf.get_variable('b2', shape=[32], initializer=tf.constant_initializer(0.1))
z = tf.nn.conv2d(a, W2, strides=[1, 2, 2, 1], padding='SAME') + b2
a = tf.nn.relu(z)
W3 = tf.get_variable('W3', shape=[5, 5, 32, 32], initializer=tf.random_normal_initializer(stddev=1e-1))
b3 = tf.get_variable('b3', shape=[32], initializer=tf.constant_initializer(0.1))
z = tf.nn.conv2d(a, W3, strides=[1, 2, 2, 1], padding='SAME') + b3
a = tf.nn.relu(z)
tf.histogram_summary("W1", W1)
tf.histogram_summary("b1", b1)
tf.histogram_summary("W2", W2)
tf.histogram_summary("b2", b2)
tf.histogram_summary("W3", W3)
tf.histogram_summary("b3", b3)
with tf.variable_scope("classifier"):
shape = a.get_shape().as_list()
a_vec_size = shape[1] * shape[2] * shape[3]
a_vec = tf.reshape(a, [-1, a_vec_size])
W_fc1 = tf.get_variable('W_fc1', shape=[a_vec_size, 100], initializer=tf.random_normal_initializer(stddev=1e-1))
b_fc1 = tf.get_variable('b_fc1', shape=[100], initializer=tf.constant_initializer(0.1))
z = tf.matmul(a_vec, W_fc1) + b_fc1
a = tf.nn.relu(z)
W_fc2 = tf.get_variable('W_fc2', shape=[100, 2], initializer=tf.random_normal_initializer(stddev=1e-1))
b_fc2 = tf.get_variable('b_fc2', shape=[2], initializer=tf.constant_initializer(0.1))
z = tf.matmul(a, W_fc2) + b_fc2
tf.histogram_summary("W_fc1", W_fc1)
tf.histogram_summary("b_fc1", b_fc1)
tf.histogram_summary("W_fc2", W_fc2)
tf.histogram_summary("b_fc2", b_fc2)
with tf.variable_scope('loss'):
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(z, y_true_reshaped)
self.total_loss = tf.reduce_mean(losses)
tf.scalar_summary("Loss", self.total_loss)
self.global_step = tf.Variable(initial_value=0, name='global_step', trainable=False)
adam = tf.train.AdamOptimizer(learning_rate=1e-3)
self.train_op = adam.minimize(self.total_loss, global_step=self.global_step)
self.train_summaries_op = tf.merge_all_summaries()
with tf.variable_scope('Accuracy'):
preds = tf.cast(tf.argmax(z, 1, name="predictions"), tf.int32)
correct_predictions = tf.equal(preds, y_true_reshaped)
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy")
self.acc_summary = tf.scalar_summary("Accuracy", self.accuracy)
self.saver = tf.train.Saver()
def train_step(self, sess, x_batch, y_batch):
# print('train step', x_batch.shape, y_batch.shape)
to_compute = [self.train_op, self.train_summaries_op, self.total_loss, self.global_step]
return sess.run(to_compute, feed_dict={
self.x_plh: x_batch,
self.y_plh: y_batch
})
def dev_step(self, sess, x_batch, y_batch):
# print('dev step', x_batch.shape, y_batch.shape)
to_compute = [self.accuracy, self.acc_summary]
return sess.run(to_compute, feed_dict={
self.x_plh: x_batch,
self.y_plh: y_batch
})
def eval(self, args, test_data):
x_test_batch, y_test_batch = util.preprocess(test_data)
checkpoint = tf.train.get_checkpoint_state(args.model_folder)
with tf.Session(graph=self.graph) as sess:
print("Init models")
self.saver.restore(sess, checkpoint.model_checkpoint_path)
acc = sess.run(self.accuracy, feed_dict={
self.x_plh: x_test_batch,
self.y_plh: y_test_batch
})
print('Accuracy on test data: %f' % acc)
def fit(self, args, train_data, dev_data):
x_dev_batch, y_dev_batch = util.preprocess(dev_data)
with tf.Session(graph=self.graph) as sess:
tf.train.write_graph(sess.graph_def, self.result_folder, 'bee.pb', as_text=False)
sw = tf.train.SummaryWriter(self.result_folder, sess.graph)
print("Init models")
sess.run(tf.initialize_all_variables())
for i in range(args.num_epochs):
train_iterator = util.ptb_iterator(train_data, args.batch_size)
for x_batch, y_batch in train_iterator:
_, train_summaries, total_loss, current_step = self.train_step(sess, x_batch, y_batch)
sw.add_summary(train_summaries, current_step)
if current_step % args.eval_freq == 0:
acc, dev_summaries = self.dev_step(sess, x_dev_batch, y_dev_batch)
sw.add_summary(dev_summaries, current_step)
if current_step % args.save_freq == 0:
self.saver.save(sess, self.result_folder + '/bee.chkp', global_step=current_step)
epoch_acc, dev_summaries = self.dev_step(sess, x_dev_batch, y_dev_batch)
print('Epoch: %d, Accuracy: %f' % (i + 1, epoch_acc))
self.saver.save(sess, self.result_folder + '/bee.chkp')
with open(self.result_folder + '/bee_saver_def.pb', 'wb') as f:
saver_def = self.saver.as_saver_def().SerializeToString()
f.write(saver_def)
| [
"tensorflow.equal",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.train.write_graph",
"tensorflow.reduce_mean",
"util.ptb_iterator",
"tensorflow.cast",
"tensorflow.Graph",
"util.preprocess",
"tensorflow.Session",
"tensorflow.placeholder",
"tensorflow.random_normal_initializer",
"tensorflow.histogram_summary",
"tensorflow.matmul",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.train.SummaryWriter",
"tensorflow.initialize_all_variables",
"tensorflow.scalar_summary",
"tensorflow.variable_scope",
"tensorflow.Variable",
"tensorflow.train.get_checkpoint_state",
"tensorflow.reshape",
"time.time",
"tensorflow.nn.relu",
"tensorflow.train.Saver",
"tensorflow.merge_all_summaries",
"os.path.realpath",
"tensorflow.argmax",
"tensorflow.constant_initializer"
] | [((167, 193), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (183, 193), False, 'import os, time\n'), ((341, 351), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (349, 351), True, 'import tensorflow as tf\n'), ((4786, 4812), 'util.preprocess', 'util.preprocess', (['test_data'], {}), '(test_data)\n', (4801, 4812), False, 'import util\n'), ((4834, 4882), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['args.model_folder'], {}), '(args.model_folder)\n', (4863, 4882), True, 'import tensorflow as tf\n'), ((5335, 5360), 'util.preprocess', 'util.preprocess', (['dev_data'], {}), '(dev_data)\n', (5350, 5360), False, 'import util\n'), ((3384, 3449), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': '(0)', 'name': '"""global_step"""', 'trainable': '(False)'}), "(initial_value=0, name='global_step', trainable=False)\n", (3395, 3449), True, 'import tensorflow as tf\n'), ((3469, 3512), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (3491, 3512), True, 'import tensorflow as tf\n'), ((3640, 3664), 'tensorflow.merge_all_summaries', 'tf.merge_all_summaries', ([], {}), '()\n', (3662, 3664), True, 'import tensorflow as tf\n'), ((4076, 4092), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (4090, 4092), True, 'import tensorflow as tf\n'), ((4896, 4924), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (4906, 4924), True, 'import tensorflow as tf\n'), ((5374, 5402), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (5384, 5402), True, 'import tensorflow as tf\n'), ((5424, 5510), 'tensorflow.train.write_graph', 'tf.train.write_graph', (['sess.graph_def', 'self.result_folder', '"""bee.pb"""'], {'as_text': '(False)'}), "(sess.graph_def, self.result_folder, 'bee.pb', as_text=\n False)\n", (5444, 5510), True, 'import tensorflow as tf\n'), ((5523, 5577), 'tensorflow.train.SummaryWriter', 'tf.train.SummaryWriter', (['self.result_folder', 'sess.graph'], {}), '(self.result_folder, sess.graph)\n', (5545, 5577), True, 'import tensorflow as tf\n'), ((444, 476), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""placeholder"""'], {}), "('placeholder')\n", (461, 476), True, 'import tensorflow as tf\n'), ((507, 558), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 64, 64, 3]'}), '(tf.float32, shape=[None, 64, 64, 3])\n', (521, 558), True, 'import tensorflow as tf\n'), ((588, 629), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None, 1]'}), '(tf.int32, shape=[None, 1])\n', (602, 629), True, 'import tensorflow as tf\n'), ((664, 692), 'tensorflow.reshape', 'tf.reshape', (['self.y_plh', '[-1]'], {}), '(self.y_plh, [-1])\n', (674, 692), True, 'import tensorflow as tf\n'), ((754, 783), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""bee_conv"""'], {}), "('bee_conv')\n", (771, 783), True, 'import tensorflow as tf\n'), ((1113, 1126), 'tensorflow.nn.relu', 'tf.nn.relu', (['z'], {}), '(z)\n', (1123, 1126), True, 'import tensorflow as tf\n'), ((1448, 1461), 'tensorflow.nn.relu', 'tf.nn.relu', (['z'], {}), '(z)\n', (1458, 1461), True, 'import tensorflow as tf\n'), ((1783, 1796), 'tensorflow.nn.relu', 'tf.nn.relu', (['z'], {}), '(z)\n', (1793, 1796), True, 'import tensorflow as tf\n'), ((1814, 1844), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""W1"""', 'W1'], {}), "('W1', W1)\n", (1834, 1844), True, 'import 
tensorflow as tf\n'), ((1861, 1891), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""b1"""', 'b1'], {}), "('b1', b1)\n", (1881, 1891), True, 'import tensorflow as tf\n'), ((1908, 1938), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""W2"""', 'W2'], {}), "('W2', W2)\n", (1928, 1938), True, 'import tensorflow as tf\n'), ((1955, 1985), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""b2"""', 'b2'], {}), "('b2', b2)\n", (1975, 1985), True, 'import tensorflow as tf\n'), ((2002, 2032), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""W3"""', 'W3'], {}), "('W3', W3)\n", (2022, 2032), True, 'import tensorflow as tf\n'), ((2049, 2079), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""b3"""', 'b3'], {}), "('b3', b3)\n", (2069, 2079), True, 'import tensorflow as tf\n'), ((2098, 2129), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""classifier"""'], {}), "('classifier')\n", (2115, 2129), True, 'import tensorflow as tf\n'), ((2263, 2294), 'tensorflow.reshape', 'tf.reshape', (['a', '[-1, a_vec_size]'], {}), '(a, [-1, a_vec_size])\n', (2273, 2294), True, 'import tensorflow as tf\n'), ((2601, 2614), 'tensorflow.nn.relu', 'tf.nn.relu', (['z'], {}), '(z)\n', (2611, 2614), True, 'import tensorflow as tf\n'), ((2903, 2939), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""W_fc1"""', 'W_fc1'], {}), "('W_fc1', W_fc1)\n", (2923, 2939), True, 'import tensorflow as tf\n'), ((2956, 2992), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""b_fc1"""', 'b_fc1'], {}), "('b_fc1', b_fc1)\n", (2976, 2992), True, 'import tensorflow as tf\n'), ((3009, 3045), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""W_fc2"""', 'W_fc2'], {}), "('W_fc2', W_fc2)\n", (3029, 3045), True, 'import tensorflow as tf\n'), ((3062, 3098), 'tensorflow.histogram_summary', 'tf.histogram_summary', (['"""b_fc2"""', 'b_fc2'], {}), "('b_fc2', b_fc2)\n", (3082, 3098), True, 'import tensorflow as tf\n'), ((3117, 3142), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (3134, 3142), True, 'import tensorflow as tf\n'), ((3169, 3235), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', (['z', 'y_true_reshaped'], {}), '(z, y_true_reshaped)\n', (3215, 3235), True, 'import tensorflow as tf\n'), ((3270, 3292), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['losses'], {}), '(losses)\n', (3284, 3292), True, 'import tensorflow as tf\n'), ((3309, 3351), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""Loss"""', 'self.total_loss'], {}), "('Loss', self.total_loss)\n", (3326, 3351), True, 'import tensorflow as tf\n'), ((3683, 3712), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""Accuracy"""'], {}), "('Accuracy')\n", (3700, 3712), True, 'import tensorflow as tf\n'), ((3831, 3863), 'tensorflow.equal', 'tf.equal', (['preds', 'y_true_reshaped'], {}), '(preds, y_true_reshaped)\n', (3839, 3863), True, 'import tensorflow as tf\n'), ((4005, 4049), 'tensorflow.scalar_summary', 'tf.scalar_summary', (['"""Accuracy"""', 'self.accuracy'], {}), "('Accuracy', self.accuracy)\n", (4022, 4049), True, 'import tensorflow as tf\n'), ((5633, 5662), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (5660, 5662), True, 'import tensorflow as tf\n'), ((5743, 5789), 'util.ptb_iterator', 'util.ptb_iterator', (['train_data', 'args.batch_size'], {}), '(train_data, args.batch_size)\n', (5760, 5789), False, 'import util\n'), ((306, 317), 
'time.time', 'time.time', ([], {}), '()\n', (315, 317), False, 'import os, time\n'), ((1021, 1087), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['self.x_plh', 'W1'], {'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(self.x_plh, W1, strides=[1, 2, 2, 1], padding='SAME')\n", (1033, 1087), True, 'import tensorflow as tf\n'), ((1365, 1422), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['a', 'W2'], {'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(a, W2, strides=[1, 2, 2, 1], padding='SAME')\n", (1377, 1422), True, 'import tensorflow as tf\n'), ((1700, 1757), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['a', 'W3'], {'strides': '[1, 2, 2, 1]', 'padding': '"""SAME"""'}), "(a, W3, strides=[1, 2, 2, 1], padding='SAME')\n", (1712, 1757), True, 'import tensorflow as tf\n'), ((2549, 2572), 'tensorflow.matmul', 'tf.matmul', (['a_vec', 'W_fc1'], {}), '(a_vec, W_fc1)\n', (2558, 2572), True, 'import tensorflow as tf\n'), ((2858, 2877), 'tensorflow.matmul', 'tf.matmul', (['a', 'W_fc2'], {}), '(a, W_fc2)\n', (2867, 2877), True, 'import tensorflow as tf\n'), ((3746, 3781), 'tensorflow.argmax', 'tf.argmax', (['z', '(1)'], {'name': '"""predictions"""'}), "(z, 1, name='predictions')\n", (3755, 3781), True, 'import tensorflow as tf\n'), ((3911, 3951), 'tensorflow.cast', 'tf.cast', (['correct_predictions', 'tf.float32'], {}), '(correct_predictions, tf.float32)\n', (3918, 3951), True, 'import tensorflow as tf\n'), ((861, 901), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (889, 901), True, 'import tensorflow as tf\n'), ((971, 999), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (994, 999), True, 'import tensorflow as tf\n'), ((1205, 1245), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1233, 1245), True, 'import tensorflow as tf\n'), ((1315, 1343), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (1338, 1343), True, 'import tensorflow as tf\n'), ((1540, 1580), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1568, 1580), True, 'import tensorflow as tf\n'), ((1650, 1678), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (1673, 1678), True, 'import tensorflow as tf\n'), ((2382, 2422), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (2410, 2422), True, 'import tensorflow as tf\n'), ((2499, 2527), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (2522, 2527), True, 'import tensorflow as tf\n'), ((2693, 2733), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (2721, 2733), True, 'import tensorflow as tf\n'), ((2808, 2836), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (2831, 2836), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
#
# Copyright (c) 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
import os
import struct
import tempfile
import typing
from pathlib import Path
from typing import BinaryIO, Type
class MdvHeader:
# uint32_t magic;
# uint32_t version; // 1
# uint32_t recordVersion; // T::VERSION
# uint32_t recordSize; // sizeof(T)
# uint64_t entryCount; // end() - begin()
# uint64_t padding;
FORMAT = struct.Struct("=4sIIIQQ")
MAGIC = b"MDV\0"
VERSION_1 = 1
def __init__(
self,
magic: bytes,
version: int,
record_version: int,
record_size: int,
entry_count: int,
) -> None:
self.magic = magic
self.version = version
self.record_version = record_version
self.record_size = record_size
self.entry_count = entry_count
def serialize(self) -> bytes:
return self.FORMAT.pack(
self.magic,
self.version,
self.record_version,
self.record_size,
self.entry_count,
0,
)
@classmethod
def parse(cls: Type["MdvHeader"], data: bytes) -> "MdvHeader":
fields = cls.FORMAT.unpack(data)
(magic, version, record_version, record_size, entry_count, _padding) = fields
return cls(magic, version, record_version, record_size, entry_count)
@classmethod
def read(cls: Type["MdvHeader"], input_file: BinaryIO) -> "MdvHeader":
data = input_file.read(cls.FORMAT.size)
return cls.parse(data)
class InodeMetadataV0:
# uint64_t inode_number
# mode_t mode
# uid_t uid
# gid_t gid
# uint32_t padding
# uint64_t atime # encoded as EdenTimestamp (nanoseconds from 1901-12-13)
# uint64_t mtime # EdenTimestamp
# uint64_t ctime # EdenTimestamp
FORMAT = struct.Struct("=QIIIIQQQ")
VERSION = 0
def __init__(
self,
inode_number: int,
mode: int,
uid: int,
gid: int,
atime: int,
mtime: int,
ctime: int,
) -> None:
self.inode_number = inode_number
self.mode = mode
self.uid = uid
self.gid = gid
self.atime = atime
self.mtime = mtime
self.ctime = ctime
def serialize(self) -> bytes:
return self.FORMAT.pack(
self.inode_number,
self.mode,
self.uid,
self.gid,
0,
self.atime,
self.mtime,
self.ctime,
)
@classmethod
def parse(cls: Type["InodeMetadataV0"], data: bytes) -> "InodeMetadataV0":
fields = cls.FORMAT.unpack(data)
(inode_number, mode, uid, gid, _padding, atime, mtime, ctime) = fields
return cls(inode_number, mode, uid, gid, atime, mtime, ctime)
@classmethod
def read(cls: Type["InodeMetadataV0"], input_file: BinaryIO) -> "InodeMetadataV0":
data = input_file.read(cls.FORMAT.size)
if len(data) != cls.FORMAT.size:
raise Exception(f"short inode metadata table header: size={len(data)}")
return cls.parse(data)
def update_ownership(metadata_path: Path, uid: int, gid: int) -> None:
"""Update an Eden inode metadata table file, replacing the UID and GID fields
for all inodes with the specified values.
"""
with typing.cast(BinaryIO, metadata_path.open("rb")) as input_file:
header = MdvHeader.read(input_file)
if header.magic != MdvHeader.MAGIC:
raise Exception(
"unsupported inode metadata table file format: "
f"magic={header.magic!r}"
)
if header.version != MdvHeader.VERSION_1:
raise Exception(
"unsupported inode metadata table file format: "
f"version={header.version}"
)
if header.record_version != InodeMetadataV0.VERSION:
raise Exception(
"unsupported inode metadata table file format: "
f"record_version={header.record_version}"
)
if header.record_size != InodeMetadataV0.FORMAT.size:
raise Exception(
"unsupported inode metadata table file format: "
f"record_size: {header.record_size} != {InodeMetadataV0.FORMAT.size}"
)
tmp_fd, tmp_file_name = tempfile.mkstemp(
dir=str(metadata_path.parent), prefix=metadata_path.name + "."
)
tmp_file = os.fdopen(tmp_fd, "wb")
try:
tmp_file.write(header.serialize())
_rewrite_ownership_v0(input_file, tmp_file, header, uid, gid)
tmp_file.close()
tmp_file = None
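            # The temp file lives in the same directory, so this rename atomically
            # swaps the rewritten table into place over the original file.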
os.rename(tmp_file_name, metadata_path)
except Exception:
try:
os.unlink(tmp_file_name)
except Exception:
pass
raise
finally:
if tmp_file is not None:
tmp_file.close()
def _rewrite_ownership_v0(
input_file: BinaryIO, new_file: BinaryIO, header: MdvHeader, uid: int, gid: int
) -> None:
entries_processed = 0
entry_size = InodeMetadataV0.FORMAT.size
for _ in range(header.entry_count):
entries_processed += 1
entry_data = input_file.read(entry_size)
if len(entry_data) != entry_size:
raise Exception("inode metadata table appears truncated")
entry = InodeMetadataV0.parse(entry_data)
entry.uid = uid
entry.gid = gid
new_file.write(entry.serialize())
# Copy the remaining file contents as is. This is normally all 0-filled data
# that provides space for new entries to be written in the future.
padding = input_file.read()
new_file.write(padding)
| [
"os.fdopen",
"struct.Struct",
"os.rename",
"os.unlink"
] | [((671, 696), 'struct.Struct', 'struct.Struct', (['"""=4sIIIQQ"""'], {}), "('=4sIIIQQ')\n", (684, 696), False, 'import struct\n'), ((2079, 2105), 'struct.Struct', 'struct.Struct', (['"""=QIIIIQQQ"""'], {}), "('=QIIIIQQQ')\n", (2092, 2105), False, 'import struct\n'), ((4724, 4747), 'os.fdopen', 'os.fdopen', (['tmp_fd', '"""wb"""'], {}), "(tmp_fd, 'wb')\n", (4733, 4747), False, 'import os\n'), ((4951, 4990), 'os.rename', 'os.rename', (['tmp_file_name', 'metadata_path'], {}), '(tmp_file_name, metadata_path)\n', (4960, 4990), False, 'import os\n'), ((5050, 5074), 'os.unlink', 'os.unlink', (['tmp_file_name'], {}), '(tmp_file_name)\n', (5059, 5074), False, 'import os\n')] |
# %%
'''
Create parameters
'''
import boto3
import os
def run():
# Login in first to load your profile or provide other environment variables
os.environ['AWS_PROFILE'] = "some_profile"
os.environ['AWS_REGION'] = "us-east-1"
client = boto3.client("ssm")
environments = ['alpha', 'dev', 'qa', 'qa-2']
parameters = [
{
'name': '/<env>/efs/ttc/session/dvir/key_id',
'description': 'TTC credential key id for dvir api',
'value': 'key',
'type': 'SecureString',
'key_id': ''
},
{
'name': '/<env>/efs/ttc/session/dvir/key_secret',
'description': 'TTC credential key secret for dvir api',
'value': 'secret',
'type': 'SecureString',
'key_id': ''
},
{
'name': '/<env>/efs/ttc/api_base_url',
'description': 'TTC base URL',
'value': '',
'type': 'String'
}
]
for e in environments:
for p in parameters:
            # Substitute the target environment into each parameter name
            name = p['name'].replace('<env>', e)
            kwargs = {
                'Name': name,
                'Description': p['description'],
                'Value': p['value'],
                'Type': p['type'],
                'Overwrite': True,
            }
            # Only pass a KMS key id when one is configured (SecureString parameters)
            if p.get('key_id'):
                kwargs['KeyId'] = p['key_id']
            response = client.put_parameter(**kwargs)
# %%
environments = ['alpha', 'dev', 'qa', 'qa-2']
for e in environments:
print(f'aws ssm put-parameter --name /{e}/efs/ttc/session/dvir/key_id --value "46748174eadB03C05eA6Ee5711e1303" --description "TTC credential key id for dvir api" --type SecureString --key-id <key-id>')
# %%
for e in environments:
print(f'aws ssm put-parameter --name /{e}/efs/ttc/session/dvir/key_secret --value "<KEY>" --overwrite')
# %%
for e in environments:
print(f'aws ssm put-parameter --name /{e}/efs/ttc/api_base_url --value "https://cloud.dev.api.trimblecloud.com" --description "TTC Base URL" --type String')
# %%
## Some result
'''
# Adding keys
aws ssm put-parameter --name /alpha/efs/ttc/session/dvir/key_id --value "<sensitive_key>" --description "TTC credential key id for dvir api" --type SecureString --key-id c42964a1-ed4e-42ad-bbb4-83c262462539
aws ssm put-parameter --name /dev/efs/ttc/session/dvir/key_id --value "<sensitive_key>" --description "TTC credential key id for dvir api" --type SecureString --key-id 0ffbeaa5-1280-4149-9004-74202e31ee1a
aws ssm put-parameter --name /qa/efs/ttc/session/dvir/key_id --value "<sensitive_key>" --description "TTC credential key id for dvir api" --type SecureString --key-id 31b21206-9023-4ce6-a57d-56cc88701d1c
aws ssm put-parameter --name /qa-2/efs/ttc/session/dvir/key_id --value "<sensitive_key>" --description "TTC credential key id for dvir api" --type SecureString --key-id f555e8a0-c41e-4c6a-82d2-90e82ecaef57
# Adding secrets
aws ssm put-parameter --name /alpha/efs/ttc/session/dvir/key_secret --value "<sensitive_key>" --description "TTC credential key secret for dvir api" --type SecureString --key-id c42964a1-ed4e-42ad-bbb4-83c262462539
aws ssm put-parameter --name /dev/efs/ttc/session/dvir/key_secret --value "<sensitive_key>" --description "TTC credential key secret for dvir api" --type SecureString --key-id 0ffbeaa5-1280-4149-9004-74202e31ee1a
aws ssm put-parameter --name /qa/efs/ttc/session/dvir/key_secret --value "<sensitive_key>" --description "TTC credential key secret for dvir api" --type SecureString --key-id 31b21206-9023-4ce6-a57d-56cc88701d1c
aws ssm put-parameter --name /qa-2/efs/ttc/session/dvir/key_secret --value "<sensitive_key>" --description "TTC credential key secret for dvir api" --type SecureString --key-id f555e8a0-c41e-4c6a-82d2-90e82ecaef57
# Adding base URL
aws ssm put-parameter --name /alpha/efs/ttc/api_base_url --value "https://cloud.dev.api.trimblecloud.com" --description "TTC Base URL" --type String
aws ssm put-parameter --name /dev/efs/ttc/api_base_url --value "https://cloud.dev.api.trimblecloud.com" --description "TTC Base URL" --type String
aws ssm put-parameter --name /qa/efs/ttc/api_base_url --value "https://cloud.dev.api.trimblecloud.com" --description "TTC Base URL" --type String
aws ssm put-parameter --name /qa-2/efs/ttc/api_base_url --value "https://cloud.dev.api.trimblecloud.com" --description "TTC Base URL" --type String
''' | [
"boto3.client"
] | [((267, 286), 'boto3.client', 'boto3.client', (['"""ssm"""'], {}), "('ssm')\n", (279, 286), False, 'import boto3\n')] |
import sys, pygame, random
import os
import game_board
#import Piece
#from Piece import *
pygame.init()
# set colour and size of screen
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
RED = (255, 0, 0)
WIDTH = 600
GRID_WIDTH = 100
HEIGHT = 600
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Red n' Black")
# set refresh rate
FPS = 30
clock = pygame.time.Clock()
# add background image
base_folder = os.path.dirname(__file__)
img_folder = os.path.join(base_folder, 'images')
background_img = pygame.image.load(os.path.join(img_folder, 'board.png')).convert()
background_img_2 = pygame.image.load(os.path.join(img_folder, 'wood.jpg')).convert()
piece_black = pygame.image.load(os.path.join(img_folder, 'black.png'))
piece_red = pygame.image.load(os.path.join(img_folder, 'red.png'))
# draw the board (8 * 8)
def draw_background(surf):
# load background image
surf.blit(background_img_2, (0, 0))
surf.blit(background_img, (100, 100))
# draw board rim
# print ((GRID_WIDTH, GRID_WIDTH))
rect_lines = [
((GRID_WIDTH, GRID_WIDTH), (GRID_WIDTH, HEIGHT - GRID_WIDTH)),
((GRID_WIDTH, GRID_WIDTH), (WIDTH - GRID_WIDTH, GRID_WIDTH)),
((GRID_WIDTH, HEIGHT - GRID_WIDTH),
(WIDTH - GRID_WIDTH, HEIGHT - GRID_WIDTH)),
((WIDTH - GRID_WIDTH, GRID_WIDTH),
(WIDTH - GRID_WIDTH, HEIGHT - GRID_WIDTH)),
]
for line in rect_lines:
pygame.draw.line(surf, BLACK, line[0], line[1], 2)
# draw grid
for i in range(1, 8):
count = (50 * i) + 100
# print ((GRID_WIDTH * count, GRID_WIDTH))
pygame.draw.line(surf, BLACK,
(count, GRID_WIDTH),
(count, HEIGHT - GRID_WIDTH))
pygame.draw.line(surf, BLACK,
(GRID_WIDTH, count),
(HEIGHT - GRID_WIDTH, count))
'''
pygame.draw.line(surf, BLACK,
(150, 100),
(150, 500))
'''
myfont=pygame.font.Font(None,40)
alp_lis = ["A", "B", "C", "D", "E", "F", "G", "H"]
num_lis = ["1", "2", "3", "4", "5", "6", "7", "8"]
ori_x = 75
ori_y = 115
# Mark the letter of the Y-axis of the board
for alp in range(len(alp_lis)):
textImage = myfont.render(alp_lis[alp],True,BLACK)
screen.blit(textImage,(ori_x,ori_y))
ori_y += 50
#textImage = myfont.render("B",True,BLACK)
#screen.blit(textImage,(75,165))
ori_x = 115
ori_y = 65
for num in range(len(num_lis)):
textImage = myfont.render(num_lis[num],True,BLACK)
screen.blit(textImage,(ori_x,ori_y))
ori_x += 50
def play_round(play):
myfont = pygame.font.Font(None, 40)
if play == "play 1":
textImage = myfont.render("Play 1", True, RED)
screen.blit(textImage, (80, 35))
else:
textImage = myfont.render("Play 2", True, RED)
screen.blit(textImage, (80, 35))
def display_box(screen, message):
"Print a message in a box"
fontobject = pygame.font.Font(None,18)
pygame.draw.rect(screen, (0,0,0),
((screen.get_width() / 2) - 100,
(screen.get_height() / 2) - 10,
200,20), 0)
pygame.draw.rect(screen, (255,255,255),
((screen.get_width() / 2) - 102,
(screen.get_height() / 2) - 12,
204,24), 1)
if len(message) != 0:
screen.blit(fontobject.render(message, 1, (255,255,255)),
((screen.get_width() / 2) - 100, (screen.get_height() / 2) - 10))
pygame.display.flip()
def set_piece(surf):
#surf.blit(piece_black, (153, 153))
#surf.blit(piece_red, (103, 103))
#surf.blit(piece_red, (103, 203))
x_black = 103
y_black = 103
x_red = 103
y_red = 353
for loc in range (4):
# build black piece
surf.blit(piece_black, (x_black, y_black))
surf.blit(piece_black, (x_black + 50, y_black + 50))
surf.blit(piece_black, (x_black, y_black + 100))
# build red piece
surf.blit(piece_red, (x_red + 50, y_red))
surf.blit(piece_red, (x_red, y_red + 50))
surf.blit(piece_red, (x_red + 50, y_red + 100))
# make y-axle shift 100px
x_black += 100
x_red += 100
running = True
while running:
# set refresh
clock.tick(FPS)
for event in pygame.event.get():
# check if windows is quit
if event.type == pygame.QUIT:
running = False
# draw the background board
draw_background(screen)
# draw the piece for the board
set_piece(screen)
#display_box(screen, "From: ")
play_round("play 1")
for event in pygame.event.get():
print (event)
# refresh
pygame.display.flip()
| [
"pygame.display.set_caption",
"pygame.init",
"pygame.draw.line",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"os.path.join",
"os.path.dirname",
"pygame.time.Clock",
"pygame.font.Font"
] | [((90, 103), 'pygame.init', 'pygame.init', ([], {}), '()\n', (101, 103), False, 'import sys, pygame, random\n'), ((269, 309), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(WIDTH, HEIGHT)'], {}), '((WIDTH, HEIGHT))\n', (292, 309), False, 'import sys, pygame, random\n'), ((310, 352), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""Red n\' Black"""'], {}), '("Red n\' Black")\n', (336, 352), False, 'import sys, pygame, random\n'), ((390, 409), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (407, 409), False, 'import sys, pygame, random\n'), ((448, 473), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (463, 473), False, 'import os\n'), ((487, 522), 'os.path.join', 'os.path.join', (['base_folder', '"""images"""'], {}), "(base_folder, 'images')\n", (499, 522), False, 'import os\n'), ((724, 761), 'os.path.join', 'os.path.join', (['img_folder', '"""black.png"""'], {}), "(img_folder, 'black.png')\n", (736, 761), False, 'import os\n'), ((793, 828), 'os.path.join', 'os.path.join', (['img_folder', '"""red.png"""'], {}), "(img_folder, 'red.png')\n", (805, 828), False, 'import os\n'), ((2065, 2091), 'pygame.font.Font', 'pygame.font.Font', (['None', '(40)'], {}), '(None, 40)\n', (2081, 2091), False, 'import sys, pygame, random\n'), ((2763, 2789), 'pygame.font.Font', 'pygame.font.Font', (['None', '(40)'], {}), '(None, 40)\n', (2779, 2789), False, 'import sys, pygame, random\n'), ((3102, 3128), 'pygame.font.Font', 'pygame.font.Font', (['None', '(18)'], {}), '(None, 18)\n', (3118, 3128), False, 'import sys, pygame, random\n'), ((3676, 3697), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (3695, 3697), False, 'import sys, pygame, random\n'), ((4489, 4507), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4505, 4507), False, 'import sys, pygame, random\n'), ((4808, 4826), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4824, 4826), False, 'import sys, pygame, random\n'), ((4869, 4890), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4888, 4890), False, 'import sys, pygame, random\n'), ((1455, 1505), 'pygame.draw.line', 'pygame.draw.line', (['surf', 'BLACK', 'line[0]', 'line[1]', '(2)'], {}), '(surf, BLACK, line[0], line[1], 2)\n', (1471, 1505), False, 'import sys, pygame, random\n'), ((1648, 1733), 'pygame.draw.line', 'pygame.draw.line', (['surf', 'BLACK', '(count, GRID_WIDTH)', '(count, HEIGHT - GRID_WIDTH)'], {}), '(surf, BLACK, (count, GRID_WIDTH), (count, HEIGHT - GRID_WIDTH)\n )\n', (1664, 1733), False, 'import sys, pygame, random\n'), ((1787, 1872), 'pygame.draw.line', 'pygame.draw.line', (['surf', 'BLACK', '(GRID_WIDTH, count)', '(HEIGHT - GRID_WIDTH, count)'], {}), '(surf, BLACK, (GRID_WIDTH, count), (HEIGHT - GRID_WIDTH, count)\n )\n', (1803, 1872), False, 'import sys, pygame, random\n'), ((558, 595), 'os.path.join', 'os.path.join', (['img_folder', '"""board.png"""'], {}), "(img_folder, 'board.png')\n", (570, 595), False, 'import os\n'), ((644, 680), 'os.path.join', 'os.path.join', (['img_folder', '"""wood.jpg"""'], {}), "(img_folder, 'wood.jpg')\n", (656, 680), False, 'import os\n')] |
import numpy as np
from scipy.spatial.distance import cdist
from .index import SearchResults
from random import choice
from zounds.timeseries import ConstantRateTimeSeries
from zounds.nputil import packed_hamming_distance
class BaseBruteForceSearch(object):
def __init__(self, gen):
index = []
self._ids = []
for _id, example in gen:
index.append(example)
crts = ConstantRateTimeSeries(example)
for ts, _ in crts.iter_slices():
self._ids.append((_id, ts))
self.index = np.concatenate(index)
def search(self, query, n_results=10):
raise NotImplementedError()
def random_search(self, n_results=10):
query = choice(self.index)
return self.search(query, n_results)
class BruteForceSearch(BaseBruteForceSearch):
def __init__(self, gen, distance_metric='euclidean'):
super(BruteForceSearch, self).__init__(gen)
self.distance_metric = distance_metric
def search(self, query, n_results=10):
distances = cdist(
query[None, ...], self.index, metric=self.distance_metric)
indices = np.argsort(distances[0])[:n_results]
return SearchResults(query, (self._ids[i] for i in indices))
class HammingDistanceBruteForceSearch(BaseBruteForceSearch):
def __init__(self, gen):
super(HammingDistanceBruteForceSearch, self).__init__(gen)
def search(self, query, n_results=10):
scores = packed_hamming_distance(query, self.index)
indices = np.argsort(scores)[:n_results]
return SearchResults(query, (self._ids[i] for i in indices))
| [
"random.choice",
"zounds.timeseries.ConstantRateTimeSeries",
"zounds.nputil.packed_hamming_distance",
"scipy.spatial.distance.cdist",
"numpy.argsort",
"numpy.concatenate"
] | [((559, 580), 'numpy.concatenate', 'np.concatenate', (['index'], {}), '(index)\n', (573, 580), True, 'import numpy as np\n'), ((721, 739), 'random.choice', 'choice', (['self.index'], {}), '(self.index)\n', (727, 739), False, 'from random import choice\n'), ((1054, 1118), 'scipy.spatial.distance.cdist', 'cdist', (['query[None, ...]', 'self.index'], {'metric': 'self.distance_metric'}), '(query[None, ...], self.index, metric=self.distance_metric)\n', (1059, 1118), False, 'from scipy.spatial.distance import cdist\n'), ((1476, 1518), 'zounds.nputil.packed_hamming_distance', 'packed_hamming_distance', (['query', 'self.index'], {}), '(query, self.index)\n', (1499, 1518), False, 'from zounds.nputil import packed_hamming_distance\n'), ((417, 448), 'zounds.timeseries.ConstantRateTimeSeries', 'ConstantRateTimeSeries', (['example'], {}), '(example)\n', (439, 448), False, 'from zounds.timeseries import ConstantRateTimeSeries\n'), ((1150, 1174), 'numpy.argsort', 'np.argsort', (['distances[0]'], {}), '(distances[0])\n', (1160, 1174), True, 'import numpy as np\n'), ((1537, 1555), 'numpy.argsort', 'np.argsort', (['scores'], {}), '(scores)\n', (1547, 1555), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import urllib.parse
import requests
import argparse
import re, sys
def output(outfile, output_list):
file_var = open(outfile,'w')
no_of_links = len(output_list)
for i in range(no_of_links):
file_var.write(output_list[i])
file_var.write('\n')
def search(query, outfile, limit, proxy):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0', 'Referer': 'https://www.bing.com/'}
session = requests.Session()
end_of_search = 'No results found for <strong>' + query + '</strong>'
enc_query = urllib.parse.quote(query)
output_list = []
if_limit = False
sw_next = True
response = ''
next = 1
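    # Bing pages results 10 at a time via the 'first' offset; the page limit below
    # is converted into the offset of the last page's first result.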
if limit :
limit = (limit - 1) * 10 + 1
if_limit = True
try:
while end_of_search not in response and sw_next == True:
search_url = f'https://www.bing.com/search?q={enc_query}&go=Submit&qs=n&pq={enc_query}&first={str(next)}&FORM=PERE'
response = session.get(search_url, headers=headers, proxies=proxy)
            hrefs = re.findall(r'<h2><a href="\S+', response.text)
for href in hrefs:
temp = href.replace('<h2><a href="http://', 'http://').replace('<h2><a href="', '')
url = temp.replace('"', '').replace('amp;', '')
if not outfile:
print(url)
else:
output_list.append(url)
if if_limit:
                if next < limit and re.findall(r'Next\S+', response.text):
                    sw_next = True
                else:
                    sw_next = False
            elif re.findall(r'Next\S+', response.text):
sw_next = True
else:
sw_next = False
next = next + 10
if outfile:
output(outfile, output_list)
except Exception as e:
print('Connection Error')
print(e)
def main():
description = 'Bing Search'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('query', type=str, help='Your search query.')
parser.add_argument('-w', '--outfile', type=str, help='Write to the specified output file instead of standard output.')
parser.add_argument('-l', '--limit', type=int, help='Limit the Number of pages to check.')
parser.add_argument('-x', '--proxy', type=str, help='Proxy to be used. Format: [PROTOCOL://][USER:PASSWORD@]HOST[:PORT]')
args = parser.parse_args()
print('Searching for:', args.query, '\n')
query = args.query
outfile = args.outfile
limit = args.limit
proxy = {'http': args.proxy, 'https': args.proxy}
search(query, outfile, limit, proxy)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('Exiting..')
sys.exit()
| [
"re.findall",
"requests.Session",
"argparse.ArgumentParser",
"sys.exit"
] | [((473, 491), 'requests.Session', 'requests.Session', ([], {}), '()\n', (489, 491), False, 'import requests\n'), ((1699, 1747), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (1722, 1747), False, 'import argparse\n'), ((1007, 1053), 're.findall', 're.findall', (['"""<h2><a href="\\\\S+"""', 'response.text'], {}), '(\'<h2><a href="\\\\S+\', response.text)\n', (1017, 1053), False, 'import re, sys\n'), ((2469, 2479), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2477, 2479), False, 'import re, sys\n'), ((1429, 1466), 're.findall', 're.findall', (['"""Next\\\\S+"""', 'response.text'], {}), "('Next\\\\S+', response.text)\n", (1439, 1466), False, 'import re, sys\n'), ((1332, 1369), 're.findall', 're.findall', (['"""Next\\\\S+"""', 'response.text'], {}), "('Next\\\\S+', response.text)\n", (1342, 1369), False, 'import re, sys\n')] |
import os
import numpy as np
import torch
import torchvision.datasets as datasets
import torchvision.transforms as TF
# Specify one or more data directories here
from skimage.util import view_as_windows
DATA_DIRS = '/your/data/path/here/',
PLACES_LOADER = None
PLACES_ITER = None
def _load_places(batch_size=256, image_size=84, num_workers=16):
global PLACES_LOADER, PLACES_ITER
print('Loading places365_standard...')
for data_dir in DATA_DIRS:
if os.path.exists(data_dir):
fp = os.path.join(data_dir, 'places365_standard', 'train')
PLACES_LOADER = torch.utils.data.DataLoader(
datasets.ImageFolder(fp, TF.Compose([
TF.RandomResizedCrop(image_size),
TF.RandomHorizontalFlip(),
TF.ToTensor()
])),
batch_size=batch_size, shuffle=True,
num_workers=num_workers, pin_memory=True)
PLACES_ITER = iter(PLACES_LOADER)
break
if PLACES_ITER is None:
raise FileNotFoundError('failed to find places365 data at any of the specified paths')
print('Load complete.')
def _get_places_batch(batch_size):
global PLACES_ITER
try:
imgs, _ = next(PLACES_ITER)
if imgs.size(0) < batch_size:
PLACES_ITER = iter(PLACES_LOADER)
imgs, _ = next(PLACES_ITER)
except StopIteration:
PLACES_ITER = iter(PLACES_LOADER)
imgs, _ = next(PLACES_ITER)
return imgs.cuda()
def random_overlay(x, dataset='places365_standard'):
"""Randomly overlay an image from Places"""
global PLACES_ITER
alpha = 0.5
if dataset == 'places365_standard':
if PLACES_LOADER is None:
_load_places(batch_size=x.size(0), image_size=x.size(-1))
imgs = _get_places_batch(batch_size=x.size(0)).repeat(1, x.size(1) // 3, 1, 1)
else:
raise NotImplementedError(f'overlay has not been implemented for dataset "{dataset}"')
return ((1 - alpha) * (x / 255.) + (alpha) * imgs) * 255.
def batch_from_obs(obs, batch_size=32):
"""Copy a single observation along the batch dimension"""
if isinstance(obs, torch.Tensor):
if len(obs.shape) == 3:
obs = obs.unsqueeze(0)
return obs.repeat(batch_size, 1, 1, 1)
if len(obs.shape) == 3:
obs = np.expand_dims(obs, axis=0)
return np.repeat(obs, repeats=batch_size, axis=0)
def prepare_pad_batch(obs, next_obs, action, batch_size=32):
"""Prepare batch for self-supervised policy adaptation at test-time"""
batch_obs = batch_from_obs(torch.from_numpy(obs).cuda(), batch_size)
batch_next_obs = batch_from_obs(torch.from_numpy(next_obs).cuda(), batch_size)
batch_action = torch.from_numpy(action).cuda().unsqueeze(0).repeat(batch_size, 1)
return random_crop_cuda(batch_obs), random_crop_cuda(batch_next_obs), batch_action
def random_crop_cuda(x, size=84, w1=None, h1=None, return_w1_h1=False):
"""Vectorized CUDA implementation of random crop"""
assert isinstance(x, torch.Tensor) and x.is_cuda, \
'input must be CUDA tensor'
n = x.shape[0]
img_size = x.shape[-1]
crop_max = img_size - size
if crop_max <= 0:
if return_w1_h1:
return x, None, None
return x
x = x.permute(0, 2, 3, 1)
if w1 is None:
w1 = torch.LongTensor(n).random_(0, crop_max)
h1 = torch.LongTensor(n).random_(0, crop_max)
windows = view_as_windows_cuda(x, (1, size, size, 1))[..., 0, :, :, 0]
cropped = windows[torch.arange(n), w1, h1]
if return_w1_h1:
return cropped, w1, h1
return cropped
def view_as_windows_cuda(x, window_shape):
"""PyTorch CUDA-enabled implementation of view_as_windows"""
assert isinstance(window_shape, tuple) and len(window_shape) == len(x.shape), \
'window_shape must be a tuple with same number of dimensions as x'
slices = tuple(slice(None, None, st) for st in torch.ones(4).long())
win_indices_shape = [
x.size(0),
x.size(1) - int(window_shape[1]),
x.size(2) - int(window_shape[2]),
x.size(3)
]
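    # Leading dims of the strided view index window start positions and trailing
    # dims index elements within a window; duplicating the tensor's own strides
    # lets as_strided expose every window as a zero-copy view.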
new_shape = tuple(list(win_indices_shape) + list(window_shape))
strides = tuple(list(x[slices].stride()) + list(x.stride()))
return x.as_strided(new_shape, strides)
def random_crop(imgs, size=84, w1=None, h1=None, return_w1_h1=False):
"""Vectorized random crop, imgs: (B,C,H,W), size: output size"""
assert (w1 is None and h1 is None) or (w1 is not None and h1 is not None), \
'must either specify both w1 and h1 or neither of them'
is_tensor = isinstance(imgs, torch.Tensor)
if is_tensor:
assert imgs.is_cuda, 'input images are tensors but not cuda!'
return random_crop_cuda(imgs, size=size, w1=w1, h1=h1, return_w1_h1=return_w1_h1)
n = imgs.shape[0]
img_size = imgs.shape[-1]
crop_max = img_size - size
if crop_max <= 0:
if return_w1_h1:
return imgs, None, None
return imgs
imgs = np.transpose(imgs, (0, 2, 3, 1))
if w1 is None:
w1 = np.random.randint(0, crop_max, n)
h1 = np.random.randint(0, crop_max, n)
windows = view_as_windows(imgs, (1, size, size, 1))[..., 0, :, :, 0]
cropped = windows[np.arange(n), w1, h1]
if return_w1_h1:
return cropped, w1, h1
return cropped
| [
"os.path.exists",
"torch.ones",
"numpy.repeat",
"numpy.arange",
"torch.LongTensor",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"torch.from_numpy",
"numpy.random.randint",
"numpy.expand_dims",
"torchvision.transforms.ToTensor",
"numpy.transpose",
"torchvision.transforms.RandomResizedCrop",
"torch.arange",
"skimage.util.view_as_windows"
] | [((2399, 2441), 'numpy.repeat', 'np.repeat', (['obs'], {'repeats': 'batch_size', 'axis': '(0)'}), '(obs, repeats=batch_size, axis=0)\n', (2408, 2441), True, 'import numpy as np\n'), ((5050, 5082), 'numpy.transpose', 'np.transpose', (['imgs', '(0, 2, 3, 1)'], {}), '(imgs, (0, 2, 3, 1))\n', (5062, 5082), True, 'import numpy as np\n'), ((473, 497), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (487, 497), False, 'import os\n'), ((2360, 2387), 'numpy.expand_dims', 'np.expand_dims', (['obs'], {'axis': '(0)'}), '(obs, axis=0)\n', (2374, 2387), True, 'import numpy as np\n'), ((5115, 5148), 'numpy.random.randint', 'np.random.randint', (['(0)', 'crop_max', 'n'], {}), '(0, crop_max, n)\n', (5132, 5148), True, 'import numpy as np\n'), ((5162, 5195), 'numpy.random.randint', 'np.random.randint', (['(0)', 'crop_max', 'n'], {}), '(0, crop_max, n)\n', (5179, 5195), True, 'import numpy as np\n'), ((5211, 5252), 'skimage.util.view_as_windows', 'view_as_windows', (['imgs', '(1, size, size, 1)'], {}), '(imgs, (1, size, size, 1))\n', (5226, 5252), False, 'from skimage.util import view_as_windows\n'), ((516, 569), 'os.path.join', 'os.path.join', (['data_dir', '"""places365_standard"""', '"""train"""'], {}), "(data_dir, 'places365_standard', 'train')\n", (528, 569), False, 'import os\n'), ((3565, 3580), 'torch.arange', 'torch.arange', (['n'], {}), '(n)\n', (3577, 3580), False, 'import torch\n'), ((5292, 5304), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5301, 5304), True, 'import numpy as np\n'), ((2611, 2632), 'torch.from_numpy', 'torch.from_numpy', (['obs'], {}), '(obs)\n', (2627, 2632), False, 'import torch\n'), ((2689, 2715), 'torch.from_numpy', 'torch.from_numpy', (['next_obs'], {}), '(next_obs)\n', (2705, 2715), False, 'import torch\n'), ((3372, 3391), 'torch.LongTensor', 'torch.LongTensor', (['n'], {}), '(n)\n', (3388, 3391), False, 'import torch\n'), ((3426, 3445), 'torch.LongTensor', 'torch.LongTensor', (['n'], {}), '(n)\n', (3442, 3445), False, 'import torch\n'), ((3984, 3997), 'torch.ones', 'torch.ones', (['(4)'], {}), '(4)\n', (3994, 3997), False, 'import torch\n'), ((701, 733), 'torchvision.transforms.RandomResizedCrop', 'TF.RandomResizedCrop', (['image_size'], {}), '(image_size)\n', (721, 733), True, 'import torchvision.transforms as TF\n'), ((755, 780), 'torchvision.transforms.RandomHorizontalFlip', 'TF.RandomHorizontalFlip', ([], {}), '()\n', (778, 780), True, 'import torchvision.transforms as TF\n'), ((802, 815), 'torchvision.transforms.ToTensor', 'TF.ToTensor', ([], {}), '()\n', (813, 815), True, 'import torchvision.transforms as TF\n'), ((2755, 2779), 'torch.from_numpy', 'torch.from_numpy', (['action'], {}), '(action)\n', (2771, 2779), False, 'import torch\n')] |
# Generated by Django 3.2.4 on 2021-06-25 04:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('nav', '0003_alter_jumpline_ob2'),
]
operations = [
migrations.CreateModel(
name='Ship',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ob1', models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='nav.jumppoint')),
],
),
]
| [
"django.db.models.BigAutoField",
"django.db.models.ForeignKey"
] | [((357, 453), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (376, 453), False, 'from django.db import migrations, models\n'), ((476, 591), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'default': '(0)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""+"""', 'to': '"""nav.jumppoint"""'}), "(default=0, on_delete=django.db.models.deletion.CASCADE,\n related_name='+', to='nav.jumppoint')\n", (493, 591), False, 'from django.db import migrations, models\n')] |
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import importlib
import sys
from infraform.cli import utils
from infraform.exceptions.usage import missing_scenario_arg
from infraform.exceptions.utils import success_or_exit
LOG = logging.getLogger(__name__)
def add_rm_parser(subparsers):
"""The parser for sub command 'rm'."""
rm_parser = subparsers.add_parser("rm")
rm_parser.set_defaults(func=main)
rm_parser.add_argument('--scenario', '-s',
dest="scenario",
                           help='Predefined scenario to use for execution')
rm_parser.add_argument('--platform', dest="platform",
help="The platform to use \
(podman, docker, terraform, shell, python)")
rm_parser.add_argument('--vars', dest="vars",
default="",
help="extra variables")
rm_parser.add_argument('--debug', dest="debug",
action="store_true",
help="Enable debug level logging")
rm_parser.add_argument('--hosts', dest="hosts",
default="", nargs='*',
help="host(s) to execute the scenario/command on \
by specifying host name or user@host")
rm_parser.add_argument('--skip-check', dest="skip_check",
action="store_true",
help="Skip requirements check")
rm_parser.add_argument('--keep-files', dest="keep_files",
action="store_true",
help="Don't remove scripts when done running")
def main(args):
"""Runner main entry."""
if not args.scenario and not args.vars:
LOG.error(missing_scenario_arg())
sys.exit(2)
if args.scenario and not args.platform:
args.platform = utils.guess_platform(args.scenario)
if not args.scenario and not args.platform:
success_or_exit(1, "Couldn't figure out which platform to use. \
Please specify --platform")
Platform = getattr(importlib.import_module(
"infraform.platforms.{}".format(args.platform)),
args.platform.capitalize())
platform = Platform(args=args)
platform.prepare()
platform.remove()
| [
"logging.getLogger",
"infraform.exceptions.usage.missing_scenario_arg",
"sys.exit",
"infraform.cli.utils.guess_platform",
"infraform.exceptions.utils.success_or_exit"
] | [((796, 823), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (813, 823), False, 'import logging\n'), ((2321, 2332), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2329, 2332), False, 'import sys\n'), ((2401, 2436), 'infraform.cli.utils.guess_platform', 'utils.guess_platform', (['args.scenario'], {}), '(args.scenario)\n', (2421, 2436), False, 'from infraform.cli import utils\n'), ((2493, 2587), 'infraform.exceptions.utils.success_or_exit', 'success_or_exit', (['(1)', '"""Couldn\'t figure out which platform to use. Please specify --platform"""'], {}), '(1,\n "Couldn\'t figure out which platform to use. Please specify --platform")\n', (2508, 2587), False, 'from infraform.exceptions.utils import success_or_exit\n'), ((2289, 2311), 'infraform.exceptions.usage.missing_scenario_arg', 'missing_scenario_arg', ([], {}), '()\n', (2309, 2311), False, 'from infraform.exceptions.usage import missing_scenario_arg\n')] |
import logging
import sys
from os import environ as env
from urllib.parse import urlencode
import requests
import concurrent.futures
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
PAGE_SIZE = 1000
API_KEY = env.get('API_KEY')
DRIVE_FILE_MIME_TYPES = {
'g_file': 'application/vnd.google-apps.file',
'g_folder': 'application/vnd.google-apps.folder'
}
def is_valid_drive_id(drive_id):
return drive_id and drive_id.strip() != ''
def is_drive_file_type(mime):
return mime == DRIVE_FILE_MIME_TYPES['g_file']
def is_drive_folder_type(mime):
return mime == DRIVE_FILE_MIME_TYPES['g_folder']
def get_files(drive_id, api_key, next_page_token=None):
query = {
        'orderBy': 'folder desc', # order by folder (descending) to try to list plain files first
'pageSize': PAGE_SIZE,
'key': api_key,
'q': '"%s" in parents' % (drive_id)
}
if next_page_token is not None:
query['pageToken'] = next_page_token
api_url = 'https://www.googleapis.com/drive/v3/files?%s' % urlencode(
query)
r = requests.get(api_url,
headers={"Accept": "application/json"})
if r.status_code != 200:
return [], [], None
content = r.json()
files = list(
filter(lambda file: not is_drive_folder_type(file['mimeType']),
content.get('files')))
folders = list(
filter(lambda file: is_drive_folder_type(file['mimeType']),
content.get('files')))
next_page_token = content.get('nextPageToken', None)
return files, folders, next_page_token
def extract_drive(drive_id):
folder_ids = [{
'drive_id': drive_id,
'next_page_token': None
}]
while len(folder_ids) > 0:
with concurrent.futures.ThreadPoolExecutor(max_workers=None) as worker:
futures = {
worker.submit(
get_files,
item['drive_id'],
API_KEY,
item['next_page_token']): item for item in
folder_ids[:10]}
for future in concurrent.futures.as_completed(futures):
item = futures[future]
(files, folders, page_token) = future.result()
if len(files) > 0:
yield [file['id'] for file in files]
if page_token:
item['next_page_token'] = page_token
else:
folder_ids.remove(item)
for folder in folders:
folder_ids.append({
'drive_id': folder['id'],
'next_page_token': None
})
if __name__ == '__main__':
drive_id = sys.argv[1] if len(sys.argv) > 1 else None
filename = '{}.csv'.format(drive_id)
total_file = 0
if not is_valid_drive_id(drive_id):
logger.info('input drive is not valid')
sys.exit()
with open(filename, mode='w+') as f:
for drives in extract_drive(drive_id):
total_file += len(drives)
logger.info('exported {} files'.format(total_file))
f.write('\n'.join(drives) + '\n')
| [
"logging.basicConfig",
"logging.getLogger",
"os.environ.get",
"requests.get",
"sys.exit",
"urllib.parse.urlencode"
] | [((135, 174), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (154, 174), False, 'import logging\n'), ((184, 211), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (201, 211), False, 'import logging\n'), ((241, 259), 'os.environ.get', 'env.get', (['"""API_KEY"""'], {}), "('API_KEY')\n", (248, 259), True, 'from os import environ as env\n'), ((1071, 1132), 'requests.get', 'requests.get', (['api_url'], {'headers': "{'Accept': 'application/json'}"}), "(api_url, headers={'Accept': 'application/json'})\n", (1083, 1132), False, 'import requests\n'), ((1037, 1053), 'urllib.parse.urlencode', 'urlencode', (['query'], {}), '(query)\n', (1046, 1053), False, 'from urllib.parse import urlencode\n'), ((2932, 2942), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2940, 2942), False, 'import sys\n')] |
import re
class Target:
"""
A target in a Makefile.
"""
def __init__(self, name, dependencies, statements):
self.name = name.strip()
self.dependencies = dependencies.strip().split(" ")
self.statements = [s.strip() for s in statements]
def __str__(self):
return f"{self.name}: {self.dependencies}"
def __repr__(self):
return self.__str__()
@classmethod
def from_lines(cls, lines):
"""
        Create a Target from its Makefile lines.
"""
tokens = lines[0].split(":")
if len(tokens) == 2:
name, dependencies = tokens
else:
name, dependencies = tokens[0], ""
return cls(name, dependencies, lines[1:])
def is_depend_on(self, depens):
return all([d in self.dependencies for d in depens])
def depend_is(self, depens):
return set(self.dependencies) == set(depens)
class Makefile:
"""
A Makefile.
"""
target_pattern = re.compile(r"^(?P<targetname>.+:)")
statement_pattern = re.compile(r"^\t.+")
def __init__(self, path):
self.path = path
self.lines = []
self.targets = []
self.read()
self.parse()
def __getitem__(self, key):
if not isinstance(key, str):
raise TypeError("key has to be string")
for t in self.targets:
if t.name == key:
return t
raise KeyError(f"{key} cannot be found")
def read(self):
with open(self.path, "r") as f:
self.lines = f.readlines()
def parse(self):
targets = []
target_lines = []
for line in self.lines:
target_match_result = self.target_pattern.search(line)
statement_match_result = self.statement_pattern.match(line)
if target_match_result is not None:
if len(target_lines) != 0:
targets.append(Target.from_lines(target_lines))
target_lines = [line]
elif statement_match_result is not None:
target_lines.append(line)
# That one last target.
if len(target_lines) != 0:
targets.append(Target.from_lines(target_lines))
self.targets = targets
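# A minimal usage sketch (hedged: assumes a Makefile in the working directory
# that defines a 'build' target; Target and Makefile are the classes above):
#
#   mk = Makefile("Makefile")
#   build = mk["build"]                  # KeyError if the target is missing
#   print(build.dependencies)            # e.g. ['deps', 'src']
#   print(build.is_depend_on(["deps"]))  # True if 'deps' is among them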
| [
"re.compile"
] | [((991, 1025), 're.compile', 're.compile', (['"""^(?P<targetname>.+:)"""'], {}), "('^(?P<targetname>.+:)')\n", (1001, 1025), False, 'import re\n'), ((1051, 1071), 're.compile', 're.compile', (['"""^\\\\t.+"""'], {}), "('^\\\\t.+')\n", (1061, 1071), False, 'import re\n')] |
#!/usr/bin/env python
# coding: utf-8
# # TEACH Video Clip Creator
#
# This program will create 15 minute chunks of videos for the TEACH graders to use to evaluate teacher pedagogical skills.
#
# The program will use Python to clip the longer classroom videos into 15 minute chunks.
# In[1]:
# Import everything needed to edit video clips
from moviepy.editor import *
#import tool for dealing with files and paths
from pathlib import Path, PureWindowsPath
#Allow showing video in python notebook
from IPython.display import Video
#inport os.path
import os.path
#specify file path
save_folder=Path("C:/Users/wb469649/WBG/<NAME> - Videos_Peru/")
network_folder=Path("//wbgfscifs01/GEDEDU/datalib-edu/projects/GEPD-Confidential/CNT/PER/PER_2019_GEPD/PER_2019_GEPD_v01_RAW/Data/videos")
#data_folder=Path("D:/Módulo 4 Observación Aula")
data_folder=Path("C:/Users/wb469649/WBG/<NAME> - Videos_Peru/")
# # Test Case
#
# Now I will open a test clip to see how things are working. This will help us understand how well the program is functioning.
# In[2]:
#specify file to open
file_to_open = data_folder / 'Ruta 1' / 'Video' / '256305-(1) IE18113 Amazonas .mp4'
print(file_to_open)
# Now we will create a subclip between 10 and 10:30 minutes of the video.
# In[3]:
#Calculate duration of video
vid=VideoFileClip(str(file_to_open))
vid.reader.close()
vid.audio.reader.close_proc()
vid_duration=vid.duration
print(vid_duration)
# In[4]:
# Load test clip and select the subclip 00:10:00 - 00:10:30
clip = vid.subclip('00:10:00','00:10:30')
clip.ipython_display(width=280)
# Now we will save the clip to the folder.
# In[5]:
#path to saved file
file_to_write= save_folder / 'Ruta 1' / 'Video_clips' / '256305-(1) IE18113 Amazonas - Test Clip 1 .mp4'
#write the clip
clip.write_videofile(str(file_to_write), preset='veryfast', threads=2)
# ## Loop through all videos and create clips
#
# Now that we have completed a test case, we will loop through all the videos and create two clips for each video. One clip spans minutes 8 to 23 of the recording; the other spans minutes 24 to 39. These clips should be checked by hand to report any mistakes or problems. This Python code automates the process of creating the clips, but human eyes should verify the quality.
#
# Because of the large file sizes and disk space constraints, I have set up the code to run in chunks, based on the enumerator routes (Ruta 1, Ruta 2, Ruta 3, ...)
# In[6]:
# first define the route "chunk"
chunk = 'Ruta 6'
chunks=[ 'Ruta 18', 'Ruta 19', 'Ruta 20']
for chunk in chunks:
path_to_open = data_folder / chunk / 'Video'
if os.path.isdir(str(path_to_open)):
print("Is Path")
else:
print("need new path")
path_to_open = data_folder / chunk / 'Videos'
path_to_write = save_folder / chunk / 'Video_clips'
Path(path_to_write).mkdir(parents=True, exist_ok=True)
print(path_to_write)
# In[7]:
#get list of all files
file_list =[f for f in path_to_open.glob('**/*') if f.is_file() ]
for f in file_list:
print(f)
print(f.parts[7])
    file_name=str(f.parts[7]) #the path component holding just the file name
file_name_base=file_name[:-4]
file_name_new=file_name_base + "Clip 1 " + ".MP4"
print(file_name_new)
# In[ ]:
for f in file_list:
#come up a new file name called ".. Clip1.MP4" and ".. Clip2.MP4"
    file_name=str(f.parts[7]) #the path component holding just the file name
file_name_base=file_name[:-4]
file_name_suffix=file_name[-4:]
file_name_new1=file_name_base + "Clip 1" + ".MP4"
file_name_new2=file_name_base + "Clip 2" + ".MP4"
print(file_name_new1)
print(file_name_new2)
#Calculate duration of video
vid=VideoFileClip(str(f))
vid.reader.close()
vid.audio.reader.close_proc()
vid_duration=vid.duration
    #do this if video duration is at least 39 min (2340 s), enough for two clips
    if vid_duration >= 2340:
print("Video is of sufficient length for two clips")
#Now cut the clips
clip1 = vid.subclip('00:08:00','00:23:00')
file_to_write1= save_folder / chunk / 'Video_clips' / file_name_new1
clip2 = vid.subclip('00:24:00','00:39:00')
file_to_write2= save_folder / chunk / 'Video_clips' / file_name_new2
if os.path.isfile(str(file_to_write1)):
print ("File exist")
else:
print ("File not exist")
#write the clip
clip1.write_videofile(str(file_to_write1), codec='libx264')
if os.path.isfile(str(file_to_write2)):
print ("File exist")
else:
print ("File not exist")
#write the clip
clip2.write_videofile(str(file_to_write2), threads=200, codec='libx264', logger=None)
    #do this if video duration is at least 30 min (1800 s) but less than 39 min (2340 s)
    elif vid_duration >= 1800 and vid_duration < 2340:
        print("Video is between 30 and 39 minutes, enough for one clip")
#Now cut the clips
clip1 = vid.subclip('00:08:00','00:23:00')
file_to_write1= save_folder / chunk / 'Video_clips' / file_name_new1
if os.path.isfile(str(file_to_write1)):
print ("File exist")
else:
print ("File not exist")
#write the clip
clip1.write_videofile(str(file_to_write1), threads=200, codec='libx264')
else:
print("Video of insufficient length")
| [
"pathlib.Path"
] | [((600, 651), 'pathlib.Path', 'Path', (['"""C:/Users/wb469649/WBG/<NAME> - Videos_Peru/"""'], {}), "('C:/Users/wb469649/WBG/<NAME> - Videos_Peru/')\n", (604, 651), False, 'from pathlib import Path, PureWindowsPath\n'), ((668, 801), 'pathlib.Path', 'Path', (['"""//wbgfscifs01/GEDEDU/datalib-edu/projects/GEPD-Confidential/CNT/PER/PER_2019_GEPD/PER_2019_GEPD_v01_RAW/Data/videos"""'], {}), "(\n '//wbgfscifs01/GEDEDU/datalib-edu/projects/GEPD-Confidential/CNT/PER/PER_2019_GEPD/PER_2019_GEPD_v01_RAW/Data/videos'\n )\n", (672, 801), False, 'from pathlib import Path, PureWindowsPath\n'), ((854, 905), 'pathlib.Path', 'Path', (['"""C:/Users/wb469649/WBG/<NAME> - Videos_Peru/"""'], {}), "('C:/Users/wb469649/WBG/<NAME> - Videos_Peru/')\n", (858, 905), False, 'from pathlib import Path, PureWindowsPath\n'), ((2915, 2934), 'pathlib.Path', 'Path', (['path_to_write'], {}), '(path_to_write)\n', (2919, 2934), False, 'from pathlib import Path, PureWindowsPath\n')] |
#! /usr/bin/env python
"""
Setup script.
"""
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install
import numpy as np
# Version warning
if sys.version_info >= (3,):
print("Please note that this software was only tested with Python 2.7.")
# Determine whether Cython is available
try:
from Cython.Distutils import build_ext
except ImportError:
print("Cython is not available.")
use_cython = False
else:
use_cython = True
# Build information
if use_cython:
ext_modules = [Extension('pycog.euler', ['pycog/euler.pyx'],
extra_compile_args=['-Wno-unused-function'],
include_dirs=[np.get_include()])]
cmdclass = {'build_ext': build_ext}
else:
ext_modules = []
cmdclass = {}
# Setup
setup(
name='pycog',
version='0.1',
license='MIT',
author='<NAME>, <NAME>',
author_email='<EMAIL>',
url='https://github.com/frsong/pycog',
cmdclass=cmdclass,
ext_modules=ext_modules,
packages=find_packages(exclude=['examples', 'examples.*', 'paper']),
setup_requires=['numpy'],
install_requires=['theano'],
classifiers=[
'Programming Language :: Python',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering'
]
)
| [
"setuptools.find_packages",
"numpy.get_include"
] | [((1088, 1146), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['examples', 'examples.*', 'paper']"}), "(exclude=['examples', 'examples.*', 'paper'])\n", (1101, 1146), False, 'from setuptools import setup, find_packages, Extension\n'), ((740, 756), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (754, 756), True, 'import numpy as np\n')] |
import logging
from . import asyncio, sys
from .__version__ import __version__
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
__all__ = ["asyncio", "sys"]
| [
"logging.getLogger",
"logging.NullHandler"
] | [((90, 117), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (107, 117), False, 'import logging\n'), ((136, 157), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (155, 157), False, 'import logging\n')] |
#!/usr/bin/python
import os
import sys
import subprocess
import shutil
import tempfile
import re
#dump -0 / -f dump.img && dd if=/dev/zero of=filesystem.img bs=2M count=1000 && losetup /dev/loop0 filesystem.img && mkdir filesystem && mke2fs -j /dev/loop0 && mount -t ext3 /dev/loop0 -o loop filesystem && cd filesystem && restore -r -f ../dump.img && cd ../ && umount filesystem && losetup -d /dev/loop0
def main():
def usage():
sys.exit(1)
def mktmpdir():
tmpdir = tempfile.mkdtemp()
return tmpdir
def mktmpfile():
tmpfile = tempfile.mkstemp()
return tmpfile[1]
def size(ent):
current = os.stat(ent).st_size
return current
def mkempty(bs, count):
        image = tempfile.mkstemp()[1]  # keep the path, not the (fd, path) tuple
args = ("/bin/dd", "if=/dev/zero", "of=%s"%image, "bs=%s"%bs, "count=%s"%count)
execute(args)
return image
def dump(ent, file):
args = ("/sbin/dump", "-0", ent, "-f", file)
execute(args)
def restore():
pass
def mkcopy():
pass
def getrootents():
ents = list()
args = ("/bin/df", "-lh")
data = execute(args, needdata=True)
lines = data.split('\n')
for line in lines:
if re.search(r"/dev/", line) and not re.search(r"tmpfs", line):
ent = re.search(r"/dev/[-0-9A-Za-z]+", line)
ents.append(ent.group())
return ents
def execute(args, needdata=False):
if needdata is False:
rc = subprocess.call(args)
else:
rc = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()[0]
return rc
def setup():
dumpfiles = list()
ents = getrootents()
for ent in ents:
dumpfile = mktmpfile()
dump(ent, dumpfile)
dumpfiles.append(dumpfile)
tsize = 0
for dumpfile in dumpfiles:
tsize = size(dumpfile) + tsize
bs = 2048
count = (tsize / 2048)
target = mkempty(bs, count)
setup()
rc = 0
return rc
if __name__ == "__main__":
sys.exit(main())
| [
"subprocess.Popen",
"tempfile.mkdtemp",
"subprocess.call",
"sys.exit",
"os.stat",
"tempfile.mkstemp",
"re.search"
] | [((446, 457), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (454, 457), False, 'import sys\n'), ((500, 518), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (516, 518), False, 'import tempfile\n'), ((581, 599), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (597, 599), False, 'import tempfile\n'), ((761, 779), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (777, 779), False, 'import tempfile\n'), ((664, 676), 'os.stat', 'os.stat', (['ent'], {}), '(ent)\n', (671, 676), False, 'import os\n'), ((1577, 1598), 'subprocess.call', 'subprocess.call', (['args'], {}), '(args)\n', (1592, 1598), False, 'import subprocess\n'), ((1288, 1312), 're.search', 're.search', (['"""/dev/"""', 'line'], {}), "('/dev/', line)\n", (1297, 1312), False, 'import re\n'), ((1371, 1408), 're.search', 're.search', (['"""/dev/[-0-9A-Za-z]+"""', 'line'], {}), "('/dev/[-0-9A-Za-z]+', line)\n", (1380, 1408), False, 'import re\n'), ((1322, 1346), 're.search', 're.search', (['"""tmpfs"""', 'line'], {}), "('tmpfs', line)\n", (1331, 1346), False, 'import re\n'), ((1630, 1676), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE'}), '(args, stdout=subprocess.PIPE)\n', (1646, 1676), False, 'import subprocess\n')] |
import os
import pathlib
import discord
from discord.commands import slash_command
from discord.ext import commands
BASE_DIR = pathlib.Path(__file__).parent.parent
class Privacy(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@slash_command(name='privacy-policy')
async def privacy_policy(self, ctx: discord.ApplicationContext):
"""Show the privacy policy"""
with open(BASE_DIR / 'config' / 'privacy-policy.md', 'r') as f:
await ctx.respond(f.read())
@slash_command(name='opt-out')
async def opt_out(self, ctx: discord.ApplicationContext):
"""Opt out of your message content data to be tracked"""
opt_out_users = []
if os.path.exists(BASE_DIR / 'data' / 'opt-out-users.txt'):
with open(BASE_DIR / 'data' / 'opt-out-users.txt', 'r') as f:
for line in f.readlines():
if line.strip():
opt_out_users.append(int(line))
if ctx.author.id in opt_out_users:
await ctx.respond('Your message content is already off-track. To use other commands, please use the /opt-in command.')
else:
opt_out_users.append(ctx.author.id)
with open(BASE_DIR / 'data' / 'opt-out-users.txt', 'w') as f:
for user in opt_out_users:
f.write(str(user) + '\n')
await ctx.respond('This bot will not track your message content from now on. Most commands will no longer respond.')
@slash_command(name='opt-in')
async def opt_in(self, ctx: discord.ApplicationContext):
"""Opt out of your message content data to be tracked"""
opt_out_users = []
if os.path.exists(BASE_DIR / 'data' / 'opt-out-users.txt'):
with open(BASE_DIR / 'data' / 'opt-out-users.txt', 'r') as f:
for line in f.readlines():
if line.strip():
opt_out_users.append(int(line))
if ctx.author.id in opt_out_users:
opt_out_users.remove(ctx.author.id)
with open(BASE_DIR / 'data' / 'opt-out-users.txt', 'w') as f:
for user in opt_out_users:
f.write(str(user) + '\n')
await ctx.respond('This bot will now track the content of your messages. It will only be used to provide commands. Use the /privacy-policy command to view the privacy policy.')
else:
await ctx.respond('This bot is already tracking your message content.')
def setup(bot):
return bot.add_cog(Privacy(bot))
| [
"os.path.exists",
"pathlib.Path",
"discord.commands.slash_command"
] | [((271, 307), 'discord.commands.slash_command', 'slash_command', ([], {'name': '"""privacy-policy"""'}), "(name='privacy-policy')\n", (284, 307), False, 'from discord.commands import slash_command\n'), ((533, 562), 'discord.commands.slash_command', 'slash_command', ([], {'name': '"""opt-out"""'}), "(name='opt-out')\n", (546, 562), False, 'from discord.commands import slash_command\n'), ((1529, 1557), 'discord.commands.slash_command', 'slash_command', ([], {'name': '"""opt-in"""'}), "(name='opt-in')\n", (1542, 1557), False, 'from discord.commands import slash_command\n'), ((130, 152), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (142, 152), False, 'import pathlib\n'), ((728, 783), 'os.path.exists', 'os.path.exists', (["(BASE_DIR / 'data' / 'opt-out-users.txt')"], {}), "(BASE_DIR / 'data' / 'opt-out-users.txt')\n", (742, 783), False, 'import os\n'), ((1722, 1777), 'os.path.exists', 'os.path.exists', (["(BASE_DIR / 'data' / 'opt-out-users.txt')"], {}), "(BASE_DIR / 'data' / 'opt-out-users.txt')\n", (1736, 1777), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from keras.layers import Input
from keras.layers.core import Dense
from keras.models import Model
from keras.layers.core import Activation
inputs = Input(shape=(784,))
x = Dense(32)(inputs)
x = Activation("sigmoid")(x)
x = Dense(10)(x)
predictions = Activation("softmax")(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss="categorical_crossentropy", optimizer="adam")
| [
"keras.layers.Input",
"keras.models.Model",
"keras.layers.core.Activation",
"keras.layers.core.Dense"
] | [((173, 192), 'keras.layers.Input', 'Input', ([], {'shape': '(784,)'}), '(shape=(784,))\n', (178, 192), False, 'from keras.layers import Input\n'), ((310, 351), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'predictions'}), '(inputs=inputs, outputs=predictions)\n', (315, 351), False, 'from keras.models import Model\n'), ((198, 207), 'keras.layers.core.Dense', 'Dense', (['(32)'], {}), '(32)\n', (203, 207), False, 'from keras.layers.core import Dense\n'), ((220, 241), 'keras.layers.core.Activation', 'Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (230, 241), False, 'from keras.layers.core import Activation\n'), ((249, 258), 'keras.layers.core.Dense', 'Dense', (['(10)'], {}), '(10)\n', (254, 258), False, 'from keras.layers.core import Dense\n'), ((276, 297), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (286, 297), False, 'from keras.layers.core import Activation\n')] |
# coding: utf-8
"""
PowerMeter API
API # noqa: E501
The version of the OpenAPI document: 2021.4.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from powermeter_api.configuration import Configuration
class Heatsink(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'name': 'str',
'descr': 'str',
'editable': 'bool',
'version': 'HeatsinkVersion',
'current_version': 'str',
'width': 'float',
'height': 'float',
'thickness': 'float',
'r_convec': 'float',
'c_convec': 'float',
'conductivity': 'float',
'heatcapacity': 'float'
}
attribute_map = {
'id': 'id',
'name': 'name',
'descr': 'descr',
'editable': 'editable',
'version': 'version',
'current_version': 'current_version',
'width': 'width',
'height': 'height',
'thickness': 'thickness',
'r_convec': 'r_convec',
'c_convec': 'c_convec',
'conductivity': 'conductivity',
'heatcapacity': 'heatcapacity'
}
def __init__(self, id=None, name=None, descr=None, editable=None, version=None, current_version=None, width=None, height=None, thickness=None, r_convec=None, c_convec=None, conductivity=None, heatcapacity=None, local_vars_configuration=None): # noqa: E501
"""Heatsink - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._descr = None
self._editable = None
self._version = None
self._current_version = None
self._width = None
self._height = None
self._thickness = None
self._r_convec = None
self._c_convec = None
self._conductivity = None
self._heatcapacity = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if descr is not None:
self.descr = descr
if editable is not None:
self.editable = editable
if version is not None:
self.version = version
if current_version is not None:
self.current_version = current_version
if width is not None:
self.width = width
if height is not None:
self.height = height
if thickness is not None:
self.thickness = thickness
if r_convec is not None:
self.r_convec = r_convec
if c_convec is not None:
self.c_convec = c_convec
if conductivity is not None:
self.conductivity = conductivity
if heatcapacity is not None:
self.heatcapacity = heatcapacity
@property
def id(self):
"""Gets the id of this Heatsink. # noqa: E501
:return: The id of this Heatsink. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Heatsink.
:param id: The id of this Heatsink. # noqa: E501
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this Heatsink. # noqa: E501
:return: The name of this Heatsink. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Heatsink.
:param name: The name of this Heatsink. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def descr(self):
"""Gets the descr of this Heatsink. # noqa: E501
:return: The descr of this Heatsink. # noqa: E501
:rtype: str
"""
return self._descr
@descr.setter
def descr(self, descr):
"""Sets the descr of this Heatsink.
:param descr: The descr of this Heatsink. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
descr is not None and len(descr) < 1):
raise ValueError("Invalid value for `descr`, length must be greater than or equal to `1`") # noqa: E501
self._descr = descr
@property
def editable(self):
"""Gets the editable of this Heatsink. # noqa: E501
:return: The editable of this Heatsink. # noqa: E501
:rtype: bool
"""
return self._editable
@editable.setter
def editable(self, editable):
"""Sets the editable of this Heatsink.
:param editable: The editable of this Heatsink. # noqa: E501
:type: bool
"""
self._editable = editable
@property
def version(self):
"""Gets the version of this Heatsink. # noqa: E501
:return: The version of this Heatsink. # noqa: E501
:rtype: HeatsinkVersion
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this Heatsink.
:param version: The version of this Heatsink. # noqa: E501
:type: HeatsinkVersion
"""
self._version = version
@property
def current_version(self):
"""Gets the current_version of this Heatsink. # noqa: E501
:return: The current_version of this Heatsink. # noqa: E501
:rtype: str
"""
return self._current_version
@current_version.setter
def current_version(self, current_version):
"""Sets the current_version of this Heatsink.
:param current_version: The current_version of this Heatsink. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
current_version is not None and len(current_version) < 1):
raise ValueError("Invalid value for `current_version`, length must be greater than or equal to `1`") # noqa: E501
self._current_version = current_version
@property
def width(self):
"""Gets the width of this Heatsink. # noqa: E501
:return: The width of this Heatsink. # noqa: E501
:rtype: float
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this Heatsink.
:param width: The width of this Heatsink. # noqa: E501
:type: float
"""
self._width = width
@property
def height(self):
"""Gets the height of this Heatsink. # noqa: E501
:return: The height of this Heatsink. # noqa: E501
:rtype: float
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this Heatsink.
:param height: The height of this Heatsink. # noqa: E501
:type: float
"""
self._height = height
@property
def thickness(self):
"""Gets the thickness of this Heatsink. # noqa: E501
:return: The thickness of this Heatsink. # noqa: E501
:rtype: float
"""
return self._thickness
@thickness.setter
def thickness(self, thickness):
"""Sets the thickness of this Heatsink.
:param thickness: The thickness of this Heatsink. # noqa: E501
:type: float
"""
self._thickness = thickness
@property
def r_convec(self):
"""Gets the r_convec of this Heatsink. # noqa: E501
:return: The r_convec of this Heatsink. # noqa: E501
:rtype: float
"""
return self._r_convec
@r_convec.setter
def r_convec(self, r_convec):
"""Sets the r_convec of this Heatsink.
:param r_convec: The r_convec of this Heatsink. # noqa: E501
:type: float
"""
self._r_convec = r_convec
@property
def c_convec(self):
"""Gets the c_convec of this Heatsink. # noqa: E501
:return: The c_convec of this Heatsink. # noqa: E501
:rtype: float
"""
return self._c_convec
@c_convec.setter
def c_convec(self, c_convec):
"""Sets the c_convec of this Heatsink.
:param c_convec: The c_convec of this Heatsink. # noqa: E501
:type: float
"""
self._c_convec = c_convec
@property
def conductivity(self):
"""Gets the conductivity of this Heatsink. # noqa: E501
:return: The conductivity of this Heatsink. # noqa: E501
:rtype: float
"""
return self._conductivity
@conductivity.setter
def conductivity(self, conductivity):
"""Sets the conductivity of this Heatsink.
:param conductivity: The conductivity of this Heatsink. # noqa: E501
:type: float
"""
self._conductivity = conductivity
@property
def heatcapacity(self):
"""Gets the heatcapacity of this Heatsink. # noqa: E501
:return: The heatcapacity of this Heatsink. # noqa: E501
:rtype: float
"""
return self._heatcapacity
@heatcapacity.setter
def heatcapacity(self, heatcapacity):
"""Sets the heatcapacity of this Heatsink.
:param heatcapacity: The heatcapacity of this Heatsink. # noqa: E501
:type: float
"""
self._heatcapacity = heatcapacity
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Heatsink):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Heatsink):
return True
return self.to_dict() != other.to_dict()
| [
"six.iteritems",
"powermeter_api.configuration.Configuration"
] | [((10338, 10371), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (10351, 10371), False, 'import six\n'), ((1973, 1988), 'powermeter_api.configuration.Configuration', 'Configuration', ([], {}), '()\n', (1986, 1988), False, 'from powermeter_api.configuration import Configuration\n')] |
# Generated by Django 3.2.9 on 2021-11-21 15:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Problem',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64, verbose_name='title')),
('description', models.TextField(verbose_name='description')),
('pub_date', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='date')),
('file', models.FileField(upload_to=None)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='problems', to=settings.AUTH_USER_MODEL, verbose_name='author')),
],
),
]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.BigAutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((247, 304), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (278, 304), False, 'from django.db import migrations, models\n'), ((436, 532), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (455, 532), False, 'from django.db import migrations, models\n'), ((557, 610), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)', 'verbose_name': '"""title"""'}), "(max_length=64, verbose_name='title')\n", (573, 610), False, 'from django.db import migrations, models\n'), ((645, 689), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""description"""'}), "(verbose_name='description')\n", (661, 689), False, 'from django.db import migrations, models\n'), ((721, 796), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'db_index': '(True)', 'verbose_name': '"""date"""'}), "(auto_now_add=True, db_index=True, verbose_name='date')\n", (741, 796), False, 'from django.db import migrations, models\n'), ((824, 856), 'django.db.models.FileField', 'models.FileField', ([], {'upload_to': 'None'}), '(upload_to=None)\n', (840, 856), False, 'from django.db import migrations, models\n'), ((886, 1030), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""problems"""', 'to': 'settings.AUTH_USER_MODEL', 'verbose_name': '"""author"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='problems', to=settings.AUTH_USER_MODEL, verbose_name='author')\n", (903, 1030), False, 'from django.db import migrations, models\n')] |
from django.urls import path
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path("", views.home_page, name="home_page"),
path("<int:id>/delete", views.delete_post, name = "delete_post"),
path("new_post/", views.new_post, name = "new_post"),
path("<int:pk>/", views.post_detail, name = "post_detail"),
path("<int:pk>", views.like, name = "likes"),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | [
"django.conf.urls.static.static",
"django.urls.path"
] | [((146, 189), 'django.urls.path', 'path', (['""""""', 'views.home_page'], {'name': '"""home_page"""'}), "('', views.home_page, name='home_page')\n", (150, 189), False, 'from django.urls import path\n'), ((195, 257), 'django.urls.path', 'path', (['"""<int:id>/delete"""', 'views.delete_post'], {'name': '"""delete_post"""'}), "('<int:id>/delete', views.delete_post, name='delete_post')\n", (199, 257), False, 'from django.urls import path\n'), ((265, 315), 'django.urls.path', 'path', (['"""new_post/"""', 'views.new_post'], {'name': '"""new_post"""'}), "('new_post/', views.new_post, name='new_post')\n", (269, 315), False, 'from django.urls import path\n'), ((323, 379), 'django.urls.path', 'path', (['"""<int:pk>/"""', 'views.post_detail'], {'name': '"""post_detail"""'}), "('<int:pk>/', views.post_detail, name='post_detail')\n", (327, 379), False, 'from django.urls import path\n'), ((387, 429), 'django.urls.path', 'path', (['"""<int:pk>"""', 'views.like'], {'name': '"""likes"""'}), "('<int:pk>', views.like, name='likes')\n", (391, 429), False, 'from django.urls import path\n'), ((472, 533), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (478, 533), False, 'from django.conf.urls.static import static\n')] |
# -*- coding: utf-8 -*-
import pandas as pd
from findy.interface import Region, Provider
from findy.interface.writer import df_to_db
from findy.database.schema.meta.stock_meta import Index
from findy.database.context import get_db_session
from findy.utils.pd import pd_valid
from findy.utils.time import to_pd_timestamp
CHINA_STOCK_MAIN_INDEX = [{'id': 'index_sh_000001',
'entity_id': 'index_sh_000001',
'code': '000001',
'name': '上证指数',
'timestamp': '1990-12-19',
'exchange': 'sh',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sh_000016',
'entity_id': 'index_sh_000016',
'code': '000016',
'name': '上证50',
'timestamp': '2004-01-02',
'exchange': 'sh',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sh_000905',
'entity_id': 'index_sh_000905',
'code': '000905',
'name': '中证500',
'timestamp': '2005-01-04',
'exchange': 'sh',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_399001',
'entity_id': 'index_sz_399001',
'code': '399001',
'name': '深证成指',
'timestamp': '1991-04-03',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_399106',
'entity_id': 'index_sz_399106',
'code': '399106',
'name': '深证综指',
'timestamp': '1991-04-03',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_399300',
'entity_id': 'index_sz_399300',
'code': '399300',
'name': '沪深300',
'timestamp': '2002-01-04',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_399005',
'entity_id': 'index_sz_399005',
'code': '399005',
'name': '中小板指',
'timestamp': '2006-01-24',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_399006',
'entity_id': 'index_sz_399006',
'code': '399006',
'name': '创业板指',
'timestamp': '2010-06-01',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sh_000688',
'entity_id': 'index_sh_000688',
'code': '000688',
'name': '科创50',
'timestamp': '2019-01-01',
'exchange': 'sh',
'entity_type': 'index',
'category': 'main'},
                           # # JoinQuant Stock Connect codes (code / name):
                           # # 310001  沪股通 (Shanghai Connect, northbound)
                           # # 310002  深股通 (Shenzhen Connect, northbound)
                           # # 310003  港股通(沪) (HK Connect via Shanghai)
                           # # 310004  港股通(深) (HK Connect via Shenzhen)
{'id': 'index_sz_310001',
'entity_id': 'index_sz_310001',
'code': '310001',
'name': '沪股通',
'timestamp': '2014-11-17',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_310002',
'entity_id': 'index_sz_310002',
'code': '310002',
'name': '深股通',
'timestamp': '2014-11-17',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_310003',
'entity_id': 'index_sz_310003',
'code': '310003',
'name': '港股通(沪)',
'timestamp': '2014-11-17',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_sz_310004',
'entity_id': 'index_sz_310004',
'code': '310004',
'name': '港股通(深)',
'timestamp': '2014-11-17',
'exchange': 'sz',
'entity_type': 'index',
'category': 'main'}
]
US_STOCK_MAIN_INDEX = [{'id': 'index_cme_SPY',
'entity_id': 'index_cme_SPY',
'code': 'SPY',
'name': "Standard & Poor's 500",
'timestamp': '1990-12-19',
'exchange': 'cme',
'entity_type': 'index',
'category': 'main'},
{'id': 'index_cme_^DJI',
'entity_id': 'index_cme_^DJI',
'code': '^DJI',
'name': "Dow Jones Industrial Average",
'timestamp': '1990-12-19',
'exchange': 'cme',
'entity_type': 'index',
'category': 'main'},
]
async def init_main_index(region: Region, provider=Provider.Exchange):
if region == Region.CHN:
for item in CHINA_STOCK_MAIN_INDEX:
item['timestamp'] = to_pd_timestamp(item['timestamp'])
df = pd.DataFrame(CHINA_STOCK_MAIN_INDEX)
elif region == Region.US:
for item in US_STOCK_MAIN_INDEX:
item['timestamp'] = to_pd_timestamp(item['timestamp'])
df = pd.DataFrame(US_STOCK_MAIN_INDEX)
else:
print("index not initialized, in file: init_main_index")
df = pd.DataFrame()
if pd_valid(df):
await df_to_db(region=region,
provider=provider,
data_schema=Index,
db_session=get_db_session(region, provider, Index),
df=df)
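# A hedged usage sketch (assumes a configured findy database; asyncio is only
# needed for this example and is not imported by this module):
#
#   import asyncio
#   asyncio.run(init_main_index(Region.CHN, Provider.Exchange))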
| [
"pandas.DataFrame",
"findy.database.context.get_db_session",
"findy.utils.pd.pd_valid",
"findy.utils.time.to_pd_timestamp"
] | [((7139, 7151), 'findy.utils.pd.pd_valid', 'pd_valid', (['df'], {}), '(df)\n', (7147, 7151), False, 'from findy.utils.pd import pd_valid\n'), ((6806, 6842), 'pandas.DataFrame', 'pd.DataFrame', (['CHINA_STOCK_MAIN_INDEX'], {}), '(CHINA_STOCK_MAIN_INDEX)\n', (6818, 6842), True, 'import pandas as pd\n'), ((6758, 6792), 'findy.utils.time.to_pd_timestamp', 'to_pd_timestamp', (["item['timestamp']"], {}), "(item['timestamp'])\n", (6773, 6792), False, 'from findy.utils.time import to_pd_timestamp\n'), ((6994, 7027), 'pandas.DataFrame', 'pd.DataFrame', (['US_STOCK_MAIN_INDEX'], {}), '(US_STOCK_MAIN_INDEX)\n', (7006, 7027), True, 'import pandas as pd\n'), ((7116, 7130), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7128, 7130), True, 'import pandas as pd\n'), ((6946, 6980), 'findy.utils.time.to_pd_timestamp', 'to_pd_timestamp', (["item['timestamp']"], {}), "(item['timestamp'])\n", (6961, 6980), False, 'from findy.utils.time import to_pd_timestamp\n'), ((7309, 7348), 'findy.database.context.get_db_session', 'get_db_session', (['region', 'provider', 'Index'], {}), '(region, provider, Index)\n', (7323, 7348), False, 'from findy.database.context import get_db_session\n')] |
import jinja2
from pathlib import Path
from typing import List
import pluggy
from . import utils
from pyroll.core import Unit
def _merge_properties(hook_results):
d = dict()
for r in hook_results:
d.update(r)
return d
class Reporter:
"""Class able to generate an HTML report sheet out of simulation results."""
plugin_manager = pluggy.PluginManager("pyroll_report")
hookspec = pluggy.HookspecMarker("pyroll_report")
hookimpl = pluggy.HookimplMarker("pyroll_report")
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(Path(__file__).parent, encoding="utf-8")
)
def render(self, units: List[Unit]) -> str:
"""
Render an HTML report from the specified units list.
:param units: list of units to take the data from
:returns: generated HTML code as string
"""
template = self.env.get_template("main.html")
unit_plots = [
list(map(
utils.get_svg_from_figure,
reversed(
self.plugin_manager.hook.unit_plot(unit=u)
)
))
for u in units
]
unit_properties = [
_merge_properties(self.plugin_manager.hook.unit_properties(unit=u))
for u in units
]
sequence_plots = map(
utils.get_svg_from_figure,
reversed(
self.plugin_manager.hook.sequence_plot(units=units)
)
)
sequence_properties = _merge_properties(self.plugin_manager.hook.sequence_properties(units=units))
rendered = template.render(
units=units,
unit_plots=unit_plots,
unit_properties=unit_properties,
unit_infos=zip(units, unit_properties, unit_plots),
sequence_plots=sequence_plots,
sequence_properties=sequence_properties
)
return rendered
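# A hedged usage sketch (assumes plugins implementing the unit_plot /
# unit_properties and sequence_plot / sequence_properties hooks are registered
# on Reporter.plugin_manager, and that `units` is a list of solved
# pyroll.core.Unit objects from a simulation):
#
#   reporter = Reporter()
#   html = reporter.render(units)
#   Path("report.html").write_text(html, encoding="utf-8")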
| [
"pluggy.HookimplMarker",
"pathlib.Path",
"pluggy.PluginManager",
"pluggy.HookspecMarker"
] | [((365, 402), 'pluggy.PluginManager', 'pluggy.PluginManager', (['"""pyroll_report"""'], {}), "('pyroll_report')\n", (385, 402), False, 'import pluggy\n'), ((418, 456), 'pluggy.HookspecMarker', 'pluggy.HookspecMarker', (['"""pyroll_report"""'], {}), "('pyroll_report')\n", (439, 456), False, 'import pluggy\n'), ((472, 510), 'pluggy.HookimplMarker', 'pluggy.HookimplMarker', (['"""pyroll_report"""'], {}), "('pyroll_report')\n", (493, 510), False, 'import pluggy\n'), ((581, 595), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (585, 595), False, 'from pathlib import Path\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Convert text files between arbitrary encodings and line endings.
Usage:
convert_encoding.py [options] file1 file2 ...
Options are:
-f enc, --from=enc the input encoding
(default: locale.getpreferredencoding())
Set this to 'guess' if the encoding is unknown.
-t enc, --to=enc the output encoding
(default: locale.getpreferredencoding())
-e code, --eol=code the end-of-line mode (see below)
-o file, --out=file where to write the converted data. Conversion is
done in-place if this is omitted
-n, --nocodec ignore input and output encoding, just process eol
-r, --recursive go into directories recursively
-l, --followlinks follow symbolic links
-d, --dotfiles in combination with -r: process files that begin
with a dot, if they are found in a directory reached
by recursion. You still have to specify dotfiles
explicitly in the initial filelist.
-q, --quiet don't display warnings or status messages
For valid encoding names, see
https://docs.python.org/2.4/lib/standard-encodings.html
Accepted values for the eol code are: 'unix', 'linux', 'dos', 'win', 'mac'.
If you do not specify an end-of-line, it will be set to your system's default,
regardless of the original end-of-line.
Any '#' in the out-filename will be replaced with the full name of the input
file.
Note that guessing the input encoding is not foolproof in any way. Always
provide an explicit input encoding if you can.
The program supports full unix style globbing on all operating systems,
independently of your shell's capabilities. Also, environment variables
and '~' will be expanded following the usual conventions.
The file may also be used as a module from python.
"""
"""
Copyright (C) 2007 <NAME>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import codecs
import getopt
import os
import os.path
import locale
import shutil
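# The module docstring above says this file can also be used as a module; a
# hedged sketch of that use (file names are made up; guess_file_encoding and
# convert_encoding are defined further down in this file):
#
#   import locale
#   from convert_encoding import guess_file_encoding, convert_encoding
#   locale.setlocale(locale.LC_ALL, '')   # needed before guessing encodings
#   enc = guess_file_encoding("notes.txt", "utf-8")
#   convert_encoding("notes.txt", enc, "utf-8", eol="\n",
#                    outfilename="notes-utf8.txt")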
def main():
"""Command line program for converting encodings and end-of-lines """
#
# command line parsing / initialization
global warn
try:
opts, files = getopt.getopt(sys.argv[1:], "hf:t:e:o:rlqnd",
["help", "from=", "to=","eol=",
"recursive", "followlinks",
"quiet", "nocodec", "out=",
"dotfiles"])
except getopt.GetoptError as details:
warn(details)
        sys.exit(2)
locale.setlocale(locale.LC_ALL, '')
from_enc = locale.getpreferredencoding()
to_enc = locale.getpreferredencoding()
eol = os.linesep
recursive = False
followlinks = False
dotfiles = False
guess = False
nocodec = False
outfilename = ""
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-f", "--from"):
if a == 'guess':
guess = True
else:
from_enc = a
if o in ("-t", "--to"):
to_enc = a
if o in ("-o", "--out"):
outfilename = os.path.normcase(a)
if o in ("-r", "--recursive"):
recursive = True
if o in ("-d", "--dotfiles"):
dotfiles = True
if o in ("-q", "--quiet"):
warn = no_warn
if o in ("-n", "--nocodec"):
nocodec = True
if o in ("-e", "--eol"):
eolmode = a.lower()
os_eols = {'unix':"\n",
'linux':"\n",
'dos':"\r\n",
'win':"\r\n",
'mac':"\r"}
try:
eol = os_eols[eolmode]
except KeyError:
warn("'%s' is not a valid name for a line ending." % eolmode)
warn("Use 'unix', 'linux', 'dos', 'win', or 'mac'.")
warn("Converting to your default line ending")
if o in ("-l", "--followlinks"):
recursive = True
if o in ("-l", "--followlinks"):
followlinks = True
#
# put together what we actually have to do
if nocodec:
rec_function = lambda infilename: convert_eol(infilename, eol, \
outfilename.replace('#', infilename))
else:
if guess:
warn("WARNING: guessing the input encoding is dangerous. "
+ "Make sure to check the results.\n")
rec_function = lambda infilename: \
convert_encoding(
infilename,
guess_file_encoding(infilename, from_enc),
to_enc,
eol,
outfilename.replace('#', infilename))
else:
rec_function = lambda infilename: \
convert_encoding(
infilename,
from_enc,
to_enc,
eol,
outfilename.replace('#', infilename))
#
# Run through all the files
file_recursor(rec_function, files, recursive, followlinks, dotfiles)
def warn(msg, nl=True):
""" print a warning message to stderr """
sys.stderr.write(str(msg))
if nl: sys.stderr.write("\n")
def no_warn(msg, nl=True):
pass
def file_recursor(function=None, file_list=(), recurse=True,
followlinks=True, dotfiles=False):
""" Call function(file) for each file in file_list.
If recurse is True, go into directories recursively. If followlinks is
True, follow symbolic links. If dotfiles is true, process all files and
dirs beginning with a dot, if they are in a subdirectory that is
processed recursively.
Notice that even when dotfiles is True, you still have to explicitly
put dotfiles in your initial filelist.
All names in the file_list will be fully expanded as a glob and with
environment variables
"""
from glob import glob
for name in file_list:
name = os.path.expanduser(name)
name = os.path.expandvars(name)
name = os.path.normpath(name)
if len(glob(name)) > 1:
file_recursor(function, glob(name), recurse, followlinks)
continue
elif len(glob(name)) == 1:
name = glob(name)[0]
if os.path.islink(name):
if not followlinks:
warn("'%s' is a symlink, but following links is not activated" \
% name)
continue
else:
name = os.readlink(name)
if os.path.isfile(name):
function(name)
elif os.path.isdir(name):
if name != '..':
if recurse:
file_recursor(function, glob(os.path.join(name, '*')),
recurse, followlinks)
if dotfiles:
file_recursor(function, glob(os.path.join(name, '.*')),
recurse, followlinks)
else:
warn("'%s' is a directory, but recursive handling is not activated" % name)
else:
warn("Can't process '%s'. Not found.\n" % name)
continue
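# Hedged example of calling file_recursor directly (the glob pattern is made
# up; any one-argument callable can be passed as `function`):
#
#   file_recursor(lambda name: print(name), ["~/docs/*.txt"],
#                 recurse=True, followlinks=False)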
def convert_encoding(infilename, from_enc, to_enc, eol=os.linesep,
outfilename=""):
""" Convert a text file from_enc to_enc
If eol is given, it must be an appropriate string to terminate a line,
i.e. "\\n", "\\r\\n", "\\r". It defaults to the standard line ending
for your OS (os.linesep)
If outfilename is given, the results will be written to that file, if
not, the conversion is done in-place
from_enc and to_enc are strings containing a name for any
encoding known to Python. See
http://docs.python.org/lib/standard-encodings.html
"""
def error_cleanup():
if hasattr(infile, 'close'):
infile.close()
if hasattr(outfile, 'close'):
outfile.close()
        if os.path.isfile(tempfilename) and os.path.isfile(infilename):
            os.remove(tempfilename)
warn("Processing %s ... " % infilename, nl=False)
if os.path.isfile(infilename):
#
# choose temp file
tempfilename = infilename + "." + to_enc
while os.path.isfile(tempfilename):
tempfilename = tempfilename + "x"
#
# open original file (infile) and tempfile (outfile)
infile = outfile = None
try:
infile = codecs.open(infilename, "rb", from_enc)
except Exception as details:
warn("Error opening %s: %s" % (infilename, details));
error_cleanup()
return None
try:
outfile = codecs.open(tempfilename, "wb", to_enc)
except Exception as details:
warn("Error opening %s: %s" % (tempfilename, details))
error_cleanup()
return None
#
# go through infile, convert, and write to outfile
try:
for line in infile:
try:
line = line.replace("\r\n", "\n") # win
line = line.replace("\r", "\n") # mac
line = line.replace("\n", eol)
outfile.write(line)
except Exception as details:
raise "Error writing to %s: %s" \
% (tempfilename, details);
except Exception as details:
warn("Error in I/O: %s" % details)
error_cleanup()
else:
#
# Finish up: overwrite original file with tempfile
try:
infile.close()
outfile.close()
shutil.copystat(infilename, tempfilename)
overwrite = False
if outfilename == "":
outfilename = infilename
overwrite = True
rename_file(tempfilename, outfilename, overwrite)
warn("%s was successfully converted from %s to %s" \
% (infilename, from_enc, to_enc))
warn("") # finish a block
except Exception as details:
warn("Renaming %s to %s FAILED. File was not converted: %s" \
% (tempfilename, infilename, details))
error_cleanup()
warn("") # finish a block
else:
warn("File '%s' does not exist\n" % file)
def convert_eol(infilename, eol=os.linesep, outfilename=""):
""" Convert just the end of line of a textfile
If eol is given, it must be an appropriate string to terminate a line,
i.e. "\\n", "\\r\\n", "\\r". It defaults to the standard line ending
for your os (os.linesep)
If outfilename is given, the results will be written to that file, if
not, the conversion is done in-place
The encoding of the file is left untouched
"""
def error_cleanup():
if hasattr(infile, 'close'):
infile.close()
if hasattr(outfile, 'close'):
outfile.close()
if os.path.isfile(tempfilename) and os.path.isfile(infilename):
os.remove(tempfilename)
warn("Processing %s ... " % infilename, nl=False)
if os.path.isfile(infilename):
#
# choose temp file
tempfilename = infilename + "." + "eol"
while os.path.isfile(tempfilename):
tempfilename = tempfilename + "x"
#
# open original file (infile) and tempfile (outfile)
infile = outfile = None
try:
infile = open(infilename, "rb")
except Exception as details:
warn("Error opening %s: %s" % (infilename, details));
error_cleanup()
return None
try:
outfile = open(tempfilename, "wb")
except Exception as details:
warn("Error opening %s: %s" % (tempfilename, details))
error_cleanup()
return None
#
# go through infile, convert, and write to outfile
try:
for line in infile:
try:
if "\x00\r" in line or "\x00\n" in line:
eol = eol.replace("\r", "\x00\r")
eol = eol.replace("\n", "\x00\n")
line = line.replace("\r\n", "\n") # win
line = line.replace("\x00\r\x00\n", "\n") # utf16 win
line = line.replace("\r", "\n") # mac
line = line.replace("\x00\r", "\n") # utf16 mac
line = line.replace("\x00\n", "\n") # utf16 unix
line = line.replace("\n", eol)
outfile.write(line)
except Exception as details:
raise "Error writing to %s: %s" \
% (tempfilename, details);
except Exception as details:
warn("Error in I/O: %s" % details)
error_cleanup()
else:
#
# Finish up: overwrite original file with tempfile
try:
infile.close()
outfile.close()
shutil.copystat(infilename, tempfilename)
overwrite = False
if outfilename == "":
outfilename = infilename
overwrite = True
rename_file(tempfilename, outfilename, overwrite)
warn("Successfully converted eol for %s" % infilename)
warn("") # finish a block
except Exception as details:
warn("Renaming %s to %s FAILED. File was not converted: %s" \
% (tempfilename, infilename, details))
error_cleanup()
warn("") # finish a block
else:
warn("File '%s' does not exist\n" % file)
def guess_encoding(data):
""" Given a byte string, attempt to decode it.
Taken from:
http://www.pyzine.com/Issue008/Section_Articles/article_Encodings.html#guessing-the-encoding
    Tries the standard 'UTF8' and 'latin-1' encodings,
    plus several gathered from locale information.
The calling program *must* first call
locale.setlocale(locale.LC_ALL, '')
If successful it returns
(decoded_unicode, successful_encoding)
If unsuccessful it raises a ``UnicodeError``
"""
successful_encoding = None
# we make 'utf-8' the first encoding
encodings = ['utf-8']
#
# next we add anything we can learn from the locale
try:
encodings.append(locale.nl_langinfo(locale.CODESET))
except AttributeError:
pass
try:
encodings.append(locale.getlocale()[1])
except (AttributeError, IndexError):
pass
try:
encodings.append(locale.getdefaultlocale()[1])
except (AttributeError, IndexError):
pass
#
    # now we try the rest
    encodings = encodings + ['latin-1', 'mbcs', 'big5', 'euc_jp', 'euc_kr',
             'gb2312', 'gbk', 'gb18030', 'hz', 'iso2022_jp',
             'koi8_u', 'ptcp154', 'shift_jis', 'unicode', 'cp1251', 'mac_cyrillic']
for enc in encodings:
# some of the locale calls
# may have returned None
if not enc:
continue
try:
decoded = str(data, enc)
successful_encoding = enc
except (UnicodeError, LookupError):
pass
else:
break
if not successful_encoding:
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: %s.'
% ', '.join([repr(enc) for enc in encodings if enc]))
else:
return (decoded, successful_encoding)
def guess_file_encoding(filename, default):
""" Guess the encoding of a text file
If the function is able to guess an encoding for filename, return that
encoding, otherwise return the default.
Note that guessing an encoding is not fool-proof, this might return the
wrong encoding.
Adapted from:
http://www.pyzine.com/Issue008/Section_Articles/article_Encodings.html#guessing-the-encoding
"""
try:
f = open(filename, "rb")
the_text = f.read()
f.close()
except Exception as details:
warn("Error while trying to guess the encoding of file %s: %s" \
% (filename, details))
return default
bomdict = { codecs.BOM_UTF8 : 'UTF8',
codecs.BOM_UTF16_BE : 'UTF-16BE',
codecs.BOM_UTF16_LE : 'UTF-16LE' }
# check if there is Unicode signature
for bom, encoding in bomdict.items():
if the_text.startswith(bom):
the_text = the_text[len(bom):]
break
else:
bom = None
encoding = None
if encoding is None: # there was no BOM
try:
unicode_text, encoding = guess_encoding(the_text)
except UnicodeError:
warn("Can't work out the encoding of file '%s'." % filename)
warn("Assuming the default encoding: %s" % default)
return default
warn("Guessed encoding for file '%s': %s" % (filename, encoding))
return encoding
def rename_file(file1, file2, overwrite=False):
""" Rename file1 to file2, ask for directions if file2 already exists """
if os.path.isfile(file1):
if os.path.isfile(file2):
while not overwrite:
answer = raw_input("%s already exists. Do you want to overwrite? Yes [No] Abort: " \
% file2).lower()
if answer == 'yes':
overwrite = True
elif answer == 'abort':
return None
else:
answer = raw_input("Enter a new filename: ")
if answer != '': file2 = os.path.normcase(answer)
if not os.path.isfile(file2):
overwrite = True
if file2 != file1: os.remove(file2)
try:
os.rename(file1, file2)
except Exception as details:
warn( "Failed to rename %s to %s: %s" % (file1, file2, details) )
def usage():
""" Print usage for main program """
print (__doc__)
if __name__ == "__main__":
main()
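# Illustrative usage sketch (not part of the original script); the file name is
# a placeholder. It relies only on the helpers defined above: guess the file's
# encoding, then normalise its line endings in place.
def _example_usage(filename="notes.txt"):
    encoding = guess_file_encoding(filename, "utf-8")
    convert_eol(filename, eol="\n")
    return encoding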
| [
"sys.exit",
"os.path.islink",
"os.remove",
"os.readlink",
"os.path.normpath",
"os.path.isdir",
"glob.glob",
"shutil.copystat",
"os.path.expanduser",
"getopt.getopt",
"locale.setlocale",
"os.rename",
"locale.getpreferredencoding",
"os.path.isfile",
"sys.stderr.write",
"codecs.open",
"os.path.normcase",
"locale.getdefaultlocale",
"os.path.expandvars",
"os.path.join",
"locale.getlocale",
"locale.nl_langinfo"
] | [((3372, 3407), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '""""""'], {}), "(locale.LC_ALL, '')\n", (3388, 3407), False, 'import locale\n'), ((3423, 3452), 'locale.getpreferredencoding', 'locale.getpreferredencoding', ([], {}), '()\n', (3450, 3452), False, 'import locale\n'), ((3466, 3495), 'locale.getpreferredencoding', 'locale.getpreferredencoding', ([], {}), '()\n', (3493, 3495), False, 'import locale\n'), ((9296, 9322), 'os.path.isfile', 'os.path.isfile', (['infilename'], {}), '(infilename)\n', (9310, 9322), False, 'import os\n'), ((12441, 12467), 'os.path.isfile', 'os.path.isfile', (['infilename'], {}), '(infilename)\n', (12455, 12467), False, 'import os\n'), ((18622, 18643), 'os.path.isfile', 'os.path.isfile', (['file1'], {}), '(file1)\n', (18636, 18643), False, 'import os\n'), ((2942, 3098), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '"""hf:t:e:o:rlqnd"""', "['help', 'from=', 'to=', 'eol=', 'recursive', 'followlinks', 'quiet',\n 'nocodec', 'out=', 'dotfiles']"], {}), "(sys.argv[1:], 'hf:t:e:o:rlqnd', ['help', 'from=', 'to=',\n 'eol=', 'recursive', 'followlinks', 'quiet', 'nocodec', 'out=', 'dotfiles']\n )\n", (2955, 3098), False, 'import getopt\n'), ((6292, 6314), 'sys.stderr.write', 'sys.stderr.write', (['"""\n"""'], {}), "('\\n')\n", (6308, 6314), False, 'import sys\n'), ((7108, 7132), 'os.path.expanduser', 'os.path.expanduser', (['name'], {}), '(name)\n', (7126, 7132), False, 'import os\n'), ((7148, 7172), 'os.path.expandvars', 'os.path.expandvars', (['name'], {}), '(name)\n', (7166, 7172), False, 'import os\n'), ((7188, 7210), 'os.path.normpath', 'os.path.normpath', (['name'], {}), '(name)\n', (7204, 7210), False, 'import os\n'), ((7413, 7433), 'os.path.islink', 'os.path.islink', (['name'], {}), '(name)\n', (7427, 7433), False, 'import os\n'), ((7674, 7694), 'os.path.isfile', 'os.path.isfile', (['name'], {}), '(name)\n', (7688, 7694), False, 'import os\n'), ((9424, 9452), 'os.path.isfile', 'os.path.isfile', (['tempfilename'], {}), '(tempfilename)\n', (9438, 9452), False, 'import os\n'), ((12568, 12596), 'os.path.isfile', 'os.path.isfile', (['tempfilename'], {}), '(tempfilename)\n', (12582, 12596), False, 'import os\n'), ((18656, 18677), 'os.path.isfile', 'os.path.isfile', (['file2'], {}), '(file2)\n', (18670, 18677), False, 'import os\n'), ((3731, 3741), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3739, 3741), False, 'import sys\n'), ((3995, 4014), 'os.path.normcase', 'os.path.normcase', (['a'], {}), '(a)\n', (4011, 4014), False, 'import os\n'), ((7736, 7755), 'os.path.isdir', 'os.path.isdir', (['name'], {}), '(name)\n', (7749, 7755), False, 'import os\n'), ((9138, 9165), 'os.path.isfile', 'os.path.isfile', (['outfilename'], {}), '(outfilename)\n', (9152, 9165), False, 'import os\n'), ((9170, 9196), 'os.path.isfile', 'os.path.isfile', (['infilename'], {}), '(infilename)\n', (9184, 9196), False, 'import os\n'), ((9210, 9232), 'os.remove', 'os.remove', (['outfilename'], {}), '(outfilename)\n', (9219, 9232), False, 'import os\n'), ((9637, 9676), 'codecs.open', 'codecs.open', (['infilename', '"""rb"""', 'from_enc'], {}), "(infilename, 'rb', from_enc)\n", (9648, 9676), False, 'import codecs\n'), ((9867, 9906), 'codecs.open', 'codecs.open', (['tempfilename', '"""wb"""', 'to_enc'], {}), "(tempfilename, 'wb', to_enc)\n", (9878, 9906), False, 'import codecs\n'), ((12281, 12309), 'os.path.isfile', 'os.path.isfile', (['tempfilename'], {}), '(tempfilename)\n', (12295, 12309), False, 'import os\n'), ((12314, 12340), 'os.path.isfile', 'os.path.isfile', 
(['infilename'], {}), '(infilename)\n', (12328, 12340), False, 'import os\n'), ((12354, 12377), 'os.remove', 'os.remove', (['tempfilename'], {}), '(tempfilename)\n', (12363, 12377), False, 'import os\n'), ((15815, 15849), 'locale.nl_langinfo', 'locale.nl_langinfo', (['locale.CODESET'], {}), '(locale.CODESET)\n', (15833, 15849), False, 'import locale\n'), ((19332, 19355), 'os.rename', 'os.rename', (['file1', 'file2'], {}), '(file1, file2)\n', (19341, 19355), False, 'import os\n'), ((7226, 7236), 'glob.glob', 'glob', (['name'], {}), '(name)\n', (7230, 7236), False, 'from glob import glob\n'), ((7279, 7289), 'glob.glob', 'glob', (['name'], {}), '(name)\n', (7283, 7289), False, 'from glob import glob\n'), ((7645, 7662), 'os.readlink', 'os.readlink', (['name'], {}), '(name)\n', (7656, 7662), False, 'import os\n'), ((10870, 10911), 'shutil.copystat', 'shutil.copystat', (['infilename', 'tempfilename'], {}), '(infilename, tempfilename)\n', (10885, 10911), False, 'import shutil\n'), ((14371, 14412), 'shutil.copystat', 'shutil.copystat', (['infilename', 'tempfilename'], {}), '(infilename, tempfilename)\n', (14386, 14412), False, 'import shutil\n'), ((15925, 15943), 'locale.getlocale', 'locale.getlocale', ([], {}), '()\n', (15941, 15943), False, 'import locale\n'), ((16036, 16061), 'locale.getdefaultlocale', 'locale.getdefaultlocale', ([], {}), '()\n', (16059, 16061), False, 'import locale\n'), ((19290, 19306), 'os.remove', 'os.remove', (['file2'], {}), '(file2)\n', (19299, 19306), False, 'import os\n'), ((7351, 7361), 'glob.glob', 'glob', (['name'], {}), '(name)\n', (7355, 7361), False, 'from glob import glob\n'), ((7388, 7398), 'glob.glob', 'glob', (['name'], {}), '(name)\n', (7392, 7398), False, 'from glob import glob\n'), ((19143, 19167), 'os.path.normcase', 'os.path.normcase', (['answer'], {}), '(answer)\n', (19159, 19167), False, 'import os\n'), ((19195, 19216), 'os.path.isfile', 'os.path.isfile', (['file2'], {}), '(file2)\n', (19209, 19216), False, 'import os\n'), ((7863, 7886), 'os.path.join', 'os.path.join', (['name', '"""*"""'], {}), "(name, '*')\n", (7875, 7886), False, 'import os\n'), ((8032, 8056), 'os.path.join', 'os.path.join', (['name', '""".*"""'], {}), "(name, '.*')\n", (8044, 8056), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""Module with info command."""
from operator import attrgetter
from cliff.show import ShowOne
from ..account.managers import AccountManager
from ..core.commands import AbstractCommand
from ..core.commands.mixins import GetRelationMixin, SshConfigMergerMixin
from ..core.storage.strategies import RelatedGetStrategy
from ..core.models.terminal import Group, Host, SshConfig
class InfoCommand(SshConfigMergerMixin, GetRelationMixin,
ShowOne, AbstractCommand):
"""display info about a host or group"""
get_strategy = RelatedGetStrategy
model_class = SshConfig
@property
def formatter_namespace(self):
"""Return entrypoint with cliff formatters."""
return 'termius.info.formatters'
def extend_parser(self, parser):
"""Add more arguments to parser."""
parser.add_argument(
'-G', '--group', dest='entry_type',
action='store_const', const=Group, default=Host,
help='show info about group'
)
parser.add_argument(
'-H', '--host', dest='entry_type',
action='store_const', const=Host, default=Host,
help='show info about host'
)
parser.add_argument(
'-M', '--no-merge', action='store_true',
help='do not merge configs'
)
parser.add_argument(
'id_or_name',
metavar='ID or NAME',
help='display information about the group or host with ID'
)
return parser
# pylint: disable=unused-argument
def take_action(self, parsed_args):
"""Process CLI call."""
instance = self.get_relation(
parsed_args.entry_type, parsed_args.id_or_name
)
ssh_config = self.get_merged_ssh_config(instance)
return self.prepare_fields(ssh_config, instance)
def prepare_fields(self, ssh_config, instance):
"""Generate 2size tuple with ssh_config fields.
Warning there is one additional field - 'address'.
"""
ssh_config_fields = tuple(ssh_config.allowed_fields())
additional_fields = ('address', 'ssh_key_path', 'agent_forwarding')
keys = ssh_config_fields + additional_fields
ssh_key = ssh_config.get_ssh_key()
agent_forwarding = (
AccountManager(self.config).get_settings()
.get('agent_forwarding')
)
values = (
attrgetter(*ssh_config_fields)(ssh_config) +
(
getattr(instance, 'address', ''),
ssh_key and ssh_key.file_path(self),
agent_forwarding,
)
)
return (keys, values)
| [
"operator.attrgetter"
] | [((2455, 2485), 'operator.attrgetter', 'attrgetter', (['*ssh_config_fields'], {}), '(*ssh_config_fields)\n', (2465, 2485), False, 'from operator import attrgetter\n')] |
""" Web Routes """
from masonite.helpers.routes import group
from masonite.routes import Get, Post, Redirect
ROUTES = [
Get().route('/test', None).middleware('auth'),
Get().route('/queue', 'TestController@queue'),
Redirect('/redirect', 'test'),
Get().domain('test').route('/test', None).middleware('auth'),
Get().domain('test').route('/unit/test', 'TestController@testing').middleware('auth'),
Get().domain('test').route('/test/route', 'TestController@testing'),
Get().route('/json_response', 'TestController@json_response'),
Post().route('/test/post/route', 'TestController@post_test'),
Get().route('/login', 'TestController@testing').name('login'),
Get().route('/test/param/@id', 'TestController@testing'),
Post().route('/test/json/response/@id', 'TestController@json'),
Get().route('/test/set/test/session', 'TestController@session'),
group('/example', [
Get().route('/test/1', 'TestController@show'),
Get().route('/test/2', 'TestController@show')
])
]
| [
"masonite.routes.Get",
"masonite.routes.Redirect",
"masonite.routes.Post"
] | [((228, 257), 'masonite.routes.Redirect', 'Redirect', (['"""/redirect"""', '"""test"""'], {}), "('/redirect', 'test')\n", (236, 257), False, 'from masonite.routes import Get, Post, Redirect\n'), ((177, 182), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (180, 182), False, 'from masonite.routes import Get, Post, Redirect\n'), ((493, 498), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (496, 498), False, 'from masonite.routes import Get, Post, Redirect\n'), ((560, 566), 'masonite.routes.Post', 'Post', ([], {}), '()\n', (564, 566), False, 'from masonite.routes import Get, Post, Redirect\n'), ((693, 698), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (696, 698), False, 'from masonite.routes import Get, Post, Redirect\n'), ((755, 761), 'masonite.routes.Post', 'Post', ([], {}), '()\n', (759, 761), False, 'from masonite.routes import Get, Post, Redirect\n'), ((823, 828), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (826, 828), False, 'from masonite.routes import Get, Post, Redirect\n'), ((126, 131), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (129, 131), False, 'from masonite.routes import Get, Post, Redirect\n'), ((420, 425), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (423, 425), False, 'from masonite.routes import Get, Post, Redirect\n'), ((626, 631), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (629, 631), False, 'from masonite.routes import Get, Post, Redirect\n'), ((920, 925), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (923, 925), False, 'from masonite.routes import Get, Post, Redirect\n'), ((975, 980), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (978, 980), False, 'from masonite.routes import Get, Post, Redirect\n'), ((263, 268), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (266, 268), False, 'from masonite.routes import Get, Post, Redirect\n'), ((329, 334), 'masonite.routes.Get', 'Get', ([], {}), '()\n', (332, 334), False, 'from masonite.routes import Get, Post, Redirect\n')] |
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import cv2
import torch
import numpy as np
import glob
from pysot.core.config import cfg
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker
from pysot.utils.bbox import get_axis_aligned_bbox
from pysot.utils.model_load import load_pretrain
from toolkit.datasets import DatasetFactory
from toolkit.utils.region import vot_overlap, vot_float2str
import json
parser = argparse.ArgumentParser(description='siamrpn tracking')
parser.add_argument('--dataset', type=str,
help='datasets')
parser.add_argument('--config', default='', type=str,
help='config file')
parser.add_argument('--snapshot', default='', type=str,
help='snapshot of models to eval')
args = parser.parse_args()
torch.set_num_threads(1)
def main():
# load config
cfg.merge_from_file(args.config)
datasets=glob.glob(args.dataset +'/*')
# create model
model = ModelBuilder()
# load model
model = load_pretrain(model, args.snapshot).cuda().eval()
# build tracker
tracker = build_tracker(model)
for v_idx, dataset in enumerate(datasets):
if not os.path.isdir(dataset):
continue
print(dataset)
frames = sorted(glob.glob(dataset + '/*.jpg'))
rects = {}
first_frame = True
for img in frames:
frame = cv2.imread(img)
if first_frame:
init_rect = cv2.selectROI('aa', frame, False, False)
tracker.init(frame, init_rect)
first_frame = False
rects['/'.join(img.split('/')[-2:])] = init_rect
else:
outputs = tracker.track(frame)
bbox = list(map(int, outputs['bbox']))
rects['/'.join(img.split('/')[-2:])] = bbox
cv2.rectangle(frame, (bbox[0], bbox[1]),
(bbox[0] + bbox[2], bbox[1] + bbox[3]),
(0, 255, 0), 3)
cv2.imshow('aa',frame)
cv2.waitKey(10)
with open(dataset + '/trackings.json', 'w') as json_file:
json.dump(rects, json_file)
print('saving json.')
print('done.')
if __name__ == '__main__':
main()
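# Illustrative invocation sketch (not part of the original script); the script,
# config and snapshot names are placeholders. Each sub-directory of --dataset
# is expected to contain *.jpg frames; the tracked boxes are written next to
# them as trackings.json:
#
#   python track_demo.py --dataset ./sequences \
#       --config siamrpn_config.yaml --snapshot siamrpn_model.pth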
| [
"cv2.rectangle",
"argparse.ArgumentParser",
"json.dump",
"pysot.utils.model_load.load_pretrain",
"torch.set_num_threads",
"cv2.imshow",
"pysot.core.config.cfg.merge_from_file",
"cv2.waitKey",
"os.path.isdir",
"cv2.selectROI",
"pysot.models.model_builder.ModelBuilder",
"cv2.imread",
"glob.glob",
"pysot.tracker.tracker_builder.build_tracker"
] | [((651, 706), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""siamrpn tracking"""'}), "(description='siamrpn tracking')\n", (674, 706), False, 'import argparse\n'), ((984, 1008), 'torch.set_num_threads', 'torch.set_num_threads', (['(1)'], {}), '(1)\n', (1005, 1008), False, 'import torch\n'), ((1044, 1076), 'pysot.core.config.cfg.merge_from_file', 'cfg.merge_from_file', (['args.config'], {}), '(args.config)\n', (1063, 1076), False, 'from pysot.core.config import cfg\n'), ((1091, 1121), 'glob.glob', 'glob.glob', (["(args.dataset + '/*')"], {}), "(args.dataset + '/*')\n", (1100, 1121), False, 'import glob\n'), ((1153, 1167), 'pysot.models.model_builder.ModelBuilder', 'ModelBuilder', ([], {}), '()\n', (1165, 1167), False, 'from pysot.models.model_builder import ModelBuilder\n'), ((1283, 1303), 'pysot.tracker.tracker_builder.build_tracker', 'build_tracker', (['model'], {}), '(model)\n', (1296, 1303), False, 'from pysot.tracker.tracker_builder import build_tracker\n'), ((1367, 1389), 'os.path.isdir', 'os.path.isdir', (['dataset'], {}), '(dataset)\n', (1380, 1389), False, 'import os\n'), ((1460, 1489), 'glob.glob', 'glob.glob', (["(dataset + '/*.jpg')"], {}), "(dataset + '/*.jpg')\n", (1469, 1489), False, 'import glob\n'), ((1584, 1599), 'cv2.imread', 'cv2.imread', (['img'], {}), '(img)\n', (1594, 1599), False, 'import cv2\n'), ((2350, 2377), 'json.dump', 'json.dump', (['rects', 'json_file'], {}), '(rects, json_file)\n', (2359, 2377), False, 'import json\n'), ((1656, 1696), 'cv2.selectROI', 'cv2.selectROI', (['"""aa"""', 'frame', '(False)', '(False)'], {}), "('aa', frame, False, False)\n", (1669, 1696), False, 'import cv2\n'), ((2042, 2143), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(bbox[0], bbox[1])', '(bbox[0] + bbox[2], bbox[1] + bbox[3])', '(0, 255, 0)', '(3)'], {}), '(frame, (bbox[0], bbox[1]), (bbox[0] + bbox[2], bbox[1] + bbox\n [3]), (0, 255, 0), 3)\n', (2055, 2143), False, 'import cv2\n'), ((2216, 2239), 'cv2.imshow', 'cv2.imshow', (['"""aa"""', 'frame'], {}), "('aa', frame)\n", (2226, 2239), False, 'import cv2\n'), ((2255, 2270), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2266, 2270), False, 'import cv2\n'), ((1198, 1233), 'pysot.utils.model_load.load_pretrain', 'load_pretrain', (['model', 'args.snapshot'], {}), '(model, args.snapshot)\n', (1211, 1233), False, 'from pysot.utils.model_load import load_pretrain\n')] |
#!/usr/bin/env python
"""
Makes a lot of connections to a mongod.
Usage:
./make_lots_of_connections_to_servers.py [options]
Options:
-h --help Show this text.
--host <host> Host where the mongod is located [default: localhost]
--port <port> Port where the mongod is located [default: 27017]
--replset <set> Replica set name [default: replset]
-n <conns> Number of connections to make to the mongod. [default: 100]
"""
from docopt import docopt
from pymongo import MongoClient
from multiprocessing import Process
from time import sleep
def create_a_connection(host='localhost', port=27017, replica_set='replset'):
client = MongoClient(host=host, port=port, replicaSet=replica_set)
sleep(60) # Delete this line if you uncomment the following lines.
# try:
# sleep(5)
# client.test.test.find_one()
# sleep(5)
# client.test.test.find_one()
# sleep(10)
# client.test.test.find_one()
# sleep(60)
# except Exception, e:
# print ("I can handle this")
return True
def main():
opts = docopt(__doc__)
host = opts['--host']
port = int(opts['--port'])
replica_set = opts['--replset']
number_of_connections = int(opts['-n'])
processes = []
    for i in range(number_of_connections):
p = Process(target=create_a_connection,
kwargs={"host": host,"port": port,
"replica_set": replica_set})
p.start()
processes.append(p)
if __name__ == '__main__':
main()
| [
"pymongo.MongoClient",
"multiprocessing.Process",
"docopt.docopt",
"time.sleep"
] | [((669, 726), 'pymongo.MongoClient', 'MongoClient', ([], {'host': 'host', 'port': 'port', 'replicaSet': 'replica_set'}), '(host=host, port=port, replicaSet=replica_set)\n', (680, 726), False, 'from pymongo import MongoClient\n'), ((731, 740), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (736, 740), False, 'from time import sleep\n'), ((1109, 1124), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1115, 1124), False, 'from docopt import docopt\n'), ((1337, 1441), 'multiprocessing.Process', 'Process', ([], {'target': 'create_a_connection', 'kwargs': "{'host': host, 'port': port, 'replica_set': replica_set}"}), "(target=create_a_connection, kwargs={'host': host, 'port': port,\n 'replica_set': replica_set})\n", (1344, 1441), False, 'from multiprocessing import Process\n')] |
import enum
from typing import Type
import py_client
from py_client.aidm import Maybe
from py_client.conversion.converter_helpers import convert_to_datetime_format_or_return_self, _convert_to_camel_case
from py_client.conversion.aidm_to_json_post_processor import AIDMToJSONPostProcessorChain
def get_attribute_name_without_class_prefix(attribute_name_with_prefix):
last_occurrence_double_underscore = attribute_name_with_prefix.rfind('__')
return attribute_name_with_prefix[last_occurrence_double_underscore + 2:]
def convert_any_aidm_object_to_dict(aidm_object) -> dict:
attribute_dict = dict()
for attribute_name_with_class_prefix, value in aidm_object.__dict__.items():
attribute_name = get_attribute_name_without_class_prefix(attribute_name_with_class_prefix)
attribute_dict[_convert_to_camel_case(attribute_name)] = convert_any_object(
getattr(aidm_object, attribute_name))
return AIDMToJSONPostProcessorChain.post_process_aidm_as_json(attribute_dict, aidm_object)
def convert_any_object(obj):
if isinstance(obj, list):
return [convert_any_object(el) for el in obj]
if isinstance(obj, enum.Enum):
return obj.value
# The following is used at the moment only for end-to-end testing, for sake of simplicity it is here
if isinstance(obj, Maybe):
return convert_any_object(obj.get_value) if obj.has_value else None
is_from_aidm_package = hasattr(obj, "__module__") and obj.__module__.split('.')[0] == py_client.__name__
if is_from_aidm_package:
return convert_any_aidm_object_to_dict(obj)
else:
return convert_to_datetime_format_or_return_self(obj)
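# Illustrative sketch (not part of the original module): name-mangled attribute
# names lose their class prefix before camel-casing; '_TrainRun__id' below is a
# made-up example string.
def _example_prefix_stripping():
    assert get_attribute_name_without_class_prefix("_TrainRun__id") == "id"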
| [
"py_client.conversion.converter_helpers.convert_to_datetime_format_or_return_self",
"py_client.conversion.converter_helpers._convert_to_camel_case",
"py_client.conversion.aidm_to_json_post_processor.AIDMToJSONPostProcessorChain.post_process_aidm_as_json"
] | [((960, 1047), 'py_client.conversion.aidm_to_json_post_processor.AIDMToJSONPostProcessorChain.post_process_aidm_as_json', 'AIDMToJSONPostProcessorChain.post_process_aidm_as_json', (['attribute_dict', 'aidm_object'], {}), '(attribute_dict,\n aidm_object)\n', (1014, 1047), False, 'from py_client.conversion.aidm_to_json_post_processor import AIDMToJSONPostProcessorChain\n'), ((1667, 1713), 'py_client.conversion.converter_helpers.convert_to_datetime_format_or_return_self', 'convert_to_datetime_format_or_return_self', (['obj'], {}), '(obj)\n', (1708, 1713), False, 'from py_client.conversion.converter_helpers import convert_to_datetime_format_or_return_self, _convert_to_camel_case\n'), ((835, 873), 'py_client.conversion.converter_helpers._convert_to_camel_case', '_convert_to_camel_case', (['attribute_name'], {}), '(attribute_name)\n', (857, 873), False, 'from py_client.conversion.converter_helpers import convert_to_datetime_format_or_return_self, _convert_to_camel_case\n')] |
'''
WSGI Application Only
'''
from manage import application
if __name__ == "__main__":
application.run()
| [
"manage.application.run"
] | [((93, 110), 'manage.application.run', 'application.run', ([], {}), '()\n', (108, 110), False, 'from manage import application\n')] |
from django.contrib import admin
from django.urls import path
from django.conf.urls import url
from django.views.generic import TemplateView
import app.views as appViews
urlpatterns = [
url(r'^$', appViews.home, name='home'),
url(r'home/', appViews.home, name='home'),
url(r'offer/', appViews.offer, name='offer'),
url(r'pricing/', appViews.pricing, name='pricing'),
url(r'contact/', appViews.contact, name='contact'),
url(r'online/', appViews.online, name='online'),
path('admin/', admin.site.urls)
] | [
"django.conf.urls.url",
"django.urls.path"
] | [((188, 225), 'django.conf.urls.url', 'url', (['"""^$"""', 'appViews.home'], {'name': '"""home"""'}), "('^$', appViews.home, name='home')\n", (191, 225), False, 'from django.conf.urls import url\n'), ((229, 269), 'django.conf.urls.url', 'url', (['"""home/"""', 'appViews.home'], {'name': '"""home"""'}), "('home/', appViews.home, name='home')\n", (232, 269), False, 'from django.conf.urls import url\n'), ((273, 316), 'django.conf.urls.url', 'url', (['"""offer/"""', 'appViews.offer'], {'name': '"""offer"""'}), "('offer/', appViews.offer, name='offer')\n", (276, 316), False, 'from django.conf.urls import url\n'), ((320, 369), 'django.conf.urls.url', 'url', (['"""pricing/"""', 'appViews.pricing'], {'name': '"""pricing"""'}), "('pricing/', appViews.pricing, name='pricing')\n", (323, 369), False, 'from django.conf.urls import url\n'), ((373, 422), 'django.conf.urls.url', 'url', (['"""contact/"""', 'appViews.contact'], {'name': '"""contact"""'}), "('contact/', appViews.contact, name='contact')\n", (376, 422), False, 'from django.conf.urls import url\n'), ((426, 472), 'django.conf.urls.url', 'url', (['"""online/"""', 'appViews.online'], {'name': '"""online"""'}), "('online/', appViews.online, name='online')\n", (429, 472), False, 'from django.conf.urls import url\n'), ((479, 510), 'django.urls.path', 'path', (['"""admin/"""', 'admin.site.urls'], {}), "('admin/', admin.site.urls)\n", (483, 510), False, 'from django.urls import path\n')] |
from flask import request, make_response, redirect
from godmode.models.base import BaseAdminModel
from godmode.views.view import BaseView
from godmode.acl import ACL
from godmode.api import API
class GodModeAuthAdminModel(BaseAdminModel):
acl = ACL.OPEN
url_prefix = ""
place = None
enable_log = False
views = {
"login_view": "/login/",
"logout_view": "/logout/"
}
class LoginView(BaseView):
url = "/login/"
title = "Login"
template = "login.html"
acl = ACL.OPEN
def get(self):
context = {}
return self.render(**context)
def post(self):
login = API.get_str(request, "login")
password = API.get_str(request, "password")
from godmode.models.godmode_users import GodModeUsersAdminModel
user, client = GodModeUsersAdminModel.login(request, login, password)
response = make_response(redirect("/"))
response.set_cookie("gm_user_id", str(user.id))
response.set_cookie("gm_token", client.token)
return response
login_view = LoginView
class LogoutView(BaseView):
url = "/logout/"
acl = ACL.ALL
def get(self):
from godmode.models.godmode_users import GodModeUsersAdminModel
GodModeUsersAdminModel.logout(request)
response = make_response(redirect("/login/"))
response.set_cookie("gm_user_id", "", expires=0)
response.set_cookie("gm_token", "", expires=0)
return response
logout_view = LogoutView
| [
"flask.redirect",
"godmode.models.godmode_users.GodModeUsersAdminModel.logout",
"godmode.api.API.get_str",
"godmode.models.godmode_users.GodModeUsersAdminModel.login"
] | [((680, 709), 'godmode.api.API.get_str', 'API.get_str', (['request', '"""login"""'], {}), "(request, 'login')\n", (691, 709), False, 'from godmode.api import API\n'), ((733, 765), 'godmode.api.API.get_str', 'API.get_str', (['request', '"""password"""'], {}), "(request, 'password')\n", (744, 765), False, 'from godmode.api import API\n'), ((869, 923), 'godmode.models.godmode_users.GodModeUsersAdminModel.login', 'GodModeUsersAdminModel.login', (['request', 'login', 'password'], {}), '(request, login, password)\n', (897, 923), False, 'from godmode.models.godmode_users import GodModeUsersAdminModel\n'), ((1342, 1380), 'godmode.models.godmode_users.GodModeUsersAdminModel.logout', 'GodModeUsersAdminModel.logout', (['request'], {}), '(request)\n', (1371, 1380), False, 'from godmode.models.godmode_users import GodModeUsersAdminModel\n'), ((961, 974), 'flask.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (969, 974), False, 'from flask import request, make_response, redirect\n'), ((1418, 1437), 'flask.redirect', 'redirect', (['"""/login/"""'], {}), "('/login/')\n", (1426, 1437), False, 'from flask import request, make_response, redirect\n')] |
import logging
import multiprocessing as mp
import pathlib
import time
from typing import Optional, Union
from urllib.parse import urlparse
logger = logging.getLogger("hg.fuse")
class FuseProcess:
_mnt_name = "schemas"
_dircache_name = "cache"
def __init__(self):
self._fuse_process: Optional[mp.Process] = None
self._tmp_dir: Optional[pathlib.Path] = None
def start(self, tmp_dir: Union[str, pathlib.Path]):
try:
from ._httpfs import run
except ImportError as e:
raise ImportError(
'Install "fusepy" and "simple-httpfs" to enable FUSE.'
) from e
# no need to restart
tmp_dir = pathlib.Path(tmp_dir).absolute()
if self._fuse_process and tmp_dir == self._tmp_dir:
logger.debug("Skipping start. FUSE running in same directory %s", tmp_dir)
return
self.stop()
assert tmp_dir.is_dir(), f"mount dir doesn't exist: {tmp_dir}"
mount_point = tmp_dir / self._mnt_name
disk_cache_dir = tmp_dir / self._dircache_name
if not mount_point.exists():
mount_point.mkdir()
if not disk_cache_dir.exists():
disk_cache_dir.mkdir()
logger.info("Starting FUSE mount at %s", mount_point)
args = (str(mount_point) + "/", str(disk_cache_dir) + "/")
self._fuse_process = mp.Process(target=run, args=args, daemon=True)
self._fuse_process.start()
max_iters = 10
for i in range(max_iters):
# wait until http is mounted
if (mount_point / "http").exists():
break
if i == max_iters - 1:
self.stop()
raise RuntimeError("Failed to setup FUSE")
time.sleep(0.5)
self._tmp_dir = tmp_dir
def stop(self):
if self._fuse_process is None:
return
logger.info("Stopping FUSE running at %s", self._tmp_dir)
self._fuse_process.terminate()
self._fuse_process = None
self._tmp_dir = None
# TODO: remove cache and mount dirs?
# make sure stuff is no longer mounted
def path(self, href: str):
if self._tmp_dir is None:
raise RuntimeError("FUSE processes not started")
url = urlparse(href)
return str(
self._tmp_dir / self._mnt_name / f"{url.scheme}/{url.netloc}{url.path}.."
)
fuse = FuseProcess()
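# Illustrative usage sketch (not part of the original module); requires fusepy
# and simple-httpfs, an existing tmp_dir, and uses a placeholder URL.
def _example_usage(tmp_dir="/tmp/hg-cache"):
    fuse.start(tmp_dir)
    try:
        # maps to "<tmp_dir>/schemas/https/example.com/data/tiles.mcool.."
        return fuse.path("https://example.com/data/tiles.mcool")
    finally:
        fuse.stop()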
| [
"logging.getLogger",
"urllib.parse.urlparse",
"pathlib.Path",
"multiprocessing.Process",
"time.sleep"
] | [((150, 178), 'logging.getLogger', 'logging.getLogger', (['"""hg.fuse"""'], {}), "('hg.fuse')\n", (167, 178), False, 'import logging\n'), ((1401, 1447), 'multiprocessing.Process', 'mp.Process', ([], {'target': 'run', 'args': 'args', 'daemon': '(True)'}), '(target=run, args=args, daemon=True)\n', (1411, 1447), True, 'import multiprocessing as mp\n'), ((2322, 2336), 'urllib.parse.urlparse', 'urlparse', (['href'], {}), '(href)\n', (2330, 2336), False, 'from urllib.parse import urlparse\n'), ((1790, 1805), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1800, 1805), False, 'import time\n'), ((700, 721), 'pathlib.Path', 'pathlib.Path', (['tmp_dir'], {}), '(tmp_dir)\n', (712, 721), False, 'import pathlib\n')] |
#!/usr/bin/env python
#-*- coding:utf-8 -*-
import logging
import tornado.escape
import tornado.ioloop
import tornado.web
import tornado.websocket
import os.path
from tornado.options import define, options, parse_command_line
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
define("history", default="records.dump", help="save records of game to the given file")
define("AIvsAI", default=True, help="clients just watch A.I. vs. A.I.")
from base import *
from Board import Board
from Handler import MainHandler, SocketHandler
from AI import AIPlayer,RandomAIPlayer
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", MainHandler),
(r"/socket", SocketHandler),
]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
debug=options.debug,
)
tornado.web.Application.__init__(self, handlers, **settings)
def main():
parse_command_line()
app = Application()
app.listen(options.port)
ai = AIPlayer(app)
if options.AIvsAI:
ai2 = RandomAIPlayer(app)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
#TODO: Interface for replaying logs
#TODO: Machine-learning A.I.
#TODO: When a near-reach appears, start searching each case instead of choosing randomly
| [
"AI.RandomAIPlayer",
"AI.AIPlayer",
"tornado.options.define",
"tornado.options.parse_command_line"
] | [((229, 297), 'tornado.options.define', 'define', (['"""port"""'], {'default': '(8888)', 'help': '"""run on the given port"""', 'type': 'int'}), "('port', default=8888, help='run on the given port', type=int)\n", (235, 297), False, 'from tornado.options import define, options, parse_command_line\n'), ((298, 354), 'tornado.options.define', 'define', (['"""debug"""'], {'default': '(False)', 'help': '"""run in debug mode"""'}), "('debug', default=False, help='run in debug mode')\n", (304, 354), False, 'from tornado.options import define, options, parse_command_line\n'), ((355, 448), 'tornado.options.define', 'define', (['"""history"""'], {'default': '"""records.dump"""', 'help': '"""save records of game to the given file"""'}), "('history', default='records.dump', help=\n 'save records of game to the given file')\n", (361, 448), False, 'from tornado.options import define, options, parse_command_line\n'), ((444, 515), 'tornado.options.define', 'define', (['"""AIvsAI"""'], {'default': '(True)', 'help': '"""clients just watch A.I. vs. A.I."""'}), "('AIvsAI', default=True, help='clients just watch A.I. vs. A.I.')\n", (450, 515), False, 'from tornado.options import define, options, parse_command_line\n'), ((1249, 1269), 'tornado.options.parse_command_line', 'parse_command_line', ([], {}), '()\n', (1267, 1269), False, 'from tornado.options import define, options, parse_command_line\n'), ((1332, 1345), 'AI.AIPlayer', 'AIPlayer', (['app'], {}), '(app)\n', (1340, 1345), False, 'from AI import AIPlayer, RandomAIPlayer\n'), ((1383, 1402), 'AI.RandomAIPlayer', 'RandomAIPlayer', (['app'], {}), '(app)\n', (1397, 1402), False, 'from AI import AIPlayer, RandomAIPlayer\n')] |
from area import *
from effect import *
from general import General
from sieve import *
from skill import *
from status import *
import libtcodpy as libtcod
import random
class Bloodrotter(General):
def __init__(self, battleground, side, x=-1, y=-1, name="Bloodrotter", color=libtcod.darker_red):
super(Bloodrotter, self).__init__(battleground, side, x, y, name, color)
self.max_hp = 250
self.power = 11
self.thirst_charges = 0
self.prev_thirst_charges = 0
def initialize_skills(self):
self.skills = []
bloodrage_duration = 50
self.skills.append(Skill(self, apply_statuses, 90,
[[Empower(None, self, bloodrage_duration, "Bloodrage empower", 1),
FreezeCooldowns(None, self, bloodrage_duration, "Bloodrage silence"),
Poison(None, self, 1, 1, bloodrage_duration/2, "Bloodrage poison")]],
"Bloodrage", "Gives higher power to a unit, but takes damage and silence",
SingleTarget(self.bg, is_unit, self, is_inrange_close)))
self.skills.append(DummySkill("Blood Bath", "Gain health for every unit killed"))
self.skills.append(DummySkill("Thirst", "Gets damage and speed based on enemy's missing health"))
self.skills.append(Skill(self, nuke_statuses, 140, [40, TempEffect(self.bg, char='*', color=self.color),
"magical", [Bleeding(owner=self, power=30, duration=40, name="Rupture")]],
"Rupture", "Deals initial damage plus extra damage if the unit moves",
SingleTarget(self.bg, is_enemy, self, is_inrange_close)))
def register_kill(self, killed):
super(Bloodrotter, self).register_kill(killed)
# Blood Bath
self.get_healed(int(killed.max_hp * 0.25))
def thirst(self, enemy):
        self.thirst_charges = (enemy.max_hp-enemy.hp)//int(enemy.max_hp*0.33)
diff = self.thirst_charges - self.prev_thirst_charges
if diff:
self.power += 3 * diff
self.prev_thirst_charges = self.thirst_charges
def update(self):
self.thirst(self.bg.generals[(self.side+1)%2])
self.next_action -= self.thirst_charges
super(Bloodrotter, self).update()
class Ox(General):
def __init__(self, battleground, side, x=-1, y=-1, name="Ox", color=libtcod.dark_red):
super(Ox, self).__init__(battleground, side, x, y, name, color)
self.rand = random.Random()
self.max_hp = 400
self.helix_index = 2
def get_attacked(self, enemy, power=None, attack_effect=None, attack_type=None):
if not attack_type:
attack_type = enemy.attack_type
if attack_type == "physical" and self.rand.randint(1,6) == 6:
self.use_skill(-1, 0, 0)
super(Ox, self).get_attacked(enemy, power, attack_effect, attack_type)
def initialize_skills(self):
self.skills = []
taunt_duration = 80
self.skills.append(Skill(self, [apply_status, apply_status], 35, [[Taunted(None, self, taunt_duration)],
[Shield(name="<NAME>", armor=1, duration=taunt_duration), True]], "Berserker's Call",
"Taunts nearby units and gains bonus armor", Circle(self.bg, is_enemy, self, None, True, 5), True))
self.skills.append(Skill(self, apply_status, 100, [PoisonHunger(None, self, 4, 6, 20)], "Battle Hunger",
"Enemy gets slowed and takes damage until it kills a unit",
SingleTarget(self.bg, is_enemy, self, is_inrange_close)))
self.skills.append(Skill(self, place_entity, 2, [Slash(self.bg, side=self.side, power=10)], "Counter Helix",
"When attacked, performs a helix counter attack", SingleTarget(self.bg)))
self.skills.append(Skill(self, decapitate, 80, [0.33], "Culling Blade",
"Decapitates enemies with low health", SingleTarget(self.bg, is_enemy, self, is_adjacent)))
def start_battle(self):
super(Ox, self).start_battle()
self.rand.seed(self.max_hp)
def use_skill(self, i, x, y):
if i == self.helix_index:
# Counter helix can't be used like that
return False
else:
last = self.last_skill_used
if i == -1:
i = self.helix_index # Forced counter helix
skill_used = super(Ox, self).use_skill(i, x, y)
if skill_used and i == self.helix_index:
self.last_skill_used = last
return skill_used
class Pock(General):
def __init__(self, battleground, side, x=-1, y=-1, name="Pock", color=libtcod.sky):
super(Pock, self).__init__(battleground, side, x, y, name, color)
self.max_hp = 250
self.armor["physical"] = 1
self.orb = Orb(self.bg, self.side, char='o', color=self.color)
self.orb_index = 0
self.jaunt_index = 4
def initialize_skills(self):
self.skills = []
self.skills.append(Skill(self, place_entity, 80, [self.orb], "Illusory Orb",
"Launches a magic orb that damages and might be teleported into",
SingleTarget(self.bg, general=self, reach_function=is_inrange_long, selfcentered=True)))
self.skills.append(Skill(self, nuke_statuses, 60, [15, TempEffect(self.bg, char='`', color=self.color),
"magical", [FreezeCooldowns(None, self, 20, "Waning Rift silence")]],
"Waning Rift", "Deals damage and silences enemy units nearby",
Circle(self.bg, is_enemy, self, selfcentered=True, radius=2)))
self.skills.append(Skill(self, apply_status, 25, [Phasing(None, 5)], "Phase Shift",
"Enter another dimension and get immune from harm",
SingleTarget(self.bg, general=self, selfcentered=True)))
self.skills.append(Skill(self, explosion, 140, [20, Circle(self.bg, is_enemy, self, radius=3),
[Linked(None, self, 40, name="Dream Coil", power=40, radius=4, status=Stunned(duration=30))]],
"Dream Coil", "Binds enemies to a single point and take damage if they leave it",
SingleTarget(self.bg, general=self, reach_function=is_inrange_close)))
self.skills.append(Skill(self, teleport, 20, [None, self], "Ethereal Jaunt", "Shifts into the Illusory Orb"))
def use_skill(self, i, x, y):
if i == self.jaunt_index:
if self.orb.alive:
self.skills[i].parameters[0] = self.bg.tiles[(self.orb.x, self.orb.y)]
else:
return False
skill_used = super(Pock, self).use_skill(i, x, y)
if skill_used:
if i == self.orb_index:
self.orb = self.bg.tiles[(self.x, self.y)].effects[-1]
self.orb.path = Line(self.bg, origin=(self.x, self.y)).get_tiles(x, y)[:20]
elif i == self.jaunt_index:
self.orb.dissapear()
return skill_used
class Rubock(General):
def __init__(self, battleground, side, x=-1, y=-1, name="Rubock", color=libtcod.green):
super(Rubock, self).__init__(battleground, side, x, y, name, color)
self.copied_skill = 2
self.armor["physical"] = 1
def initialize_skills(self):
self.skills = []
self.skills.append(Skill(self, apply_status, 120, [Lifted(None, self, 15,
land_area=Circle(self.bg, is_enemy, self, radius=4),land_status=Stunned(None, self, 15))],
"Telekinesis", "Lifts an enemy into the air that stuns around on landing",
SingleTarget(self.bg, is_enemy, self, is_inrange_close)))
self.skills.append(Skill(self, apply_status, 40, [Jumping(None, self, 1, "Fade Bolt", 10, -1,
Circle(self.bg, is_enemy, self, radius=2), Empower(duration = 20, name="Fade Bolt debuff",
power_ratio=-0.25))], "Fade Bolt", "Travels between units damaging and weakening them",
SingleTarget(self.bg, is_enemy, self, is_inrange_close)))
self.skills.append(DummySkill("Null Field", "Grants magic resistance to all allies"))
self.skills.append(Skill(self, copy_spell, 140, [], "Spell Steal", "Copies the last spell used by the enemy",
SingleTarget(self.bg, is_enemy_general, self, is_inrange_long)))
self.skills.append(Skill(self, null, 1, [], "Spell Stolen", "Copy of the last spell used by the enemy"))
def start_battle(self):
super(Rubock, self).start_battle()
Aura(self, self, name="Null Field aura", area=Circle(self.bg, is_ally, self, radius=6),
status=Shield(name="Null Field", armor=2, armor_type="magical"))
| [
"random.Random"
] | [((2397, 2412), 'random.Random', 'random.Random', ([], {}), '()\n', (2410, 2412), False, 'import random\n')] |
import bar, baz
def main():
bar.set_config({"host": "192.168.0.1", "port": 8000})
baz.do_something()
if __name__ == "__main__":
main() | [
"baz.do_something",
"bar.set_config"
] | [((34, 87), 'bar.set_config', 'bar.set_config', (["{'host': '192.168.0.1', 'port': 8000}"], {}), "({'host': '192.168.0.1', 'port': 8000})\n", (48, 87), False, 'import bar, baz\n'), ((92, 110), 'baz.do_something', 'baz.do_something', ([], {}), '()\n', (108, 110), False, 'import bar, baz\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from datetime import datetime
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
__version__ = "0.1.0"
engine = create_engine('sqlite:///apps/database.sqlite3', echo=False)
Base = declarative_base()
Session = sessionmaker(bind=engine)
class Users(Base):
__tablename__ = 'users'
id = Column('id', Integer, primary_key=True)
user_name = Column('user_name', String)
user_password = Column('user_password', String)
created_at = Column('created_at', String)
def __repr__(self):
return '<Users(id=%s, user_name=%s, user_password=%s, created_at=%s, )>' \
% (self.id, self.user_name, self.user_password, self.created_at)
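# Illustrative usage sketch (not part of the original module); the sample
# values are made up. Creates the tables and inserts one user.
def _example_setup():
    Base.metadata.create_all(engine)
    session = Session()
    session.add(Users(user_name="alice", user_password="example-password",
                      created_at=datetime.utcnow().isoformat()))
    session.commit()
    return session.query(Users).count()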
class TokenTable(Base):
__tablename__ = 'tokentable'
id = Column('id', Integer, primary_key=True)
token = Column('token', String)
user_id = Column('user_id', Integer)
    def __repr__(self):
return '<TokenTable(id=%s, token=%s, user_id=%s, )>' \
% (self.id, self.token, self.user_id) | [
"sqlalchemy.orm.sessionmaker",
"sqlalchemy.create_engine",
"sqlalchemy.Column",
"sqlalchemy.ext.declarative.declarative_base"
] | [((320, 380), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite:///apps/database.sqlite3"""'], {'echo': '(False)'}), "('sqlite:///apps/database.sqlite3', echo=False)\n", (333, 380), False, 'from sqlalchemy import create_engine, Column, Integer, String\n'), ((388, 406), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (404, 406), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((418, 443), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (430, 443), False, 'from sqlalchemy.orm import sessionmaker\n'), ((495, 534), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (501, 534), False, 'from sqlalchemy import create_engine, Column, Integer, String\n'), ((548, 575), 'sqlalchemy.Column', 'Column', (['"""user_name"""', 'String'], {}), "('user_name', String)\n", (554, 575), False, 'from sqlalchemy import create_engine, Column, Integer, String\n'), ((593, 624), 'sqlalchemy.Column', 'Column', (['"""user_password"""', 'String'], {}), "('user_password', String)\n", (599, 624), False, 'from sqlalchemy import create_engine, Column, Integer, String\n'), ((639, 667), 'sqlalchemy.Column', 'Column', (['"""created_at"""', 'String'], {}), "('created_at', String)\n", (645, 667), False, 'from sqlalchemy import create_engine, Column, Integer, String\n'), ((896, 935), 'sqlalchemy.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (902, 935), False, 'from sqlalchemy import create_engine, Column, Integer, String\n'), ((945, 968), 'sqlalchemy.Column', 'Column', (['"""token"""', 'String'], {}), "('token', String)\n", (951, 968), False, 'from sqlalchemy import create_engine, Column, Integer, String\n'), ((980, 1006), 'sqlalchemy.Column', 'Column', (['"""user_id"""', 'Integer'], {}), "('user_id', Integer)\n", (986, 1006), False, 'from sqlalchemy import create_engine, Column, Integer, String\n')] |
#random search class
#not used in the project
import random
class RandomBitSearch:
def __init__(self,qubo_utility):
self.qubo_utility=qubo_utility
def prepare_random_bit(self):
bit_list=random.choices([0,1],k=self.qubo_utility.bit_length)
bit_list=self.qubo_utility.calc_interactions(bit_list)
return bit_list
def explore_batch(self,model,batch_size=3200):
explore_list=[self.prepare_random_bit() for i in range(batch_size)]
predY=model.predict(explore_list)
res_list=list(zip(predY,explore_list))
res_list=sorted(res_list,reverse=True)
best_res=res_list[0]
return best_res[0],self.qubo_utility.get_uninteracted_bit(best_res[1]) | [
"random.choices"
] | [((215, 269), 'random.choices', 'random.choices', (['[0, 1]'], {'k': 'self.qubo_utility.bit_length'}), '([0, 1], k=self.qubo_utility.bit_length)\n', (229, 269), False, 'import random\n')] |
import os
import oyaml as yaml
def process_image(link):
pass
def process_program(link):
pass
def process_dir(link):
pass
counter = 1
def clean_path(repos):
for repo in repos:
if repo["path"].startswith("/"):
repo["path"] = repo["path"][1:]
return repos
def json_flatten(data,
book="BOOK",
title="{title}",
section="{counter} {path} {topic} {url} {line}",
header="{counter} {path} {topic} {url} {line}",
level=0,
indent=""):
verbose = False
global counter
counter = 0
r = len(yaml.dump(data).splitlines())
    out = ['undefined'] * (r + 1)  # one slot per dumped line plus the title row
out[0] = {
"url": "",
"topic": book,
"title": title,
"book": book,
"output": title,
"header": header,
"level": level,
"indent": indent,
"kind": "title",
"path": "."
}
def _flatten(entry,
book=book,
title=title,
path='',
name='',
section=section,
header=header,
level=level,
indent=indent):
global counter
if type(entry) is dict:
for a in entry:
level = level + 1
counter = counter + 1
key = list(entry.keys())[0]
topic = a
d = {
"title": title,
"name": a,
"kind": "header",
"output": header,
"url": "",
"line": key,
"basename": f"{topic}",
"path": f"{path}/{topic}",
"counter": counter,
"level": level,
"indent": level * indent,
"topic": a.replace("-", " ")
}
if verbose:
print("-----", d)
# display = header.format(**d)
# out[counter] = display
out[counter] = d
_flatten(entry[a],
book=book,
title=title,
path=f"{path}/{a}",
name=f"{name}{a}/",
section=section,
header=header,
level=level,
indent=indent)
elif type(entry) is list:
i = 0
level = level + 1
for a in entry:
_flatten(a,
book=book,
title=title,
path=f"{path}",
name=f"{name}{i}/",
section=section,
header=header,
level=level,
indent=indent)
i += 1
else:
counter = counter + 1
# out[counter] = (line, f"{path}/{line}", name, counter)
if entry.startswith("i "):
entry = entry.replace("i ", "")
process_image(entry)
elif entry.startswith("p "):
entry = entry.replace("p ", "")
process_program(entry)
elif entry.startswith("r "):
entry = entry.replace("r ", "")
process_dir(entry)
location = entry
key = entry
basename = os.path.basename(entry)
name = basename.replace(".md", "")
d = {
"title": title,
"output": section,
"kind": "section",
"url": location,
"line": entry,
"basename": basename,
"path": path,
"counter": counter,
"name": name,
"level": level,
"indent": level * indent,
"topic": name.replace("-", " ").capitalize()
}
if verbose:
print(" >>>>>", d)
# result = output.format(**d)
# out[counter] = result
out[counter] = d
try:
_flatten(
data,
book=book,
title=title,
section=section,
header=header,
level=level,
indent=indent)
# except KeyError as e:
except Exception as e:
print()
print(f"ERROR: The key {e} could not be found")
d = out[:counter + 1]
return clean_path(d)
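# Illustrative usage sketch (not part of the original module): flatten a tiny
# table of contents (the file names are placeholders). Every returned entry is
# a dict carrying at least "kind", "path" and "topic".
def _example_flatten():
    toc = {"intro": ["chapters/welcome.md", "chapters/setup.md"]}
    for entry in json_flatten(toc, book="Handbook", title="Handbook"):
        print(entry["kind"], entry["path"], entry["topic"])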
| [
"oyaml.dump",
"os.path.basename"
] | [((646, 661), 'oyaml.dump', 'yaml.dump', (['data'], {}), '(data)\n', (655, 661), True, 'import oyaml as yaml\n'), ((3558, 3581), 'os.path.basename', 'os.path.basename', (['entry'], {}), '(entry)\n', (3574, 3581), False, 'import os\n')] |
"""Serializador de Viajes"""
# Django REST Framework
from rest_framework import serializers
# Models
from cride.rides.models import Ride
from cride.circles.models import Membership
from cride.users.models import User
# Utilities
from django.utils import timezone
from datetime import timedelta
# Serializers
from cride.users.serializers import UserModelSerializer
class RideModelSerializer(serializers.ModelSerializer):
"""Serializador para el modelo Ride."""
offered_by = UserModelSerializer(read_only=True)
offered_in = serializers.StringRelatedField()
passengers = UserModelSerializer(read_only=True, many=True)
class Meta:
"""Clase Meta."""
model = Ride
fields = '__all__' # Proporciona todos los campos del modelo
read_only_fields = (
'offered_by',
'offered_in',
'rating'
)
    def update(self, instance, data):  # The documentation names this argument validated_data,
        # but any name works here.
        """Allow updates only before the departure date."""
now = timezone.now()
if instance.departure_date <= now:
raise serializers.ValidationError("Los viajes en curso no se pueden modificar.")
return super(RideModelSerializer, self).update(instance, data)
class CreateRideSerializer(serializers.ModelSerializer):
"""Serializador para crear viajes"""
offered_by = serializers.HiddenField(default=serializers.CurrentUserDefault())
available_seats = serializers.IntegerField(min_value=1, max_value=15)
class Meta:
"""Clase Meta."""
model = Ride
exclude = ('offered_in', 'passengers', 'rating', 'is_active') # Traera todos los campos a excepcion
# de los que coloquemos en tupla exclude
def validate_departure_date(self, data):
"""Verifica que fecha no haya pasado."""
min_date = timezone.now() + timedelta(minutes=10)
if data < min_date:
raise serializers.ValidationError(
'La hora de salida debe ser al menos pasando los próximos 20 minutos'
)
return data
def validate(self, data):
"""Validar.
Verifica que la persona que ofrece los viajes es miembro
y también el mismo usuario que realiza la solicitud
"""
if self.context['request'].user != data['offered_by']: # Verificamos que el usuario pasado en el
# contexto dentro de request sea igual a la persona que trae por defecto igualmente desde el request¿?.
raise serializers.ValidationError('No se permiten viajes ofrecidos en nombre de otros.')
user = data['offered_by']
circle = self.context['circle']
try:
membership = Membership.objects.get(
user=user,
circle=circle,
is_active=True
)
except Membership.DoesNotExist:
raise serializers.ValidationError('El usuario no es un miembro activo del circulo.')
self.context['membership'] = membership
if data['arrival_date'] <= data['departure_date']:
raise serializers.ValidationError('La fecha de llegada tiene que suceder despues de la fecha de salida.')
return data
def create(self, data):
"""Crea un viaje y actualiza las estadisticas."""
circle = self.context['circle']
ride = Ride.objects.create(**data, offered_in=circle)
# Circle
circle.rides_offered += 1
circle.save()
# Membership
membership = self.context['membership']
membership.rides_offered += 1
membership.save()
# Profile
profile = data['offered_by'].profile
profile.rides_offered += 1
profile.save()
return ride
class JoinRideSerializer(serializers.ModelSerializer):
"""Serializador para unirse a viajes."""
passenger = serializers.IntegerField()
class Meta:
"""Clase Meta."""
model = Ride
fields = ('passenger',)
def validate_passenger(self, data):
"""Verifica que el pasajero existe y es miembro del circulo."""
try:
user = User.objects.get(pk=data)
except User.DoesNotExist:
raise serializers.ValidationError('Pasajero erroneo.')
circle = self.context['circle']
try:
membership = Membership.objects.get(
user=user,
circle=circle,
is_active=True
)
except Membership.DoesNotExist:
raise serializers.ValidationError('El usuario no es un miembro activo del circulo.')
        # To check that an object exists there are two options: a try/except around a
        # 'get' query, passing <ModelName>.DoesNotExist as the exception type, or an
        # 'if' with a filter() query, checking whether the data exists via 'exists()'.
        self.context['user'] = user  # Put the validated user object in the context.
self.context['member'] = membership
return data
def validate(self, data):
"""Verifica que los viajes permitan nuevos pasajeros."""
ride = self.context['ride']
if ride.departure_date <= timezone.now():
raise serializers.ValidationError("No puedes unirte a este paseo ahora.")
if ride.available_seats < 1:
raise serializers.ValidationError("¡El viaje ya está lleno!")
if ride.passengers.filter(pk=self.context['user'].pk).exists():
raise serializers.ValidationError('El pasajero ya está en este viaje.')
return data
def update(self, instance, data):
"""Agrega pasajeros al viaje y actualiza las estadísticas."""
ride = self.context['ride']
user = self.context['user']
ride.passengers.add(user) # Agregamos a nuestro campo Relacionado el pasajero
# Profile
profile = user.profile
profile.rides_taken += 1
        profile.save()  # Update the profile data and save it.
# Membership
member = self.context['member']
member.rides_taken += 1
member.save()
# Circle
circle = self.context['circle']
circle.rides_taken += 1
circle.save()
return ride
class EndRideSerializer(serializers.ModelSerializer):
"""Serializador para terminar un viaje."""
current_time = serializers.DateTimeField() # Especificamos que el valor enviado por la vista es de tipo
# fecha y hora.
class Meta:
"""Clase Meta."""
model = Ride
fields = ('is_active', 'current_time') # Colocamos a current_time como campos, aunque no sea un
# campo del modelo, esto es para no perder el dato que es enviado.
def validate_current_time(self, data):
"""Verifique que el viaje haya comenzado."""
ride = self.context['view'].get_object()
if data <= ride.departure_date:
raise serializers.ValidationError("El viaje aun no ha comenzado.")
return data
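
# Illustrative usage of EndRideSerializer (hypothetical "finish" action; not
# part of this module):
#
#     serializer = EndRideSerializer(
#         ride,
#         data={'is_active': False, 'current_time': timezone.now()},
#         context={'view': view},
#         partial=True,
#     )
#     serializer.is_valid(raise_exception=True)
#     ride = serializer.save()
#
# validate_current_time() reaches back into the view via self.context['view']
# to fetch the ride, so the view instance must be present in the context.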
| [
"rest_framework.serializers.DateTimeField",
"rest_framework.serializers.IntegerField",
"cride.circles.models.Membership.objects.get",
"rest_framework.serializers.ValidationError",
"rest_framework.serializers.StringRelatedField",
"django.utils.timezone.now",
"cride.rides.models.Ride.objects.create",
"rest_framework.serializers.CurrentUserDefault",
"cride.users.serializers.UserModelSerializer",
"datetime.timedelta",
"cride.users.models.User.objects.get"
] | [((487, 522), 'cride.users.serializers.UserModelSerializer', 'UserModelSerializer', ([], {'read_only': '(True)'}), '(read_only=True)\n', (506, 522), False, 'from cride.users.serializers import UserModelSerializer\n'), ((540, 572), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {}), '()\n', (570, 572), False, 'from rest_framework import serializers\n'), ((590, 636), 'cride.users.serializers.UserModelSerializer', 'UserModelSerializer', ([], {'read_only': '(True)', 'many': '(True)'}), '(read_only=True, many=True)\n', (609, 636), False, 'from cride.users.serializers import UserModelSerializer\n'), ((1546, 1597), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'min_value': '(1)', 'max_value': '(15)'}), '(min_value=1, max_value=15)\n', (1570, 1597), False, 'from rest_framework import serializers\n'), ((3963, 3989), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {}), '()\n', (3987, 3989), False, 'from rest_framework import serializers\n'), ((6532, 6559), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {}), '()\n', (6557, 6559), False, 'from rest_framework import serializers\n'), ((1118, 1132), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1130, 1132), False, 'from django.utils import timezone\n'), ((3449, 3495), 'cride.rides.models.Ride.objects.create', 'Ride.objects.create', ([], {'offered_in': 'circle'}), '(**data, offered_in=circle)\n', (3468, 3495), False, 'from cride.rides.models import Ride\n'), ((1194, 1268), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Los viajes en curso no se pueden modificar."""'], {}), "('Los viajes en curso no se pueden modificar.')\n", (1221, 1268), False, 'from rest_framework import serializers\n'), ((1490, 1522), 'rest_framework.serializers.CurrentUserDefault', 'serializers.CurrentUserDefault', ([], {}), '()\n', (1520, 1522), False, 'from rest_framework import serializers\n'), ((1934, 1948), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1946, 1948), False, 'from django.utils import timezone\n'), ((1951, 1972), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(10)'}), '(minutes=10)\n', (1960, 1972), False, 'from datetime import timedelta\n'), ((2019, 2122), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""La hora de salida debe ser al menos pasando los próximos 20 minutos"""'], {}), "(\n 'La hora de salida debe ser al menos pasando los próximos 20 minutos')\n", (2046, 2122), False, 'from rest_framework import serializers\n'), ((2598, 2685), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""No se permiten viajes ofrecidos en nombre de otros."""'], {}), "(\n 'No se permiten viajes ofrecidos en nombre de otros.')\n", (2625, 2685), False, 'from rest_framework import serializers\n'), ((2793, 2857), 'cride.circles.models.Membership.objects.get', 'Membership.objects.get', ([], {'user': 'user', 'circle': 'circle', 'is_active': '(True)'}), '(user=user, circle=circle, is_active=True)\n', (2815, 2857), False, 'from cride.circles.models import Membership\n'), ((3186, 3290), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""La fecha de llegada tiene que suceder despues de la fecha de salida."""'], {}), "(\n 'La fecha de llegada tiene que suceder despues de la fecha de salida.')\n", (3213, 3290), False, 'from rest_framework import serializers\n'), ((4231, 4256), 
'cride.users.models.User.objects.get', 'User.objects.get', ([], {'pk': 'data'}), '(pk=data)\n', (4247, 4256), False, 'from cride.users.models import User\n'), ((4436, 4500), 'cride.circles.models.Membership.objects.get', 'Membership.objects.get', ([], {'user': 'user', 'circle': 'circle', 'is_active': '(True)'}), '(user=user, circle=circle, is_active=True)\n', (4458, 4500), False, 'from cride.circles.models import Membership\n'), ((5345, 5359), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (5357, 5359), False, 'from django.utils import timezone\n'), ((5379, 5446), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""No puedes unirte a este paseo ahora."""'], {}), "('No puedes unirte a este paseo ahora.')\n", (5406, 5446), False, 'from rest_framework import serializers\n'), ((5502, 5557), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""¡El viaje ya está lleno!"""'], {}), "('¡El viaje ya está lleno!')\n", (5529, 5557), False, 'from rest_framework import serializers\n'), ((5648, 5713), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""El pasajero ya está en este viaje."""'], {}), "('El pasajero ya está en este viaje.')\n", (5675, 5713), False, 'from rest_framework import serializers\n'), ((7090, 7150), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""El viaje aun no ha comenzado."""'], {}), "('El viaje aun no ha comenzado.')\n", (7117, 7150), False, 'from rest_framework import serializers\n'), ((2982, 3060), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""El usuario no es un miembro activo del circulo."""'], {}), "('El usuario no es un miembro activo del circulo.')\n", (3009, 3060), False, 'from rest_framework import serializers\n'), ((4309, 4357), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""Pasajero erroneo."""'], {}), "('Pasajero erroneo.')\n", (4336, 4357), False, 'from rest_framework import serializers\n'), ((4625, 4703), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (['"""El usuario no es un miembro activo del circulo."""'], {}), "('El usuario no es un miembro activo del circulo.')\n", (4652, 4703), False, 'from rest_framework import serializers\n')] |
'''Helper facilities for logging.

The datalake client does not generally configure its own logging, but the
command-line client may choose to configure it in some cases. Users with
Sentry accounts may wish to enable Sentry reporting by installing the sentry
extras.
'''
import os
import logging.config  # logging.config must be imported explicitly for dictConfig below
from .common.errors import InsufficientConfiguration


def sentry_available():
    try:
        import raven.handlers.logging
        return hasattr(raven.handlers.logging, 'SentryHandler')
    except ImportError:
        return False


def _get_sentry_handler():
    dsn = os.environ.get('DATALAKE_SENTRY_DSN')
    if not dsn:
        return None
    if not sentry_available():
        msg = 'DATALAKE_SENTRY_DSN is configured but raven is not installed. '
        msg += '`pip install datalake[sentry]` to turn this feature on.'
        raise InsufficientConfiguration(msg)
    return {
        'level': 'ERROR',
        'class': 'raven.handlers.logging.SentryHandler',
        'dsn': dsn
    }


def prepare_logging():
    conf = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'simple': {
                'format': '%(asctime)s %(name)s %(levelname)s %(message)s',
            }
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'level': 'INFO',
                'formatter': 'simple',
                'stream': 'ext://sys.stdout'
            },
        },
        'root': {
            'level': 'INFO',
            'handlers': ['console']
        }
    }

    # Attach the Sentry handler to the root logger when a DSN is configured.
    sentry_handler = _get_sentry_handler()
    if sentry_handler:
        conf['handlers']['sentry'] = sentry_handler
        conf['root']['handlers'].append('sentry')

    logging.config.dictConfig(conf)
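
# Illustrative usage (hypothetical; the import path below is assumed for the example):
#
#     from datalake.log import prepare_logging
#     prepare_logging()
#     logging.getLogger('datalake').info('logging configured')
#
# With DATALAKE_SENTRY_DSN set and the `datalake[sentry]` extra installed,
# ERROR-level records are also forwarded to Sentry via the raven SentryHandler.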
| [
"logging.config.dictConfig",
"os.environ.get"
] | [((564, 601), 'os.environ.get', 'os.environ.get', (['"""DATALAKE_SENTRY_DSN"""'], {}), "('DATALAKE_SENTRY_DSN')\n", (578, 601), False, 'import os\n'), ((1755, 1786), 'logging.config.dictConfig', 'logging.config.dictConfig', (['conf'], {}), '(conf)\n', (1780, 1786), False, 'import logging\n')] |