code stringlengths 22–1.05M | apis listlengths 1–3.31k | extract_api stringlengths 75–3.25M
---|---|---
# Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany
# Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory
import logging
from xml.sax.saxutils import escape
log = logging.getLogger()
class Todo:
def __init__(self, file_path, line_number, content):
self.file_path = file_path
self.line_number = line_number
self.content = content
self.is_valid = True
self.error_reason = None
def __str__(self):
return 'Todo in file ' + self.file_path + ':' + str(self.line_number) + ' | ' + self.content
def mark_as_valid(self):
self.is_valid = True
self.error_reason = None
def mark_as_invalid(self, error_reason):
self.is_valid = False
self.error_reason = error_reason
def print(self, show_valid=False):
if not show_valid and self.is_valid:
return
log.error('[REASON] %s' % self.error_reason)
log.error('[FILE] %s' % self.file_path)
log.error('[LINE] %s' % self.line_number)
log.error('[CONTENT] %s' % self.content)
def print_xml(self, xml_file):
if self.is_valid:
xml_file.write('\t<testcase classname="{}" name="line {}" />\n'.format(self.file_path, self.line_number))
else:
xml_file.write('\t<testcase classname="{}" name="line {}" >\n'.format(self.file_path, self.line_number))
xml_file.write('\t\t<failure message="{}">{}</failure>\n'.format(self.error_reason, escape(self.content)))
xml_file.write('\t</testcase>\n')
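# Minimal usage sketch with hypothetical values: exercises the validity flags,
# the console reporting, and the JUnit-style XML fragment written by print_xml,
# using an in-memory buffer instead of a real report file.
if __name__ == '__main__':
    from io import StringIO
    todo = Todo('src/example.py', 42, 'TODO: remove this workaround')
    todo.mark_as_invalid('missing ticket reference')
    todo.print()
    report = StringIO()
    todo.print_xml(report)
    print(report.getvalue())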
| [
"logging.getLogger",
"xml.sax.saxutils.escape"
]
| [((223, 242), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (240, 242), False, 'import logging\n'), ((1538, 1558), 'xml.sax.saxutils.escape', 'escape', (['self.content'], {}), '(self.content)\n', (1544, 1558), False, 'from xml.sax.saxutils import escape\n')] |
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd
import math
from datetime import datetime, time
from utils import MONTH_NAMES, month_range
def section(title, content, gray=False):
return html.Section(className=f'hero is-fullheight is-medium {"has-background-grey-lighter" if gray else ""}', children=[
html.Div(className='hero-body', children=[
html.Div(className='container', children=[
html.Div(className='columns is-centered', children=[
html.Div(className='column is-four-fifths is-full-mobile', children=[
html.Div(className='level', children=[
html.H2(title, className='title')
]),
] + content)
])
])
])
])
def quality_index(df):
indexes = df.sort_values('Valor', ascending=False).fillna('?').values
return html.Div(className='columns is-multiline is-4 is-variable', children=[
html.Div(className=f'column is-one-quarter index-container {"unknown-data" if i[1] == "?" else ""}', children=[
html.H1(i[1], className='title'),
html.H2(i[0], className='subtitle')
]) for i in indexes
])
def month_selector(df, first_month=None):
current_month = datetime.now().month
return html.Div(dcc.RangeSlider(
id='month-range-slider',
marks={i+1: MONTH_NAMES[i] for i in range(first_month-1, current_month)},
min=first_month, max=current_month,
value=[current_month-2,current_month],
pushable=1
), className='slider-frame')
def point_list(items):
return html.Ul([html.Li(item) for item in items])
def first():
return html.Section(className='hero is-fullheight', children=[
html.Div(className='hero-body', children=[
html.Div(className='container', children=[
html.Div(className='columns is-vcentered is-centered', children=[
html.Div(className='column is-5', children=[
html.Figure(className='image is-4by4', children=[
html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'),
]),
]),
html.Div(className='column is-5 main-title', children=[
html.H1('Informe de Gestión de Operaciones', className='title')
])
])
]),
])
])
def last():
return html.Footer(className='footer has-background-white', children=[
html.Div(className='content has-text-centered', children=[
html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'),
html.P(className='is-size-7', children=[
'FabLab UTFSM 2019', html.Br(),
'UTFSM Campus San Joaquín, Edificio C', html.Br(),
'Av. <NAME> 3939, Santiago de Chile', html.Br(),
'Desarrollado bajo licencia MIT'
])
])
])
def fig_records(df, months=None, stacked=False):
machine_list = df['Tipo Máquina'].unique()
months = month_range(months)
def create_frame(df, serie_name):
count = df['Tipo Máquina'].value_counts()
frame = pd.DataFrame({'Tipo de Máquina': machine_list})
frame[serie_name] = [count.get(machine, 0) for machine in machine_list]
return frame
extras = {'barmode': 'relative' if stacked else 'group'}
figure = go.Figure()
for m in months:
name = MONTH_NAMES[m-1]
frame = create_frame(df[df.index.month == m], name)
figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y'))
if stacked and months:
frame = create_frame(df[df.index.month.isin(months)], 'Total')
figure.add_trace(go.Scatter(
x=frame['Tipo de Máquina'],
y=frame['Total'],
text=frame['Total'],
textposition='top center',
mode='text',
showlegend=False,
hoverinfo='skip'
))
figure.update_layout(yaxis={ 'title': 'Número de registros'}, **extras)
return figure
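# Small sketch of the frame shape these figure builders expect, with hypothetical
# data: a datetime index, a 'Tipo Máquina' column, and 'Tiempo de uso en minutos'
# (used by fig_hours and the other builders below). The 'months' argument is
# assumed to be whatever utils.month_range accepts, e.g. a [first, last] pair
# coming from the range slider.
def _sample_records_figure():
    sample = pd.DataFrame(
        {'Tipo Máquina': ['Impresora 3D', 'Cortadora Láser', 'Impresora 3D'],
         'Tiempo de uso en minutos': [90, 45, 30]},
        index=pd.to_datetime(['2019-03-04', '2019-03-12', '2019-04-02']))
    return fig_records(sample, months=[3, 4], stacked=True)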
def fig_hours(df, months=None, stacked=False):
machine_list = df['Tipo Máquina'].unique()
months=month_range(months)
def create_frame(df, serie_name):
count = df.groupby('Tipo Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0)
frame = pd.DataFrame({'Tipo de Máquina': machine_list})
frame[serie_name] = [count.get(machine, 0) for machine in machine_list]
return frame
if months and type(months) == list:
df = df[df.index.month.isin(months)]
frame = create_frame(df, 'Total')
figure = go.Figure()
extras = {'barmode': 'relative' if stacked else 'group'}
for m in months:
name = MONTH_NAMES[m-1]
frame = create_frame(df[df.index.month == m], name)
figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h'))
if stacked and months:
frame = create_frame(df[df.index.month.isin(months)], 'Total')
figure.add_trace(go.Scatter(
y=frame['Tipo de Máquina'],
x=frame['Total'],
text=frame['Total'],
textposition='middle right',
mode='text',
showlegend=False,
hoverinfo='skip'
))
figure.update_layout(xaxis={ 'title': f'Horas de uso {"total" if stacked else ""}'}, **extras)
return figure
def cap_per_machine_per_month(month_caps, machine, month):
this_month = month_caps[month_caps['Mes'] == month]
machine_count = {'Impresora 3D': 5, 'Cortadora Láser': 2, 'Router CNC': 3, 'Torno': 1, 'Cirqoid': 1}
return (this_month['Dias'] * this_month['Horas']).values[0] * 60 * machine_count[machine]
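# Worked example of the formula above, with hypothetical figures: a month_caps
# row with Dias=20 and Horas=9 for 'Impresora 3D' (5 machines) gives
# 20 * 9 * 60 * 5 = 54000 available minutes, i.e. 900 machine-hours.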
def fig_total_capacity_2(df, month_caps, months):
machine_list = df['Tipo Máquina'].unique()
months = month_range(months)
month_names = [MONTH_NAMES[m-1] for m in months]
figure = go.Figure()
for machine in machine_list:
texts = []
caps = []
for month in months:
total_cap = cap_per_machine_per_month(month_caps, machine, month)
hours = total_cap // 60
used_cap = df[df.index.month==month].groupby('Tipo Máquina')['Tiempo de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine, 0)
caps.append(used_cap)
texts.append(f'{used_cap}% utilizado de una capacidad total de {hours} horas.')
figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts))
figure.update_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada'))
return figure
"""
TODO: Finish the heatmap somehow...
def fig_uses(df, months):
dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
data = df[df.index.month.isin(month_range(months))]
figure = go.Figure()
times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index()
day_times = times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time
z_dict = dict()
for i, d in enumerate(days):
z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values})
z_values = pd.DataFrame(z_dict).values
figure.add_trace(go.Heatmap(
x=dias,
y=day_times,
z=z_values))
return figure
"""
def trace_context_use(df, level=None, **kwargs):
grouped = None
if not level:
grouped = df.groupby('Contexto 1')
else:
grouped = df[df['Contexto 1'] == level].groupby('Contexto 2')
context_data = grouped.sum()['Tiempo de uso en minutos']
return go.Pie(labels=context_data.index, values=context_data.values, **kwargs)
def fig_contexts_use(df, months, level, **kwargs):
col_count = 3
row_count = math.ceil(len(month_range(months))/col_count)
figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'} for c in range(col_count)] for r in range(row_count)],
subplot_titles=[MONTH_NAMES[m-1] for m in month_range(months)])
def take_month(months):
for m in month_range(months):
yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1])
pie_factory = take_month(months)
try:
for r in range(row_count):
for c in range(col_count):
figure.add_trace(next(pie_factory), r+1, c+1)
except StopIteration as stop:
pass
return figure
def records_per_machine(df, months=None, stacked=False):
return dcc.Graph(figure=fig_records(df, months=months, stacked=stacked), style={'height': '80vh'})
def time_per_machine(df, months=None, stacked=False):
return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'})
def machine_capacity(df, caps, months=None):
return dcc.Graph(figure=fig_total_capacity_2(df, caps, months), style={'height': '80vh'})
#def uses(df, months):
# return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'})
def contexts(df, months, level=None):
return dcc.Graph(figure=fig_contexts_use(df, months, level), style={'height': '80vh'}) | [
"dash_html_components.Br",
"dash_html_components.Li",
"plotly.graph_objs.Scatter",
"datetime.datetime.now",
"plotly.graph_objs.Pie",
"plotly.graph_objs.Bar",
"dash_html_components.H2",
"dash_html_components.H1",
"pandas.DataFrame",
"plotly.graph_objs.Figure",
"dash_html_components.Img",
"utils.month_range"
]
| [((3295, 3314), 'utils.month_range', 'month_range', (['months'], {}), '(months)\n', (3306, 3314), False, 'from utils import MONTH_NAMES, month_range\n'), ((3645, 3656), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (3654, 3656), True, 'import plotly.graph_objs as go\n'), ((4455, 4474), 'utils.month_range', 'month_range', (['months'], {}), '(months)\n', (4466, 4474), False, 'from utils import MONTH_NAMES, month_range\n'), ((4915, 4926), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (4924, 4926), True, 'import plotly.graph_objs as go\n'), ((6150, 6169), 'utils.month_range', 'month_range', (['months'], {}), '(months)\n', (6161, 6169), False, 'from utils import MONTH_NAMES, month_range\n'), ((6236, 6247), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (6245, 6247), True, 'import plotly.graph_objs as go\n'), ((8077, 8148), 'plotly.graph_objs.Pie', 'go.Pie', ([], {'labels': 'context_data.index', 'values': 'context_data.values'}), '(labels=context_data.index, values=context_data.values, **kwargs)\n', (8083, 8148), True, 'import plotly.graph_objs as go\n'), ((1447, 1461), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1459, 1461), False, 'from datetime import datetime, time\n'), ((3420, 3467), 'pandas.DataFrame', 'pd.DataFrame', (["{'Tipo de Máquina': machine_list}"], {}), "({'Tipo de Máquina': machine_list})\n", (3432, 3467), True, 'import pandas as pd\n'), ((4627, 4674), 'pandas.DataFrame', 'pd.DataFrame', (["{'Tipo de Máquina': machine_list}"], {}), "({'Tipo de Máquina': machine_list})\n", (4639, 4674), True, 'import pandas as pd\n'), ((8540, 8559), 'utils.month_range', 'month_range', (['months'], {}), '(months)\n', (8551, 8559), False, 'from utils import MONTH_NAMES, month_range\n'), ((1807, 1820), 'dash_html_components.Li', 'html.Li', (['item'], {}), '(item)\n', (1814, 1820), True, 'import dash_html_components as html\n'), ((3795, 3880), 'plotly.graph_objs.Bar', 'go.Bar', ([], {'x': "frame['Tipo de Máquina']", 'y': 'frame[name]', 'name': 'name', 'hoverinfo': '"""name+y"""'}), "(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y'\n )\n", (3801, 3880), True, 'import plotly.graph_objs as go\n'), ((4005, 4168), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': "frame['Tipo de Máquina']", 'y': "frame['Total']", 'text': "frame['Total']", 'textposition': '"""top center"""', 'mode': '"""text"""', 'showlegend': '(False)', 'hoverinfo': '"""skip"""'}), "(x=frame['Tipo de Máquina'], y=frame['Total'], text=frame['Total'\n ], textposition='top center', mode='text', showlegend=False, hoverinfo=\n 'skip')\n", (4015, 4168), True, 'import plotly.graph_objs as go\n'), ((5128, 5230), 'plotly.graph_objs.Bar', 'go.Bar', ([], {'y': "frame['Tipo de Máquina']", 'x': 'frame[name]', 'name': 'name', 'hoverinfo': '"""name+x"""', 'orientation': '"""h"""'}), "(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo=\n 'name+x', orientation='h')\n", (5134, 5230), True, 'import plotly.graph_objs as go\n'), ((5351, 5515), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'y': "frame['Tipo de Máquina']", 'x': "frame['Total']", 'text': "frame['Total']", 'textposition': '"""middle right"""', 'mode': '"""text"""', 'showlegend': '(False)', 'hoverinfo': '"""skip"""'}), "(y=frame['Tipo de Máquina'], x=frame['Total'], text=frame['Total'\n ], textposition='middle right', mode='text', showlegend=False,\n hoverinfo='skip')\n", (5361, 5515), True, 'import plotly.graph_objs as go\n'), ((6776, 6836), 'plotly.graph_objs.Bar', 'go.Bar', ([], {'x': 
'month_names', 'y': 'caps', 'name': 'machine', 'hovertext': 'texts'}), '(x=month_names, y=caps, name=machine, hovertext=texts)\n', (6782, 6836), True, 'import plotly.graph_objs as go\n'), ((8249, 8268), 'utils.month_range', 'month_range', (['months'], {}), '(months)\n', (8260, 8268), False, 'from utils import MONTH_NAMES, month_range\n'), ((8473, 8492), 'utils.month_range', 'month_range', (['months'], {}), '(months)\n', (8484, 8492), False, 'from utils import MONTH_NAMES, month_range\n'), ((1267, 1299), 'dash_html_components.H1', 'html.H1', (['i[1]'], {'className': '"""title"""'}), "(i[1], className='title')\n", (1274, 1299), True, 'import dash_html_components as html\n'), ((1313, 1348), 'dash_html_components.H2', 'html.H2', (['i[0]'], {'className': '"""subtitle"""'}), "(i[0], className='subtitle')\n", (1320, 1348), True, 'import dash_html_components as html\n'), ((2802, 2868), 'dash_html_components.Img', 'html.Img', ([], {'src': '"""/indicadores/assets/footer.png"""', 'alt': '"""FabLab UTFSM"""'}), "(src='/indicadores/assets/footer.png', alt='FabLab UTFSM')\n", (2810, 2868), True, 'import dash_html_components as html\n'), ((2960, 2969), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (2967, 2969), True, 'import dash_html_components as html\n'), ((3028, 3037), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (3035, 3037), True, 'import dash_html_components as html\n'), ((3092, 3101), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (3099, 3101), True, 'import dash_html_components as html\n'), ((2495, 2558), 'dash_html_components.H1', 'html.H1', (['"""Informe de Gestión de Operaciones"""'], {'className': '"""title"""'}), "('Informe de Gestión de Operaciones', className='title')\n", (2502, 2558), True, 'import dash_html_components as html\n'), ((2277, 2341), 'dash_html_components.Img', 'html.Img', ([], {'src': '"""/indicadores/assets/logo.png"""', 'alt': '"""FabLab UTFSM"""'}), "(src='/indicadores/assets/logo.png', alt='FabLab UTFSM')\n", (2285, 2341), True, 'import dash_html_components as html\n'), ((808, 841), 'dash_html_components.H2', 'html.H2', (['title'], {'className': '"""title"""'}), "(title, className='title')\n", (815, 841), True, 'import dash_html_components as html\n')] |
# -*- coding: utf-8 -*-
from gengine.app.tests.base import BaseDBTest
from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language
from gengine.metadata import DBSession
from gengine.app.model import AuthUser
class TestUserCreation(BaseDBTest):
def test_user_creation(self):
lang = get_or_create_language("en")
user = create_user(
lat = 12.1,
lng = 12.2,
#country = "RO",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "<NAME>"
}
)
self.assertTrue(user.lat == 12.1)
self.assertTrue(user.lng == 12.2)
#self.assertTrue(user.country == "RO")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
self.assertTrue(user.additional_public_data["first_name"] == "Rudolf")
self.assertTrue(user.additional_public_data["last_name"] == "<NAME>")
def test_user_updation(self):
lang = get_or_create_language("en")
user = create_user()
user = update_user(
user_id = user.id,
lat = 14.2,
lng = 16.3,
#country = "EN",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "<NAME>"
}
)
# Correct cases
self.assertTrue(user.lat == 14.2)
self.assertTrue(user.lng == 16.3)
#self.assertTrue(user.country == "EN")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
def test_user_deletion(self):
user1 = create_user()
# Create Second user
user2 = create_user(
lat=85.59,
lng=65.75,
#country="DE",
#region="Niedersachsen",
#city="Osnabrück",
timezone="Europe/Berlin",
language="de",
additional_public_data={
"first_name": "Michael",
"last_name": "Clarke"
},
friends=[1]
)
remaining_users = delete_user(
user_id = user1.id
)
# Correct cases
self.assertNotIn(user1.id, remaining_users)
self.assertEqual(user2.id, remaining_users[0].id)
def test_verify_password(self):
auth_user = AuthUser()
auth_user.password = "<PASSWORD>"
auth_user.active = True
auth_user.email = "<EMAIL>"
DBSession.add(auth_user)
iscorrect = auth_user.verify_password("<PASSWORD>")
self.assertEqual(iscorrect, True)
def test_create_token(self):
user = create_user()
auth_user = AuthUser()
auth_user.user_id = user.id
auth_user.password = "<PASSWORD>"
auth_user.active = True
auth_user.email = "<EMAIL>"
DBSession.add(auth_user)
if auth_user.verify_password("<PASSWORD>"):
token = auth_user.get_or_create_token()
self.assertNotEqual(token, None)
| [
"gengine.app.tests.helpers.create_user",
"gengine.metadata.DBSession.add",
"gengine.app.tests.helpers.delete_user",
"gengine.app.tests.helpers.update_user",
"gengine.app.tests.helpers.get_or_create_language",
"gengine.app.model.AuthUser"
]
| [((337, 365), 'gengine.app.tests.helpers.get_or_create_language', 'get_or_create_language', (['"""en"""'], {}), "('en')\n", (359, 365), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((381, 531), 'gengine.app.tests.helpers.create_user', 'create_user', ([], {'lat': '(12.1)', 'lng': '(12.2)', 'timezone': '"""Europe/Bukarest"""', 'language': '"""en"""', 'additional_public_data': "{'first_name': 'Rudolf', 'last_name': '<NAME>'}"}), "(lat=12.1, lng=12.2, timezone='Europe/Bukarest', language='en',\n additional_public_data={'first_name': 'Rudolf', 'last_name': '<NAME>'})\n", (392, 531), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((1320, 1348), 'gengine.app.tests.helpers.get_or_create_language', 'get_or_create_language', (['"""en"""'], {}), "('en')\n", (1342, 1348), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((1364, 1377), 'gengine.app.tests.helpers.create_user', 'create_user', ([], {}), '()\n', (1375, 1377), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((1393, 1564), 'gengine.app.tests.helpers.update_user', 'update_user', ([], {'user_id': 'user.id', 'lat': '(14.2)', 'lng': '(16.3)', 'timezone': '"""Europe/Bukarest"""', 'language': '"""en"""', 'additional_public_data': "{'first_name': 'Rudolf', 'last_name': '<NAME>'}"}), "(user_id=user.id, lat=14.2, lng=16.3, timezone='Europe/Bukarest',\n language='en', additional_public_data={'first_name': 'Rudolf',\n 'last_name': '<NAME>'})\n", (1404, 1564), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((2231, 2244), 'gengine.app.tests.helpers.create_user', 'create_user', ([], {}), '()\n', (2242, 2244), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((2291, 2459), 'gengine.app.tests.helpers.create_user', 'create_user', ([], {'lat': '(85.59)', 'lng': '(65.75)', 'timezone': '"""Europe/Berlin"""', 'language': '"""de"""', 'additional_public_data': "{'first_name': 'Michael', 'last_name': 'Clarke'}", 'friends': '[1]'}), "(lat=85.59, lng=65.75, timezone='Europe/Berlin', language='de',\n additional_public_data={'first_name': 'Michael', 'last_name': 'Clarke'},\n friends=[1])\n", (2302, 2459), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((2702, 2731), 'gengine.app.tests.helpers.delete_user', 'delete_user', ([], {'user_id': 'user1.id'}), '(user_id=user1.id)\n', (2713, 2731), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((2948, 2958), 'gengine.app.model.AuthUser', 'AuthUser', ([], {}), '()\n', (2956, 2958), False, 'from gengine.app.model import AuthUser\n'), ((3077, 3101), 'gengine.metadata.DBSession.add', 'DBSession.add', (['auth_user'], {}), '(auth_user)\n', (3090, 3101), False, 'from gengine.metadata import DBSession\n'), ((3255, 3268), 'gengine.app.tests.helpers.create_user', 'create_user', ([], {}), '()\n', (3266, 3268), False, 'from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language\n'), ((3289, 3299), 'gengine.app.model.AuthUser', 'AuthUser', ([], {}), '()\n', (3297, 3299), False, 'from gengine.app.model import AuthUser\n'), ((3454, 3478), 'gengine.metadata.DBSession.add', 
'DBSession.add', (['auth_user'], {}), '(auth_user)\n', (3467, 3478), False, 'from gengine.metadata import DBSession\n')] |
from __future__ import annotations
import collections
import copy
import itertools
import math
import os
import posixpath
from io import BytesIO, StringIO
from textwrap import indent
from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union
from fontTools.misc import etree as ET
from fontTools.misc import plistlib
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.textTools import tobytes, tostr
"""
designSpaceDocument
- read and write designspace files
"""
__all__ = [
'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor',
'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader',
'BaseDocWriter'
]
# ElementTree allows to find namespace-prefixed elements, but not attributes
# so we have to do it ourselves for 'xml:lang'
XML_NS = "{http://www.w3.org/XML/1998/namespace}"
XML_LANG = XML_NS + "lang"
def posix(path):
"""Normalize paths using forward slash to work also on Windows."""
new_path = posixpath.join(*path.split(os.path.sep))
if path.startswith('/'):
# The above transformation loses absolute paths
new_path = '/' + new_path
elif path.startswith(r'\\'):
# The above transformation loses leading slashes of UNC path mounts
new_path = '//' + new_path
return new_path
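# For example, on Windows posix(r'C:\fonts\MyFont.designspace') yields
# 'C:/fonts/MyFont.designspace', while an absolute path such as
# '/tmp/a.designspace' keeps its leading slash; on POSIX systems, paths that
# already use forward slashes pass through unchanged.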
def posixpath_property(private_name):
"""Generate a propery that holds a path always using forward slashes."""
def getter(self):
# Normal getter
return getattr(self, private_name)
def setter(self, value):
# The setter rewrites paths using forward slashes
if value is not None:
value = posix(value)
setattr(self, private_name, value)
return property(getter, setter)
class DesignSpaceDocumentError(Exception):
def __init__(self, msg, obj=None):
self.msg = msg
self.obj = obj
def __str__(self):
return str(self.msg) + (
": %r" % self.obj if self.obj is not None else "")
class AsDictMixin(object):
def asdict(self):
d = {}
for attr, value in self.__dict__.items():
if attr.startswith("_"):
continue
if hasattr(value, "asdict"):
value = value.asdict()
elif isinstance(value, list):
value = [
v.asdict() if hasattr(v, "asdict") else v for v in value
]
d[attr] = value
return d
class SimpleDescriptor(AsDictMixin):
""" Containers for a bunch of attributes"""
# XXX this is ugly. The 'print' is inappropriate here, and instead of
# assert, it should simply return True/False
def compare(self, other):
# test if this object contains the same data as the other
for attr in self._attrs:
try:
assert(getattr(self, attr) == getattr(other, attr))
except AssertionError:
print("failed attribute", attr, getattr(self, attr), "!=", getattr(other, attr))
def __repr__(self):
attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
attrs = indent('\n'.join(attrs), ' ')
return f"{self.__class__.__name__}(\n{attrs}\n)"
class SourceDescriptor(SimpleDescriptor):
"""Simple container for data related to the source
.. code:: python
doc = DesignSpaceDocument()
s1 = SourceDescriptor()
s1.path = masterPath1
s1.name = "master.ufo1"
s1.font = defcon.Font("master.ufo1")
s1.location = dict(weight=0)
s1.familyName = "MasterFamilyName"
s1.styleName = "MasterStyleNameOne"
s1.localisedFamilyName = dict(fr="Caractère")
s1.mutedGlyphNames.append("A")
s1.mutedGlyphNames.append("Z")
doc.addSource(s1)
"""
flavor = "source"
_attrs = ['filename', 'path', 'name', 'layerName',
'location', 'copyLib',
'copyGroups', 'copyFeatures',
'muteKerning', 'muteInfo',
'mutedGlyphNames',
'familyName', 'styleName', 'localisedFamilyName']
filename = posixpath_property("_filename")
path = posixpath_property("_path")
def __init__(
self,
*,
filename=None,
path=None,
font=None,
name=None,
location=None,
designLocation=None,
layerName=None,
familyName=None,
styleName=None,
localisedFamilyName=None,
copyLib=False,
copyInfo=False,
copyGroups=False,
copyFeatures=False,
muteKerning=False,
muteInfo=False,
mutedGlyphNames=None,
):
self.filename = filename
"""string. A relative path to the source file, **as it is in the document**.
MutatorMath + VarLib.
"""
self.path = path
"""The absolute path, calculated from filename."""
self.font = font
"""Any Python object. Optional. Points to a representation of this
source font that is loaded in memory, as a Python object (e.g. a
``defcon.Font`` or a ``fontTools.ttFont.TTFont``).
The default document reader will not fill-in this attribute, and the
default writer will not use this attribute. It is up to the user of
``designspaceLib`` to either load the resource identified by
``filename`` and store it in this field, or write the contents of
        this field to the disk and make ``filename`` point to that.
"""
self.name = name
"""string. Optional. Unique identifier name for this source.
MutatorMath + Varlib.
"""
self.designLocation = designLocation if designLocation is not None else location or {}
"""dict. Axis values for this source, in design space coordinates.
MutatorMath + Varlib.
This may be only part of the full design location.
See :meth:`getFullDesignLocation()`
.. versionadded:: 5.0
"""
self.layerName = layerName
"""string. The name of the layer in the source to look for
outline data. Default ``None`` which means ``foreground``.
"""
self.familyName = familyName
"""string. Family name of this source. Though this data
can be extracted from the font, it can be efficient to have it right
here.
Varlib.
"""
self.styleName = styleName
"""string. Style name of this source. Though this data
can be extracted from the font, it can be efficient to have it right
here.
Varlib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name strings, keyed by
language code.
If present, will be used to build localized names for all instances.
.. versionadded:: 5.0
"""
self.copyLib = copyLib
"""bool. Indicates if the contents of the font.lib need to
be copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyInfo = copyInfo
"""bool. Indicates if the non-interpolating font.info needs
to be copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyGroups = copyGroups
"""bool. Indicates if the groups need to be copied to the
instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyFeatures = copyFeatures
"""bool. Indicates if the feature text needs to be
copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.muteKerning = muteKerning
"""bool. Indicates if the kerning data from this source
needs to be muted (i.e. not be part of the calculations).
MutatorMath only.
"""
self.muteInfo = muteInfo
"""bool. Indicated if the interpolating font.info data for
this source needs to be muted.
MutatorMath only.
"""
self.mutedGlyphNames = mutedGlyphNames or []
"""list. Glyphnames that need to be muted in the
instances.
MutatorMath only.
"""
@property
def location(self):
"""dict. Axis values for this source, in design space coordinates.
MutatorMath + Varlib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
"""
return self.designLocation
@location.setter
def location(self, location: Optional[AnisotropicLocationDict]):
self.designLocation = location or {}
def setFamilyName(self, familyName, languageCode="en"):
"""Setter for :attr:`localisedFamilyName`
.. versionadded:: 5.0
"""
self.localisedFamilyName[languageCode] = tostr(familyName)
def getFamilyName(self, languageCode="en"):
"""Getter for :attr:`localisedFamilyName`
.. versionadded:: 5.0
"""
return self.localisedFamilyName.get(languageCode)
def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
"""Get the complete design location of this source, from its
:attr:`designLocation` and the document's axis defaults.
.. versionadded:: 5.0
"""
result: AnisotropicLocationDict = {}
for axis in doc.axes:
if axis.name in self.designLocation:
result[axis.name] = self.designLocation[axis.name]
else:
result[axis.name] = axis.map_forward(axis.default)
return result
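# Small sketch with hypothetical axes, using the DesignSpaceDocument container
# defined later in this module: axes missing from the source's designLocation
# fall back to the axis default, mapped into design coordinates.
def _source_location_example():
    doc = DesignSpaceDocument()
    doc.addAxis(AxisDescriptor(name="weight", tag="wght",
                               minimum=100, default=400, maximum=900))
    doc.addAxis(AxisDescriptor(name="width", tag="wdth",
                               minimum=50, default=100, maximum=200,
                               map=[(50, 6), (100, 11), (200, 20)]))
    source = SourceDescriptor(name="master.light", location=dict(weight=100))
    assert source.getFullDesignLocation(doc) == {"weight": 100, "width": 11}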
class RuleDescriptor(SimpleDescriptor):
"""Represents the rule descriptor element: a set of glyph substitutions to
trigger conditionally in some parts of the designspace.
.. code:: python
r1 = RuleDescriptor()
r1.name = "unique.rule.name"
r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
r1.conditionSets.append([dict(...), dict(...)])
r1.subs.append(("a", "a.alt"))
.. code:: xml
<!-- optional: list of substitution rules -->
<rules>
<rule name="vertical.bars">
<conditionset>
<condition minimum="250.000000" maximum="750.000000" name="weight"/>
<condition minimum="100" name="width"/>
<condition minimum="10" maximum="40" name="optical"/>
</conditionset>
<sub name="cent" with="cent.alt"/>
<sub name="dollar" with="dollar.alt"/>
</rule>
</rules>
"""
_attrs = ['name', 'conditionSets', 'subs'] # what do we need here
def __init__(self, *, name=None, conditionSets=None, subs=None):
self.name = name
"""string. Unique name for this rule. Can be used to reference this rule data."""
# list of lists of dict(name='aaaa', minimum=0, maximum=1000)
self.conditionSets = conditionSets or []
"""a list of conditionsets.
- Each conditionset is a list of conditions.
- Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
"""
# list of substitutions stored as tuples of glyphnames ("a", "a.alt")
self.subs = subs or []
"""list of substitutions.
- Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
- Note: By default, rules are applied first, before other text
shaping/OpenType layout, as they are part of the
`Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
See ref:`rules-element` § Attributes.
"""
def evaluateRule(rule, location):
"""Return True if any of the rule's conditionsets matches the given location."""
return any(evaluateConditions(c, location) for c in rule.conditionSets)
def evaluateConditions(conditions, location):
"""Return True if all the conditions matches the given location.
- If a condition has no minimum, check for < maximum.
- If a condition has no maximum, check for > minimum.
"""
for cd in conditions:
value = location[cd['name']]
if cd.get('minimum') is None:
if value > cd['maximum']:
return False
elif cd.get('maximum') is None:
if cd['minimum'] > value:
return False
elif not cd['minimum'] <= value <= cd['maximum']:
return False
return True
def processRules(rules, location, glyphNames):
"""Apply these rules at this location to these glyphnames.
Return a new list of glyphNames with substitutions applied.
- rule order matters
"""
newNames = []
for rule in rules:
if evaluateRule(rule, location):
for name in glyphNames:
swap = False
for a, b in rule.subs:
if name == a:
swap = True
break
if swap:
newNames.append(b)
else:
newNames.append(name)
glyphNames = newNames
newNames = []
return glyphNames
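# A small sketch with a hypothetical rule, showing how the three helpers above
# cooperate: evaluateConditions/evaluateRule gate on the location, then
# processRules swaps glyph names according to rule.subs.
def _rules_example():
    rule = RuleDescriptor(
        name="sketch.rule",
        conditionSets=[[dict(name="weight", minimum=500, maximum=1000)]],
        subs=[("dollar", "dollar.alt")],
    )
    assert evaluateRule(rule, {"weight": 700})
    assert processRules([rule], {"weight": 700}, ["dollar", "a"]) == ["dollar.alt", "a"]
    assert processRules([rule], {"weight": 300}, ["dollar", "a"]) == ["dollar", "a"]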
AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
SimpleLocationDict = Dict[str, float]
class InstanceDescriptor(SimpleDescriptor):
"""Simple container for data related to the instance
.. code:: python
i2 = InstanceDescriptor()
i2.path = instancePath2
i2.familyName = "InstanceFamilyName"
i2.styleName = "InstanceStyleName"
i2.name = "instance.ufo2"
# anisotropic location
i2.designLocation = dict(weight=500, width=(400,300))
i2.postScriptFontName = "InstancePostscriptName"
i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
i2.styleMapStyleName = "InstanceStyleMapStyleName"
i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
doc.addInstance(i2)
"""
flavor = "instance"
_defaultLanguageCode = "en"
_attrs = ['filename',
'path',
'name',
'locationLabel',
'designLocation',
'userLocation',
'familyName',
'styleName',
'postScriptFontName',
'styleMapFamilyName',
'styleMapStyleName',
'localisedFamilyName',
'localisedStyleName',
'localisedStyleMapFamilyName',
'localisedStyleMapStyleName',
'glyphs',
'kerning',
'info',
'lib']
filename = posixpath_property("_filename")
path = posixpath_property("_path")
def __init__(
self,
*,
filename=None,
path=None,
font=None,
name=None,
location=None,
locationLabel=None,
designLocation=None,
userLocation=None,
familyName=None,
styleName=None,
postScriptFontName=None,
styleMapFamilyName=None,
styleMapStyleName=None,
localisedFamilyName=None,
localisedStyleName=None,
localisedStyleMapFamilyName=None,
localisedStyleMapStyleName=None,
glyphs=None,
kerning=True,
info=True,
lib=None,
):
self.filename = filename
"""string. Relative path to the instance file, **as it is
in the document**. The file may or may not exist.
MutatorMath + VarLib.
"""
self.path = path
"""string. Absolute path to the instance file, calculated from
the document path and the string in the filename attr. The file may
or may not exist.
MutatorMath.
"""
self.font = font
"""Same as :attr:`SourceDescriptor.font`
.. seealso:: :attr:`SourceDescriptor.font`
"""
self.name = name
"""string. Unique identifier name of the instance, used to
identify it if it needs to be referenced from elsewhere in the
document.
"""
self.locationLabel = locationLabel
"""Name of a :class:`LocationLabelDescriptor`. If
provided, the instance should have the same location as the
LocationLabel.
.. seealso::
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.designLocation: AnisotropicLocationDict = designLocation if designLocation is not None else (location or {})
"""dict. Axis values for this instance, in design space coordinates.
MutatorMath + Varlib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.userLocation: SimpleLocationDict = userLocation or {}
"""dict. Axis values for this instance, in user space coordinates.
MutatorMath + Varlib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.familyName = familyName
"""string. Family name of this instance.
MutatorMath + Varlib.
"""
self.styleName = styleName
"""string. Style name of this instance.
MutatorMath + Varlib.
"""
self.postScriptFontName = postScriptFontName
"""string. Postscript fontname for this instance.
MutatorMath + Varlib.
"""
self.styleMapFamilyName = styleMapFamilyName
"""string. StyleMap familyname for this instance.
MutatorMath + Varlib.
"""
self.styleMapStyleName = styleMapStyleName
"""string. StyleMap stylename for this instance.
MutatorMath + Varlib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name
strings, keyed by language code.
"""
self.localisedStyleName = localisedStyleName or {}
"""dict. A dictionary of localised stylename
strings, keyed by language code.
"""
self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {}
"""A dictionary of localised style map
familyname strings, keyed by language code.
"""
self.localisedStyleMapStyleName = localisedStyleMapStyleName or {}
"""A dictionary of localised style map
stylename strings, keyed by language code.
"""
self.glyphs = glyphs or {}
"""dict for special master definitions for glyphs. If glyphs
need special masters (to record the results of executed rules for
example).
MutatorMath.
.. deprecated:: 5.0
Use rules or sparse sources instead.
"""
self.kerning = kerning
""" bool. Indicates if this instance needs its kerning
calculated.
MutatorMath.
.. deprecated:: 5.0
"""
self.info = info
"""bool. Indicated if this instance needs the interpolating
font.info calculated.
.. deprecated:: 5.0
"""
self.lib = lib or {}
"""Custom data associated with this instance."""
@property
def location(self):
"""dict. Axis values for this instance.
MutatorMath + Varlib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
"""
return self.designLocation
@location.setter
def location(self, location: Optional[AnisotropicLocationDict]):
self.designLocation = location or {}
def setStyleName(self, styleName, languageCode="en"):
"""These methods give easier access to the localised names."""
self.localisedStyleName[languageCode] = tostr(styleName)
def getStyleName(self, languageCode="en"):
return self.localisedStyleName.get(languageCode)
def setFamilyName(self, familyName, languageCode="en"):
self.localisedFamilyName[languageCode] = tostr(familyName)
def getFamilyName(self, languageCode="en"):
return self.localisedFamilyName.get(languageCode)
def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"):
self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName)
def getStyleMapStyleName(self, languageCode="en"):
return self.localisedStyleMapStyleName.get(languageCode)
def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"):
self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName)
def getStyleMapFamilyName(self, languageCode="en"):
return self.localisedStyleMapFamilyName.get(languageCode)
def clearLocation(self, axisName: Optional[str] = None):
"""Clear all location-related fields. Ensures that
        :attr:`designLocation` and :attr:`userLocation` are dictionaries
(possibly empty if clearing everything).
In order to update the location of this instance wholesale, a user
should first clear all the fields, then change the field(s) for which
they have data.
.. code:: python
instance.clearLocation()
instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
instance.userLocation = {'Opsz': 16}
In order to update a single axis location, the user should only clear
that axis, then edit the values:
.. code:: python
instance.clearLocation('Weight')
instance.designLocation['Weight'] = (34, 36.5)
Args:
axisName: if provided, only clear the location for that axis.
.. versionadded:: 5.0
"""
self.locationLabel = None
if axisName is None:
self.designLocation = {}
self.userLocation = {}
else:
if self.designLocation is None:
self.designLocation = {}
if axisName in self.designLocation:
del self.designLocation[axisName]
if self.userLocation is None:
self.userLocation = {}
if axisName in self.userLocation:
del self.userLocation[axisName]
def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]:
"""Get the :class:`LocationLabelDescriptor` instance that matches
        this instance's :attr:`locationLabel`.
Raises if the named label can't be found.
.. versionadded:: 5.0
"""
if self.locationLabel is None:
return None
label = doc.getLocationLabel(self.locationLabel)
if label is None:
raise DesignSpaceDocumentError(
'InstanceDescriptor.getLocationLabelDescriptor(): '
f'unknown location label `{self.locationLabel}` in instance `{self.name}`.'
)
return label
def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
"""Get the complete design location of this instance, by combining data
from the various location fields, default axis values and mappings, and
top-level location labels.
The source of truth for this instance's location is determined for each
axis independently by taking the first not-None field in this list:
- ``locationLabel``: the location along this axis is the same as the
matching STAT format 4 label. No anisotropy.
- ``designLocation[axisName]``: the explicit design location along this
axis, possibly anisotropic.
- ``userLocation[axisName]``: the explicit user location along this
axis. No anisotropy.
- ``axis.default``: default axis value. No anisotropy.
.. versionadded:: 5.0
"""
label = self.getLocationLabelDescriptor(doc)
if label is not None:
return doc.map_forward(label.userLocation) # type: ignore
result: AnisotropicLocationDict = {}
for axis in doc.axes:
if axis.name in self.designLocation:
result[axis.name] = self.designLocation[axis.name]
elif axis.name in self.userLocation:
result[axis.name] = axis.map_forward(self.userLocation[axis.name])
else:
result[axis.name] = axis.map_forward(axis.default)
return result
def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
"""Get the complete user location for this instance.
.. seealso:: :meth:`getFullDesignLocation`
.. versionadded:: 5.0
"""
return doc.map_backward(self.getFullDesignLocation(doc))
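# Sketch of the per-axis precedence described above, with hypothetical axes:
# an explicit design value is used as-is, a user value is mapped forward, and
# any remaining axis falls back to its (mapped) default.
def _instance_location_example():
    doc = DesignSpaceDocument()
    doc.addAxis(AxisDescriptor(name="weight", tag="wght",
                               minimum=100, default=400, maximum=900))
    doc.addAxis(AxisDescriptor(name="width", tag="wdth",
                               minimum=50, default=100, maximum=200,
                               map=[(50, 6), (100, 11), (200, 20)]))
    instance = InstanceDescriptor(designLocation={"weight": 120},
                                  userLocation={"width": 200})
    assert instance.getFullDesignLocation(doc) == {"weight": 120, "width": 20}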
def tagForAxisName(name):
# try to find or make a tag name for this axis name
names = {
'weight': ('wght', dict(en = 'Weight')),
'width': ('wdth', dict(en = 'Width')),
'optical': ('opsz', dict(en = 'Optical Size')),
'slant': ('slnt', dict(en = 'Slant')),
'italic': ('ital', dict(en = 'Italic')),
}
if name.lower() in names:
return names[name.lower()]
if len(name) < 4:
tag = name + "*" * (4 - len(name))
else:
tag = name[:4]
return tag, dict(en=name)
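# Quick sketch of the fallbacks above, with hypothetical names: registered axis
# names map to their OpenType tags, short names are padded with '*', and longer
# names are truncated to four characters.
def _tag_example():
    assert tagForAxisName("weight") == ("wght", {"en": "Weight"})
    assert tagForAxisName("fo") == ("fo**", {"en": "fo"})
    assert tagForAxisName("Grade") == ("Grad", {"en": "Grade"})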
class AbstractAxisDescriptor(SimpleDescriptor):
flavor = "axis"
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
# opentype tag for this axis
self.tag = tag
"""string. Four letter tag for this axis. Some might be
registered at the `OpenType
specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__.
Privately-defined axis tags must begin with an uppercase letter and
use only uppercase letters or digits.
"""
# name of the axis used in locations
self.name = name
"""string. Name of the axis as it is used in the location dicts.
MutatorMath + Varlib.
"""
# names for UI purposes, if this is not a standard axis,
self.labelNames = labelNames or {}
"""dict. When defining a non-registered axis, it will be
necessary to define user-facing readable names for the axis. Keyed by
xml:lang code. Values are required to be ``unicode`` strings, even if
they only contain ASCII characters.
"""
self.hidden = hidden
"""bool. Whether this axis should be hidden in user interfaces.
"""
self.map = map or []
"""list of input / output values that can describe a warp of user space
to design space coordinates. If no map values are present, it is assumed
user space is the same as design space, as in [(minimum, minimum),
(maximum, maximum)].
Varlib.
"""
self.axisOrdering = axisOrdering
"""STAT table field ``axisOrdering``.
See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_
.. versionadded:: 5.0
"""
self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
"""STAT table entries for Axis Value Tables format 1, 2, 3.
See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_
.. versionadded:: 5.0
"""
class AxisDescriptor(AbstractAxisDescriptor):
""" Simple container for the axis data.
Add more localisations?
.. code:: python
a1 = AxisDescriptor()
a1.minimum = 1
a1.maximum = 1000
a1.default = 400
a1.name = "weight"
a1.tag = "wght"
a1.labelNames['fa-IR'] = "قطر"
a1.labelNames['en'] = "Wéíght"
a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
a1.axisOrdering = 1
a1.axisLabels = [
AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
]
doc.addAxis(a1)
"""
_attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels']
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
minimum=None,
default=None,
maximum=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
super().__init__(
tag=tag,
name=name,
labelNames=labelNames,
hidden=hidden,
map=map,
axisOrdering=axisOrdering,
axisLabels=axisLabels,
)
self.minimum = minimum
"""number. The minimum value for this axis in user space.
MutatorMath + Varlib.
"""
self.maximum = maximum
"""number. The maximum value for this axis in user space.
MutatorMath + Varlib.
"""
self.default = default
"""number. The default value for this axis, i.e. when a new location is
created, this is the value this axis will get in user space.
MutatorMath + Varlib.
"""
def serialize(self):
# output to a dict, used in testing
return dict(
tag=self.tag,
name=self.name,
labelNames=self.labelNames,
maximum=self.maximum,
minimum=self.minimum,
default=self.default,
hidden=self.hidden,
map=self.map,
axisOrdering=self.axisOrdering,
axisLabels=self.axisLabels,
)
def map_forward(self, v):
"""Maps value from axis mapping's input (user) to output (design)."""
from fontTools.varLib.models import piecewiseLinearMap
if not self.map:
return v
return piecewiseLinearMap(v, {k: v for k, v in self.map})
def map_backward(self, v):
"""Maps value from axis mapping's output (design) to input (user)."""
from fontTools.varLib.models import piecewiseLinearMap
if isinstance(v, tuple):
v = v[0]
if not self.map:
return v
return piecewiseLinearMap(v, {v: k for k, v in self.map})
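# Small sketch of the user-to-design warp, reusing the map values from the
# class docstring above: exact stops map directly, values between stops are
# interpolated piecewise-linearly, and map_backward inverts the warp.
def _axis_map_example():
    axis = AxisDescriptor(name="weight", tag="wght",
                          minimum=1, default=400, maximum=1000,
                          map=[(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)])
    assert axis.map_forward(400.0) == 66.0
    assert axis.map_forward(700.0) == 528.0
    assert axis.map_backward(66.0) == 400.0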
class DiscreteAxisDescriptor(AbstractAxisDescriptor):
"""Container for discrete axis data.
Use this for axes that do not interpolate. The main difference from a
continuous axis is that a continuous axis has a ``minimum`` and ``maximum``,
while a discrete axis has a list of ``values``.
Example: an Italic axis with 2 stops, Roman and Italic, that are not
compatible. The axis still allows to bind together the full font family,
which is useful for the STAT table, however it can't become a variation
axis in a VF.
.. code:: python
a2 = DiscreteAxisDescriptor()
a2.values = [0, 1]
a2.name = "Italic"
a2.tag = "ITAL"
a2.labelNames['fr'] = "Italique"
a2.map = [(0, 0), (1, -11)]
a2.axisOrdering = 2
a2.axisLabels = [
AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
]
doc.addAxis(a2)
.. versionadded:: 5.0
"""
flavor = "axis"
_attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels')
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
values=None,
default=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
super().__init__(
tag=tag,
name=name,
labelNames=labelNames,
hidden=hidden,
map=map,
axisOrdering=axisOrdering,
axisLabels=axisLabels,
)
self.default: float = default
"""The default value for this axis, i.e. when a new location is
created, this is the value this axis will get in user space.
However, this default value is less important than in continuous axes:
- it doesn't define the "neutral" version of outlines from which
deltas would apply, as this axis does not interpolate.
- it doesn't provide the reference glyph set for the designspace, as
fonts at each value can have different glyph sets.
"""
self.values: List[float] = values or []
"""List of possible values for this axis. Contrary to continuous axes,
only the values in this list can be taken by the axis, nothing in-between.
"""
def map_forward(self, value):
"""Maps value from axis mapping's input to output.
Returns value unchanged if no mapping entry is found.
Note: for discrete axes, each value must have its mapping entry, if
you intend that value to be mapped.
"""
return next((v for k, v in self.map if k == value), value)
def map_backward(self, value):
"""Maps value from axis mapping's output to input.
Returns value unchanged if no mapping entry is found.
Note: for discrete axes, each value must have its mapping entry, if
you intend that value to be mapped.
"""
if isinstance(value, tuple):
value = value[0]
return next((k for k, v in self.map if v == value), value)
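# Sketch using the Italic example from the class docstring: discrete maps are
# exact lookups, and values without a mapping entry pass through unchanged.
def _discrete_map_example():
    ital = DiscreteAxisDescriptor(name="Italic", tag="ITAL",
                                  values=[0, 1], default=0,
                                  map=[(0, 0), (1, -11)])
    assert ital.map_forward(1) == -11
    assert ital.map_backward(-11) == 1
    assert ital.map_forward(2) == 2  # no mapping entry: passed through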
class AxisLabelDescriptor(SimpleDescriptor):
"""Container for axis label data.
Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
All values are user values.
See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_
The STAT format of the Axis value depends on which field are filled-in,
see :meth:`getFormat`
.. versionadded:: 5.0
"""
flavor = "label"
_attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames')
def __init__(
self,
*,
name,
userValue,
userMinimum=None,
userMaximum=None,
elidable=False,
olderSibling=False,
linkedUserValue=None,
labelNames=None,
):
self.userMinimum: Optional[float] = userMinimum
"""STAT field ``rangeMinValue`` (format 2)."""
self.userValue: float = userValue
"""STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
self.userMaximum: Optional[float] = userMaximum
"""STAT field ``rangeMaxValue`` (format 2)."""
self.name: str = name
"""Label for this axis location, STAT field ``valueNameID``."""
self.elidable: bool = elidable
"""STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.olderSibling: bool = olderSibling
"""STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.linkedUserValue: Optional[float] = linkedUserValue
"""STAT field ``linkedValue`` (format 3)."""
self.labelNames: MutableMapping[str, str] = labelNames or {}
"""User-facing translations of this location's label. Keyed by
``xml:lang`` code.
"""
def getFormat(self) -> int:
"""Determine which format of STAT Axis value to use to encode this label.
=========== ========= =========== =========== ===============
STAT Format userValue userMinimum userMaximum linkedUserValue
=========== ========= =========== =========== ===============
1 ✅ ❌ ❌ ❌
2 ✅ ✅ ✅ ❌
3 ✅ ❌ ❌ ✅
=========== ========= =========== =========== ===============
"""
if self.linkedUserValue is not None:
return 3
if self.userMinimum is not None or self.userMaximum is not None:
return 2
return 1
@property
def defaultName(self) -> str:
"""Return the English name from :attr:`labelNames` or the :attr:`name`."""
return self.labelNames.get("en") or self.name
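# Sketch of the format selection table in getFormat, with hypothetical labels:
# a bare value is format 1, a range is format 2, a linked value is format 3.
def _label_format_example():
    assert AxisLabelDescriptor(name="Regular", userValue=400).getFormat() == 1
    assert AxisLabelDescriptor(name="Medium", userValue=500,
                                userMinimum=450, userMaximum=550).getFormat() == 2
    assert AxisLabelDescriptor(name="Bold", userValue=700,
                                linkedUserValue=400).getFormat() == 3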
class LocationLabelDescriptor(SimpleDescriptor):
"""Container for location label data.
Analogue of OpenType's STAT data for a free-floating location (format 4).
All values are user values.
See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_
.. versionadded:: 5.0
"""
flavor = "label"
_attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')
def __init__(
self,
*,
name,
userLocation,
elidable=False,
olderSibling=False,
labelNames=None,
):
self.name: str = name
"""Label for this named location, STAT field ``valueNameID``."""
self.userLocation: SimpleLocationDict = userLocation or {}
"""Location in user coordinates along each axis.
If an axis is not mentioned, it is assumed to be at its default location.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullUserLocation`
"""
self.elidable: bool = elidable
"""STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.olderSibling: bool = olderSibling
"""STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.labelNames: Dict[str, str] = labelNames or {}
"""User-facing translations of this location's label. Keyed by
xml:lang code.
"""
@property
def defaultName(self) -> str:
"""Return the English name from :attr:`labelNames` or the :attr:`name`."""
return self.labelNames.get("en") or self.name
def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
"""Get the complete user location of this label, by combining data
from the explicit user location and default axis values.
.. versionadded:: 5.0
"""
return {
axis.name: self.userLocation.get(axis.name, axis.default)
for axis in doc.axes
}
class VariableFontDescriptor(SimpleDescriptor):
"""Container for variable fonts, sub-spaces of the Designspace.
Use-cases:
- From a single DesignSpace with discrete axes, define 1 variable font
per value on the discrete axes. Before version 5, you would have needed
1 DesignSpace per such variable font, and a lot of data duplication.
- From a big variable font with many axes, define subsets of that variable
font that only include some axes and freeze other axes at a given location.
.. versionadded:: 5.0
"""
flavor = "variable-font"
_attrs = ('filename', 'axisSubsets', 'lib')
filename = posixpath_property("_filename")
def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
self.name: str = name
"""string, required. Name of this variable to identify it during the
build process and from other parts of the document, and also as a
filename in case the filename property is empty.
VarLib.
"""
self.filename: str = filename
"""string, optional. Relative path to the variable font file, **as it is
in the document**. The file may or may not exist.
If not specified, the :attr:`name` will be used as a basename for the file.
"""
self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
"""Axis subsets to include in this variable font.
If an axis is not mentioned, assume that we only want the default
location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
"""
self.lib: MutableMapping[str, Any] = lib or {}
"""Custom data associated with this variable font."""
class RangeAxisSubsetDescriptor(SimpleDescriptor):
"""Subset of a continuous axis to include in a variable font.
.. versionadded:: 5.0
"""
flavor = "axis-subset"
_attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')
def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf):
self.name: str = name
"""Name of the :class:`AxisDescriptor` to subset."""
self.userMinimum: float = userMinimum
"""New minimum value of the axis in the target variable font.
If not specified, assume the same minimum value as the full axis.
(default = ``-math.inf``)
"""
self.userDefault: Optional[float] = userDefault
"""New default value of the axis in the target variable font.
If not specified, assume the same default value as the full axis.
(default = ``None``)
"""
self.userMaximum: float = userMaximum
"""New maximum value of the axis in the target variable font.
If not specified, assume the same maximum value as the full axis.
(default = ``math.inf``)
"""
class ValueAxisSubsetDescriptor(SimpleDescriptor):
"""Single value of a discrete or continuous axis to use in a variable font.
.. versionadded:: 5.0
"""
flavor = "axis-subset"
_attrs = ('name', 'userValue')
def __init__(self, *, name, userValue):
self.name: str = name
"""Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
to "snapshot" or "freeze".
"""
self.userValue: float = userValue
"""Value in user coordinates at which to freeze the given axis."""
class BaseDocWriter(object):
_whiteSpace = " "
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
@classmethod
def getAxisDecriptor(cls):
return cls.axisDescriptorClass()
@classmethod
def getSourceDescriptor(cls):
return cls.sourceDescriptorClass()
@classmethod
def getInstanceDescriptor(cls):
return cls.instanceDescriptorClass()
@classmethod
def getRuleDescriptor(cls):
return cls.ruleDescriptorClass()
def __init__(self, documentPath, documentObject: DesignSpaceDocument):
self.path = documentPath
self.documentObject = documentObject
self.effectiveFormatTuple = self._getEffectiveFormatTuple()
self.root = ET.Element("designspace")
def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple)
if self.documentObject.axes or self.documentObject.elidedFallbackName is not None:
axesElement = ET.Element("axes")
if self.documentObject.elidedFallbackName is not None:
axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName
self.root.append(axesElement)
for axisObject in self.documentObject.axes:
self._addAxis(axisObject)
if self.documentObject.locationLabels:
labelsElement = ET.Element("labels")
for labelObject in self.documentObject.locationLabels:
self._addLocationLabel(labelsElement, labelObject)
self.root.append(labelsElement)
if self.documentObject.rules:
if getattr(self.documentObject, "rulesProcessingLast", False):
attributes = {"processing": "last"}
else:
attributes = {}
self.root.append(ET.Element("rules", attributes))
for ruleObject in self.documentObject.rules:
self._addRule(ruleObject)
if self.documentObject.sources:
self.root.append(ET.Element("sources"))
for sourceObject in self.documentObject.sources:
self._addSource(sourceObject)
if self.documentObject.variableFonts:
variableFontsElement = ET.Element("variable-fonts")
for variableFont in self.documentObject.variableFonts:
self._addVariableFont(variableFontsElement, variableFont)
self.root.append(variableFontsElement)
if self.documentObject.instances:
self.root.append(ET.Element("instances"))
for instanceObject in self.documentObject.instances:
self._addInstance(instanceObject)
if self.documentObject.lib:
self._addLib(self.root, self.documentObject.lib, 2)
tree = ET.ElementTree(self.root)
tree.write(
self.path,
encoding=encoding,
method='xml',
xml_declaration=xml_declaration,
pretty_print=pretty,
)
def _getEffectiveFormatTuple(self):
"""Try to use the version specified in the document, or a sufficiently
recent version to be able to encode what the document contains.
"""
minVersion = self.documentObject.formatTuple
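        # Any of the following document features can only be encoded in the 5.0
        # format, so bump the effective version when one of them is present: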
if (
any(
isinstance(axis, DiscreteAxisDescriptor) or
axis.axisOrdering is not None or
axis.axisLabels
for axis in self.documentObject.axes
) or
self.documentObject.locationLabels or
any(
source.localisedFamilyName
for source in self.documentObject.sources
) or
self.documentObject.variableFonts or
any(
instance.locationLabel or
instance.userLocation
for instance in self.documentObject.instances
)
):
if minVersion < (5, 0):
minVersion = (5, 0)
return minVersion
def _makeLocationElement(self, locationObject, name=None):
""" Convert Location dict to a locationElement."""
locElement = ET.Element("location")
if name is not None:
locElement.attrib['name'] = name
validatedLocation = self.documentObject.newDefaultLocation()
for axisName, axisValue in locationObject.items():
if axisName in validatedLocation:
# only accept values we know
validatedLocation[axisName] = axisValue
for dimensionName, dimensionValue in validatedLocation.items():
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = dimensionName
if type(dimensionValue) == tuple:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0])
dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue)
locElement.append(dimElement)
return locElement, validatedLocation
def intOrFloat(self, num):
if int(num) == num:
return "%d" % num
return ("%f" % num).rstrip('0').rstrip('.')
def _addRule(self, ruleObject):
# if none of the conditions have minimum or maximum values, do not add the rule.
ruleElement = ET.Element('rule')
if ruleObject.name is not None:
ruleElement.attrib['name'] = ruleObject.name
for conditions in ruleObject.conditionSets:
conditionsetElement = ET.Element('conditionset')
for cond in conditions:
if cond.get('minimum') is None and cond.get('maximum') is None:
# neither is defined, don't add this condition
continue
conditionElement = ET.Element('condition')
conditionElement.attrib['name'] = cond.get('name')
if cond.get('minimum') is not None:
conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum'))
if cond.get('maximum') is not None:
conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum'))
conditionsetElement.append(conditionElement)
if len(conditionsetElement):
ruleElement.append(conditionsetElement)
for sub in ruleObject.subs:
subElement = ET.Element('sub')
subElement.attrib['name'] = sub[0]
subElement.attrib['with'] = sub[1]
ruleElement.append(subElement)
if len(ruleElement):
self.root.findall('.rules')[0].append(ruleElement)
def _addAxis(self, axisObject):
axisElement = ET.Element('axis')
axisElement.attrib['tag'] = axisObject.tag
axisElement.attrib['name'] = axisObject.name
self._addLabelNames(axisElement, axisObject.labelNames)
if axisObject.map:
for inputValue, outputValue in axisObject.map:
mapElement = ET.Element('map')
mapElement.attrib['input'] = self.intOrFloat(inputValue)
mapElement.attrib['output'] = self.intOrFloat(outputValue)
axisElement.append(mapElement)
if axisObject.axisOrdering or axisObject.axisLabels:
labelsElement = ET.Element('labels')
if axisObject.axisOrdering is not None:
labelsElement.attrib['ordering'] = str(axisObject.axisOrdering)
for label in axisObject.axisLabels:
self._addAxisLabel(labelsElement, label)
axisElement.append(labelsElement)
if isinstance(axisObject, AxisDescriptor):
axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum)
axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum)
elif isinstance(axisObject, DiscreteAxisDescriptor):
axisElement.attrib['values'] = " ".join(self.intOrFloat(v) for v in axisObject.values)
axisElement.attrib['default'] = self.intOrFloat(axisObject.default)
if axisObject.hidden:
axisElement.attrib['hidden'] = "1"
self.root.findall('.axes')[0].append(axisElement)
def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue)
if label.userMinimum is not None:
labelElement.attrib['userminimum'] = self.intOrFloat(label.userMinimum)
if label.userMaximum is not None:
labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum)
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
if label.linkedUserValue is not None:
labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue)
self._addLabelNames(labelElement, label.labelNames)
axisElement.append(labelElement)
def _addLabelNames(self, parentElement, labelNames):
for languageCode, labelName in sorted(labelNames.items()):
languageElement = ET.Element('labelname')
languageElement.attrib[XML_LANG] = languageCode
languageElement.text = labelName
parentElement.append(languageElement)
def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
self._addLabelNames(labelElement, label.labelNames)
self._addLocationElement(labelElement, userLocation=label.userLocation)
parentElement.append(labelElement)
def _addLocationElement(
self,
parentElement,
*,
designLocation: AnisotropicLocationDict = None,
userLocation: SimpleLocationDict = None
):
locElement = ET.Element("location")
for axis in self.documentObject.axes:
if designLocation is not None and axis.name in designLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = designLocation[axis.name]
if isinstance(value, tuple):
dimElement.attrib['xvalue'] = self.intOrFloat(value[0])
dimElement.attrib['yvalue'] = self.intOrFloat(value[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(value)
locElement.append(dimElement)
elif userLocation is not None and axis.name in userLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = userLocation[axis.name]
dimElement.attrib['uservalue'] = self.intOrFloat(value)
locElement.append(dimElement)
if len(locElement) > 0:
parentElement.append(locElement)
def _addInstance(self, instanceObject):
instanceElement = ET.Element('instance')
if instanceObject.name is not None:
instanceElement.attrib['name'] = instanceObject.name
if instanceObject.locationLabel is not None:
instanceElement.attrib['location'] = instanceObject.locationLabel
if instanceObject.familyName is not None:
instanceElement.attrib['familyname'] = instanceObject.familyName
if instanceObject.styleName is not None:
instanceElement.attrib['stylename'] = instanceObject.styleName
# add localisations
if instanceObject.localisedStyleName:
languageCodes = list(instanceObject.localisedStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedStyleNameElement = ET.Element('stylename')
localisedStyleNameElement.attrib[XML_LANG] = code
localisedStyleNameElement.text = instanceObject.getStyleName(code)
instanceElement.append(localisedStyleNameElement)
if instanceObject.localisedFamilyName:
languageCodes = list(instanceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = instanceObject.getFamilyName(code)
instanceElement.append(localisedFamilyNameElement)
if instanceObject.localisedStyleMapStyleName:
languageCodes = list(instanceObject.localisedStyleMapStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapStyleNameElement = ET.Element('stylemapstylename')
localisedStyleMapStyleNameElement.attrib[XML_LANG] = code
localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code)
instanceElement.append(localisedStyleMapStyleNameElement)
if instanceObject.localisedStyleMapFamilyName:
languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname')
localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code
localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code)
instanceElement.append(localisedStyleMapFamilyNameElement)
if self.effectiveFormatTuple >= (5, 0):
if instanceObject.locationLabel is None:
self._addLocationElement(
instanceElement,
designLocation=instanceObject.designLocation,
userLocation=instanceObject.userLocation
)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
if instanceObject.location is not None:
locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location)
instanceElement.append(locationElement)
if instanceObject.filename is not None:
instanceElement.attrib['filename'] = instanceObject.filename
if instanceObject.postScriptFontName is not None:
instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName
if instanceObject.styleMapFamilyName is not None:
instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName
if instanceObject.styleMapStyleName is not None:
instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName
if self.effectiveFormatTuple < (5, 0):
# Deprecated members as of version 5.0
if instanceObject.glyphs:
if instanceElement.findall('.glyphs') == []:
glyphsElement = ET.Element('glyphs')
instanceElement.append(glyphsElement)
glyphsElement = instanceElement.findall('.glyphs')[0]
for glyphName, data in sorted(instanceObject.glyphs.items()):
glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data)
glyphsElement.append(glyphElement)
if instanceObject.kerning:
kerningElement = ET.Element('kerning')
instanceElement.append(kerningElement)
if instanceObject.info:
infoElement = ET.Element('info')
instanceElement.append(infoElement)
self._addLib(instanceElement, instanceObject.lib, 4)
self.root.findall('.instances')[0].append(instanceElement)
def _addSource(self, sourceObject):
sourceElement = ET.Element("source")
if sourceObject.filename is not None:
sourceElement.attrib['filename'] = sourceObject.filename
if sourceObject.name is not None:
if sourceObject.name.find("temp_master") != 0:
# do not save temporary source names
sourceElement.attrib['name'] = sourceObject.name
if sourceObject.familyName is not None:
sourceElement.attrib['familyname'] = sourceObject.familyName
if sourceObject.styleName is not None:
sourceElement.attrib['stylename'] = sourceObject.styleName
if sourceObject.layerName is not None:
sourceElement.attrib['layer'] = sourceObject.layerName
if sourceObject.localisedFamilyName:
languageCodes = list(sourceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
sourceElement.append(localisedFamilyNameElement)
if sourceObject.copyLib:
libElement = ET.Element('lib')
libElement.attrib['copy'] = "1"
sourceElement.append(libElement)
if sourceObject.copyGroups:
groupsElement = ET.Element('groups')
groupsElement.attrib['copy'] = "1"
sourceElement.append(groupsElement)
if sourceObject.copyFeatures:
featuresElement = ET.Element('features')
featuresElement.attrib['copy'] = "1"
sourceElement.append(featuresElement)
if sourceObject.copyInfo or sourceObject.muteInfo:
infoElement = ET.Element('info')
if sourceObject.copyInfo:
infoElement.attrib['copy'] = "1"
if sourceObject.muteInfo:
infoElement.attrib['mute'] = "1"
sourceElement.append(infoElement)
if sourceObject.muteKerning:
kerningElement = ET.Element("kerning")
kerningElement.attrib["mute"] = '1'
sourceElement.append(kerningElement)
if sourceObject.mutedGlyphNames:
for name in sourceObject.mutedGlyphNames:
glyphElement = ET.Element("glyph")
glyphElement.attrib["name"] = name
glyphElement.attrib["mute"] = '1'
sourceElement.append(glyphElement)
if self.effectiveFormatTuple >= (5, 0):
self._addLocationElement(sourceElement, designLocation=sourceObject.location)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location)
sourceElement.append(locationElement)
self.root.findall('.sources')[0].append(sourceElement)
def _addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None:
vfElement = ET.Element('variable-font')
vfElement.attrib['name'] = vf.name
if vf.filename is not None:
vfElement.attrib['filename'] = vf.filename
if vf.axisSubsets:
subsetsElement = ET.Element('axis-subsets')
for subset in vf.axisSubsets:
subsetElement = ET.Element('axis-subset')
subsetElement.attrib['name'] = subset.name
if isinstance(subset, RangeAxisSubsetDescriptor):
if subset.userMinimum != -math.inf:
subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum)
if subset.userMaximum != math.inf:
subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum)
if subset.userDefault is not None:
subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault)
elif isinstance(subset, ValueAxisSubsetDescriptor):
subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue)
subsetsElement.append(subsetElement)
vfElement.append(subsetsElement)
self._addLib(vfElement, vf.lib, 4)
parentElement.append(vfElement)
def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
if not data:
return
libElement = ET.Element('lib')
libElement.append(plistlib.totree(data, indent_level=indent_level))
parentElement.append(libElement)
def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
glyphElement = ET.Element('glyph')
if data.get('mute'):
glyphElement.attrib['mute'] = "1"
if data.get('unicodes') is not None:
glyphElement.attrib['unicode'] = " ".join([hex(u) for u in data.get('unicodes')])
if data.get('instanceLocation') is not None:
locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation'))
glyphElement.append(locationElement)
if glyphName is not None:
glyphElement.attrib['name'] = glyphName
if data.get('note') is not None:
noteElement = ET.Element('note')
noteElement.text = data.get('note')
glyphElement.append(noteElement)
if data.get('masters') is not None:
mastersElement = ET.Element("masters")
for m in data.get('masters'):
masterElement = ET.Element("master")
if m.get('glyphName') is not None:
masterElement.attrib['glyphname'] = m.get('glyphName')
if m.get('font') is not None:
masterElement.attrib['source'] = m.get('font')
if m.get('location') is not None:
locationElement, m['location'] = self._makeLocationElement(m.get('location'))
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
return glyphElement
class BaseDocReader(LogMixin):
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontsDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
def __init__(self, documentPath, documentObject):
self.path = documentPath
self.documentObject = documentObject
tree = ET.parse(self.path)
self.root = tree.getroot()
self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
self._axes = []
self.rules = []
self.sources = []
self.instances = []
self.axisDefaults = {}
self._strictAxisNames = True
@classmethod
def fromstring(cls, string, documentObject):
f = BytesIO(tobytes(string, encoding="utf-8"))
self = cls(f, documentObject)
self.path = None
return self
def read(self):
self.readAxes()
self.readLabels()
self.readRules()
self.readVariableFonts()
self.readSources()
self.readInstances()
self.readLib()
def readRules(self):
# we also need to read any conditions that are outside of a condition set.
rules = []
rulesElement = self.root.find(".rules")
if rulesElement is not None:
processingValue = rulesElement.attrib.get("processing", "first")
if processingValue not in {"first", "last"}:
raise DesignSpaceDocumentError(
"<rules> processing attribute value is not valid: %r, "
"expected 'first' or 'last'" % processingValue)
self.documentObject.rulesProcessingLast = processingValue == "last"
for ruleElement in self.root.findall(".rules/rule"):
ruleObject = self.ruleDescriptorClass()
ruleName = ruleObject.name = ruleElement.attrib.get("name")
# read any stray conditions outside a condition set
externalConditions = self._readConditionElements(
ruleElement,
ruleName,
)
if externalConditions:
ruleObject.conditionSets.append(externalConditions)
self.log.info(
"Found stray rule conditions outside a conditionset. "
"Wrapped them in a new conditionset."
)
# read the conditionsets
for conditionSetElement in ruleElement.findall('.conditionset'):
conditionSet = self._readConditionElements(
conditionSetElement,
ruleName,
)
if conditionSet is not None:
ruleObject.conditionSets.append(conditionSet)
for subElement in ruleElement.findall('.sub'):
a = subElement.attrib['name']
b = subElement.attrib['with']
ruleObject.subs.append((a, b))
rules.append(ruleObject)
self.documentObject.rules = rules
def _readConditionElements(self, parentElement, ruleName=None):
cds = []
for conditionElement in parentElement.findall('.condition'):
cd = {}
cdMin = conditionElement.attrib.get("minimum")
if cdMin is not None:
cd['minimum'] = float(cdMin)
else:
# will allow these to be None, assume axis.minimum
cd['minimum'] = None
cdMax = conditionElement.attrib.get("maximum")
if cdMax is not None:
cd['maximum'] = float(cdMax)
else:
# will allow these to be None, assume axis.maximum
cd['maximum'] = None
cd['name'] = conditionElement.attrib.get("name")
            # sanity check: at least one of minimum/maximum must be present
if cd.get('minimum') is None and cd.get('maximum') is None:
raise DesignSpaceDocumentError(
"condition missing required minimum or maximum in rule" +
(" '%s'" % ruleName if ruleName is not None else ""))
cds.append(cd)
return cds
def readAxes(self):
# read the axes elements, including the warp map.
axesElement = self.root.find(".axes")
if axesElement is not None and 'elidedfallbackname' in axesElement.attrib:
self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname']
axisElements = self.root.findall(".axes/axis")
if not axisElements:
return
for axisElement in axisElements:
if self.documentObject.formatTuple >= (5, 0) and "values" in axisElement.attrib:
axisObject = self.discreteAxisDescriptorClass()
axisObject.values = [float(s) for s in axisElement.attrib["values"].split(" ")]
else:
axisObject = self.axisDescriptorClass()
axisObject.minimum = float(axisElement.attrib.get("minimum"))
axisObject.maximum = float(axisElement.attrib.get("maximum"))
axisObject.default = float(axisElement.attrib.get("default"))
axisObject.name = axisElement.attrib.get("name")
if axisElement.attrib.get('hidden', False):
axisObject.hidden = True
axisObject.tag = axisElement.attrib.get("tag")
for mapElement in axisElement.findall('map'):
a = float(mapElement.attrib['input'])
b = float(mapElement.attrib['output'])
axisObject.map.append((a, b))
for labelNameElement in axisElement.findall('labelname'):
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
for key, lang in labelNameElement.items():
if key == XML_LANG:
axisObject.labelNames[lang] = tostr(labelNameElement.text)
labelElement = axisElement.find(".labels")
if labelElement is not None:
if "ordering" in labelElement.attrib:
axisObject.axisOrdering = int(labelElement.attrib["ordering"])
for label in labelElement.findall(".label"):
axisObject.axisLabels.append(self.readAxisLabel(label))
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
def readAxisLabel(self, element: ET.Element):
xml_attrs = {'userminimum', 'uservalue', 'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
valueStr = element.get("uservalue")
if valueStr is None:
raise DesignSpaceDocumentError("label element must have a uservalue attribute.")
value = float(valueStr)
minimumStr = element.get("userminimum")
minimum = float(minimumStr) if minimumStr is not None else None
maximumStr = element.get("usermaximum")
maximum = float(maximumStr) if maximumStr is not None else None
linkedValueStr = element.get("linkeduservalue")
linkedValue = float(linkedValueStr) if linkedValueStr is not None else None
elidable = True if element.get("elidable") == "true" else False
olderSibling = True if element.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in element.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
return self.axisLabelDescriptorClass(
name=name,
userValue=value,
userMinimum=minimum,
userMaximum=maximum,
elidable=elidable,
olderSibling=olderSibling,
linkedUserValue=linkedValue,
labelNames=labelNames,
)
def readLabels(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'elidable', 'oldersibling'}
for labelElement in self.root.findall(".labels/label"):
unknown_attrs = set(labelElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"Label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = labelElement.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
designLocation, userLocation = self.locationFromElement(labelElement)
if designLocation:
raise DesignSpaceDocumentError(f'<label> element "{name}" must only have user locations (using uservalue="").')
elidable = True if labelElement.get("elidable") == "true" else False
olderSibling = True if labelElement.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in labelElement.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
locationLabel = self.locationLabelDescriptorClass(
name=name,
userLocation=userLocation,
elidable=elidable,
olderSibling=olderSibling,
labelNames=labelNames,
)
self.documentObject.locationLabels.append(locationLabel)
def readVariableFonts(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'filename'}
for variableFontElement in self.root.findall(".variable-fonts/variable-font"):
unknown_attrs = set(variableFontElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}")
name = variableFontElement.get("name")
if name is None:
raise DesignSpaceDocumentError("variable-font element must have a name attribute.")
filename = variableFontElement.get("filename")
axisSubsetsElement = variableFontElement.find(".axis-subsets")
if axisSubsetsElement is None:
raise DesignSpaceDocumentError("variable-font element must contain an axis-subsets element.")
axisSubsets = []
for axisSubset in axisSubsetsElement.iterfind(".axis-subset"):
axisSubsets.append(self.readAxisSubset(axisSubset))
lib = None
libElement = variableFontElement.find(".lib")
if libElement is not None:
lib = plistlib.fromtree(libElement[0])
variableFont = self.variableFontsDescriptorClass(
name=name,
filename=filename,
axisSubsets=axisSubsets,
lib=lib,
)
self.documentObject.variableFonts.append(variableFont)
def readAxisSubset(self, element: ET.Element):
if "uservalue" in element.attrib:
xml_attrs = {'name', 'uservalue'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
userValueStr = element.get("uservalue")
if userValueStr is None:
raise DesignSpaceDocumentError(
"The axis-subset element for a discrete subset must have a uservalue attribute."
)
userValue = float(userValueStr)
return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue)
else:
xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
userMinimum = element.get("userminimum")
userDefault = element.get("userdefault")
userMaximum = element.get("usermaximum")
if userMinimum is not None and userDefault is not None and userMaximum is not None:
return self.rangeAxisSubsetDescriptorClass(
name=name,
userMinimum=float(userMinimum),
userDefault=float(userDefault),
userMaximum=float(userMaximum),
)
if all(v is None for v in (userMinimum, userDefault, userMaximum)):
return self.rangeAxisSubsetDescriptorClass(name=name)
raise DesignSpaceDocumentError(
"axis-subset element must have min/max/default values or none at all."
)
def readSources(self):
for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")):
filename = sourceElement.attrib.get('filename')
if filename is not None and self.path is not None:
sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))
else:
sourcePath = None
sourceName = sourceElement.attrib.get('name')
if sourceName is None:
# add a temporary source name
sourceName = "temp_master.%d" % (sourceCount)
sourceObject = self.sourceDescriptorClass()
sourceObject.path = sourcePath # absolute path to the ufo source
sourceObject.filename = filename # path as it is stored in the document
sourceObject.name = sourceName
familyName = sourceElement.attrib.get("familyname")
if familyName is not None:
sourceObject.familyName = familyName
styleName = sourceElement.attrib.get("stylename")
if styleName is not None:
sourceObject.styleName = styleName
for familyNameElement in sourceElement.findall('familyname'):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
sourceObject.setFamilyName(familyName, lang)
designLocation, userLocation = self.locationFromElement(sourceElement)
if userLocation:
raise DesignSpaceDocumentError(f'<source> element "{sourceName}" must only have design locations (using xvalue="").')
sourceObject.location = designLocation
layerName = sourceElement.attrib.get('layer')
if layerName is not None:
sourceObject.layerName = layerName
for libElement in sourceElement.findall('.lib'):
if libElement.attrib.get('copy') == '1':
sourceObject.copyLib = True
for groupsElement in sourceElement.findall('.groups'):
if groupsElement.attrib.get('copy') == '1':
sourceObject.copyGroups = True
for infoElement in sourceElement.findall(".info"):
if infoElement.attrib.get('copy') == '1':
sourceObject.copyInfo = True
if infoElement.attrib.get('mute') == '1':
sourceObject.muteInfo = True
for featuresElement in sourceElement.findall(".features"):
if featuresElement.attrib.get('copy') == '1':
sourceObject.copyFeatures = True
for glyphElement in sourceElement.findall(".glyph"):
glyphName = glyphElement.attrib.get('name')
if glyphName is None:
continue
if glyphElement.attrib.get('mute') == '1':
sourceObject.mutedGlyphNames.append(glyphName)
for kerningElement in sourceElement.findall(".kerning"):
if kerningElement.attrib.get('mute') == '1':
sourceObject.muteKerning = True
self.documentObject.sources.append(sourceObject)
def locationFromElement(self, element):
"""Read a nested ``<location>`` element inside the given ``element``.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
elementLocation = (None, None)
for locationElement in element.findall('.location'):
elementLocation = self.readLocationElement(locationElement)
break
return elementLocation
def readLocationElement(self, locationElement):
"""Read a ``<location>`` element.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
if self._strictAxisNames and not self.documentObject.axes:
raise DesignSpaceDocumentError("No axes defined")
userLoc = {}
designLoc = {}
for dimensionElement in locationElement.findall(".dimension"):
dimName = dimensionElement.attrib.get("name")
if self._strictAxisNames and dimName not in self.axisDefaults:
                # This dimension refers to an axis that is not defined in this document: warn and skip it.
self.log.warning("Location with undefined axis: \"%s\".", dimName)
continue
userValue = xValue = yValue = None
try:
userValue = dimensionElement.attrib.get('uservalue')
if userValue is not None:
userValue = float(userValue)
except ValueError:
self.log.warning("ValueError in readLocation userValue %3.3f", userValue)
try:
xValue = dimensionElement.attrib.get('xvalue')
if xValue is not None:
xValue = float(xValue)
except ValueError:
self.log.warning("ValueError in readLocation xValue %3.3f", xValue)
try:
yValue = dimensionElement.attrib.get('yvalue')
if yValue is not None:
yValue = float(yValue)
except ValueError:
self.log.warning("ValueError in readLocation yValue %3.3f", yValue)
            # Exactly one of uservalue/xvalue must be provided: error when both or neither are set.
            if (userValue is None) == (xValue is None):
raise DesignSpaceDocumentError(f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"')
if yValue is not None:
if xValue is None:
                    raise DesignSpaceDocumentError(f'Missing xvalue="" for the location dimension "{dimName}" with yvalue="{yValue}"')
designLoc[dimName] = (xValue, yValue)
elif xValue is not None:
designLoc[dimName] = xValue
else:
userLoc[dimName] = userValue
return designLoc, userLoc
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
instanceElements = self.root.findall('.instances/instance')
for instanceElement in instanceElements:
self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo)
def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True):
filename = instanceElement.attrib.get('filename')
if filename is not None and self.documentObject.path is not None:
instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename)
else:
instancePath = None
instanceObject = self.instanceDescriptorClass()
instanceObject.path = instancePath # absolute path to the instance
instanceObject.filename = filename # path as it is stored in the document
name = instanceElement.attrib.get("name")
if name is not None:
instanceObject.name = name
familyname = instanceElement.attrib.get('familyname')
if familyname is not None:
instanceObject.familyName = familyname
stylename = instanceElement.attrib.get('stylename')
if stylename is not None:
instanceObject.styleName = stylename
postScriptFontName = instanceElement.attrib.get('postscriptfontname')
if postScriptFontName is not None:
instanceObject.postScriptFontName = postScriptFontName
styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname')
if styleMapFamilyName is not None:
instanceObject.styleMapFamilyName = styleMapFamilyName
styleMapStyleName = instanceElement.attrib.get('stylemapstylename')
if styleMapStyleName is not None:
instanceObject.styleMapStyleName = styleMapStyleName
# read localised names
for styleNameElement in instanceElement.findall('stylename'):
for key, lang in styleNameElement.items():
if key == XML_LANG:
styleName = styleNameElement.text
instanceObject.setStyleName(styleName, lang)
for familyNameElement in instanceElement.findall('familyname'):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
instanceObject.setFamilyName(familyName, lang)
for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'):
for key, lang in styleMapStyleNameElement.items():
if key == XML_LANG:
styleMapStyleName = styleMapStyleNameElement.text
instanceObject.setStyleMapStyleName(styleMapStyleName, lang)
for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'):
for key, lang in styleMapFamilyNameElement.items():
if key == XML_LANG:
styleMapFamilyName = styleMapFamilyNameElement.text
instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang)
designLocation, userLocation = self.locationFromElement(instanceElement)
locationLabel = instanceElement.attrib.get('location')
if (designLocation or userLocation) and locationLabel is not None:
raise DesignSpaceDocumentError('instance element must have at most one of the location="..." attribute or the nested location element')
instanceObject.locationLabel = locationLabel
instanceObject.userLocation = userLocation or {}
instanceObject.designLocation = designLocation or {}
for glyphElement in instanceElement.findall('.glyphs/glyph'):
self.readGlyphElement(glyphElement, instanceObject)
for infoElement in instanceElement.findall("info"):
self.readInfoElement(infoElement, instanceObject)
for libElement in instanceElement.findall('lib'):
self.readLibElement(libElement, instanceObject)
self.documentObject.instances.append(instanceObject)
def readLibElement(self, libElement, instanceObject):
"""Read the lib element for the given instance."""
instanceObject.lib = plistlib.fromtree(libElement[0])
def readInfoElement(self, infoElement, instanceObject):
""" Read the info element."""
instanceObject.info = True
def readGlyphElement(self, glyphElement, instanceObject):
"""
Read the glyph element, which could look like either one of these:
.. code-block:: xml
<glyph name="b" unicode="0x62"/>
<glyph name="b"/>
<glyph name="b">
<master location="location-token-bbb" source="master-token-aaa2"/>
<master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/>
<note>
This is an instance from an anisotropic interpolation.
</note>
</glyph>
"""
glyphData = {}
glyphName = glyphElement.attrib.get('name')
if glyphName is None:
raise DesignSpaceDocumentError("Glyph object without name attribute")
mute = glyphElement.attrib.get("mute")
if mute == "1":
glyphData['mute'] = True
# unicode
unicodes = glyphElement.attrib.get('unicode')
if unicodes is not None:
try:
unicodes = [int(u, 16) for u in unicodes.split(" ")]
glyphData['unicodes'] = unicodes
except ValueError:
raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes)
for noteElement in glyphElement.findall('.note'):
glyphData['note'] = noteElement.text
break
designLocation, userLocation = self.locationFromElement(glyphElement)
if userLocation:
raise DesignSpaceDocumentError(f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").')
if designLocation is not None:
glyphData['instanceLocation'] = designLocation
glyphSources = None
for masterElement in glyphElement.findall('.masters/master'):
fontSourceName = masterElement.attrib.get('source')
designLocation, userLocation = self.locationFromElement(masterElement)
if userLocation:
raise DesignSpaceDocumentError(f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").')
masterGlyphName = masterElement.attrib.get('glyphname')
if masterGlyphName is None:
# if we don't read a glyphname, use the one we have
masterGlyphName = glyphName
d = dict(font=fontSourceName,
location=designLocation,
glyphName=masterGlyphName)
if glyphSources is None:
glyphSources = []
glyphSources.append(d)
if glyphSources is not None:
glyphData['masters'] = glyphSources
instanceObject.glyphs[glyphName] = glyphData
def readLib(self):
"""Read the lib element for the whole document."""
for libElement in self.root.findall(".lib"):
self.documentObject.lib = plistlib.fromtree(libElement[0])
class DesignSpaceDocument(LogMixin, AsDictMixin):
"""The DesignSpaceDocument object can read and write ``.designspace`` data.
It imports the axes, sources, variable fonts and instances to very basic
**descriptor** objects that store the data in attributes. Data is added to
the document by creating such descriptor objects, filling them with data
and then adding them to the document. This makes it easy to integrate this
object in different contexts.
The **DesignSpaceDocument** object can be subclassed to work with
different objects, as long as they have the same attributes. Reader and
Writer objects can be subclassed as well.
**Note:** Python attribute names are usually camelCased, the
corresponding `XML <document-xml-structure>`_ attributes are usually
all lowercase.
.. code:: python
from fontTools.designspaceLib import DesignSpaceDocument
doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
doc.formatVersion
doc.elidedFallbackName
doc.axes
doc.locationLabels
doc.rules
doc.rulesProcessingLast
doc.sources
doc.variableFonts
doc.instances
doc.lib
"""
def __init__(self, readerClass=None, writerClass=None):
self.path = None
"""String, optional. When the document is read from the disk, this is
the full path that was given to :meth:`read` or :meth:`fromfile`.
"""
self.filename = None
"""String, optional. When the document is read from the disk, this is
its original file name, i.e. the last part of its path.
When the document is produced by a Python script and still only exists
in memory, the producing script can write here an indication of a
possible "good" filename, in case one wants to save the file somewhere.
"""
self.formatVersion: Optional[str] = None
"""Format version for this document, as a string. E.g. "4.0" """
self.elidedFallbackName: Optional[str] = None
"""STAT Style Attributes Header field ``elidedFallbackNameID``.
See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_
.. versionadded:: 5.0
"""
self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
"""List of this document's axes."""
self.locationLabels: List[LocationLabelDescriptor] = []
"""List of this document's STAT format 4 labels.
.. versionadded:: 5.0"""
self.rules: List[RuleDescriptor] = []
"""List of this document's rules."""
self.rulesProcessingLast: bool = False
"""This flag indicates whether the substitution rules should be applied
before or after other glyph substitution features.
- False: before
- True: after.
Default is False. For new projects, you probably want True. See
the following issues for more information:
`fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__
`fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__
If you want to use a different feature altogether, e.g. ``calt``,
use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``
.. code:: xml
<lib>
<dict>
<key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
<string>calt</string>
</dict>
</lib>
"""
self.sources: List[SourceDescriptor] = []
"""List of this document's sources."""
self.variableFonts: List[VariableFontDescriptor] = []
"""List of this document's variable fonts.
.. versionadded:: 5.0"""
self.instances: List[InstanceDescriptor] = []
"""List of this document's instances."""
self.lib: Dict = {}
"""User defined, custom data associated with the whole document.
Use reverse-DNS notation to identify your own data.
Respect the data stored by others.
"""
self.default: Optional[str] = None
"""Name of the default master.
        This attribute is updated by the :meth:`findDefault` method.
"""
if readerClass is not None:
self.readerClass = readerClass
else:
self.readerClass = BaseDocReader
if writerClass is not None:
self.writerClass = writerClass
else:
self.writerClass = BaseDocWriter
@classmethod
def fromfile(cls, path, readerClass=None, writerClass=None):
"""Read a designspace file from ``path`` and return a new instance of
        :class:`DesignSpaceDocument`.
"""
self = cls(readerClass=readerClass, writerClass=writerClass)
self.read(path)
return self
@classmethod
def fromstring(cls, string, readerClass=None, writerClass=None):
self = cls(readerClass=readerClass, writerClass=writerClass)
reader = self.readerClass.fromstring(string, self)
reader.read()
if self.sources:
self.findDefault()
return self
def tostring(self, encoding=None):
"""Returns the designspace as a string. Default encoding ``utf-8``."""
if encoding is str or (
encoding is not None and encoding.lower() == "unicode"
):
f = StringIO()
xml_declaration = False
elif encoding is None or encoding == "utf-8":
f = BytesIO()
encoding = "UTF-8"
xml_declaration = True
else:
raise ValueError("unsupported encoding: '%s'" % encoding)
writer = self.writerClass(f, self)
writer.write(encoding=encoding, xml_declaration=xml_declaration)
return f.getvalue()
def read(self, path):
"""Read a designspace file from ``path`` and populates the fields of
``self`` with the data.
"""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
reader = self.readerClass(path, self)
reader.read()
if self.sources:
self.findDefault()
def write(self, path):
"""Write this designspace to ``path``."""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
self.updatePaths()
writer = self.writerClass(path, self)
writer.write()
def _posixRelativePath(self, otherPath):
relative = os.path.relpath(otherPath, os.path.dirname(self.path))
return posix(relative)
def updatePaths(self):
"""
Right before we save we need to identify and respond to the following situations:
In each descriptor, we have to do the right thing for the filename attribute.
::
case 1.
descriptor.filename == None
descriptor.path == None
-- action:
write as is, descriptors will not have a filename attr.
useless, but no reason to interfere.
case 2.
descriptor.filename == "../something"
descriptor.path == None
-- action:
write as is. The filename attr should not be touched.
case 3.
descriptor.filename == None
descriptor.path == "~/absolute/path/there"
-- action:
calculate the relative path for filename.
We're not overwriting some other value for filename, it should be fine
case 4.
descriptor.filename == '../somewhere'
descriptor.path == "~/absolute/path/there"
-- action:
                there is a conflict between the given filename and the path.
                So we know where the file is relative to the document.
                We can't guess why they're different, so we choose the path to be correct and update the filename.
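        A small sketch of case 3 (the paths are illustrative only):
        .. code:: python
            from fontTools.designspaceLib import DesignSpaceDocument
            doc = DesignSpaceDocument()
            doc.addSourceDescriptor(path="/abs/path/masters/Light.ufo")
            doc.write("/abs/path/my.designspace")
            # the source descriptor now carries filename == "masters/Light.ufo"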
"""
assert self.path is not None
for descriptor in self.sources + self.instances:
if descriptor.path is not None:
# case 3 and 4: filename gets updated and relativized
descriptor.filename = self._posixRelativePath(descriptor.path)
def addSource(self, sourceDescriptor: SourceDescriptor):
"""Add the given ``sourceDescriptor`` to ``doc.sources``."""
self.sources.append(sourceDescriptor)
def addSourceDescriptor(self, **kwargs):
"""Instantiate a new :class:`SourceDescriptor` using the given
``kwargs`` and add it to ``doc.sources``.
"""
source = self.writerClass.sourceDescriptorClass(**kwargs)
self.addSource(source)
return source
def addInstance(self, instanceDescriptor: InstanceDescriptor):
"""Add the given ``instanceDescriptor`` to :attr:`instances`."""
self.instances.append(instanceDescriptor)
def addInstanceDescriptor(self, **kwargs):
"""Instantiate a new :class:`InstanceDescriptor` using the given
``kwargs`` and add it to :attr:`instances`.
"""
instance = self.writerClass.instanceDescriptorClass(**kwargs)
self.addInstance(instance)
return instance
def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]):
"""Add the given ``axisDescriptor`` to :attr:`axes`."""
self.axes.append(axisDescriptor)
def addAxisDescriptor(self, **kwargs):
"""Instantiate a new :class:`AxisDescriptor` using the given
``kwargs`` and add it to :attr:`axes`.
        The axis will be an instance of :class:`DiscreteAxisDescriptor` if
        the ``kwargs`` provide a ``values`` list, or an :class:`AxisDescriptor` otherwise.
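        A small sketch of both forms, given an existing document ``doc``; the
        axis names, tags and values are illustrative:
        .. code:: python
            doc.addAxisDescriptor(
                name="Weight", tag="wght",
                minimum=100, default=400, maximum=900,
            )
            doc.addAxisDescriptor(
                name="Italic", tag="ital",
                values=[0, 1], default=0,
            )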
"""
if "values" in kwargs:
axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)
else:
axis = self.writerClass.axisDescriptorClass(**kwargs)
self.addAxis(axis)
return axis
def addRule(self, ruleDescriptor: RuleDescriptor):
"""Add the given ``ruleDescriptor`` to :attr:`rules`."""
self.rules.append(ruleDescriptor)
def addRuleDescriptor(self, **kwargs):
"""Instantiate a new :class:`RuleDescriptor` using the given
``kwargs`` and add it to :attr:`rules`.
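        A hedged sketch, given an existing document ``doc``; the rule name,
        axis name and glyph names are illustrative:
        .. code:: python
            doc.addRuleDescriptor(
                name="BRACKET.dollar",
                conditionSets=[[dict(name="Weight", minimum=500, maximum=900)]],
                subs=[("dollar", "dollar.alt")],
            )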
"""
rule = self.writerClass.ruleDescriptorClass(**kwargs)
self.addRule(rule)
return rule
def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor):
"""Add the given ``variableFontDescriptor`` to :attr:`variableFonts`.
.. versionadded:: 5.0
"""
self.variableFonts.append(variableFontDescriptor)
def addVariableFontDescriptor(self, **kwargs):
"""Instantiate a new :class:`VariableFontDescriptor` using the given
``kwargs`` and add it to :attr:`variableFonts`.
.. versionadded:: 5.0
"""
variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)
self.addVariableFont(variableFont)
return variableFont
def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):
"""Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`.
.. versionadded:: 5.0
"""
self.locationLabels.append(locationLabelDescriptor)
def addLocationLabelDescriptor(self, **kwargs):
"""Instantiate a new :class:`LocationLabelDescriptor` using the given
``kwargs`` and add it to :attr:`locationLabels`.
.. versionadded:: 5.0
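        A short sketch, given an existing document ``doc``; the label name and
        axis names are illustrative:
        .. code:: python
            doc.addLocationLabelDescriptor(
                name="Some Style",
                userLocation={"Weight": 300, "Italic": 0},
            )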
"""
locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs)
self.addLocationLabel(locationLabel)
return locationLabel
def newDefaultLocation(self):
"""Return a dict with the default location in design space coordinates."""
# Without OrderedDict, output XML would be non-deterministic.
# https://github.com/LettError/designSpaceDocument/issues/10
loc = collections.OrderedDict()
for axisDescriptor in self.axes:
loc[axisDescriptor.name] = axisDescriptor.map_forward(
axisDescriptor.default
)
return loc
def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]:
"""Return the :class:`LocationLabel` that matches the given
``userLocation``, or ``None`` if no such label exists.
.. versionadded:: 5.0
"""
return next(
(label for label in self.locationLabels if label.userLocation == userLocation), None
)
def updateFilenameFromPath(self, masters=True, instances=True, force=False):
"""Set a descriptor filename attr from the path and this document path.
If the filename attribute is not None: skip it.
"""
if masters:
for descriptor in self.sources:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
if instances:
for descriptor in self.instances:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
def newAxisDescriptor(self):
"""Ask the writer class to make us a new axisDescriptor."""
return self.writerClass.getAxisDecriptor()
def newSourceDescriptor(self):
"""Ask the writer class to make us a new sourceDescriptor."""
return self.writerClass.getSourceDescriptor()
def newInstanceDescriptor(self):
"""Ask the writer class to make us a new instanceDescriptor."""
return self.writerClass.getInstanceDescriptor()
def getAxisOrder(self):
"""Return a list of axis names, in the same order as defined in the document."""
names = []
for axisDescriptor in self.axes:
names.append(axisDescriptor.name)
return names
def getAxis(self, name):
"""Return the axis with the given ``name``, or ``None`` if no such axis exists."""
for axisDescriptor in self.axes:
if axisDescriptor.name == name:
return axisDescriptor
return None
def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
"""Return the top-level location label with the given ``name``, or
``None`` if no such label exists.
.. versionadded:: 5.0
"""
for label in self.locationLabels:
if label.name == name:
return label
return None
def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict:
"""Map a user location to a design location.
Assume that missing coordinates are at the default location for that axis.
Note: the output won't be anisotropic, only the xvalue is set.
.. versionadded:: 5.0
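        For instance, a sketch with a single illustrative axis whose map sends
        user value 400 to design value 368:
        .. code:: python
            from fontTools.designspaceLib import DesignSpaceDocument
            doc = DesignSpaceDocument()
            doc.addAxisDescriptor(
                name="Weight", tag="wght",
                minimum=100, default=400, maximum=900,
                map=[(100, 20), (400, 368), (900, 1000)],
            )
            doc.map_forward({"Weight": 400})  # {'Weight': 368}
            doc.map_forward({})               # {'Weight': 368}, default filled in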
"""
return {
axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))
for axis in self.axes
}
def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict:
"""Map a design location to a user location.
Assume that missing coordinates are at the default location for that axis.
When the input has anisotropic locations, only the xvalue is used.
.. versionadded:: 5.0
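        Continuing the illustrative Weight axis sketched in :meth:`map_forward`:
        .. code:: python
            doc.map_backward({"Weight": 368})  # {'Weight': 400}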
"""
return {
axis.name: (
axis.map_backward(designLocation[axis.name])
if axis.name in designLocation
else axis.default
)
for axis in self.axes
}
def findDefault(self):
"""Set and return SourceDescriptor at the default location or None.
The default location is the set of all `default` values in user space
of all axes.
This function updates the document's :attr:`default` value.
.. versionchanged:: 5.0
Allow the default source to not specify some of the axis values, and
they are assumed to be the default.
See :meth:`SourceDescriptor.getFullDesignLocation()`
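        A short usage sketch (the file path is illustrative):
        .. code:: python
            from fontTools.designspaceLib import DesignSpaceDocument
            doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
            default_source = doc.findDefault()
            if default_source is not None:
                print(default_source.path)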
"""
self.default = None
# Convert the default location from user space to design space before comparing
# it against the SourceDescriptor locations (always in design space).
defaultDesignLocation = self.newDefaultLocation()
for sourceDescriptor in self.sources:
if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation:
self.default = sourceDescriptor
return sourceDescriptor
return None
def normalizeLocation(self, location):
"""Return a dict with normalized axis values."""
from fontTools.varLib.models import normalizeValue
new = {}
for axis in self.axes:
if axis.name not in location:
                # this axis is not specified in the location; skip it
continue
value = location[axis.name]
# 'anisotropic' location, take first coord only
if isinstance(value, tuple):
value = value[0]
triple = [
axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum)
]
new[axis.name] = normalizeValue(value, triple)
return new
def normalize(self):
"""
        Normalize the geometry of this designspace:
            - scale the locations of all masters and instances into the -1 / 0 / +1 range.
            - the axis data is needed to do that scaling, so the axes themselves are normalized last.
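        A sketch of the effect on a single illustrative axis:
        .. code:: python
            from fontTools.designspaceLib import DesignSpaceDocument
            doc = DesignSpaceDocument()
            doc.addAxisDescriptor(
                name="Weight", tag="wght",
                minimum=100, default=400, maximum=900,
            )
            doc.normalizeLocation({"Weight": 250})  # {'Weight': -0.5}
            doc.normalize()  # the Weight axis now runs from -1 through 0 to +1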
"""
# masters
for item in self.sources:
item.location = self.normalizeLocation(item.location)
# instances
for item in self.instances:
# glyph masters for this instance
for _, glyphData in item.glyphs.items():
glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation'])
for glyphMaster in glyphData['masters']:
glyphMaster['location'] = self.normalizeLocation(glyphMaster['location'])
item.location = self.normalizeLocation(item.location)
# the axes
for axis in self.axes:
# scale the map first
newMap = []
for inputValue, outputValue in axis.map:
newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name)
newMap.append((inputValue, newOutputValue))
if newMap:
axis.map = newMap
# finally the axis values
minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name)
maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name)
default = self.normalizeLocation({axis.name: axis.default}).get(axis.name)
            # and set the normalized values back on the axis
axis.minimum = minimum
axis.maximum = maximum
axis.default = default
# now the rules
for rule in self.rules:
newConditionSets = []
for conditions in rule.conditionSets:
newConditions = []
for cond in conditions:
if cond.get('minimum') is not None:
minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name'])
else:
minimum = None
if cond.get('maximum') is not None:
maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name'])
else:
maximum = None
newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum))
newConditionSets.append(newConditions)
rule.conditionSets = newConditionSets
def loadSourceFonts(self, opener, **kwargs):
"""Ensure SourceDescriptor.font attributes are loaded, and return list of fonts.
Takes a callable which initializes a new font object (e.g. TTFont, or
defcon.Font, etc.) from the SourceDescriptor.path, and sets the
SourceDescriptor.font attribute.
If the font attribute is already not None, it is not loaded again.
Fonts with the same path are only loaded once and shared among SourceDescriptors.
For example, to load UFO sources using defcon:
designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
designspace.loadSourceFonts(defcon.Font)
Or to load masters as FontTools binary fonts, including extra options:
designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False)
Args:
opener (Callable): takes one required positional argument, the source.path,
and an optional list of keyword arguments, and returns a new font object
loaded from the path.
**kwargs: extra options passed on to the opener function.
Returns:
List of font objects in the order they appear in the sources list.
"""
# we load fonts with the same source.path only once
loaded = {}
fonts = []
for source in self.sources:
if source.font is not None: # font already loaded
fonts.append(source.font)
continue
if source.path in loaded:
source.font = loaded[source.path]
else:
if source.path is None:
raise DesignSpaceDocumentError(
"Designspace source '%s' has no 'path' attribute"
% (source.name or "<Unknown>")
)
source.font = opener(source.path, **kwargs)
loaded[source.path] = source.font
fonts.append(source.font)
return fonts
@property
def formatTuple(self):
"""Return the formatVersion as a tuple of (major, minor).
.. versionadded:: 5.0
"""
if self.formatVersion is None:
return (5, 0)
numbers = (int(i) for i in self.formatVersion.split("."))
major = next(numbers)
minor = next(numbers, 0)
return (major, minor)
def getVariableFonts(self) -> List[VariableFontDescriptor]:
"""Return all variable fonts defined in this document, or implicit
variable fonts that can be built from the document's continuous axes.
In the case of Designspace documents before version 5, the whole
document was implicitly describing a variable font that covers the
whole space.
In version 5 and above documents, there can be as many variable fonts
as there are locations on discrete axes.
.. seealso:: :func:`splitInterpolable`
.. versionadded:: 5.0
"""
if self.variableFonts:
return self.variableFonts
variableFonts = []
discreteAxes = []
rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = []
for axis in self.axes:
if isinstance(axis, DiscreteAxisDescriptor):
discreteAxes.append(axis)
else:
rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name))
valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
for values in valueCombinations:
basename = None
if self.filename is not None:
basename = os.path.splitext(self.filename)[0] + "-VF"
if self.path is not None:
basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF"
if basename is None:
basename = "VF"
axisNames = "".join([f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)])
variableFonts.append(VariableFontDescriptor(
name=f"{basename}{axisNames}",
axisSubsets=rangeAxisSubsets + [
ValueAxisSubsetDescriptor(name=axis.name, userValue=value)
for axis, value in zip(discreteAxes, values)
]
))
return variableFonts
def deepcopyExceptFonts(self):
"""Allow deep-copying a DesignSpace document without deep-copying
attached UFO fonts or TTFont objects. The :attr:`font` attribute
is shared by reference between the original and the copy.
.. versionadded:: 5.0
"""
fonts = [source.font for source in self.sources]
try:
for source in self.sources:
source.font = None
res = copy.deepcopy(self)
for source, font in zip(res.sources, fonts):
res.font = font
return res
finally:
for source, font in zip(self.sources, fonts):
source.font = font
| [
"collections.OrderedDict",
"fontTools.misc.textTools.tobytes",
"fontTools.varLib.models.normalizeValue",
"copy.deepcopy",
"fontTools.misc.textTools.tostr",
"itertools.product",
"fontTools.misc.etree.parse",
"io.BytesIO",
"os.path.splitext",
"fontTools.misc.etree.Element",
"fontTools.misc.plistlib.totree",
"os.path.dirname",
"os.path.basename",
"fontTools.varLib.models.piecewiseLinearMap",
"fontTools.misc.etree.ElementTree",
"io.StringIO",
"fontTools.misc.plistlib.fromtree"
]
| [((8905, 8922), 'fontTools.misc.textTools.tostr', 'tostr', (['familyName'], {}), '(familyName)\n', (8910, 8922), False, 'from fontTools.misc.textTools import tobytes, tostr\n'), ((20148, 20164), 'fontTools.misc.textTools.tostr', 'tostr', (['styleName'], {}), '(styleName)\n', (20153, 20164), False, 'from fontTools.misc.textTools import tobytes, tostr\n'), ((20380, 20397), 'fontTools.misc.textTools.tostr', 'tostr', (['familyName'], {}), '(familyName)\n', (20385, 20397), False, 'from fontTools.misc.textTools import tobytes, tostr\n'), ((20636, 20660), 'fontTools.misc.textTools.tostr', 'tostr', (['styleMapStyleName'], {}), '(styleMapStyleName)\n', (20641, 20660), False, 'from fontTools.misc.textTools import tobytes, tostr\n'), ((20916, 20941), 'fontTools.misc.textTools.tostr', 'tostr', (['styleMapFamilyName'], {}), '(styleMapFamilyName)\n', (20921, 20941), False, 'from fontTools.misc.textTools import tobytes, tostr\n'), ((30249, 30299), 'fontTools.varLib.models.piecewiseLinearMap', 'piecewiseLinearMap', (['v', '{k: v for k, v in self.map}'], {}), '(v, {k: v for k, v in self.map})\n', (30267, 30299), False, 'from fontTools.varLib.models import piecewiseLinearMap\n'), ((30589, 30639), 'fontTools.varLib.models.piecewiseLinearMap', 'piecewiseLinearMap', (['v', '{v: k for k, v in self.map}'], {}), '(v, {v: k for k, v in self.map})\n', (30607, 30639), False, 'from fontTools.varLib.models import piecewiseLinearMap\n'), ((43672, 43697), 'fontTools.misc.etree.Element', 'ET.Element', (['"""designspace"""'], {}), "('designspace')\n", (43682, 43697), True, 'from fontTools.misc import etree as ET\n'), ((45756, 45781), 'fontTools.misc.etree.ElementTree', 'ET.ElementTree', (['self.root'], {}), '(self.root)\n', (45770, 45781), True, 'from fontTools.misc import etree as ET\n'), ((47128, 47150), 'fontTools.misc.etree.Element', 'ET.Element', (['"""location"""'], {}), "('location')\n", (47138, 47150), True, 'from fontTools.misc import etree as ET\n'), ((48356, 48374), 'fontTools.misc.etree.Element', 'ET.Element', (['"""rule"""'], {}), "('rule')\n", (48366, 48374), True, 'from fontTools.misc import etree as ET\n'), ((49740, 49758), 'fontTools.misc.etree.Element', 'ET.Element', (['"""axis"""'], {}), "('axis')\n", (49750, 49758), True, 'from fontTools.misc import etree as ET\n'), ((51344, 51363), 'fontTools.misc.etree.Element', 'ET.Element', (['"""label"""'], {}), "('label')\n", (51354, 51363), True, 'from fontTools.misc import etree as ET\n'), ((52606, 52625), 'fontTools.misc.etree.Element', 'ET.Element', (['"""label"""'], {}), "('label')\n", (52616, 52625), True, 'from fontTools.misc import etree as ET\n'), ((53236, 53258), 'fontTools.misc.etree.Element', 'ET.Element', (['"""location"""'], {}), "('location')\n", (53246, 53258), True, 'from fontTools.misc import etree as ET\n'), ((54369, 54391), 'fontTools.misc.etree.Element', 'ET.Element', (['"""instance"""'], {}), "('instance')\n", (54379, 54391), True, 'from fontTools.misc import etree as ET\n'), ((59664, 59684), 'fontTools.misc.etree.Element', 'ET.Element', (['"""source"""'], {}), "('source')\n", (59674, 59684), True, 'from fontTools.misc import etree as ET\n'), ((62917, 62944), 'fontTools.misc.etree.Element', 'ET.Element', (['"""variable-font"""'], {}), "('variable-font')\n", (62927, 62944), True, 'from fontTools.misc import etree as ET\n'), ((64336, 64353), 'fontTools.misc.etree.Element', 'ET.Element', (['"""lib"""'], {}), "('lib')\n", (64346, 64353), True, 'from fontTools.misc import etree as ET\n'), ((64579, 64598), 'fontTools.misc.etree.Element', 
'ET.Element', (['"""glyph"""'], {}), "('glyph')\n", (64589, 64598), True, 'from fontTools.misc import etree as ET\n'), ((66764, 66783), 'fontTools.misc.etree.parse', 'ET.parse', (['self.path'], {}), '(self.path)\n', (66772, 66783), True, 'from fontTools.misc import etree as ET\n'), ((90558, 90590), 'fontTools.misc.plistlib.fromtree', 'plistlib.fromtree', (['libElement[0]'], {}), '(libElement[0])\n', (90575, 90590), False, 'from fontTools.misc import plistlib\n'), ((99936, 99958), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (99952, 99958), False, 'import os\n'), ((100318, 100340), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (100334, 100340), False, 'import os\n'), ((105967, 105992), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (105990, 105992), False, 'import collections\n'), ((117573, 117631), 'itertools.product', 'itertools.product', (['*[axis.values for axis in discreteAxes]'], {}), '(*[axis.values for axis in discreteAxes])\n', (117590, 117631), False, 'import itertools\n'), ((43980, 43998), 'fontTools.misc.etree.Element', 'ET.Element', (['"""axes"""'], {}), "('axes')\n", (43990, 43998), True, 'from fontTools.misc import etree as ET\n'), ((44372, 44392), 'fontTools.misc.etree.Element', 'ET.Element', (['"""labels"""'], {}), "('labels')\n", (44382, 44392), True, 'from fontTools.misc import etree as ET\n'), ((45214, 45242), 'fontTools.misc.etree.Element', 'ET.Element', (['"""variable-fonts"""'], {}), "('variable-fonts')\n", (45224, 45242), True, 'from fontTools.misc import etree as ET\n'), ((47597, 47620), 'fontTools.misc.etree.Element', 'ET.Element', (['"""dimension"""'], {}), "('dimension')\n", (47607, 47620), True, 'from fontTools.misc import etree as ET\n'), ((48558, 48584), 'fontTools.misc.etree.Element', 'ET.Element', (['"""conditionset"""'], {}), "('conditionset')\n", (48568, 48584), True, 'from fontTools.misc import etree as ET\n'), ((49434, 49451), 'fontTools.misc.etree.Element', 'ET.Element', (['"""sub"""'], {}), "('sub')\n", (49444, 49451), True, 'from fontTools.misc import etree as ET\n'), ((50344, 50364), 'fontTools.misc.etree.Element', 'ET.Element', (['"""labels"""'], {}), "('labels')\n", (50354, 50364), True, 'from fontTools.misc import etree as ET\n'), ((52303, 52326), 'fontTools.misc.etree.Element', 'ET.Element', (['"""labelname"""'], {}), "('labelname')\n", (52313, 52326), True, 'from fontTools.misc import etree as ET\n'), ((61011, 61028), 'fontTools.misc.etree.Element', 'ET.Element', (['"""lib"""'], {}), "('lib')\n", (61021, 61028), True, 'from fontTools.misc import etree as ET\n'), ((61182, 61202), 'fontTools.misc.etree.Element', 'ET.Element', (['"""groups"""'], {}), "('groups')\n", (61192, 61202), True, 'from fontTools.misc import etree as ET\n'), ((61366, 61388), 'fontTools.misc.etree.Element', 'ET.Element', (['"""features"""'], {}), "('features')\n", (61376, 61388), True, 'from fontTools.misc import etree as ET\n'), ((61573, 61591), 'fontTools.misc.etree.Element', 'ET.Element', (['"""info"""'], {}), "('info')\n", (61583, 61591), True, 'from fontTools.misc import etree as ET\n'), ((61878, 61899), 'fontTools.misc.etree.Element', 'ET.Element', (['"""kerning"""'], {}), "('kerning')\n", (61888, 61899), True, 'from fontTools.misc import etree as ET\n'), ((63135, 63161), 'fontTools.misc.etree.Element', 'ET.Element', (['"""axis-subsets"""'], {}), "('axis-subsets')\n", (63145, 63161), True, 'from fontTools.misc import etree as ET\n'), ((64380, 64428), 'fontTools.misc.plistlib.totree', 
'plistlib.totree', (['data'], {'indent_level': 'indent_level'}), '(data, indent_level=indent_level)\n', (64395, 64428), False, 'from fontTools.misc import plistlib\n'), ((65180, 65198), 'fontTools.misc.etree.Element', 'ET.Element', (['"""note"""'], {}), "('note')\n", (65190, 65198), True, 'from fontTools.misc import etree as ET\n'), ((65365, 65386), 'fontTools.misc.etree.Element', 'ET.Element', (['"""masters"""'], {}), "('masters')\n", (65375, 65386), True, 'from fontTools.misc import etree as ET\n'), ((67158, 67191), 'fontTools.misc.textTools.tobytes', 'tobytes', (['string'], {'encoding': '"""utf-8"""'}), "(string, encoding='utf-8')\n", (67165, 67191), False, 'from fontTools.misc.textTools import tobytes, tostr\n'), ((93654, 93686), 'fontTools.misc.plistlib.fromtree', 'plistlib.fromtree', (['libElement[0]'], {}), '(libElement[0])\n', (93671, 93686), False, 'from fontTools.misc import plistlib\n'), ((99210, 99220), 'io.StringIO', 'StringIO', ([], {}), '()\n', (99218, 99220), False, 'from io import BytesIO, StringIO\n'), ((100529, 100555), 'os.path.dirname', 'os.path.dirname', (['self.path'], {}), '(self.path)\n', (100544, 100555), False, 'import os\n'), ((111470, 111499), 'fontTools.varLib.models.normalizeValue', 'normalizeValue', (['value', 'triple'], {}), '(value, triple)\n', (111484, 111499), False, 'from fontTools.varLib.models import normalizeValue\n'), ((118916, 118935), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (118929, 118935), False, 'import copy\n'), ((44816, 44847), 'fontTools.misc.etree.Element', 'ET.Element', (['"""rules"""', 'attributes'], {}), "('rules', attributes)\n", (44826, 44847), True, 'from fontTools.misc import etree as ET\n'), ((45010, 45031), 'fontTools.misc.etree.Element', 'ET.Element', (['"""sources"""'], {}), "('sources')\n", (45020, 45031), True, 'from fontTools.misc import etree as ET\n'), ((45507, 45530), 'fontTools.misc.etree.Element', 'ET.Element', (['"""instances"""'], {}), "('instances')\n", (45517, 45530), True, 'from fontTools.misc import etree as ET\n'), ((48832, 48855), 'fontTools.misc.etree.Element', 'ET.Element', (['"""condition"""'], {}), "('condition')\n", (48842, 48855), True, 'from fontTools.misc import etree as ET\n'), ((50042, 50059), 'fontTools.misc.etree.Element', 'ET.Element', (['"""map"""'], {}), "('map')\n", (50052, 50059), True, 'from fontTools.misc import etree as ET\n'), ((53409, 53432), 'fontTools.misc.etree.Element', 'ET.Element', (['"""dimension"""'], {}), "('dimension')\n", (53419, 53432), True, 'from fontTools.misc import etree as ET\n'), ((55253, 55276), 'fontTools.misc.etree.Element', 'ET.Element', (['"""stylename"""'], {}), "('stylename')\n", (55263, 55276), True, 'from fontTools.misc import etree as ET\n'), ((55837, 55861), 'fontTools.misc.etree.Element', 'ET.Element', (['"""familyname"""'], {}), "('familyname')\n", (55847, 55861), True, 'from fontTools.misc import etree as ET\n'), ((56404, 56435), 'fontTools.misc.etree.Element', 'ET.Element', (['"""stylemapstylename"""'], {}), "('stylemapstylename')\n", (56414, 56435), True, 'from fontTools.misc import etree as ET\n'), ((57009, 57041), 'fontTools.misc.etree.Element', 'ET.Element', (['"""stylemapfamilyname"""'], {}), "('stylemapfamilyname')\n", (57019, 57041), True, 'from fontTools.misc import etree as ET\n'), ((59257, 59278), 'fontTools.misc.etree.Element', 'ET.Element', (['"""kerning"""'], {}), "('kerning')\n", (59267, 59278), True, 'from fontTools.misc import etree as ET\n'), ((59400, 59418), 'fontTools.misc.etree.Element', 'ET.Element', (['"""info"""'], 
{}), "('info')\n", (59410, 59418), True, 'from fontTools.misc import etree as ET\n'), ((60713, 60737), 'fontTools.misc.etree.Element', 'ET.Element', (['"""familyname"""'], {}), "('familyname')\n", (60723, 60737), True, 'from fontTools.misc import etree as ET\n'), ((62123, 62142), 'fontTools.misc.etree.Element', 'ET.Element', (['"""glyph"""'], {}), "('glyph')\n", (62133, 62142), True, 'from fontTools.misc import etree as ET\n'), ((63236, 63261), 'fontTools.misc.etree.Element', 'ET.Element', (['"""axis-subset"""'], {}), "('axis-subset')\n", (63246, 63261), True, 'from fontTools.misc import etree as ET\n'), ((65461, 65481), 'fontTools.misc.etree.Element', 'ET.Element', (['"""master"""'], {}), "('master')\n", (65471, 65481), True, 'from fontTools.misc import etree as ET\n'), ((77735, 77767), 'fontTools.misc.plistlib.fromtree', 'plistlib.fromtree', (['libElement[0]'], {}), '(libElement[0])\n', (77752, 77767), False, 'from fontTools.misc import plistlib\n'), ((86875, 86916), 'os.path.dirname', 'os.path.dirname', (['self.documentObject.path'], {}), '(self.documentObject.path)\n', (86890, 86916), False, 'import os\n'), ((99327, 99336), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (99334, 99336), False, 'from io import BytesIO, StringIO\n'), ((53977, 54000), 'fontTools.misc.etree.Element', 'ET.Element', (['"""dimension"""'], {}), "('dimension')\n", (53987, 54000), True, 'from fontTools.misc import etree as ET\n'), ((58794, 58814), 'fontTools.misc.etree.Element', 'ET.Element', (['"""glyphs"""'], {}), "('glyphs')\n", (58804, 58814), True, 'from fontTools.misc import etree as ET\n'), ((72323, 72351), 'fontTools.misc.textTools.tostr', 'tostr', (['labelNameElement.text'], {}), '(labelNameElement.text)\n', (72328, 72351), False, 'from fontTools.misc.textTools import tobytes, tostr\n'), ((80546, 80572), 'os.path.dirname', 'os.path.dirname', (['self.path'], {}), '(self.path)\n', (80561, 80572), False, 'import os\n'), ((117770, 117801), 'os.path.splitext', 'os.path.splitext', (['self.filename'], {}), '(self.filename)\n', (117786, 117801), False, 'import os\n'), ((117895, 117922), 'os.path.basename', 'os.path.basename', (['self.path'], {}), '(self.path)\n', (117911, 117922), False, 'import os\n')] |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Optional, Tuple
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.monte_carlo import qSimpleRegret
from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective
from botorch.acquisition.utils import get_infeasible_cost
from botorch.models.model import Model
from botorch.utils import (
get_objective_weights_transform,
get_outcome_constraint_transforms,
)
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.transforms import squeeze_last_dim
from torch import Tensor
def get_PosteriorMean(
model: Model,
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
X_observed: Optional[Tensor] = None,
X_pending: Optional[Tensor] = None,
**kwargs: Any,
) -> AcquisitionFunction:
r"""Instantiates a PosteriorMean acquisition function.
Note: If no OutcomeConstraints given, return an analytic acquisition
function. This requires {optimizer_kwargs: {joint_optimization: True}} or an
optimizer that does not assume pending point support.
Args:
objective_weights: The objective is to maximize a weighted sum of
the columns of f(x). These are the weights.
outcome_constraints: A tuple of (A, b). For k outcome constraints
and m outputs at f(x), A is (k x m) and b is (k x 1) such that
A f(x) <= b. (Not used by single task models)
X_observed: A tensor containing points observed for all objective
outcomes and outcomes that appear in the outcome constraints (if
there are any).
X_pending: A tensor containing points whose evaluation is pending (i.e.
that have been submitted for evaluation) present for all objective
outcomes and outcomes that appear in the outcome constraints (if
there are any).
Returns:
PosteriorMean: The instantiated acquisition function.
"""
if X_observed is None:
raise ValueError("There are no feasible observed points.")
# construct Objective module
if kwargs.get("chebyshev_scalarization", False):
obj_tf = get_chebyshev_scalarization(
weights=objective_weights,
Y=squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1)),
)
else:
obj_tf = get_objective_weights_transform(objective_weights)
if outcome_constraints is None:
objective = GenericMCObjective(objective=obj_tf)
else:
con_tfs = get_outcome_constraint_transforms(outcome_constraints)
inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
objective = ConstrainedMCObjective(
objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
)
# Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns.
acq_func = qSimpleRegret(model, objective=objective)
return acq_func
| [
"botorch.acquisition.objective.GenericMCObjective",
"botorch.acquisition.monte_carlo.qSimpleRegret",
"botorch.utils.get_outcome_constraint_transforms",
"botorch.acquisition.utils.get_infeasible_cost",
"botorch.utils.get_objective_weights_transform",
"botorch.acquisition.objective.ConstrainedMCObjective"
]
| [((3168, 3209), 'botorch.acquisition.monte_carlo.qSimpleRegret', 'qSimpleRegret', (['model'], {'objective': 'objective'}), '(model, objective=objective)\n', (3181, 3209), False, 'from botorch.acquisition.monte_carlo import qSimpleRegret\n'), ((2622, 2672), 'botorch.utils.get_objective_weights_transform', 'get_objective_weights_transform', (['objective_weights'], {}), '(objective_weights)\n', (2653, 2672), False, 'from botorch.utils import get_objective_weights_transform, get_outcome_constraint_transforms\n'), ((2729, 2765), 'botorch.acquisition.objective.GenericMCObjective', 'GenericMCObjective', ([], {'objective': 'obj_tf'}), '(objective=obj_tf)\n', (2747, 2765), False, 'from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective\n'), ((2794, 2848), 'botorch.utils.get_outcome_constraint_transforms', 'get_outcome_constraint_transforms', (['outcome_constraints'], {}), '(outcome_constraints)\n', (2827, 2848), False, 'from botorch.utils import get_objective_weights_transform, get_outcome_constraint_transforms\n'), ((2868, 2932), 'botorch.acquisition.utils.get_infeasible_cost', 'get_infeasible_cost', ([], {'X': 'X_observed', 'model': 'model', 'objective': 'obj_tf'}), '(X=X_observed, model=model, objective=obj_tf)\n', (2887, 2932), False, 'from botorch.acquisition.utils import get_infeasible_cost\n'), ((2953, 3050), 'botorch.acquisition.objective.ConstrainedMCObjective', 'ConstrainedMCObjective', ([], {'objective': 'obj_tf', 'constraints': '(con_tfs or [])', 'infeasible_cost': 'inf_cost'}), '(objective=obj_tf, constraints=con_tfs or [],\n infeasible_cost=inf_cost)\n', (2975, 3050), False, 'from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective\n')] |
# Copyright 2018 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Co-developed by Tier IV, Inc. and Apex.AI, Inc.
import ament_index_python
import launch
import launch.actions
import launch_ros.actions
import lidar_integration
def generate_test_description(ready_fn):
PORT = lidar_integration.get_open_port()
# The node under test and the checker node that will pass/fail our tests:
test_topic = "veloyne_cloud_node_test_topic"
velodyne_cloud_node = launch_ros.actions.Node(
package="velodyne_nodes",
node_executable="velodyne_cloud_node_exe",
node_name="vlp16_driver_node",
node_namespace="lidar_front",
parameters=[
"{}/param/vlp16_test.param.yaml".format(
ament_index_python.get_package_share_directory("velodyne_nodes")
),
{
"port": PORT,
"expected_num_subscribers": 1,
}
],
remappings=[("points_raw", test_topic)],
arguments=["--model", "vlp16"]
)
pcl_checker = lidar_integration.make_pcl_checker(
topic=test_topic,
size=55000,
period=100,
period_tolerance=2.2,
size_tolerance=1.4,
)
return lidar_integration.get_lidar_launch_description(
test_nodes=[velodyne_cloud_node],
checkers=[pcl_checker],
other_actions=[
launch.actions.OpaqueFunction(function=lambda context: ready_fn())
],
port=PORT
)
# Test cases are created automatically by the lidar_integration package. We just need to
# instantiate them
active = lidar_integration.make_active_tests()
after_shutdown = lidar_integration.make_post_shutdown_tests()
| [
"lidar_integration.make_post_shutdown_tests",
"ament_index_python.get_package_share_directory",
"lidar_integration.make_pcl_checker",
"lidar_integration.make_active_tests",
"lidar_integration.get_open_port"
]
| [((2139, 2176), 'lidar_integration.make_active_tests', 'lidar_integration.make_active_tests', ([], {}), '()\n', (2174, 2176), False, 'import lidar_integration\n'), ((2195, 2239), 'lidar_integration.make_post_shutdown_tests', 'lidar_integration.make_post_shutdown_tests', ([], {}), '()\n', (2237, 2239), False, 'import lidar_integration\n'), ((807, 840), 'lidar_integration.get_open_port', 'lidar_integration.get_open_port', ([], {}), '()\n', (838, 840), False, 'import lidar_integration\n'), ((1581, 1703), 'lidar_integration.make_pcl_checker', 'lidar_integration.make_pcl_checker', ([], {'topic': 'test_topic', 'size': '(55000)', 'period': '(100)', 'period_tolerance': '(2.2)', 'size_tolerance': '(1.4)'}), '(topic=test_topic, size=55000, period=100,\n period_tolerance=2.2, size_tolerance=1.4)\n', (1615, 1703), False, 'import lidar_integration\n'), ((1272, 1336), 'ament_index_python.get_package_share_directory', 'ament_index_python.get_package_share_directory', (['"""velodyne_nodes"""'], {}), "('velodyne_nodes')\n", (1318, 1336), False, 'import ament_index_python\n')] |
# USAGE
# python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg
# import the necessary packages
from color_transfer import color_transfer
import numpy as np
import argparse
import cv2
def show_image(title, image, width = 300):
# resize the image to have a constant width, just to
# make displaying the images take up less screen real
# estate
r = width / float(image.shape[1])
dim = (width, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
# show the resized image
cv2.imshow(title, resized)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--source", required = True,
help = "Path to the source image")
ap.add_argument("-t", "--target", required = True,
help = "Path to the target image")
ap.add_argument("-o", "--output", help = "Path to the output image (optional)")
args = vars(ap.parse_args())
# load the images
source = cv2.imread(args["source"])
target = cv2.imread(args["target"])
# transfer the color distribution from the source image
# to the target image
transfer = color_transfer(source, target)
# check to see if the output image should be saved
if args["output"] is not None:
cv2.imwrite(args["output"], transfer)
# show the images and wait for a key press
show_image("Source", source)
show_image("Target", target)
show_image("Transfer", transfer)
cv2.waitKey(0) | [
"cv2.imwrite",
"cv2.resize",
"argparse.ArgumentParser",
"cv2.imshow",
"color_transfer.color_transfer",
"cv2.waitKey",
"cv2.imread"
]
| [((633, 658), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (656, 658), False, 'import argparse\n'), ((970, 996), 'cv2.imread', 'cv2.imread', (["args['source']"], {}), "(args['source'])\n", (980, 996), False, 'import cv2\n'), ((1006, 1032), 'cv2.imread', 'cv2.imread', (["args['target']"], {}), "(args['target'])\n", (1016, 1032), False, 'import cv2\n'), ((1123, 1153), 'color_transfer.color_transfer', 'color_transfer', (['source', 'target'], {}), '(source, target)\n', (1137, 1153), False, 'from color_transfer import color_transfer\n'), ((1411, 1425), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (1422, 1425), False, 'import cv2\n'), ((461, 513), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'cv2.INTER_AREA'}), '(image, dim, interpolation=cv2.INTER_AREA)\n', (471, 513), False, 'import cv2\n'), ((544, 570), 'cv2.imshow', 'cv2.imshow', (['title', 'resized'], {}), '(title, resized)\n', (554, 570), False, 'import cv2\n'), ((1238, 1275), 'cv2.imwrite', 'cv2.imwrite', (["args['output']", 'transfer'], {}), "(args['output'], transfer)\n", (1249, 1275), False, 'import cv2\n')] |
import copy
import numpy as np
import open3d as o3d
from tqdm import tqdm
from scipy import stats
import utils_o3d as utils
def remove_ground_plane(pcd, z_thresh=-2.7):
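    # Keep only points whose last coordinate (z) is above z_thresh, i.e. crop away the ground plane.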
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, -1] > z_thresh]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def remove_y_plane(pcd, y_thresh=5):
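    # Keep points whose first coordinate is below y_thresh and flip the sign of the last coordinate.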
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, 0] < y_thresh]
cropped_points[:, -1] = -cropped_points[:, -1]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True):
normals_radius = voxel_size * 2
features_radius = voxel_size * 4
# Downsample the point cloud using Voxel grids
if downsample:
print(':: Input size:', np.array(pcd.points).shape)
pcd_down = utils.downsample_point_cloud(pcd, voxel_size)
print(':: Downsample with a voxel size %.3f' % voxel_size)
print(':: Downsample size', np.array(pcd_down.points).shape)
else: pcd_down = copy.deepcopy(pcd)
# Estimate normals
print(':: Estimate normal with search radius %.3f' % normals_radius)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn))
# Compute FPFH features
print(':: Compute FPFH feature with search radius %.3f' % features_radius)
features = o3d.registration.compute_fpfh_feature(pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn))
return pcd_down, features
def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False):
pcd0, pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1)
print(':: Input size 0:', np.array(pcd0.points).shape)
print(':: Input size 1:', np.array(pcd1.points).shape)
print(':: Features size 0:', np.array(feature0.data).shape)
print(':: Features size 1:', np.array(feature1.data).shape)
utils.paint_uniform_color(pcd0, color=[1, 0.706, 0])
utils.paint_uniform_color(pcd1, color=[0, 0.651, 0.929])
scores, indices = [], []
fpfh_tree = o3d.geometry.KDTreeFlann(feature1)
for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'):
[_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1)
scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]))
indices.append([i, idx[0]])
scores, indices = np.array(scores), np.array(indices)
median = np.median(scores)
if thresh is None: thresh = median
inliers_idx = np.where(scores <= thresh)[0]
pcd0_idx = indices[inliers_idx, 0]
pcd1_idx = indices[inliers_idx, 1]
print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % (
np.min(scores), np.max(scores), median, len(inliers_idx)))
if display:
for i, j in zip(pcd0_idx, pcd1_idx):
pcd0.colors[i] = [1, 0, 0]
pcd1.colors[j] = [1, 0, 0]
utils.display([pcd0, pcd1])
return pcd0_idx, pcd1_idx
def estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx, top_percent=1.0,
ransac_iters=5000, sample_size=50):
points0 = np.asarray(pcd0.points)[pcd0_idx]
points1 = np.asarray(pcd1.points)[pcd1_idx]
mean0 = np.mean(points0, axis=0)
mean1 = np.mean(points1, axis=0)
top_count = int(top_percent * len(pcd0_idx))
assert top_count > sample_size, 'top_count <= sample_size'
scales = []
for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'):
args = np.random.choice(top_count, sample_size, replace=False)
points0_r = points0[args]
points1_r = points1[args]
score0 = np.sum((points0_r - mean0) ** 2, axis=1)
score1 = np.sum((points1_r - mean1) ** 2, axis=1)
scale = np.sqrt(np.mean(score1) / np.mean(score0))
scales.append(scale)
best_scale = stats.mode(scales)[0][0]
print(':: Estimated scale:', best_scale)
return best_scale
def global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size,
distance_threshold=1.0, num_iters=4000000, num_val_iters=500):
print(':: Distance threshold %.3f' % distance_threshold)
result = o3d.registration.registration_ransac_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), 4, [
o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(
distance_threshold)
], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters))
return result
def fast_global_registration(source_down, target_down,
source_fpfh, target_fpfh, voxel_size):
distance_threshold = 1.0
result = o3d.registration.registration_fast_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh,
o3d.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold))
return result
def refine_registration(source, target, source_fpfh, target_fpfh, initial_result, voxel_size):
distance_threshold = 0.1
print(':: Distance threshold %.3f' % distance_threshold)
result = o3d.registration.registration_icp(
source, target, distance_threshold, initial_result.transformation,
o3d.registration.TransformationEstimationPointToPlane())
return result
def registration(pcd0, pcd1, feature1, feature2, voxel_size, method='global'):
if method == 'global':
print('\nRANSAC global registration on scaled point clouds...')
initial_result = global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
elif method == 'fast_global':
print('\nFast global registration on scaled point clouds...')
initial_result = fast_global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
else:
print(':: Registration method not supported')
return
print(':: Initial registration results:')
print(initial_result)
print('\nDisplaying initial result...')
draw_registration_result(pcd0, pcd1, initial_result.transformation)
print('\nRefine registration...')
result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size)
print(':: Final registration results:')
print(result)
return result
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
def run():
voxel_size = 0.2
dso_scale = 0.03
pcd_lidar = o3d.io.read_point_cloud('../maps/scans/scan_050.pcd')
pcd_lidar = remove_ground_plane(pcd_lidar)
pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd')
pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5)
pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2)
# pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10, 20, 30])
# Ground plane removal results
# utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
# utils.display(pcds=[pcd_dso], colors=[[0, 0.651, 0.929]])
# return
print('\nComputing FPFH features for lidar point cloud...')
pcd_lidar_down, features_lidar = compute_features(pcd_lidar, voxel_size=voxel_size)
print('\nComputing FPFH features for DSO point cloud...')
pcd_dso_down, features_dso = compute_features(pcd_dso, voxel_size=voxel_size * (dso_scale if dso_scale < 1 else 1))
print('\nMatching FPFH features...')
pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down,
features_lidar, features_dso, thresh=None)
print('\nEstimating scale using matches...')
scale = estimate_scale(pcd_lidar_down, pcd_dso_down, pcd_lidar_idx, pcd_dso_idx)
scale = 0.06
print('\nCorrecting scale...')
pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 / scale)
utils.display(pcds=[pcd_lidar, pcd_dso_scaled], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
# return
# Registration
pcd_dso_scaled_down, features_dso_scaled = compute_features(
pcd_dso_scaled, voxel_size=voxel_size)
result = registration(pcd_lidar_down, pcd_dso_scaled_down, features_lidar,
features_dso_scaled, voxel_size, method='global')
print('\nDisplaying result...')
draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation)
if __name__ == '__main__':
run()
| [
"open3d.registration.TransformationEstimationPointToPlane",
"numpy.array",
"copy.deepcopy",
"numpy.linalg.norm",
"numpy.mean",
"numpy.where",
"numpy.asarray",
"numpy.max",
"open3d.registration.RANSACConvergenceCriteria",
"numpy.min",
"open3d.geometry.KDTreeFlann",
"open3d.registration.CorrespondenceCheckerBasedOnEdgeLength",
"utils_o3d.downsample_point_cloud",
"numpy.random.choice",
"open3d.geometry.KDTreeSearchParamHybrid",
"open3d.visualization.draw_geometries",
"open3d.io.read_point_cloud",
"open3d.registration.TransformationEstimationPointToPoint",
"utils_o3d.display",
"open3d.utility.Vector3dVector",
"numpy.median",
"open3d.registration.FastGlobalRegistrationOption",
"scipy.stats.mode",
"utils_o3d.scale_point_cloud",
"numpy.sum",
"open3d.geometry.PointCloud",
"utils_o3d.paint_uniform_color",
"open3d.registration.CorrespondenceCheckerBasedOnDistance"
]
| [((187, 205), 'copy.deepcopy', 'copy.deepcopy', (['pcd'], {}), '(pcd)\n', (200, 205), False, 'import copy\n'), ((227, 251), 'numpy.array', 'np.array', (['cropped.points'], {}), '(cropped.points)\n', (235, 251), True, 'import numpy as np\n'), ((338, 363), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (361, 363), True, 'import open3d as o3d\n'), ((387, 429), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['cropped_points'], {}), '(cropped_points)\n', (413, 429), True, 'import open3d as o3d\n'), ((504, 522), 'copy.deepcopy', 'copy.deepcopy', (['pcd'], {}), '(pcd)\n', (517, 522), False, 'import copy\n'), ((544, 568), 'numpy.array', 'np.array', (['cropped.points'], {}), '(cropped.points)\n', (552, 568), True, 'import numpy as np\n'), ((705, 730), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (728, 730), True, 'import open3d as o3d\n'), ((754, 796), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['cropped_points'], {}), '(cropped_points)\n', (780, 796), True, 'import open3d as o3d\n'), ((2258, 2310), 'utils_o3d.paint_uniform_color', 'utils.paint_uniform_color', (['pcd0'], {'color': '[1, 0.706, 0]'}), '(pcd0, color=[1, 0.706, 0])\n', (2283, 2310), True, 'import utils_o3d as utils\n'), ((2315, 2371), 'utils_o3d.paint_uniform_color', 'utils.paint_uniform_color', (['pcd1'], {'color': '[0, 0.651, 0.929]'}), '(pcd1, color=[0, 0.651, 0.929])\n', (2340, 2371), True, 'import utils_o3d as utils\n'), ((2418, 2452), 'open3d.geometry.KDTreeFlann', 'o3d.geometry.KDTreeFlann', (['feature1'], {}), '(feature1)\n', (2442, 2452), True, 'import open3d as o3d\n'), ((2786, 2803), 'numpy.median', 'np.median', (['scores'], {}), '(scores)\n', (2795, 2803), True, 'import numpy as np\n'), ((3542, 3566), 'numpy.mean', 'np.mean', (['points0'], {'axis': '(0)'}), '(points0, axis=0)\n', (3549, 3566), True, 'import numpy as np\n'), ((3579, 3603), 'numpy.mean', 'np.mean', (['points1'], {'axis': '(0)'}), '(points1, axis=0)\n', (3586, 3603), True, 'import numpy as np\n'), ((6823, 6844), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (6836, 6844), False, 'import copy\n'), ((6863, 6884), 'copy.deepcopy', 'copy.deepcopy', (['target'], {}), '(target)\n', (6876, 6884), False, 'import copy\n'), ((7037, 7098), 'open3d.visualization.draw_geometries', 'o3d.visualization.draw_geometries', (['[source_temp, target_temp]'], {}), '([source_temp, target_temp])\n', (7070, 7098), True, 'import open3d as o3d\n'), ((7171, 7224), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../maps/scans/scan_050.pcd"""'], {}), "('../maps/scans/scan_050.pcd')\n", (7194, 7224), True, 'import open3d as o3d\n'), ((7287, 7341), 'open3d.io.read_point_cloud', 'o3d.io.read_point_cloud', (['"""../maps/dso_map_cleaned.pcd"""'], {}), "('../maps/dso_map_cleaned.pcd')\n", (7310, 7341), True, 'import open3d as o3d\n'), ((8478, 8523), 'utils_o3d.scale_point_cloud', 'utils.scale_point_cloud', (['pcd_dso', '(1.0 / scale)'], {}), '(pcd_dso, 1.0 / scale)\n', (8501, 8523), True, 'import utils_o3d as utils\n'), ((8528, 8623), 'utils_o3d.display', 'utils.display', ([], {'pcds': '[pcd_lidar, pcd_dso_scaled]', 'colors': '[[1, 0.706, 0], [0, 0.651, 0.929]]'}), '(pcds=[pcd_lidar, pcd_dso_scaled], colors=[[1, 0.706, 0], [0, \n 0.651, 0.929]])\n', (8541, 8623), True, 'import utils_o3d as utils\n'), ((1134, 1179), 'utils_o3d.downsample_point_cloud', 'utils.downsample_point_cloud', (['pcd', 'voxel_size'], {}), '(pcd, voxel_size)\n', (1162, 1179), True, 'import 
utils_o3d as utils\n'), ((1337, 1355), 'copy.deepcopy', 'copy.deepcopy', (['pcd'], {}), '(pcd)\n', (1350, 1355), False, 'import copy\n'), ((1494, 1572), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'normals_radius', 'max_nn': 'normals_nn'}), '(radius=normals_radius, max_nn=normals_nn)\n', (1530, 1572), True, 'import open3d as o3d\n'), ((1754, 1839), 'open3d.geometry.KDTreeSearchParamHybrid', 'o3d.geometry.KDTreeSearchParamHybrid', ([], {'radius': 'features_radius', 'max_nn': 'features_nn'}), '(radius=features_radius, max_nn=features_nn\n )\n', (1790, 1839), True, 'import open3d as o3d\n'), ((1966, 1985), 'copy.deepcopy', 'copy.deepcopy', (['pcd0'], {}), '(pcd0)\n', (1979, 1985), False, 'import copy\n'), ((1987, 2006), 'copy.deepcopy', 'copy.deepcopy', (['pcd1'], {}), '(pcd1)\n', (2000, 2006), False, 'import copy\n'), ((2737, 2753), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (2745, 2753), True, 'import numpy as np\n'), ((2755, 2772), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (2763, 2772), True, 'import numpy as np\n'), ((2861, 2887), 'numpy.where', 'np.where', (['(scores <= thresh)'], {}), '(scores <= thresh)\n', (2869, 2887), True, 'import numpy as np\n'), ((3265, 3292), 'utils_o3d.display', 'utils.display', (['[pcd0, pcd1]'], {}), '([pcd0, pcd1])\n', (3278, 3292), True, 'import utils_o3d as utils\n'), ((3448, 3471), 'numpy.asarray', 'np.asarray', (['pcd0.points'], {}), '(pcd0.points)\n', (3458, 3471), True, 'import numpy as np\n'), ((3496, 3519), 'numpy.asarray', 'np.asarray', (['pcd1.points'], {}), '(pcd1.points)\n', (3506, 3519), True, 'import numpy as np\n'), ((3823, 3878), 'numpy.random.choice', 'np.random.choice', (['top_count', 'sample_size'], {'replace': '(False)'}), '(top_count, sample_size, replace=False)\n', (3839, 3878), True, 'import numpy as np\n'), ((3965, 4005), 'numpy.sum', 'np.sum', (['((points0_r - mean0) ** 2)'], {'axis': '(1)'}), '((points0_r - mean0) ** 2, axis=1)\n', (3971, 4005), True, 'import numpy as np\n'), ((4023, 4063), 'numpy.sum', 'np.sum', (['((points1_r - mean1) ** 2)'], {'axis': '(1)'}), '((points1_r - mean1) ** 2, axis=1)\n', (4029, 4063), True, 'import numpy as np\n'), ((4647, 4707), 'open3d.registration.TransformationEstimationPointToPoint', 'o3d.registration.TransformationEstimationPointToPoint', (['(False)'], {}), '(False)\n', (4700, 4707), True, 'import open3d as o3d\n'), ((4902, 4970), 'open3d.registration.RANSACConvergenceCriteria', 'o3d.registration.RANSACConvergenceCriteria', (['num_iters', 'num_val_iters'], {}), '(num_iters, num_val_iters)\n', (4944, 4970), True, 'import open3d as o3d\n'), ((5263, 5365), 'open3d.registration.FastGlobalRegistrationOption', 'o3d.registration.FastGlobalRegistrationOption', ([], {'maximum_correspondence_distance': 'distance_threshold'}), '(maximum_correspondence_distance\n =distance_threshold)\n', (5308, 5365), True, 'import open3d as o3d\n'), ((5713, 5768), 'open3d.registration.TransformationEstimationPointToPlane', 'o3d.registration.TransformationEstimationPointToPlane', ([], {}), '()\n', (5766, 5768), True, 'import open3d as o3d\n'), ((2037, 2058), 'numpy.array', 'np.array', (['pcd0.points'], {}), '(pcd0.points)\n', (2045, 2058), True, 'import numpy as np\n'), ((2096, 2117), 'numpy.array', 'np.array', (['pcd1.points'], {}), '(pcd1.points)\n', (2104, 2117), True, 'import numpy as np\n'), ((2158, 2181), 'numpy.array', 'np.array', (['feature0.data'], {}), '(feature0.data)\n', (2166, 2181), True, 'import numpy as np\n'), ((2222, 
2245), 'numpy.array', 'np.array', (['feature1.data'], {}), '(feature1.data)\n', (2230, 2245), True, 'import numpy as np\n'), ((2624, 2676), 'numpy.linalg.norm', 'np.linalg.norm', (['(pcd0.points[i] - pcd1.points[idx[0]])'], {}), '(pcd0.points[i] - pcd1.points[idx[0]])\n', (2638, 2676), True, 'import numpy as np\n'), ((4170, 4188), 'scipy.stats.mode', 'stats.mode', (['scales'], {}), '(scales)\n', (4180, 4188), False, 'from scipy import stats\n'), ((4726, 4786), 'open3d.registration.CorrespondenceCheckerBasedOnEdgeLength', 'o3d.registration.CorrespondenceCheckerBasedOnEdgeLength', (['(0.9)'], {}), '(0.9)\n', (4781, 4786), True, 'import open3d as o3d\n'), ((4800, 4873), 'open3d.registration.CorrespondenceCheckerBasedOnDistance', 'o3d.registration.CorrespondenceCheckerBasedOnDistance', (['distance_threshold'], {}), '(distance_threshold)\n', (4853, 4873), True, 'import open3d as o3d\n'), ((1087, 1107), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (1095, 1107), True, 'import numpy as np\n'), ((1283, 1308), 'numpy.array', 'np.array', (['pcd_down.points'], {}), '(pcd_down.points)\n', (1291, 1308), True, 'import numpy as np\n'), ((3058, 3072), 'numpy.min', 'np.min', (['scores'], {}), '(scores)\n', (3064, 3072), True, 'import numpy as np\n'), ((3074, 3088), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (3080, 3088), True, 'import numpy as np\n'), ((4088, 4103), 'numpy.mean', 'np.mean', (['score1'], {}), '(score1)\n', (4095, 4103), True, 'import numpy as np\n'), ((4106, 4121), 'numpy.mean', 'np.mean', (['score0'], {}), '(score0)\n', (4113, 4121), True, 'import numpy as np\n')] |
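A hedged sketch, not from the source, exercising the feature/matching/scale pipeline above on a synthetic cloud; it assumes the same utils_o3d helpers are importable, and the point count, voxel size and 2x scale factor are illustrative:
import numpy as np
import open3d as o3d
pts = np.random.rand(5000, 3) * 10.0
pcd_a = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(pts))
pcd_b = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(2.0 * pts))  # same cloud, scaled 2x
down_a, feat_a = compute_features(pcd_a, voxel_size=0.5)
down_b, feat_b = compute_features(pcd_b, voxel_size=0.5)
idx_a, idx_b = match_features(down_a, down_b, feat_a, feat_b)
scale = estimate_scale(down_a, down_b, idx_a, idx_b, sample_size=20)  # should land near 2.0 here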
import os
import imp
from setuptools import setup, find_packages
__version__ = imp.load_source(
"hsfs.version", os.path.join("hsfs", "version.py")
).__version__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="hsfs",
version=__version__,
install_requires=[
"pyhumps==1.6.1",
"requests",
"furl",
"boto3",
"pandas",
"numpy",
"pyjks",
"mock",
"avro==1.10.2",
"sqlalchemy",
"PyMySQL",
],
extras_require={
"dev": [
"pytest",
"flake8",
"black"],
"docs": [
"mkdocs==1.1.2",
"mkdocs-material==6.2.2",
"mike==0.5.5",
"sphinx==3.5.4",
"keras_autodoc @ git+https://[email protected]/moritzmeister/keras-autodoc@split-tags-properties",
"markdown-include"],
"hive": ["pyhopshive[thrift]"]
},
author="Logical Clocks AB",
author_email="<EMAIL>",
description="HSFS: An environment independent client to interact with the Hopsworks Featurestore",
license="Apache License 2.0",
keywords="Hopsworks, Feature Store, Spark, Machine Learning, MLOps, DataOps",
url="https://github.com/logicalclocks/feature-store-api",
download_url="https://github.com/logicalclocks/feature-store-api/releases/tag/"
+ __version__,
packages=find_packages(),
long_description=read("../README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
],
)
| [
"os.path.dirname",
"setuptools.find_packages",
"os.path.join"
]
| [((118, 152), 'os.path.join', 'os.path.join', (['"""hsfs"""', '"""version.py"""'], {}), "('hsfs', 'version.py')\n", (130, 152), False, 'import os\n'), ((1439, 1454), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1452, 1454), False, 'from setuptools import setup, find_packages\n'), ((215, 240), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (230, 240), False, 'import os\n')] |
# Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/21 04:59:55
# @File: sausolver.py
import random
import torch
from torch import nn
import copy
from module.Encoder.rnn_encoder import BasicRNNEncoder
from module.Embedder.basic_embedder import BasicEmbedder
from module.Decoder.tree_decoder import SARTreeDecoder
from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding
from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule
from module.Strategy.beam_search import TreeBeam
from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy
from loss.mse_loss import MSELoss
from utils.utils import copy_list
from utils.enum_type import NumMask, SpecialTokens
class SAUSolver(nn.Module):
"""
Reference:
Qin et al. "Semantically-Aligned Universal Tree-Structured Solver for Math Word Problems" in EMNLP 2020.
"""
def __init__(self, config, dataset):
super(SAUSolver, self).__init__()
# parameter
self.hidden_size = config["hidden_size"]
self.device = config["device"]
self.USE_CUDA = True if self.device == torch.device('cuda') else False
self.beam_size = config['beam_size']
self.max_out_len = config['max_output_len']
self.embedding_size = config["embedding_size"]
self.dropout_ratio = config["dropout_ratio"]
self.num_layers = config["num_layers"]
self.rnn_cell_type = config["rnn_cell_type"]
self.loss_weight = config['loss_weight']
self.vocab_size = len(dataset.in_idx2word)
self.out_symbol2idx = dataset.out_symbol2idx
self.out_idx2symbol = dataset.out_idx2symbol
generate_list = dataset.generate_list
self.generate_nums = [self.out_symbol2idx[symbol] for symbol in generate_list]
self.mask_list = NumMask.number
self.num_start = dataset.num_start
self.operator_nums = dataset.operator_nums
self.generate_size = len(generate_list)
self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN]
try:
self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token = None
try:
self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token = None
try:
self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token = None
# module
self.embedder = BasicEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio)
# self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio)
self.encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type,
self.dropout_ratio, batch_first=False)
#self.decoder = SARTreeDecoder(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio)
self.decoder = Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio)
self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size,
self.dropout_ratio)
self.merge = Merge(self.hidden_size, self.embedding_size, self.dropout_ratio)
self.sa = SemanticAlignmentModule(self.hidden_size,self.hidden_size,self.hidden_size)
self.loss1 = MaskedCrossEntropyLoss()
#
def calculate_loss(self, batch_data:dict) -> float:
"""Finish forward-propagating, calculating loss and back-propagation.
:param batch_data: one batch data.
:return: loss value.
batch_data should include keywords 'question', 'ques len', 'equation', 'equ len',
'num stack', 'num size', 'num pos'
"""
seq = torch.tensor(batch_data["question"]).to(self.device)
seq_length = torch.tensor(batch_data["ques len"]).long()
target = torch.tensor(batch_data["equation"]).to(self.device)
target_length = torch.LongTensor(batch_data["equ len"]).to(self.device)
nums_stack = copy.deepcopy(batch_data["num stack"])
num_size = batch_data["num size"]
num_pos = batch_data["num pos"]
generate_nums = self.generate_nums
num_start = self.num_start
# sequence mask for attention
unk = self.unk_token
loss = self.train_tree(seq, seq_length, target, target_length, nums_stack, num_size, generate_nums, num_pos, unk, num_start)
return loss
def model_test(self, batch_data:dict) -> tuple:
"""Model test.
        :param batch_data: one batch of data.
:return: predicted equation, target equation.
batch_data should include keywords 'question', 'ques len', 'equation',
'num stack', 'num pos', 'num list'
"""
seq = torch.tensor(batch_data["question"]).to(self.device)
seq_length = torch.tensor(batch_data["ques len"]).long()
target = torch.tensor(batch_data["equation"]).to(self.device)
nums_stack = copy.deepcopy(batch_data["num stack"])
num_pos = batch_data["num pos"]
num_list = batch_data['num list']
generate_nums = self.generate_nums
num_start = self.num_start
# sequence mask for attention
all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size,
self.max_out_len)
all_output = self.convert_idx2symbol(all_node_output, num_list[0], copy_list(nums_stack[0]))
targets = self.convert_idx2symbol(target[0], num_list[0], copy_list(nums_stack[0]))
return all_output, targets
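    # Hedged illustration, not from the source: a minimal batch_data layout that the two
    # methods above expect; every id and value below is invented for illustration only.
    # batch_data = {
    #     "question":  [[12, 45, 3, 3]],   # input token ids
    #     "ques len":  [4],
    #     "equation":  [[1, 6, 7]],        # output symbol ids
    #     "equ len":   [3],
    #     "num stack": [[]],
    #     "num size":  [2],
    #     "num pos":   [[2, 3]],           # positions of the numbers in the question
    #     "num list":  [["3", "5"]],       # needed by model_test only
    # }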
def train_tree(self,input_batch, input_length, target_batch, target_length, nums_stack_batch, num_size_batch, generate_nums, num_pos, unk, num_start,
english=False,var_nums=[], batch_first=False):
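        # Teacher-forced tree decoding: one prediction per target node; node scores and
        # (encoder, decoder) semantic-alignment pairs are accumulated, the combined loss
        # is back-propagated and returned as a float.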
# sequence mask for attention
seq_mask = []
max_len = max(input_length)
for i in input_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.ByteTensor(seq_mask)
num_mask = []
        max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums)  # max number-position count + constant numbers + unknown variables
for i in num_size_batch:
d = i + len(generate_nums) + len(var_nums)
num_mask.append([0] * d + [1] * (max_num_size - d))
        num_mask = torch.ByteTensor(num_mask)  # masks irrelevant number slots so the decoder cannot emit a wrong Nx
#unk = output_lang.word2index["UNK"]
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
input_var = input_batch.transpose(0, 1)
target = target_batch.transpose(0, 1)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)
batch_size = len(input_length)
if self.USE_CUDA:
input_var = input_var.cuda()
seq_mask = seq_mask.cuda()
padding_hidden = padding_hidden.cuda()
num_mask = num_mask.cuda()
# Zero gradients of both optimizers
# Run words through encoder
#encoder_outputs, problem_output = self.encoder(input_var, input_length)
seq_emb = self.embedder(input_var)
pade_outputs, _ = self.encoder(seq_emb, input_length)
problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)] # root embedding B x 1
max_target_length = max(target_length)
all_node_outputs = []
all_sa_outputs = []
# all_leafs = []
copy_num_len = [len(_) for _ in num_pos]
num_size = max(copy_num_len)
        # extract the embeddings of the numbers that appear in the problem
all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size,
self.encoder.hidden_size)
        embeddings_stacks = [[] for _ in range(batch_size)]  # B x 1, current tree state / subtree embedding / output
left_childs = [None for _ in range(batch_size)] # B x 1
for t in range(max_target_length):
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask)
# all_leafs.append(p_leaf)
outputs = torch.cat((op, num_score), 1)
all_node_outputs.append(outputs)
target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start,
unk)
target[t] = target_t
if self.USE_CUDA:
generate_input = generate_input.cuda()
left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
left_childs = []
for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1),
node_stacks, target[t].tolist(), embeddings_stacks):
if len(node_stack) != 0:
node = node_stack.pop()
else:
left_childs.append(None)
continue
                # unknowns are treated as numbers, SEP is treated as an operator
                if i < num_start:  # non-number (operator) token
node_stack.append(TreeNode(r))
node_stack.append(TreeNode(l, left_flag=True))
o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False))
# print(o[-1].embedding.size())
# print(encoder_outputs[idx].size())
                else:  # number token
current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
while len(o) > 0 and o[-1].terminal:
sub_stree = o.pop()
op = o.pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num) # Subtree embedding
if batch_first:
encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx])
else:
temp_encoder_outputs = encoder_outputs.transpose(0, 1)
encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx])
all_sa_outputs.append((encoder_mapping, decoder_mapping))
o.append(TreeEmbedding(current_num, terminal=True))
if len(o) > 0 and o[-1].terminal:
left_childs.append(o[-1].embedding)
else:
left_childs.append(None)
# all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2
all_node_outputs = torch.stack(all_node_outputs, dim=1) # B x S x N
target = target.transpose(0, 1).contiguous() # B x S
if self.USE_CUDA:
# all_leafs = all_leafs.cuda()
all_node_outputs = all_node_outputs.cuda()
target = target.cuda()
new_all_sa_outputs = []
for sa_pair in all_sa_outputs:
new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda()))
all_sa_outputs = new_all_sa_outputs
# target_length = torch.LongTensor(target_length).cuda()
else:
pass
# target_length = torch.LongTensor(target_length)
semantic_alignment_loss = nn.MSELoss()
total_semanti_alognment_loss = 0
sa_len = len(all_sa_outputs)
for sa_pair in all_sa_outputs:
total_semanti_alognment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1])
# print(total_semanti_alognment_loss)
total_semanti_alognment_loss = total_semanti_alognment_loss / sa_len
# print(total_semanti_alognment_loss)
# op_target = target < num_start
# loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
loss = masked_cross_entropy(all_node_outputs, target,target_length) + 0.01 * total_semanti_alognment_loss
# loss = loss_0 + loss_1
loss.backward()
# clip the grad
# torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5)
# torch.nn.utils.clip_grad_norm_(predict.parameters(), 5)
# torch.nn.utils.clip_grad_norm_(generate.parameters(), 5)
# Update parameters with optimizers
return loss.item() # , loss_0.item(), loss_1.item()
def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30):
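        # beam-search decoding for a single problem (batch size 1); returns the best-scoring token sequence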
seq_mask = torch.BoolTensor(1, input_length).fill_(0)
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
input_var = input_batch.transpose(0, 1)
num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
batch_size = 1
if self.USE_CUDA:
input_var = input_var.cuda()
seq_mask = seq_mask.cuda()
padding_hidden = padding_hidden.cuda()
num_mask = num_mask.cuda()
# Run words through encoder
seq_emb = self.embedder(input_var)
pade_outputs, _ = self.encoder(seq_emb, input_length)
problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
num_size = len(num_pos[0])
all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size,
self.hidden_size)
# B x P x N
embeddings_stacks = [[] for _ in range(batch_size)]
left_childs = [None for _ in range(batch_size)]
beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
for t in range(max_length):
current_beams = []
while len(beams) > 0:
b = beams.pop()
if len(b.node_stack[0]) == 0:
current_beams.append(b)
continue
# left_childs = torch.stack(b.left_childs)
left_childs = b.left_childs
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack,
left_childs,
encoder_outputs,
all_nums_encoder_outputs,
padding_hidden,
seq_mask,
num_mask)
out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
# out_score = p_leaf * out_score
topv, topi = out_score.topk(beam_size)
for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
current_node_stack = copy_list(b.node_stack)
current_left_childs = []
current_embeddings_stacks = copy_list(b.embedding_stack)
current_out = copy.deepcopy(b.out)
out_token = int(ti)
current_out.append(out_token)
node = current_node_stack[0].pop()
if out_token < num_start:
generate_input = torch.LongTensor([out_token])
if self.USE_CUDA:
generate_input = generate_input.cuda()
left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input,
current_context)
current_node_stack[0].append(TreeNode(right_child))
current_node_stack[0].append(TreeNode(left_child, left_flag=True))
current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
else:
current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
sub_stree = current_embeddings_stacks[0].pop()
op = current_embeddings_stacks[0].pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
else:
current_left_childs.append(None)
current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks,
current_left_childs, current_out))
beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
beams = beams[:beam_size]
flag = True
for b in beams:
if len(b.node_stack[0]) != 0:
flag = False
if flag:
break
return beams[0].out
def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):
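        # gather the encoder hidden state at every number position, pad each example to num_size and zero out the padded slots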
indices = list()
sen_len = encoder_outputs.size(0)
masked_index = []
temp_1 = [1 for _ in range(hidden_size)]
temp_0 = [0 for _ in range(hidden_size)]
for b in range(batch_size):
for i in num_pos[b]:
indices.append(i + b * sen_len)
masked_index.append(temp_0)
indices += [0 for _ in range(len(num_pos[b]), num_size)]
masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
indices = torch.LongTensor(indices)
masked_index = torch.BoolTensor(masked_index)
masked_index = masked_index.view(batch_size, num_size, hidden_size)
if self.USE_CUDA:
indices = indices.cuda()
masked_index = masked_index.cuda()
all_outputs = encoder_outputs.transpose(0, 1).contiguous()
all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H
all_num = all_embedding.index_select(0, indices)
all_num = all_num.view(batch_size, num_size, hidden_size)
return all_num.masked_fill_(masked_index, 0.0)
def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
        # when the decoder input is a copied num but the num has two positions, choose the max
target_input = copy.deepcopy(target)
for i in range(len(target)):
if target[i] == unk:
num_stack = nums_stack_batch[i].pop()
max_score = -float("1e12")
for num in num_stack:
if decoder_output[i, num_start + num] > max_score:
target[i] = num + num_start
max_score = decoder_output[i, num_start + num]
if target_input[i] >= num_start:
target_input[i] = 0
return torch.LongTensor(target), torch.LongTensor(target_input)
def mse_loss(self, outputs, targets, mask=None):
# outputs : [batch_size,output_len,hidden_size]
# targets : [batch_size,output_len,hidden_size]
# mask : [batch_size,output_len]
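        # per-position L2 distance between outputs and targets, averaged over unmasked positions and summed over the batch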
mask = mask.to(self.device)
x = torch.sqrt(torch.sum(torch.square((outputs - targets)), dim=-1)) # [batch_size,output_len]
y = torch.sum(x * mask, dim=-1) / torch.sum(mask, dim=-1) # [batch_size]
return torch.sum(y)
def convert_idx2symbol(self, output, num_list, num_stack):
# batch_size=output.size(0)
'''batch_size=1'''
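        # map predicted token ids back to symbols, replacing NUM placeholders with numbers from the problem's num_list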
seq_len = len(output)
num_len = len(num_list)
output_list = []
res = []
for s_i in range(seq_len):
idx = output[s_i]
if idx in [self.out_sos_token, self.out_eos_token, self.out_pad_token]:
break
symbol = self.out_idx2symbol[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res = []
break
res.append(num_list[num_idx])
elif symbol == SpecialTokens.UNK_TOKEN:
try:
pos_list = num_stack.pop()
c = num_list[pos_list[0]]
res.append(c)
except:
return None
else:
res.append(symbol)
output_list.append(res)
return output_list
| [
"module.Layer.tree_layers.TreeEmbedding",
"torch.LongTensor",
"torch.square",
"torch.nn.MSELoss",
"torch.sum",
"loss.masked_cross_entropy_loss.MaskedCrossEntropyLoss",
"copy.deepcopy",
"torch.ByteTensor",
"utils.utils.copy_list",
"module.Encoder.rnn_encoder.BasicRNNEncoder",
"module.Strategy.beam_search.TreeBeam",
"module.Layer.tree_layers.SemanticAlignmentModule",
"module.Embedder.basic_embedder.BasicEmbedder",
"torch.BoolTensor",
"module.Layer.tree_layers.Merge",
"module.Layer.tree_layers.GenerateNode",
"torch.cat",
"torch.device",
"torch.stack",
"module.Layer.tree_layers.TreeNode",
"torch.tensor",
"loss.masked_cross_entropy_loss.masked_cross_entropy",
"module.Layer.tree_layers.Prediction"
]
| [((2639, 2710), 'module.Embedder.basic_embedder.BasicEmbedder', 'BasicEmbedder', (['self.vocab_size', 'self.embedding_size', 'self.dropout_ratio'], {}), '(self.vocab_size, self.embedding_size, self.dropout_ratio)\n', (2652, 2710), False, 'from module.Embedder.basic_embedder import BasicEmbedder\n'), ((2873, 3007), 'module.Encoder.rnn_encoder.BasicRNNEncoder', 'BasicRNNEncoder', (['self.embedding_size', 'self.hidden_size', 'self.num_layers', 'self.rnn_cell_type', 'self.dropout_ratio'], {'batch_first': '(False)'}), '(self.embedding_size, self.hidden_size, self.num_layers,\n self.rnn_cell_type, self.dropout_ratio, batch_first=False)\n', (2888, 3007), False, 'from module.Encoder.rnn_encoder import BasicRNNEncoder\n'), ((3183, 3276), 'module.Layer.tree_layers.Prediction', 'Prediction', (['self.hidden_size', 'self.operator_nums', 'self.generate_size', 'self.dropout_ratio'], {}), '(self.hidden_size, self.operator_nums, self.generate_size, self.\n dropout_ratio)\n', (3193, 3276), False, 'from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule\n'), ((3299, 3394), 'module.Layer.tree_layers.GenerateNode', 'GenerateNode', (['self.hidden_size', 'self.operator_nums', 'self.embedding_size', 'self.dropout_ratio'], {}), '(self.hidden_size, self.operator_nums, self.embedding_size,\n self.dropout_ratio)\n', (3311, 3394), False, 'from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule\n'), ((3455, 3519), 'module.Layer.tree_layers.Merge', 'Merge', (['self.hidden_size', 'self.embedding_size', 'self.dropout_ratio'], {}), '(self.hidden_size, self.embedding_size, self.dropout_ratio)\n', (3460, 3519), False, 'from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule\n'), ((3538, 3615), 'module.Layer.tree_layers.SemanticAlignmentModule', 'SemanticAlignmentModule', (['self.hidden_size', 'self.hidden_size', 'self.hidden_size'], {}), '(self.hidden_size, self.hidden_size, self.hidden_size)\n', (3561, 3615), False, 'from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule\n'), ((3635, 3659), 'loss.masked_cross_entropy_loss.MaskedCrossEntropyLoss', 'MaskedCrossEntropyLoss', ([], {}), '()\n', (3657, 3659), False, 'from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy\n'), ((4327, 4365), 'copy.deepcopy', 'copy.deepcopy', (["batch_data['num stack']"], {}), "(batch_data['num stack'])\n", (4340, 4365), False, 'import copy\n'), ((5278, 5316), 'copy.deepcopy', 'copy.deepcopy', (["batch_data['num stack']"], {}), "(batch_data['num stack'])\n", (5291, 5316), False, 'import copy\n'), ((6404, 6430), 'torch.ByteTensor', 'torch.ByteTensor', (['seq_mask'], {}), '(seq_mask)\n', (6420, 6430), False, 'import torch\n'), ((6731, 6757), 'torch.ByteTensor', 'torch.ByteTensor', (['num_mask'], {}), '(num_mask)\n', (6747, 6757), False, 'import torch\n'), ((11450, 11486), 'torch.stack', 'torch.stack', (['all_node_outputs'], {'dim': '(1)'}), '(all_node_outputs, dim=1)\n', (11461, 11486), False, 'import torch\n'), ((12129, 12141), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (12139, 12141), False, 'from torch import nn\n'), ((19478, 19503), 'torch.LongTensor', 'torch.LongTensor', (['indices'], {}), '(indices)\n', (19494, 19503), False, 'import torch\n'), ((19527, 19557), 'torch.BoolTensor', 'torch.BoolTensor', (['masked_index'], {}), '(masked_index)\n', (19543, 19557), False, 'import torch\n'), ((20290, 20311), 'copy.deepcopy', 'copy.deepcopy', (['target'], 
{}), '(target)\n', (20303, 20311), False, 'import copy\n'), ((21317, 21329), 'torch.sum', 'torch.sum', (['y'], {}), '(y)\n', (21326, 21329), False, 'import torch\n'), ((5767, 5791), 'utils.utils.copy_list', 'copy_list', (['nums_stack[0]'], {}), '(nums_stack[0])\n', (5776, 5791), False, 'from utils.utils import copy_list\n'), ((5859, 5883), 'utils.utils.copy_list', 'copy_list', (['nums_stack[0]'], {}), '(nums_stack[0])\n', (5868, 5883), False, 'from utils.utils import copy_list\n'), ((8958, 8987), 'torch.cat', 'torch.cat', (['(op, num_score)', '(1)'], {}), '((op, num_score), 1)\n', (8967, 8987), False, 'import torch\n'), ((12675, 12736), 'loss.masked_cross_entropy_loss.masked_cross_entropy', 'masked_cross_entropy', (['all_node_outputs', 'target', 'target_length'], {}), '(all_node_outputs, target, target_length)\n', (12695, 12736), False, 'from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy\n'), ((14771, 14833), 'module.Strategy.beam_search.TreeBeam', 'TreeBeam', (['(0.0)', 'node_stacks', 'embeddings_stacks', 'left_childs', '[]'], {}), '(0.0, node_stacks, embeddings_stacks, left_childs, [])\n', (14779, 14833), False, 'from module.Strategy.beam_search import TreeBeam\n'), ((20807, 20831), 'torch.LongTensor', 'torch.LongTensor', (['target'], {}), '(target)\n', (20823, 20831), False, 'import torch\n'), ((20833, 20863), 'torch.LongTensor', 'torch.LongTensor', (['target_input'], {}), '(target_input)\n', (20849, 20863), False, 'import torch\n'), ((21232, 21259), 'torch.sum', 'torch.sum', (['(x * mask)'], {'dim': '(-1)'}), '(x * mask, dim=-1)\n', (21241, 21259), False, 'import torch\n'), ((21262, 21285), 'torch.sum', 'torch.sum', (['mask'], {'dim': '(-1)'}), '(mask, dim=-1)\n', (21271, 21285), False, 'import torch\n'), ((1233, 1253), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1245, 1253), False, 'import torch\n'), ((4038, 4074), 'torch.tensor', 'torch.tensor', (["batch_data['question']"], {}), "(batch_data['question'])\n", (4050, 4074), False, 'import torch\n'), ((4112, 4148), 'torch.tensor', 'torch.tensor', (["batch_data['ques len']"], {}), "(batch_data['ques len'])\n", (4124, 4148), False, 'import torch\n'), ((4173, 4209), 'torch.tensor', 'torch.tensor', (["batch_data['equation']"], {}), "(batch_data['equation'])\n", (4185, 4209), False, 'import torch\n'), ((4250, 4289), 'torch.LongTensor', 'torch.LongTensor', (["batch_data['equ len']"], {}), "(batch_data['equ len'])\n", (4266, 4289), False, 'import torch\n'), ((5069, 5105), 'torch.tensor', 'torch.tensor', (["batch_data['question']"], {}), "(batch_data['question'])\n", (5081, 5105), False, 'import torch\n'), ((5143, 5179), 'torch.tensor', 'torch.tensor', (["batch_data['ques len']"], {}), "(batch_data['ques len'])\n", (5155, 5179), False, 'import torch\n'), ((5204, 5240), 'torch.tensor', 'torch.tensor', (["batch_data['equation']"], {}), "(batch_data['equation'])\n", (5216, 5240), False, 'import torch\n'), ((7911, 7922), 'module.Layer.tree_layers.TreeNode', 'TreeNode', (['_'], {}), '(_)\n', (7919, 7922), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n'), ((13299, 13332), 'torch.BoolTensor', 'torch.BoolTensor', (['(1)', 'input_length'], {}), '(1, input_length)\n', (13315, 13332), False, 'import torch\n'), ((14319, 14330), 'module.Layer.tree_layers.TreeNode', 'TreeNode', (['_'], {}), '(_)\n', (14327, 14330), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n'), ((21149, 21180), 
'torch.square', 'torch.square', (['(outputs - targets)'], {}), '(outputs - targets)\n', (21161, 21180), False, 'import torch\n'), ((16102, 16135), 'torch.cat', 'torch.cat', (['(op, num_score)'], {'dim': '(1)'}), '((op, num_score), dim=1)\n', (16111, 16135), False, 'import torch\n'), ((16371, 16394), 'utils.utils.copy_list', 'copy_list', (['b.node_stack'], {}), '(b.node_stack)\n', (16380, 16394), False, 'from utils.utils import copy_list\n'), ((16488, 16516), 'utils.utils.copy_list', 'copy_list', (['b.embedding_stack'], {}), '(b.embedding_stack)\n', (16497, 16516), False, 'from utils.utils import copy_list\n'), ((16551, 16571), 'copy.deepcopy', 'copy.deepcopy', (['b.out'], {}), '(b.out)\n', (16564, 16571), False, 'import copy\n'), ((10005, 10016), 'module.Layer.tree_layers.TreeNode', 'TreeNode', (['r'], {}), '(r)\n', (10013, 10016), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n'), ((10056, 10083), 'module.Layer.tree_layers.TreeNode', 'TreeNode', (['l'], {'left_flag': '(True)'}), '(l, left_flag=True)\n', (10064, 10083), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n'), ((11139, 11180), 'module.Layer.tree_layers.TreeEmbedding', 'TreeEmbedding', (['current_num'], {'terminal': '(True)'}), '(current_num, terminal=True)\n', (11152, 11180), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n'), ((16827, 16856), 'torch.LongTensor', 'torch.LongTensor', (['[out_token]'], {}), '([out_token])\n', (16843, 16856), False, 'import torch\n'), ((17237, 17258), 'module.Layer.tree_layers.TreeNode', 'TreeNode', (['right_child'], {}), '(right_child)\n', (17245, 17258), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n'), ((17313, 17349), 'module.Layer.tree_layers.TreeNode', 'TreeNode', (['left_child'], {'left_flag': '(True)'}), '(left_child, left_flag=True)\n', (17321, 17349), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n'), ((18009, 18041), 'module.Layer.tree_layers.TreeEmbedding', 'TreeEmbedding', (['current_num', '(True)'], {}), '(current_num, True)\n', (18022, 18041), False, 'from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding\n')] |
import time
import srt
import re
import datetime
from mqtthandler import MQTTHandler
INIT_STATUS={
"video": {
"title": None,
"series_title": None,
"season": None,
"episode": None
},
"time": None,
"events": None
}
class SubtitleHandler:
subtitles = []
phrases = []
def __init__(self, broker):
self.mqtt = MQTTHandler(broker)
def parseSRT(self, srt_filename):
f=open(srt_filename, "r")
subtitle_generate = srt.parse(f.read())
f.close()
self.subtitles = list(subtitle_generate)
return self.subtitles
def parsePhrases(self, phrase_filename):
f=open(phrase_filename, "r")
lines = f.readlines()
for line in lines:
phrase = line.rstrip("\n\r").split("/")
self.phrases.append(phrase)
return self.phrases
def isPhraseInLine(self,phrase, sub, content):
        sub_line = re.sub(r'[^A-Za-z0-9\s]+', '', str(content)).lower()
        phrase = re.sub(r'[^A-Za-z0-9\s]+', '', str(phrase)).lower()
count = 0
while bool(re.search(phrase, sub_line)):
count += 1
sub_line = sub_line.replace(phrase, '', 1)
return count
def getEventTime(self,sub):
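        # return the midpoint of the subtitle's display interval as an integer number of seconds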
middle = sub.end - sub.start
between_sec = datetime.timedelta.total_seconds(middle) / 2
sec = between_sec + datetime.timedelta.total_seconds(sub.start)
return int(sec)
def matchEventToMovie(self, movie, subtitles, phrases, time_offset):
global INIT_STATUS
status = INIT_STATUS
status["video"]["title"] = movie
#TODO determine how to set up phrase data
for sub in subtitles:
c = sub.content.replace('\n', ' ')
c = c.split(" ")
firstpart, secondpart = " ".join(c[:len(c)//2]), " ".join(c[len(c)//2:])
mult = 0
for phrase in phrases:
line = phrase[0]
events = phrase[1]
mult += self.isPhraseInLine(line,sub,sub.content)
#f = self.isPhraseInLine(line,sub, firstpart)
#s = self.isPhraseInLine(line,sub, secondpart)
#if f + s == 0:
# mult += self.isPhraseInLine(line,sub,sub.content )
#else:
# mult += f+s
                ## NOTE TO FUTURE SELF
                # this currently adds the number of events over the entire subtitle
                # to improve it, split each subtitle into two parts:
                # the first part is the half with the first bit of text and has the correct time-to-event for that half,
                # the second half has the correct time-to-event for the second half
                # you could then use separate if statements, each reaching its own sendMessage() call
            if mult > 0:  # won't work properly if events is greater than 1
status["time"] = self.getEventTime(sub) + time_offset
status["events"] = int(events) * mult
self.sendMessage(status)
#print(sub.content)
def sendMessage(self, msg):
self.mqtt.send(msg)
print(msg)
return msg
def isDone(self):
        return True
 | [
"mqtthandler.MQTTHandler",
"datetime.timedelta.total_seconds",
"re.search"
]
| [((380, 399), 'mqtthandler.MQTTHandler', 'MQTTHandler', (['broker'], {}), '(broker)\n', (391, 399), False, 'from mqtthandler import MQTTHandler\n'), ((1112, 1139), 're.search', 're.search', (['phrase', 'sub_line'], {}), '(phrase, sub_line)\n', (1121, 1139), False, 'import re\n'), ((1333, 1373), 'datetime.timedelta.total_seconds', 'datetime.timedelta.total_seconds', (['middle'], {}), '(middle)\n', (1365, 1373), False, 'import datetime\n'), ((1406, 1449), 'datetime.timedelta.total_seconds', 'datetime.timedelta.total_seconds', (['sub.start'], {}), '(sub.start)\n', (1438, 1449), False, 'import datetime\n')] |
#!/usr/bin/env python
import os
import numpy as np
import pandas as pd
os.getcwd()
# Request for the filename
# Current version of this script works only with TSV type files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()
# To create a proper dataframe, load the file with numpy first,
# then convert it to a pandas DataFrame
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)
# Obtains first row to identify header is string or numeric
headers = filenameData.iloc[0]
try:
pd.to_numeric(headers)
except:
filenameData = pd.DataFrame(filenameData.values[1:], columns=headers)
# Changes strings to numbers (self identifies for float or integer)
filenameData = filenameData.apply(pd.to_numeric)
# Obtains the mean and standard deviation of the columns
listMean = filenameData.mean()
listStd = filenameData.std()
print(filenameData)
# Prints out the results
print('Mean for each column:')
for idx in filenameData.columns:
print(idx,':',listMean[idx])
print()
print('Standard deviation for each column:')
for idx in filenameData.columns:
print(idx,':',listStd[idx])
| [
"pandas.DataFrame",
"numpy.genfromtxt",
"pandas.to_numeric",
"os.getcwd"
]
| [((73, 84), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (82, 84), False, 'import os\n'), ((375, 415), 'numpy.genfromtxt', 'np.genfromtxt', (['mainFilename'], {'dtype': '"""str"""'}), "(mainFilename, dtype='str')\n", (388, 415), True, 'import numpy as np\n'), ((432, 458), 'pandas.DataFrame', 'pd.DataFrame', (['filenameData'], {}), '(filenameData)\n', (444, 458), True, 'import pandas as pd\n'), ((561, 583), 'pandas.to_numeric', 'pd.to_numeric', (['headers'], {}), '(headers)\n', (574, 583), True, 'import pandas as pd\n'), ((611, 665), 'pandas.DataFrame', 'pd.DataFrame', (['filenameData.values[1:]'], {'columns': 'headers'}), '(filenameData.values[1:], columns=headers)\n', (623, 665), True, 'import pandas as pd\n')] |
from donkeycar.parts.web_controller.web import WebSocketCalibrateAPI
from functools import partial
from tornado import testing
import tornado.websocket
import tornado.web
import tornado.ioloop
import json
from unittest.mock import Mock
from donkeycar.parts.actuator import PWMSteering, PWMThrottle
class WebSocketCalibrateTest(testing.AsyncHTTPTestCase):
"""
Example of WebSocket usage as a client
in AsyncHTTPTestCase-based unit tests.
"""
def get_app(self):
app = tornado.web.Application([('/', WebSocketCalibrateAPI)])
self.app = app
return app
def get_ws_url(self):
return "ws://localhost:" + str(self.get_http_port()) + "/"
@tornado.testing.gen_test
def test_calibrate_servo_esc_1(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = dict()
self.app.drive_train['steering'] = Mock()
self.app.drive_train_type = "SERVO_ESC"
data = {"config": {"STEERING_LEFT_PWM": 444}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train['steering'].left_pulse == 444
assert isinstance(self.app.drive_train['steering'].right_pulse, Mock)
@tornado.testing.gen_test
def test_calibrate_servo_esc_2(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = dict()
self.app.drive_train['steering'] = Mock()
self.app.drive_train_type = "SERVO_ESC"
data = {"config": {"STEERING_RIGHT_PWM": 555}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train['steering'].right_pulse == 555
assert isinstance(self.app.drive_train['steering'].left_pulse, Mock)
@tornado.testing.gen_test
def test_calibrate_servo_esc_3(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = dict()
self.app.drive_train['throttle'] = Mock()
self.app.drive_train_type = "SERVO_ESC"
data = {"config": {"THROTTLE_FORWARD_PWM": 666}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train['throttle'].max_pulse == 666
assert isinstance(self.app.drive_train['throttle'].min_pulse, Mock)
@tornado.testing.gen_test
def test_calibrate_mm1(self):
ws_client = yield tornado.websocket.websocket_connect(self.get_ws_url())
# Now we can run a test on the WebSocket.
self.app.drive_train = Mock()
self.app.drive_train_type = "MM1"
data = {"config": {"MM1_STEERING_MID": 1234}}
yield ws_client.write_message(json.dumps(data))
yield ws_client.close()
assert self.app.drive_train.STEERING_MID == 1234
| [
"json.dumps",
"unittest.mock.Mock"
]
| [((978, 984), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (982, 984), False, 'from unittest.mock import Mock\n'), ((1607, 1613), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (1611, 1613), False, 'from unittest.mock import Mock\n'), ((2237, 2243), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (2241, 2243), False, 'from unittest.mock import Mock\n'), ((2808, 2814), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (2812, 2814), False, 'from unittest.mock import Mock\n'), ((1126, 1142), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1136, 1142), False, 'import json\n'), ((1756, 1772), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1766, 1772), False, 'import json\n'), ((2388, 2404), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2398, 2404), False, 'import json\n'), ((2949, 2965), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (2959, 2965), False, 'import json\n')] |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google Stackdriver Logging API."""
import os
try:
from google.cloud.gapic.logging.v2.config_service_v2_api import (
ConfigServiceV2Api as GeneratedSinksAPI)
from google.cloud.gapic.logging.v2.logging_service_v2_api import (
LoggingServiceV2Api as GeneratedLoggingAPI)
from google.cloud.gapic.logging.v2.metrics_service_v2_api import (
MetricsServiceV2Api as GeneratedMetricsAPI)
from google.cloud.logging._gax import _LoggingAPI as GAXLoggingAPI
from google.cloud.logging._gax import _MetricsAPI as GAXMetricsAPI
from google.cloud.logging._gax import _SinksAPI as GAXSinksAPI
except ImportError: # pragma: NO COVER
_HAVE_GAX = False
GeneratedLoggingAPI = GAXLoggingAPI = None
GeneratedMetricsAPI = GAXMetricsAPI = None
GeneratedSinksAPI = GAXSinksAPI = None
else:
_HAVE_GAX = True
from google.cloud.client import JSONClient
from google.cloud.environment_vars import DISABLE_GRPC
from google.cloud.logging.connection import Connection
from google.cloud.logging.connection import _LoggingAPI as JSONLoggingAPI
from google.cloud.logging.connection import _MetricsAPI as JSONMetricsAPI
from google.cloud.logging.connection import _SinksAPI as JSONSinksAPI
from google.cloud.logging.entries import ProtobufEntry
from google.cloud.logging.entries import StructEntry
from google.cloud.logging.entries import TextEntry
from google.cloud.logging.logger import Logger
from google.cloud.logging.metric import Metric
from google.cloud.logging.sink import Sink
_DISABLE_GAX = os.getenv(DISABLE_GRPC, False)
_USE_GAX = _HAVE_GAX and not _DISABLE_GAX
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of.
If not passed, falls back to the default inferred
from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
_logging_api = _sinks_api = _metrics_api = None
@property
def logging_api(self):
"""Helper for logging-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs
"""
if self._logging_api is None:
if _USE_GAX:
generated = GeneratedLoggingAPI()
self._logging_api = GAXLoggingAPI(generated)
else:
self._logging_api = JSONLoggingAPI(self.connection)
return self._logging_api
@property
def sinks_api(self):
"""Helper for log sink-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks
"""
if self._sinks_api is None:
if _USE_GAX:
generated = GeneratedSinksAPI()
self._sinks_api = GAXSinksAPI(generated)
else:
self._sinks_api = JSONSinksAPI(self.connection)
return self._sinks_api
@property
def metrics_api(self):
"""Helper for log metric-related API calls.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics
"""
if self._metrics_api is None:
if _USE_GAX:
generated = GeneratedMetricsAPI()
self._metrics_api = GAXMetricsAPI(generated)
else:
self._metrics_api = JSONMetricsAPI(self.connection)
return self._metrics_api
def logger(self, name):
"""Creates a logger bound to the current client.
:type name: str
:param name: the name of the logger to be constructed.
:rtype: :class:`google.cloud.logging.logger.Logger`
:returns: Logger created with the current client.
"""
return Logger(name, client=self)
def _entry_from_resource(self, resource, loggers):
"""Detect correct entry type from resource and instantiate.
:type resource: dict
:param resource: one entry resource from API response
:type loggers: dict or None
:param loggers: A mapping of logger fullnames -> loggers. If not
passed, the entry will have a newly-created logger.
:rtype: One of:
:class:`google.cloud.logging.entries.TextEntry`,
:class:`google.cloud.logging.entries.StructEntry`,
:class:`google.cloud.logging.entries.ProtobufEntry`
:returns: the entry instance, constructed via the resource
"""
if 'textPayload' in resource:
return TextEntry.from_api_repr(resource, self, loggers)
elif 'jsonPayload' in resource:
return StructEntry.from_api_repr(resource, self, loggers)
elif 'protoPayload' in resource:
return ProtobufEntry.from_api_repr(resource, self, loggers)
raise ValueError('Cannot parse log entry resource')
def list_entries(self, projects=None, filter_=None, order_by=None,
page_size=None, page_token=None):
"""Return a page of log entries.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the client.
:type filter_: str
:param filter_: a filter expression. See:
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: str
:param order_by: One of :data:`~google.cloud.logging.ASCENDING`
or :data:`~google.cloud.logging.DESCENDING`.
:type page_size: int
:param page_size: maximum number of entries to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of entries. If not
passed, the API will return the first page of
entries.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.logging.entry.TextEntry`, plus a
"next page token" string: if not None, indicates that
more entries can be retrieved with another call (pass that
value as ``page_token``).
"""
if projects is None:
projects = [self.project]
resources, token = self.logging_api.list_entries(
projects=projects, filter_=filter_, order_by=order_by,
page_size=page_size, page_token=page_token)
loggers = {}
entries = [self._entry_from_resource(resource, loggers)
for resource in resources]
return entries, token
def sink(self, name, filter_=None, destination=None):
"""Creates a sink bound to the current client.
:type name: str
:param name: the name of the sink to be constructed.
:type filter_: str
:param filter_: (optional) the advanced logs filter expression
defining the entries exported by the sink. If not
passed, the instance should already exist, to be
refreshed via :meth:`Sink.reload`.
:type destination: str
:param destination: destination URI for the entries exported by
the sink. If not passed, the instance should
already exist, to be refreshed via
:meth:`Sink.reload`.
:rtype: :class:`google.cloud.logging.sink.Sink`
:returns: Sink created with the current client.
"""
return Sink(name, filter_, destination, client=self)
def list_sinks(self, page_size=None, page_token=None):
"""List sinks for the project associated with this client.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list
:type page_size: int
:param page_size: maximum number of sinks to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.logging.sink.Sink`, plus a
"next page token" string: if not None, indicates that
more sinks can be retrieved with another call (pass that
value as ``page_token``).
"""
resources, token = self.sinks_api.list_sinks(
self.project, page_size, page_token)
sinks = [Sink.from_api_repr(resource, self)
for resource in resources]
return sinks, token
def metric(self, name, filter_=None, description=''):
"""Creates a metric bound to the current client.
:type name: str
:param name: the name of the metric to be constructed.
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries tracked by the metric. If not
passed, the instance should already exist, to be
refreshed via :meth:`Metric.reload`.
:type description: str
:param description: the description of the metric to be constructed.
If not passed, the instance should already exist,
to be refreshed via :meth:`Metric.reload`.
:rtype: :class:`google.cloud.logging.metric.Metric`
:returns: Metric created with the current client.
"""
return Metric(name, filter_, client=self, description=description)
def list_metrics(self, page_size=None, page_token=None):
"""List metrics for the project associated with this client.
See:
https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list
:type page_size: int
:param page_size: maximum number of metrics to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: tuple, (list, str)
:returns: list of :class:`google.cloud.logging.metric.Metric`, plus a
"next page token" string: if not None, indicates that
more metrics can be retrieved with another call (pass that
value as ``page_token``).
"""
resources, token = self.metrics_api.list_metrics(
self.project, page_size, page_token)
metrics = [Metric.from_api_repr(resource, self)
for resource in resources]
return metrics, token
| [
"google.cloud.logging._gax._MetricsAPI",
"google.cloud.gapic.logging.v2.config_service_v2_api.ConfigServiceV2Api",
"os.getenv",
"google.cloud.logging.entries.TextEntry.from_api_repr",
"google.cloud.logging.connection._LoggingAPI",
"google.cloud.logging.connection._SinksAPI",
"google.cloud.logging.connection._MetricsAPI",
"google.cloud.logging._gax._SinksAPI",
"google.cloud.gapic.logging.v2.metrics_service_v2_api.MetricsServiceV2Api",
"google.cloud.logging.entries.StructEntry.from_api_repr",
"google.cloud.gapic.logging.v2.logging_service_v2_api.LoggingServiceV2Api",
"google.cloud.logging._gax._LoggingAPI",
"google.cloud.logging.sink.Sink.from_api_repr",
"google.cloud.logging.metric.Metric.from_api_repr",
"google.cloud.logging.logger.Logger",
"google.cloud.logging.sink.Sink",
"google.cloud.logging.metric.Metric",
"google.cloud.logging.entries.ProtobufEntry.from_api_repr"
]
| [((2148, 2178), 'os.getenv', 'os.getenv', (['DISABLE_GRPC', '(False)'], {}), '(DISABLE_GRPC, False)\n', (2157, 2178), False, 'import os\n'), ((5172, 5197), 'google.cloud.logging.logger.Logger', 'Logger', (['name'], {'client': 'self'}), '(name, client=self)\n', (5178, 5197), False, 'from google.cloud.logging.logger import Logger\n'), ((9133, 9178), 'google.cloud.logging.sink.Sink', 'Sink', (['name', 'filter_', 'destination'], {'client': 'self'}), '(name, filter_, destination, client=self)\n', (9137, 9178), False, 'from google.cloud.logging.sink import Sink\n'), ((11246, 11305), 'google.cloud.logging.metric.Metric', 'Metric', (['name', 'filter_'], {'client': 'self', 'description': 'description'}), '(name, filter_, client=self, description=description)\n', (11252, 11305), False, 'from google.cloud.logging.metric import Metric\n'), ((5962, 6010), 'google.cloud.logging.entries.TextEntry.from_api_repr', 'TextEntry.from_api_repr', (['resource', 'self', 'loggers'], {}), '(resource, self, loggers)\n', (5985, 6010), False, 'from google.cloud.logging.entries import TextEntry\n'), ((10233, 10267), 'google.cloud.logging.sink.Sink.from_api_repr', 'Sink.from_api_repr', (['resource', 'self'], {}), '(resource, self)\n', (10251, 10267), False, 'from google.cloud.logging.sink import Sink\n'), ((12384, 12420), 'google.cloud.logging.metric.Metric.from_api_repr', 'Metric.from_api_repr', (['resource', 'self'], {}), '(resource, self)\n', (12404, 12420), False, 'from google.cloud.logging.metric import Metric\n'), ((3660, 3681), 'google.cloud.gapic.logging.v2.logging_service_v2_api.LoggingServiceV2Api', 'GeneratedLoggingAPI', ([], {}), '()\n', (3679, 3681), True, 'from google.cloud.gapic.logging.v2.logging_service_v2_api import LoggingServiceV2Api as GeneratedLoggingAPI\n'), ((3718, 3742), 'google.cloud.logging._gax._LoggingAPI', 'GAXLoggingAPI', (['generated'], {}), '(generated)\n', (3731, 3742), True, 'from google.cloud.logging._gax import _LoggingAPI as GAXLoggingAPI\n'), ((3797, 3828), 'google.cloud.logging.connection._LoggingAPI', 'JSONLoggingAPI', (['self.connection'], {}), '(self.connection)\n', (3811, 3828), True, 'from google.cloud.logging.connection import _LoggingAPI as JSONLoggingAPI\n'), ((4157, 4176), 'google.cloud.gapic.logging.v2.config_service_v2_api.ConfigServiceV2Api', 'GeneratedSinksAPI', ([], {}), '()\n', (4174, 4176), True, 'from google.cloud.gapic.logging.v2.config_service_v2_api import ConfigServiceV2Api as GeneratedSinksAPI\n'), ((4211, 4233), 'google.cloud.logging._gax._SinksAPI', 'GAXSinksAPI', (['generated'], {}), '(generated)\n', (4222, 4233), True, 'from google.cloud.logging._gax import _SinksAPI as GAXSinksAPI\n'), ((4286, 4315), 'google.cloud.logging.connection._SinksAPI', 'JSONSinksAPI', (['self.connection'], {}), '(self.connection)\n', (4298, 4315), True, 'from google.cloud.logging.connection import _SinksAPI as JSONSinksAPI\n'), ((4650, 4671), 'google.cloud.gapic.logging.v2.metrics_service_v2_api.MetricsServiceV2Api', 'GeneratedMetricsAPI', ([], {}), '()\n', (4669, 4671), True, 'from google.cloud.gapic.logging.v2.metrics_service_v2_api import MetricsServiceV2Api as GeneratedMetricsAPI\n'), ((4708, 4732), 'google.cloud.logging._gax._MetricsAPI', 'GAXMetricsAPI', (['generated'], {}), '(generated)\n', (4721, 4732), True, 'from google.cloud.logging._gax import _MetricsAPI as GAXMetricsAPI\n'), ((4787, 4818), 'google.cloud.logging.connection._MetricsAPI', 'JSONMetricsAPI', (['self.connection'], {}), '(self.connection)\n', (4801, 4818), True, 'from google.cloud.logging.connection 
import _MetricsAPI as JSONMetricsAPI\n'), ((6070, 6120), 'google.cloud.logging.entries.StructEntry.from_api_repr', 'StructEntry.from_api_repr', (['resource', 'self', 'loggers'], {}), '(resource, self, loggers)\n', (6095, 6120), False, 'from google.cloud.logging.entries import StructEntry\n'), ((6181, 6233), 'google.cloud.logging.entries.ProtobufEntry.from_api_repr', 'ProtobufEntry.from_api_repr', (['resource', 'self', 'loggers'], {}), '(resource, self, loggers)\n', (6208, 6233), False, 'from google.cloud.logging.entries import ProtobufEntry\n')] |
from __future__ import absolute_import
from unittest import TestCase
import os
import importlib
import inspect
from plotly.basedatatypes import BasePlotlyType, BaseFigure
datatypes_root = "new_plotly/graph_objs"
datatype_modules = [
dirpath.replace("/", ".")
for dirpath, _, _ in os.walk(datatypes_root)
if not dirpath.endswith("__pycache__")
]
class HierarchyTest(TestCase):
def test_construct_datatypes(self):
for datatypes_module in datatype_modules:
module = importlib.import_module(datatypes_module)
for name in getattr(module, "__all__", []):
if name.startswith("_") or name[0].islower() or name == "FigureWidget":
continue
obj = getattr(module, name)
try:
v = obj()
except Exception:
print(
"Failed to construct {obj} in module {module}".format(
obj=obj, module=datatypes_module
)
)
raise
if obj.__module__ == "new_plotly.graph_objs._deprecations":
self.assertTrue(isinstance(v, list) or isinstance(v, dict))
obj()
elif name in ("Figure", "FigureWidget"):
self.assertIsInstance(v, BaseFigure)
else:
self.assertIsInstance(v, BasePlotlyType)
| [
"importlib.import_module",
"os.walk"
]
| [((290, 313), 'os.walk', 'os.walk', (['datatypes_root'], {}), '(datatypes_root)\n', (297, 313), False, 'import os\n'), ((503, 544), 'importlib.import_module', 'importlib.import_module', (['datatypes_module'], {}), '(datatypes_module)\n', (526, 544), False, 'import importlib\n')] |
"""The Wolf SmartSet Service integration."""
from datetime import timedelta
import logging
from httpx import ConnectError, ConnectTimeout
from wolf_smartset.token_auth import InvalidAuth
from wolf_smartset.wolf_client import FetchFailed, ParameterReadError, WolfClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
COORDINATOR,
DEVICE_GATEWAY,
DEVICE_ID,
DEVICE_NAME,
DOMAIN,
PARAMETERS,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Wolf SmartSet Service from a config entry."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
device_name = entry.data[DEVICE_NAME]
device_id = entry.data[DEVICE_ID]
gateway_id = entry.data[DEVICE_GATEWAY]
refetch_parameters = False
_LOGGER.debug(
"Setting up wolflink integration for device: %s (ID: %s, gateway: %s)",
device_name,
device_id,
gateway_id,
)
wolf_client = WolfClient(username, password)
parameters = await fetch_parameters_init(wolf_client, gateway_id, device_id)
async def async_update_data():
"""Update all stored entities for Wolf SmartSet."""
try:
nonlocal refetch_parameters
nonlocal parameters
await wolf_client.update_session()
if not wolf_client.fetch_system_state_list(device_id, gateway_id):
refetch_parameters = True
raise UpdateFailed(
"Could not fetch values from server because device is Offline."
)
if refetch_parameters:
parameters = await fetch_parameters(wolf_client, gateway_id, device_id)
hass.data[DOMAIN][entry.entry_id][PARAMETERS] = parameters
refetch_parameters = False
values = {
v.value_id: v.value
for v in await wolf_client.fetch_value(
gateway_id, device_id, parameters
)
}
return {
parameter.parameter_id: (
parameter.value_id,
values[parameter.value_id],
)
for parameter in parameters
if parameter.value_id in values
}
except ConnectError as exception:
raise UpdateFailed(
f"Error communicating with API: {exception}"
) from exception
except FetchFailed as exception:
raise UpdateFailed(
f"Could not fetch values from server due to: {exception}"
) from exception
except ParameterReadError as exception:
refetch_parameters = True
raise UpdateFailed(
"Could not fetch values for parameter. Refreshing value IDs."
) from exception
except InvalidAuth as exception:
raise UpdateFailed("Invalid authentication during update.") from exception
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=DOMAIN,
update_method=async_update_data,
update_interval=timedelta(minutes=1),
)
await coordinator.async_refresh()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {}
hass.data[DOMAIN][entry.entry_id][PARAMETERS] = parameters
hass.data[DOMAIN][entry.entry_id][COORDINATOR] = coordinator
hass.data[DOMAIN][entry.entry_id][DEVICE_ID] = device_id
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def fetch_parameters(client: WolfClient, gateway_id: int, device_id: int):
"""
Fetch all available parameters with usage of WolfClient.
By default Reglertyp entity is removed because API will not provide value for this parameter.
"""
fetched_parameters = await client.fetch_parameters(gateway_id, device_id)
return [param for param in fetched_parameters if param.name != "Reglertyp"]
async def fetch_parameters_init(client: WolfClient, gateway_id: int, device_id: int):
"""Fetch all available parameters with usage of WolfClient but handles all exceptions and results in ConfigEntryNotReady."""
try:
return await fetch_parameters(client, gateway_id, device_id)
except (ConnectError, ConnectTimeout, FetchFailed) as exception:
raise ConfigEntryNotReady(
f"Error communicating with API: {exception}"
) from exception
| [
"logging.getLogger",
"wolf_smartset.wolf_client.WolfClient",
"homeassistant.exceptions.ConfigEntryNotReady",
"homeassistant.helpers.update_coordinator.UpdateFailed",
"datetime.timedelta"
]
| [((708, 735), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (725, 735), False, 'import logging\n'), ((1321, 1351), 'wolf_smartset.wolf_client.WolfClient', 'WolfClient', (['username', 'password'], {}), '(username, password)\n', (1331, 1351), False, 'from wolf_smartset.wolf_client import FetchFailed, ParameterReadError, WolfClient\n'), ((3490, 3510), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(1)'}), '(minutes=1)\n', (3499, 3510), False, 'from datetime import timedelta\n'), ((4979, 5044), 'homeassistant.exceptions.ConfigEntryNotReady', 'ConfigEntryNotReady', (['f"""Error communicating with API: {exception}"""'], {}), "(f'Error communicating with API: {exception}')\n", (4998, 5044), False, 'from homeassistant.exceptions import ConfigEntryNotReady\n'), ((1805, 1882), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['"""Could not fetch values from server because device is Offline."""'], {}), "('Could not fetch values from server because device is Offline.')\n", (1817, 1882), False, 'from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n'), ((2698, 2756), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['f"""Error communicating with API: {exception}"""'], {}), "(f'Error communicating with API: {exception}')\n", (2710, 2756), False, 'from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n'), ((2861, 2932), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['f"""Could not fetch values from server due to: {exception}"""'], {}), "(f'Could not fetch values from server due to: {exception}')\n", (2873, 2932), False, 'from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n'), ((3082, 3157), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['"""Could not fetch values for parameter. Refreshing value IDs."""'], {}), "('Could not fetch values for parameter. Refreshing value IDs.')\n", (3094, 3157), False, 'from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n'), ((3262, 3315), 'homeassistant.helpers.update_coordinator.UpdateFailed', 'UpdateFailed', (['"""Invalid authentication during update."""'], {}), "('Invalid authentication during update.')\n", (3274, 3315), False, 'from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed\n')] |
import torch
from torch import nn
from torch.nn import functional as F
from torchdrug import layers
class ConditionalFlow(nn.Module):
"""
Conditional flow transformation from `Masked Autoregressive Flow for Density Estimation`_.
.. _Masked Autoregressive Flow for Density Estimation:
https://arxiv.org/pdf/1705.07057.pdf
Parameters:
input_dim (int): input & output dimension
condition_dim (int): condition dimension
hidden_dims (list of int, optional): hidden dimensions
activation (str or function, optional): activation function
"""
def __init__(self, input_dim, condition_dim, hidden_dims=None, activation="relu"):
super(ConditionalFlow, self).__init__()
self.input_dim = input_dim
self.output_dim = input_dim
if hidden_dims is None:
hidden_dims = []
self.mlp = layers.MLP(condition_dim, list(hidden_dims) + [input_dim * 2], activation)
self.rescale = nn.Parameter(torch.zeros(1))
def forward(self, input, condition):
"""
Transform data into latent representations.
Parameters:
input (Tensor): input representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): latent representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = (input + bias) * scale.exp()
log_det = scale
return output, log_det
def reverse(self, latent, condition):
"""
Transform latent representations into data.
Parameters:
latent (Tensor): latent representations
condition (Tensor): conditional representations
Returns:
(Tensor, Tensor): input representations, log-likelihood of the transformation
"""
scale, bias = self.mlp(condition).chunk(2, dim=-1)
scale = (F.tanh(scale) * self.rescale)
output = latent / scale.exp() - bias
log_det = scale
        return output, log_det
 | [
"torch.nn.functional.tanh",
"torch.zeros"
]
| [((999, 1013), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1010, 1013), False, 'import torch\n'), ((1449, 1462), 'torch.nn.functional.tanh', 'F.tanh', (['scale'], {}), '(scale)\n', (1455, 1462), True, 'from torch.nn import functional as F\n'), ((2016, 2029), 'torch.nn.functional.tanh', 'F.tanh', (['scale'], {}), '(scale)\n', (2022, 2029), True, 'from torch.nn import functional as F\n')] |
import numpy as np
from sklearn.utils.multiclass import type_of_target
from mindware.base_estimator import BaseEstimator
from mindware.components.utils.constants import type_dict, MULTILABEL_CLS, IMG_CLS, TEXT_CLS, OBJECT_DET
from mindware.components.feature_engineering.transformation_graph import DataNode
class Classifier(BaseEstimator):
"""This class implements the classification task. """
def initialize(self, data: DataNode, **kwargs):
if self.metric is None:
self.metric = 'acc'
# Check the task type: {binary, multiclass}
task_type = type_of_target(data.data[1])
if task_type in type_dict:
task_type = type_dict[task_type]
else:
raise ValueError("Invalid Task Type: %s!" % task_type)
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data: DataNode, **kwargs):
"""
Fit the classifier to given training data.
:param data: instance of DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Predict classes for X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def refit(self):
return super().refit()
def predict_proba(self, X, batch_size=None, n_jobs=1):
"""
Predict probabilities of classes for all samples X.
:param X: Datanode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples, n_classes]
The predicted class probabilities.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
pred_proba = super().predict_proba(X, batch_size=batch_size, n_jobs=n_jobs)
if self.task_type != MULTILABEL_CLS:
assert (
np.allclose(
np.sum(pred_proba, axis=1),
np.ones_like(pred_proba[:, 0]))
), "Prediction probability does not sum up to 1!"
# Check that all probability values lie between 0 and 1.
assert (
(pred_proba >= 0).all() and (pred_proba <= 1).all()
), "Found prediction probability value outside of [0, 1]!"
return pred_proba
def get_tree_importance(self, data: DataNode):
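        # fit a LightGBM classifier on the transformed features and report its feature importances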
from lightgbm import LGBMClassifier
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMClassifier(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_importance(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
std_array = np.std(_ef, ddof=1, axis=0)
abs_array = abs(_ef)
mean_array = np.mean(abs_array, axis=0)
_importance = std_array / mean_array
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LogisticRegression
import pandas as pd
if (len(set(data.data[1]))) > 2:
print('ERROR! Only binary classification is supported!')
return 0
X, y = self.data_transformer(data).data
clf = LogisticRegression(random_state=1)
clf.fit(X, y)
_ef = clf.coef_
_impact = _ef[0]
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
class Regressor(BaseEstimator):
"""This class implements the regression task. """
def initialize(self, data: DataNode, **kwargs):
self.metric = 'mse' if self.metric is None else self.metric
# Check the task type: {continuous}
task_type = type_dict['continuous']
self.task_type = task_type
super().initialize(data=data, **kwargs)
def fit(self, data, **kwargs):
"""
Fit the regressor to given training data.
:param data: DataNode
:return: self
"""
if self._ml_engine is None:
self.initialize(data=data, **kwargs)
super().fit(data, **kwargs)
return self
def predict(self, X, batch_size=None, n_jobs=1):
"""
Make predictions for X.
:param X: DataNode
:param batch_size: int
:param n_jobs: int
:return: y : array of shape = [n_samples] or [n_samples, n_labels]
The predicted classes.
"""
if not isinstance(X, DataNode):
raise ValueError("X is supposed to be a Data Node, but get %s" % type(X))
return super().predict(X, batch_size=batch_size, n_jobs=n_jobs)
def get_tree_importance(self, data: DataNode):
from lightgbm import LGBMRegressor
import pandas as pd
X, y = self.data_transformer(data).data
lgb = LGBMRegressor(random_state=1)
lgb.fit(X, y)
_importance = lgb.feature_importances_
h = {}
h['feature_id'] = np.array(range(len(_importance)))
h['feature_importance'] = _importance
return pd.DataFrame(h)
def get_linear_impact(self, data: DataNode):
from sklearn.linear_model import LinearRegression
import pandas as pd
X, y = self.data_transformer(data).data
reg = LinearRegression()
reg.fit(X, y)
_impact = reg.coef_
h = {}
h['feature_id'] = np.array(range(len(_impact)))
h['feature_impact'] = _impact
return pd.DataFrame(h)
| [
"numpy.mean",
"numpy.ones_like",
"lightgbm.LGBMClassifier",
"lightgbm.LGBMRegressor",
"sklearn.linear_model.LogisticRegression",
"numpy.sum",
"sklearn.utils.multiclass.type_of_target",
"numpy.std",
"pandas.DataFrame",
"sklearn.linear_model.LinearRegression"
]
| [((591, 619), 'sklearn.utils.multiclass.type_of_target', 'type_of_target', (['data.data[1]'], {}), '(data.data[1])\n', (605, 619), False, 'from sklearn.utils.multiclass import type_of_target\n'), ((2957, 2987), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {'random_state': '(1)'}), '(random_state=1)\n', (2971, 2987), False, 'from lightgbm import LGBMClassifier\n'), ((3193, 3208), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (3205, 3208), True, 'import pandas as pd\n'), ((3413, 3447), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (3431, 3447), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3514, 3541), 'numpy.std', 'np.std', (['_ef'], {'ddof': '(1)', 'axis': '(0)'}), '(_ef, ddof=1, axis=0)\n', (3520, 3541), True, 'import numpy as np\n'), ((3592, 3618), 'numpy.mean', 'np.mean', (['abs_array'], {'axis': '(0)'}), '(abs_array, axis=0)\n', (3599, 3618), True, 'import numpy as np\n'), ((3800, 3815), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (3812, 3815), True, 'import pandas as pd\n'), ((4147, 4181), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(1)'}), '(random_state=1)\n', (4165, 4181), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4377, 4392), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (4389, 4392), True, 'import pandas as pd\n'), ((5766, 5795), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {'random_state': '(1)'}), '(random_state=1)\n', (5779, 5795), False, 'from lightgbm import LGBMRegressor\n'), ((6001, 6016), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (6013, 6016), True, 'import pandas as pd\n'), ((6215, 6233), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (6231, 6233), False, 'from sklearn.linear_model import LinearRegression\n'), ((6408, 6423), 'pandas.DataFrame', 'pd.DataFrame', (['h'], {}), '(h)\n', (6420, 6423), True, 'import pandas as pd\n'), ((2384, 2410), 'numpy.sum', 'np.sum', (['pred_proba'], {'axis': '(1)'}), '(pred_proba, axis=1)\n', (2390, 2410), True, 'import numpy as np\n'), ((2432, 2462), 'numpy.ones_like', 'np.ones_like', (['pred_proba[:, 0]'], {}), '(pred_proba[:, 0])\n', (2444, 2462), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import scrapy
import json
import os
import codecs
from AnimeSpider.items import AnimespiderItem
class AinmelinklistSpider(scrapy.Spider):
name = 'AinmeLinkList'
allowed_domains = ['bilibili.com']
start_urls = ['http://bilibili.com/']
def start_requests(self):
jsonpath = os.path.dirname(__file__) + '/output'
jsonfile = codecs.open('%s/AinmeList_items.json' % jsonpath, 'r', encoding='utf-8')
for line in jsonfile:
ainme = json.loads(line)
ainmename = ainme["name"]
url = ainme["link"].replace("//", "https://")
yield scrapy.Request(url=url, callback=self.parse, meta={'ainmename': ainmename})
def parse(self, response):
item = AnimespiderItem()
item["info_link"] = response.css(".media-title").xpath('@href').get()
yield item
| [
"json.loads",
"os.path.dirname",
"scrapy.Request",
"codecs.open",
"AnimeSpider.items.AnimespiderItem"
]
| [((379, 451), 'codecs.open', 'codecs.open', (["('%s/AinmeList_items.json' % jsonpath)", '"""r"""'], {'encoding': '"""utf-8"""'}), "('%s/AinmeList_items.json' % jsonpath, 'r', encoding='utf-8')\n", (390, 451), False, 'import codecs\n'), ((756, 773), 'AnimeSpider.items.AnimespiderItem', 'AnimespiderItem', ([], {}), '()\n', (771, 773), False, 'from AnimeSpider.items import AnimespiderItem\n'), ((322, 347), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (337, 347), False, 'import os\n'), ((502, 518), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (512, 518), False, 'import json\n'), ((633, 708), 'scrapy.Request', 'scrapy.Request', ([], {'url': 'url', 'callback': 'self.parse', 'meta': "{'ainmename': ainmename}"}), "(url=url, callback=self.parse, meta={'ainmename': ainmename})\n", (647, 708), False, 'import scrapy\n')] |
"""Setup for pytest-testplan plugin."""
from setuptools import setup
setup(
name='pytest-testplan',
version='0.1.0',
description='A pytest plugin to generate a CSV test report.',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
py_modules=['pytest_testplan'],
install_requires=['pytest'],
entry_points={'pytest11': ['testplan = pytest_testplan', ]},
)
| [
"setuptools.setup"
]
| [((71, 373), 'setuptools.setup', 'setup', ([], {'name': '"""pytest-testplan"""', 'version': '"""0.1.0"""', 'description': '"""A pytest plugin to generate a CSV test report."""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'py_modules': "['pytest_testplan']", 'install_requires': "['pytest']", 'entry_points': "{'pytest11': ['testplan = pytest_testplan']}"}), "(name='pytest-testplan', version='0.1.0', description=\n 'A pytest plugin to generate a CSV test report.', author='<NAME>',\n author_email='<EMAIL>', license='MIT', py_modules=['pytest_testplan'],\n install_requires=['pytest'], entry_points={'pytest11': [\n 'testplan = pytest_testplan']})\n", (76, 373), False, 'from setuptools import setup\n')] |
# Generated by Django 3.1.4 on 2021-01-07 19:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0003_auto_20210107_2010'),
]
operations = [
migrations.AlterField(
model_name='extrainfo',
name='rodzaj',
field=models.IntegerField(choices=[(2, 'Sci-Fi'), (0, 'Nieznany'), (5, 'Komedia'), (3, 'Dramat'), (1, 'Horror')], default=0),
),
migrations.CreateModel(
name='Recenzja',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('opis', models.TextField(default='')),
('gwizdki', models.IntegerField(default=5)),
('film', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.film')),
],
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.TextField",
"django.db.models.IntegerField"
]
| [((368, 490), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(2, 'Sci-Fi'), (0, 'Nieznany'), (5, 'Komedia'), (3, 'Dramat'), (1, 'Horror')]", 'default': '(0)'}), "(choices=[(2, 'Sci-Fi'), (0, 'Nieznany'), (5, 'Komedia'),\n (3, 'Dramat'), (1, 'Horror')], default=0)\n", (387, 490), False, 'from django.db import migrations, models\n'), ((604, 697), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (620, 697), False, 'from django.db import migrations, models\n'), ((721, 749), 'django.db.models.TextField', 'models.TextField', ([], {'default': '""""""'}), "(default='')\n", (737, 749), False, 'from django.db import migrations, models\n'), ((780, 810), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(5)'}), '(default=5)\n', (799, 810), False, 'from django.db import migrations, models\n'), ((838, 915), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""api.film"""'}), "(on_delete=django.db.models.deletion.CASCADE, to='api.film')\n", (855, 915), False, 'from django.db import migrations, models\n')] |
import time
import warnings
import matplotlib.pyplot as plt
import numpy as np
import sympy as sp
from .global_qbx import global_qbx_self
from .mesh import apply_interp_mat, gauss_rule, panelize_symbolic_surface, upsample
def find_dcutoff_refine(kernel, src, tol, plot=False):
# prep step 1: find d_cutoff and d_refine
# The goal is to estimate the error due to the QBX local patch
# The local surface will have singularities at the tips where it is cut off
# These singularities will cause error in the QBX expansion. We want to make
# the local patch large enough that these singularities are irrelevant.
# To isolate the QBX patch cutoff error, we will use a very high upsampling.
# We'll also choose p to be the minimum allowed value since that will result in
# the largest cutoff error. Increasing p will reduce the cutoff error guaranteeing that
# we never need to worry about cutoff error.
density = np.ones_like(src.pts[:, 0]) # np.cos(src.pts[:,0] * src.pts[:,1])
if plot:
plt.figure(figsize=(9, 13))
params = []
d_cutoffs = [1.1, 1.3, 1.6, 2.0]
ps = np.arange(1, 55, 3)
for di, direction in enumerate([-1.0, 1.0]):
baseline = global_qbx_self(kernel, src, p=30, kappa=8, direction=direction)
baseline_v = baseline.dot(density)
# Check that the local qbx method matches the simple global qbx approach when d_cutoff is very large
d_refine_high = 8.0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
local_baseline = kernel.integrate(
src.pts,
src,
d_cutoff=3.0,
tol=1e-20,
max_p=50,
d_refine=d_refine_high,
on_src_direction=direction,
)
local_baseline_v = local_baseline.dot(density)
err = np.max(np.abs(baseline_v - local_baseline_v))
print(err)
assert err < tol / 2
n_qbx_panels = []
drefine_optimal = []
p_for_full_accuracy = []
if plot:
plt.subplot(3, 2, 1 + di)
for i_d, d_cutoff in enumerate(d_cutoffs):
errs = []
for i_p, p in enumerate(ps):
# print(p, d_cutoff)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test, report = kernel.integrate(
src.pts,
src,
d_cutoff=d_cutoff,
tol=1e-15,
max_p=p,
on_src_direction=direction,
d_refine=d_refine_high,
return_report=True,
)
testv = test.dot(density)
err = np.max(np.abs(baseline_v - testv))
errs.append(err)
# print(p, err)
if err < tol:
for d_refine_decrease in np.arange(1.0, d_refine_high, 0.25):
refine_test, refine_report = kernel.integrate(
src.pts,
src,
d_cutoff=d_cutoff,
tol=1e-15,
max_p=p
+ 10, # Increase p here to have a refinement safety margin
on_src_direction=direction,
d_refine=d_refine_decrease,
return_report=True,
)
refine_testv = refine_test.dot(density)
refine_err = np.max(np.abs(baseline_v - refine_testv))
if refine_err < tol:
drefine_optimal.append(d_refine_decrease)
n_qbx_panels.append(refine_report["n_qbx_panels"])
p_for_full_accuracy.append(p)
break
if len(n_qbx_panels) <= i_d:
print(f"Failed to find parameters for {d_cutoff}")
drefine_optimal.append(1000)
n_qbx_panels.append(1e6)
p_for_full_accuracy.append(1e3)
break
if plot:
print(d_cutoff, errs)
plt.plot(ps[: i_p + 1], np.log10(errs), label=str(d_cutoff))
params.append((direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy))
if plot:
plt.legend()
plt.title("interior" if direction > 0 else "exterior")
plt.xlabel(r"$p_{\textrm{max}}$")
if di == 0:
plt.ylabel(r"$\log_{10}(\textrm{error})$")
plt.yticks(-np.arange(0, 16, 3))
plt.xticks(np.arange(0, 61, 10))
plt.ylim([-15, 0])
plt.subplot(3, 2, 3 + di)
plt.plot(d_cutoffs, np.array(n_qbx_panels) / src.n_pts, "k-*")
plt.xlabel(r"$d_{\textrm{cutoff}}$")
plt.ylim([0, 8])
if di == 0:
plt.ylabel("QBX panels per point")
plt.subplot(3, 2, 5 + di)
plt.plot(d_cutoffs, np.array(drefine_optimal), "k-*")
plt.xlabel(r"$d_{\textrm{cutoff}}$")
plt.ylim([0, 6])
if di == 0:
plt.ylabel(r"$d_{\textrm{refine}}$")
if plot:
plt.tight_layout()
plt.show()
total_cost = 0
for i in [0, 1]:
direction, n_qbx_panels, drefine_optimal, p_for_full_accuracy = params[i]
appx_cost = (
np.array(p_for_full_accuracy)
* np.array(n_qbx_panels)
* np.array(drefine_optimal)
)
if plot:
print(direction, appx_cost)
total_cost += appx_cost
if plot:
plt.plot(d_cutoffs, total_cost, "k-o")
plt.show()
best_idx = np.argmin(total_cost)
d_cutoff = d_cutoffs[best_idx]
d_refine = drefine_optimal[best_idx]
return d_cutoff, d_refine
# prep step 2: find the minimum distance at which integrals are computed
# to the required tolerance
def _find_d_up_helper(kernel, nq, max_curvature, start_d, tol, kappa):
t = sp.var("t")
n_panels = 2
while True:
panel_edges = np.linspace(-1, 1, n_panels + 1)
panel_bounds = np.stack((panel_edges[:-1], panel_edges[1:]), axis=1)
circle = panelize_symbolic_surface(
t, sp.cos(sp.pi * t), sp.sin(sp.pi * t), panel_bounds, *gauss_rule(nq)
)
n_panels_new = np.max(circle.panel_length / max_curvature * circle.panel_radius)
if n_panels_new <= n_panels:
break
n_panels = np.ceil(n_panels_new).astype(int)
# print(f"\nusing {n_panels} panels with max_curvature={max_curvature}")
circle_kappa, _ = upsample(circle, kappa)
circle_upsample, interp_mat_upsample = upsample(circle_kappa, 2)
# TODO: Write more about the underlying regularity assumptions!!
# Why is it acceptable to use this test_density here? Empirically, any
# well-resolved density has approximately the same error as integrating sin(x).
# For example, integrating: 1, cos(x)^2.
# If we integrate a poorly resolved density, we do see higher errors.
#
# How poorly resolved does the density need to be in order to see higher error?
# It seems like an interpolation Linfinity error of around 1e-5 causes the d_up value to start to drift upwards.
#
# As a simple heuristic that seems to perform very well, we compute the
# error when integrating a constant and then double the required distance
# in order to account for integrands that are not quite so perfectly
# resolved.
# if assume_regularity:
# omega = 1.0
# else:
# omega = 999.0# / max_curvature
# f = lambda x: np.sin(omega * x)
# test_density = interp_mat_upsample.dot(f(circle.pts[:,0]))
# test_density_upsampled = f(circle_upsample.pts[:,0])
# print('l2 err', np.linalg.norm(test_density - test_density_upsampled) / np.linalg.norm(test_density_upsampled))
# print('linf err', np.max(np.abs(test_density - test_density_upsampled)))
# test_density = f(circle.pts[:,0])
# test_density = np.sin(999 * circle.pts[:,0])
test_density = np.ones(circle_kappa.n_pts)
d_up = 0
for direction in [-1.0, 1.0]:
d = start_d
for i in range(50):
# In actuality, we only need to test interior points because the curvature
# of the surface ensures that more source panels are near the observation
# points and, as a result, the error will be higher for any given value of d.
L = np.repeat(circle_kappa.panel_length, circle_kappa.panel_order)
dist = L * d
test_pts = (
circle_kappa.pts + direction * circle_kappa.normals * dist[:, None]
)
# Check to make sure that the closest distance to a source point is
# truly `dist`. This check might fail if the interior test_pts are
# crossing over into the other half of the circle.
min_src_dist = np.min(
np.linalg.norm((test_pts[:, None] - circle_kappa.pts[None, :]), axis=2),
axis=1,
)
if not np.allclose(min_src_dist, dist):
return False, d
upsample_mat = np.transpose(
apply_interp_mat(
kernel._direct(test_pts, circle_upsample), interp_mat_upsample
),
(0, 2, 1),
)
est_mat = np.transpose(kernel._direct(test_pts, circle_kappa), (0, 2, 1))
# err = np.max(np.abs(upsample_mat - est_mat).sum(axis=2))
err = np.max(
np.abs(upsample_mat.dot(test_density) - est_mat.dot(test_density))
)
# print(d, err)
if err < tol:
d_up = max(d, d_up)
break
d *= 1.2
return True, d_up
def find_d_up(kernel, nq, max_curvature, start_d, tol, kappa):
d = start_d
for i in range(10):
d_up = _find_d_up_helper(kernel, nq, max_curvature * (0.8) ** i, d, tol, kappa)
if d_up[0]:
return d_up[1]
d = d_up[1]
def final_check(kernel, src):
density = np.ones_like(src.pts[:, 0]) # np.cos(source.pts[:,0] * src.pts[:,1])
baseline = global_qbx_self(kernel, src, p=50, kappa=10, direction=1.0)
baseline_v = baseline.dot(density)
tols = 10.0 ** np.arange(0, -15, -1)
errs = []
runtimes = []
for tol in tols:
runs = []
for i in range(10):
start = time.time()
local_baseline, report = kernel.integrate(
src.pts,
src,
tol=tol,
on_src_direction=1.0,
return_report=True,
)
runs.append(time.time() - start)
runtimes.append(np.min(runs))
local_baseline_v = local_baseline.dot(density)
errs.append(np.max(np.abs(baseline_v - local_baseline_v)))
# print(tol, errs[-1], runtime)
# assert(np.max(np.abs(baseline_v-local_baseline_v)) < 5e-14)
plt.figure(figsize=(9, 5))
plt.subplot(1, 2, 1)
plt.plot(-np.log10(tols), np.log10(errs))
plt.subplot(1, 2, 2)
plt.plot(-np.log10(tols), runtimes)
plt.tight_layout()
plt.show()
| [
"sympy.cos",
"numpy.log10",
"matplotlib.pyplot.ylabel",
"numpy.array",
"sympy.var",
"numpy.linalg.norm",
"numpy.arange",
"numpy.repeat",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"numpy.min",
"warnings.simplefilter",
"numpy.argmin",
"matplotlib.pyplot.ylim",
"numpy.abs",
"sympy.sin",
"numpy.ceil",
"numpy.allclose",
"numpy.ones",
"matplotlib.pyplot.title",
"time.time",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"numpy.ones_like",
"warnings.catch_warnings",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplot"
]
| [((951, 978), 'numpy.ones_like', 'np.ones_like', (['src.pts[:, 0]'], {}), '(src.pts[:, 0])\n', (963, 978), True, 'import numpy as np\n'), ((1130, 1149), 'numpy.arange', 'np.arange', (['(1)', '(55)', '(3)'], {}), '(1, 55, 3)\n', (1139, 1149), True, 'import numpy as np\n'), ((6070, 6091), 'numpy.argmin', 'np.argmin', (['total_cost'], {}), '(total_cost)\n', (6079, 6091), True, 'import numpy as np\n'), ((6380, 6391), 'sympy.var', 'sp.var', (['"""t"""'], {}), "('t')\n", (6386, 6391), True, 'import sympy as sp\n'), ((8460, 8487), 'numpy.ones', 'np.ones', (['circle_kappa.n_pts'], {}), '(circle_kappa.n_pts)\n', (8467, 8487), True, 'import numpy as np\n'), ((10506, 10533), 'numpy.ones_like', 'np.ones_like', (['src.pts[:, 0]'], {}), '(src.pts[:, 0])\n', (10518, 10533), True, 'import numpy as np\n'), ((11396, 11422), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 5)'}), '(figsize=(9, 5))\n', (11406, 11422), True, 'import matplotlib.pyplot as plt\n'), ((11427, 11447), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (11438, 11447), True, 'import matplotlib.pyplot as plt\n'), ((11498, 11518), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (11509, 11518), True, 'import matplotlib.pyplot as plt\n'), ((11563, 11581), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11579, 11581), True, 'import matplotlib.pyplot as plt\n'), ((11586, 11596), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11594, 11596), True, 'import matplotlib.pyplot as plt\n'), ((1039, 1066), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 13)'}), '(figsize=(9, 13))\n', (1049, 1066), True, 'import matplotlib.pyplot as plt\n'), ((5574, 5592), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5590, 5592), True, 'import matplotlib.pyplot as plt\n'), ((5601, 5611), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5609, 5611), True, 'import matplotlib.pyplot as plt\n'), ((5996, 6034), 'matplotlib.pyplot.plot', 'plt.plot', (['d_cutoffs', 'total_cost', '"""k-o"""'], {}), "(d_cutoffs, total_cost, 'k-o')\n", (6004, 6034), True, 'import matplotlib.pyplot as plt\n'), ((6043, 6053), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6051, 6053), True, 'import matplotlib.pyplot as plt\n'), ((6448, 6480), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(n_panels + 1)'], {}), '(-1, 1, n_panels + 1)\n', (6459, 6480), True, 'import numpy as np\n'), ((6504, 6557), 'numpy.stack', 'np.stack', (['(panel_edges[:-1], panel_edges[1:])'], {'axis': '(1)'}), '((panel_edges[:-1], panel_edges[1:]), axis=1)\n', (6512, 6557), True, 'import numpy as np\n'), ((6718, 6783), 'numpy.max', 'np.max', (['(circle.panel_length / max_curvature * circle.panel_radius)'], {}), '(circle.panel_length / max_curvature * circle.panel_radius)\n', (6724, 6783), True, 'import numpy as np\n'), ((10709, 10730), 'numpy.arange', 'np.arange', (['(0)', '(-15)', '(-1)'], {}), '(0, -15, -1)\n', (10718, 10730), True, 'import numpy as np\n'), ((11478, 11492), 'numpy.log10', 'np.log10', (['errs'], {}), '(errs)\n', (11486, 11492), True, 'import numpy as np\n'), ((1477, 1502), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (1500, 1502), False, 'import warnings\n'), ((1516, 1547), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1537, 1547), False, 'import warnings\n'), ((1898, 1935), 'numpy.abs', 'np.abs', (['(baseline_v - local_baseline_v)'], {}), 
'(baseline_v - local_baseline_v)\n', (1904, 1935), True, 'import numpy as np\n'), ((2103, 2128), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(1 + di)'], {}), '(3, 2, 1 + di)\n', (2114, 2128), True, 'import matplotlib.pyplot as plt\n'), ((4696, 4708), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4706, 4708), True, 'import matplotlib.pyplot as plt\n'), ((4721, 4775), 'matplotlib.pyplot.title', 'plt.title', (["('interior' if direction > 0 else 'exterior')"], {}), "('interior' if direction > 0 else 'exterior')\n", (4730, 4775), True, 'import matplotlib.pyplot as plt\n'), ((4788, 4821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$p_{\\\\textrm{max}}$"""'], {}), "('$p_{\\\\textrm{max}}$')\n", (4798, 4821), True, 'import matplotlib.pyplot as plt\n'), ((5007, 5025), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-15, 0]'], {}), '([-15, 0])\n', (5015, 5025), True, 'import matplotlib.pyplot as plt\n'), ((5039, 5064), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(3 + di)'], {}), '(3, 2, 3 + di)\n', (5050, 5064), True, 'import matplotlib.pyplot as plt\n'), ((5152, 5188), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$d_{\\\\textrm{cutoff}}$"""'], {}), "('$d_{\\\\textrm{cutoff}}$')\n", (5162, 5188), True, 'import matplotlib.pyplot as plt\n'), ((5201, 5217), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 8]'], {}), '([0, 8])\n', (5209, 5217), True, 'import matplotlib.pyplot as plt\n'), ((5306, 5331), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(2)', '(5 + di)'], {}), '(3, 2, 5 + di)\n', (5317, 5331), True, 'import matplotlib.pyplot as plt\n'), ((5410, 5446), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$d_{\\\\textrm{cutoff}}$"""'], {}), "('$d_{\\\\textrm{cutoff}}$')\n", (5420, 5446), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5475), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 6]'], {}), '([0, 6])\n', (5467, 5475), True, 'import matplotlib.pyplot as plt\n'), ((5850, 5875), 'numpy.array', 'np.array', (['drefine_optimal'], {}), '(drefine_optimal)\n', (5858, 5875), True, 'import numpy as np\n'), ((6617, 6634), 'sympy.cos', 'sp.cos', (['(sp.pi * t)'], {}), '(sp.pi * t)\n', (6623, 6634), True, 'import sympy as sp\n'), ((6636, 6653), 'sympy.sin', 'sp.sin', (['(sp.pi * t)'], {}), '(sp.pi * t)\n', (6642, 6653), True, 'import sympy as sp\n'), ((8863, 8925), 'numpy.repeat', 'np.repeat', (['circle_kappa.panel_length', 'circle_kappa.panel_order'], {}), '(circle_kappa.panel_length, circle_kappa.panel_order)\n', (8872, 8925), True, 'import numpy as np\n'), ((10850, 10861), 'time.time', 'time.time', ([], {}), '()\n', (10859, 10861), False, 'import time\n'), ((11145, 11157), 'numpy.min', 'np.min', (['runs'], {}), '(runs)\n', (11151, 11157), True, 'import numpy as np\n'), ((11462, 11476), 'numpy.log10', 'np.log10', (['tols'], {}), '(tols)\n', (11470, 11476), True, 'import numpy as np\n'), ((11533, 11547), 'numpy.log10', 'np.log10', (['tols'], {}), '(tols)\n', (11541, 11547), True, 'import numpy as np\n'), ((4862, 4905), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\log_{10}(\\\\textrm{error})$"""'], {}), "('$\\\\log_{10}(\\\\textrm{error})$')\n", (4872, 4905), True, 'import matplotlib.pyplot as plt\n'), ((4973, 4993), 'numpy.arange', 'np.arange', (['(0)', '(61)', '(10)'], {}), '(0, 61, 10)\n', (4982, 4993), True, 'import numpy as np\n'), ((5258, 5292), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""QBX panels per point"""'], {}), "('QBX panels per point')\n", (5268, 5292), True, 'import matplotlib.pyplot as plt\n'), ((5364, 5389), 
'numpy.array', 'np.array', (['drefine_optimal'], {}), '(drefine_optimal)\n', (5372, 5389), True, 'import numpy as np\n'), ((5516, 5552), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$d_{\\\\textrm{refine}}$"""'], {}), "('$d_{\\\\textrm{refine}}$')\n", (5526, 5552), True, 'import matplotlib.pyplot as plt\n'), ((5769, 5798), 'numpy.array', 'np.array', (['p_for_full_accuracy'], {}), '(p_for_full_accuracy)\n', (5777, 5798), True, 'import numpy as np\n'), ((5813, 5835), 'numpy.array', 'np.array', (['n_qbx_panels'], {}), '(n_qbx_panels)\n', (5821, 5835), True, 'import numpy as np\n'), ((6858, 6879), 'numpy.ceil', 'np.ceil', (['n_panels_new'], {}), '(n_panels_new)\n', (6865, 6879), True, 'import numpy as np\n'), ((9349, 9418), 'numpy.linalg.norm', 'np.linalg.norm', (['(test_pts[:, None] - circle_kappa.pts[None, :])'], {'axis': '(2)'}), '(test_pts[:, None] - circle_kappa.pts[None, :], axis=2)\n', (9363, 9418), True, 'import numpy as np\n'), ((9479, 9510), 'numpy.allclose', 'np.allclose', (['min_src_dist', 'dist'], {}), '(min_src_dist, dist)\n', (9490, 9510), True, 'import numpy as np\n'), ((11241, 11278), 'numpy.abs', 'np.abs', (['(baseline_v - local_baseline_v)'], {}), '(baseline_v - local_baseline_v)\n', (11247, 11278), True, 'import numpy as np\n'), ((2301, 2326), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (2324, 2326), False, 'import warnings\n'), ((2348, 2379), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (2369, 2379), False, 'import warnings\n'), ((4541, 4555), 'numpy.log10', 'np.log10', (['errs'], {}), '(errs)\n', (4549, 4555), True, 'import numpy as np\n'), ((4929, 4948), 'numpy.arange', 'np.arange', (['(0)', '(16)', '(3)'], {}), '(0, 16, 3)\n', (4938, 4948), True, 'import numpy as np\n'), ((5097, 5119), 'numpy.array', 'np.array', (['n_qbx_panels'], {}), '(n_qbx_panels)\n', (5105, 5119), True, 'import numpy as np\n'), ((11100, 11111), 'time.time', 'time.time', ([], {}), '()\n', (11109, 11111), False, 'import time\n'), ((2851, 2877), 'numpy.abs', 'np.abs', (['(baseline_v - testv)'], {}), '(baseline_v - testv)\n', (2857, 2877), True, 'import numpy as np\n'), ((3035, 3070), 'numpy.arange', 'np.arange', (['(1.0)', 'd_refine_high', '(0.25)'], {}), '(1.0, d_refine_high, 0.25)\n', (3044, 3070), True, 'import numpy as np\n'), ((3769, 3802), 'numpy.abs', 'np.abs', (['(baseline_v - refine_testv)'], {}), '(baseline_v - refine_testv)\n', (3775, 3802), True, 'import numpy as np\n')] |
import logging as log
class Log:
def __init__(self, level):
self.level = level
log.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
level=level)
self.log = log
def info(self, msg):
self.log.info(msg)
def debug(self, msg):
self.log.debug(msg)
def warn(self, msg):
self.log.warn(msg)
def error(self, msg):
self.log.error(msg)
| [
"logging.basicConfig"
]
| [((101, 221), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s"""', 'level': 'level'}), "(format=\n '%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',\n level=level)\n", (116, 221), True, 'import logging as log\n')] |
import aspose.email
from aspose.email.clients.imap import ImapClient
from aspose.email.clients import SecurityOptions
from aspose.email.clients.imap import ImapQueryBuilder
import datetime as dt
def run():
dataDir = ""
#ExStart: FetchEmailMessageFromServer
client = ImapClient("imap.gmail.com", 993, "username", "password")
client.select_folder("Inbox")
builder = ImapQueryBuilder()
builder.subject.contains("Newsletter")
builder.internal_date.on(dt.datetime.now())
query = builder.get_query()
msgsColl = client.list_messages(query)
print("Total Messages fulfilling search criterion: " + str(len(msgsColl)))
#ExEnd: FetchEmailMessageFromServer
if __name__ == '__main__':
run()
| [
"datetime.datetime.now",
"aspose.email.clients.imap.ImapQueryBuilder",
"aspose.email.clients.imap.ImapClient"
]
| [((281, 338), 'aspose.email.clients.imap.ImapClient', 'ImapClient', (['"""imap.gmail.com"""', '(993)', '"""username"""', '"""password"""'], {}), "('imap.gmail.com', 993, 'username', 'password')\n", (291, 338), False, 'from aspose.email.clients.imap import ImapClient\n'), ((387, 405), 'aspose.email.clients.imap.ImapQueryBuilder', 'ImapQueryBuilder', ([], {}), '()\n', (403, 405), False, 'from aspose.email.clients.imap import ImapQueryBuilder\n'), ((478, 495), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (493, 495), True, 'import datetime as dt\n')] |
import subprocess
def run(cmd):
subprocess.run(cmd.split(' '))
def ls():
subprocess.call(["ls", "-l"]) | [
"subprocess.call"
]
| [((82, 111), 'subprocess.call', 'subprocess.call', (["['ls', '-l']"], {}), "(['ls', '-l'])\n", (97, 111), False, 'import subprocess\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
~Gros
'''
from hashlib import sha256
import random
def add_padding(data, block_size=16):
"""add PKCS#7 padding"""
size = block_size - (len(data)%block_size)
return data+chr(size)*size
def strip_padding(data, block_size=16):
"""strip PKCS#7 padding"""
padding = ord(data[-1])
if padding == 0 or padding > block_size or data[-padding:] != chr(padding)*padding:
raise Exception("Invalid padding")
return data[:-padding]
def random_bytes(amount=1):
return ''.join([chr(random.randint(0,255)) for x in range(amount)])
def derive_key(key_int, block_size=16):
    return sha256(str(key_int)).digest()[:16]
 | [
"random.randint"
]
| [((563, 585), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (577, 585), False, 'import random\n')] |
#
# Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from click.testing import CliRunner
from cli_text_consts import ModelExportCmdTexts as Texts
from commands.model.common import workflow_description
from commands.model.export import export
from platform_resources.workflow import ArgoWorkflow, QUEUED_PHASE
FEM_NAME = "EXPORT_1"
SEM_NAME = "EXPORT_2"
FEM_PARAMETERS = "PARAMS_1"
SEM_PARAMETERS = "PARAMS_2"
FEM_START_DATE = '2000-01-01'
FEM_NAMESPACE = 'test-namespace'
TEST_AGROWORKFLOW = ArgoWorkflow(name=FEM_NAME, started_at=FEM_START_DATE, finished_at=None,
namespace=FEM_NAMESPACE, phase=None)
TWO_MODEL_OUTPUT = [workflow_description(name=FEM_NAME, parameters=FEM_PARAMETERS),
workflow_description(name=SEM_NAME, parameters=SEM_PARAMETERS)]
def setup_mocks(mocker):
mocker.patch('commands.model.export.get_kubectl_current_context_namespace',
return_value='fake-namespace')
mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
return_value=mocker.MagicMock())
mocker.patch('platform_resources.workflow.ArgoWorkflow.get',
return_value=TEST_AGROWORKFLOW)
mocker.patch('os.listdir', return_value=['openvino.yaml', 'tensorflow.yaml', 'some_other_file'])
mocker.patch('commands.model.export.NAUTAConfigMap', return_value=mocker.MagicMock(registry='fake-addr'))
mocker.patch('commands.model.export.Config')
mocker.patch('os.path.isdir', return_value=True)
def test_export(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["/fake/path", "openvino"])
assert result.exit_code == 0
assert "Successfully created export workflow" in result.output
assert QUEUED_PHASE in result.output
assert FEM_NAME in result.output
assert FEM_START_DATE in result.output
assert FEM_NAMESPACE in result.output
def test_export_inexistent_format(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["/fake/path", "bad"])
assert result.exit_code == 2
assert "Format: bad does not exist. Choose from:" in result.output
def test_export_failure(mocker):
setup_mocks(mocker)
mocker.patch('platform_resources.workflow.ArgoWorkflow.from_yaml',
return_value=mocker.MagicMock(create=lambda: RuntimeError))
result = CliRunner().invoke(export, ["/fake/path", "openvino"])
assert result.exit_code == 1
assert "Failed to create export workflow" in result.output
def test_export_list(mocker):
mocker.patch("commands.model.export.get_list_of_workflows", return_value=TWO_MODEL_OUTPUT)
result = CliRunner().invoke(export, ["formats"])
assert FEM_NAME in result.output
assert SEM_NAME in result.output
assert FEM_PARAMETERS in result.output
assert SEM_PARAMETERS in result.output
def test_export_list_error(mocker):
mocker.patch("commands.model.export.get_list_of_workflows", side_effect=RuntimeError)
result = CliRunner().invoke(export, ["formats"])
assert Texts.EXPORT_LIST_ERROR_MSG in result.output
def test_export_missing_format(mocker):
setup_mocks(mocker)
result = CliRunner().invoke(export, ["wrong-option"])
assert Texts.MISSING_EXPORT_FORMAT.format(formats=["openvino", "tensorflow"]) in result.output
| [
"cli_text_consts.ModelExportCmdTexts.MISSING_EXPORT_FORMAT.format",
"platform_resources.workflow.ArgoWorkflow",
"commands.model.common.workflow_description",
"click.testing.CliRunner"
]
| [((1034, 1147), 'platform_resources.workflow.ArgoWorkflow', 'ArgoWorkflow', ([], {'name': 'FEM_NAME', 'started_at': 'FEM_START_DATE', 'finished_at': 'None', 'namespace': 'FEM_NAMESPACE', 'phase': 'None'}), '(name=FEM_NAME, started_at=FEM_START_DATE, finished_at=None,\n namespace=FEM_NAMESPACE, phase=None)\n', (1046, 1147), False, 'from platform_resources.workflow import ArgoWorkflow, QUEUED_PHASE\n'), ((1198, 1260), 'commands.model.common.workflow_description', 'workflow_description', ([], {'name': 'FEM_NAME', 'parameters': 'FEM_PARAMETERS'}), '(name=FEM_NAME, parameters=FEM_PARAMETERS)\n', (1218, 1260), False, 'from commands.model.common import workflow_description\n'), ((1282, 1344), 'commands.model.common.workflow_description', 'workflow_description', ([], {'name': 'SEM_NAME', 'parameters': 'SEM_PARAMETERS'}), '(name=SEM_NAME, parameters=SEM_PARAMETERS)\n', (1302, 1344), False, 'from commands.model.common import workflow_description\n'), ((3760, 3830), 'cli_text_consts.ModelExportCmdTexts.MISSING_EXPORT_FORMAT.format', 'Texts.MISSING_EXPORT_FORMAT.format', ([], {'formats': "['openvino', 'tensorflow']"}), "(formats=['openvino', 'tensorflow'])\n", (3794, 3830), True, 'from cli_text_consts import ModelExportCmdTexts as Texts\n'), ((2113, 2124), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2122, 2124), False, 'from click.testing import CliRunner\n'), ((2515, 2526), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2524, 2526), False, 'from click.testing import CliRunner\n'), ((2891, 2902), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (2900, 2902), False, 'from click.testing import CliRunner\n'), ((3184, 3195), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3193, 3195), False, 'from click.testing import CliRunner\n'), ((3527, 3538), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3536, 3538), False, 'from click.testing import CliRunner\n'), ((3703, 3714), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (3712, 3714), False, 'from click.testing import CliRunner\n')] |
from requests import get
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import datetime as dt
from bme280 import BME280
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
xs = []
ys =[]
bus = SMBus(1)
bme280 = BME280(i2c_dev=bus)
def animate(i, xs, ys):
pressure = bme280.get_pressure()
xs.append(dt.datetime.now().strftime('%H:%M:%S'))
ys.append(pressure)
xs = xs[-20:]
ys = ys[-20:]
ax.clear()
ax.plot(xs, ys)
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('Pressure over time')
plt.ylabel("pressure")
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys), interval=60000)
plt.show()
| [
"bme280.BME280",
"smbus.SMBus",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"matplotlib.animation.FuncAnimation",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.show"
]
| [((237, 249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (247, 249), True, 'import matplotlib.pyplot as plt\n'), ((302, 310), 'smbus.SMBus', 'SMBus', (['(1)'], {}), '(1)\n', (307, 310), False, 'from smbus import SMBus\n'), ((320, 339), 'bme280.BME280', 'BME280', ([], {'i2c_dev': 'bus'}), '(i2c_dev=bus)\n', (326, 339), False, 'from bme280 import BME280\n'), ((702, 771), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'fargs': '(xs, ys)', 'interval': '(60000)'}), '(fig, animate, fargs=(xs, ys), interval=60000)\n', (725, 771), True, 'import matplotlib.animation as animation\n'), ((772, 782), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (780, 782), True, 'import matplotlib.pyplot as plt\n'), ((555, 590), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)', 'ha': '"""right"""'}), "(rotation=45, ha='right')\n", (565, 590), True, 'import matplotlib.pyplot as plt\n'), ((595, 626), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'bottom': '(0.3)'}), '(bottom=0.3)\n', (614, 626), True, 'import matplotlib.pyplot as plt\n'), ((632, 663), 'matplotlib.pyplot.title', 'plt.title', (['"""Pressure over time"""'], {}), "('Pressure over time')\n", (641, 663), True, 'import matplotlib.pyplot as plt\n'), ((668, 690), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""pressure"""'], {}), "('pressure')\n", (678, 690), True, 'import matplotlib.pyplot as plt\n'), ((416, 433), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (431, 433), True, 'import datetime as dt\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.misc import modules
constrain_path = {
('threeD', 'normal'): (True, True, ''),
('threeD', 'depth'): (True, True, ''),
('normal', 'depth'): (True, True, ''),
('depth', 'normal'): (True, True, ''),
}
class UnwarpNet(nn.Module):
def __init__(self, use_simple=False, combine_num=3, use_constrain=True, constrain_configure=None):
super(UnwarpNet, self).__init__()
self.combine_num = combine_num
self.use_simple = use_simple
self.use_constrain = use_constrain
self.constrain_configure = constrain_configure
self.geo_encoder = modules.Encoder(downsample=6, in_channels=3)
self.threeD_decoder = modules.Decoder(downsample=6, out_channels=3, combine_num=self.combine_num)
self.normal_decoder = modules.Decoder(downsample=6, out_channels=3, combine_num=self.combine_num)
self.depth_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=self.combine_num)
self.mask_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=0)
bottle_neck = sum([2 ** (i + 4) for i in range(self.combine_num)])
self.second_encoder = modules.Encoder(downsample=6, in_channels=bottle_neck * 3 + 3)
self.uv_decoder = modules.Decoder(downsample=6, out_channels=2, combine_num=0)
# self.albedo_decoder = modules.AlbedoDecoder(downsample=6, out_channels=1)
self.albedo_decoder = modules.Decoder(downsample=6, out_channels=1, combine_num=0)
self.deform_decoder = modules.Decoder(downsample=6, out_channels=2, combine_num=0)
self.dep2nor = None
self.threeD_to_nor2dep = None
self.nor2dep = None
def forward(self, x):
gxvals, gx_encode = self.geo_encoder(x)
threeD_map, threeD_feature = self.threeD_decoder(gxvals, gx_encode)
threeD_map = nn.functional.tanh(threeD_map)
dep_map, dep_feature = self.depth_decoder(gxvals, gx_encode)
dep_map = nn.functional.tanh(dep_map)
nor_map, nor_feature = self.normal_decoder(gxvals, gx_encode)
nor_map = nn.functional.tanh(nor_map)
mask_map, mask_feature = self.mask_decoder(gxvals, gx_encode)
mask_map = torch.nn.functional.sigmoid(mask_map)
# geo_feature = torch.cat([threeD_feature, nor_feature, dep_feature], dim=1)
geo_feature = torch.cat([threeD_feature, nor_feature, dep_feature, x], dim=1)
b, c, h, w = geo_feature.size()
geo_feature_mask = geo_feature.mul(mask_map.expand(b, c, h, w))
secvals, sec_encode = self.second_encoder(geo_feature_mask)
uv_map, _ = self.uv_decoder(secvals, sec_encode)
uv_map = nn.functional.tanh(uv_map)
alb_map, _ = self.albedo_decoder(secvals, sec_encode)
alb_map = nn.functional.tanh(alb_map)
deform_map, _ = self.deform_decoder(secvals, sec_encode)
deform_map = nn.functional.tanh(deform_map)
return uv_map, threeD_map, nor_map, alb_map, dep_map, mask_map, \
None, None, None, None, None, deform_map
| [
"torch.nn.functional.tanh",
"models.misc.modules.Encoder",
"torch.nn.functional.sigmoid",
"models.misc.modules.Decoder",
"torch.cat"
]
| [((680, 724), 'models.misc.modules.Encoder', 'modules.Encoder', ([], {'downsample': '(6)', 'in_channels': '(3)'}), '(downsample=6, in_channels=3)\n', (695, 724), False, 'from models.misc import modules\n'), ((755, 830), 'models.misc.modules.Decoder', 'modules.Decoder', ([], {'downsample': '(6)', 'out_channels': '(3)', 'combine_num': 'self.combine_num'}), '(downsample=6, out_channels=3, combine_num=self.combine_num)\n', (770, 830), False, 'from models.misc import modules\n'), ((861, 936), 'models.misc.modules.Decoder', 'modules.Decoder', ([], {'downsample': '(6)', 'out_channels': '(3)', 'combine_num': 'self.combine_num'}), '(downsample=6, out_channels=3, combine_num=self.combine_num)\n', (876, 936), False, 'from models.misc import modules\n'), ((966, 1041), 'models.misc.modules.Decoder', 'modules.Decoder', ([], {'downsample': '(6)', 'out_channels': '(1)', 'combine_num': 'self.combine_num'}), '(downsample=6, out_channels=1, combine_num=self.combine_num)\n', (981, 1041), False, 'from models.misc import modules\n'), ((1070, 1130), 'models.misc.modules.Decoder', 'modules.Decoder', ([], {'downsample': '(6)', 'out_channels': '(1)', 'combine_num': '(0)'}), '(downsample=6, out_channels=1, combine_num=0)\n', (1085, 1130), False, 'from models.misc import modules\n'), ((1236, 1298), 'models.misc.modules.Encoder', 'modules.Encoder', ([], {'downsample': '(6)', 'in_channels': '(bottle_neck * 3 + 3)'}), '(downsample=6, in_channels=bottle_neck * 3 + 3)\n', (1251, 1298), False, 'from models.misc import modules\n'), ((1325, 1385), 'models.misc.modules.Decoder', 'modules.Decoder', ([], {'downsample': '(6)', 'out_channels': '(2)', 'combine_num': '(0)'}), '(downsample=6, out_channels=2, combine_num=0)\n', (1340, 1385), False, 'from models.misc import modules\n'), ((1500, 1560), 'models.misc.modules.Decoder', 'modules.Decoder', ([], {'downsample': '(6)', 'out_channels': '(1)', 'combine_num': '(0)'}), '(downsample=6, out_channels=1, combine_num=0)\n', (1515, 1560), False, 'from models.misc import modules\n'), ((1591, 1651), 'models.misc.modules.Decoder', 'modules.Decoder', ([], {'downsample': '(6)', 'out_channels': '(2)', 'combine_num': '(0)'}), '(downsample=6, out_channels=2, combine_num=0)\n', (1606, 1651), False, 'from models.misc import modules\n'), ((1926, 1956), 'torch.nn.functional.tanh', 'nn.functional.tanh', (['threeD_map'], {}), '(threeD_map)\n', (1944, 1956), True, 'import torch.nn as nn\n'), ((2044, 2071), 'torch.nn.functional.tanh', 'nn.functional.tanh', (['dep_map'], {}), '(dep_map)\n', (2062, 2071), True, 'import torch.nn as nn\n'), ((2160, 2187), 'torch.nn.functional.tanh', 'nn.functional.tanh', (['nor_map'], {}), '(nor_map)\n', (2178, 2187), True, 'import torch.nn as nn\n'), ((2277, 2314), 'torch.nn.functional.sigmoid', 'torch.nn.functional.sigmoid', (['mask_map'], {}), '(mask_map)\n', (2304, 2314), False, 'import torch\n'), ((2422, 2485), 'torch.cat', 'torch.cat', (['[threeD_feature, nor_feature, dep_feature, x]'], {'dim': '(1)'}), '([threeD_feature, nor_feature, dep_feature, x], dim=1)\n', (2431, 2485), False, 'import torch\n'), ((2740, 2766), 'torch.nn.functional.tanh', 'nn.functional.tanh', (['uv_map'], {}), '(uv_map)\n', (2758, 2766), True, 'import torch.nn as nn\n'), ((2847, 2874), 'torch.nn.functional.tanh', 'nn.functional.tanh', (['alb_map'], {}), '(alb_map)\n', (2865, 2874), True, 'import torch.nn as nn\n'), ((2961, 2991), 'torch.nn.functional.tanh', 'nn.functional.tanh', (['deform_map'], {}), '(deform_map)\n', (2979, 2991), True, 'import torch.nn as nn\n')] |
#!/usr/bin/env python
from typing import List
import aoc
from collections import defaultdict
@aoc.timing
def solve(inp: str, part2=False):
def find_path(current: str, path: List[str] = []):
if current == 'end':
yield path
return
for nxt in caves[current]:
if nxt == 'start':
continue
if nxt.islower() and nxt in path:
if not part2:
continue
elif any(path.count(c) > 1 for c in path if c.islower()):
continue
yield from find_path(nxt, path + [nxt])
caves = defaultdict(list)
for line in inp.splitlines():
parts = line.split('-')
caves[parts[0]].append(parts[1])
caves[parts[1]].append(parts[0])
return len(list(find_path('start')))
@aoc.timing
def part2(inp: str):
return inp
with open('test2.txt', 'r') as f:
inp = f.read()
print("Part 1:", solve(inp))
print("Part 2:", solve(inp, True))
with open('input.txt', 'r') as f:
inp = f.read()
print("Part 1:", solve(inp))
print("Part 2:", solve(inp, True))
| [
"collections.defaultdict"
]
| [((634, 651), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (645, 651), False, 'from collections import defaultdict\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List, Pattern
from recognizers_text.utilities import RegExpUtility
from recognizers_text.extractor import Extractor
from recognizers_number.number.italian.extractors import ItalianIntegerExtractor
from ...resources.italian_date_time import ItalianDateTime
from ..extractors import DateTimeExtractor
from ..base_timeperiod import TimePeriodExtractorConfiguration, MatchedIndex
from ..base_time import BaseTimeExtractor
from ..base_timezone import BaseTimeZoneExtractor
from .time_extractor_config import ItalianTimeExtractorConfiguration
from .base_configs import ItalianDateTimeUtilityConfiguration
from .timezone_extractor_config import ItalianTimeZoneExtractorConfiguration
class ItalianTimePeriodExtractorConfiguration(TimePeriodExtractorConfiguration):
@property
def check_both_before_after(self) -> bool:
return self._check_both_before_after
@property
def simple_cases_regex(self) -> List[Pattern]:
return self._simple_cases_regex
@property
def till_regex(self) -> Pattern:
return self._till_regex
@property
def time_of_day_regex(self) -> Pattern:
return self._time_of_day_regex
@property
def general_ending_regex(self) -> Pattern:
return self._general_ending_regex
@property
def single_time_extractor(self) -> DateTimeExtractor:
return self._single_time_extractor
@property
def integer_extractor(self) -> Extractor:
return self._integer_extractor
@property
def token_before_date(self) -> str:
return self._token_before_date
@property
def pure_number_regex(self) -> List[Pattern]:
return self._pure_number_regex
@property
def time_zone_extractor(self) -> DateTimeExtractor:
return self._time_zone_extractor
def __init__(self):
super().__init__()
self._check_both_before_after = ItalianDateTime.CheckBothBeforeAfter
self._single_time_extractor = BaseTimeExtractor(
ItalianTimeExtractorConfiguration())
self._integer_extractor = ItalianIntegerExtractor()
self.utility_configuration = ItalianDateTimeUtilityConfiguration()
self._simple_cases_regex: List[Pattern] = [
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumFromTo),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PureNumBetweenAnd),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.PmRegex),
RegExpUtility.get_safe_reg_exp(ItalianDateTime.AmRegex)
]
self._till_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TillRegex)
self._time_of_day_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.TimeOfDayRegex)
self._general_ending_regex: Pattern = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.GeneralEndingRegex)
self.from_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.FromRegex2)
self.connector_and_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.ConnectorAndRegex)
self.before_regex = RegExpUtility.get_safe_reg_exp(
ItalianDateTime.BeforeRegex2)
self._token_before_date = ItalianDateTime.TokenBeforeDate
self._pure_number_regex = [ItalianDateTime.PureNumFromTo, ItalianDateTime.PureNumFromTo]
self._time_zone_extractor = BaseTimeZoneExtractor(
ItalianTimeZoneExtractorConfiguration())
def get_from_token_index(self, source: str) -> MatchedIndex:
match = self.from_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def get_between_token_index(self, source: str) -> MatchedIndex:
match = self.before_regex.search(source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def is_connector_token(self, source: str):
return self.connector_and_regex.match(source)
| [
"recognizers_number.number.italian.extractors.ItalianIntegerExtractor",
"recognizers_text.utilities.RegExpUtility.get_safe_reg_exp"
]
| [((2169, 2194), 'recognizers_number.number.italian.extractors.ItalianIntegerExtractor', 'ItalianIntegerExtractor', ([], {}), '()\n', (2192, 2194), False, 'from recognizers_number.number.italian.extractors import ItalianIntegerExtractor\n'), ((2661, 2718), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.TillRegex'], {}), '(ItalianDateTime.TillRegex)\n', (2691, 2718), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((2775, 2837), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.TimeOfDayRegex'], {}), '(ItalianDateTime.TimeOfDayRegex)\n', (2805, 2837), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((2897, 2963), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.GeneralEndingRegex'], {}), '(ItalianDateTime.GeneralEndingRegex)\n', (2927, 2963), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((3004, 3062), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.FromRegex2'], {}), '(ItalianDateTime.FromRegex2)\n', (3034, 3062), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((3111, 3176), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.ConnectorAndRegex'], {}), '(ItalianDateTime.ConnectorAndRegex)\n', (3141, 3176), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((3218, 3278), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.BeforeRegex2'], {}), '(ItalianDateTime.BeforeRegex2)\n', (3248, 3278), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((2335, 2396), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.PureNumFromTo'], {}), '(ItalianDateTime.PureNumFromTo)\n', (2365, 2396), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((2410, 2475), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.PureNumBetweenAnd'], {}), '(ItalianDateTime.PureNumBetweenAnd)\n', (2440, 2475), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((2489, 2544), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.PmRegex'], {}), '(ItalianDateTime.PmRegex)\n', (2519, 2544), False, 'from recognizers_text.utilities import RegExpUtility\n'), ((2558, 2613), 'recognizers_text.utilities.RegExpUtility.get_safe_reg_exp', 'RegExpUtility.get_safe_reg_exp', (['ItalianDateTime.AmRegex'], {}), '(ItalianDateTime.AmRegex)\n', (2588, 2613), False, 'from recognizers_text.utilities import RegExpUtility\n')] |
from pathlib import Path
import pytest
from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser
def get_arf_report_path(src="global_test_data/ssg-fedora-ds-arf.xml"):
return str(Path(__file__).parent.parent / src)
@pytest.mark.parametrize("rule_id, result", [
(
"xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny",
"false",
),
(
"xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth",
"false",
),
(
"xccdf_org.ssgproject.content_rule_service_debug-shell_disabled",
"true",
),
(
"xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat",
"false",
),
(
"xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir",
"false",
),
(
"xccdf_org.ssgproject.content_rule_require_singleuser_auth",
"true",
),
])
def test_parsing_and_evaluate_scan_rule(rule_id, result):
path = get_arf_report_path()
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == result
def test_parsing_arf_report_without_system_data():
path = get_arf_report_path("global_test_data/arf_no_system_data.xml")
rule_id = "xccdf_com.example.www_rule_test-fail"
parser = ARFXMLParser(path)
oval_tree = parser.get_oval_tree(rule_id)
assert oval_tree.evaluate_tree() == "false"
@pytest.mark.parametrize("rule_id, pattern", [
("hello", "404 rule \"hello\" not found!"),
("xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server", "notselected"),
("xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy", "notchecked"),
("xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages", "notapplicable"),
])
def test_parsing_bad_rule(rule_id, pattern):
path = get_arf_report_path()
parser = ARFXMLParser(path)
with pytest.raises(Exception, match=pattern):
assert parser.get_oval_tree(rule_id)
def test_use_bad_report_file():
src = 'global_test_data/xccdf_org.ssgproject.content_profile_ospp-results-initial.xml'
path = get_arf_report_path(src)
with pytest.raises(Exception, match=r"arf\b|ARF\b"):
assert ARFXMLParser(path)
| [
"oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser",
"pytest.mark.parametrize",
"pytest.raises",
"pathlib.Path"
]
| [((234, 897), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rule_id, result"""', "[('xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny',\n 'false'), ('xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_service_debug-shell_disabled',\n 'true'), (\n 'xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat'\n , 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir',\n 'false'), ('xccdf_org.ssgproject.content_rule_require_singleuser_auth',\n 'true')]"], {}), "('rule_id, result', [(\n 'xccdf_org.ssgproject.content_rule_accounts_passwords_pam_faillock_deny',\n 'false'), ('xccdf_org.ssgproject.content_rule_sshd_disable_gssapi_auth',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_service_debug-shell_disabled',\n 'true'), (\n 'xccdf_org.ssgproject.content_rule_mount_option_dev_shm_noexec',\n 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_unsuccessful_file_modification_creat'\n , 'false'), (\n 'xccdf_org.ssgproject.content_rule_audit_rules_file_deletion_events_rmdir',\n 'false'), ('xccdf_org.ssgproject.content_rule_require_singleuser_auth',\n 'true')])\n", (257, 897), False, 'import pytest\n'), ((1562, 1930), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""rule_id, pattern"""', '[(\'hello\', \'404 rule "hello" not found!\'), (\n \'xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server\',\n \'notselected\'), (\n \'xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy\',\n \'notchecked\'), (\n \'xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages\',\n \'notapplicable\')]'], {}), '(\'rule_id, pattern\', [(\'hello\',\n \'404 rule "hello" not found!\'), (\n \'xccdf_org.ssgproject.content_rule_ntpd_specify_remote_server\',\n \'notselected\'), (\n \'xccdf_org.ssgproject.content_rule_configure_bind_crypto_policy\',\n \'notchecked\'), (\n \'xccdf_org.ssgproject.content_rule_ensure_gpgcheck_local_packages\',\n \'notapplicable\')])\n', (1585, 1930), False, 'import pytest\n'), ((1141, 1159), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', (['path'], {}), '(path)\n', (1153, 1159), False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((1446, 1464), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', (['path'], {}), '(path)\n', (1458, 1464), False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((2012, 2030), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', (['path'], {}), '(path)\n', (2024, 2030), False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((2041, 2080), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': 'pattern'}), '(Exception, match=pattern)\n', (2054, 2080), False, 'import pytest\n'), ((2297, 2344), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""arf\\\\b|ARF\\\\b"""'}), "(Exception, match='arf\\\\b|ARF\\\\b')\n", (2310, 2344), False, 'import pytest\n'), ((2360, 2378), 'oval_graph.arf_xml_parser.arf_xml_parser.ARFXMLParser', 'ARFXMLParser', (['path'], {}), '(path)\n', (2372, 2378), False, 'from oval_graph.arf_xml_parser.arf_xml_parser import ARFXMLParser\n'), ((195, 209), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (199, 209), False, 'from pathlib import Path\n')] |
import os
import logging
import argparse
import sys
import signal
import subprocess
from functools import wraps
from dotenv import load_dotenv
load_dotenv(verbose=True)
from app.config import configure_app
from app.bot import TrumpBotScheduler
from app.sentimentbot import SentimentBot
parser = argparse.ArgumentParser(description=r"""
""")
ROOT = os.getcwd()
PID_FILE_PATH = os.path.join(ROOT, 'var/run-dev.pid')
CMDS = []
FNCS = []
try:
os.setpgrp()
if not os.path.exists(os.path.dirname(PID_FILE_PATH)):
os.makedirs(os.path.dirname(PID_FILE_PATH))
with open(PID_FILE_PATH, 'w+') as file:
file.write(str(os.getpgrp()) + '\n')
except Exception as e:
logging.error(e)
def _file_path_sanity_check(*args):
for path in args:
if not os.path.exists(path):
raise Exception('Unable to find file %s' % path)
def _start_client_server(*args, **kwargs):
cmd = [
'npm', '--prefix', '%s/client' % ROOT, 'run', 'start'
]
CMDS.append(cmd)
def inject_file_paths(fn):
requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json')
auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json')
_file_path_sanity_check(requests_path, auth_path)
@wraps(fn)
def wrapper(*args, **kwargs):
return fn(requests_path=requests_path, auth_path=auth_path, *args, **kwargs)
return wrapper
@inject_file_paths
def _initialize_trump_bot(auth_path, requests_path,
send_posts: bool=True,
*args, **kwargs) -> TrumpBotScheduler:
trump_bot: TrumpBotScheduler = None
if send_posts:
logging.info('Post requests are not being sent.')
class PostOverride(TrumpBotScheduler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __send_tweet_msg__(self, content, headers=None):
return 200
trump_bot = PostOverride(file_path=requests_path, auth_file_path=auth_path)
else:
trump_bot = TrumpBotScheduler(file_path=requests_path, auth_file_path=auth_path)
    # this function initializes the trump bot by getting the latest tweets
# and trying to send any tweets that contained errors
trump_bot.send_latest_tweets()
trump_bot.resend_bad_tweets()
logging.info('Trump bot initialization finished... please press ctrl-c to close program if finished.')
return trump_bot
@inject_file_paths
def _start_sentiment_bot(auth_path: str, requests_path: str,
trump_bot: TrumpBotScheduler,
send_posts: bool=True) -> SentimentBot:
bot: SentimentBot = None
if send_posts:
logging.info('Sentiment bot is not running')
class PostOverride(SentimentBot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __send_tweet_msg__(self, content) -> int:
return 200
bot = PostOverride(file_path=requests_path, auth_file_path=auth_path)
else:
bot = SentimentBot(auth_file_path=auth_path, file_path=requests_path)
trump_bot.add_job(bot.send_todays_tone, 'interval', hours=24, max_instances=1)
return bot
def _start_flask_server(*args, **kwargs):
from app import app
logging.info('Starting the flask server...')
level = os.environ.get('CONFIG_LEVEL')
configure_app(app, status='production' if level is None else level)
port = app.config.get('PORT')
app.run(host='0.0.0.0', port=port)
def _start_dev_server(*args, **kwargs):
_start_client_server()
FNCS.append(_start_flask_server)
def _start_prod_server(*args, **kwargs):
_start_trump_bot(*args, **kwargs)
_start_flask_server(*args, **kwargs)
def _start_trump_bot(send_posts=True, start_sentiment_bot=False, *args, **kwargs):
logging.info('Starting the trump bot...')
# requests_path = os.environ.get('REQUESTS_FILE_PATH', 'requests/request.json')
# auth_path = os.environ.get('AUTH_FILE_PATH', 'requests/auth.json')
# _file_path_sanity_check(requests_path, auth_path)
bot = _initialize_trump_bot(send_posts=send_posts)
if not start_sentiment_bot:
_start_sentiment_bot(trump_bot=bot, send_posts=send_posts)
bot.start()
ACTIONS = {
"initialize": _initialize_trump_bot,
"client": _start_client_server,
"trumpbot": _start_trump_bot,
"flask": _start_flask_server,
"dev": _start_dev_server,
"prod": _start_prod_server,
}
parser.add_argument('action',
help='start the Flask app',
type=str,
choices=[key for key, v in ACTIONS.items()])
parser.add_argument('-np', '--no-post',
dest='send_posts',
action='store_true',
help='Do not send post requests')
parser.add_argument('-nsb', '--no-sentiment-bot',
dest='start_sentiment_bot',
action='store_true',
                    help='Do not start the sentiment bot')
def signal_handler(sig, frame):
os.killpg(0, signal.SIGTERM)
os.remove(PID_FILE_PATH)
sys.exit(0)
def main():
options = parser.parse_args()
for s in (signal.SIGINT, signal.SIGTERM):
signal.signal(s, signal_handler)
ACTIONS.get(options.action)(**options.__dict__)
env = os.environ.copy()
for cmd in CMDS:
subprocess.Popen(cmd, env=env)
for fn in FNCS:
subprocess.Popen(fn(), env=env)
signal.pause()
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
main()
| [
"app.app.run",
"app.config.configure_app",
"sys.exit",
"app.sentimentbot.SentimentBot",
"logging.info",
"logging.error",
"os.remove",
"os.path.exists",
"argparse.ArgumentParser",
"app.app.config.get",
"subprocess.Popen",
"functools.wraps",
"dotenv.load_dotenv",
"os.setpgrp",
"signal.pause",
"os.path.dirname",
"os.killpg",
"logging.basicConfig",
"os.getpgrp",
"signal.signal",
"os.path.join",
"os.environ.get",
"os.environ.copy",
"os.getcwd",
"app.bot.TrumpBotScheduler"
]
| [((144, 169), 'dotenv.load_dotenv', 'load_dotenv', ([], {'verbose': '(True)'}), '(verbose=True)\n', (155, 169), False, 'from dotenv import load_dotenv\n'), ((298, 339), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""\n"""'}), "(description='\\n')\n", (321, 339), False, 'import argparse\n'), ((353, 364), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (362, 364), False, 'import os\n'), ((381, 418), 'os.path.join', 'os.path.join', (['ROOT', '"""var/run-dev.pid"""'], {}), "(ROOT, 'var/run-dev.pid')\n", (393, 418), False, 'import os\n'), ((449, 461), 'os.setpgrp', 'os.setpgrp', ([], {}), '()\n', (459, 461), False, 'import os\n'), ((1063, 1124), 'os.environ.get', 'os.environ.get', (['"""REQUESTS_FILE_PATH"""', '"""requests/request.json"""'], {}), "('REQUESTS_FILE_PATH', 'requests/request.json')\n", (1077, 1124), False, 'import os\n'), ((1141, 1195), 'os.environ.get', 'os.environ.get', (['"""AUTH_FILE_PATH"""', '"""requests/auth.json"""'], {}), "('AUTH_FILE_PATH', 'requests/auth.json')\n", (1155, 1195), False, 'import os\n'), ((1256, 1265), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1261, 1265), False, 'from functools import wraps\n'), ((2352, 2464), 'logging.info', 'logging.info', (['"""Trump bot initialization finished... please press ctrl-c to close program if finished."""'], {}), "(\n 'Trump bot initialization finished... please press ctrl-c to close program if finished.'\n )\n", (2364, 2464), False, 'import logging\n'), ((3354, 3398), 'logging.info', 'logging.info', (['"""Starting the flask server..."""'], {}), "('Starting the flask server...')\n", (3366, 3398), False, 'import logging\n'), ((3411, 3441), 'os.environ.get', 'os.environ.get', (['"""CONFIG_LEVEL"""'], {}), "('CONFIG_LEVEL')\n", (3425, 3441), False, 'import os\n'), ((3447, 3514), 'app.config.configure_app', 'configure_app', (['app'], {'status': "('production' if level is None else level)"}), "(app, status='production' if level is None else level)\n", (3460, 3514), False, 'from app.config import configure_app\n'), ((3526, 3548), 'app.app.config.get', 'app.config.get', (['"""PORT"""'], {}), "('PORT')\n", (3540, 3548), False, 'from app import app\n'), ((3553, 3587), 'app.app.run', 'app.run', ([], {'host': '"""0.0.0.0"""', 'port': 'port'}), "(host='0.0.0.0', port=port)\n", (3560, 3587), False, 'from app import app\n'), ((3908, 3949), 'logging.info', 'logging.info', (['"""Starting the trump bot..."""'], {}), "('Starting the trump bot...')\n", (3920, 3949), False, 'import logging\n'), ((5149, 5177), 'os.killpg', 'os.killpg', (['(0)', 'signal.SIGTERM'], {}), '(0, signal.SIGTERM)\n', (5158, 5177), False, 'import os\n'), ((5182, 5206), 'os.remove', 'os.remove', (['PID_FILE_PATH'], {}), '(PID_FILE_PATH)\n', (5191, 5206), False, 'import os\n'), ((5211, 5222), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (5219, 5222), False, 'import sys\n'), ((5425, 5442), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (5440, 5442), False, 'import os\n'), ((5569, 5583), 'signal.pause', 'signal.pause', ([], {}), '()\n', (5581, 5583), False, 'import signal\n'), ((5617, 5656), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (5636, 5656), False, 'import logging\n'), ((692, 708), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (705, 708), False, 'import logging\n'), ((1661, 1710), 'logging.info', 'logging.info', (['"""Post requests are not being sent."""'], {}), "('Post requests are not being sent.')\n", (1673, 1710), False, 'import logging\n'), 
((2075, 2143), 'app.bot.TrumpBotScheduler', 'TrumpBotScheduler', ([], {'file_path': 'requests_path', 'auth_file_path': 'auth_path'}), '(file_path=requests_path, auth_file_path=auth_path)\n', (2092, 2143), False, 'from app.bot import TrumpBotScheduler\n'), ((2737, 2781), 'logging.info', 'logging.info', (['"""Sentiment bot is not running"""'], {}), "('Sentiment bot is not running')\n", (2749, 2781), False, 'import logging\n'), ((3118, 3181), 'app.sentimentbot.SentimentBot', 'SentimentBot', ([], {'auth_file_path': 'auth_path', 'file_path': 'requests_path'}), '(auth_file_path=auth_path, file_path=requests_path)\n', (3130, 3181), False, 'from app.sentimentbot import SentimentBot\n'), ((5325, 5357), 'signal.signal', 'signal.signal', (['s', 'signal_handler'], {}), '(s, signal_handler)\n', (5338, 5357), False, 'import signal\n'), ((5472, 5502), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'env': 'env'}), '(cmd, env=env)\n', (5488, 5502), False, 'import subprocess\n'), ((489, 519), 'os.path.dirname', 'os.path.dirname', (['PID_FILE_PATH'], {}), '(PID_FILE_PATH)\n', (504, 519), False, 'import os\n'), ((542, 572), 'os.path.dirname', 'os.path.dirname', (['PID_FILE_PATH'], {}), '(PID_FILE_PATH)\n', (557, 572), False, 'import os\n'), ((784, 804), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (798, 804), False, 'import os\n'), ((642, 654), 'os.getpgrp', 'os.getpgrp', ([], {}), '()\n', (652, 654), False, 'import os\n')] |
from setuptools import setup, Extension, find_packages
import subprocess
import errno
import re
import os
import shutil
import sys
import zipfile
from urllib.request import urlretrieve
import numpy
from Cython.Build import cythonize
isWindows = os.name == 'nt'
isMac = sys.platform == 'darwin'
is64Bit = sys.maxsize > 2**32
# adapted from cffi's setup.py
# the following may be overridden if pkg-config exists
libraries = ['lensfun']
include_dirs = []
library_dirs = []
extra_compile_args = []
extra_link_args = []
def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False):
pkg_config = os.environ.get('PKG_CONFIG','pkg-config')
try:
p = subprocess.Popen([pkg_config, option, 'lensfun'],
stdout=subprocess.PIPE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
t = p.stdout.read().decode().strip()
if p.wait() == 0:
res = t.split()
# '-I/usr/...' -> '/usr/...'
for x in res:
assert x.startswith(result_prefix)
res = [x[len(result_prefix):] for x in res]
sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '')
if sysroot:
# old versions of pkg-config don't support this env var,
# so here we emulate its effect if needed
res = [path if path.startswith(sysroot)
else sysroot + path
for path in res]
resultlist[:] = res
def use_pkg_config():
_ask_pkg_config(include_dirs, '--cflags-only-I', '-I', sysroot=True)
_ask_pkg_config(extra_compile_args, '--cflags-only-other')
_ask_pkg_config(library_dirs, '--libs-only-L', '-L', sysroot=True)
_ask_pkg_config(extra_link_args, '--libs-only-other')
_ask_pkg_config(libraries, '--libs-only-l', '-l')
if isWindows or isMac:
cmake_build = os.path.abspath('external/lensfun/build')
install_dir = os.path.join(cmake_build, 'install')
include_dirs += [os.path.join(install_dir, 'include', 'lensfun')]
library_dirs += [os.path.join(install_dir, 'lib')]
else:
use_pkg_config()
# this must be after use_pkg_config()!
include_dirs += [numpy.get_include()]
# for version_helper.h
include_dirs += [os.path.abspath('lensfunpy')]
def clone_submodules():
if not os.path.exists('external/lensfun/README.md'):
print('lensfun git submodule not cloned yet, will invoke "git submodule update --init" now')
if os.system('git submodule update --init') != 0:
raise Exception('git failed')
def windows_lensfun_compile():
clone_submodules()
cwd = os.getcwd()
# Download cmake to build lensfun
cmake_version = '3.13.4'
cmake_url = 'https://github.com/Kitware/CMake/releases/download/v{v}/cmake-{v}-win32-x86.zip'.format(v=cmake_version)
cmake = os.path.abspath('external/cmake-{}-win32-x86/bin/cmake.exe'.format(cmake_version))
# Download vcpkg to build dependencies of lensfun
vcpkg_commit = '2021.05.12'
vcpkg_url = 'https://github.com/Microsoft/vcpkg/archive/{}.zip'.format(vcpkg_commit)
vcpkg_dir = os.path.abspath('external/vcpkg-{}'.format(vcpkg_commit))
vcpkg_bootstrap = os.path.join(vcpkg_dir, 'bootstrap-vcpkg.bat')
vcpkg = os.path.join(vcpkg_dir, 'vcpkg.exe')
files = [(cmake_url, 'external', cmake),
(vcpkg_url, 'external', vcpkg_bootstrap)]
for url, extractdir, extractcheck in files:
if not os.path.exists(extractcheck):
path = 'external/' + os.path.basename(url)
if not os.path.exists(path):
print('Downloading', url)
try:
urlretrieve(url, path)
except:
# repeat once in case of network issues
urlretrieve(url, path)
with zipfile.ZipFile(path) as z:
print('Extracting', path, 'into', extractdir)
z.extractall(extractdir)
if not os.path.exists(path):
raise RuntimeError(path + ' not found!')
# Bootstrap vcpkg
os.chdir(vcpkg_dir)
if not os.path.exists(vcpkg):
code = os.system(vcpkg_bootstrap)
if code != 0:
sys.exit(code)
# lensfun depends on glib2, so let's build it with vcpkg
vcpkg_arch = 'x64' if is64Bit else 'x86'
vcpkg_triplet = '{}-windows'.format(vcpkg_arch)
code = os.system(vcpkg + ' install glib:' + vcpkg_triplet)
if code != 0:
sys.exit(code)
vcpkg_install_dir = os.path.join(vcpkg_dir, 'installed', vcpkg_triplet)
# bundle runtime dlls
vcpkg_bin_dir = os.path.join(vcpkg_install_dir, 'bin')
glib2_dll = os.path.join(vcpkg_bin_dir, 'glib-2.0-0.dll')
# configure and compile lensfun
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
# temporary hack to avoid https://stackoverflow.com/a/53547931
# (python module not needed here anyway)
patch_path = '../apps/CMakeLists.txt'
with open(patch_path) as f:
content = f.read()
content = content.replace('IF(PYTHON)', 'IF(FALSE)')
with open(patch_path, 'w') as f:
f.write(content)
cmds = [cmake + ' .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_TOOLCHAIN_FILE={}/scripts/buildsystems/vcpkg.cmake '.format(vcpkg_dir) +\
'-DGLIB2_BASE_DIR={} -DGLIB2_DLL={} -DCMAKE_INSTALL_PREFIX=install'.format(vcpkg_install_dir, glib2_dll),
cmake + ' --build .',
cmake + ' --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
dll_runtime_libs = [('lensfun.dll', os.path.join(install_dir, 'bin')),
('glib-2.0-0.dll', vcpkg_bin_dir),
# dependencies of glib
('pcre.dll', vcpkg_bin_dir),
('iconv-2.dll', vcpkg_bin_dir),
('charset-1.dll', vcpkg_bin_dir),
('intl-8.dll', vcpkg_bin_dir),
]
for filename, folder in dll_runtime_libs:
src = os.path.join(folder, filename)
dest = 'lensfunpy/' + filename
print('copying', src, '->', dest)
shutil.copyfile(src, dest)
def mac_lensfun_compile():
clone_submodules()
# configure and compile lensfun
cwd = os.getcwd()
if not os.path.exists(cmake_build):
os.mkdir(cmake_build)
os.chdir(cmake_build)
install_name_dir = os.path.join(install_dir, 'lib')
cmds = ['cmake .. -DCMAKE_BUILD_TYPE=Release ' +\
'-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
'-DCMAKE_INSTALL_PREFIX=install ' +\
'-DCMAKE_INSTALL_NAME_DIR=' + install_name_dir,
'cmake --build .',
'cmake --build . --target install',
]
for cmd in cmds:
print(cmd)
code = os.system(cmd)
if code != 0:
sys.exit(code)
os.chdir(cwd)
def bundle_db_files():
import glob
db_files = 'lensfunpy/db_files'
if not os.path.exists(db_files):
os.makedirs(db_files)
for path in glob.glob('external/lensfun/data/db/*.xml'):
dest = os.path.join(db_files, os.path.basename(path))
print('copying', path, '->', dest)
shutil.copyfile(path, dest)
package_data = {'lensfunpy': []}
# evil hack, check cmd line for relevant commands
# custom cmdclasses didn't work out in this case
cmdline = ''.join(sys.argv[1:])
needsCompile = any(s in cmdline for s in ['install', 'bdist', 'build_ext', 'wheel', 'nosetests'])
if isWindows and needsCompile:
windows_lensfun_compile()
package_data['lensfunpy'].append('*.dll')
elif isMac and needsCompile:
mac_lensfun_compile()
if any(s in cmdline for s in ['clean', 'sdist']):
# When running sdist after a previous run of bdist or build_ext
# then even with the 'clean' command the .egg-info folder stays.
# This folder contains SOURCES.txt which in turn is used by sdist
# to include package data files, but we don't want .dll's and .xml
# files in our source distribution. Therefore, to prevent accidents,
# we help a little...
egg_info = 'lensfunpy.egg-info'
print('removing', egg_info)
shutil.rmtree(egg_info, ignore_errors=True)
if 'sdist' not in cmdline:
# This assumes that the lensfun version from external/lensfun was used.
# If that's not the case, the bundled files may fail to load, for example,
# if lensfunpy was linked against an older lensfun version already on
# the system (Linux mostly) and the database format changed in an incompatible way.
# In that case, loading of bundled files can still be disabled
# with Database(load_bundled=False).
package_data['lensfunpy'].append('db_files/*.xml')
bundle_db_files()
# Support for optional Cython line tracing
# run the following to generate a test coverage report:
# $ export LINETRACE=1
# $ python setup.py build_ext --inplace
# $ nosetests --with-coverage --cover-html --cover-package=lensfunpy
compdirectives = {}
macros = []
if (os.environ.get('LINETRACE', False)):
compdirectives['linetrace'] = True
macros.append(('CYTHON_TRACE', '1'))
extensions = cythonize([Extension("lensfunpy._lensfun",
include_dirs=include_dirs,
sources=[os.path.join('lensfunpy', '_lensfun.pyx')],
libraries=libraries,
library_dirs=library_dirs,
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
define_macros=macros
)],
compiler_directives=compdirectives)
# make __version__ available (https://stackoverflow.com/a/16084844)
exec(open('lensfunpy/_version.py').read())
setup(
name = 'lensfunpy',
version = __version__,
description = 'Lens distortion correction for Python, a wrapper for lensfun',
long_description = open('README.rst').read(),
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/letmaik/lensfunpy',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Cython',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries',
],
packages = find_packages(),
ext_modules = extensions,
package_data = package_data,
install_requires=['numpy']
)
| [
"zipfile.ZipFile",
"sys.exit",
"os.path.exists",
"urllib.request.urlretrieve",
"subprocess.Popen",
"setuptools.find_packages",
"os.mkdir",
"numpy.get_include",
"glob.glob",
"shutil.copyfile",
"os.makedirs",
"os.environ.get",
"os.path.join",
"os.getcwd",
"os.chdir",
"os.path.basename",
"shutil.rmtree",
"os.path.abspath",
"os.system"
]
| [((9442, 9476), 'os.environ.get', 'os.environ.get', (['"""LINETRACE"""', '(False)'], {}), "('LINETRACE', False)\n", (9456, 9476), False, 'import os\n'), ((613, 655), 'os.environ.get', 'os.environ.get', (['"""PKG_CONFIG"""', '"""pkg-config"""'], {}), "('PKG_CONFIG', 'pkg-config')\n", (627, 655), False, 'import os\n'), ((1962, 2003), 'os.path.abspath', 'os.path.abspath', (['"""external/lensfun/build"""'], {}), "('external/lensfun/build')\n", (1977, 2003), False, 'import os\n'), ((2022, 2058), 'os.path.join', 'os.path.join', (['cmake_build', '"""install"""'], {}), "(cmake_build, 'install')\n", (2034, 2058), False, 'import os\n'), ((2277, 2296), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2294, 2296), False, 'import numpy\n'), ((2339, 2367), 'os.path.abspath', 'os.path.abspath', (['"""lensfunpy"""'], {}), "('lensfunpy')\n", (2354, 2367), False, 'import os\n'), ((2718, 2729), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2727, 2729), False, 'import os\n'), ((3291, 3337), 'os.path.join', 'os.path.join', (['vcpkg_dir', '"""bootstrap-vcpkg.bat"""'], {}), "(vcpkg_dir, 'bootstrap-vcpkg.bat')\n", (3303, 3337), False, 'import os\n'), ((3350, 3386), 'os.path.join', 'os.path.join', (['vcpkg_dir', '"""vcpkg.exe"""'], {}), "(vcpkg_dir, 'vcpkg.exe')\n", (3362, 3386), False, 'import os\n'), ((4214, 4233), 'os.chdir', 'os.chdir', (['vcpkg_dir'], {}), '(vcpkg_dir)\n', (4222, 4233), False, 'import os\n'), ((4530, 4581), 'os.system', 'os.system', (["(vcpkg + ' install glib:' + vcpkg_triplet)"], {}), "(vcpkg + ' install glib:' + vcpkg_triplet)\n", (4539, 4581), False, 'import os\n'), ((4647, 4698), 'os.path.join', 'os.path.join', (['vcpkg_dir', '"""installed"""', 'vcpkg_triplet'], {}), "(vcpkg_dir, 'installed', vcpkg_triplet)\n", (4659, 4698), False, 'import os\n'), ((4750, 4788), 'os.path.join', 'os.path.join', (['vcpkg_install_dir', '"""bin"""'], {}), "(vcpkg_install_dir, 'bin')\n", (4762, 4788), False, 'import os\n'), ((4805, 4850), 'os.path.join', 'os.path.join', (['vcpkg_bin_dir', '"""glib-2.0-0.dll"""'], {}), "(vcpkg_bin_dir, 'glib-2.0-0.dll')\n", (4817, 4850), False, 'import os\n'), ((4962, 4983), 'os.chdir', 'os.chdir', (['cmake_build'], {}), '(cmake_build)\n', (4970, 4983), False, 'import os\n'), ((5919, 5932), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (5927, 5932), False, 'import os\n'), ((6682, 6693), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6691, 6693), False, 'import os\n'), ((6768, 6789), 'os.chdir', 'os.chdir', (['cmake_build'], {}), '(cmake_build)\n', (6776, 6789), False, 'import os\n'), ((6813, 6845), 'os.path.join', 'os.path.join', (['install_dir', '"""lib"""'], {}), "(install_dir, 'lib')\n", (6825, 6845), False, 'import os\n'), ((7314, 7327), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (7322, 7327), False, 'import os\n'), ((7487, 7530), 'glob.glob', 'glob.glob', (['"""external/lensfun/data/db/*.xml"""'], {}), "('external/lensfun/data/db/*.xml')\n", (7496, 7530), False, 'import glob\n'), ((8600, 8643), 'shutil.rmtree', 'shutil.rmtree', (['egg_info'], {'ignore_errors': '(True)'}), '(egg_info, ignore_errors=True)\n', (8613, 8643), False, 'import shutil\n'), ((676, 749), 'subprocess.Popen', 'subprocess.Popen', (["[pkg_config, option, 'lensfun']"], {'stdout': 'subprocess.PIPE'}), "([pkg_config, option, 'lensfun'], stdout=subprocess.PIPE)\n", (692, 749), False, 'import subprocess\n'), ((2085, 2132), 'os.path.join', 'os.path.join', (['install_dir', '"""include"""', '"""lensfun"""'], {}), "(install_dir, 'include', 'lensfun')\n", (2097, 2132), False, 'import os\n'), 
((2155, 2187), 'os.path.join', 'os.path.join', (['install_dir', '"""lib"""'], {}), "(install_dir, 'lib')\n", (2167, 2187), False, 'import os\n'), ((2405, 2449), 'os.path.exists', 'os.path.exists', (['"""external/lensfun/README.md"""'], {}), "('external/lensfun/README.md')\n", (2419, 2449), False, 'import os\n'), ((4245, 4266), 'os.path.exists', 'os.path.exists', (['vcpkg'], {}), '(vcpkg)\n', (4259, 4266), False, 'import os\n'), ((4283, 4309), 'os.system', 'os.system', (['vcpkg_bootstrap'], {}), '(vcpkg_bootstrap)\n', (4292, 4309), False, 'import os\n'), ((4608, 4622), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (4616, 4622), False, 'import sys\n'), ((4899, 4926), 'os.path.exists', 'os.path.exists', (['cmake_build'], {}), '(cmake_build)\n', (4913, 4926), False, 'import os\n'), ((4936, 4957), 'os.mkdir', 'os.mkdir', (['cmake_build'], {}), '(cmake_build)\n', (4944, 4957), False, 'import os\n'), ((5850, 5864), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (5859, 5864), False, 'import os\n'), ((6428, 6458), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', (6440, 6458), False, 'import os\n'), ((6548, 6574), 'shutil.copyfile', 'shutil.copyfile', (['src', 'dest'], {}), '(src, dest)\n', (6563, 6574), False, 'import shutil\n'), ((6705, 6732), 'os.path.exists', 'os.path.exists', (['cmake_build'], {}), '(cmake_build)\n', (6719, 6732), False, 'import os\n'), ((6742, 6763), 'os.mkdir', 'os.mkdir', (['cmake_build'], {}), '(cmake_build)\n', (6750, 6763), False, 'import os\n'), ((7246, 7260), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (7255, 7260), False, 'import os\n'), ((7415, 7439), 'os.path.exists', 'os.path.exists', (['db_files'], {}), '(db_files)\n', (7429, 7439), False, 'import os\n'), ((7449, 7470), 'os.makedirs', 'os.makedirs', (['db_files'], {}), '(db_files)\n', (7460, 7470), False, 'import os\n'), ((7645, 7672), 'shutil.copyfile', 'shutil.copyfile', (['path', 'dest'], {}), '(path, dest)\n', (7660, 7672), False, 'import shutil\n'), ((11227, 11242), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (11240, 11242), False, 'from setuptools import setup, Extension, find_packages\n'), ((2563, 2603), 'os.system', 'os.system', (['"""git submodule update --init"""'], {}), "('git submodule update --init')\n", (2572, 2603), False, 'import os\n'), ((3556, 3584), 'os.path.exists', 'os.path.exists', (['extractcheck'], {}), '(extractcheck)\n', (3570, 3584), False, 'import os\n'), ((4344, 4358), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (4352, 4358), False, 'import sys\n'), ((5899, 5913), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (5907, 5913), False, 'import sys\n'), ((5974, 6006), 'os.path.join', 'os.path.join', (['install_dir', '"""bin"""'], {}), "(install_dir, 'bin')\n", (5986, 6006), False, 'import os\n'), ((7295, 7309), 'sys.exit', 'sys.exit', (['code'], {}), '(code)\n', (7303, 7309), False, 'import sys\n'), ((7570, 7592), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (7586, 7592), False, 'import os\n'), ((1176, 1220), 'os.environ.get', 'os.environ.get', (['"""PKG_CONFIG_SYSROOT_DIR"""', '""""""'], {}), "('PKG_CONFIG_SYSROOT_DIR', '')\n", (1190, 1220), False, 'import os\n'), ((3619, 3640), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (3635, 3640), False, 'import os\n'), ((3660, 3680), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3674, 3680), False, 'import os\n'), ((3941, 3962), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path'], {}), '(path)\n', (3956, 
3962), False, 'import zipfile\n'), ((4108, 4128), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (4122, 4128), False, 'import os\n'), ((3765, 3787), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'path'], {}), '(url, path)\n', (3776, 3787), False, 'from urllib.request import urlretrieve\n'), ((9680, 9721), 'os.path.join', 'os.path.join', (['"""lensfunpy"""', '"""_lensfun.pyx"""'], {}), "('lensfunpy', '_lensfun.pyx')\n", (9692, 9721), False, 'import os\n'), ((3892, 3914), 'urllib.request.urlretrieve', 'urlretrieve', (['url', 'path'], {}), '(url, path)\n', (3903, 3914), False, 'from urllib.request import urlretrieve\n')] |
"""
Post processing on detected objects
"""
import pymongo
from pymongo import MongoClient
import time
import logging
logging.basicConfig(format='%(levelname)s :: %(asctime)s :: %(message)s', level=logging.DEBUG)
from joblib import Parallel, delayed
import click
from xgboost_model.inference import run_inference, PostprocessException
import os
def load_detected_pages(db, buffer_size):
"""
"""
current_docs = []
for doc in db.propose_pages.find({'postprocess': None, 'ocr': True}, no_cursor_timeout=True):
current_docs.append(doc)
if len(current_docs) == buffer_size:
yield current_docs
current_docs = []
yield current_docs
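# A minimal sketch of the same buffering pattern over a plain iterable, using
# hypothetical names, to illustrate how the generator above batches documents:
# items accumulate until `buffer_size` is reached, each full batch is yielded,
# and whatever remains (possibly an empty list) is yielded at the end.
def _example_batches(items, buffer_size):
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == buffer_size:
            yield batch
            batch = []
    yield batch
# e.g. list(_example_batches(range(5), 2)) -> [[0, 1], [2, 3], [4]]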
def do_skip(page, client):
db = client.pdfs
coll = db.postprocess_pages
return coll.count_documents({'pdf_name': page['pdf_name'], 'page_num': page['page_num']}, limit=1) != 0
def postprocess(db_insert_fn, num_processes, weights_pth, skip):
logging.info('Starting post-processing over detected objects')
start_time = time.time()
client = MongoClient(os.environ["DBCONNECT"])
logging.info(f'Connected to client: {client}.')
db = client.pdfs
for batch in load_detected_pages(db, 100):
logging.info('Loaded next batch. Running postprocessing')
try:
pages = Parallel(n_jobs=num_processes)(delayed(run_inference)(page, weights_pth) for page in batch)
except PostprocessException as e:
logging.error(f'Postprocessing error in referenced page: {e.page}')
logging.error(f'Original Exception: {e.original_exception}')
continue
db_insert_fn(pages, client)
end_time = time.time()
logging.info(f'Exiting post-processing. Time up: {end_time - start_time}')
def mongo_insert_fn(objs, client):
db = client.pdfs
for obj in objs:
try:
result = db.propose_pages.update_one({'_id': obj['_id']},
{'$set':
{
'pp_detected_objs': obj['pp_detected_objs'],
'postprocess': True
}
}, upsert=False)
logging.info(f'Updated result: {result}')
        except pymongo.errors.WriteError as e:
            logging.error(f'Document write error: {e}\n Document id: {obj["_id"]}')
@click.command()
@click.argument("num_processes")
@click.argument("weights_pth")
@click.option('--skip/--no-skip')
def click_wrapper(num_processes, weights_pth, skip):
postprocess(mongo_insert_fn, int(num_processes), weights_pth, skip)
if __name__ == '__main__':
click_wrapper()
| [
"logging.basicConfig",
"click.argument",
"click.option",
"joblib.delayed",
"joblib.Parallel",
"time.time",
"pymongo.MongoClient",
"click.command",
"logging.info",
"logging.error"
]
| [((118, 216), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s :: %(asctime)s :: %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s :: %(asctime)s :: %(message)s',\n level=logging.DEBUG)\n", (137, 216), False, 'import logging\n'), ((2490, 2505), 'click.command', 'click.command', ([], {}), '()\n', (2503, 2505), False, 'import click\n'), ((2507, 2538), 'click.argument', 'click.argument', (['"""num_processes"""'], {}), "('num_processes')\n", (2521, 2538), False, 'import click\n'), ((2540, 2569), 'click.argument', 'click.argument', (['"""weights_pth"""'], {}), "('weights_pth')\n", (2554, 2569), False, 'import click\n'), ((2571, 2603), 'click.option', 'click.option', (['"""--skip/--no-skip"""'], {}), "('--skip/--no-skip')\n", (2583, 2603), False, 'import click\n'), ((946, 1008), 'logging.info', 'logging.info', (['"""Starting post-processing over detected objects"""'], {}), "('Starting post-processing over detected objects')\n", (958, 1008), False, 'import logging\n'), ((1026, 1037), 'time.time', 'time.time', ([], {}), '()\n', (1035, 1037), False, 'import time\n'), ((1051, 1087), 'pymongo.MongoClient', 'MongoClient', (["os.environ['DBCONNECT']"], {}), "(os.environ['DBCONNECT'])\n", (1062, 1087), False, 'from pymongo import MongoClient\n'), ((1092, 1139), 'logging.info', 'logging.info', (['f"""Connected to client: {client}."""'], {}), "(f'Connected to client: {client}.')\n", (1104, 1139), False, 'import logging\n'), ((1667, 1678), 'time.time', 'time.time', ([], {}), '()\n', (1676, 1678), False, 'import time\n'), ((1683, 1757), 'logging.info', 'logging.info', (['f"""Exiting post-processing. Time up: {end_time - start_time}"""'], {}), "(f'Exiting post-processing. Time up: {end_time - start_time}')\n", (1695, 1757), False, 'import logging\n'), ((1216, 1273), 'logging.info', 'logging.info', (['"""Loaded next batch. Running postprocessing"""'], {}), "('Loaded next batch. Running postprocessing')\n", (1228, 1273), False, 'import logging\n'), ((2316, 2357), 'logging.info', 'logging.info', (['f"""Updated result: {result}"""'], {}), "(f'Updated result: {result}')\n", (2328, 2357), False, 'import logging\n'), ((1307, 1337), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_processes'}), '(n_jobs=num_processes)\n', (1315, 1337), False, 'from joblib import Parallel, delayed\n'), ((1453, 1520), 'logging.error', 'logging.error', (['f"""Postprocessing error in referenced page: {e.page}"""'], {}), "(f'Postprocessing error in referenced page: {e.page}')\n", (1466, 1520), False, 'import logging\n'), ((1533, 1593), 'logging.error', 'logging.error', (['f"""Original Exception: {e.original_exception}"""'], {}), "(f'Original Exception: {e.original_exception}')\n", (1546, 1593), False, 'import logging\n'), ((2418, 2490), 'logging.error', 'logging.error', (['f"""Document write error: {e}\n Document id: obj["_id"]"""'], {}), '(f"""Document write error: {e}\n Document id: obj["_id"]""")\n', (2431, 2490), False, 'import logging\n'), ((1338, 1360), 'joblib.delayed', 'delayed', (['run_inference'], {}), '(run_inference)\n', (1345, 1360), False, 'from joblib import Parallel, delayed\n')] |
#
# Copyright (c) 2018-2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import exception
from sysinv.common import utils
from sysinv.helm import common
from sysinv.helm import base
class GarbdHelm(base.BaseHelm):
"""Class to encapsulate helm operations for the galera arbitrator chart"""
# The service name is used to build the standard docker image location.
# It is intentionally "mariadb" and not "garbd" as they both use the
# same docker image.
SERVICE_NAME = common.HELM_CHART_MARIADB
CHART = common.HELM_CHART_GARBD
SUPPORTED_NAMESPACES = \
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
SUPPORTED_APP_NAMESPACES = {
constants.HELM_APP_OPENSTACK:
base.BaseHelm.SUPPORTED_NAMESPACES + [common.HELM_NS_OPENSTACK]
}
def _is_enabled(self, app_name, chart_name, namespace):
# First, see if this chart is enabled by the user then adjust based on
# system conditions
enabled = super(GarbdHelm, self)._is_enabled(
app_name, chart_name, namespace)
# If there are fewer than 2 controllers or we're on AIO-DX or we are on
# distributed cloud system controller, we'll use a single mariadb server
# and so we don't want to run garbd.
if enabled and (self._num_controllers() < 2 or
utils.is_aio_duplex_system(self.dbapi) or
(self._distributed_cloud_role() ==
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER)):
enabled = False
return enabled
def execute_manifest_updates(self, operator):
# On application load this chart is enabled in the mariadb chart group
if not self._is_enabled(operator.APP,
self.CHART, common.HELM_NS_OPENSTACK):
operator.chart_group_chart_delete(
operator.CHART_GROUPS_LUT[self.CHART],
operator.CHARTS_LUT[self.CHART])
def get_overrides(self, namespace=None):
overrides = {
common.HELM_NS_OPENSTACK: {
}
}
if namespace in self.SUPPORTED_NAMESPACES:
return overrides[namespace]
elif namespace:
raise exception.InvalidHelmNamespace(chart=self.CHART,
namespace=namespace)
else:
return overrides
| [
"sysinv.common.utils.is_aio_duplex_system",
"sysinv.common.exception.InvalidHelmNamespace"
]
| [((1437, 1475), 'sysinv.common.utils.is_aio_duplex_system', 'utils.is_aio_duplex_system', (['self.dbapi'], {}), '(self.dbapi)\n', (1463, 1475), False, 'from sysinv.common import utils\n'), ((2331, 2400), 'sysinv.common.exception.InvalidHelmNamespace', 'exception.InvalidHelmNamespace', ([], {'chart': 'self.CHART', 'namespace': 'namespace'}), '(chart=self.CHART, namespace=namespace)\n', (2361, 2400), False, 'from sysinv.common import exception\n')] |
#!/usr/bin/env python
"""Generate frame counts dict for a dataset.
Usage:
frame_counter.py [options]
Options:
-h, --help Print help message
--root=<str> Path to root of dataset (should contain video folders that contain images)
[default: /vision/vision_users/azou/data/hmdb51_flow/u/]
--output=<str> Output filename [default: hmdb_frame_count.pickle]
"""
from __future__ import print_function
from docopt import docopt
import os
import sys
import pickle
if __name__ == '__main__':
args = docopt(__doc__)
print(args)
# Final counts
counts = {}
    min_count = sys.maxsize
# Generate list of video folders
for root, dirs, files in os.walk(args['--root']):
# Skip the root directory
if len(dirs) != 0:
continue
# Process a directory and frame count into a dictionary entry
name = os.path.basename(os.path.normpath(root))
print('{}: {} frames'.format(name, len(files)))
counts[name] = len(files)
# Track minimum count
if len(files) < min_count:
min_count = len(files)
with open(args['--output'], 'wb') as ofile:
pickle.dump(counts, ofile)
print('Minimum frame count = {}'.format(min_count))
| [
"os.path.normpath",
"pickle.dump",
"docopt.docopt",
"os.walk"
]
| [((552, 567), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (558, 567), False, 'from docopt import docopt\n'), ((714, 737), 'os.walk', 'os.walk', (["args['--root']"], {}), "(args['--root'])\n", (721, 737), False, 'import os\n'), ((1196, 1222), 'pickle.dump', 'pickle.dump', (['counts', 'ofile'], {}), '(counts, ofile)\n', (1207, 1222), False, 'import pickle\n'), ((924, 946), 'os.path.normpath', 'os.path.normpath', (['root'], {}), '(root)\n', (940, 946), False, 'import os\n')] |
#!/usr/bin/env python
# Copyright (c) 2016 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging # noqa: E402
logging.basicConfig(level=logging.DEBUG)
from kmip.services.server import server # noqa: E402
if __name__ == '__main__':
print('Starting PyKMIP server on 0.0.0.0:5696')
server.main()
| [
"logging.basicConfig",
"kmip.services.server.server.main"
]
| [((699, 739), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (718, 739), False, 'import logging\n'), ((879, 892), 'kmip.services.server.server.main', 'server.main', ([], {}), '()\n', (890, 892), False, 'from kmip.services.server import server\n')] |
#
# test_tempo_event.py
# crest-python
#
# Copyright (C) 2017 <NAME>
# Distributed under the MIT License.
#
import crest_loader
import unittest
from crest.events.meta import TempoEvent
class TestTempoEvent(unittest.TestCase):
def test_ctor(self):
TempoEvent()
TempoEvent(120)
def test_message(self):
evt = TempoEvent(120)
self.assertEqual(evt.Message, [0xFF, 0x51, 0x03, 0x07, 0xA1, 0x20])
def test_property(self):
evt = TempoEvent(120)
self.assertEqual(evt.Tempo, 120)
self.assertEqual(evt.MicroSeconds, 500000)
evt.Tempo = 60
self.assertEqual(evt.Tempo, 60)
self.assertEqual(evt.MicroSeconds, 1000000)
evt.MicroSeconds = 250000
self.assertEqual(evt.Tempo, 240)
self.assertEqual(evt.MicroSeconds, 250000)
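# A small sketch of the relationship the assertions above rely on, with an
# illustrative helper name that is not part of crest's API: a MIDI Set Tempo
# event stores microseconds per quarter note, so 120 BPM corresponds to
# 60000000 / 120 = 500000 us, which is 0x07A120 -- the last three bytes of the
# expected message.
def _tempo_to_microseconds(bpm):
    """Convert beats per minute to microseconds per quarter note."""
    return 60000000 // bpm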
if (__name__ == '__main__'):
unittest.main()
| [
"unittest.main",
"crest.events.meta.TempoEvent"
]
| [((869, 884), 'unittest.main', 'unittest.main', ([], {}), '()\n', (882, 884), False, 'import unittest\n'), ((269, 281), 'crest.events.meta.TempoEvent', 'TempoEvent', ([], {}), '()\n', (279, 281), False, 'from crest.events.meta import TempoEvent\n'), ((290, 305), 'crest.events.meta.TempoEvent', 'TempoEvent', (['(120)'], {}), '(120)\n', (300, 305), False, 'from crest.events.meta import TempoEvent\n'), ((349, 364), 'crest.events.meta.TempoEvent', 'TempoEvent', (['(120)'], {}), '(120)\n', (359, 364), False, 'from crest.events.meta import TempoEvent\n'), ((485, 500), 'crest.events.meta.TempoEvent', 'TempoEvent', (['(120)'], {}), '(120)\n', (495, 500), False, 'from crest.events.meta import TempoEvent\n')] |
# Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of basic calculations.
These include:
* wind components
* heat index
* windchill
"""
import warnings
import numpy as np
from scipy.ndimage import gaussian_filter
from .. import constants as mpconsts
from ..package_tools import Exporter
from ..units import atleast_1d, check_units, masked_array, units
from ..xarray import preprocess_xarray
exporter = Exporter(globals())
# The following variables are constants for a standard atmosphere
t0 = 288. * units.kelvin
p0 = 1013.25 * units.hPa
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_speed(u, v):
r"""Compute the wind speed from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
Returns
-------
wind speed: `pint.Quantity`
The speed of the wind
See Also
--------
wind_components
"""
speed = np.sqrt(u * u + v * v)
return speed
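# A minimal usage sketch for `wind_speed` above, assuming the pint `units`
# registry imported at the top of this module (the `_example_*` helper names
# here and below are illustrative only, not part of the metpy API): u = 3 m/s
# and v = 4 m/s form a 3-4-5 triangle, so the expected speed is 5 m/s.
def _example_wind_speed():
    """Return the speed for u = 3 m/s, v = 4 m/s (expected: 5 m/s)."""
    return wind_speed(3. * units('m/s'), 4. * units('m/s'))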
@exporter.export
@preprocess_xarray
@check_units('[speed]', '[speed]')
def wind_direction(u, v, convention='from'):
r"""Compute the wind direction from u and v-components.
Parameters
----------
u : `pint.Quantity`
Wind component in the X (East-West) direction
v : `pint.Quantity`
Wind component in the Y (North-South) direction
convention : str
Convention to return direction. 'from' returns the direction the wind is coming from
(meteorological convention). 'to' returns the direction the wind is going towards
(oceanographic convention). Default is 'from'.
Returns
-------
direction: `pint.Quantity`
The direction of the wind in interval [0, 360] degrees, with 360 being North, with the
direction defined by the convention kwarg.
See Also
--------
wind_components
Notes
-----
In the case of calm winds (where `u` and `v` are zero), this function returns a direction
of 0.
"""
wdir = 90. * units.deg - np.arctan2(-v, -u)
origshape = wdir.shape
wdir = atleast_1d(wdir)
# Handle oceanographic convection
if convention == 'to':
wdir -= 180 * units.deg
elif convention not in ('to', 'from'):
raise ValueError('Invalid kwarg for "convention". Valid options are "from" or "to".')
wdir[wdir <= 0] += 360. * units.deg
# avoid unintended modification of `pint.Quantity` by direct use of magnitude
calm_mask = (np.asarray(u.magnitude) == 0.) & (np.asarray(v.magnitude) == 0.)
# np.any check required for legacy numpy which treats 0-d False boolean index as zero
if np.any(calm_mask):
wdir[calm_mask] = 0. * units.deg
return wdir.reshape(origshape).to('degrees')
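# A hedged sketch for `wind_direction`: with u = v = 1 m/s the air moves toward
# the northeast, so the default meteorological ('from') convention should give
# 225 degrees, while convention='to' would give 45 degrees.
def _example_wind_direction():
    """Return the direction for u = v = 1 m/s (expected: 225 degrees)."""
    return wind_direction(1. * units('m/s'), 1. * units('m/s'))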
@exporter.export
@preprocess_xarray
@check_units('[speed]')
def wind_components(speed, wdir):
r"""Calculate the U, V wind vector components from the speed and direction.
Parameters
----------
speed : `pint.Quantity`
The wind speed (magnitude)
wdir : `pint.Quantity`
The wind direction, specified as the direction from which the wind is
blowing (0-2 pi radians or 0-360 degrees), with 360 degrees being North.
Returns
-------
u, v : tuple of `pint.Quantity`
The wind components in the X (East-West) and Y (North-South)
directions, respectively.
See Also
--------
wind_speed
wind_direction
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.wind_components(10. * units('m/s'), 225. * units.deg)
(<Quantity(7.071067811865475, 'meter / second')>,
<Quantity(7.071067811865477, 'meter / second')>)
"""
wdir = _check_radians(wdir, max_radians=4 * np.pi)
u = -speed * np.sin(wdir)
v = -speed * np.cos(wdir)
return u, v
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def windchill(temperature, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the Wind Chill Temperature Index (WCTI).
Calculates WCTI from the current temperature and wind speed using the formula
outlined by the FCM [FCMR192003]_.
Specifically, these formulas assume that wind speed is measured at
10m. If, instead, the speeds are measured at face level, the winds
need to be multiplied by a factor of 1.5 (this can be done by specifying
`face_level_winds` as `True`.)
Parameters
----------
temperature : `pint.Quantity`
The air temperature
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill is undefined masked. These are values where
the temperature > 50F or wind speed <= 3 miles per hour. Defaults
to `True`.
Returns
-------
`pint.Quantity`
The corresponding Wind Chill Temperature Index value(s)
See Also
--------
heat_index
"""
# Correct for lower height measurement of winds if necessary
if face_level_winds:
# No in-place so that we copy
# noinspection PyAugmentAssignment
speed = speed * 1.5
temp_limit, speed_limit = 10. * units.degC, 3 * units.mph
speed_factor = speed.to('km/hr').magnitude ** 0.16
wcti = units.Quantity((0.6215 + 0.3965 * speed_factor) * temperature.to('degC').magnitude
- 11.37 * speed_factor + 13.12, units.degC).to(temperature.units)
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array((temperature > temp_limit) | (speed <= speed_limit))
if mask.any():
wcti = masked_array(wcti, mask=mask)
return wcti
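# A hedged sketch for `windchill`: -5 degC air with a 35 km/h wind measured at
# 10 m lies inside the defined range (T <= 10 degC, speed > 3 mph), so the FCM
# regression above should give a wind chill noticeably colder than the air
# temperature (roughly -14 degC).
def _example_windchill():
    """Return the wind chill for -5 degC air and a 35 km/h 10-m wind."""
    return windchill(-5. * units.degC, 35. * units('km/hr'))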
@exporter.export
@preprocess_xarray
@check_units('[temperature]')
def heat_index(temperature, rh, mask_undefined=True):
r"""Calculate the Heat Index from the current temperature and relative humidity.
The implementation uses the formula outlined in [Rothfusz1990]_, which is a
multi-variable least-squares regression of the values obtained in [Steadman1979]_.
Additional conditional corrections are applied to match what the National
Weather Service operationally uses. See Figure 3 of [Anderson2013]_ for a
depiction of this algorithm and further discussion.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
The corresponding Heat Index value(s)
Other Parameters
----------------
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values masked where the temperature < 80F. Defaults to `True`.
See Also
--------
windchill
"""
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
# assign units to rh if they currently are not present
if not hasattr(rh, 'units'):
rh = rh * units.dimensionless
delta = temperature.to(units.degF) - 0. * units.degF
rh2 = rh * rh
delta2 = delta * delta
    # Simplified Heat Index -- constants converted for RH in [0, 1]
a = -10.3 * units.degF + 1.1 * delta + 4.7 * units.delta_degF * rh
# More refined Heat Index -- constants converted for RH in [0, 1]
b = (-42.379 * units.degF
+ 2.04901523 * delta
+ 1014.333127 * units.delta_degF * rh
- 22.475541 * delta * rh
- 6.83783e-3 / units.delta_degF * delta2
- 5.481717e2 * units.delta_degF * rh2
+ 1.22874e-1 / units.delta_degF * delta2 * rh
+ 8.5282 * delta * rh2
- 1.99e-2 / units.delta_degF * delta2 * rh2)
# Create return heat index
hi = np.full(np.shape(temperature), np.nan) * units.degF
# Retain masked status of temperature with resulting heat index
if hasattr(temperature, 'mask'):
hi = masked_array(hi)
# If T <= 40F, Heat Index is T
sel = (temperature <= 40. * units.degF)
if np.any(sel):
hi[sel] = temperature[sel].to(units.degF)
# If a < 79F and hi is unset, Heat Index is a
sel = (a < 79. * units.degF) & np.isnan(hi)
if np.any(sel):
hi[sel] = a[sel]
# Use b now for anywhere hi has yet to be set
sel = np.isnan(hi)
if np.any(sel):
hi[sel] = b[sel]
# Adjustment for RH <= 13% and 80F <= T <= 112F
sel = ((rh <= 13. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 112. * units.degF))
if np.any(sel):
rh15adj = ((13. - rh * 100.) / 4.
* ((17. * units.delta_degF - np.abs(delta - 95. * units.delta_degF))
/ 17. * units.delta_degF) ** 0.5)
hi[sel] = hi[sel] - rh15adj[sel]
# Adjustment for RH > 85% and 80F <= T <= 87F
sel = ((rh > 85. * units.percent) & (temperature >= 80. * units.degF)
& (temperature <= 87. * units.degF))
if np.any(sel):
rh85adj = 0.02 * (rh * 100. - 85.) * (87. * units.delta_degF - delta)
hi[sel] = hi[sel] + rh85adj[sel]
# See if we need to mask any undefined values
if mask_undefined:
mask = np.array(temperature < 80. * units.degF)
if mask.any():
hi = masked_array(hi, mask=mask)
return hi
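# A hedged sketch for `heat_index`: 35 degC (95 degF) air at 60% relative
# humidity is above the 80 degF threshold, so the Rothfusz regression applies
# and the result should be well above the air temperature (roughly 114 degF).
def _example_heat_index():
    """Return the heat index for 35 degC air at 60 percent relative humidity."""
    return heat_index(35. * units.degC, 60. * units.percent)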
@exporter.export
@preprocess_xarray
@check_units(temperature='[temperature]', speed='[speed]')
def apparent_temperature(temperature, rh, speed, face_level_winds=False, mask_undefined=True):
r"""Calculate the current apparent temperature.
Calculates the current apparent temperature based on the wind chill or heat index
as appropriate for the current conditions. Follows [NWS10201]_.
Parameters
----------
temperature : `pint.Quantity`
The air temperature
rh : `pint.Quantity`
The relative humidity expressed as a unitless ratio in the range [0, 1].
Can also pass a percentage if proper units are attached.
speed : `pint.Quantity`
The wind speed at 10m. If instead the winds are at face level,
`face_level_winds` should be set to `True` and the 1.5 multiplicative
correction will be applied automatically.
face_level_winds : bool, optional
A flag indicating whether the wind speeds were measured at facial
level instead of 10m, thus requiring a correction. Defaults to
`False`.
mask_undefined : bool, optional
A flag indicating whether a masked array should be returned with
values where wind chill or heat_index is undefined masked. For wind
chill, these are values where the temperature > 50F or
wind speed <= 3 miles per hour. For heat index, these are values
where the temperature < 80F.
Defaults to `True`.
Returns
-------
`pint.Quantity`
The corresponding apparent temperature value(s)
See Also
--------
heat_index, windchill
"""
is_not_scalar = isinstance(temperature.m, (list, tuple, np.ndarray))
temperature = atleast_1d(temperature)
rh = atleast_1d(rh)
speed = atleast_1d(speed)
# NB: mask_defined=True is needed to know where computed values exist
wind_chill_temperature = windchill(temperature, speed, face_level_winds=face_level_winds,
mask_undefined=True).to(temperature.units)
heat_index_temperature = heat_index(temperature, rh,
mask_undefined=True).to(temperature.units)
# Combine the heat index and wind chill arrays (no point has a value in both)
# NB: older numpy.ma.where does not return a masked array
app_temperature = masked_array(
np.ma.where(masked_array(wind_chill_temperature).mask,
heat_index_temperature.to(temperature.units),
wind_chill_temperature.to(temperature.units)
), temperature.units)
# If mask_undefined is False, then set any masked values to the temperature
if not mask_undefined:
app_temperature[app_temperature.mask] = temperature[app_temperature.mask]
# If no values are masked and provided temperature does not have a mask
# we should return a non-masked array
if not np.any(app_temperature.mask) and not hasattr(temperature, 'mask'):
app_temperature = np.array(app_temperature.m) * temperature.units
if is_not_scalar:
return app_temperature
else:
return atleast_1d(app_temperature)[0]
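# A hedged sketch for `apparent_temperature`: for 35 degC air, 60% relative
# humidity and a light 3 m/s wind, wind chill is undefined (T > 50 degF), so
# the function should fall back to the heat index; for a cold, windy case it
# would instead return the wind chill.
def _example_apparent_temperature():
    """Return the apparent temperature for a hot, humid, lightly windy case."""
    return apparent_temperature(35. * units.degC, 60. * units.percent,
                                3. * units('m/s'))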
@exporter.export
@preprocess_xarray
@check_units('[pressure]')
def pressure_to_height_std(pressure):
r"""Convert pressure data to heights using the U.S. standard atmosphere [NOAA1976]_.
The implementation uses the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Notes
-----
    .. math:: Z = \frac{T_0}{\Gamma}\left[1-\left(\frac{p}{p_0}\right)^{\frac{R\Gamma}{g}}\right]
"""
gamma = 6.5 * units('K/km')
return (t0 / gamma) * (1 - (pressure / p0).to('dimensionless')**(
mpconsts.Rd * gamma / mpconsts.g))
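# A hedged sketch for `pressure_to_height_std`: in the U.S. standard
# atmosphere the 850 hPa surface sits near 1.5 km above sea level, so the call
# below is expected to return roughly 1.46 km.
def _example_pressure_to_height_std():
    """Return the standard-atmosphere height of the 850 hPa level."""
    return pressure_to_height_std(850. * units.hPa)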
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_geopotential(height):
r"""Compute geopotential for a given height.
Calculates the geopotential from height using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: \Phi = G m_e \left( \frac{1}{R_e} - \frac{1}{R_e + z}\right)
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
height : `pint.Quantity`
Height above sea level
Returns
-------
`pint.Quantity`
The corresponding geopotential value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
return (mpconsts.G * mpconsts.me / mpconsts.Re) * (height / (mpconsts.Re + height))
@exporter.export
@preprocess_xarray
def geopotential_to_height(geopot):
r"""Compute height from a given geopotential.
Calculates the height from geopotential using the following formula, which is derived from
the definition of geopotential as given in [Hobbs2006]_ Pg. 69 Eq 3.21:
.. math:: z = \frac{1}{\frac{1}{R_e} - \frac{\Phi}{G m_e}} - R_e
(where :math:`\Phi` is geopotential, :math:`z` is height, :math:`R_e` is average Earth
radius, :math:`G` is the (universal) gravitational constant, and :math:`m_e` is the
approximate mass of Earth.)
Parameters
----------
    geopot : `pint.Quantity`
Geopotential
Returns
-------
`pint.Quantity`
The corresponding height value(s)
Examples
--------
>>> import metpy.calc
>>> from metpy.units import units
>>> height = np.linspace(0, 10000, num=11) * units.m
>>> geopot = metpy.calc.height_to_geopotential(height)
>>> geopot
<Quantity([ 0. 9817.46806283 19631.85526579 29443.16305887
39251.39289118 49056.54621087 58858.62446524 68657.62910064
78453.56156252 88246.42329544 98036.21574305], 'meter ** 2 / second ** 2')>
>>> height = metpy.calc.geopotential_to_height(geopot)
>>> height
<Quantity([ 0. 1000. 2000. 3000. 4000. 5000. 6000. 7000. 8000.
9000. 10000.], 'meter')>
"""
# Direct implementation of formula from Hobbs yields poor numerical results (see
# gh-1075), so was replaced with algebraic equivalent.
scaled = geopot * mpconsts.Re
return scaled * mpconsts.Re / (mpconsts.G * mpconsts.me - scaled)
@exporter.export
@preprocess_xarray
@check_units('[length]')
def height_to_pressure_std(height):
r"""Convert height data to pressures using the U.S. standard atmosphere [NOAA1976]_.
The implementation inverts the formula outlined in [Hobbs1977]_ pg.60-61.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
Returns
-------
`pint.Quantity`
The corresponding pressure value(s)
Notes
-----
.. math:: p = p_0 e^{\frac{g}{R \Gamma} \text{ln}(1-\frac{Z \Gamma}{T_0})}
"""
gamma = 6.5 * units('K/km')
return p0 * (1 - (gamma / t0) * height) ** (mpconsts.g / (mpconsts.Rd * gamma))
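# A hedged sketch for `height_to_pressure_std`, the inverse of
# `pressure_to_height_std` above: 1000 m in the standard atmosphere maps to
# roughly 899 hPa, and composing the two functions recovers the input height.
def _example_height_to_pressure_std():
    """Return the standard-atmosphere pressure at 1000 m (about 899 hPa)."""
    return height_to_pressure_std(1000. * units.m)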
@exporter.export
@preprocess_xarray
def coriolis_parameter(latitude):
r"""Calculate the coriolis parameter at each point.
The implementation uses the formula outlined in [Hobbs1977]_ pg.370-371.
Parameters
----------
latitude : array_like
Latitude at each point
Returns
-------
`pint.Quantity`
The corresponding coriolis force at each point
"""
latitude = _check_radians(latitude, max_radians=np.pi / 2)
return (2. * mpconsts.omega * np.sin(latitude)).to('1/s')
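# A hedged sketch for `coriolis_parameter`: f = 2 * omega * sin(latitude), so
# at 45 degrees latitude f is approximately 1.03e-4 per second.
def _example_coriolis_parameter():
    """Return the Coriolis parameter at 45 degrees latitude."""
    return coriolis_parameter(45. * units.deg)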
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def add_height_to_pressure(pressure, height):
r"""Calculate the pressure at a certain height above another pressure level.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
pressure : `pint.Quantity`
Pressure level
height : `pint.Quantity`
Height above a pressure level
Returns
-------
`pint.Quantity`
The corresponding pressure value for the height above the pressure level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_pressure_to_height
"""
pressure_level_height = pressure_to_height_std(pressure)
return height_to_pressure_std(pressure_level_height + height)
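# A hedged sketch for `add_height_to_pressure`: under the standard atmosphere
# the pressure 500 m above the 1000 hPa level is roughly 942 hPa.
def _example_add_height_to_pressure():
    """Return the pressure 500 m above the 1000 hPa level."""
    return add_height_to_pressure(1000. * units.hPa, 500. * units.m)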
@exporter.export
@preprocess_xarray
@check_units('[length]', '[pressure]')
def add_pressure_to_height(height, pressure):
r"""Calculate the height at a certain pressure above another height.
This assumes a standard atmosphere [NOAA1976]_.
Parameters
----------
height : `pint.Quantity`
Height level
pressure : `pint.Quantity`
Pressure above height level
Returns
-------
`pint.Quantity`
The corresponding height value for the pressure above the height level
See Also
--------
pressure_to_height_std, height_to_pressure_std, add_height_to_pressure
"""
pressure_at_height = height_to_pressure_std(height)
return pressure_to_height_std(pressure_at_height - pressure)
@exporter.export
@preprocess_xarray
@check_units('[dimensionless]', '[pressure]', '[pressure]')
def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop
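# Worked example (added for illustration): with psfc = 1000 hPa and ptop = 100 hPa,
# sigma = 0.5 maps to 0.5 * (1000 - 100) + 100 = 550 hPa, sigma = 1 recovers the
# surface pressure, and sigma = 0 gives the model-top pressure.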
@exporter.export
@preprocess_xarray
def smooth_gaussian(scalar_grid, n):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : `pint.Quantity`
Some n-dimensional scalar grid. If more than two axes, smoothing
is only done across the last two.
n : int
Degree of filtering
Returns
-------
`pint.Quantity`
The filtered 2D scalar grid
Notes
-----
This function is a close replication of the GEMPAK function GWFS,
but is not identical. The following notes are incorporated from
the GEMPAK source code:
This function smoothes a scalar grid using a moving average
low-pass filter whose weights are determined by the normal
(Gaussian) probability distribution function for two dimensions.
The weight given to any grid point within the area covered by the
moving average for a target grid point is proportional to
EXP [ -( D ** 2 ) ],
where D is the distance from that point to the target point divided
by the standard deviation of the normal distribution. The value of
the standard deviation is determined by the degree of filtering
requested. The degree of filtering is specified by an integer.
This integer is the number of grid increments from crest to crest
of the wave for which the theoretical response is 1/e = .3679. If
the grid increment is called delta_x, and the value of this integer
is represented by N, then the theoretical filter response function
value for the N * delta_x wave will be 1/e. The actual response
function will be greater than the theoretical value.
The larger N is, the more severe the filtering will be, because the
response function for all wavelengths shorter than N * delta_x
will be less than 1/e. Furthermore, as N is increased, the slope
of the filter response function becomes more shallow; so, the
response at all wavelengths decreases, but the amount of decrease
lessens with increasing wavelength. (The theoretical response
function can be obtained easily--it is the Fourier transform of the
weight function described above.)
The area of the patch covered by the moving average varies with N.
As N gets bigger, the smoothing gets stronger, and weight values
farther from the target grid point are larger because the standard
deviation of the normal distribution is bigger. Thus, increasing
N has the effect of expanding the moving average window as well as
changing the values of weights. The patch is a square covering all
points whose weight values are within two standard deviations of the
mean of the two dimensional normal distribution.
The key difference between GEMPAK's GWFS and this function is that,
in GEMPAK, the leftover weight values representing the fringe of the
distribution are applied to the target grid point. In this
function, the leftover weights are not used.
When this function is invoked, the first argument is the grid to be
smoothed, the second is the value of N as described above:
GWFS ( S, N )
where N > 1. If N <= 1, N = 2 is assumed. For example, if N = 4,
then the 4 delta x wave length is passed with approximate response
1/e.
"""
# Compute standard deviation in a manner consistent with GEMPAK
n = int(round(n))
if n < 2:
n = 2
sgma = n / (2 * np.pi)
# Construct sigma sequence so smoothing occurs only in horizontal direction
nax = len(scalar_grid.shape)
# Assume the last two axes represent the horizontal directions
sgma_seq = [sgma if i > nax - 3 else 0 for i in range(nax)]
# Compute smoothed field and reattach units
res = gaussian_filter(scalar_grid, sgma_seq, truncate=2 * np.sqrt(2))
if hasattr(scalar_grid, 'units'):
res = res * scalar_grid.units
return res
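# Note added for illustration: the degree-of-filtering argument n sets the standard
# deviation as sgma = n / (2 * pi), so n = 4 corresponds to a sigma of about 0.64
# grid lengths and a theoretical response of 1/e for the 4 * delta_x wavelength.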
@exporter.export
@preprocess_xarray
def smooth_n_point(scalar_grid, n=5, passes=1):
"""Filter with normal distribution of weights.
Parameters
----------
scalar_grid : array-like or `pint.Quantity`
Some 2D scalar grid to be smoothed.
n: int
The number of points to use in smoothing, only valid inputs
are 5 and 9. Defaults to 5.
passes : int
The number of times to apply the filter to the grid. Defaults
to 1.
Returns
-------
array-like or `pint.Quantity`
The filtered 2D scalar grid.
Notes
-----
This function is a close replication of the GEMPAK function SM5S
and SM9S depending on the choice of the number of points to use
for smoothing. This function can be applied multiple times to
create a more smoothed field and will only smooth the interior
points, leaving the end points with their original values. If a
masked value or NaN values exists in the array, it will propagate
to any point that uses that particular grid point in the smoothing
calculation. Applying the smoothing function multiple times will
propagate NaNs further throughout the domain.
"""
if n == 9:
p = 0.25
q = 0.125
r = 0.0625
elif n == 5:
p = 0.5
q = 0.125
r = 0.0
else:
raise ValueError('The number of points to use in the smoothing '
'calculation must be either 5 or 9.')
smooth_grid = scalar_grid[:].copy()
for _i in range(passes):
smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1]
                           + q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
                                  + smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
                           + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
                                  + smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
return smooth_grid
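# Sanity check added for illustration: the stencil weights are normalized, e.g. for
# n=5: p + 4*q + 4*r = 0.5 + 4*0.125 + 0 = 1.0 and for n=9: 0.25 + 4*0.125 +
# 4*0.0625 = 1.0, so a constant interior field passes through unchanged.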
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]')
def altimeter_to_station_pressure(altimeter_value, height):
r"""Convert the altimeter measurement to station pressure.
This function is useful for working with METARs since they provide
altimeter values, but not sea-level pressure or station pressure.
The following definitions of altimeter setting and station pressure
are taken from [Smithsonian1951]_. Altimeter setting is the
pressure value to which an aircraft altimeter scale is set so that it will
indicate the altitude above mean sea-level of an aircraft on the ground at the
location for which the value is determined. It assumes a standard atmosphere [NOAA1976]_.
Station pressure is the atmospheric pressure at the designated station elevation.
Finding the station pressure can be helpful for calculating sea-level pressure
or other parameters.
Parameters
----------
altimeter_value : `pint.Quantity`
The altimeter setting value as defined by the METAR or other observation,
which can be measured in either inches of mercury (in. Hg) or millibars (mb)
height: `pint.Quantity`
Elevation of the station measuring pressure.
Returns
-------
`pint.Quantity`
The station pressure in hPa or in. Hg, which can be used to calculate sea-level
pressure
See Also
--------
altimeter_to_sea_level_pressure
Notes
-----
This function is implemented using the following equations from the
Smithsonian Handbook (1951) p. 269.
Equation 1:
.. math:: A_{mb} = (p_{mb} - 0.3)F
Equation 3:
.. math:: F = \left [1 + \left(\frac{p_{0}^n a}{T_{0}} \right)
\frac{H_{b}}{p_{1}^n} \right ] ^ \frac{1}{n}
Where
:math:`p_{0}` = standard sea-level pressure = 1013.25 mb
:math:`p_{1} = p_{mb} - 0.3` when :math:`p_{0} = 1013.25 mb`
gamma = lapse rate in [NOAA1976]_ standard atmosphere below the isothermal layer
:math:`6.5^{\circ}C. km.^{-1}`
:math:`t_{0}` = standard sea-level temperature 288 K
:math:`H_{b} =` station elevation in meters (elevation for which station
pressure is given)
:math:`n = \frac{a R_{d}}{g} = 0.190284` where :math:`R_{d}` is the gas
constant for dry air
And solving for :math:`p_{mb}` results in the equation below, which is used to
calculate station pressure :math:`(p_{mb})`
.. math:: p_{mb} = \left [A_{mb} ^ n - \left (\frac{p_{0} a H_{b}}{T_0}
\right) \right] ^ \frac{1}{n} + 0.3
"""
# Gamma Value for this case
gamma = 0.0065 * units('K/m')
# N-Value
n = (mpconsts.Rd * gamma / mpconsts.g).to_base_units()
return ((altimeter_value ** n
- ((p0.to(altimeter_value.units) ** n * gamma * height) / t0)) ** (1 / n)
+ 0.3 * units.hPa)
@exporter.export
@preprocess_xarray
@check_units('[pressure]', '[length]', '[temperature]')
def altimeter_to_sea_level_pressure(altimeter_value, height, temperature):
r"""Convert the altimeter setting to sea-level pressure.
This function is useful for working with METARs since most provide
altimeter values, but not sea-level pressure, which is often plotted
on surface maps. The following definitions of altimeter setting, station pressure, and
sea-level pressure are taken from [Smithsonian1951]_
Altimeter setting is the pressure value to which an aircraft altimeter scale
is set so that it will indicate the altitude above mean sea-level of an aircraft
on the ground at the location for which the value is determined. It assumes a standard
atmosphere. Station pressure is the atmospheric pressure at the designated station
elevation. Sea-level pressure is a pressure value obtained by the theoretical reduction
of barometric pressure to sea level. It is assumed that atmosphere extends to sea level
below the station and that the properties of the atmosphere are related to conditions
observed at the station. This value is recorded by some surface observation stations,
but not all. If the value is recorded, it can be found in the remarks section. Finding
the sea-level pressure is helpful for plotting purposes and different calculations.
Parameters
----------
altimeter_value : `pint.Quantity`
The altimeter setting value as defined by the METAR or other observation,
with units of inches of mercury (in. Hg) or millibars (hPa)
height : `pint.Quantity`
Elevation of the station measuring pressure, often measured in meters
temperature : `pint.Quantity`
Temperature at the station
Returns
-------
`pint.Quantity`
The sea-level pressure in hPa, which makes pressure values easier to compare
between different stations
See Also
--------
altimeter_to_station_pressure
Notes
-----
This function is implemented using the following equations from Wallace and Hobbs (1977).
Equation 2.29:
.. math::
\Delta z = Z_{2} - Z_{1}
= \frac{R_{d} \bar T_{v}}{g_0}ln\left(\frac{p_{1}}{p_{2}}\right)
= \bar H ln \left (\frac {p_{1}}{p_{2}} \right)
Equation 2.31:
.. math::
p_{0} = p_{g}exp \left(\frac{Z_{g}}{\bar H} \right) \\
= p_{g}exp \left(\frac{g_{0}Z_{g}}{R_{d}\bar T_{v}} \right)
Then by substituting :math:`\Delta z` for :math:`Z_{g}` in Equation 2.31:
.. math:: p_{\text{sea level}} = p_{station} \exp\left(\frac{\Delta z}{H}\right)
where :math:`\Delta z` is the elevation in meters and :math:`H = \frac{R_{d}T}{g}`
"""
# Calculate the station pressure using function altimeter_to_station_pressure()
psfc = altimeter_to_station_pressure(altimeter_value, height)
# Calculate the scale height
h = mpconsts.Rd * temperature / mpconsts.g
return psfc * np.exp(height / h)
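# Usage sketch (added; numbers are illustrative): for a station at 1000 m reporting an
# altimeter setting of 29.92 inHg with T = 15 degC, the intermediate station pressure
# comes out near the standard-atmosphere value (~899 hPa), and the scale height
# h = Rd * T / g used in the exponential correction is roughly 8.4 km.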
def _check_radians(value, max_radians=2 * np.pi):
"""Input validation of values that could be in degrees instead of radians.
Parameters
----------
value : `pint.Quantity`
The input value to check.
max_radians : float
Maximum absolute value of radians before warning.
Returns
-------
`pint.Quantity`
The input value
"""
try:
value = value.to('radians').m
except AttributeError:
pass
if np.greater(np.nanmax(np.abs(value)), max_radians):
warnings.warn('Input over {} radians. '
'Ensure proper units are given.'.format(max_radians))
return value
| [
"numpy.abs",
"numpy.sqrt",
"numpy.asarray",
"numpy.any",
"numpy.exp",
"numpy.array",
"numpy.arctan2",
"numpy.isnan",
"numpy.cos",
"numpy.sin",
"numpy.shape"
]
| [((1176, 1198), 'numpy.sqrt', 'np.sqrt', (['(u * u + v * v)'], {}), '(u * u + v * v)\n', (1183, 1198), True, 'import numpy as np\n'), ((2865, 2882), 'numpy.any', 'np.any', (['calm_mask'], {}), '(calm_mask)\n', (2871, 2882), True, 'import numpy as np\n'), ((8772, 8783), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (8778, 8783), True, 'import numpy as np\n'), ((8941, 8952), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (8947, 8952), True, 'import numpy as np\n'), ((9040, 9052), 'numpy.isnan', 'np.isnan', (['hi'], {}), '(hi)\n', (9048, 9052), True, 'import numpy as np\n'), ((9060, 9071), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (9066, 9071), True, 'import numpy as np\n'), ((9282, 9293), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (9288, 9293), True, 'import numpy as np\n'), ((9702, 9713), 'numpy.any', 'np.any', (['sel'], {}), '(sel)\n', (9708, 9713), True, 'import numpy as np\n'), ((2254, 2272), 'numpy.arctan2', 'np.arctan2', (['(-v)', '(-u)'], {}), '(-v, -u)\n', (2264, 2272), True, 'import numpy as np\n'), ((3982, 3994), 'numpy.sin', 'np.sin', (['wdir'], {}), '(wdir)\n', (3988, 3994), True, 'import numpy as np\n'), ((4012, 4024), 'numpy.cos', 'np.cos', (['wdir'], {}), '(wdir)\n', (4018, 4024), True, 'import numpy as np\n'), ((6208, 6269), 'numpy.array', 'np.array', (['((temperature > temp_limit) | (speed <= speed_limit))'], {}), '((temperature > temp_limit) | (speed <= speed_limit))\n', (6216, 6269), True, 'import numpy as np\n'), ((8921, 8933), 'numpy.isnan', 'np.isnan', (['hi'], {}), '(hi)\n', (8929, 8933), True, 'import numpy as np\n'), ((9923, 9964), 'numpy.array', 'np.array', (['(temperature < 80.0 * units.degF)'], {}), '(temperature < 80.0 * units.degF)\n', (9931, 9964), True, 'import numpy as np\n'), ((20720, 20737), 'numpy.any', 'np.any', (['(sigma < 0)'], {}), '(sigma < 0)\n', (20726, 20737), True, 'import numpy as np\n'), ((20741, 20758), 'numpy.any', 'np.any', (['(sigma > 1)'], {}), '(sigma > 1)\n', (20747, 20758), True, 'import numpy as np\n'), ((32848, 32866), 'numpy.exp', 'np.exp', (['(height / h)'], {}), '(height / h)\n', (32854, 32866), True, 'import numpy as np\n'), ((2703, 2726), 'numpy.asarray', 'np.asarray', (['u.magnitude'], {}), '(u.magnitude)\n', (2713, 2726), True, 'import numpy as np\n'), ((2737, 2760), 'numpy.asarray', 'np.asarray', (['v.magnitude'], {}), '(v.magnitude)\n', (2747, 2760), True, 'import numpy as np\n'), ((8506, 8527), 'numpy.shape', 'np.shape', (['temperature'], {}), '(temperature)\n', (8514, 8527), True, 'import numpy as np\n'), ((12986, 13014), 'numpy.any', 'np.any', (['app_temperature.mask'], {}), '(app_temperature.mask)\n', (12992, 13014), True, 'import numpy as np\n'), ((13079, 13106), 'numpy.array', 'np.array', (['app_temperature.m'], {}), '(app_temperature.m)\n', (13087, 13106), True, 'import numpy as np\n'), ((33367, 33380), 'numpy.abs', 'np.abs', (['value'], {}), '(value)\n', (33373, 33380), True, 'import numpy as np\n'), ((18231, 18247), 'numpy.sin', 'np.sin', (['latitude'], {}), '(latitude)\n', (18237, 18247), True, 'import numpy as np\n'), ((24835, 24845), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (24842, 24845), True, 'import numpy as np\n'), ((9385, 9424), 'numpy.abs', 'np.abs', (['(delta - 95.0 * units.delta_degF)'], {}), '(delta - 95.0 * units.delta_degF)\n', (9391, 9424), True, 'import numpy as np\n')] |
from rest_framework.response import Response
from rest_framework.decorators import api_view
from rest_framework.reverse import reverse
from rest_framework_simplejwt.tokens import RefreshToken
@api_view(['GET'])
def root(request, fmt=None):
return Response({
'v1': reverse('api_v1:root', request=request, format=fmt),
})
@api_view(['GET'])
def v1_root(request, fmt=None):
root_navigation = {
'redirects': reverse('api_v1:redirects:redirect-list', request=request, format=fmt),
'token': reverse('api_v1:token_root', request=request, format=fmt)
}
return Response(root_navigation)
@api_view(['GET'])
def token_root(request, fmt=None):
token_navigation = {
'auth': reverse('api_v1:token_auth', request=request, format=fmt),
'refresh': reverse('api_v1:token_refresh', request=request, format=fmt),
'verify': reverse('api_v1:token_verify', request=request, format=fmt),
}
return Response(token_navigation)
@api_view(['POST'])
def token_refresh(request):
token = request.COOKIES.get("burl_refresh_token")
if token:
refresh = RefreshToken(str(token))
access = str(refresh.access_token)
if access:
return Response({"access": access}, 200)
else:
return Response("unauthorized", 401)
return Response("unauthorized", 401)
@api_view(['POST'])
def token_refresh_revoke(_request):
response = Response("ok")
response.delete_cookie("burl_refresh_token")
return response
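# --- Usage sketch (added for illustration; the concrete URL path is an assumption) ---
# token_refresh reads the refresh token from an HttpOnly cookie rather than the request
# body, so a client that already holds the cookie can simply do
#   POST /api/v1/token/refresh/   with   Cookie: burl_refresh_token=<refresh JWT>
# and receives {"access": "<new access token>"} on success or HTTP 401 otherwise;
# token_refresh_revoke deletes that cookie to log the client out.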
| [
"rest_framework.response.Response",
"rest_framework.decorators.api_view",
"rest_framework.reverse.reverse"
]
| [((195, 212), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (203, 212), False, 'from rest_framework.decorators import api_view\n'), ((341, 358), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (349, 358), False, 'from rest_framework.decorators import api_view\n'), ((629, 646), 'rest_framework.decorators.api_view', 'api_view', (["['GET']"], {}), "(['GET'])\n", (637, 646), False, 'from rest_framework.decorators import api_view\n'), ((989, 1007), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (997, 1007), False, 'from rest_framework.decorators import api_view\n'), ((1371, 1389), 'rest_framework.decorators.api_view', 'api_view', (["['POST']"], {}), "(['POST'])\n", (1379, 1389), False, 'from rest_framework.decorators import api_view\n'), ((600, 625), 'rest_framework.response.Response', 'Response', (['root_navigation'], {}), '(root_navigation)\n', (608, 625), False, 'from rest_framework.response import Response\n'), ((959, 985), 'rest_framework.response.Response', 'Response', (['token_navigation'], {}), '(token_navigation)\n', (967, 985), False, 'from rest_framework.response import Response\n'), ((1338, 1367), 'rest_framework.response.Response', 'Response', (['"""unauthorized"""', '(401)'], {}), "('unauthorized', 401)\n", (1346, 1367), False, 'from rest_framework.response import Response\n'), ((1441, 1455), 'rest_framework.response.Response', 'Response', (['"""ok"""'], {}), "('ok')\n", (1449, 1455), False, 'from rest_framework.response import Response\n'), ((436, 506), 'rest_framework.reverse.reverse', 'reverse', (['"""api_v1:redirects:redirect-list"""'], {'request': 'request', 'format': 'fmt'}), "('api_v1:redirects:redirect-list', request=request, format=fmt)\n", (443, 506), False, 'from rest_framework.reverse import reverse\n'), ((525, 582), 'rest_framework.reverse.reverse', 'reverse', (['"""api_v1:token_root"""'], {'request': 'request', 'format': 'fmt'}), "('api_v1:token_root', request=request, format=fmt)\n", (532, 582), False, 'from rest_framework.reverse import reverse\n'), ((723, 780), 'rest_framework.reverse.reverse', 'reverse', (['"""api_v1:token_auth"""'], {'request': 'request', 'format': 'fmt'}), "('api_v1:token_auth', request=request, format=fmt)\n", (730, 780), False, 'from rest_framework.reverse import reverse\n'), ((801, 861), 'rest_framework.reverse.reverse', 'reverse', (['"""api_v1:token_refresh"""'], {'request': 'request', 'format': 'fmt'}), "('api_v1:token_refresh', request=request, format=fmt)\n", (808, 861), False, 'from rest_framework.reverse import reverse\n'), ((881, 940), 'rest_framework.reverse.reverse', 'reverse', (['"""api_v1:token_verify"""'], {'request': 'request', 'format': 'fmt'}), "('api_v1:token_verify', request=request, format=fmt)\n", (888, 940), False, 'from rest_framework.reverse import reverse\n'), ((278, 329), 'rest_framework.reverse.reverse', 'reverse', (['"""api_v1:root"""'], {'request': 'request', 'format': 'fmt'}), "('api_v1:root', request=request, format=fmt)\n", (285, 329), False, 'from rest_framework.reverse import reverse\n'), ((1228, 1261), 'rest_framework.response.Response', 'Response', (["{'access': access}", '(200)'], {}), "({'access': access}, 200)\n", (1236, 1261), False, 'from rest_framework.response import Response\n'), ((1295, 1326), 'rest_framework.response.Response', 'Response', (["{'unauthorized'}", '(401)'], {}), "({'unauthorized'}, 401)\n", (1303, 1326), False, 'from rest_framework.response import Response\n')] |
#!/usr/bin/python3
"""
Author: <NAME>
Date: 20181013
Purpose: Send a Tweet with IP and MAC address of a Raspberry Pi
Install:
pip3 install tweepy
Usage:
python3 startuptweet.py 'this is my tweet'
"""
import tweepy
import sys
import socket
import subprocess
from uuid import getnode as get_mac
from datetime import datetime
# Create variables for each key, secret, token
from my_config import hash_tag
from my_config import consumer_key
from my_config import consumer_secret
from my_config import access_token
from my_config import access_token_secret
message = ''
if len(sys.argv) > 1:
message = sys.argv[1]
# Set up OAuth and integrate with API
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#
thetime = datetime.now().strftime('%Y%m%d %H:%M:%S')
ip = subprocess.check_output(['hostname', '--all-ip-addresses'])
ip = ip.decode('utf-8').strip()
hostname = socket.gethostname()
mac = get_mac()
mac = hex(mac)
tweet = thetime + ' ' + hostname + ' ' + ip + ' ' + mac + ' ' + message + ' ' + hash_tag
print('tweeting:', tweet)
api.update_status(status=tweet)
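# Sketch of the expected my_config.py (added for illustration; the values below are
# placeholders, not real credentials):
#   hash_tag = '#raspberrypi'
#   consumer_key = '...'
#   consumer_secret = '...'
#   access_token = '...'
#   access_token_secret = '...'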
| [
"subprocess.check_output",
"uuid.getnode",
"datetime.datetime.now",
"tweepy.API",
"socket.gethostname",
"tweepy.OAuthHandler"
]
| [((667, 717), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (686, 717), False, 'import tweepy\n'), ((781, 797), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (791, 797), False, 'import tweepy\n'), ((861, 920), 'subprocess.check_output', 'subprocess.check_output', (["['hostname', '--all-ip-addresses']"], {}), "(['hostname', '--all-ip-addresses'])\n", (884, 920), False, 'import subprocess\n'), ((965, 985), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (983, 985), False, 'import socket\n'), ((993, 1002), 'uuid.getnode', 'get_mac', ([], {}), '()\n', (1000, 1002), True, 'from uuid import getnode as get_mac\n'), ((812, 826), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (824, 826), False, 'from datetime import datetime\n')] |
#!/usr/bin/python
from bs4 import BeautifulSoup
import sqlite3
class DB:
"""
Abstraction for the profile database
"""
def __init__(self, filename):
"""
Creates a new connection to the database
filename - The name of the database file to use
"""
self.Filename = filename
self.Connection = sqlite3.connect(filename)
self.Cursor = self.Connection.cursor()
def SaveProfile(self, data):
"""
Saves the profile to the database
data - A dictionary of profile information
"""
self.Cursor.execute("INSERT INTO profiles (url, e0, e1, e2, e3, e4, e5, e6, e7, e8, gender, age, orientation, status, location) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (data['url'], data['e0'], data['e1'], data['e2'], data['e3'], data['e4'], data['e5'], data['e6'], data['e7'], data['e8'], data['gender'], data['age'], data['orientation'], data['status'], data['location']))
self.Connection.commit()
def HasVisited(self, url):
"""
Returns true if the given URL is in the database, false otherwise
url - The URL to check
"""
self.Cursor.execute("SELECT 1 FROM profiles WHERE url = ? LIMIT 1", (url,))
return self.Cursor.fetchone() is not None
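# Usage sketch (added for illustration; assumes a pre-created `profiles` table whose
# columns match the INSERT statement above):
#   db = DB('profiles.db')
#   if not db.HasVisited(url):
#       db.SaveProfile({'url': url, 'e0': e0, ..., 'e8': e8, 'gender': gender,
#                       'age': age, 'orientation': orientation, 'status': status,
#                       'location': location})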
| [
"sqlite3.connect"
]
| [((355, 380), 'sqlite3.connect', 'sqlite3.connect', (['filename'], {}), '(filename)\n', (370, 380), False, 'import sqlite3\n')] |
"""Objects representing regions in space."""
import math
import random
import itertools
import numpy
import scipy.spatial
import shapely.geometry
import shapely.ops
from scenic.core.distributions import Samplable, RejectionException, needsSampling
from scenic.core.lazy_eval import valueInContext
from scenic.core.vectors import Vector, OrientedVector, VectorDistribution
from scenic.core.geometry import RotatedRectangle
from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors
from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion
from scenic.core.type_support import toVector
from scenic.core.utils import cached, areEquivalent
def toPolygon(thing):
if needsSampling(thing):
return None
if hasattr(thing, 'polygon'):
return thing.polygon
if hasattr(thing, 'polygons'):
return thing.polygons
if hasattr(thing, 'lineString'):
return thing.lineString
return None
def regionFromShapelyObject(obj, orientation=None):
"""Build a 'Region' from Shapely geometry."""
assert obj.is_valid, obj
if obj.is_empty:
return nowhere
elif isinstance(obj, (shapely.geometry.Polygon, shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=obj, orientation=orientation)
elif isinstance(obj, (shapely.geometry.LineString, shapely.geometry.MultiLineString)):
return PolylineRegion(polyline=obj, orientation=orientation)
else:
raise RuntimeError(f'unhandled type of Shapely geometry: {obj}')
class PointInRegionDistribution(VectorDistribution):
"""Uniform distribution over points in a Region"""
def __init__(self, region):
super().__init__(region)
self.region = region
def sampleGiven(self, value):
return value[self.region].uniformPointInner()
def __str__(self):
return f'PointIn({self.region})'
class Region(Samplable):
"""Abstract class for regions."""
def __init__(self, name, *dependencies, orientation=None):
super().__init__(dependencies)
self.name = name
self.orientation = orientation
def sampleGiven(self, value):
return self
def intersect(self, other, triedReversed=False):
"""Get a `Region` representing the intersection of this one with another."""
if triedReversed:
return IntersectionRegion(self, other)
else:
return other.intersect(self, triedReversed=True)
@staticmethod
def uniformPointIn(region):
"""Get a uniform `Distribution` over points in a `Region`."""
return PointInRegionDistribution(region)
def uniformPoint(self):
"""Sample a uniformly-random point in this `Region`.
Can only be called on fixed Regions with no random parameters.
"""
assert not needsSampling(self)
return self.uniformPointInner()
def uniformPointInner(self):
"""Do the actual random sampling. Implemented by subclasses."""
raise NotImplementedError()
def containsPoint(self, point):
"""Check if the `Region` contains a point. Implemented by subclasses."""
raise NotImplementedError()
def containsObject(self, obj):
"""Check if the `Region` contains an :obj:`~scenic.core.object_types.Object`.
The default implementation assumes the `Region` is convex; subclasses must
override the method if this is not the case.
"""
for corner in obj.corners:
if not self.containsPoint(corner):
return False
return True
def __contains__(self, thing):
"""Check if this `Region` contains an object or vector."""
from scenic.core.object_types import Object
if isinstance(thing, Object):
return self.containsObject(thing)
vec = toVector(thing, '"X in Y" with X not an Object or a vector')
return self.containsPoint(vec)
def getAABB(self):
"""Axis-aligned bounding box for this `Region`. Implemented by some subclasses."""
raise NotImplementedError()
def orient(self, vec):
"""Orient the given vector along the region's orientation, if any."""
if self.orientation is None:
return vec
else:
return OrientedVector(vec.x, vec.y, self.orientation[vec])
def __str__(self):
return f'<Region {self.name}>'
class AllRegion(Region):
"""Region consisting of all space."""
def intersect(self, other, triedReversed=False):
return other
def containsPoint(self, point):
return True
def containsObject(self, obj):
return True
def __eq__(self, other):
return type(other) is AllRegion
def __hash__(self):
return hash(AllRegion)
class EmptyRegion(Region):
"""Region containing no points."""
def intersect(self, other, triedReversed=False):
return self
def uniformPointInner(self):
raise RejectionException('sampling empty Region')
def containsPoint(self, point):
return False
def containsObject(self, obj):
return False
def show(self, plt, style=None):
pass
def __eq__(self, other):
return type(other) is EmptyRegion
def __hash__(self):
return hash(EmptyRegion)
everywhere = AllRegion('everywhere')
nowhere = EmptyRegion('nowhere')
class CircularRegion(Region):
def __init__(self, center, radius, resolution=32):
super().__init__('Circle', center, radius)
self.center = center.toVector()
self.radius = radius
self.circumcircle = (self.center, self.radius)
if not (needsSampling(self.center) or needsSampling(self.radius)):
ctr = shapely.geometry.Point(self.center)
self.polygon = ctr.buffer(self.radius, resolution=resolution)
def sampleGiven(self, value):
return CircularRegion(value[self.center], value[self.radius])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
return CircularRegion(center, radius)
def containsPoint(self, point):
point = point.toVector()
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
r = random.triangular(0, self.radius, self.radius)
t = random.uniform(-math.pi, math.pi)
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def getAABB(self):
x, y = self.center
r = self.radius
return ((x - r, y - r), (x + r, y + r))
def isEquivalentTo(self, other):
if type(other) is not CircularRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius))
def __str__(self):
return f'CircularRegion({self.center}, {self.radius})'
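# Illustrative usage (added; not part of the original module):
#   circle = CircularRegion(Vector(0, 0), 5)
#   circle.containsPoint(Vector(3, 4))   # True: the point lies exactly on the boundary
#   circle.uniformPointInner()           # area-uniform sample; random.triangular(0, R, R)
#                                        # gives the 2r/R**2 radius density a disc needs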
class SectorRegion(Region):
def __init__(self, center, radius, heading, angle, resolution=32):
super().__init__('Sector', center, radius, heading, angle)
self.center = center.toVector()
self.radius = radius
self.heading = heading
self.angle = angle
r = (radius / 2) * cos(angle / 2)
self.circumcircle = (self.center.offsetRadially(r, heading), r)
if not any(needsSampling(x) for x in (self.center, radius, heading, angle)):
ctr = shapely.geometry.Point(self.center)
circle = ctr.buffer(self.radius, resolution=resolution)
if angle >= math.tau - 0.001:
self.polygon = circle
else:
mask = shapely.geometry.Polygon([
self.center,
self.center.offsetRadially(radius, heading + angle/2),
self.center.offsetRadially(2*radius, heading),
self.center.offsetRadially(radius, heading - angle/2)
])
self.polygon = circle & mask
def sampleGiven(self, value):
return SectorRegion(value[self.center], value[self.radius],
value[self.heading], value[self.angle])
def evaluateInner(self, context):
center = valueInContext(self.center, context)
radius = valueInContext(self.radius, context)
heading = valueInContext(self.heading, context)
angle = valueInContext(self.angle, context)
return SectorRegion(center, radius, heading, angle)
def containsPoint(self, point):
point = point.toVector()
if not pointIsInCone(tuple(point), tuple(self.center), self.heading, self.angle):
return False
return point.distanceTo(self.center) <= self.radius
def uniformPointInner(self):
x, y = self.center
heading, angle, maxDist = self.heading, self.angle, self.radius
r = random.triangular(0, maxDist, maxDist)
ha = angle / 2.0
t = random.uniform(-ha, ha) + (heading + (math.pi / 2))
pt = Vector(x + (r * cos(t)), y + (r * sin(t)))
return self.orient(pt)
def isEquivalentTo(self, other):
if type(other) is not SectorRegion:
return False
return (areEquivalent(other.center, self.center)
and areEquivalent(other.radius, self.radius)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.angle, self.angle))
def __str__(self):
return f'SectorRegion({self.center},{self.radius},{self.heading},{self.angle})'
class RectangularRegion(RotatedRectangle, Region):
def __init__(self, position, heading, width, height):
super().__init__('Rectangle', position, heading, width, height)
self.position = position.toVector()
self.heading = heading
self.width = width
self.height = height
self.hw = hw = width / 2
self.hh = hh = height / 2
self.radius = hypot(hw, hh) # circumcircle; for collision detection
self.corners = tuple(position.offsetRotated(heading, Vector(*offset))
for offset in ((hw, hh), (-hw, hh), (-hw, -hh), (hw, -hh)))
self.circumcircle = (self.position, self.radius)
def sampleGiven(self, value):
return RectangularRegion(value[self.position], value[self.heading],
value[self.width], value[self.height])
def evaluateInner(self, context):
position = valueInContext(self.position, context)
heading = valueInContext(self.heading, context)
width = valueInContext(self.width, context)
height = valueInContext(self.height, context)
return RectangularRegion(position, heading, width, height)
def uniformPointInner(self):
hw, hh = self.hw, self.hh
rx = random.uniform(-hw, hw)
ry = random.uniform(-hh, hh)
pt = self.position.offsetRotated(self.heading, Vector(rx, ry))
return self.orient(pt)
def getAABB(self):
x, y = zip(*self.corners)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
return ((minx, miny), (maxx, maxy))
def isEquivalentTo(self, other):
if type(other) is not RectangularRegion:
return False
return (areEquivalent(other.position, self.position)
and areEquivalent(other.heading, self.heading)
and areEquivalent(other.width, self.width)
and areEquivalent(other.height, self.height))
def __str__(self):
return f'RectangularRegion({self.position},{self.heading},{self.width},{self.height})'
class PolylineRegion(Region):
"""Region given by one or more polylines (chain of line segments)"""
def __init__(self, points=None, polyline=None, orientation=True):
super().__init__('Polyline', orientation=orientation)
if points is not None:
points = tuple(points)
if len(points) < 2:
raise RuntimeError('tried to create PolylineRegion with < 2 points')
self.points = points
self.lineString = shapely.geometry.LineString(points)
elif polyline is not None:
if isinstance(polyline, shapely.geometry.LineString):
if len(polyline.coords) < 2:
raise RuntimeError('tried to create PolylineRegion with <2-point LineString')
elif isinstance(polyline, shapely.geometry.MultiLineString):
if len(polyline) == 0:
raise RuntimeError('tried to create PolylineRegion from empty MultiLineString')
for line in polyline:
assert len(line.coords) >= 2
else:
raise RuntimeError('tried to create PolylineRegion from non-LineString')
self.lineString = polyline
else:
raise RuntimeError('must specify points or polyline for PolylineRegion')
if not self.lineString.is_valid:
raise RuntimeError('tried to create PolylineRegion with '
f'invalid LineString {self.lineString}')
self.segments = self.segmentsOf(self.lineString)
cumulativeLengths = []
total = 0
for p, q in self.segments:
dx, dy = p[0] - q[0], p[1] - q[1]
total += math.hypot(dx, dy)
cumulativeLengths.append(total)
self.cumulativeLengths = cumulativeLengths
@classmethod
def segmentsOf(cls, lineString):
if isinstance(lineString, shapely.geometry.LineString):
segments = []
points = list(lineString.coords)
if len(points) < 2:
raise RuntimeError('LineString has fewer than 2 points')
last = points[0]
for point in points[1:]:
segments.append((last, point))
last = point
return segments
elif isinstance(lineString, shapely.geometry.MultiLineString):
allSegments = []
for line in lineString:
allSegments.extend(cls.segmentsOf(line))
return allSegments
else:
raise RuntimeError('called segmentsOf on non-linestring')
def uniformPointInner(self):
pointA, pointB = random.choices(self.segments,
cum_weights=self.cumulativeLengths)[0]
interpolation = random.random()
x, y = averageVectors(pointA, pointB, weight=interpolation)
if self.orientation is True:
return OrientedVector(x, y, headingOfSegment(pointA, pointB))
else:
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
if poly is not None:
intersection = self.lineString & poly
if (intersection.is_empty or
not isinstance(intersection, (shapely.geometry.LineString,
shapely.geometry.MultiLineString))):
# TODO handle points!
return nowhere
return PolylineRegion(polyline=intersection)
return super().intersect(other, triedReversed)
def containsPoint(self, point):
return self.lineString.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
return False
def getAABB(self):
xmin, ymin, xmax, ymax = self.lineString.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
for pointA, pointB in self.segments:
plt.plot([pointA[0], pointB[0]], [pointA[1], pointB[1]], style)
def __str__(self):
return f'PolylineRegion({self.lineString})'
def __eq__(self, other):
if type(other) is not PolylineRegion:
return NotImplemented
return (other.lineString == self.lineString)
@cached
def __hash__(self):
return hash(str(self.lineString))
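# Illustrative usage (added; not in the original source):
#   path = PolylineRegion([(0, 0), (1, 0), (1, 2)])
#   path.uniformPointInner()   # picks a segment with probability proportional to its
#                              # length (via cumulativeLengths) and interpolates along it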
class PolygonalRegion(Region):
"""Region given by one or more polygons (possibly with holes)"""
def __init__(self, points=None, polygon=None, orientation=None):
super().__init__('Polygon', orientation=orientation)
if polygon is None and points is None:
raise RuntimeError('must specify points or polygon for PolygonalRegion')
if polygon is None:
points = tuple(points)
if len(points) == 0:
raise RuntimeError('tried to create PolygonalRegion from empty point list!')
for point in points:
if needsSampling(point):
raise RuntimeError('only fixed PolygonalRegions are supported')
self.points = points
polygon = shapely.geometry.Polygon(points)
if isinstance(polygon, shapely.geometry.Polygon):
self.polygons = shapely.geometry.MultiPolygon([polygon])
elif isinstance(polygon, shapely.geometry.MultiPolygon):
self.polygons = polygon
else:
raise RuntimeError(f'tried to create PolygonalRegion from non-polygon {polygon}')
if not self.polygons.is_valid:
raise RuntimeError('tried to create PolygonalRegion with '
f'invalid polygon {self.polygons}')
if points is None and len(self.polygons) == 1 and len(self.polygons[0].interiors) == 0:
self.points = tuple(self.polygons[0].exterior.coords[:-1])
if self.polygons.is_empty:
raise RuntimeError('tried to create empty PolygonalRegion')
triangles = []
for polygon in self.polygons:
triangles.extend(triangulatePolygon(polygon))
assert len(triangles) > 0, self.polygons
self.trianglesAndBounds = tuple((tri, tri.bounds) for tri in triangles)
areas = (triangle.area for triangle in triangles)
self.cumulativeTriangleAreas = tuple(itertools.accumulate(areas))
def uniformPointInner(self):
triangle, bounds = random.choices(
self.trianglesAndBounds,
cum_weights=self.cumulativeTriangleAreas)[0]
minx, miny, maxx, maxy = bounds
# TODO improve?
while True:
x, y = random.uniform(minx, maxx), random.uniform(miny, maxy)
if triangle.intersects(shapely.geometry.Point(x, y)):
return self.orient(Vector(x, y))
def intersect(self, other, triedReversed=False):
poly = toPolygon(other)
orientation = other.orientation if self.orientation is None else self.orientation
if poly is not None:
intersection = self.polygons & poly
if intersection.is_empty:
return nowhere
elif isinstance(intersection, (shapely.geometry.Polygon,
shapely.geometry.MultiPolygon)):
return PolygonalRegion(polygon=intersection, orientation=orientation)
elif isinstance(intersection, shapely.geometry.GeometryCollection):
polys = []
for geom in intersection:
if isinstance(geom, shapely.geometry.Polygon):
polys.append(geom)
if len(polys) == 0:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
intersection = shapely.geometry.MultiPolygon(polys)
return PolygonalRegion(polygon=intersection, orientation=orientation)
else:
# TODO handle points, lines
raise RuntimeError('unhandled type of polygon intersection')
return super().intersect(other, triedReversed)
def union(self, other):
poly = toPolygon(other)
if not poly:
raise RuntimeError(f'cannot take union of PolygonalRegion with {other}')
union = polygonUnion((self.polygons, poly))
return PolygonalRegion(polygon=union)
def containsPoint(self, point):
return self.polygons.intersects(shapely.geometry.Point(point))
def containsObject(self, obj):
objPoly = obj.polygon
if objPoly is None:
raise RuntimeError('tried to test containment of symbolic Object!')
# TODO improve boundary handling?
return self.polygons.contains(objPoly)
def getAABB(self):
xmin, ymin, xmax, ymax = self.polygons.bounds
return ((xmin, ymin), (xmax, ymax))
def show(self, plt, style='r-'):
plotPolygon(self.polygons, plt, style=style)
def __str__(self):
return '<PolygonalRegion>'
def __eq__(self, other):
if type(other) is not PolygonalRegion:
return NotImplemented
return (other.polygons == self.polygons
and other.orientation == self.orientation)
@cached
def __hash__(self):
# TODO better way to hash mutable Shapely geometries? (also for PolylineRegion)
return hash((str(self.polygons), self.orientation))
class PointSetRegion(Region):
"""Region consisting of a set of discrete points.
No :obj:`~scenic.core.object_types.Object` can be contained in a `PointSetRegion`,
since the latter is discrete. (This may not be true for subclasses, e.g.
`GridRegion`.)
Args:
name (str): name for debugging
points (iterable): set of points comprising the region
kdTree (:obj:`scipy.spatial.KDTree`, optional): k-D tree for the points (one will
be computed if none is provided)
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation for
the region
tolerance (float, optional): distance tolerance for checking whether a point lies
in the region
"""
def __init__(self, name, points, kdTree=None, orientation=None, tolerance=1e-6):
super().__init__(name, orientation=orientation)
self.points = tuple(points)
for point in self.points:
if needsSampling(point):
raise RuntimeError('only fixed PointSetRegions are supported')
self.kdTree = scipy.spatial.cKDTree(self.points) if kdTree is None else kdTree
self.orientation = orientation
self.tolerance = tolerance
def uniformPointInner(self):
return self.orient(Vector(*random.choice(self.points)))
def intersect(self, other, triedReversed=False):
def sampler(intRegion):
o = intRegion.regions[1]
center, radius = o.circumcircle
possibles = (Vector(*self.kdTree.data[i])
for i in self.kdTree.query_ball_point(center, radius))
intersection = [p for p in possibles if o.containsPoint(p)]
if len(intersection) == 0:
raise RejectionException(f'empty intersection of Regions {self} and {o}')
return self.orient(random.choice(intersection))
return IntersectionRegion(self, other, sampler=sampler, orientation=self.orientation)
def containsPoint(self, point):
distance, location = self.kdTree.query(point)
return (distance <= self.tolerance)
def containsObject(self, obj):
raise NotImplementedError()
def __eq__(self, other):
if type(other) is not PointSetRegion:
return NotImplemented
return (other.name == self.name
and other.points == self.points
and other.orientation == self.orientation)
def __hash__(self):
return hash((self.name, self.points, self.orientation))
class GridRegion(PointSetRegion):
"""A Region given by an obstacle grid.
A point is considered to be in a `GridRegion` if the nearest grid point is
not an obstacle.
Args:
name (str): name for debugging
grid: 2D list, tuple, or NumPy array of 0s and 1s, where 1 indicates an obstacle
and 0 indicates free space
Ax (float): spacing between grid points along X axis
Ay (float): spacing between grid points along Y axis
Bx (float): X coordinate of leftmost grid column
By (float): Y coordinate of lowest grid row
orientation (:obj:`~scenic.core.vectors.VectorField`, optional): orientation of region
"""
def __init__(self, name, grid, Ax, Ay, Bx, By, orientation=None):
self.grid = numpy.array(grid)
self.sizeY, self.sizeX = self.grid.shape
self.Ax, self.Ay = Ax, Ay
self.Bx, self.By = Bx, By
y, x = numpy.where(self.grid == 0)
points = [self.gridToPoint(point) for point in zip(x, y)]
super().__init__(name, points, orientation=orientation)
def gridToPoint(self, gp):
x, y = gp
return ((self.Ax * x) + self.Bx, (self.Ay * y) + self.By)
def pointToGrid(self, point):
x, y = point
x = (x - self.Bx) / self.Ax
y = (y - self.By) / self.Ay
nx = int(round(x))
if nx < 0 or nx >= self.sizeX:
return None
ny = int(round(y))
if ny < 0 or ny >= self.sizeY:
return None
return (nx, ny)
def containsPoint(self, point):
gp = self.pointToGrid(point)
if gp is None:
return False
x, y = gp
return (self.grid[y, x] == 0)
def containsObject(self, obj):
# TODO improve this procedure!
# Fast check
for c in obj.corners:
if not self.containsPoint(c):
return False
# Slow check
gps = [self.pointToGrid(corner) for corner in obj.corners]
x, y = zip(*gps)
minx, maxx = findMinMax(x)
miny, maxy = findMinMax(y)
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
p = self.gridToPoint((x, y))
if self.grid[y, x] == 1 and obj.containsPoint(p):
return False
return True
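# Worked example (added for illustration): with grid=[[0, 1], [0, 0]], Ax=Ay=1 and
# Bx=By=0, cell (x=1, y=0) is an obstacle, so containsPoint((1, 0)) is False while
# containsPoint((0, 0)) and containsPoint((1, 1)) are True; gridToPoint maps (x, y)
# to (Ax*x + Bx, Ay*y + By).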
class IntersectionRegion(Region):
def __init__(self, *regions, orientation=None, sampler=None):
self.regions = tuple(regions)
if len(self.regions) < 2:
raise RuntimeError('tried to take intersection of fewer than 2 regions')
super().__init__('Intersection', *self.regions, orientation=orientation)
if sampler is None:
sampler = self.genericSampler
self.sampler = sampler
def sampleGiven(self, value):
regs = [value[reg] for reg in self.regions]
# Now that regions have been sampled, attempt intersection again in the hopes
# there is a specialized sampler to handle it (unless we already have one)
if self.sampler is self.genericSampler:
failed = False
intersection = regs[0]
for region in regs[1:]:
intersection = intersection.intersect(region)
if isinstance(intersection, IntersectionRegion):
failed = True
break
if not failed:
intersection.orientation = value[self.orientation]
return intersection
return IntersectionRegion(*regs, orientation=value[self.orientation],
sampler=self.sampler)
def evaluateInner(self, context):
regs = (valueInContext(reg, context) for reg in self.regions)
orientation = valueInContext(self.orientation, context)
return IntersectionRegion(*regs, orientation=orientation, sampler=self.sampler)
def containsPoint(self, point):
return all(region.containsPoint(point) for region in self.regions)
def uniformPointInner(self):
return self.orient(self.sampler(self))
@staticmethod
def genericSampler(intersection):
regs = intersection.regions
point = regs[0].uniformPointInner()
for region in regs[1:]:
if not region.containsPoint(point):
raise RejectionException(
f'sampling intersection of Regions {regs[0]} and {region}')
return point
def isEquivalentTo(self, other):
if type(other) is not IntersectionRegion:
return False
return (areEquivalent(set(other.regions), set(self.regions))
and other.orientation == self.orientation)
def __str__(self):
return f'IntersectionRegion({self.regions})'
| [
"random.triangular",
"scenic.core.vectors.OrientedVector",
"numpy.array",
"random.choices",
"scenic.core.lazy_eval.valueInContext",
"math.hypot",
"scenic.core.geometry.findMinMax",
"scenic.core.geometry.triangulatePolygon",
"scenic.core.type_support.toVector",
"scenic.core.geometry.hypot",
"scenic.core.vectors.Vector",
"numpy.where",
"scenic.core.geometry.polygonUnion",
"scenic.core.geometry.sin",
"scenic.core.utils.areEquivalent",
"scenic.core.geometry.headingOfSegment",
"random.uniform",
"random.choice",
"scenic.core.geometry.plotPolygon",
"scenic.core.distributions.RejectionException",
"scenic.core.geometry.cos",
"itertools.accumulate",
"scenic.core.geometry.averageVectors",
"random.random",
"scenic.core.distributions.needsSampling"
]
| [((739, 759), 'scenic.core.distributions.needsSampling', 'needsSampling', (['thing'], {}), '(thing)\n', (752, 759), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((3511, 3571), 'scenic.core.type_support.toVector', 'toVector', (['thing', '""""X in Y" with X not an Object or a vector"""'], {}), '(thing, \'"X in Y" with X not an Object or a vector\')\n', (3519, 3571), False, 'from scenic.core.type_support import toVector\n'), ((4507, 4551), 'scenic.core.distributions.RejectionException', 'RejectionException', (['f"""sampling empty Region"""'], {}), "(f'sampling empty Region')\n", (4525, 4551), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((5431, 5467), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.center', 'context'], {}), '(self.center, context)\n', (5445, 5467), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((5479, 5515), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.radius', 'context'], {}), '(self.radius, context)\n', (5493, 5515), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((5729, 5775), 'random.triangular', 'random.triangular', (['(0)', 'self.radius', 'self.radius'], {}), '(0, self.radius, self.radius)\n', (5746, 5775), False, 'import random\n'), ((5782, 5815), 'random.uniform', 'random.uniform', (['(-math.pi)', 'math.pi'], {}), '(-math.pi, math.pi)\n', (5796, 5815), False, 'import random\n'), ((7347, 7383), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.center', 'context'], {}), '(self.center, context)\n', (7361, 7383), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7395, 7431), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.radius', 'context'], {}), '(self.radius, context)\n', (7409, 7431), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7444, 7481), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.heading', 'context'], {}), '(self.heading, context)\n', (7458, 7481), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7492, 7527), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.angle', 'context'], {}), '(self.angle, context)\n', (7506, 7527), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((7921, 7959), 'random.triangular', 'random.triangular', (['(0)', 'maxDist', 'maxDist'], {}), '(0, maxDist, maxDist)\n', (7938, 7959), False, 'import random\n'), ((8872, 8885), 'scenic.core.geometry.hypot', 'hypot', (['hw', 'hh'], {}), '(hw, hh)\n', (8877, 8885), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((9306, 9344), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.position', 'context'], {}), '(self.position, context)\n', (9320, 9344), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((9357, 9394), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.heading', 'context'], {}), '(self.heading, context)\n', (9371, 9394), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((9405, 9440), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.width', 'context'], {}), '(self.width, context)\n', (9419, 9440), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((9452, 9488), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.height', 'context'], {}), '(self.height, context)\n', (9466, 9488), False, 'from scenic.core.lazy_eval import 
valueInContext\n'), ((9616, 9639), 'random.uniform', 'random.uniform', (['(-hw)', 'hw'], {}), '(-hw, hw)\n', (9630, 9639), False, 'import random\n'), ((9647, 9670), 'random.uniform', 'random.uniform', (['(-hh)', 'hh'], {}), '(-hh, hh)\n', (9661, 9670), False, 'import random\n'), ((9825, 9838), 'scenic.core.geometry.findMinMax', 'findMinMax', (['x'], {}), '(x)\n', (9835, 9838), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((9854, 9867), 'scenic.core.geometry.findMinMax', 'findMinMax', (['y'], {}), '(y)\n', (9864, 9867), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((12627, 12642), 'random.random', 'random.random', ([], {}), '()\n', (12640, 12642), False, 'import random\n'), ((12652, 12704), 'scenic.core.geometry.averageVectors', 'averageVectors', (['pointA', 'pointB'], {'weight': 'interpolation'}), '(pointA, pointB, weight=interpolation)\n', (12666, 12704), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((17273, 17308), 'scenic.core.geometry.polygonUnion', 'polygonUnion', (['(self.polygons, poly)'], {}), '((self.polygons, poly))\n', (17285, 17308), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((17819, 17863), 'scenic.core.geometry.plotPolygon', 'plotPolygon', (['self.polygons', 'plt'], {'style': 'style'}), '(self.polygons, plt, style=style)\n', (17830, 17863), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((21225, 21242), 'numpy.array', 'numpy.array', (['grid'], {}), '(grid)\n', (21236, 21242), False, 'import numpy\n'), ((21351, 21378), 'numpy.where', 'numpy.where', (['(self.grid == 0)'], {}), '(self.grid == 0)\n', (21362, 21378), False, 'import numpy\n'), ((22268, 22281), 'scenic.core.geometry.findMinMax', 'findMinMax', (['x'], {}), '(x)\n', (22278, 22281), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((22297, 22310), 'scenic.core.geometry.findMinMax', 'findMinMax', (['y'], {}), '(y)\n', (22307, 22310), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((23700, 23741), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['self.orientation', 'context'], {}), '(self.orientation, context)\n', (23714, 23741), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((2632, 2651), 'scenic.core.distributions.needsSampling', 'needsSampling', (['self'], {}), '(self)\n', (2645, 2651), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((3901, 3952), 'scenic.core.vectors.OrientedVector', 'OrientedVector', (['vec.x', 'vec.y', 'self.orientation[vec]'], {}), '(vec.x, vec.y, self.orientation[vec])\n', (3915, 3952), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((6094, 6134), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.center', 'self.center'], {}), '(other.center, self.center)\n', (6107, 6134), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((6149, 6189), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.radius', 'self.radius'], {}), '(other.radius, self.radius)\n', (6162, 6189), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((6551, 6565), 'scenic.core.geometry.cos', 'cos', (['(angle / 2)'], {}), '(angle / 2)\n', (6554, 6565), 
False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((7985, 8008), 'random.uniform', 'random.uniform', (['(-ha)', 'ha'], {}), '(-ha, ha)\n', (7999, 8008), False, 'import random\n'), ((8211, 8251), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.center', 'self.center'], {}), '(other.center, self.center)\n', (8224, 8251), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((8266, 8306), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.radius', 'self.radius'], {}), '(other.radius, self.radius)\n', (8279, 8306), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((8321, 8363), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.heading', 'self.heading'], {}), '(other.heading, self.heading)\n', (8334, 8363), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((8378, 8416), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.angle', 'self.angle'], {}), '(other.angle, self.angle)\n', (8391, 8416), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((9720, 9734), 'scenic.core.vectors.Vector', 'Vector', (['rx', 'ry'], {}), '(rx, ry)\n', (9726, 9734), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((10010, 10054), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.position', 'self.position'], {}), '(other.position, self.position)\n', (10023, 10054), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((10069, 10111), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.heading', 'self.heading'], {}), '(other.heading, self.heading)\n', (10082, 10111), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((10126, 10164), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.width', 'self.width'], {}), '(other.width, self.width)\n', (10139, 10164), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((10179, 10219), 'scenic.core.utils.areEquivalent', 'areEquivalent', (['other.height', 'self.height'], {}), '(other.height, self.height)\n', (10192, 10219), False, 'from scenic.core.utils import cached, areEquivalent\n'), ((11746, 11764), 'math.hypot', 'math.hypot', (['dx', 'dy'], {}), '(dx, dy)\n', (11756, 11764), False, 'import math\n'), ((12506, 12571), 'random.choices', 'random.choices', (['self.segments'], {'cum_weights': 'self.cumulativeLengths'}), '(self.segments, cum_weights=self.cumulativeLengths)\n', (12520, 12571), False, 'import random\n'), ((15651, 15678), 'itertools.accumulate', 'itertools.accumulate', (['areas'], {}), '(areas)\n', (15671, 15678), False, 'import itertools\n'), ((15732, 15818), 'random.choices', 'random.choices', (['self.trianglesAndBounds'], {'cum_weights': 'self.cumulativeTriangleAreas'}), '(self.trianglesAndBounds, cum_weights=self.\n cumulativeTriangleAreas)\n', (15746, 15818), False, 'import random\n'), ((19147, 19167), 'scenic.core.distributions.needsSampling', 'needsSampling', (['point'], {}), '(point)\n', (19160, 19167), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((23630, 23658), 'scenic.core.lazy_eval.valueInContext', 'valueInContext', (['reg', 'context'], {}), '(reg, context)\n', (23644, 23658), False, 'from scenic.core.lazy_eval import valueInContext\n'), ((5119, 5145), 'scenic.core.distributions.needsSampling', 'needsSampling', (['self.center'], {}), '(self.center)\n', (5132, 5145), False, 'from scenic.core.distributions import Samplable, RejectionException, 
needsSampling\n'), ((5149, 5175), 'scenic.core.distributions.needsSampling', 'needsSampling', (['self.radius'], {}), '(self.radius)\n', (5162, 5175), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((12767, 12799), 'scenic.core.geometry.headingOfSegment', 'headingOfSegment', (['pointA', 'pointB'], {}), '(pointA, pointB)\n', (12783, 12799), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((12831, 12843), 'scenic.core.vectors.Vector', 'Vector', (['x', 'y'], {}), '(x, y)\n', (12837, 12843), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((14492, 14512), 'scenic.core.distributions.needsSampling', 'needsSampling', (['point'], {}), '(point)\n', (14505, 14512), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((15414, 15441), 'scenic.core.geometry.triangulatePolygon', 'triangulatePolygon', (['polygon'], {}), '(polygon)\n', (15432, 15441), False, 'from scenic.core.geometry import headingOfSegment, triangulatePolygon, plotPolygon, polygonUnion\n'), ((15900, 15926), 'random.uniform', 'random.uniform', (['minx', 'maxx'], {}), '(minx, maxx)\n', (15914, 15926), False, 'import random\n'), ((15928, 15954), 'random.uniform', 'random.uniform', (['miny', 'maxy'], {}), '(miny, maxy)\n', (15942, 15954), False, 'import random\n'), ((19624, 19652), 'scenic.core.vectors.Vector', 'Vector', (['*self.kdTree.data[i]'], {}), '(*self.kdTree.data[i])\n', (19630, 19652), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((19827, 19894), 'scenic.core.distributions.RejectionException', 'RejectionException', (['f"""empty intersection of Regions {self} and {o}"""'], {}), "(f'empty intersection of Regions {self} and {o}')\n", (19845, 19894), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((19917, 19944), 'random.choice', 'random.choice', (['intersection'], {}), '(intersection)\n', (19930, 19944), False, 'import random\n'), ((24193, 24271), 'scenic.core.distributions.RejectionException', 'RejectionException', (['f"""sampling intersection of Regions {regs[0]} and {region}"""'], {}), "(f'sampling intersection of Regions {regs[0]} and {region}')\n", (24211, 24271), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((5839, 5845), 'scenic.core.geometry.cos', 'cos', (['t'], {}), '(t)\n', (5842, 5845), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((5857, 5863), 'scenic.core.geometry.sin', 'sin', (['t'], {}), '(t)\n', (5860, 5863), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((6646, 6662), 'scenic.core.distributions.needsSampling', 'needsSampling', (['x'], {}), '(x)\n', (6659, 6662), False, 'from scenic.core.distributions import Samplable, RejectionException, needsSampling\n'), ((8060, 8066), 'scenic.core.geometry.cos', 'cos', (['t'], {}), '(t)\n', (8063, 8066), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((8078, 8084), 'scenic.core.geometry.sin', 'sin', (['t'], {}), '(t)\n', (8081, 8084), False, 'from scenic.core.geometry import sin, cos, hypot, findMinMax, pointIsInCone, averageVectors\n'), ((8982, 8997), 'scenic.core.vectors.Vector', 'Vector', (['*offset'], {}), '(*offset)\n', (8988, 8997), False, 'from scenic.core.vectors 
import Vector, OrientedVector, VectorDistribution\n'), ((16035, 16047), 'scenic.core.vectors.Vector', 'Vector', (['x', 'y'], {}), '(x, y)\n', (16041, 16047), False, 'from scenic.core.vectors import Vector, OrientedVector, VectorDistribution\n'), ((19439, 19465), 'random.choice', 'random.choice', (['self.points'], {}), '(self.points)\n', (19452, 19465), False, 'import random\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import logging
import time
import json
import click
import matplotlib.pyplot as plt
import orangery as o
from orangery.cli import defaults, util
from orangery.tools.plotting import get_scale_factor
@click.command(options_metavar='<options>')
@click.argument('file1', nargs=1, type=click.Path(exists=True), metavar='<file_t0>') # help="survey representing the initial condition"
@click.argument('file2', nargs=1, type=click.Path(exists=True), metavar='<file_t1>') # help="survey representing the final condition"
@click.argument('fields', nargs=1, metavar='<fields>') # help="character string identifying the columns"
@click.argument('xs_name', nargs=1, metavar='<name>') # help="name of the cross-section to plot"
@click.option('--codes', 'codes_f', nargs=1, type=click.Path(exists=True), metavar='<codes_file>', help="JSON file representing the usage intent of a set of survey codes")
@click.option('--show/--save', is_flag=True, default=True, help="Show the plot or save to files; --show is the default")
@click.option('--summary/--no-summary', default=True, help="Print summary information; --summary is the default")
@click.option('--units', type=click.Choice(['m','sft','ft']), default='m', help="Unit to show in axis labels")
@click.option('--labels', nargs=2, metavar='<text text>', help="Labels to display in the legend")
@click.option('--exaggeration', metavar='<int>', default=3, help="Vertical exaggeration of plot")
@click.option('--scale', nargs=2, metavar='<float int>', type=click.Tuple([float, int]), default=(10, 300), help="Scale where first argument is units per-inch on the horizontal axis and second argument is output DPI")
@click.option('--close/--no-close', default=True, help="Close the line ends; --close is the default")
@click.option('--reverse', type=click.Choice(['t0','t1','tx']), help="Reverse a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--exclude', nargs=2, type=click.Tuple([str, click.Choice(['t0','t1','tx'])]), multiple=True, metavar='<str choice>', help="Exclude a survey code from a line or lines of section (t0=initial, t1=final, tx=both)")
@click.option('--overlay', nargs=1, type=click.Path(exists=True))
@click.option('-v', '--verbose', is_flag=True, help="Enables verbose mode")
def cutfill(file1, file2, fields, xs_name, codes_f, show, summary, units, labels, exaggeration, scale, close, reverse, exclude, overlay, verbose):
"""Displays a plot of a repeat survey with cut and fill.
\b
The cutfill subcommand takes four arguments:
<file_t0> : survey data representing the initial condition in csv format
<file_t1> : survey data representing the final condition in csv format
<fields> : series of characters describing the data columns
<name> : name of cross-section to plot
Options allow to set various properties of the plot. The default is to --show the plot.
With the --save option the plot will be saved as an image along with a csv file containing
    data about cross-sectional cut-and-fill areas along the line of section.
\b
Example:
orangery cutfill file_2004.csv file_2010.csv pxyzctr XS-7 --reverse t0
"""
if verbose is True:
loglevel = 2
else:
loglevel = 0
logging.basicConfig(stream=sys.stderr, level=loglevel or logging.INFO)
# load the configuration
codes = defaults.codes.copy()
if codes_f:
user_codes = util.load_config(codes_f)
codes.update(user_codes)
# load the survey data
s1 = o.Survey(file1, fields, codes, 0)
s2 = o.Survey(file2, fields, codes, 0)
if overlay:
s3 = o.Survey(overlay, fields, codes, 0)
exclude_t0 = []
exclude_t1 = []
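    # route each excluded survey code to the initial (t0) and/or final (t1) section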
for code in exclude:
if code[1] in ('t0', 'tx'):
exclude_t0.append(code[0])
if code[1] in ('t1', 'tx'):
exclude_t1.append(code[0])
# select a group of points, in this case a cross section
xs_pts1 = o.group(s1.data, s1.code_table, group=xs_name, exclude=exclude_t0)
xs_pts2 = o.group(s2.data, s2.code_table, group=xs_name, exclude=exclude_t1)
# xs_pts_overlay = o.group(s3.data, s3.code_table, group=xs_name)
# get the endpoints of the group
p1, p2 = o.endpoints(xs_pts1, reverse=reverse in ('t0','tx'))
# make the sections
xs1 = o.Section(xs_pts1, p1, p2, reverse=reverse in ('t0','tx'))
xs2 = o.Section(xs_pts2, p1, p2, reverse=reverse in ('t1','tx'))
# xs_overlay = o.Section(xs_pts_overlay, p1, p2)
if labels:
label_t0 = labels[0]
label_t1 = labels[1]
        # label_overlay = labels[2]  # overlay labelling disabled along with the commented-out overlay plot
elif 't' in fields:
label_t0 = (xs1.data.iloc[0]['t']).split('T')[0]
label_t1 = (xs2.data.iloc[0]['t']).split('T')[0]
# label_overlay = (xs_overlay.data.iloc[0]['t']).split('T')[0]
else:
label_t0 = 't0'
label_t1 = 't1'
# label_overlay = 'pre-restoration'
# calculate the change
chg = o.Change(xs1, xs2, close_ends=close)
if summary:
chg.summarize()
import matplotlib
font = {'family':'normal','weight':'normal','size':16}
matplotlib.rc('font', **font)
# plot the change between two cross-sections
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_aspect(exaggeration)
# xs_overlay.plot(ax=ax, marker='None', linestyle='-', linewidth=3, color='tab:red', label=label_overlay)
xs1.plot(ax=ax, marker='o', markersize=4, markerfacecolor='white', markeredgecolor='black', linestyle='-', color='gray', label=label_t0)
xs2.plot(ax=ax, marker='o', markersize=4, markerfacecolor='black', markeredgecolor='black', linestyle='-', color='black', label=label_t1)
chg.polygon_plot(ax=ax, fill_label='Fill', cut_label='Cut')
chg.annotate_plot(ax=ax)
ax.set_xlabel('Distance ({0})'.format(units))
ax.set_ylabel('Elevation ({0}), {1}x exaggeration'.format(units, exaggeration))
plt.legend(loc='best')
plt.title('Cross-section {0}'.format(xs_name))
if show:
plt.show()
else:
fname = xs_name + '-' + label_t0.replace('-', '') + '-' + label_t1.replace('-', '')
scale_factor = get_scale_factor(fig, ax, scale[0])
dims = fig.get_size_inches()
fig.set_size_inches(dims[0]*scale_factor, dims[1]*scale_factor)
fig.savefig(fname+'.png', dpi=scale[1])
click.echo('Figure saved to: {}'.format(fname+'.png'))
chg.save(fname+'.csv')
click.echo('Data saved to: {}'.format(fname+'.csv'))
| [
"click.Choice",
"matplotlib.rc",
"orangery.cli.defaults.codes.copy",
"orangery.group",
"orangery.Survey",
"orangery.Change",
"click.option",
"orangery.endpoints",
"orangery.Section",
"click.command",
"click.argument",
"click.Tuple",
"orangery.tools.plotting.get_scale_factor",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"logging.basicConfig",
"matplotlib.pyplot.figure",
"orangery.cli.util.load_config",
"click.Path"
]
| [((261, 303), 'click.command', 'click.command', ([], {'options_metavar': '"""<options>"""'}), "(options_metavar='<options>')\n", (274, 303), False, 'import click\n'), ((575, 628), 'click.argument', 'click.argument', (['"""fields"""'], {'nargs': '(1)', 'metavar': '"""<fields>"""'}), "('fields', nargs=1, metavar='<fields>')\n", (589, 628), False, 'import click\n'), ((680, 732), 'click.argument', 'click.argument', (['"""xs_name"""'], {'nargs': '(1)', 'metavar': '"""<name>"""'}), "('xs_name', nargs=1, metavar='<name>')\n", (694, 732), False, 'import click\n'), ((949, 1073), 'click.option', 'click.option', (['"""--show/--save"""'], {'is_flag': '(True)', 'default': '(True)', 'help': '"""Show the plot or save to files; --show is the default"""'}), "('--show/--save', is_flag=True, default=True, help=\n 'Show the plot or save to files; --show is the default')\n", (961, 1073), False, 'import click\n'), ((1070, 1187), 'click.option', 'click.option', (['"""--summary/--no-summary"""'], {'default': '(True)', 'help': '"""Print summary information; --summary is the default"""'}), "('--summary/--no-summary', default=True, help=\n 'Print summary information; --summary is the default')\n", (1082, 1187), False, 'import click\n'), ((1295, 1396), 'click.option', 'click.option', (['"""--labels"""'], {'nargs': '(2)', 'metavar': '"""<text text>"""', 'help': '"""Labels to display in the legend"""'}), "('--labels', nargs=2, metavar='<text text>', help=\n 'Labels to display in the legend')\n", (1307, 1396), False, 'import click\n'), ((1393, 1494), 'click.option', 'click.option', (['"""--exaggeration"""'], {'metavar': '"""<int>"""', 'default': '(3)', 'help': '"""Vertical exaggeration of plot"""'}), "('--exaggeration', metavar='<int>', default=3, help=\n 'Vertical exaggeration of plot')\n", (1405, 1494), False, 'import click\n'), ((1709, 1814), 'click.option', 'click.option', (['"""--close/--no-close"""'], {'default': '(True)', 'help': '"""Close the line ends; --close is the default"""'}), "('--close/--no-close', default=True, help=\n 'Close the line ends; --close is the default')\n", (1721, 1814), False, 'import click\n'), ((2242, 2316), 'click.option', 'click.option', (['"""-v"""', '"""--verbose"""'], {'is_flag': '(True)', 'help': '"""Enables verbose mode"""'}), "('-v', '--verbose', is_flag=True, help='Enables verbose mode')\n", (2254, 2316), False, 'import click\n'), ((3291, 3361), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': '(loglevel or logging.INFO)'}), '(stream=sys.stderr, level=loglevel or logging.INFO)\n', (3310, 3361), False, 'import logging\n'), ((3404, 3425), 'orangery.cli.defaults.codes.copy', 'defaults.codes.copy', ([], {}), '()\n', (3423, 3425), False, 'from orangery.cli import defaults, util\n'), ((3559, 3592), 'orangery.Survey', 'o.Survey', (['file1', 'fields', 'codes', '(0)'], {}), '(file1, fields, codes, 0)\n', (3567, 3592), True, 'import orangery as o\n'), ((3602, 3635), 'orangery.Survey', 'o.Survey', (['file2', 'fields', 'codes', '(0)'], {}), '(file2, fields, codes, 0)\n', (3610, 3635), True, 'import orangery as o\n'), ((3994, 4060), 'orangery.group', 'o.group', (['s1.data', 's1.code_table'], {'group': 'xs_name', 'exclude': 'exclude_t0'}), '(s1.data, s1.code_table, group=xs_name, exclude=exclude_t0)\n', (4001, 4060), True, 'import orangery as o\n'), ((4075, 4141), 'orangery.group', 'o.group', (['s2.data', 's2.code_table'], {'group': 'xs_name', 'exclude': 'exclude_t1'}), '(s2.data, s2.code_table, group=xs_name, exclude=exclude_t1)\n', (4082, 4141), True, 
'import orangery as o\n'), ((4264, 4317), 'orangery.endpoints', 'o.endpoints', (['xs_pts1'], {'reverse': "(reverse in ('t0', 'tx'))"}), "(xs_pts1, reverse=reverse in ('t0', 'tx'))\n", (4275, 4317), True, 'import orangery as o\n'), ((4352, 4411), 'orangery.Section', 'o.Section', (['xs_pts1', 'p1', 'p2'], {'reverse': "(reverse in ('t0', 'tx'))"}), "(xs_pts1, p1, p2, reverse=reverse in ('t0', 'tx'))\n", (4361, 4411), True, 'import orangery as o\n'), ((4421, 4480), 'orangery.Section', 'o.Section', (['xs_pts2', 'p1', 'p2'], {'reverse': "(reverse in ('t1', 'tx'))"}), "(xs_pts2, p1, p2, reverse=reverse in ('t1', 'tx'))\n", (4430, 4480), True, 'import orangery as o\n'), ((4991, 5027), 'orangery.Change', 'o.Change', (['xs1', 'xs2'], {'close_ends': 'close'}), '(xs1, xs2, close_ends=close)\n', (4999, 5027), True, 'import orangery as o\n'), ((5154, 5183), 'matplotlib.rc', 'matplotlib.rc', (['"""font"""'], {}), "('font', **font)\n", (5167, 5183), False, 'import matplotlib\n'), ((5243, 5255), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5253, 5255), True, 'import matplotlib.pyplot as plt\n'), ((5943, 5965), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (5953, 5965), True, 'import matplotlib.pyplot as plt\n'), ((3463, 3488), 'orangery.cli.util.load_config', 'util.load_config', (['codes_f'], {}), '(codes_f)\n', (3479, 3488), False, 'from orangery.cli import defaults, util\n'), ((3666, 3701), 'orangery.Survey', 'o.Survey', (['overlay', 'fields', 'codes', '(0)'], {}), '(overlay, fields, codes, 0)\n', (3674, 3701), True, 'import orangery as o\n'), ((6039, 6049), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6047, 6049), True, 'import matplotlib.pyplot as plt\n'), ((6176, 6211), 'orangery.tools.plotting.get_scale_factor', 'get_scale_factor', (['fig', 'ax', 'scale[0]'], {}), '(fig, ax, scale[0])\n', (6192, 6211), False, 'from orangery.tools.plotting import get_scale_factor\n'), ((343, 366), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (353, 366), False, 'import click\n'), ((479, 502), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (489, 502), False, 'import click\n'), ((826, 849), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (836, 849), False, 'import click\n'), ((1213, 1245), 'click.Choice', 'click.Choice', (["['m', 'sft', 'ft']"], {}), "(['m', 'sft', 'ft'])\n", (1225, 1245), False, 'import click\n'), ((1552, 1577), 'click.Tuple', 'click.Tuple', (['[float, int]'], {}), '([float, int])\n', (1563, 1577), False, 'import click\n'), ((1842, 1874), 'click.Choice', 'click.Choice', (["['t0', 't1', 'tx']"], {}), "(['t0', 't1', 'tx'])\n", (1854, 1874), False, 'import click\n'), ((2216, 2239), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (2226, 2239), False, 'import click\n'), ((2008, 2040), 'click.Choice', 'click.Choice', (["['t0', 't1', 'tx']"], {}), "(['t0', 't1', 'tx'])\n", (2020, 2040), False, 'import click\n')] |
import os, sys
DICTIONARY_FILE = os.path.join(sys.prefix, 'dictionaries/ice_pron_dict_standard_clear.csv')
HEAD_FILE = os.path.join(sys.prefix, 'data/head_map.csv')
MODIFIER_FILE = os.path.join(sys.prefix, 'data/modifier_map.csv')
VOWELS_FILE = os.path.join(sys.prefix, 'data/vowels_sampa.txt')
CONS_CLUSTERS_FILE = os.path.join(sys.prefix, 'data/cons_clusters_sampa.txt')
def read_map(filename):
with open(filename) as f:
file_content = f.read().splitlines()
dict_map = {}
for line in file_content:
arr = line.split('\t')
if len(arr) > 1:
values = arr[1:]
else:
values = []
key = arr[0]
dict_map[key] = values
return dict_map
def read_dictionary(filename):
with open(filename) as f:
file_content = f.read().splitlines()
pronDict = {}
for line in file_content:
word, transcr = line.split('\t')
pronDict[word] = transcr
return pronDict
def read_list(filename):
with open(filename) as f:
file_content = f.read().splitlines()
return file_content
def get_head_map():
return read_map(HEAD_FILE)
def get_modifier_map():
return read_map(MODIFIER_FILE)
def get_dictionary():
return read_dictionary(DICTIONARY_FILE)
def get_vowels():
return read_list(VOWELS_FILE)
def get_cons_clusters():
return read_list(CONS_CLUSTERS_FILE)
| [
"os.path.join"
]
| [((33, 106), 'os.path.join', 'os.path.join', (['sys.prefix', '"""dictionaries/ice_pron_dict_standard_clear.csv"""'], {}), "(sys.prefix, 'dictionaries/ice_pron_dict_standard_clear.csv')\n", (45, 106), False, 'import os, sys\n'), ((119, 164), 'os.path.join', 'os.path.join', (['sys.prefix', '"""data/head_map.csv"""'], {}), "(sys.prefix, 'data/head_map.csv')\n", (131, 164), False, 'import os, sys\n'), ((181, 230), 'os.path.join', 'os.path.join', (['sys.prefix', '"""data/modifier_map.csv"""'], {}), "(sys.prefix, 'data/modifier_map.csv')\n", (193, 230), False, 'import os, sys\n'), ((245, 294), 'os.path.join', 'os.path.join', (['sys.prefix', '"""data/vowels_sampa.txt"""'], {}), "(sys.prefix, 'data/vowels_sampa.txt')\n", (257, 294), False, 'import os, sys\n'), ((316, 372), 'os.path.join', 'os.path.join', (['sys.prefix', '"""data/cons_clusters_sampa.txt"""'], {}), "(sys.prefix, 'data/cons_clusters_sampa.txt')\n", (328, 372), False, 'import os, sys\n')] |
from pylabelbuddy import _annotations_notebook
def test_annotations_notebook(root, annotations_mock, dataset_mock):
nb = _annotations_notebook.AnnotationsNotebook(
root, annotations_mock, dataset_mock
)
nb.change_database()
assert nb.notebook.index(nb.notebook.select()) == 2
nb.go_to_annotations()
assert nb.notebook.index(nb.notebook.select()) == 0
| [
"pylabelbuddy._annotations_notebook.AnnotationsNotebook"
]
| [((127, 206), 'pylabelbuddy._annotations_notebook.AnnotationsNotebook', '_annotations_notebook.AnnotationsNotebook', (['root', 'annotations_mock', 'dataset_mock'], {}), '(root, annotations_mock, dataset_mock)\n', (168, 206), False, 'from pylabelbuddy import _annotations_notebook\n')] |
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from mock import MagicMock
from requests import HTTPError
from datadog_checks.base import AgentCheck
from datadog_checks.dev.http import MockResponse
from .common import HARBOR_COMPONENTS, HARBOR_VERSION, VERSION_1_5, VERSION_1_6, VERSION_1_8
@pytest.mark.usefixtures("patch_requests")
def test_check_health(aggregator, harbor_check, harbor_api):
base_tags = ['tag1:val1', 'tag2']
harbor_check._check_health(harbor_api, base_tags)
if harbor_api.harbor_version >= VERSION_1_8:
components = HARBOR_COMPONENTS
for c in components:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:{}'.format(c)])
elif harbor_api.harbor_version >= VERSION_1_6:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags + ['component:chartmuseum'])
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
elif harbor_api.harbor_version >= VERSION_1_5:
aggregator.assert_service_check('harbor.status', AgentCheck.OK, tags=base_tags)
else:
aggregator.assert_service_check('harbor.status', AgentCheck.UNKNOWN, tags=base_tags)
@pytest.mark.usefixtures("patch_requests")
def test_check_registries_health(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._check_registries_health(harbor_api, tags)
tags.append('registry:demo')
aggregator.assert_service_check('harbor.registry.status', AgentCheck.OK, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_project_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_project_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.projects.count', 2, tags=tags)
@pytest.mark.usefixtures("patch_requests")
def test_submit_disk_metrics(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_disk_metrics(harbor_api, tags)
aggregator.assert_metric('harbor.disk.free', 5e5, tags=tags)
aggregator.assert_metric('harbor.disk.total', 1e6, tags=tags)
@pytest.mark.usefixtures("patch_requests")
@pytest.mark.skipif(HARBOR_VERSION < VERSION_1_5, reason="The registry.read_only metric is submitted for Harbor 1.5+")
def test_submit_read_only_status(aggregator, harbor_check, harbor_api):
tags = ['tag1:val1', 'tag2']
harbor_check._submit_read_only_status(harbor_api, tags)
aggregator.assert_metric('harbor.registry.read_only', 0, tags=tags)
def test_api__make_get_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_get_request('{base_url}/api/path') == {"json": True}
harbor_api.http.get = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_get_request('{base_url}/api/path')
def test_api__make_paginated_get_request(harbor_api):
expected_result = [{'item': i} for i in range(20)]
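    # group the expected items two per page to emulate the registry's paginated responses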
paginated_result = [[expected_result[i], expected_result[i + 1]] for i in range(0, len(expected_result) - 1, 2)]
values = []
for r in paginated_result:
values.append(MockResponse(json_data=r, headers={'link': 'Link: <unused_url>; rel=next; type="text/plain"'}))
values[-1].headers.pop('link')
harbor_api.http = MagicMock()
harbor_api.http.get = MagicMock(side_effect=values)
assert harbor_api._make_paginated_get_request('{base_url}/api/path') == expected_result
def test_api__make_post_request(harbor_api):
harbor_api.http = MagicMock()
harbor_api.http.post = MagicMock(return_value=MockResponse(json_data={'json': True}))
assert harbor_api._make_post_request('{base_url}/api/path') == {"json": True}
harbor_api.http.post = MagicMock(return_value=MockResponse(status_code=500))
with pytest.raises(HTTPError):
harbor_api._make_post_request('{base_url}/api/path')
| [
"datadog_checks.dev.http.MockResponse",
"pytest.raises",
"pytest.mark.usefixtures",
"pytest.mark.skipif",
"mock.MagicMock"
]
| [((377, 418), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""patch_requests"""'], {}), "('patch_requests')\n", (400, 418), False, 'import pytest\n'), ((1311, 1352), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""patch_requests"""'], {}), "('patch_requests')\n", (1334, 1352), False, 'import pytest\n'), ((1642, 1683), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""patch_requests"""'], {}), "('patch_requests')\n", (1665, 1683), False, 'import pytest\n'), ((1918, 1959), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""patch_requests"""'], {}), "('patch_requests')\n", (1941, 1959), False, 'import pytest\n'), ((2251, 2292), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""patch_requests"""'], {}), "('patch_requests')\n", (2274, 2292), False, 'import pytest\n'), ((2294, 2416), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(HARBOR_VERSION < VERSION_1_5)'], {'reason': '"""The registry.read_only metric is submitted for Harbor 1.5+"""'}), "(HARBOR_VERSION < VERSION_1_5, reason=\n 'The registry.read_only metric is submitted for Harbor 1.5+')\n", (2312, 2416), False, 'import pytest\n'), ((2717, 2728), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (2726, 2728), False, 'from mock import MagicMock\n'), ((3526, 3537), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3535, 3537), False, 'from mock import MagicMock\n'), ((3564, 3593), 'mock.MagicMock', 'MagicMock', ([], {'side_effect': 'values'}), '(side_effect=values)\n', (3573, 3593), False, 'from mock import MagicMock\n'), ((3756, 3767), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (3765, 3767), False, 'from mock import MagicMock\n'), ((2989, 3013), 'pytest.raises', 'pytest.raises', (['HTTPError'], {}), '(HTTPError)\n', (3002, 3013), False, 'import pytest\n'), ((4031, 4055), 'pytest.raises', 'pytest.raises', (['HTTPError'], {}), '(HTTPError)\n', (4044, 4055), False, 'import pytest\n'), ((2778, 2816), 'datadog_checks.dev.http.MockResponse', 'MockResponse', ([], {'json_data': "{'json': True}"}), "(json_data={'json': True})\n", (2790, 2816), False, 'from datadog_checks.dev.http import MockResponse\n'), ((2949, 2978), 'datadog_checks.dev.http.MockResponse', 'MockResponse', ([], {'status_code': '(500)'}), '(status_code=500)\n', (2961, 2978), False, 'from datadog_checks.dev.http import MockResponse\n'), ((3372, 3470), 'datadog_checks.dev.http.MockResponse', 'MockResponse', ([], {'json_data': 'r', 'headers': '{\'link\': \'Link: <unused_url>; rel=next; type="text/plain"\'}'}), '(json_data=r, headers={\'link\':\n \'Link: <unused_url>; rel=next; type="text/plain"\'})\n', (3384, 3470), False, 'from datadog_checks.dev.http import MockResponse\n'), ((3818, 3856), 'datadog_checks.dev.http.MockResponse', 'MockResponse', ([], {'json_data': "{'json': True}"}), "(json_data={'json': True})\n", (3830, 3856), False, 'from datadog_checks.dev.http import MockResponse\n'), ((3991, 4020), 'datadog_checks.dev.http.MockResponse', 'MockResponse', ([], {'status_code': '(500)'}), '(status_code=500)\n', (4003, 4020), False, 'from datadog_checks.dev.http import MockResponse\n')] |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vsm_dashboard.api import swift
from .utils import TestDataContainer
def data(TEST):
TEST.containers = TestDataContainer()
TEST.objects = TestDataContainer()
container_1 = swift.Container(dict(name=u"container_one\u6346"))
container_2 = swift.Container(dict(name=u"container_two\u6346"))
TEST.containers.add(container_1, container_2)
object_dict = {"name": u"test_object\u6346",
"content_type": u"text/plain",
"bytes": 128,
"last_modified": None,
"hash": u"object_hash"}
obj_dicts = [object_dict]
obj_data = "Fake Data"
for obj_dict in obj_dicts:
swift_object = swift.StorageObject(obj_dict,
container_1.name,
data=obj_data)
TEST.objects.add(swift_object)
| [
"vsm_dashboard.api.swift.StorageObject"
]
| [((1295, 1357), 'vsm_dashboard.api.swift.StorageObject', 'swift.StorageObject', (['obj_dict', 'container_1.name'], {'data': 'obj_data'}), '(obj_dict, container_1.name, data=obj_data)\n', (1314, 1357), False, 'from vsm_dashboard.api import swift\n')] |
# Copyright (C) 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for all backup drivers."""
import abc
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from cinder.db import base
from cinder import exception
from cinder.i18n import _
from cinder import keymgr as key_manager
service_opts = [
cfg.IntOpt('backup_metadata_version', default=2,
help='Backup metadata version to be used when backing up '
'volume metadata. If this number is bumped, make sure the '
'service doing the restore supports the new version.'),
cfg.IntOpt('backup_object_number_per_notification',
default=10,
help='The number of chunks or objects, for which one '
'Ceilometer notification will be sent'),
cfg.IntOpt('backup_timer_interval',
default=120,
help='Interval, in seconds, between two progress notifications '
'reporting the backup status'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
LOG = logging.getLogger(__name__)
class BackupMetadataAPI(base.Base):
TYPE_TAG_VOL_BASE_META = 'volume-base-metadata'
TYPE_TAG_VOL_META = 'volume-metadata'
TYPE_TAG_VOL_GLANCE_META = 'volume-glance-metadata'
def __init__(self, context, db=None):
super(BackupMetadataAPI, self).__init__(db)
self.context = context
@staticmethod
def _is_serializable(value):
"""Returns True if value is serializable."""
try:
jsonutils.dumps(value)
except TypeError:
LOG.info("Value with type=%s is not serializable",
type(value))
return False
return True
def _save_vol_base_meta(self, container, volume_id):
"""Save base volume metadata to container.
This will fetch all fields from the db Volume object for volume_id and
save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_BASE_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for key, value in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(value):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", key)
continue
# Copy the encryption key UUID for backup
                if key == 'encryption_key_id' and value is not None:
km = key_manager.API(CONF)
value = km.store(self.context, km.get(self.context, value))
LOG.debug("Copying encryption key UUID for backup.")
container[type_tag][key] = value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_meta(self, container, volume_id):
"""Save volume metadata to container.
This will fetch all fields from the db VolumeMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_META
LOG.debug("Getting metadata type '%s'", type_tag)
meta = self.db.volume_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(meta[entry]):
LOG.info("Unable to serialize field '%s' - excluding "
"from backup", entry)
continue
container[type_tag][entry] = meta[entry]
LOG.debug("Completed fetching metadata type '%s'", type_tag)
else:
LOG.debug("No metadata type '%s' available", type_tag)
def _save_vol_glance_meta(self, container, volume_id):
"""Save volume Glance metadata to container.
This will fetch all fields from the db VolumeGlanceMetadata object for
volume_id and save them in the provided container dictionary.
"""
type_tag = self.TYPE_TAG_VOL_GLANCE_META
LOG.debug("Getting metadata type '%s'", type_tag)
try:
meta = self.db.volume_glance_metadata_get(self.context, volume_id)
if meta:
container[type_tag] = {}
for entry in meta:
# Exclude fields that are "not JSON serializable"
if not self._is_serializable(entry.value):
LOG.info("Unable to serialize field '%s' - "
"excluding from backup", entry)
continue
container[type_tag][entry.key] = entry.value
LOG.debug("Completed fetching metadata type '%s'", type_tag)
except exception.GlanceMetadataNotFound:
LOG.debug("No metadata type '%s' available", type_tag)
@staticmethod
def _filter(metadata, fields, excludes=None):
"""Returns set of metadata restricted to required fields.
If fields is empty list, the full set is returned.
:param metadata: master set of metadata
:param fields: list of fields we want to extract
:param excludes: fields to be excluded
:returns: filtered metadata
"""
if not fields:
return metadata
if not excludes:
excludes = []
subset = {}
for field in fields:
if field in metadata and field not in excludes:
subset[field] = metadata[field]
else:
LOG.debug("Excluding field '%s'", field)
return subset
def _restore_vol_base_meta(self, metadata, volume_id, fields):
"""Restore values to Volume object for provided fields."""
LOG.debug("Restoring volume base metadata")
excludes = []
# Ignore unencrypted backups.
key = 'encryption_key_id'
if key in fields and key in metadata and metadata[key] is not None:
self._restore_vol_encryption_meta(volume_id,
metadata['volume_type_id'])
# NOTE(dosaboy): if the target volume looks like it was auto-created
# as part of this restore operation and we have a name to restore
# then apply the name to the target volume. However, if that target
# volume already existed and it has a name or we do not have a name to
# restore, then ignore this key. This is intended to be a less drastic
# solution than commit 7ee80f7.
key = 'display_name'
if key in fields and key in metadata:
target_vol = self.db.volume_get(self.context, volume_id)
name = target_vol.get(key, '')
if (not metadata.get(key) or name and
not name.startswith('restore_backup_')):
excludes.append(key)
excludes.append('display_description')
metadata = self._filter(metadata, fields, excludes=excludes)
self.db.volume_update(self.context, volume_id, metadata)
def _restore_vol_encryption_meta(self, volume_id, src_volume_type_id):
"""Restores the volume_type_id for encryption if needed.
Only allow restoration of an encrypted backup if the destination
volume has the same volume type as the source volume. Otherwise
encryption will not work. If volume types are already the same,
no action is needed.
"""
dest_vol = self.db.volume_get(self.context, volume_id)
if dest_vol['volume_type_id'] != src_volume_type_id:
LOG.debug("Volume type id's do not match.")
# If the volume types do not match, and the destination volume
# does not have a volume type, force the destination volume
# to have the encrypted volume type, provided it still exists.
if dest_vol['volume_type_id'] is None:
try:
self.db.volume_type_get(
self.context, src_volume_type_id)
except exception.VolumeTypeNotFound:
LOG.debug("Volume type of source volume has been "
"deleted. Encrypted backup restore has "
"failed.")
msg = _("The source volume type '%s' is not "
"available.") % (src_volume_type_id)
raise exception.EncryptedBackupOperationFailed(msg)
# Update dest volume with src volume's volume_type_id.
LOG.debug("The volume type of the destination volume "
"will become the volume type of the source "
"volume.")
self.db.volume_update(self.context, volume_id,
{'volume_type_id': src_volume_type_id})
else:
# Volume type id's do not match, and destination volume
# has a volume type. Throw exception.
LOG.warning("Destination volume type is different from "
"source volume type for an encrypted volume. "
"Encrypted backup restore has failed.")
msg = (_("The source volume type '%(src)s' is different "
"than the destination volume type '%(dest)s'.") %
{'src': src_volume_type_id,
'dest': dest_vol['volume_type_id']})
raise exception.EncryptedBackupOperationFailed(msg)
def _restore_vol_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeMetadata object for provided fields."""
LOG.debug("Restoring volume metadata")
metadata = self._filter(metadata, fields)
self.db.volume_metadata_update(self.context, volume_id, metadata, True)
def _restore_vol_glance_meta(self, metadata, volume_id, fields):
"""Restore values to VolumeGlanceMetadata object for provided fields.
First delete any existing metadata then save new values.
"""
LOG.debug("Restoring volume glance metadata")
metadata = self._filter(metadata, fields)
self.db.volume_glance_metadata_delete_by_volume(self.context,
volume_id)
for key, value in metadata.items():
self.db.volume_glance_metadata_create(self.context,
volume_id,
key, value)
# Now mark the volume as bootable
self.db.volume_update(self.context, volume_id,
{'bootable': True})
def _v1_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def _v2_restore_factory(self):
"""All metadata is backed up but we selectively restore.
Returns a dictionary of the form:
{<type tag>: (<restore function>, <fields list>)}
Empty field list indicates that all backed up fields should be
restored.
"""
return {self.TYPE_TAG_VOL_BASE_META:
(self._restore_vol_base_meta,
['display_name', 'display_description', 'encryption_key_id']),
self.TYPE_TAG_VOL_META:
(self._restore_vol_meta, []),
self.TYPE_TAG_VOL_GLANCE_META:
(self._restore_vol_glance_meta, [])}
def get(self, volume_id):
"""Get volume metadata.
Returns a json-encoded dict containing all metadata and the restore
version i.e. the version used to decide what actually gets restored
from this container when doing a backup restore.
"""
container = {'version': CONF.backup_metadata_version}
self._save_vol_base_meta(container, volume_id)
self._save_vol_meta(container, volume_id)
self._save_vol_glance_meta(container, volume_id)
if container:
return jsonutils.dumps(container)
else:
return None
def put(self, volume_id, json_metadata):
"""Restore volume metadata to a volume.
The json container should contain a version that is supported here.
"""
meta_container = jsonutils.loads(json_metadata)
version = meta_container['version']
if version == 1:
factory = self._v1_restore_factory()
elif version == 2:
factory = self._v2_restore_factory()
else:
msg = (_("Unsupported backup metadata version (%s)") % (version))
raise exception.BackupMetadataUnsupportedVersion(msg)
for type in factory:
func = factory[type][0]
fields = factory[type][1]
if type in meta_container:
func(meta_container[type], volume_id, fields)
else:
LOG.debug("No metadata of type '%s' to restore", type)
@six.add_metaclass(abc.ABCMeta)
class BackupDriver(base.Base):
def __init__(self, context, db=None):
super(BackupDriver, self).__init__(db)
self.context = context
self.backup_meta_api = BackupMetadataAPI(context, db)
# This flag indicates if backup driver supports force
# deletion. So it should be set to True if the driver that inherits
# from BackupDriver supports the force deletion function.
self.support_force_delete = False
def get_metadata(self, volume_id):
return self.backup_meta_api.get(volume_id)
def put_metadata(self, volume_id, json_metadata):
self.backup_meta_api.put(volume_id, json_metadata)
@abc.abstractmethod
def backup(self, backup, volume_file, backup_metadata=False):
"""Start a backup of a specified volume.
Some I/O operations may block greenthreads, so in order to prevent
starvation parameter volume_file will be a proxy that will execute all
methods in native threads, so the method implementation doesn't need to
worry about that..
"""
return
@abc.abstractmethod
def restore(self, backup, volume_id, volume_file):
"""Restore a saved backup.
Some I/O operations may block greenthreads, so in order to prevent
starvation parameter volume_file will be a proxy that will execute all
methods in native threads, so the method implementation doesn't need to
worry about that..
"""
return
@abc.abstractmethod
def delete_backup(self, backup):
"""Delete a saved backup."""
return
def export_record(self, backup):
"""Export driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
and return it here as a dictionary so it can be serialized into a
string.
Default backup driver implementation has no extra information.
:param backup: backup object to export
:returns: driver_info - dictionary with extra information
"""
return {}
def import_record(self, backup, driver_info):
"""Import driver specific backup record information.
If backup backend needs additional driver specific information to
import backup record back into the system it must overwrite this method
since it will be called with the extra information that was provided by
export_record when exporting the backup.
Default backup driver implementation does nothing since it didn't
export any specific data in export_record.
:param backup: backup object to export
:param driver_info: dictionary with driver specific backup record
information
:returns: nothing
"""
return
def check_for_setup_error(self):
"""Method for checking if backup backend is successfully installed."""
return
@six.add_metaclass(abc.ABCMeta)
class BackupDriverWithVerify(BackupDriver):
@abc.abstractmethod
def verify(self, backup):
"""Verify that the backup exists on the backend.
Verify that the backup is OK, possibly following an import record
operation.
:param backup: backup id of the backup to verify
:raises InvalidBackup, NotImplementedError:
"""
return
| [
"cinder.exception.EncryptedBackupOperationFailed",
"six.add_metaclass",
"oslo_config.cfg.IntOpt",
"cinder.i18n._",
"cinder.exception.BackupMetadataUnsupportedVersion",
"oslo_serialization.jsonutils.dumps",
"cinder.keymgr.API",
"oslo_serialization.jsonutils.loads",
"oslo_log.log.getLogger"
]
| [((1707, 1734), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1724, 1734), True, 'from oslo_log import log as logging\n'), ((14479, 14509), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (14496, 14509), False, 'import six\n'), ((17554, 17584), 'six.add_metaclass', 'six.add_metaclass', (['abc.ABCMeta'], {}), '(abc.ABCMeta)\n', (17571, 17584), False, 'import six\n'), ((955, 1181), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""backup_metadata_version"""'], {'default': '(2)', 'help': '"""Backup metadata version to be used when backing up volume metadata. If this number is bumped, make sure the service doing the restore supports the new version."""'}), "('backup_metadata_version', default=2, help=\n 'Backup metadata version to be used when backing up volume metadata. If this number is bumped, make sure the service doing the restore supports the new version.'\n )\n", (965, 1181), False, 'from oslo_config import cfg\n'), ((1238, 1403), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""backup_object_number_per_notification"""'], {'default': '(10)', 'help': '"""The number of chunks or objects, for which one Ceilometer notification will be sent"""'}), "('backup_object_number_per_notification', default=10, help=\n 'The number of chunks or objects, for which one Ceilometer notification will be sent'\n )\n", (1248, 1403), False, 'from oslo_config import cfg\n'), ((1452, 1603), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""backup_timer_interval"""'], {'default': '(120)', 'help': '"""Interval, in seconds, between two progress notifications reporting the backup status"""'}), "('backup_timer_interval', default=120, help=\n 'Interval, in seconds, between two progress notifications reporting the backup status'\n )\n", (1462, 1603), False, 'from oslo_config import cfg\n'), ((13799, 13829), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['json_metadata'], {}), '(json_metadata)\n', (13814, 13829), False, 'from oslo_serialization import jsonutils\n'), ((2180, 2202), 'oslo_serialization.jsonutils.dumps', 'jsonutils.dumps', (['value'], {}), '(value)\n', (2195, 2202), False, 'from oslo_serialization import jsonutils\n'), ((13526, 13552), 'oslo_serialization.jsonutils.dumps', 'jsonutils.dumps', (['container'], {}), '(container)\n', (13541, 13552), False, 'from oslo_serialization import jsonutils\n'), ((10457, 10502), 'cinder.exception.EncryptedBackupOperationFailed', 'exception.EncryptedBackupOperationFailed', (['msg'], {}), '(msg)\n', (10497, 10502), False, 'from cinder import exception\n'), ((14134, 14181), 'cinder.exception.BackupMetadataUnsupportedVersion', 'exception.BackupMetadataUnsupportedVersion', (['msg'], {}), '(msg)\n', (14176, 14181), False, 'from cinder import exception\n'), ((3307, 3328), 'cinder.keymgr.API', 'key_manager.API', (['CONF'], {}), '(CONF)\n', (3322, 3328), True, 'from cinder import keymgr as key_manager\n'), ((10197, 10297), 'cinder.i18n._', '_', (['"""The source volume type \'%(src)s\' is different than the destination volume type \'%(dest)s\'."""'], {}), '("The source volume type \'%(src)s\' is different than the destination volume type \'%(dest)s\'."\n )\n', (10198, 10297), False, 'from cinder.i18n import _\n'), ((14057, 14102), 'cinder.i18n._', '_', (['"""Unsupported backup metadata version (%s)"""'], {}), "('Unsupported backup metadata version (%s)')\n", (14058, 14102), False, 'from cinder.i18n import _\n'), ((9377, 9422), 'cinder.exception.EncryptedBackupOperationFailed', 
'exception.EncryptedBackupOperationFailed', (['msg'], {}), '(msg)\n', (9417, 9422), False, 'from cinder import exception\n'), ((9246, 9296), 'cinder.i18n._', '_', (['"""The source volume type \'%s\' is not available."""'], {}), '("The source volume type \'%s\' is not available.")\n', (9247, 9296), False, 'from cinder.i18n import _\n')] |
import unittest
from QuerySciGraph import QuerySciGraph
class QuerySciGraphTestCase(unittest.TestCase):
def test_get_disont_ids_for_mesh_id(self):
disont_ids = QuerySciGraph.get_disont_ids_for_mesh_id('MESH:D005199')
known_ids = {'DOID:13636'}
self.assertSetEqual(disont_ids, known_ids)
def test_query_sub_phenotypes_for_phenotype(self):
sub_phenotypes = QuerySciGraph.query_sub_phenotypes_for_phenotype("HP:0000107") # Renal cyst
known_phenotypes = {'HP:0100877': 'Renal diverticulum',
'HP:0000108': 'Renal corticomedullary cysts',
'HP:0000803': 'Renal cortical cysts',
'HP:0000003': 'Multicystic kidney dysplasia',
'HP:0008659': 'Multiple small medullary renal cysts',
'HP:0005562': 'Multiple renal cysts',
'HP:0000800': 'Cystic renal dysplasia',
'HP:0012581': 'Solitary renal cyst'}
self.assertDictEqual(sub_phenotypes, known_phenotypes)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"QuerySciGraph.QuerySciGraph.query_sub_phenotypes_for_phenotype",
"QuerySciGraph.QuerySciGraph.get_disont_ids_for_mesh_id"
]
| [((1130, 1145), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1143, 1145), False, 'import unittest\n'), ((174, 230), 'QuerySciGraph.QuerySciGraph.get_disont_ids_for_mesh_id', 'QuerySciGraph.get_disont_ids_for_mesh_id', (['"""MESH:D005199"""'], {}), "('MESH:D005199')\n", (214, 230), False, 'from QuerySciGraph import QuerySciGraph\n'), ((398, 460), 'QuerySciGraph.QuerySciGraph.query_sub_phenotypes_for_phenotype', 'QuerySciGraph.query_sub_phenotypes_for_phenotype', (['"""HP:0000107"""'], {}), "('HP:0000107')\n", (446, 460), False, 'from QuerySciGraph import QuerySciGraph\n')] |
from typing import Any
from ledis import Ledis
from ledis.exceptions import InvalidUsage
class CLI:
__slots__ = {"ledis", "commands"}
def __init__(self):
self.ledis = Ledis()
self.commands = {
"set": self.ledis.set,
"get": self.ledis.get,
"sadd": self.ledis.sadd,
"srem": self.ledis.srem,
"smembers": self.ledis.smembers,
"sinter": self.ledis.sinter,
"keys": self.ledis.keys,
"del": self.ledis.delete,
"expire": self.ledis.expire,
"ttl": self.ledis.ttl,
"save": self.ledis.save,
"restore": self.ledis.restore,
}
def call(self, query: str) -> Any:
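        # split "COMMAND arg1 arg2 ..." into the command name and its argument list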
if " " in query:
command, data = query.split(" ", 1)
data = data.split()
else:
command = query
data = []
if command.lower() not in self.commands:
allowed_commands = ", ".join(key.upper() for key in self.commands)
raise InvalidUsage(
f"Command '{command}' is invalid. "
f"Allowed commands are {allowed_commands}."
)
try:
return self.commands[command.lower()](*data)
except TypeError:
raise InvalidUsage("Invalid command format")
| [
"ledis.Ledis",
"ledis.exceptions.InvalidUsage"
]
| [((187, 194), 'ledis.Ledis', 'Ledis', ([], {}), '()\n', (192, 194), False, 'from ledis import Ledis\n'), ((1048, 1147), 'ledis.exceptions.InvalidUsage', 'InvalidUsage', (['f"""Command \'{command}\' is invalid. Allowed commands are {allowed_commands}."""'], {}), '(\n f"Command \'{command}\' is invalid. Allowed commands are {allowed_commands}."\n )\n', (1060, 1147), False, 'from ledis.exceptions import InvalidUsage\n'), ((1303, 1341), 'ledis.exceptions.InvalidUsage', 'InvalidUsage', (['"""Invalid command format"""'], {}), "('Invalid command format')\n", (1315, 1341), False, 'from ledis.exceptions import InvalidUsage\n')] |
#group 1: Question 1(b)
# A control system for positioning the head of a laser printer has the closed loop transfer function:
# !pip install control
import matplotlib.pyplot as plt
import control
a=10 #Value for a
b=50 #value for b
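# closed-loop transfer function: T1(s) = 20*b / (s^3 + (20+a)*s^2 + (b+20*a)*s + 20*b)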
sys1 = control.tf(20*b,[1,20+a,b+20*a,20*b])
print('3rd order system transfer function T1(s)=',sys1)
sys2=control.tf(b,[1,a,b])
print('2nd order system transfer funtion T2(s)',sys2)
value = sys1.pole()
list_of_poles = [pole.round(2) for pole in value]
print('poles',list_of_poles)
y1=control.step_response(sys1)
y2=control.step_response(sys2)
plt.plot(y1[0],y1[1],'r--', label='3rd order actual system')
plt.plot(y2[0],y2[1],'g', label='2nd order approximation system')
plt.legend()
plt.grid()
plt.xlabel('time (s)')
plt.ylabel('step response y(t)')
plt.title('step response comparison of 3rd and 2nd order system')
plt.show()
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.title",
"control.tf",
"control.step_response",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((244, 295), 'control.tf', 'control.tf', (['(20 * b)', '[1, 20 + a, b + 20 * a, 20 * b]'], {}), '(20 * b, [1, 20 + a, b + 20 * a, 20 * b])\n', (254, 295), False, 'import control\n'), ((343, 367), 'control.tf', 'control.tf', (['b', '[1, a, b]'], {}), '(b, [1, a, b])\n', (353, 367), False, 'import control\n'), ((523, 550), 'control.step_response', 'control.step_response', (['sys1'], {}), '(sys1)\n', (544, 550), False, 'import control\n'), ((554, 581), 'control.step_response', 'control.step_response', (['sys2'], {}), '(sys2)\n', (575, 581), False, 'import control\n'), ((582, 644), 'matplotlib.pyplot.plot', 'plt.plot', (['y1[0]', 'y1[1]', '"""r--"""'], {'label': '"""3rd order actual system"""'}), "(y1[0], y1[1], 'r--', label='3rd order actual system')\n", (590, 644), True, 'import matplotlib.pyplot as plt\n'), ((643, 710), 'matplotlib.pyplot.plot', 'plt.plot', (['y2[0]', 'y2[1]', '"""g"""'], {'label': '"""2nd order approximation system"""'}), "(y2[0], y2[1], 'g', label='2nd order approximation system')\n", (651, 710), True, 'import matplotlib.pyplot as plt\n'), ((709, 721), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (719, 721), True, 'import matplotlib.pyplot as plt\n'), ((722, 732), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (730, 732), True, 'import matplotlib.pyplot as plt\n'), ((733, 755), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (s)"""'], {}), "('time (s)')\n", (743, 755), True, 'import matplotlib.pyplot as plt\n'), ((756, 788), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""step response y(t)"""'], {}), "('step response y(t)')\n", (766, 788), True, 'import matplotlib.pyplot as plt\n'), ((789, 854), 'matplotlib.pyplot.title', 'plt.title', (['"""step response comparison of 3rd and 2nd order system"""'], {}), "('step response comparison of 3rd and 2nd order system')\n", (798, 854), True, 'import matplotlib.pyplot as plt\n'), ((855, 865), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (863, 865), True, 'import matplotlib.pyplot as plt\n')] |
import re
from precise_bbcode.bbcode.tag import BBCodeTag
from precise_bbcode.tag_pool import tag_pool
color_re = re.compile(r'^([a-z]+|#[0-9abcdefABCDEF]{3,6})$')
class SubTag(BBCodeTag):
name = 'sub'
def render(self, value, option=None, parent=None):
return '<sub>%s</sub>' % value
class PreTag(BBCodeTag):
name = 'pre'
render_embedded = False
def render(self, value, option=None, parent=None):
return '<pre>%s</pre>' % value
class SizeTag(BBCodeTag):
name = 'size'
definition_string = '[size={RANGE=4,7}]{TEXT}[/size]'
format_string = '<span style="font-size:{RANGE=4,7}px;">{TEXT}</span>'
class FruitTag(BBCodeTag):
name = 'fruit'
definition_string = '[fruit]{CHOICE=tomato,orange,apple}[/fruit]'
format_string = '<h5>{CHOICE=tomato,orange,apple}</h5>'
class PhoneLinkTag(BBCodeTag):
name = 'phone'
definition_string = '[phone]{PHONENUMBER}[/phone]'
format_string = '<a href="tel:{PHONENUMBER}">{PHONENUMBER}</a>'
def render(self, value, option=None, parent=None):
href = 'tel:{}'.format(value)
return '<a href="{0}">{0}</a>'.format(href, value)
class StartsWithATag(BBCodeTag):
name = 'startswitha'
definition_string = '[startswitha]{STARTSWITH=a}[/startswitha]'
format_string = '<span>{STARTSWITH=a}</span>'
class RoundedBBCodeTag(BBCodeTag):
name = 'rounded'
class Options:
strip = False
def render(self, value, option=None, parent=None):
if option and re.search(color_re, option) is not None:
return '<div class="rounded" style="border-color:{};">{}</div>'.format(option, value)
return '<div class="rounded">{}</div>'.format(value)
tag_pool.register_tag(SubTag)
tag_pool.register_tag(PreTag)
tag_pool.register_tag(SizeTag)
tag_pool.register_tag(FruitTag)
tag_pool.register_tag(PhoneLinkTag)
tag_pool.register_tag(StartsWithATag)
tag_pool.register_tag(RoundedBBCodeTag)
| [
"precise_bbcode.tag_pool.tag_pool.register_tag",
"re.search",
"re.compile"
]
| [((117, 165), 're.compile', 're.compile', (['"""^([a-z]+|#[0-9abcdefABCDEF]{3,6})$"""'], {}), "('^([a-z]+|#[0-9abcdefABCDEF]{3,6})$')\n", (127, 165), False, 'import re\n'), ((1716, 1745), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['SubTag'], {}), '(SubTag)\n', (1737, 1745), False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((1746, 1775), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['PreTag'], {}), '(PreTag)\n', (1767, 1775), False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((1776, 1806), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['SizeTag'], {}), '(SizeTag)\n', (1797, 1806), False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((1807, 1838), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['FruitTag'], {}), '(FruitTag)\n', (1828, 1838), False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((1839, 1874), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['PhoneLinkTag'], {}), '(PhoneLinkTag)\n', (1860, 1874), False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((1875, 1912), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['StartsWithATag'], {}), '(StartsWithATag)\n', (1896, 1912), False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((1913, 1952), 'precise_bbcode.tag_pool.tag_pool.register_tag', 'tag_pool.register_tag', (['RoundedBBCodeTag'], {}), '(RoundedBBCodeTag)\n', (1934, 1952), False, 'from precise_bbcode.tag_pool import tag_pool\n'), ((1514, 1541), 're.search', 're.search', (['color_re', 'option'], {}), '(color_re, option)\n', (1523, 1541), False, 'import re\n')] |
## Program: VMTK
## Language: Python
## Date: January 10, 2018
## Version: 1.4
## Copyright (c) <NAME>, <NAME>, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## <NAME> (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtksurfacescaling as scaling
def test_isotropic_scale(aorta_surface, compare_surfaces):
name = __name__ + '_test_isotropic_scale.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactor = 2
scaler.Execute()
assert compare_surfaces(scaler.Surface, name, tolerance=1.0) == True
@pytest.mark.parametrize('xfactor,yfactor,zfactor,paramid', [
(2, None, None, '0'),
(None, 2, None, '1'),
(None, None, 2, '2'),
(2, 2, None, '3'),
(2, None, 2, '4'),
(None, 2, 2, '5'),
])
def test_xyz_scale_factors(aorta_surface, compare_surfaces, xfactor,
yfactor, zfactor, paramid):
name = __name__ + '_test_xyz_scale_factors_' + paramid + '.vtp'
scaler = scaling.vmtkSurfaceScaling()
scaler.Surface = aorta_surface
scaler.ScaleFactorX = xfactor
scaler.ScaleFactorY = yfactor
scaler.ScaleFactorZ = zfactor
scaler.Execute()
assert compare_surfaces(scaler.Surface, name, tolerance=1.0) == True
| [
"pytest.mark.parametrize",
"vmtk.vmtksurfacescaling.vmtkSurfaceScaling"
]
| [((873, 1064), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""xfactor,yfactor,zfactor,paramid"""', "[(2, None, None, '0'), (None, 2, None, '1'), (None, None, 2, '2'), (2, 2,\n None, '3'), (2, None, 2, '4'), (None, 2, 2, '5')]"], {}), "('xfactor,yfactor,zfactor,paramid', [(2, None, None,\n '0'), (None, 2, None, '1'), (None, None, 2, '2'), (2, 2, None, '3'), (2,\n None, 2, '4'), (None, 2, 2, '5')])\n", (896, 1064), False, 'import pytest\n'), ((684, 712), 'vmtk.vmtksurfacescaling.vmtkSurfaceScaling', 'scaling.vmtkSurfaceScaling', ([], {}), '()\n', (710, 712), True, 'import vmtk.vmtksurfacescaling as scaling\n'), ((1289, 1317), 'vmtk.vmtksurfacescaling.vmtkSurfaceScaling', 'scaling.vmtkSurfaceScaling', ([], {}), '()\n', (1315, 1317), True, 'import vmtk.vmtksurfacescaling as scaling\n')] |
#!/usr/bin/env python
"""Distribution functions
This module provides functions for dealing with normal distributions
and generating error maps.
When called directly as main, it allows for converting a threshold map
into an error map.
```
$ python -m mlcsim.dist --help
usage: dist.py [-h] [-b {1,2,3,4}] -f F [-o O]
options:
-h, --help show this help message and exit
-b {1,2,3,4} bits per cell
-f F Threshold map json to convert
-o O output to file
```
"""
import argparse
import json
from pprint import pprint
from typing import Dict, List
import numpy as np
from scipy import stats as ss # type: ignore
# https://stackoverflow.com/a/32574638/9047818
# https://stackoverflow.com/a/13072714/9047818
def normalMidpoint(mean_a: float, mean_b: float, std_a: float, std_b: float) -> float:
"""Find the midpoint between two normal distributions
Args:
mean_a (float): Mean of first distribution
mean_b (float): Mean of second distribution
std_a (float): Std dev of first distribution
std_b (float): Std dev of second distribution
Returns:
float: Midpoint between distributions
"""
a = 1 / (2 * std_a**2) - 1 / (2 * std_b**2)
b = mean_b / (std_b**2) - mean_a / (std_a**2)
c = (
mean_a**2 / (2 * std_a**2)
- mean_b**2 / (2 * std_b**2)
- np.log(std_b / std_a)
)
roots = np.roots([a, b, c])
masked = np.ma.masked_outside(roots, mean_a, mean_b)
return float(masked[~masked.mask][0][0])
# https://www.askpython.com/python/normal-distribution
def normalChance(mean: float, stdev: float, thr: float) -> float:
"""Find the chance of a normal distribution above/below a given value
Args:
mean (float): Mean of the distribution
stdev (float): Std dev of the distribution
thr (float): Threshold to check above/below
Returns:
float: Chance for threshold to end up above/below the given point in the distribution
"""
chance = ss.norm(loc=mean, scale=stdev).cdf(thr)
return float(chance if mean > thr else 1 - chance)
def genErrorMap(thr_maps: Dict[str, List[List[float]]], bpc: int) -> List[List[float]]:
"""Generate an error map from a threshold map
Args:
thr_maps (dict): Threshold map
bpc (int): Bits per cell
Raises:
ValueError: if the given bpc is not in the threshold map
Returns:
list: Error map from the threshold map
"""
if str(bpc) not in thr_maps.keys():
raise ValueError(f"Threshold map does not have values for {bpc} levels")
thr_map: List[List[float]] = thr_maps[str(bpc)]
err_map = [[0.0]]
for i in range(len(thr_map) - 1):
mid = normalMidpoint(
thr_map[i][0], thr_map[i + 1][0], thr_map[i][1], thr_map[i + 1][1]
)
up = normalChance(thr_map[i][0], thr_map[i][1], mid)
dn = normalChance(thr_map[i + 1][0], thr_map[i + 1][1], mid)
err_map[i].append(up)
err_map.append([dn])
err_map[-1].append(0.0)
return err_map
def _main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-b", type=int, default=2, choices=range(1, 5), help="bits per cell"
)
parser.add_argument("-f", required=True, help="Threshold map json to convert")
parser.add_argument("-o", type=str, help="output to file")
args = parser.parse_args()
with open(args.f) as f:
thr_map = json.load(f)
err_map = genErrorMap(thr_map, args.b)
if args.o:
with open(args.o, "w") as f:
json.dump(err_map, f)
else:
pprint(err_map)
if __name__ == "__main__":
_main()
| [
"numpy.ma.masked_outside",
"argparse.ArgumentParser",
"scipy.stats.norm",
"numpy.log",
"numpy.roots",
"json.load",
"pprint.pprint",
"json.dump"
]
| [((1404, 1423), 'numpy.roots', 'np.roots', (['[a, b, c]'], {}), '([a, b, c])\n', (1412, 1423), True, 'import numpy as np\n'), ((1437, 1480), 'numpy.ma.masked_outside', 'np.ma.masked_outside', (['roots', 'mean_a', 'mean_b'], {}), '(roots, mean_a, mean_b)\n', (1457, 1480), True, 'import numpy as np\n'), ((3098, 3123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3121, 3123), False, 'import argparse\n'), ((1364, 1385), 'numpy.log', 'np.log', (['(std_b / std_a)'], {}), '(std_b / std_a)\n', (1370, 1385), True, 'import numpy as np\n'), ((3458, 3470), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3467, 3470), False, 'import json\n'), ((3620, 3635), 'pprint.pprint', 'pprint', (['err_map'], {}), '(err_map)\n', (3626, 3635), False, 'from pprint import pprint\n'), ((2013, 2043), 'scipy.stats.norm', 'ss.norm', ([], {'loc': 'mean', 'scale': 'stdev'}), '(loc=mean, scale=stdev)\n', (2020, 2043), True, 'from scipy import stats as ss\n'), ((3580, 3601), 'json.dump', 'json.dump', (['err_map', 'f'], {}), '(err_map, f)\n', (3589, 3601), False, 'import json\n')] |
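A quick sanity check of the crossover formula used by normalMidpoint above (illustrative only, not part of the dataset row): two normals with equal spread intersect halfway between their means, so N(0,1) and N(4,1) should cross at x = 2.
import numpy as np

mean_a, mean_b, std_a, std_b = 0.0, 4.0, 1.0, 1.0
a = 1 / (2 * std_a**2) - 1 / (2 * std_b**2)    # 0.0 because the spreads match
b = mean_b / std_b**2 - mean_a / std_a**2      # 4.0
c = (mean_a**2 / (2 * std_a**2)
     - mean_b**2 / (2 * std_b**2)
     - np.log(std_b / std_a))                   # -8.0
print(np.roots([a, b, c]))  # np.roots drops the leading zero and returns [2.]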
import pytest
from playhouse.test_utils import assert_query_count
from data.registry_model import registry_model
from data.database import Manifest
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
from endpoints.api.tag import RepositoryTag, RestoreTag, ListRepositoryTags
from test.fixtures import *
@pytest.mark.parametrize(
"expiration_time, expected_status",
[
(None, 201),
("aksdjhasd", 400),
],
)
def test_change_tag_expiration_default(expiration_time, expected_status, client, app):
with client_with_identity("devtable", client) as cl:
params = {
"repository": "devtable/simple",
"tag": "latest",
}
request_body = {
"expiration": expiration_time,
}
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
def test_change_tag_expiration(client, app):
with client_with_identity("devtable", client) as cl:
params = {
"repository": "devtable/simple",
"tag": "latest",
}
repo_ref = registry_model.lookup_repository("devtable", "simple")
tag = registry_model.get_repo_tag(repo_ref, "latest")
updated_expiration = tag.lifetime_start_ts + 60 * 60 * 24
request_body = {
"expiration": updated_expiration,
}
conduct_api_call(cl, RepositoryTag, "put", params, request_body, 201)
tag = registry_model.get_repo_tag(repo_ref, "latest")
assert tag.lifetime_end_ts == updated_expiration
@pytest.mark.parametrize(
"manifest_exists,test_tag,expected_status",
[
(True, "-INVALID-TAG-NAME", 400),
(True, ".INVALID-TAG-NAME", 400),
(
True,
"INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG",
400,
),
(False, "newtag", 404),
(True, "generatemanifestfail", None),
(True, "latest", 201),
(True, "newtag", 201),
],
)
def test_move_tag(manifest_exists, test_tag, expected_status, client, app):
with client_with_identity("devtable", client) as cl:
test_image = "unknown"
if manifest_exists:
repo_ref = registry_model.lookup_repository("devtable", "simple")
tag_ref = registry_model.get_repo_tag(repo_ref, "latest")
assert tag_ref
test_image = tag_ref.manifest.digest
params = {"repository": "devtable/simple", "tag": test_tag}
request_body = {"manifest_digest": test_image}
if expected_status is None:
with pytest.raises(Exception):
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
else:
conduct_api_call(cl, RepositoryTag, "put", params, request_body, expected_status)
@pytest.mark.parametrize(
"repo_namespace, repo_name, query_count",
[
("devtable", "simple", 4),
("devtable", "history", 4),
("devtable", "complex", 4),
("devtable", "gargantuan", 4),
("buynlarge", "orgrepo", 6), # +2 for permissions checks.
("buynlarge", "anotherorgrepo", 6), # +2 for permissions checks.
],
)
def test_list_repo_tags(repo_namespace, repo_name, client, query_count, app):
# Pre-cache media type loads to ensure consistent query count.
Manifest.media_type.get_name(1)
params = {"repository": repo_namespace + "/" + repo_name}
with client_with_identity("devtable", client) as cl:
with assert_query_count(query_count):
tags = conduct_api_call(cl, ListRepositoryTags, "get", params).json["tags"]
repo_ref = registry_model.lookup_repository(repo_namespace, repo_name)
history, _ = registry_model.list_repository_tag_history(repo_ref)
assert len(tags) == len(history)
| [
"data.registry_model.registry_model.list_repository_tag_history",
"playhouse.test_utils.assert_query_count",
"data.registry_model.registry_model.get_repo_tag",
"data.registry_model.registry_model.lookup_repository",
"pytest.mark.parametrize",
"endpoints.test.shared.client_with_identity",
"pytest.raises",
"data.database.Manifest.media_type.get_name",
"endpoints.api.test.shared.conduct_api_call"
]
| [((369, 468), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""expiration_time, expected_status"""', "[(None, 201), ('aksdjhasd', 400)]"], {}), "('expiration_time, expected_status', [(None, 201), (\n 'aksdjhasd', 400)])\n", (392, 468), False, 'import pytest\n'), ((1609, 2020), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""manifest_exists,test_tag,expected_status"""', "[(True, '-INVALID-TAG-NAME', 400), (True, '.INVALID-TAG-NAME', 400), (True,\n 'INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG'\n , 400), (False, 'newtag', 404), (True, 'generatemanifestfail', None), (\n True, 'latest', 201), (True, 'newtag', 201)]"], {}), "('manifest_exists,test_tag,expected_status', [(True,\n '-INVALID-TAG-NAME', 400), (True, '.INVALID-TAG-NAME', 400), (True,\n 'INVALID-TAG_NAME-BECAUSE-THIS-IS-WAY-WAY-TOO-LOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOONG'\n , 400), (False, 'newtag', 404), (True, 'generatemanifestfail', None), (\n True, 'latest', 201), (True, 'newtag', 201)])\n", (1632, 2020), False, 'import pytest\n'), ((2953, 3213), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""repo_namespace, repo_name, query_count"""', "[('devtable', 'simple', 4), ('devtable', 'history', 4), ('devtable',\n 'complex', 4), ('devtable', 'gargantuan', 4), ('buynlarge', 'orgrepo', \n 6), ('buynlarge', 'anotherorgrepo', 6)]"], {}), "('repo_namespace, repo_name, query_count', [(\n 'devtable', 'simple', 4), ('devtable', 'history', 4), ('devtable',\n 'complex', 4), ('devtable', 'gargantuan', 4), ('buynlarge', 'orgrepo', \n 6), ('buynlarge', 'anotherorgrepo', 6)])\n", (2976, 3213), False, 'import pytest\n'), ((3475, 3506), 'data.database.Manifest.media_type.get_name', 'Manifest.media_type.get_name', (['(1)'], {}), '(1)\n', (3503, 3506), False, 'from data.database import Manifest\n'), ((594, 634), 'endpoints.test.shared.client_with_identity', 'client_with_identity', (['"""devtable"""', 'client'], {}), "('devtable', client)\n", (614, 634), False, 'from endpoints.test.shared import client_with_identity\n'), ((833, 918), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', (['cl', 'RepositoryTag', '"""put"""', 'params', 'request_body', 'expected_status'], {}), "(cl, RepositoryTag, 'put', params, request_body,\n expected_status)\n", (849, 918), False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((971, 1011), 'endpoints.test.shared.client_with_identity', 'client_with_identity', (['"""devtable"""', 'client'], {}), "('devtable', client)\n", (991, 1011), False, 'from endpoints.test.shared import client_with_identity\n'), ((1142, 1196), 'data.registry_model.registry_model.lookup_repository', 'registry_model.lookup_repository', (['"""devtable"""', '"""simple"""'], {}), "('devtable', 'simple')\n", (1174, 1196), False, 'from data.registry_model import registry_model\n'), ((1211, 1258), 'data.registry_model.registry_model.get_repo_tag', 'registry_model.get_repo_tag', (['repo_ref', '"""latest"""'], {}), "(repo_ref, 'latest')\n", (1238, 1258), False, 'from data.registry_model import registry_model\n'), ((1417, 1486), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', (['cl', 'RepositoryTag', '"""put"""', 'params', 'request_body', '(201)'], {}), "(cl, RepositoryTag, 'put', params, request_body, 201)\n", (1433, 1486), False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((1501, 1548), 
'data.registry_model.registry_model.get_repo_tag', 'registry_model.get_repo_tag', (['repo_ref', '"""latest"""'], {}), "(repo_ref, 'latest')\n", (1528, 1548), False, 'from data.registry_model import registry_model\n'), ((2209, 2249), 'endpoints.test.shared.client_with_identity', 'client_with_identity', (['"""devtable"""', 'client'], {}), "('devtable', client)\n", (2229, 2249), False, 'from endpoints.test.shared import client_with_identity\n'), ((3579, 3619), 'endpoints.test.shared.client_with_identity', 'client_with_identity', (['"""devtable"""', 'client'], {}), "('devtable', client)\n", (3599, 3619), False, 'from endpoints.test.shared import client_with_identity\n'), ((3781, 3840), 'data.registry_model.registry_model.lookup_repository', 'registry_model.lookup_repository', (['repo_namespace', 'repo_name'], {}), '(repo_namespace, repo_name)\n', (3813, 3840), False, 'from data.registry_model import registry_model\n'), ((3862, 3914), 'data.registry_model.registry_model.list_repository_tag_history', 'registry_model.list_repository_tag_history', (['repo_ref'], {}), '(repo_ref)\n', (3904, 3914), False, 'from data.registry_model import registry_model\n'), ((2339, 2393), 'data.registry_model.registry_model.lookup_repository', 'registry_model.lookup_repository', (['"""devtable"""', '"""simple"""'], {}), "('devtable', 'simple')\n", (2371, 2393), False, 'from data.registry_model import registry_model\n'), ((2416, 2463), 'data.registry_model.registry_model.get_repo_tag', 'registry_model.get_repo_tag', (['repo_ref', '"""latest"""'], {}), "(repo_ref, 'latest')\n", (2443, 2463), False, 'from data.registry_model import registry_model\n'), ((2868, 2953), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', (['cl', 'RepositoryTag', '"""put"""', 'params', 'request_body', 'expected_status'], {}), "(cl, RepositoryTag, 'put', params, request_body,\n expected_status)\n", (2884, 2953), False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((3640, 3671), 'playhouse.test_utils.assert_query_count', 'assert_query_count', (['query_count'], {}), '(query_count)\n', (3658, 3671), False, 'from playhouse.test_utils import assert_query_count\n'), ((2718, 2742), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2731, 2742), False, 'import pytest\n'), ((2760, 2845), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', (['cl', 'RepositoryTag', '"""put"""', 'params', 'request_body', 'expected_status'], {}), "(cl, RepositoryTag, 'put', params, request_body,\n expected_status)\n", (2776, 2845), False, 'from endpoints.api.test.shared import conduct_api_call\n'), ((3692, 3747), 'endpoints.api.test.shared.conduct_api_call', 'conduct_api_call', (['cl', 'ListRepositoryTags', '"""get"""', 'params'], {}), "(cl, ListRepositoryTags, 'get', params)\n", (3708, 3747), False, 'from endpoints.api.test.shared import conduct_api_call\n')] |
import json
import os
import random
import requests
from passlib.hash import pbkdf2_sha256 as pbk
from PyQt5.QtSql import QSqlDatabase, QSqlQuery
from pprint import pprint
ENCODING = 'utf-8'
DB_PATH = os.path.join(os.path.curdir, 'inventory.db')
def scrambleWord(word):
"""Randomize the letters in word and return the resulting string."""
word_list = list(word)
random.shuffle(word_list)
word = ''.join(word_list)
return word
def generateItems():
"""Generate a dictionary of retail products and store the data in items.json.
Pulls a list of items and artificially doubles it with scrambled item names.
Each item is given a random PLU, UPC, and department number.
Each dictionary key is the item's PLU.
"""
response = requests.get('https://www.randomlists.com/data/things.json')
json_data = response.json()
items = json_data['RandL']['items']
#double sample size by scrambling item names
scrambled_list = []
for item in items:
scrambled_item = scrambleWord(item)
scrambled_list.append(scrambled_item)
items = items + scrambled_list
data = {}
for item in items:
random.seed(item)
upc = random.randint(100000000000, 999999999999)
plu = random.randint(1000, 9999999)
department = (plu % 7) + 1
print('UPC:{0} | PLU:{1} | Item:{2} | D{3}'.format(upc, plu, item, department))
if plu in data:
print('Duplicate found: {}'.format(plu))
continue
data[plu] = {'upc':upc, 'department':department, 'model':item}
with open('items.json', 'w') as f:
json.dump(data, f)
def generatePO():
"""Create dumby Purchase Orders and store them in pos.json.
Each PO is asigned one random vendor and department number,
along with a random length list of items belonging to said department.
Returns: True if items.json successfully opens, False otherwise.
"""
try:
with open('items.json', 'r') as f:
items_dict = json.load(f)
except FileNotFoundError:
return False
vendors = ['Dyson', 'Ingrammicro', 'LKG', 'Inland', 'Sandisk', 'Seagate', 'Hasbro', 'Mattel',\
'Gear Head', 'Logitech', 'NTE', 'Dell', 'Microsoft', 'Right Stuff', 'Alliance', 'Energizer']
po_dict = {}
for i in range(50):
po_num = 24000000 + random.randint(1, 999999)
if po_num in po_dict:
continue
po_dict[po_num] = {'department': (po_num % 7) + 1, 'items': {}, 'vendor': random.choice(vendors)}
for key in items_dict:
match_found = False
loops = 0
while not match_found:
loops += 1
if loops > 200:
print('\n\nToo many loops.\n\n')
break
po, department = random.choice(list(po_dict.items()))
department = department['department']
print('PO department: {}'.format(department))
print('item plu: {} department: {}'.format(key, items_dict[key]['department']))
if items_dict[key]['department'] == department:
max_count = random.randint(1, 20)
po_dict[po]['items'][key] = max_count
match_found = True
with open('pos.json', 'w') as f:
json.dump(po_dict, f)
return True
def fillDB():
"""Create a database and populate two tables(named items and purchase_order).
The 'items' and 'purchase_order' tables are populated with the data from items.json
and pos.json respectively.
"""
with open('items.json') as f:
data = json.load(f)
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if query.exec_("drop table items"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table items(plu int primary key, upc varchar(12) unique, "
"model varchar(20), department int)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in data:
if query.exec_("insert into items values({}, '{}', '{}', {})".format(key, data[key]['upc'],
data[key]['model'], data[key]['department'])):
print("values({}, {}, {}, {}) successfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted.".format(key, data[key]['upc'], data[key]['model'], data[key]['department']))
print(query.lastError().text())
with open('pos.json') as f:
po_dict = json.load(f)
if query.exec_("drop table purchase_order"):
print('successfully dropped table')
else:
print('unsuccessfully dropped table')
print(query.lastError().text())
if query.exec_("create table purchase_order(po int primary key, vendor varchar(30), "
"department int, items blob)"):
print('success')
else:
print('failure')
print(query.lastError().text())
for key in po_dict:
item_string = json.dumps(po_dict[key]['items'])
item_blob = item_string.encode(ENCODING)
if query.exec_("insert into purchase_order values({}, '{}', {}, '{}')"\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string)):
print("values({}, {}, {}, {}) successfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_string))
else:
print("values({}, {}, {}, {}) unsuccessfully inserted."\
.format(key, po_dict[key]['vendor'], po_dict[key]['department'], item_blob))
print(query.lastError().text())
def createEmployeeTable():
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("drop table employee"):
print(query.lastError().text())
if not query.exec_("create table employee(id int primary key, first_name varchar(10), "\
"last_name varchar(10), posistion int, pass_hash varchar(200))"):
print(query.lastError().text())
if not query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(162973, 'Jon', 'Michie', 2, pbk.hash('Michie'))):
print(query.lastError().text())
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(131901, 'Ben', 'Terry', 3, pbk.hash('Terry')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(150697, 'Daniel', 'Silva', 2, pbk.hash('Silva')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(68412, 'James', 'Hutchetson', 2, pbk.hash('Hutchetson')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(161844, 'MacKenly', 'Gamble', 1, pbk.hash('Gamble')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(141047, 'George', 'Huston', 1, pbk.hash('Huston')))
query.exec_("insert into employee values({}, '{}', '{}', {}, '{}')".\
format(46045, 'Arthur', 'Art', 1, pbk.hash('Art')))
def testHashVerification(name):
db = QSqlDatabase.addDatabase('QSQLITE')
db.setDatabaseName(DB_PATH)
if not db.open():
print('DB could not be opened')
error = QSqlDatabase.lastError()
print(error.text())
return False
query = QSqlQuery()
if not query.exec_("select pass_hash from employee where last_name = '{}'".format(name)):
print(query.lastError().text())
elif not query.next():
print('Table values not found')
else:
pass_hash = query.value(0)
if pbk.verify(name, pass_hash):
print('It\'s a match!')
else:
print('Match not found.')
if __name__ == '__main__':
generateItems()
generatePO()
fillDB()
createEmployeeTable()
testHashVerification('Terry')
| [
"PyQt5.QtSql.QSqlQuery",
"random.choice",
"passlib.hash.pbkdf2_sha256.hash",
"random.shuffle",
"passlib.hash.pbkdf2_sha256.verify",
"json.dumps",
"os.path.join",
"requests.get",
"random.seed",
"PyQt5.QtSql.QSqlDatabase.lastError",
"PyQt5.QtSql.QSqlDatabase.addDatabase",
"json.load",
"random.randint",
"json.dump"
]
| [((203, 247), 'os.path.join', 'os.path.join', (['os.path.curdir', '"""inventory.db"""'], {}), "(os.path.curdir, 'inventory.db')\n", (215, 247), False, 'import os\n'), ((378, 403), 'random.shuffle', 'random.shuffle', (['word_list'], {}), '(word_list)\n', (392, 403), False, 'import random\n'), ((768, 828), 'requests.get', 'requests.get', (['"""https://www.randomlists.com/data/things.json"""'], {}), "('https://www.randomlists.com/data/things.json')\n", (780, 828), False, 'import requests\n'), ((3632, 3667), 'PyQt5.QtSql.QSqlDatabase.addDatabase', 'QSqlDatabase.addDatabase', (['"""QSQLITE"""'], {}), "('QSQLITE')\n", (3656, 3667), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((3865, 3876), 'PyQt5.QtSql.QSqlQuery', 'QSqlQuery', ([], {}), '()\n', (3874, 3876), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((6111, 6146), 'PyQt5.QtSql.QSqlDatabase.addDatabase', 'QSqlDatabase.addDatabase', (['"""QSQLITE"""'], {}), "('QSQLITE')\n", (6135, 6146), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((6343, 6354), 'PyQt5.QtSql.QSqlQuery', 'QSqlQuery', ([], {}), '()\n', (6352, 6354), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((7782, 7817), 'PyQt5.QtSql.QSqlDatabase.addDatabase', 'QSqlDatabase.addDatabase', (['"""QSQLITE"""'], {}), "('QSQLITE')\n", (7806, 7817), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((8014, 8025), 'PyQt5.QtSql.QSqlQuery', 'QSqlQuery', ([], {}), '()\n', (8023, 8025), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((1170, 1187), 'random.seed', 'random.seed', (['item'], {}), '(item)\n', (1181, 1187), False, 'import random\n'), ((1202, 1244), 'random.randint', 'random.randint', (['(100000000000)', '(999999999999)'], {}), '(100000000000, 999999999999)\n', (1216, 1244), False, 'import random\n'), ((1259, 1288), 'random.randint', 'random.randint', (['(1000)', '(9999999)'], {}), '(1000, 9999999)\n', (1273, 1288), False, 'import random\n'), ((1631, 1649), 'json.dump', 'json.dump', (['data', 'f'], {}), '(data, f)\n', (1640, 1649), False, 'import json\n'), ((3296, 3317), 'json.dump', 'json.dump', (['po_dict', 'f'], {}), '(po_dict, f)\n', (3305, 3317), False, 'import json\n'), ((3609, 3621), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3618, 3621), False, 'import json\n'), ((3778, 3802), 'PyQt5.QtSql.QSqlDatabase.lastError', 'QSqlDatabase.lastError', ([], {}), '()\n', (3800, 3802), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((4931, 4943), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4940, 4943), False, 'import json\n'), ((5421, 5454), 'json.dumps', 'json.dumps', (["po_dict[key]['items']"], {}), "(po_dict[key]['items'])\n", (5431, 5454), False, 'import json\n'), ((6257, 6281), 'PyQt5.QtSql.QSqlDatabase.lastError', 'QSqlDatabase.lastError', ([], {}), '()\n', (6279, 6281), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((7928, 7952), 'PyQt5.QtSql.QSqlDatabase.lastError', 'QSqlDatabase.lastError', ([], {}), '()\n', (7950, 7952), False, 'from PyQt5.QtSql import QSqlDatabase, QSqlQuery\n'), ((2029, 2041), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2038, 2041), False, 'import json\n'), ((2375, 2400), 'random.randint', 'random.randint', (['(1)', '(999999)'], {}), '(1, 999999)\n', (2389, 2400), False, 'import random\n'), ((2534, 2556), 'random.choice', 'random.choice', (['vendors'], {}), '(vendors)\n', (2547, 2556), False, 'import random\n'), ((6975, 6992), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', (['"""Terry"""'], {}), "('Terry')\n", (6983, 6992), True, 'from 
passlib.hash import pbkdf2_sha256 as pbk\n'), ((7122, 7139), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', (['"""Silva"""'], {}), "('Silva')\n", (7130, 7139), True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((7272, 7294), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', (['"""Hutchetson"""'], {}), "('Hutchetson')\n", (7280, 7294), True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((7427, 7445), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', (['"""Gamble"""'], {}), "('Gamble')\n", (7435, 7445), True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((7576, 7594), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', (['"""Huston"""'], {}), "('Huston')\n", (7584, 7594), True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((7721, 7736), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', (['"""Art"""'], {}), "('Art')\n", (7729, 7736), True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((8284, 8311), 'passlib.hash.pbkdf2_sha256.verify', 'pbk.verify', (['name', 'pass_hash'], {}), '(name, pass_hash)\n', (8294, 8311), True, 'from passlib.hash import pbkdf2_sha256 as pbk\n'), ((3139, 3160), 'random.randint', 'random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (3153, 3160), False, 'import random\n'), ((6789, 6807), 'passlib.hash.pbkdf2_sha256.hash', 'pbk.hash', (['"""Michie"""'], {}), "('Michie')\n", (6797, 6807), True, 'from passlib.hash import pbkdf2_sha256 as pbk\n')] |
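The inserts above splice values straight into the SQL strings; the same PyQt5 API also supports prepared statements with bound values, which avoids quoting problems. A minimal sketch, assuming the default QSQLITE connection and the employee table created above are still available:
from PyQt5.QtSql import QSqlQuery
from passlib.hash import pbkdf2_sha256 as pbk

query = QSqlQuery()
query.prepare("insert into employee values(?, ?, ?, ?, ?)")
for value in (162973, 'Jon', 'Michie', 2, pbk.hash('Michie')):
    query.addBindValue(value)   # positional placeholders are filled in order
if not query.exec_():
    print(query.lastError().text())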
import requests
from flask import abort, redirect, request, url_for
from lnurl import LnurlWithdrawResponse, handle as handle_lnurl
from lnurl.exceptions import LnurlException
from time import sleep
from lnbits.core import core_app
from lnbits.helpers import Status
from lnbits.settings import WALLET
from ..crud import create_account, get_user, create_wallet, create_payment
@core_app.route("/lnurlwallet")
def lnurlwallet():
memo = "LNbits LNURL funding"
try:
withdraw_res = handle_lnurl(request.args.get("lightning"), response_class=LnurlWithdrawResponse)
except LnurlException:
abort(Status.INTERNAL_SERVER_ERROR, "Could not process withdraw LNURL.")
try:
ok, checking_id, payment_request, error_message = WALLET.create_invoice(withdraw_res.max_sats, memo)
except Exception as e:
ok, error_message = False, str(e)
if not ok:
abort(Status.INTERNAL_SERVER_ERROR, error_message)
r = requests.get(
withdraw_res.callback.base,
params={**withdraw_res.callback.query_params, **{"k1": withdraw_res.k1, "pr": payment_request}},
)
if not r.ok:
abort(Status.INTERNAL_SERVER_ERROR, "Could not process withdraw LNURL.")
for i in range(10):
invoice_status = WALLET.get_invoice_status(checking_id)
sleep(i)
if not invoice_status.paid:
continue
break
user = get_user(create_account().id)
wallet = create_wallet(user_id=user.id)
create_payment(
wallet_id=wallet.id,
checking_id=checking_id,
amount=withdraw_res.max_sats * 1000,
memo=memo,
pending=invoice_status.pending,
)
return redirect(url_for("core.wallet", usr=user.id, wal=wallet.id))
| [
"flask.request.args.get",
"lnbits.settings.WALLET.get_invoice_status",
"requests.get",
"time.sleep",
"flask.url_for",
"flask.abort",
"lnbits.core.core_app.route",
"lnbits.settings.WALLET.create_invoice"
]
| [((382, 412), 'lnbits.core.core_app.route', 'core_app.route', (['"""/lnurlwallet"""'], {}), "('/lnurlwallet')\n", (396, 412), False, 'from lnbits.core import core_app\n'), ((961, 1103), 'requests.get', 'requests.get', (['withdraw_res.callback.base'], {'params': "{**withdraw_res.callback.query_params, **{'k1': withdraw_res.k1, 'pr':\n payment_request}}"}), "(withdraw_res.callback.base, params={**withdraw_res.callback.\n query_params, **{'k1': withdraw_res.k1, 'pr': payment_request}})\n", (973, 1103), False, 'import requests\n'), ((757, 807), 'lnbits.settings.WALLET.create_invoice', 'WALLET.create_invoice', (['withdraw_res.max_sats', 'memo'], {}), '(withdraw_res.max_sats, memo)\n', (778, 807), False, 'from lnbits.settings import WALLET\n'), ((901, 951), 'flask.abort', 'abort', (['Status.INTERNAL_SERVER_ERROR', 'error_message'], {}), '(Status.INTERNAL_SERVER_ERROR, error_message)\n', (906, 951), False, 'from flask import abort, redirect, request, url_for\n'), ((1148, 1220), 'flask.abort', 'abort', (['Status.INTERNAL_SERVER_ERROR', '"""Could not process withdraw LNURL."""'], {}), "(Status.INTERNAL_SERVER_ERROR, 'Could not process withdraw LNURL.')\n", (1153, 1220), False, 'from flask import abort, redirect, request, url_for\n'), ((1271, 1309), 'lnbits.settings.WALLET.get_invoice_status', 'WALLET.get_invoice_status', (['checking_id'], {}), '(checking_id)\n', (1296, 1309), False, 'from lnbits.settings import WALLET\n'), ((1318, 1326), 'time.sleep', 'sleep', (['i'], {}), '(i)\n', (1323, 1326), False, 'from time import sleep\n'), ((1697, 1747), 'flask.url_for', 'url_for', (['"""core.wallet"""'], {'usr': 'user.id', 'wal': 'wallet.id'}), "('core.wallet', usr=user.id, wal=wallet.id)\n", (1704, 1747), False, 'from flask import abort, redirect, request, url_for\n'), ((512, 541), 'flask.request.args.get', 'request.args.get', (['"""lightning"""'], {}), "('lightning')\n", (528, 541), False, 'from flask import abort, redirect, request, url_for\n'), ((616, 688), 'flask.abort', 'abort', (['Status.INTERNAL_SERVER_ERROR', '"""Could not process withdraw LNURL."""'], {}), "(Status.INTERNAL_SERVER_ERROR, 'Could not process withdraw LNURL.')\n", (621, 688), False, 'from flask import abort, redirect, request, url_for\n')] |
# Import libraries
import argparse
from azureml.core import Run
import joblib
import json
import os
import pandas as pd
import shutil
# Import functions from train.py
from train import split_data, train_model, get_model_metrics
# Get the output folder for the model from the '--output_folder' parameter
parser = argparse.ArgumentParser()
parser.add_argument('--output_folder', type=str, dest='output_folder', default="outputs")
args = parser.parse_args()
print(args)
output_folder = args.output_folder
# Get the experiment run context
run = Run.get_context()
# load the safe driver prediction dataset
train_df = pd.read_csv('porto_seguro_safe_driver_prediction_input.csv')
# Load the parameters for training the model from the file
with open("parameters.json") as f:
pars = json.load(f)
parameters = pars["training"]
# Log each of the parameters to the run
for param_name, param_value in parameters.items():
run.log(param_name, param_value)
# Call the functions defined in this file
train_data, valid_data = split_data(train_df)
data = [train_data, valid_data]
model = train_model(data, parameters)
# Print the resulting metrics for the model
model_metrics = get_model_metrics(model, data)
print(model_metrics)
for k, v in model_metrics.items():
run.log(k, v)
# Save the trained model to the output folder
os.makedirs(output_folder, exist_ok=True)
output_path = output_folder + "/porto_seguro_safe_driver_model.pkl"
joblib.dump(value=model, filename=output_path)
run.complete()
| [
"train.train_model",
"train.split_data",
"argparse.ArgumentParser",
"pandas.read_csv",
"os.makedirs",
"azureml.core.Run.get_context",
"train.get_model_metrics",
"json.load",
"joblib.dump"
]
| [((314, 339), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (337, 339), False, 'import argparse\n'), ((546, 563), 'azureml.core.Run.get_context', 'Run.get_context', ([], {}), '()\n', (561, 563), False, 'from azureml.core import Run\n'), ((618, 678), 'pandas.read_csv', 'pd.read_csv', (['"""porto_seguro_safe_driver_prediction_input.csv"""'], {}), "('porto_seguro_safe_driver_prediction_input.csv')\n", (629, 678), True, 'import pandas as pd\n'), ((1029, 1049), 'train.split_data', 'split_data', (['train_df'], {}), '(train_df)\n', (1039, 1049), False, 'from train import split_data, train_model, get_model_metrics\n'), ((1090, 1119), 'train.train_model', 'train_model', (['data', 'parameters'], {}), '(data, parameters)\n', (1101, 1119), False, 'from train import split_data, train_model, get_model_metrics\n'), ((1181, 1211), 'train.get_model_metrics', 'get_model_metrics', (['model', 'data'], {}), '(model, data)\n', (1198, 1211), False, 'from train import split_data, train_model, get_model_metrics\n'), ((1334, 1375), 'os.makedirs', 'os.makedirs', (['output_folder'], {'exist_ok': '(True)'}), '(output_folder, exist_ok=True)\n', (1345, 1375), False, 'import os\n'), ((1444, 1490), 'joblib.dump', 'joblib.dump', ([], {'value': 'model', 'filename': 'output_path'}), '(value=model, filename=output_path)\n', (1455, 1490), False, 'import joblib\n'), ((785, 797), 'json.load', 'json.load', (['f'], {}), '(f)\n', (794, 797), False, 'import json\n')] |
from lua_imports import lua_importer
lua_importer.register()
| [
"lua_imports.lua_importer.register"
]
| [((38, 61), 'lua_imports.lua_importer.register', 'lua_importer.register', ([], {}), '()\n', (59, 61), False, 'from lua_imports import lua_importer\n')] |
from mock_gripper_op import MockGripType
from std_msgs.msg import Bool
from erdos.op import Op
from erdos.data_stream import DataStream
from erdos.message import Message
class MockGraspObjectOperator(Op):
"""
Sends a "close" action to the gripper.
"""
gripper_stream = "gripper-output-stream"
action_complete_stream_name = "grasp-action-complete-stream"
def __init__(self, name):
"""
        Initializes a lock which blocks future actions from being sent until
        the past actions are completed.
"""
super(MockGraspObjectOperator, self).__init__(name)
self.move_ahead_lock = True
@staticmethod
def setup_streams(input_streams, trigger_stream_name, gripper_stream_name):
"""
Registers callbacks on the given streams and returns two streams, one
of which sends the action to the gripper and the other returns a
message upon the completion of the action.
"""
input_streams.filter_name(trigger_stream_name)\
.add_callback(MockGraspObjectOperator.grasp_object)
input_streams.filter_name(gripper_stream_name)\
.add_callback(MockGraspObjectOperator.release_lock)
return [
DataStream(
data_type=MockGripType,
name=MockGraspObjectOperator.gripper_stream),
DataStream(
data_type=Bool,
name=MockGraspObjectOperator.action_complete_stream_name)
]
def grasp_object(self, msg):
"""
Sends a close action to the gripper and waits for its completion.
"""
mock_grasp_object = MockGripType("close")
mock_grasp_msg = Message(mock_grasp_object, msg.timestamp)
self.move_ahead_lock = False
self.get_output_stream(
MockGraspObjectOperator.gripper_stream).send(mock_grasp_msg)
while not self.move_ahead_lock:
pass
action_complete_msg = Message(True, msg.timestamp)
self.get_output_stream(
MockGraspObjectOperator.action_complete_stream_name).send(
action_complete_msg)
def release_lock(self, msg):
"""
Releases the lock so that new actions can be sent to the gripper.
"""
self.move_ahead_lock = True
def execute(self):
self.spin()
| [
"erdos.message.Message",
"erdos.data_stream.DataStream",
"mock_gripper_op.MockGripType"
]
| [((1648, 1669), 'mock_gripper_op.MockGripType', 'MockGripType', (['"""close"""'], {}), "('close')\n", (1660, 1669), False, 'from mock_gripper_op import MockGripType\n'), ((1695, 1736), 'erdos.message.Message', 'Message', (['mock_grasp_object', 'msg.timestamp'], {}), '(mock_grasp_object, msg.timestamp)\n', (1702, 1736), False, 'from erdos.message import Message\n'), ((1967, 1995), 'erdos.message.Message', 'Message', (['(True)', 'msg.timestamp'], {}), '(True, msg.timestamp)\n', (1974, 1995), False, 'from erdos.message import Message\n'), ((1234, 1313), 'erdos.data_stream.DataStream', 'DataStream', ([], {'data_type': 'MockGripType', 'name': 'MockGraspObjectOperator.gripper_stream'}), '(data_type=MockGripType, name=MockGraspObjectOperator.gripper_stream)\n', (1244, 1313), False, 'from erdos.data_stream import DataStream\n'), ((1360, 1449), 'erdos.data_stream.DataStream', 'DataStream', ([], {'data_type': 'Bool', 'name': 'MockGraspObjectOperator.action_complete_stream_name'}), '(data_type=Bool, name=MockGraspObjectOperator.\n action_complete_stream_name)\n', (1370, 1449), False, 'from erdos.data_stream import DataStream\n')] |
import torch
import sys
import os
sys.path.append(os.getcwd())
from utils.helper_modules import Sequential2
from unimodals.common_models import Linear, MLP, MaxOut_MLP
from datasets.imdb.get_data import get_dataloader
from fusions.common_fusions import Concat
from objective_functions.objectives_for_supervised_learning import MFM_objective
from objective_functions.recon import sigmloss1d
from training_structures.Supervised_Learning import train, test
filename = "best_mfm.pt"
traindata, validdata, testdata = get_dataloader(
"../video/multimodal_imdb.hdf5", "../video/mmimdb", vgg=True, batch_size=128)
classes = 23
n_latent = 512
fuse = Sequential2(Concat(), MLP(2*n_latent, n_latent, n_latent//2)).cuda()
encoders = [MaxOut_MLP(512, 512, 300, n_latent, False).cuda(
), MaxOut_MLP(512, 1024, 4096, n_latent, False).cuda()]
head = Linear(n_latent//2, classes).cuda()
decoders = [MLP(n_latent, 600, 300).cuda(), MLP(n_latent, 2048, 4096).cuda()]
intermediates = [MLP(n_latent, n_latent//2, n_latent//2).cuda(),
MLP(n_latent, n_latent//2, n_latent//2).cuda()]
recon_loss = MFM_objective(2.0, [sigmloss1d, sigmloss1d], [
1.0, 1.0], criterion=torch.nn.BCEWithLogitsLoss())
train(encoders, fuse, head, traindata, validdata, 1000, decoders+intermediates, early_stop=True, task="multilabel",
objective_args_dict={"decoders": decoders, "intermediates": intermediates}, save=filename, optimtype=torch.optim.AdamW, lr=5e-3, weight_decay=0.01, objective=recon_loss)
print("Testing:")
model = torch.load(filename).cuda()
test(model, testdata, method_name="MFM", dataset="imdb",
criterion=torch.nn.BCEWithLogitsLoss(), task="multilabel")
| [
"unimodals.common_models.MaxOut_MLP",
"fusions.common_fusions.Concat",
"torch.load",
"os.getcwd",
"unimodals.common_models.Linear",
"training_structures.Supervised_Learning.train",
"datasets.imdb.get_data.get_dataloader",
"torch.nn.BCEWithLogitsLoss",
"unimodals.common_models.MLP"
]
| [((515, 611), 'datasets.imdb.get_data.get_dataloader', 'get_dataloader', (['"""../video/multimodal_imdb.hdf5"""', '"""../video/mmimdb"""'], {'vgg': '(True)', 'batch_size': '(128)'}), "('../video/multimodal_imdb.hdf5', '../video/mmimdb', vgg=True,\n batch_size=128)\n", (529, 611), False, 'from datasets.imdb.get_data import get_dataloader\n'), ((1227, 1533), 'training_structures.Supervised_Learning.train', 'train', (['encoders', 'fuse', 'head', 'traindata', 'validdata', '(1000)', '(decoders + intermediates)'], {'early_stop': '(True)', 'task': '"""multilabel"""', 'objective_args_dict': "{'decoders': decoders, 'intermediates': intermediates}", 'save': 'filename', 'optimtype': 'torch.optim.AdamW', 'lr': '(0.005)', 'weight_decay': '(0.01)', 'objective': 'recon_loss'}), "(encoders, fuse, head, traindata, validdata, 1000, decoders +\n intermediates, early_stop=True, task='multilabel', objective_args_dict=\n {'decoders': decoders, 'intermediates': intermediates}, save=filename,\n optimtype=torch.optim.AdamW, lr=0.005, weight_decay=0.01, objective=\n recon_loss)\n", (1232, 1533), False, 'from training_structures.Supervised_Learning import train, test\n'), ((51, 62), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (60, 62), False, 'import os\n'), ((842, 872), 'unimodals.common_models.Linear', 'Linear', (['(n_latent // 2)', 'classes'], {}), '(n_latent // 2, classes)\n', (848, 872), False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((1196, 1224), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (1222, 1224), False, 'import torch\n'), ((1546, 1566), 'torch.load', 'torch.load', (['filename'], {}), '(filename)\n', (1556, 1566), False, 'import torch\n'), ((1646, 1674), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (1672, 1674), False, 'import torch\n'), ((661, 669), 'fusions.common_fusions.Concat', 'Concat', ([], {}), '()\n', (667, 669), False, 'from fusions.common_fusions import Concat\n'), ((671, 713), 'unimodals.common_models.MLP', 'MLP', (['(2 * n_latent)', 'n_latent', '(n_latent // 2)'], {}), '(2 * n_latent, n_latent, n_latent // 2)\n', (674, 713), False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((730, 772), 'unimodals.common_models.MaxOut_MLP', 'MaxOut_MLP', (['(512)', '(512)', '(300)', 'n_latent', '(False)'], {}), '(512, 512, 300, n_latent, False)\n', (740, 772), False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((782, 826), 'unimodals.common_models.MaxOut_MLP', 'MaxOut_MLP', (['(512)', '(1024)', '(4096)', 'n_latent', '(False)'], {}), '(512, 1024, 4096, n_latent, False)\n', (792, 826), False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((891, 914), 'unimodals.common_models.MLP', 'MLP', (['n_latent', '(600)', '(300)'], {}), '(n_latent, 600, 300)\n', (894, 914), False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((923, 948), 'unimodals.common_models.MLP', 'MLP', (['n_latent', '(2048)', '(4096)'], {}), '(n_latent, 2048, 4096)\n', (926, 948), False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((974, 1017), 'unimodals.common_models.MLP', 'MLP', (['n_latent', '(n_latent // 2)', '(n_latent // 2)'], {}), '(n_latent, n_latent // 2, n_latent // 2)\n', (977, 1017), False, 'from unimodals.common_models import Linear, MLP, MaxOut_MLP\n'), ((1039, 1082), 'unimodals.common_models.MLP', 'MLP', (['n_latent', '(n_latent // 2)', '(n_latent // 2)'], {}), '(n_latent, n_latent // 2, n_latent // 2)\n', (1042, 1082), False, 'from 
unimodals.common_models import Linear, MLP, MaxOut_MLP\n')] |
# Generated by Django 4.0.2 on 2022-06-01 04:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=50)),
('isexist', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Song',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='', max_length=50)),
('lyrics', models.CharField(default='', max_length=5000)),
('url', models.CharField(blank=True, default='', max_length=50, null=True)),
('isexist', models.BooleanField(default=True)),
('channel', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='song_channel', to='subeana.channel')),
('imitate', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='song_imitate', to='subeana.song')),
],
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BigAutoField",
"django.db.models.BooleanField"
]
| [((336, 432), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (355, 432), False, 'from django.db import migrations, models\n'), ((456, 499), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(50)'}), "(default='', max_length=50)\n", (472, 499), False, 'from django.db import migrations, models\n'), ((530, 563), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (549, 563), False, 'from django.db import migrations, models\n'), ((693, 789), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (712, 789), False, 'from django.db import migrations, models\n'), ((814, 857), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(50)'}), "(default='', max_length=50)\n", (830, 857), False, 'from django.db import migrations, models\n'), ((887, 932), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(5000)'}), "(default='', max_length=5000)\n", (903, 932), False, 'from django.db import migrations, models\n'), ((959, 1025), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(50)', 'null': '(True)'}), "(blank=True, default='', max_length=50, null=True)\n", (975, 1025), False, 'from django.db import migrations, models\n'), ((1056, 1089), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1075, 1089), False, 'from django.db import migrations, models\n'), ((1120, 1264), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'related_name': '"""song_channel"""', 'to': '"""subeana.channel"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.DO_NOTHING, related_name='song_channel', to='subeana.channel')\n", (1137, 1264), False, 'from django.db import migrations, models\n'), ((1290, 1431), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'related_name': '"""song_imitate"""', 'to': '"""subeana.song"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.DO_NOTHING, related_name='song_imitate', to='subeana.song')\n", (1307, 1431), False, 'from django.db import migrations, models\n')] |
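For readability, the migration above corresponds roughly to the following models; this is reconstructed from the CreateModel operations, so anything not listed there (Meta options, the default BigAutoField primary keys) is assumed rather than known.
from django.db import models

class Channel(models.Model):
    name = models.CharField(max_length=50, default='')
    isexist = models.BooleanField(default=True)

class Song(models.Model):
    title = models.CharField(max_length=50, default='')
    lyrics = models.CharField(max_length=5000, default='')
    url = models.CharField(max_length=50, default='', blank=True, null=True)
    isexist = models.BooleanField(default=True)
    channel = models.ForeignKey(Channel, on_delete=models.DO_NOTHING,
                                related_name='song_channel', blank=True, null=True)
    imitate = models.ForeignKey('self', on_delete=models.DO_NOTHING,
                                related_name='song_imitate', blank=True, null=True)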
import matplotlib.pyplot as plt
import math
xtab = []
ytab = []
for i in range(0, 628):
# Calculate polar coordinates for provided equation
phi = float(i) / 100.0
r = 4 * math.cos(2 * phi)
# Convert to Cartesian and store in lists
x = r * math.cos(phi)
y = r * math.sin(phi)
xtab.append(x)
ytab.append(y)
plt.plot(xtab, ytab)
plt.show() | [
"math.cos",
"math.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
]
| [((341, 361), 'matplotlib.pyplot.plot', 'plt.plot', (['xtab', 'ytab'], {}), '(xtab, ytab)\n', (349, 361), True, 'import matplotlib.pyplot as plt\n'), ((362, 372), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (370, 372), True, 'import matplotlib.pyplot as plt\n'), ((186, 203), 'math.cos', 'math.cos', (['(2 * phi)'], {}), '(2 * phi)\n', (194, 203), False, 'import math\n'), ((262, 275), 'math.cos', 'math.cos', (['phi'], {}), '(phi)\n', (270, 275), False, 'import math\n'), ((288, 301), 'math.sin', 'math.sin', (['phi'], {}), '(phi)\n', (296, 301), False, 'import math\n')] |
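The same rose curve r = 4·cos(2φ) can be drawn without the explicit loop; an equivalent vectorized sketch over the same range of φ:
import numpy as np
import matplotlib.pyplot as plt

phi = np.arange(0, 6.28, 0.01)   # same samples as the loop above: 0.00, 0.01, ..., 6.27
r = 4 * np.cos(2 * phi)
plt.plot(r * np.cos(phi), r * np.sin(phi))
plt.show()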
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:<EMAIL>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2003-2010 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:<EMAIL>
# copyright 2003-2010 <NAME>, all rights reserved.
# contact mailto:<EMAIL>
#
# This file is part of logilab-astng.
#
# logilab-astng is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# logilab-astng is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-astng. If not, see <http://www.gnu.org/licenses/>.
"""
logilab.astng packaging information
"""
distname = 'logilab-astng'
modname = 'astng'
subpackage_of = 'logilab'
numversion = (0, 20, 1)
version = '.'.join([str(num) for num in numversion])
install_requires = ['logilab-common >= 0.49.0']
pyversions = ["2.3", "2.4", "2.5", '2.6']
license = 'LGPL'
author = 'Logilab'
author_email = '<EMAIL>'
mailinglist = "mailto://%s" % author_email
web = "http://www.logilab.org/project/%s" % distname
ftp = "ftp://ftp.logilab.org/pub/%s" % modname
short_desc = "rebuild a new abstract syntax tree from Python's ast"
long_desc = """The aim of this module is to provide a common base \
representation of python source code for projects such as pychecker, pyreverse,
pylint... Well, actually the development of this library is essentially
governed by pylint's needs.
It rebuilds the tree generated by the compiler.ast [1] module (python <= 2.4)
or by the builtin _ast module (python >= 2.5) by recursively walking down the
AST and building an extended ast (let's call it astng ;). The new node classes
have additional methods and attributes for different usages.
Furthermore, astng builds partial trees by inspecting living objects."""
from os.path import join
include_dirs = [join('test', 'regrtest_data'),
join('test', 'data'), join('test', 'data2')]
| [
"os.path.join"
]
| [((2947, 2976), 'os.path.join', 'join', (['"""test"""', '"""regrtest_data"""'], {}), "('test', 'regrtest_data')\n", (2951, 2976), False, 'from os.path import join\n'), ((2994, 3014), 'os.path.join', 'join', (['"""test"""', '"""data"""'], {}), "('test', 'data')\n", (2998, 3014), False, 'from os.path import join\n'), ((3016, 3037), 'os.path.join', 'join', (['"""test"""', '"""data2"""'], {}), "('test', 'data2')\n", (3020, 3037), False, 'from os.path import join\n')] |
import fire
import gtfparse
from pathlib import Path
GENCODE_CATEGORY_MAP = {
'IG_C_gene': 'protein_coding',
'IG_D_gene': 'protein_coding',
'IG_J_gene': 'protein_coding',
'IG_V_gene': 'protein_coding',
'IG_LV_gene': 'protein_coding',
'TR_C_gene': 'protein_coding',
'TR_J_gene': 'protein_coding',
'TR_V_gene': 'protein_coding',
'TR_D_gene': 'protein_coding',
'TEC': 'protein_coding',
'nonsense_mediated_decay': 'protein_coding',
'non_stop_decay': 'protein_coding',
'retained_intron': 'lncRNA',
'protein_coding': 'protein_coding',
'ambiguous_orf': 'lncRNA',
'Mt_rRNA': 'ncRNA',
'Mt_tRNA': 'ncRNA',
'miRNA': 'ncRNA',
'misc_RNA': 'ncRNA',
'rRNA': 'ncRNA',
'snRNA': 'ncRNA',
'snoRNA': 'ncRNA',
'ribozyme': 'ncRNA',
'sRNA': 'ncRNA',
'scaRNA': 'ncRNA',
'scRNA': 'ncRNA',
'non_coding': 'lncRNA',
'known_ncrna': 'ncRNA',
'3prime_overlapping_ncrna': 'lncRNA',
'3prime_overlapping_ncRNA': 'lncRNA',
'vaultRNA': 'ncRNA',
'processed_transcript': 'lncRNA',
'lincRNA': 'lncRNA',
'macro_lncRNA': 'lncRNA',
'sense_intronic': 'lncRNA',
'sense_overlapping': 'lncRNA',
'antisense': 'lncRNA',
'antisense_RNA': 'lncRNA',
'bidirectional_promoter_lncRNA': 'lncRNA',
'IG_pseudogene': 'pseudogene',
'IG_D_pseudogene': 'pseudogene',
'IG_C_pseudogene': 'pseudogene',
'IG_J_pseudogene': 'pseudogene',
'IG_V_pseudogene': 'pseudogene',
'TR_V_pseudogene': 'pseudogene',
'TR_J_pseudogene': 'pseudogene',
'Mt_tRNA_pseudogene': 'pseudogene',
'tRNA_pseudogene': 'pseudogene',
'snoRNA_pseudogene': 'pseudogene',
'snRNA_pseudogene': 'pseudogene',
'scRNA_pseudogene': 'pseudogene',
'rRNA_pseudogene': 'pseudogene',
'misc_RNA_pseudogene': 'pseudogene',
'miRNA_pseudogene': 'pseudogene',
'pseudogene': 'pseudogene',
'processed_pseudogene': 'pseudogene',
'polymorphic_pseudogene': 'pseudogene',
'retrotransposed': 'pseudogene',
'transcribed_processed_pseudogene': 'pseudogene',
'transcribed_unprocessed_pseudogene': 'pseudogene',
'transcribed_unitary_pseudogene': 'pseudogene',
'translated_processed_pseudogene': 'pseudogene',
'translated_unprocessed_pseudogene': 'pseudogene',
'unitary_pseudogene': 'pseudogene',
'unprocessed_pseudogene': 'pseudogene',
'novel_lncRNA': 'lncRNA',
'TUCP': 'TUCP',
'lncRNA': 'lncRNA'
}
def simplify_gene_type(gene_type):
if gene_type in GENCODE_CATEGORY_MAP:
sim_type = GENCODE_CATEGORY_MAP.get(gene_type)
if sim_type == 'lncRNA':
sim_type = f'annotated_{sim_type}'
elif sim_type == 'ncRNA':
sim_type = f'other_{sim_type}'
else:
pass
return sim_type
else:
raise ValueError(gene_type)
def dfline2gtfline(dfline):
basic_inf = dfline[:8]
basic_inf.fillna('.', inplace=True)
basic_inf.frame = '.'
basic_inf_list = [str(each) for each in basic_inf]
basic_inf_line = '\t'.join(basic_inf_list)
attr_inf = dfline[8:]
attr_inf_list = []
for key, val in attr_inf.items():
if val:
attr_inf_list.append(f'{key} "{val}";')
attr_inf_line = ' '.join(attr_inf_list)
return f'{basic_inf_line}\t{attr_inf_line}\n'
def split_gtf(gtf, outdir, novel=False):
gtf_df = gtfparse.read_gtf(gtf)
if 'gene_type' in gtf_df.columns:
gtf_df.loc[:, 'gene_biotype'] = gtf_df.gene_type
gtf_df.drop('gene_type', axis=1, inplace=True)
elif 'gene_biotype' in gtf_df.columns:
pass
else:
gtf_df.loc[:, 'gene_biotype'] = 'protein_coding'
type_label = 'gene_biotype'
if novel:
gtf_df.loc[
:, type_label] = gtf_df.loc[:, type_label].map(
GENCODE_CATEGORY_MAP)
else:
gtf_df.loc[
:, type_label] = gtf_df.loc[:, type_label].map(
simplify_gene_type)
outdir = Path(outdir)
outdir.mkdir(parents=True, exist_ok=True)
for gt, grp in gtf_df.groupby(type_label):
gt_file = outdir / f'{gt}.gtf'
with open(gt_file, 'w') as gt_inf:
for idx in grp.index:
outline = dfline2gtfline(grp.loc[idx])
gt_inf.write(outline)
if __name__ == '__main__':
fire.Fire(split_gtf)
| [
"gtfparse.read_gtf",
"fire.Fire",
"pathlib.Path"
]
| [((3371, 3393), 'gtfparse.read_gtf', 'gtfparse.read_gtf', (['gtf'], {}), '(gtf)\n', (3388, 3393), False, 'import gtfparse\n'), ((3969, 3981), 'pathlib.Path', 'Path', (['outdir'], {}), '(outdir)\n', (3973, 3981), False, 'from pathlib import Path\n'), ((4318, 4338), 'fire.Fire', 'fire.Fire', (['split_gtf'], {}), '(split_gtf)\n', (4327, 4338), False, 'import fire\n')] |
import math
class Q(object):
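    """Rational number kept in lowest terms as numerator a and denominator b."""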
def __init__(self,a,b=1):
gcd=math.gcd(a,b)
self.a=a//gcd
self.b=b//gcd
def __repr__(self):
if self.b==1:
return str(self.a)
return f'{self.a}/{self.b}'
def __add__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d+b*c,b*d)
def __sub__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d-b*c,b*d)
def __mul__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*c,b*d)
def __truediv__(self,q):
a=self.a
b=self.b
c=q.a
d=q.b
return Q(a*d,b*c)
q1=Q(1,2)
q2=Q(1,3)
print(q1/q2) | [
"math.gcd"
]
| [((76, 90), 'math.gcd', 'math.gcd', (['a', 'b'], {}), '(a, b)\n', (84, 90), False, 'import math\n')] |
from __future__ import unicode_literals
import glob
import os
from dbdiff.fixture import Fixture
from .base import TestImportBase, FixtureDir
from ..settings import DATA_DIR
class TestImport(TestImportBase):
"""Load test."""
def test_single_city(self):
"""Load single city."""
fixture_dir = FixtureDir('import')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city',
'angouleme_translations'
)
Fixture(fixture_dir.get_file_path('angouleme.json')).assertNoDiff()
def test_single_city_zip(self):
"""Load single city."""
filelist = glob.glob(os.path.join(DATA_DIR, "angouleme_*.txt"))
for f in filelist:
os.remove(f)
fixture_dir = FixtureDir('import_zip')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city',
'angouleme_translations',
file_type="zip"
)
Fixture(FixtureDir('import').get_file_path('angouleme.json')).assertNoDiff()
def test_city_wrong_timezone(self):
"""Load single city with wrong timezone."""
fixture_dir = FixtureDir('import')
self.import_data(
fixture_dir,
'angouleme_country',
'angouleme_region',
'angouleme_subregion',
'angouleme_city_wtz',
'angouleme_translations'
)
Fixture(fixture_dir.get_file_path('angouleme_wtz.json')).assertNoDiff()
from ..loading import get_cities_model
city_model = get_cities_model('City')
cities = city_model.objects.all()
for city in cities:
print(city.get_timezone_info().zone)
| [
"os.path.join",
"os.remove"
]
| [((743, 784), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""angouleme_*.txt"""'], {}), "(DATA_DIR, 'angouleme_*.txt')\n", (755, 784), False, 'import os\n'), ((825, 837), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (834, 837), False, 'import os\n')] |
from sklearn.mixture import GaussianMixture
import operator
import numpy as np
import math
class GMMSet:
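    """One GaussianMixture per enrolled label; prediction returns the label of the best-scoring model."""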
def __init__(self, gmm_order = 32):
self.gmms = []
self.gmm_order = gmm_order
self.y = []
def fit_new(self, x, label):
self.y.append(label)
gmm = GaussianMixture(self.gmm_order)
gmm.fit(x)
self.gmms.append(gmm)
def gmm_score(self, gmm, x):
return np.sum(gmm.score(x))
@staticmethod
def softmax(scores):
scores_sum = sum([math.exp(i) for i in scores])
score_max = math.exp(max(scores))
return round(score_max / scores_sum, 3)
def predict_one(self, x):
scores = [self.gmm_score(gmm, x) / len(x) for gmm in self.gmms]
p = sorted(enumerate(scores), key=operator.itemgetter(1), reverse=True)
p = [(str(self.y[i]), y, p[0][1] - y) for i, y in p]
result = [(self.y[index], value) for (index, value) in enumerate(scores)]
p = max(result, key=operator.itemgetter(1))
softmax_score = self.softmax(scores)
return p[0], softmax_score
def before_pickle(self):
pass
def after_pickle(self):
pass
| [
"operator.itemgetter",
"math.exp",
"sklearn.mixture.GaussianMixture"
]
| [((311, 342), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', (['self.gmm_order'], {}), '(self.gmm_order)\n', (326, 342), False, 'from sklearn.mixture import GaussianMixture\n'), ((532, 543), 'math.exp', 'math.exp', (['i'], {}), '(i)\n', (540, 543), False, 'import math\n'), ((798, 820), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (817, 820), False, 'import operator\n'), ((1007, 1029), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1026, 1029), False, 'import operator\n')] |
from typing import Sequence
from eth.constants import ZERO_HASH32
from eth_typing import Hash32
import ssz
from ssz.sedes import Vector, bytes32
from eth2.configs import Eth2Config
from .defaults import default_tuple, default_tuple_of_size
class HistoricalBatch(ssz.Serializable):
fields = [("block_roots", Vector(bytes32, 1)), ("state_roots", Vector(bytes32, 1))]
def __init__(
self,
*,
block_roots: Sequence[Hash32] = default_tuple,
state_roots: Sequence[Hash32] = default_tuple,
config: Eth2Config = None
) -> None:
if config:
# try to provide sane defaults
if block_roots == default_tuple:
block_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_HASH32
)
if state_roots == default_tuple:
state_roots = default_tuple_of_size(
config.SLOTS_PER_HISTORICAL_ROOT, ZERO_HASH32
)
super().__init__(block_roots=block_roots, state_roots=state_roots)
| [
"ssz.sedes.Vector"
]
| [((317, 335), 'ssz.sedes.Vector', 'Vector', (['bytes32', '(1)'], {}), '(bytes32, 1)\n', (323, 335), False, 'from ssz.sedes import Vector, bytes32\n'), ((354, 372), 'ssz.sedes.Vector', 'Vector', (['bytes32', '(1)'], {}), '(bytes32, 1)\n', (360, 372), False, 'from ssz.sedes import Vector, bytes32\n')] |
import os
from pydantic import BaseSettings
class Settings(BaseSettings):
DEBUG: bool
DATABASE_URL: str
class Config:
env_file = os.getenv("CONFIG_FILE", ".env")
| [
"os.getenv"
]
| [((153, 185), 'os.getenv', 'os.getenv', (['"""CONFIG_FILE"""', '""".env"""'], {}), "('CONFIG_FILE', '.env')\n", (162, 185), False, 'import os\n')] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import itertools as it
from django.db import models, migrations
def convert_status(apps, schema_editor):
''' Migrate Visit.skipped and ScheduledPhoneCall.skipped -> status
(pending,missed,deleted,attended)
'''
Visit = apps.get_model("contacts","Visit")
ScheduledPhoneCall = apps.get_model("contacts","ScheduledPhoneCall")
for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
if obj.skipped is None:
obj.status = 'pending'
elif obj.skipped == False:
obj.status = 'attended'
elif obj.skipped == True:
obj.status = 'missed'
obj.save()
def unconvert_status(apps, schema_editor):
''' Reverse function sets skipped based on status'''
Visit = apps.get_model("contacts","Visit")
ScheduledPhoneCall = apps.get_model("contacts","ScheduledPhoneCall")
for obj in it.chain(Visit.objects.all(), ScheduledPhoneCall.objects.all()):
if obj.status == 'pending':
obj.skipped = None
elif obj.status == 'attended':
obj.skipped = False
elif obj.status == 'missed':
obj.skipped = True
obj.save()
class Migration(migrations.Migration):
dependencies = [
('contacts', '0005_auto_add_visit_status'),
]
operations = [
migrations.RunPython(convert_status,unconvert_status),
]
| [
"django.db.migrations.RunPython"
]
| [((1395, 1449), 'django.db.migrations.RunPython', 'migrations.RunPython', (['convert_status', 'unconvert_status'], {}), '(convert_status, unconvert_status)\n', (1415, 1449), False, 'from django.db import models, migrations\n')] |
import os
import sys
from datetime import time
import unittest
sys.path.append(
os.path.dirname(
os.path.dirname(os.path.join("..", "..", "..", os.path.dirname("__file__")))
)
)
from core.controller import BaseTimeRangeController
class TestTimeRangeController(unittest.TestCase):
def test_time_range(self):
start_at = time(10, 0, 0)
end_at = time(12, 0, 0)
time_range_controller = BaseTimeRangeController(start_at, end_at)
time_now = time(11, 0, 0)
time_range_controller.set_current_time(time_now)
self.assertTrue(time_range_controller.action)
time_now = time(12, 15, 0)
time_range_controller.set_current_time(time_now)
self.assertFalse(time_range_controller.action)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"core.controller.BaseTimeRangeController",
"os.path.dirname",
"datetime.time"
]
| [((797, 812), 'unittest.main', 'unittest.main', ([], {}), '()\n', (810, 812), False, 'import unittest\n'), ((349, 363), 'datetime.time', 'time', (['(10)', '(0)', '(0)'], {}), '(10, 0, 0)\n', (353, 363), False, 'from datetime import time\n'), ((381, 395), 'datetime.time', 'time', (['(12)', '(0)', '(0)'], {}), '(12, 0, 0)\n', (385, 395), False, 'from datetime import time\n'), ((428, 469), 'core.controller.BaseTimeRangeController', 'BaseTimeRangeController', (['start_at', 'end_at'], {}), '(start_at, end_at)\n', (451, 469), False, 'from core.controller import BaseTimeRangeController\n'), ((490, 504), 'datetime.time', 'time', (['(11)', '(0)', '(0)'], {}), '(11, 0, 0)\n', (494, 504), False, 'from datetime import time\n'), ((636, 651), 'datetime.time', 'time', (['(12)', '(15)', '(0)'], {}), '(12, 15, 0)\n', (640, 651), False, 'from datetime import time\n'), ((157, 184), 'os.path.dirname', 'os.path.dirname', (['"""__file__"""'], {}), "('__file__')\n", (172, 184), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import json
import os
import pyminifier
try:
import io as StringIO
except ImportError:
import cStringIO as StringIO # lint:ok
# Check to see if slimit or some other minification library is installed and
# Set minify equal to slimit's minify function.
try:
import slimit
js_minify = slimit.minify
except ImportError as error:
print(error)
js_minify = slimit = None
###############################################################################
def process_unittest(filename):
"""Process a VFS filename for Brython."""
print("Generating {}".format(filename))
nb = 0
nb_err = 0
_main_root = os.path.dirname(filename)
_VFS = {}
for _mydir in ("Lib",):
for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
if 'unittest' not in _root:
continue
if '__pycache__' in _root:
continue
for _file in _files:
_ext = os.path.splitext(_file)[1]
                if _ext not in ('.py',):
continue
nb += 1
file_name = os.path.join(_root, _file)
try: # python 3
with open(file_name, encoding="utf-8") as file_with_data:
_data = file_with_data.read()
except Exception as reason: # python 2
with open(file_name, "r") as file_with_data:
_data = str(file_with_data.read()).decode("utf-8")
if not len(_data):
print("No data for {} ({}).".format(_file, type(_data)))
if _ext.lower() == '.py' and _data:
try:
_data = pyminifier.remove_comments_and_docstrings(
_data)
_data = pyminifier.dedent(_data)
except Exception as error:
print(error)
nb_err += 1
_vfs_filename = os.path.join(
_root, _file).replace(_main_root, '')
_vfs_filename = _vfs_filename.replace("\\", "/")
mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
mod_name, ext = os.path.splitext(mod_name)
is_package = mod_name.endswith('__init__')
if is_package:
mod_name = mod_name[:-9]
_VFS[mod_name] = [_data, 1]
else:
_VFS[mod_name] = [_data]
print(("Adding %s %s" % (mod_name, _vfs_filename)))
print('%s files, %s errors' % (nb, nb_err))
with open(filename, "w") as file_to_write_VFS:
file_to_write_VFS.write('__BRYTHON__.libs = __BRYTHON__.libs || {};\n')
        file_to_write_VFS.write("__BRYTHON__.libs['unittest']=%s;\n\n" % json.dumps(_VFS))
file_to_write_VFS.write("""
__BRYTHON__.import_from_unittest = function(mod_name){
var stored = __BRYTHON__.libs['unittest'][mod_name]
if(stored!==undefined){
var module_contents = stored[0]
var is_package = stored[1]
var path = 'py_unittest'
var module = {name:mod_name,__class__:$B.$ModuleDict,is_package:is_package}
if(is_package){var package=mod_name}
else{
var elts = mod_name.split('.')
elts.pop()
var package = elts.join('.')
}
$B.modules[mod_name].$package = is_package
$B.modules[mod_name].__package__ = package
run_py(module,path,module_contents)
return true
}
return null
}
// add this import function to brython by doing the following:
// <body onload="brython({custom_import_funcs:[__BRYTHON__.import_from_unittest]})">
// this will allow us to import unittest modules.
""")
def process(filename, exclude_dirs=['unittest',]):
"""Process a VFS filename for Brython."""
print("Generating {}".format(filename))
nb = 0
nb_err = 0
_main_root = os.path.dirname(filename)
_VFS = {}
for _mydir in ("libs", "Lib"):
for _root, _dir, _files in os.walk(os.path.join(_main_root, _mydir)):
#if _root.endswith('lib_migration'):
_flag=False
for _exclude in exclude_dirs:
if _exclude in _root: #_root.endswith(_exclude):
_flag=True
continue
if _flag:
continue # skip these modules
if '__pycache__' in _root:
continue
nb += 1
for _file in _files:
_ext = os.path.splitext(_file)[1]
if _ext not in ('.js', '.py'):
continue
nb += 1
                file_name = os.path.join(_root, _file)
                try:  # python 3
                    with open(file_name, encoding="utf-8") as file_with_data:
                        _data = file_with_data.read()
                except Exception:  # python 2 fallback
                    with open(file_name, "r") as file_with_data:
                        _data = str(file_with_data.read()).decode("utf-8")
                if len(_data) == 0:
                    print('no data for %s' % _file)
if _ext in '.js':
if js_minify is not None:
try:
_data = js_minify(_data)
except Exception as error:
print(error)
elif _ext == '.py' and len(_data) > 0:
try:
_data = pyminifier.remove_comments_and_docstrings(_data)
_data = pyminifier.dedent(_data)
except Exception as error:
print(error)
nb_err += 1
_vfs_filename = os.path.join(_root, _file).replace(_main_root, '')
_vfs_filename = _vfs_filename.replace("\\", "/")
if _vfs_filename.startswith('/libs/crypto_js/rollups/'):
if _file not in ('md5.js', 'sha1.js', 'sha3.js',
'sha224.js', 'sha384.js', 'sha512.js'):
continue
mod_name = _vfs_filename[len(_mydir) + 2:].replace('/', '.')
mod_name, ext = os.path.splitext(mod_name)
is_package = mod_name.endswith('__init__')
if is_package:
mod_name = mod_name[:-9]
_VFS[mod_name] = [ext, _data, 1]
else:
_VFS[mod_name] = [ext, _data]
print(("adding %s %s" % (mod_name, _vfs_filename)))
print('%s files, %s errors' % (nb, nb_err))
with open(filename, "w") as file_to_write_VFS:
file_to_write_VFS.write('__BRYTHON__.use_VFS = true;\n')
file_to_write_VFS.write('__BRYTHON__.VFS=%s;\n\n' % json.dumps(_VFS))
###############################################################################
if __name__ == '__main__':
_main_root = os.path.join(os.getcwd(), '../src')
process(os.path.join(_main_root, "py_VFS.js"))
| [
"json.dumps",
"os.path.splitext",
"os.path.join",
"pyminifier.remove_comments_and_docstrings",
"os.getcwd",
"os.path.dirname",
"pyminifier.dedent"
]
| [((665, 690), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (680, 690), False, 'import os\n'), ((3962, 3987), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (3977, 3987), False, 'import os\n'), ((6872, 6883), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6881, 6883), False, 'import os\n'), ((6907, 6944), 'os.path.join', 'os.path.join', (['_main_root', '"""py_VFS.js"""'], {}), "(_main_root, 'py_VFS.js')\n", (6919, 6944), False, 'import os\n'), ((776, 808), 'os.path.join', 'os.path.join', (['_main_root', '_mydir'], {}), '(_main_root, _mydir)\n', (788, 808), False, 'import os\n'), ((4080, 4112), 'os.path.join', 'os.path.join', (['_main_root', '_mydir'], {}), '(_main_root, _mydir)\n', (4092, 4112), False, 'import os\n'), ((1144, 1170), 'os.path.join', 'os.path.join', (['_root', '_file'], {}), '(_root, _file)\n', (1156, 1170), False, 'import os\n'), ((2291, 2317), 'os.path.splitext', 'os.path.splitext', (['mod_name'], {}), '(mod_name)\n', (2307, 2317), False, 'import os\n'), ((2890, 2906), 'json.dumps', 'json.dumps', (['_VFS'], {}), '(_VFS)\n', (2900, 2906), False, 'import json\n'), ((6141, 6167), 'os.path.splitext', 'os.path.splitext', (['mod_name'], {}), '(mod_name)\n', (6157, 6167), False, 'import os\n'), ((6713, 6729), 'json.dumps', 'json.dumps', (['_VFS'], {}), '(_VFS)\n', (6723, 6729), False, 'import json\n'), ((995, 1018), 'os.path.splitext', 'os.path.splitext', (['_file'], {}), '(_file)\n', (1011, 1018), False, 'import os\n'), ((4562, 4585), 'os.path.splitext', 'os.path.splitext', (['_file'], {}), '(_file)\n', (4578, 4585), False, 'import os\n'), ((1756, 1804), 'pyminifier.remove_comments_and_docstrings', 'pyminifier.remove_comments_and_docstrings', (['_data'], {}), '(_data)\n', (1797, 1804), False, 'import pyminifier\n'), ((1866, 1890), 'pyminifier.dedent', 'pyminifier.dedent', (['_data'], {}), '(_data)\n', (1883, 1890), False, 'import pyminifier\n'), ((2044, 2070), 'os.path.join', 'os.path.join', (['_root', '_file'], {}), '(_root, _file)\n', (2056, 2070), False, 'import os\n'), ((4715, 4741), 'os.path.join', 'os.path.join', (['_root', '_file'], {}), '(_root, _file)\n', (4727, 4741), False, 'import os\n'), ((5670, 5696), 'os.path.join', 'os.path.join', (['_root', '_file'], {}), '(_root, _file)\n', (5682, 5696), False, 'import os\n'), ((5421, 5469), 'pyminifier.remove_comments_and_docstrings', 'pyminifier.remove_comments_and_docstrings', (['_data'], {}), '(_data)\n', (5462, 5469), False, 'import pyminifier\n'), ((5499, 5523), 'pyminifier.dedent', 'pyminifier.dedent', (['_data'], {}), '(_data)\n', (5516, 5523), False, 'import pyminifier\n')] |
#
import os
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
| [
"os.path.dirname"
]
| [((40, 65), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (55, 65), False, 'import os\n')] |
from __future__ import print_function
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms
from tqdm import tqdm
from pyromancy import pyromq
from pyromancy.losses import LossGroup, NegativeLogLikelihood
from pyromancy.metrics import MetricGroup, Accuracy
from pyromancy.subscribers import LogSubscriber
def parse_args():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--weight-decay', default=1e-4, type=float)
parser.add_argument('--grad-clip-norm', default=10.0, type=float)
parser.add_argument('--disable-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# Name the experiment
parser.add_argument('--experiment-name', required=True)
parser.add_argument("--experimentdb", default=None)
parser.add_argument('--log-to-console', default=False, action='store_true')
args = parser.parse_args()
if args.experimentdb is None:
args.experimentdb = args.experiment_name + '.db'
return args
class Net(nn.Module):
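    """Small convolutional network (two conv + two linear layers) for MNIST classification."""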
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
        return F.log_softmax(x, dim=1)
# noinspection PyCallingNonCallable,PyCallingNonCallable
def run_once(args, train_loader, test_loader):
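    """Build the model, loss and metric groups, then train and evaluate for args.epochs, publishing events via pyromq."""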
broker = pyromq.Broker()
model = Net()
if args.cuda:
model.cuda()
training_events = pyromq.TrainingEventPublisher(broker=broker)
broker.add_subscriber(LogSubscriber(experiment_uid=args.experiment_name,
log_file=os.path.join('logs', args.experiment_name),
to_console=args.log_to_console))
opt = torch.optim.SGD(params=model.parameters(),
lr=args.lr,
weight_decay=args.weight_decay,
momentum=args.momentum)
losses = LossGroup(optimizer=opt,
grad_clip_norm=args.grad_clip_norm,
name='losses',
channel_name=pyromq.channels.METRIC_EVENTS,
broker=broker)
losses.add(NegativeLogLikelihood(name='nll',
target_name='y_target',
output_name='y_pred'),
data_target='train')
# Metrics
metrics = MetricGroup(name='metrics',
channel_name=pyromq.channels.METRIC_EVENTS,
broker=broker)
metrics.add(Accuracy(name='acc',
target_name='y_target',
output_name='y_pred'),
data_target='*')
metrics.add(NegativeLogLikelihood(name='nll',
target_name='y_target',
output_name='y_pred'),
data_target='val')
training_events.training_start()
for _ in tqdm(range(args.epochs), total=args.epochs):
training_events.epoch_start()
model.train(True)
for data, target in train_loader:
# From the original example
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
# put the incoming batch data into a dictionary
batch_dict = {'x_data': data, 'y_target': target}
# Training Event
training_events.batch_start()
# Get model outputs
predictions = {'y_pred': model(batch_dict['x_data'])}
# Compute Metrics
metrics.compute(in_dict=batch_dict, out_dict=predictions,
data_type='train')
# Compute Losses
losses.compute(in_dict=batch_dict, out_dict=predictions,
data_type='train')
losses.step()
# Training Event
training_events.batch_end()
model.train(False)
for data, target in test_loader:
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
batch_dict = {'x_data': data, 'y_target': target}
# Training Event
training_events.batch_start()
predictions = {'y_pred': model(batch_dict['x_data'])}
metrics.compute(in_dict=batch_dict,
out_dict=predictions,
data_type='val')
training_events.batch_end()
training_events.epoch_end()
def main():
args = parse_args()
args.cuda = not args.disable_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
dataload_kwargs = {}
if args.cuda:
dataload_kwargs = {'num_workers': 1, 'pin_memory': True}
train_dataset = datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# noinspection PyUnresolvedReferences
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True, **dataload_kwargs)
test_dataset = datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
]))
# noinspection PyUnresolvedReferences
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=args.batch_size,
shuffle=True, **dataload_kwargs)
run_once(args, train_loader, test_loader)
if __name__ == "__main__":
main()
| [
"torch.cuda.is_available",
"pyromancy.pyromq.TrainingEventPublisher",
"pyromancy.losses.LossGroup",
"argparse.ArgumentParser",
"torchvision.transforms.ToTensor",
"torch.autograd.Variable",
"torch.nn.Dropout2d",
"torch.nn.functional.dropout",
"pyromancy.metrics.Accuracy",
"torch.nn.functional.log_softmax",
"torchvision.transforms.Normalize",
"pyromancy.pyromq.Broker",
"pyromancy.losses.NegativeLogLikelihood",
"torch.manual_seed",
"os.path.join",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"pyromancy.metrics.MetricGroup",
"torch.cuda.manual_seed"
]
| [((485, 545), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (508, 545), False, 'import argparse\n'), ((2616, 2631), 'pyromancy.pyromq.Broker', 'pyromq.Broker', ([], {}), '()\n', (2629, 2631), False, 'from pyromancy import pyromq\n'), ((2713, 2757), 'pyromancy.pyromq.TrainingEventPublisher', 'pyromq.TrainingEventPublisher', ([], {'broker': 'broker'}), '(broker=broker)\n', (2742, 2757), False, 'from pyromancy import pyromq\n'), ((3216, 3354), 'pyromancy.losses.LossGroup', 'LossGroup', ([], {'optimizer': 'opt', 'grad_clip_norm': 'args.grad_clip_norm', 'name': '"""losses"""', 'channel_name': 'pyromq.channels.METRIC_EVENTS', 'broker': 'broker'}), "(optimizer=opt, grad_clip_norm=args.grad_clip_norm, name='losses',\n channel_name=pyromq.channels.METRIC_EVENTS, broker=broker)\n", (3225, 3354), False, 'from pyromancy.losses import LossGroup, NegativeLogLikelihood\n'), ((3680, 3770), 'pyromancy.metrics.MetricGroup', 'MetricGroup', ([], {'name': '"""metrics"""', 'channel_name': 'pyromq.channels.METRIC_EVENTS', 'broker': 'broker'}), "(name='metrics', channel_name=pyromq.channels.METRIC_EVENTS,\n broker=broker)\n", (3691, 3770), False, 'from pyromancy.metrics import MetricGroup, Accuracy\n'), ((6031, 6059), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6048, 6059), False, 'import torch\n'), ((6613, 6720), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size,\n shuffle=True, **dataload_kwargs)\n', (6640, 6720), False, 'import torch\n'), ((7051, 7157), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(test_dataset, batch_size=args.batch_size,\n shuffle=True, **dataload_kwargs)\n', (7078, 7157), False, 'import torch\n'), ((1984, 2015), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(10)'], {'kernel_size': '(5)'}), '(1, 10, kernel_size=5)\n', (1993, 2015), True, 'import torch.nn as nn\n'), ((2037, 2069), 'torch.nn.Conv2d', 'nn.Conv2d', (['(10)', '(20)'], {'kernel_size': '(5)'}), '(10, 20, kernel_size=5)\n', (2046, 2069), True, 'import torch.nn as nn\n'), ((2096, 2110), 'torch.nn.Dropout2d', 'nn.Dropout2d', ([], {}), '()\n', (2108, 2110), True, 'import torch.nn as nn\n'), ((2130, 2148), 'torch.nn.Linear', 'nn.Linear', (['(320)', '(50)'], {}), '(320, 50)\n', (2139, 2148), True, 'import torch.nn as nn\n'), ((2168, 2185), 'torch.nn.Linear', 'nn.Linear', (['(50)', '(10)'], {}), '(50, 10)\n', (2177, 2185), True, 'import torch.nn as nn\n'), ((2404, 2440), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'training': 'self.training'}), '(x, training=self.training)\n', (2413, 2440), True, 'import torch.nn.functional as F\n'), ((2480, 2496), 'torch.nn.functional.log_softmax', 'F.log_softmax', (['x'], {}), '(x)\n', (2493, 2496), True, 'import torch.nn.functional as F\n'), ((3459, 3538), 'pyromancy.losses.NegativeLogLikelihood', 'NegativeLogLikelihood', ([], {'name': '"""nll"""', 'target_name': '"""y_target"""', 'output_name': '"""y_pred"""'}), "(name='nll', target_name='y_target', output_name='y_pred')\n", (3480, 3538), False, 'from pyromancy.losses import LossGroup, NegativeLogLikelihood\n'), ((3836, 3902), 'pyromancy.metrics.Accuracy', 'Accuracy', ([], {'name': '"""acc"""', 'target_name': '"""y_target"""', 'output_name': '"""y_pred"""'}), "(name='acc', 
target_name='y_target', output_name='y_pred')\n", (3844, 3902), False, 'from pyromancy.metrics import MetricGroup, Accuracy\n'), ((4004, 4083), 'pyromancy.losses.NegativeLogLikelihood', 'NegativeLogLikelihood', ([], {'name': '"""nll"""', 'target_name': '"""y_target"""', 'output_name': '"""y_pred"""'}), "(name='nll', target_name='y_target', output_name='y_pred')\n", (4025, 4083), False, 'from pyromancy.losses import LossGroup, NegativeLogLikelihood\n'), ((6000, 6025), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6023, 6025), False, 'import torch\n'), ((6086, 6119), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (6108, 6119), False, 'import torch\n'), ((2885, 2927), 'os.path.join', 'os.path.join', (['"""logs"""', 'args.experiment_name'], {}), "('logs', args.experiment_name)\n", (2897, 2927), False, 'import os\n'), ((4552, 4566), 'torch.autograd.Variable', 'Variable', (['data'], {}), '(data)\n', (4560, 4566), False, 'from torch.autograd import Variable\n'), ((4568, 4584), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (4576, 4584), False, 'from torch.autograd import Variable\n'), ((5449, 5478), 'torch.autograd.Variable', 'Variable', (['data'], {'volatile': '(True)'}), '(data, volatile=True)\n', (5457, 5478), False, 'from torch.autograd import Variable\n'), ((5480, 5496), 'torch.autograd.Variable', 'Variable', (['target'], {}), '(target)\n', (5488, 5496), False, 'from torch.autograd import Variable\n'), ((6408, 6429), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6427, 6429), False, 'from torchvision import datasets, transforms\n'), ((6470, 6512), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (6490, 6512), False, 'from torchvision import datasets, transforms\n'), ((6909, 6930), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6928, 6930), False, 'from torchvision import datasets, transforms\n'), ((6940, 6982), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.1307,)', '(0.3081,)'], {}), '((0.1307,), (0.3081,))\n', (6960, 6982), False, 'from torchvision import datasets, transforms\n')] |
import argparse
import copy
import logging
import sys
from dataclasses import dataclass
from datetime import datetime, timedelta
from slack_sdk import WebClient
from typing import Dict, Optional, List
import pytz
from hunter import config
from hunter.attributes import get_back_links
from hunter.config import ConfigError, Config
from hunter.data_selector import DataSelector
from hunter.grafana import GrafanaError, Grafana, Annotation
from hunter.graphite import GraphiteError
from hunter.importer import DataImportError, Importers
from hunter.report import Report
from hunter.series import (
AnalysisOptions,
ChangePointGroup,
SeriesComparison,
compare,
AnalyzedSeries,
)
from hunter.slack import SlackNotifier, NotificationError
from hunter.test_config import TestConfigError, TestConfig, GraphiteTestConfig
from hunter.util import parse_datetime, DateFormatError, interpolate
@dataclass
class HunterError(Exception):
message: str
class Hunter:
__conf: Config
__importers: Importers
__grafana: Optional[Grafana]
__slack: Optional[SlackNotifier]
def __init__(self, conf: Config):
self.__conf = conf
self.__importers = Importers(conf)
self.__grafana = None
self.__slack = self.__maybe_create_slack_notifier()
def list_tests(self, group_names: Optional[List[str]]):
if group_names is not None:
test_names = []
for group_name in group_names:
group = self.__conf.test_groups.get(group_name)
if group is None:
raise HunterError(f"Test group not found: {group_name}")
test_names += (t.name for t in group)
else:
test_names = self.__conf.tests
for test_name in sorted(test_names):
print(test_name)
def list_test_groups(self):
for group_name in sorted(self.__conf.test_groups):
print(group_name)
def get_test(self, test_name: str) -> TestConfig:
test = self.__conf.tests.get(test_name)
if test is None:
raise HunterError(f"Test not found {test_name}")
return test
def get_tests(self, *names: str) -> List[TestConfig]:
tests = []
for name in names:
group = self.__conf.test_groups.get(name)
if group is not None:
tests += group
else:
test = self.__conf.tests.get(name)
if test is not None:
tests.append(test)
else:
raise HunterError(f"Test or group not found: {name}")
return tests
def list_metrics(self, test: TestConfig):
importer = self.__importers.get(test)
for metric_name in importer.fetch_all_metric_names(test):
print(metric_name)
def analyze(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> AnalyzedSeries:
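        """Fetch the data for a test, detect change points and print an annotated report."""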
importer = self.__importers.get(test)
series = importer.fetch_data(test, selector)
analyzed_series = series.analyze(options)
change_points = analyzed_series.change_points_by_time
report = Report(series, change_points)
print(test.name + ":")
print(report.format_log_annotated())
return analyzed_series
def __get_grafana(self) -> Grafana:
if self.__grafana is None:
self.__grafana = Grafana(self.__conf.grafana)
return self.__grafana
def update_grafana_annotations(self, test: GraphiteTestConfig, series: AnalyzedSeries):
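        """Create or delete Grafana annotations so they match the change points found in series."""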
grafana = self.__get_grafana()
begin = datetime.fromtimestamp(series.time()[0], tz=pytz.UTC)
end = datetime.fromtimestamp(series.time()[len(series.time()) - 1], tz=pytz.UTC)
logging.info(f"Fetching Grafana annotations for test {test.name}...")
tags_to_query = ["hunter", "change-point", "test:" + test.name]
old_annotations_for_test = grafana.fetch_annotations(begin, end, list(tags_to_query))
logging.info(f"Found {len(old_annotations_for_test)} annotations")
created_count = 0
for metric_name, change_points in series.change_points.items():
path = test.get_path(series.branch_name(), metric_name)
metric_tag = f"metric:{metric_name}"
tags_to_create = (
tags_to_query
+ [metric_tag]
+ test.tags
+ test.annotate
+ test.metrics[metric_name].annotate
)
substitutions = {
"TEST_NAME": test.name,
"METRIC_NAME": metric_name,
"GRAPHITE_PATH": [path],
"GRAPHITE_PATH_COMPONENTS": path.split("."),
"GRAPHITE_PREFIX": [test.prefix],
"GRAPHITE_PREFIX_COMPONENTS": test.prefix.split("."),
}
tmp_tags_to_create = []
for t in tags_to_create:
tmp_tags_to_create += interpolate(t, substitutions)
tags_to_create = tmp_tags_to_create
old_annotations = [a for a in old_annotations_for_test if metric_tag in a.tags]
old_annotation_times = set((a.time for a in old_annotations if a.tags))
target_annotations = []
for cp in change_points:
attributes = series.attributes_at(cp.index)
annotation_text = get_back_links(attributes)
target_annotations.append(
Annotation(
id=None,
time=datetime.fromtimestamp(cp.time, tz=pytz.UTC),
text=annotation_text,
tags=tags_to_create,
)
)
target_annotation_times = set((a.time for a in target_annotations))
to_delete = [a for a in old_annotations if a.time not in target_annotation_times]
if to_delete:
logging.info(
f"Removing {len(to_delete)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.delete_annotations(*(a.id for a in to_delete))
to_create = [a for a in target_annotations if a.time not in old_annotation_times]
if to_create:
logging.info(
f"Creating {len(to_create)} annotations "
f"for test {test.name} and metric {metric_name}..."
)
grafana.create_annotations(*to_create)
created_count += len(to_create)
if created_count == 0:
logging.info("All annotations up-to-date. No new annotations needed.")
else:
logging.info(f"Created {created_count} annotations.")
def remove_grafana_annotations(self, test: Optional[TestConfig], force: bool):
"""Removes all Hunter annotations (optionally for a given test) in Grafana"""
grafana = self.__get_grafana()
if test:
logging.info(f"Fetching Grafana annotations for test {test.name}...")
else:
logging.info(f"Fetching Grafana annotations...")
tags_to_query = {"hunter", "change-point"}
if test:
tags_to_query.add("test:" + test.name)
annotations = grafana.fetch_annotations(None, None, list(tags_to_query))
if not annotations:
logging.info("No annotations found.")
return
if not force:
print(
f"Are you sure to remove {len(annotations)} annotations from {grafana.url}? [y/N]"
)
decision = input().strip()
if decision.lower() != "y" and decision.lower() != "yes":
return
logging.info(f"Removing {len(annotations)} annotations...")
grafana.delete_annotations(*(a.id for a in annotations))
def regressions(
self, test: TestConfig, selector: DataSelector, options: AnalysisOptions
) -> bool:
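        """Compare recent results against a baseline; print regressions and return whether any were found."""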
importer = self.__importers.get(test)
# Even if user is interested only in performance difference since some point X,
# we really need to fetch some earlier points than X.
# Otherwise, if performance went down very early after X, e.g. at X + 1, we'd have
# insufficient number of data points to compute the baseline performance.
# Instead of using `since-` selector, we're fetching everything from the
# beginning and then we find the baseline performance around the time pointed by
# the original selector.
since_version = selector.since_version
since_commit = selector.since_commit
since_time = selector.since_time
baseline_selector = copy.deepcopy(selector)
baseline_selector.last_n_points = sys.maxsize
baseline_selector.branch = None
baseline_selector.since_version = None
baseline_selector.since_commit = None
baseline_selector.since_time = since_time - timedelta(days=30)
baseline_series = importer.fetch_data(test, baseline_selector)
if since_version:
baseline_index = baseline_series.find_by_attribute("version", since_version)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with version {since_version}")
baseline_index = max(baseline_index)
elif since_commit:
baseline_index = baseline_series.find_by_attribute("commit", since_commit)
if not baseline_index:
raise HunterError(f"No runs of test {test.name} with commit {since_commit}")
baseline_index = max(baseline_index)
else:
baseline_index = baseline_series.find_first_not_earlier_than(since_time)
baseline_series = baseline_series.analyze()
if selector.branch:
target_series = importer.fetch_data(test, selector).analyze()
else:
target_series = baseline_series
cmp = compare(baseline_series, baseline_index, target_series, target_series.len())
regressions = []
for metric_name, stats in cmp.stats.items():
direction = baseline_series.metric(metric_name).direction
m1 = stats.mean_1
m2 = stats.mean_2
change_percent = stats.forward_rel_change() * 100.0
if m2 * direction < m1 * direction and stats.pvalue < options.max_pvalue:
regressions.append(
" {:16}: {:#8.3g} --> {:#8.3g} ({:+6.1f}%)".format(
metric_name, m1, m2, change_percent
)
)
if regressions:
print(f"{test.name}:")
for r in regressions:
print(r)
else:
print(f"{test.name}: OK")
return len(regressions) > 0
def __maybe_create_slack_notifier(self):
if not self.__conf.slack:
return None
return SlackNotifier(WebClient(token=self.__conf.slack.bot_token))
def notify_slack(
self,
test_change_points: Dict[str, AnalyzedSeries],
selector: DataSelector,
channels: List[str],
since: datetime,
):
if not self.__slack:
logging.error(
"Slack definition is missing from the configuration, cannot send notification"
)
return
self.__slack.notify(test_change_points, selector=selector, channels=channels, since=since)
def validate(self):
valid = True
unique_metrics = set()
for name, test in self.__conf.tests.items():
logging.info("Checking {}".format(name))
test_metrics = test.fully_qualified_metric_names()
for test_metric in test_metrics:
if test_metric not in unique_metrics:
unique_metrics.add(test_metric)
else:
valid = False
logging.error(f"Found duplicated metric: {test_metric}")
try:
importer = self.__importers.get(test)
series = importer.fetch_data(test)
for metric, metric_data in series.data.items():
if not metric_data:
logging.warning(f"Test's metric does not have data: {name} {metric}")
except Exception as err:
logging.error(f"Invalid test definition: {name}\n{repr(err)}\n")
valid = False
logging.info(f"Validation finished: {'VALID' if valid else 'INVALID'}")
if not valid:
exit(1)
def setup_data_selector_parser(parser: argparse.ArgumentParser):
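    """Register the command-line options that select which data points are analyzed."""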
parser.add_argument(
"--branch", metavar="STRING", dest="branch", help="name of the branch", nargs="?"
)
parser.add_argument(
"--metrics",
metavar="LIST",
dest="metrics",
help="a comma-separated list of metrics to analyze",
)
parser.add_argument(
"--attrs",
metavar="LIST",
dest="attributes",
help="a comma-separated list of attribute names associated with the runs "
"(e.g. commit, branch, version); "
"if not specified, it will be automatically filled based on available information",
)
since_group = parser.add_mutually_exclusive_group()
since_group.add_argument(
"--since-commit",
metavar="STRING",
dest="since_commit",
help="the commit at the start of the time span to analyze",
)
since_group.add_argument(
"--since-version",
metavar="STRING",
dest="since_version",
help="the version at the start of the time span to analyze",
)
since_group.add_argument(
"--since",
metavar="DATE",
dest="since_time",
help="the start of the time span to analyze; "
"accepts ISO, and human-readable dates like '10 weeks ago'",
)
until_group = parser.add_mutually_exclusive_group()
until_group.add_argument(
"--until-commit",
metavar="STRING",
dest="until_commit",
help="the commit at the end of the time span to analyze",
)
until_group.add_argument(
"--until-version",
metavar="STRING",
dest="until_version",
help="the version at the end of the time span to analyze",
)
until_group.add_argument(
"--until",
metavar="DATE",
dest="until_time",
help="the end of the time span to analyze; same syntax as --since",
)
parser.add_argument(
"--last",
type=int,
metavar="COUNT",
dest="last_n_points",
help="the number of data points to take from the end of the series"
)
def data_selector_from_args(args: argparse.Namespace) -> DataSelector:
data_selector = DataSelector()
if args.branch:
data_selector.branch = args.branch
if args.metrics is not None:
data_selector.metrics = list(args.metrics.split(","))
if args.attributes is not None:
data_selector.attributes = list(args.attributes.split(","))
if args.since_commit is not None:
data_selector.since_commit = args.since_commit
if args.since_version is not None:
data_selector.since_version = args.since_version
if args.since_time is not None:
data_selector.since_time = parse_datetime(args.since_time)
if args.until_commit is not None:
data_selector.until_commit = args.until_commit
if args.until_version is not None:
data_selector.until_version = args.until_version
if args.until_time is not None:
data_selector.until_time = parse_datetime(args.until_time)
if args.last_n_points is not None:
data_selector.last_n_points = args.last_n_points
return data_selector
def setup_analysis_options_parser(parser: argparse.ArgumentParser):
    parser.add_argument(
        "-P",
        "--p-value",
dest="pvalue",
type=float,
default=0.001,
help="maximum accepted P-value of a change-point; "
"P denotes the probability that the change-point has "
"been found by a random coincidence, rather than a real "
"difference between the data distributions",
)
parser.add_argument(
"-M",
"--magnitude",
dest="magnitude",
type=float,
default=0.0,
help="minimum accepted magnitude of a change-point "
"computed as abs(new_mean / old_mean - 1.0); use it "
"to filter out stupidly small changes like < 0.01",
)
parser.add_argument(
"--window",
default=50,
type=int,
dest="window",
help="the number of data points analyzed at once; "
"the window size affects the discriminative "
"power of the change point detection algorithm; "
"large windows are less susceptible to noise; "
"however, a very large window may cause dismissing short regressions "
"as noise so it is best to keep it short enough to include not more "
"than a few change points (optimally at most 1)",
)
def analysis_options_from_args(args: argparse.Namespace) -> AnalysisOptions:
conf = AnalysisOptions()
if args.pvalue is not None:
conf.max_pvalue = args.pvalue
if args.magnitude is not None:
conf.min_magnitude = args.magnitude
if args.window is not None:
conf.window_len = args.window
return conf
def main():
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
parser = argparse.ArgumentParser(description="Hunts performance regressions in Fallout results")
subparsers = parser.add_subparsers(dest="command")
list_tests_parser = subparsers.add_parser("list-tests", help="list available tests")
list_tests_parser.add_argument("group", help="name of the group of the tests", nargs="*")
list_metrics_parser = subparsers.add_parser(
"list-metrics", help="list available metrics for a test"
)
list_metrics_parser.add_argument("test", help="name of the test")
subparsers.add_parser("list-groups", help="list available groups of tests")
analyze_parser = subparsers.add_parser(
"analyze",
help="analyze performance test results",
formatter_class=argparse.RawTextHelpFormatter,
)
analyze_parser.add_argument("tests", help="name of the test or group of the tests", nargs="+")
analyze_parser.add_argument(
"--update-grafana",
help="Update Grafana dashboards with appropriate annotations of change points",
action="store_true",
)
analyze_parser.add_argument(
"--notify-slack",
help="Send notification containing a summary of change points to given Slack channels",
nargs="+",
)
analyze_parser.add_argument(
"--cph-report-since",
help="Sets a limit on the date range of the Change Point History reported to Slack. Same syntax as --since.",
metavar="DATE",
dest="cph_report_since",
)
setup_data_selector_parser(analyze_parser)
setup_analysis_options_parser(analyze_parser)
regressions_parser = subparsers.add_parser("regressions", help="find performance regressions")
regressions_parser.add_argument(
"tests", help="name of the test or group of the tests", nargs="+"
)
setup_data_selector_parser(regressions_parser)
setup_analysis_options_parser(regressions_parser)
remove_annotations_parser = subparsers.add_parser("remove-annotations")
remove_annotations_parser.add_argument(
"tests", help="name of the test or test group", nargs="*"
)
remove_annotations_parser.add_argument(
"--force", help="don't ask questions, just do it", dest="force", action="store_true"
)
validate_parser = subparsers.add_parser("validate",
help="validates the tests and metrics defined in the configuration")
try:
args = parser.parse_args()
conf = config.load_config()
hunter = Hunter(conf)
if args.command == "list-groups":
hunter.list_test_groups()
if args.command == "list-tests":
group_names = args.group if args.group else None
hunter.list_tests(group_names)
if args.command == "list-metrics":
test = hunter.get_test(args.test)
hunter.list_metrics(test)
if args.command == "analyze":
update_grafana_flag = args.update_grafana
slack_notification_channels = args.notify_slack
slack_cph_since = parse_datetime(args.cph_report_since)
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
tests_analyzed_series = {test.name: None for test in tests}
for test in tests:
try:
analyzed_series = hunter.analyze(test, selector=data_selector, options=options)
if update_grafana_flag:
if not isinstance(test, GraphiteTestConfig):
raise GrafanaError(f"Not a Graphite test")
hunter.update_grafana_annotations(test, analyzed_series)
if slack_notification_channels:
tests_analyzed_series[test.name] = analyzed_series
except DataImportError as err:
logging.error(err.message)
except GrafanaError as err:
logging.error(
f"Failed to update grafana dashboards for {test.name}: {err.message}"
)
if slack_notification_channels:
hunter.notify_slack(
tests_analyzed_series,
selector=data_selector,
channels=slack_notification_channels,
since=slack_cph_since,
)
if args.command == "regressions":
data_selector = data_selector_from_args(args)
options = analysis_options_from_args(args)
tests = hunter.get_tests(*args.tests)
regressing_test_count = 0
errors = 0
for test in tests:
try:
regressions = hunter.regressions(
test, selector=data_selector, options=options
)
if regressions:
regressing_test_count += 1
except HunterError as err:
logging.error(err.message)
errors += 1
except DataImportError as err:
logging.error(err.message)
errors += 1
if regressing_test_count == 0:
print("No regressions found!")
elif regressing_test_count == 1:
print("Regressions in 1 test found")
else:
print(f"Regressions in {regressing_test_count} tests found")
if errors > 0:
print(f"Some tests were skipped due to import / analyze errors. Consult error log.")
if args.command == "remove-annotations":
if args.tests:
tests = hunter.get_tests(*args.tests)
for test in tests:
hunter.remove_grafana_annotations(test, args.force)
else:
hunter.remove_grafana_annotations(None, args.force)
if args.command == "validate":
hunter.validate()
if args.command is None:
parser.print_usage()
except ConfigError as err:
logging.error(err.message)
exit(1)
except TestConfigError as err:
logging.error(err.message)
exit(1)
except GraphiteError as err:
logging.error(err.message)
exit(1)
except GrafanaError as err:
logging.error(err.message)
exit(1)
except DataImportError as err:
logging.error(err.message)
exit(1)
except HunterError as err:
logging.error(err.message)
exit(1)
except DateFormatError as err:
logging.error(err.message)
exit(1)
except NotificationError as err:
logging.error(err.message)
exit(1)
if __name__ == "__main__":
main()
| [
"slack_sdk.WebClient",
"hunter.attributes.get_back_links",
"hunter.util.interpolate",
"copy.deepcopy",
"hunter.grafana.GrafanaError",
"datetime.timedelta",
"logging.error",
"logging.info",
"hunter.report.Report",
"argparse.ArgumentParser",
"hunter.util.parse_datetime",
"logging.warning",
"hunter.series.AnalysisOptions",
"hunter.grafana.Grafana",
"hunter.importer.Importers",
"logging.basicConfig",
"datetime.datetime.fromtimestamp",
"hunter.data_selector.DataSelector",
"hunter.config.load_config"
]
| [((14875, 14889), 'hunter.data_selector.DataSelector', 'DataSelector', ([], {}), '()\n', (14887, 14889), False, 'from hunter.data_selector import DataSelector\n'), ((17254, 17271), 'hunter.series.AnalysisOptions', 'AnalysisOptions', ([], {}), '()\n', (17269, 17271), False, 'from hunter.series import AnalysisOptions, ChangePointGroup, SeriesComparison, compare, AnalyzedSeries\n'), ((17525, 17601), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'logging.INFO'}), "(format='%(levelname)s: %(message)s', level=logging.INFO)\n", (17544, 17601), False, 'import logging\n'), ((17616, 17708), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Hunts performance regressions in Fallout results"""'}), "(description=\n 'Hunts performance regressions in Fallout results')\n", (17639, 17708), False, 'import argparse\n'), ((1189, 1204), 'hunter.importer.Importers', 'Importers', (['conf'], {}), '(conf)\n', (1198, 1204), False, 'from hunter.importer import DataImportError, Importers\n'), ((3183, 3212), 'hunter.report.Report', 'Report', (['series', 'change_points'], {}), '(series, change_points)\n', (3189, 3212), False, 'from hunter.report import Report\n'), ((3784, 3853), 'logging.info', 'logging.info', (['f"""Fetching Grafana annotations for test {test.name}..."""'], {}), "(f'Fetching Grafana annotations for test {test.name}...')\n", (3796, 3853), False, 'import logging\n'), ((8763, 8786), 'copy.deepcopy', 'copy.deepcopy', (['selector'], {}), '(selector)\n', (8776, 8786), False, 'import copy\n'), ((12536, 12607), 'logging.info', 'logging.info', (['f"""Validation finished: {\'VALID\' if valid else \'INVALID\'}"""'], {}), '(f"Validation finished: {\'VALID\' if valid else \'INVALID\'}")\n', (12548, 12607), False, 'import logging\n'), ((15412, 15443), 'hunter.util.parse_datetime', 'parse_datetime', (['args.since_time'], {}), '(args.since_time)\n', (15426, 15443), False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((15704, 15735), 'hunter.util.parse_datetime', 'parse_datetime', (['args.until_time'], {}), '(args.until_time)\n', (15718, 15735), False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((20081, 20101), 'hunter.config.load_config', 'config.load_config', ([], {}), '()\n', (20099, 20101), False, 'from hunter import config\n'), ((3425, 3453), 'hunter.grafana.Grafana', 'Grafana', (['self.__conf.grafana'], {}), '(self.__conf.grafana)\n', (3432, 3453), False, 'from hunter.grafana import GrafanaError, Grafana, Annotation\n'), ((6661, 6731), 'logging.info', 'logging.info', (['"""All annotations up-to-date. No new annotations needed."""'], {}), "('All annotations up-to-date. 
No new annotations needed.')\n", (6673, 6731), False, 'import logging\n'), ((6758, 6811), 'logging.info', 'logging.info', (['f"""Created {created_count} annotations."""'], {}), "(f'Created {created_count} annotations.')\n", (6770, 6811), False, 'import logging\n'), ((7050, 7119), 'logging.info', 'logging.info', (['f"""Fetching Grafana annotations for test {test.name}..."""'], {}), "(f'Fetching Grafana annotations for test {test.name}...')\n", (7062, 7119), False, 'import logging\n'), ((7146, 7194), 'logging.info', 'logging.info', (['f"""Fetching Grafana annotations..."""'], {}), "(f'Fetching Grafana annotations...')\n", (7158, 7194), False, 'import logging\n'), ((7435, 7472), 'logging.info', 'logging.info', (['"""No annotations found."""'], {}), "('No annotations found.')\n", (7447, 7472), False, 'import logging\n'), ((9026, 9044), 'datetime.timedelta', 'timedelta', ([], {'days': '(30)'}), '(days=30)\n', (9035, 9044), False, 'from datetime import datetime, timedelta\n'), ((11016, 11060), 'slack_sdk.WebClient', 'WebClient', ([], {'token': 'self.__conf.slack.bot_token'}), '(token=self.__conf.slack.bot_token)\n', (11025, 11060), False, 'from slack_sdk import WebClient\n'), ((11288, 11391), 'logging.error', 'logging.error', (['"""Slack definition is missing from the configuration, cannot send notification"""'], {}), "(\n 'Slack definition is missing from the configuration, cannot send notification'\n )\n", (11301, 11391), False, 'import logging\n'), ((20670, 20707), 'hunter.util.parse_datetime', 'parse_datetime', (['args.cph_report_since'], {}), '(args.cph_report_since)\n', (20684, 20707), False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((23775, 23801), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (23788, 23801), False, 'import logging\n'), ((23861, 23887), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (23874, 23887), False, 'import logging\n'), ((23945, 23971), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (23958, 23971), False, 'import logging\n'), ((24028, 24054), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (24041, 24054), False, 'import logging\n'), ((24114, 24140), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (24127, 24140), False, 'import logging\n'), ((24196, 24222), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (24209, 24222), False, 'import logging\n'), ((24282, 24308), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (24295, 24308), False, 'import logging\n'), ((24370, 24396), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (24383, 24396), False, 'import logging\n'), ((4993, 5022), 'hunter.util.interpolate', 'interpolate', (['t', 'substitutions'], {}), '(t, substitutions)\n', (5004, 5022), False, 'from hunter.util import parse_datetime, DateFormatError, interpolate\n'), ((5416, 5442), 'hunter.attributes.get_back_links', 'get_back_links', (['attributes'], {}), '(attributes)\n', (5430, 5442), False, 'from hunter.attributes import get_back_links\n'), ((12003, 12059), 'logging.error', 'logging.error', (['f"""Found duplicated metric: {test_metric}"""'], {}), "(f'Found duplicated metric: {test_metric}')\n", (12016, 12059), False, 'import logging\n'), ((12310, 12379), 'logging.warning', 'logging.warning', (['f"""Test\'s metric does not have data: {name} {metric}"""'], {}), '(f"Test\'s metric does not have data: {name} 
{metric}")\n', (12325, 12379), False, 'import logging\n'), ((21554, 21580), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (21567, 21580), False, 'import logging\n'), ((21645, 21734), 'logging.error', 'logging.error', (['f"""Failed to update grafana dashboards for {test.name}: {err.message}"""'], {}), "(\n f'Failed to update grafana dashboards for {test.name}: {err.message}')\n", (21658, 21734), False, 'import logging\n'), ((22678, 22704), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (22691, 22704), False, 'import logging\n'), ((22804, 22830), 'logging.error', 'logging.error', (['err.message'], {}), '(err.message)\n', (22817, 22830), False, 'import logging\n'), ((5580, 5624), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['cp.time'], {'tz': 'pytz.UTC'}), '(cp.time, tz=pytz.UTC)\n', (5602, 5624), False, 'from datetime import datetime, timedelta\n'), ((21242, 21278), 'hunter.grafana.GrafanaError', 'GrafanaError', (['f"""Not a Graphite test"""'], {}), "(f'Not a Graphite test')\n", (21254, 21278), False, 'from hunter.grafana import GrafanaError, Grafana, Annotation\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
import os
import sys
import shutil
# Check these extensions were installed.
import sphinx_gallery.gen_gallery
# The package should be installed in a virtual environment.
import onnxruntime
# The documentation requires two extensions available at:
# https://github.com/xadupre/sphinx-docfx-yaml
# https://github.com/xadupre/sphinx-docfx-markdown
import sphinx_modern_theme
# -- Project information -----------------------------------------------------
project = 'ONNX Runtime'
copyright = '2018, Microsoft'
author = 'Microsoft'
version = onnxruntime.__version__
release = version
# -- General configuration ---------------------------------------------------
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.imgmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
"sphinx.ext.autodoc",
'sphinx.ext.githubpages',
"sphinx_gallery.gen_gallery",
"docfx_yaml.extension",
"docfx_markdown",
"pyquickhelper.sphinxext.sphinx_runpython_extension",
]
templates_path = ['_templates']
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
source_suffix = ['.rst', '.md']
master_doc = 'intro'
language = "en"
exclude_patterns = []
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
html_theme = "sphinx_modern_theme"
html_theme_path = [sphinx_modern_theme.get_html_theme_path()]
html_logo = "../MSFT-Onnx-Runtime-11282019-Logo.png"
html_static_path = ['_static']
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for Sphinx Gallery ----------------------------------------------
sphinx_gallery_conf = {
'examples_dirs': 'examples',
'gallery_dirs': 'auto_examples',
}
# -- markdown options -----------------------------------------------------------
md_image_dest = "media"
md_link_replace = {
'#onnxruntimesessionoptionsenable-profiling)': '#class-onnxruntimesessionoptions)',
}
# -- Setup actions -----------------------------------------------------------
def setup(app):
# Placeholder to initialize the folder before
# generating the documentation.
app.add_stylesheet('_static/gallery.css')
# download examples for the documentation
this = os.path.abspath(os.path.dirname(__file__))
dest = os.path.join(this, "model.onnx")
if not os.path.exists(dest):
import urllib.request
url = 'https://raw.githubusercontent.com/onnx/onnx/master/onnx/backend/test/data/node/test_sigmoid/model.onnx'
urllib.request.urlretrieve(url, dest)
loc = os.path.split(dest)[-1]
if not os.path.exists(loc):
import shutil
shutil.copy(dest, loc)
return app
| [
"os.path.exists",
"os.path.join",
"os.path.split",
"os.path.dirname",
"shutil.copy",
"sphinx_modern_theme.get_html_theme_path"
]
| [((1554, 1595), 'sphinx_modern_theme.get_html_theme_path', 'sphinx_modern_theme.get_html_theme_path', ([], {}), '()\n', (1593, 1595), False, 'import sphinx_modern_theme\n'), ((2640, 2672), 'os.path.join', 'os.path.join', (['this', '"""model.onnx"""'], {}), "(this, 'model.onnx')\n", (2652, 2672), False, 'import os\n'), ((2602, 2627), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (2617, 2627), False, 'import os\n'), ((2684, 2704), 'os.path.exists', 'os.path.exists', (['dest'], {}), '(dest)\n', (2698, 2704), False, 'import os\n'), ((2911, 2930), 'os.path.split', 'os.path.split', (['dest'], {}), '(dest)\n', (2924, 2930), False, 'import os\n'), ((2946, 2965), 'os.path.exists', 'os.path.exists', (['loc'], {}), '(loc)\n', (2960, 2965), False, 'import os\n'), ((2997, 3019), 'shutil.copy', 'shutil.copy', (['dest', 'loc'], {}), '(dest, loc)\n', (3008, 3019), False, 'import shutil\n')] |
"""Traffic simulator code."""
import sys
from os import path
from traffic_sim.analysis import TrafficExperiment
from traffic_sim.console import console
if not __package__:
_path = path.realpath(path.abspath(__file__))
sys.path.insert(0, path.dirname(path.dirname(_path)))
def main():
"""Run code from CLI."""
console.log('traffic sim')
num_trials = 30
ex = TrafficExperiment(
experiments=100,
trials=num_trials,
rows=10,
cols=10,
epochs=10,
)
ex.run()
ex.analyze()
if __name__ == '__main__':
main()
| [
"traffic_sim.analysis.TrafficExperiment",
"os.path.dirname",
"os.path.abspath",
"traffic_sim.console.console.log"
]
| [((330, 356), 'traffic_sim.console.console.log', 'console.log', (['"""traffic sim"""'], {}), "('traffic sim')\n", (341, 356), False, 'from traffic_sim.console import console\n'), ((386, 472), 'traffic_sim.analysis.TrafficExperiment', 'TrafficExperiment', ([], {'experiments': '(100)', 'trials': 'num_trials', 'rows': '(10)', 'cols': '(10)', 'epochs': '(10)'}), '(experiments=100, trials=num_trials, rows=10, cols=10,\n epochs=10)\n', (403, 472), False, 'from traffic_sim.analysis import TrafficExperiment\n'), ((201, 223), 'os.path.abspath', 'path.abspath', (['__file__'], {}), '(__file__)\n', (213, 223), False, 'from os import path\n'), ((261, 280), 'os.path.dirname', 'path.dirname', (['_path'], {}), '(_path)\n', (273, 280), False, 'from os import path\n')] |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import division,print_function,absolute_import,unicode_literals
import sys
import os
os.chdir(sys.path[0])
sys.path.append('/mnt/sda2/github/TSF1KEV/TSFpy')
from TSF_io import *
#from TSF_Forth import *
from TSF_shuffle import *
from TSF_match import *
from TSF_calc import *
from TSF_time import *
TSF_Forth_init(TSF_io_argvs(),[TSF_shuffle_Initwords,TSF_match_Initwords,TSF_calc_Initwords,TSF_time_Initwords])
TSF_Forth_setTSF("TSF_Tab-Separated-Forth:",
"\t".join(["UTF-8","#TSF_encoding","200","#TSF_calcPR","N-Fibonacci:","#TSF_this","0","#TSF_fin."]),
TSF_style="T")
TSF_Forth_setTSF("N-Fibonacci:",
"\t".join(["TSF_argvs:","#TSF_cloneargvs","TSF_argvs:","#TSF_lenthe","[0]Z[Fibcount:0]~[TSF_argvs:0]","#TSF_calcDC","Fibcount:","0","#TSF_pokethe","Fibonacci:","#TSF_this"]),
TSF_style="T")
TSF_Forth_setTSF("Fibonacci:",
"\t".join(["[Fibcount:1]Z1~[Fibcount:1]","#TSF_calcDC","((2&(([0]+3)*[0]+2)^)/((2&(2*[0]+2)^)-(2&([0]+1)^)-1)\\1)#(2&([0]+1)^)","#TSF_calcDC","1","#TSF_echoN","[Fibcount:1]+1","#TSF_calcDC","Fibcount:","1","#TSF_pokethe","Fibjump:","[Fibcount:0]-([Fibcount:1]+1)o0~1","#TSF_calcDC","#TSF_peekthe","#TSF_this"]),
TSF_style="T")
TSF_Forth_setTSF("Fibcount:",
"\t".join(["20","-1"]),
TSF_style="T")
TSF_Forth_setTSF("Fibjump:",
"\t".join(["Fibonacci:","#exit"]),
TSF_style="T")
TSF_Forth_addfin(TSF_io_argvs())
TSF_Forth_argvsleftcut(TSF_io_argvs(),1)
TSF_Forth_run()
| [
"os.chdir",
"sys.path.append"
]
| [((149, 170), 'os.chdir', 'os.chdir', (['sys.path[0]'], {}), '(sys.path[0])\n', (157, 170), False, 'import os\n'), ((171, 220), 'sys.path.append', 'sys.path.append', (['"""/mnt/sda2/github/TSF1KEV/TSFpy"""'], {}), "('/mnt/sda2/github/TSF1KEV/TSFpy')\n", (186, 220), False, 'import sys\n')] |
from pyatool import PYAToolkit
# A custom function must accept a toolkit parameter, even if it is not used
def test_b(toolkit):
return 'i am test_b, running on {}'.format(toolkit.device_id)
# Wrap an adb command as a method
PYAToolkit.bind_cmd(func_name='test_a', command='shell pm list package | grep google')
# Or bind a custom function
PYAToolkit.bind_func(real_func=test_b)
# Whether to enable logging
PYAToolkit.switch_logger(True)
# Initialize
d = PYAToolkit('123456F')
assert d.is_connected()
# Remote control is also supported (still unstable, not recommended yet)
# d = PYAToolkit('123456F', mode='remote')
# Methods that have been bound can be called directly
result = d.test_a()
# Possible output
# package:com.google.android.webview
# Custom functions work the same way
result = d.test_b()
# i am test_b, running on 123456F
# They can also be called via `std` or `standard_func` (code completion is available, which is convenient)
# Standard library only; custom extensions can only be called directly
d.std.get_current_activity(toolkit=d)
# Get all registered functions
all_functions = d.current_function()
print(all_functions)
# Usage of every standard function is listed below; feel free to report issues or adapt them yourself
# Print the device id, for testing only
d.hello_world()
# Show all installed packages
installed_package = d.show_package()
# Name of the activity at the top of the stack
current_activity_name = d.get_current_activity()
# Install the given apk (url or path supported); this example may take a while since it downloads from GitHub, feel free to change it
d.install_from(url=r'https://github.com/williamfzc/simhand2/releases/download/v0.1.2/app-debug.apk')
# d.install_from(path=r'/Users/admin/some_path/some_apk.apk')
# Check whether the package is installed
target_package_name = 'com.github.williamfzc.simhand2'
is_installed = d.is_installed(package_name=target_package_name)
# Clear the cache
d.clean_cache(target_package_name)
if is_installed:
d.uninstall(target_package_name)
# Get the phone's IP address
local_address = d.get_ip_address()
print(local_address)
# Toggle wifi state
d.switch_wifi(False)
# Toggle airplane mode
d.switch_airplane(True)
d.switch_airplane(False)
d.switch_wifi(True)
# Switch the input method
d.set_ime('com.sohu.inputmethod.sogouoem/.SogouIME')
# push and pull
d.push('./README.md', '/sdcard/')
d.pull('/sdcard/README.md', './haha.md')
# send keyevent
d.input_key_event(26)
d.input_key_event(26)
# swipe
d.swipe(500, 1200, 500, 200)
# click
d.click(200, 200)
| [
"pyatool.PYAToolkit.bind_func",
"pyatool.PYAToolkit",
"pyatool.PYAToolkit.switch_logger",
"pyatool.PYAToolkit.bind_cmd"
]
| [((164, 255), 'pyatool.PYAToolkit.bind_cmd', 'PYAToolkit.bind_cmd', ([], {'func_name': '"""test_a"""', 'command': '"""shell pm list package | grep google"""'}), "(func_name='test_a', command=\n 'shell pm list package | grep google')\n", (183, 255), False, 'from pyatool import PYAToolkit\n'), ((264, 302), 'pyatool.PYAToolkit.bind_func', 'PYAToolkit.bind_func', ([], {'real_func': 'test_b'}), '(real_func=test_b)\n', (284, 302), False, 'from pyatool import PYAToolkit\n'), ((314, 344), 'pyatool.PYAToolkit.switch_logger', 'PYAToolkit.switch_logger', (['(True)'], {}), '(True)\n', (338, 344), False, 'from pyatool import PYAToolkit\n'), ((356, 377), 'pyatool.PYAToolkit', 'PYAToolkit', (['"""123456F"""'], {}), "('123456F')\n", (366, 377), False, 'from pyatool import PYAToolkit\n')] |
import random
import argparse
import numpy as np
import pandas as pd
import os
import time
import string
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import WideResnet
from cifar import get_train_loader, get_val_loader
from label_guessor import LabelGuessor
from lr_scheduler import WarmupCosineLrScheduler
from ema import EMA
import utils
## args
parser = argparse.ArgumentParser(description=' FixMatch Training')
parser.add_argument('--wresnet-k', default=2, type=int, help='width factor of wide resnet')
parser.add_argument('--wresnet-n', default=28, type=int, help='depth of wide resnet')
parser.add_argument('--n-classes', type=int, default=10, help='number of classes in dataset')
parser.add_argument('--n-labeled', type=int, default=10, help='number of labeled samples for training')
parser.add_argument('--n-epochs', type=int, default=256, help='number of training epochs')
parser.add_argument('--batchsize', type=int, default=64, help='train batch size of labeled samples')
parser.add_argument('--mu', type=int, default=7, help='factor of train batch size of unlabeled samples')
parser.add_argument('--mu-c', type=int, default=1, help='factor of train batch size of contrastive learning samples')
parser.add_argument('--thr', type=float, default=0.95, help='pseudo label threshold')
parser.add_argument('--n-imgs-per-epoch', type=int, default=50000, help='number of training images for each epoch')
parser.add_argument('--lam-x', type=float, default=1., help='coefficient of labeled loss')
parser.add_argument('--lam-u', type=float, default=1., help='coefficient of unlabeled loss')
parser.add_argument('--lam-clr', type=float, default=1., help='coefficient of contrastive loss')
parser.add_argument('--ema-alpha', type=float, default=0.999, help='decay rate for ema module')
parser.add_argument('--lr', type=float, default=0.03, help='learning rate for training')
parser.add_argument('--weight-decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for optimizer')
parser.add_argument('--seed', type=int, default=-1, help='seed for random behaviors, no seed if negative')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--test', default=0, type=int, help='0 is softmax test function, 1 is similarity test function')
parser.add_argument('--bootstrap', type=int, default=16, help='Bootstrapping factor (default=16)')
parser.add_argument('--boot-schedule', type=int, default=1, help='Bootstrapping schedule (default=1)')
parser.add_argument('--balance', type=int, default=0, help='Balance class methods to use (default=0 None)')
parser.add_argument('--delT', type=float, default=0.2, help='Class balance threshold delta (default=0.2)')
args = parser.parse_args()
print(args)
# save results
save_name_pre = '{}_E{}_B{}_LX{}_LU{}_LCLR{}_THR{}_LR{}_WD{}'.format(args.n_labeled, args.n_epochs, args.batchsize,
args.lam_x, args.lam_u, args.lam_clr, args.thr, args.lr, args.weight_decay)
ticks = time.time()
result_dir = 'results/' + save_name_pre + '.' + str(ticks)
if not os.path.exists(result_dir):
os.mkdir(result_dir)
def set_model():
model = WideResnet(args.n_classes, k=args.wresnet_k, n=args.wresnet_n, feature_dim=args.feature_dim) # wresnet-28-2
model.train()
model.cuda()
criteria_x = nn.CrossEntropyLoss().cuda()
criteria_u = nn.CrossEntropyLoss().cuda()
return model, criteria_x, criteria_u
def train_one_epoch(
model,
criteria_x,
criteria_u,
optim,
lr_schdlr,
ema,
dltrain_x,
dltrain_u,
dltrain_all,
lb_guessor,
):
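    # One FixMatch-style epoch: supervised cross-entropy on weakly augmented labeled data, pseudo-label
    # cross-entropy on strongly augmented unlabeled data (when lam_u > 0), and an optional NT-Xent
    # contrastive term over two augmented views (when lam_clr > 0).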
loss_avg, loss_x_avg, loss_u_avg, loss_clr_avg = [], [], [], []
epsilon = 0.000001
dl_u, dl_all = iter(dltrain_u), iter(dltrain_all)
for _, _, ims_all_1, ims_all_2, _ in tqdm(dl_all, desc='Training ...'):
ims_u_weak, ims_u_strong, _, _, lbs_u = next(dl_u)
loss_x, loss_u, loss_clr = torch.tensor(0).cuda(), torch.tensor(0).cuda(), torch.tensor(0).cuda()
fv_1, fv_2 = torch.tensor(0).cuda(), torch.tensor(0).cuda()
ims_u_weak = ims_u_weak.cuda()
ims_u_strong = ims_u_strong.cuda()
ims_all_1 = ims_all_1.cuda(non_blocking=True)
ims_all_2 = ims_all_2.cuda(non_blocking=True)
dl_x = iter(dltrain_x)
ims_x_weak, _, _, _, lbs_x = next(dl_x)
ims_x_weak = ims_x_weak.cuda()
lbs_x = lbs_x.cuda()
n_x, n_u, n_all = 0, 0, 0
        if args.lam_u >= epsilon and args.lam_clr >= epsilon: #pseudo-labeling and contrastive learning
lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
ims_u_strong = ims_u_strong[valid_u]
n_x, n_u, n_all = ims_x_weak.size(0), ims_u_strong.size(0), ims_all_1.size(0)
if n_u != 0:
ims_x_u_all_1 = torch.cat([ims_x_weak, ims_u_strong, ims_all_1], dim=0).detach()
ims_x_u_all_2 = torch.cat([ims_x_weak, ims_u_strong, ims_all_2], dim=0).detach()
logits_x_u_all_1, fv_1, z_1 = model(ims_x_u_all_1)
logits_x_u_all_2, fv_2, z_2 = model(ims_x_u_all_2)
logits_x_u_all = (logits_x_u_all_1 + logits_x_u_all_2) / 2
logits_x, logits_u = logits_x_u_all[:n_x], logits_x_u_all[n_x:(n_x + n_u)]
loss_x = criteria_x(logits_x, lbs_x)
if args.balance == 2 or args.balance == 3:
loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).mean()
else:
loss_u = criteria_u(logits_u, lbs_u)
else: # n_u == 0
ims_x_all_1 = torch.cat([ims_x_weak, ims_all_1], dim=0).detach()
ims_x_all_2 = torch.cat([ims_x_weak, ims_all_2], dim=0).detach()
logits_x_all_1, fv_1, z_1 = model(ims_x_all_1)
logits_x_all_2, fv_2, z_2 = model(ims_x_all_2)
logits_x_all = (logits_x_all_1 + logits_x_all_2) / 2
logits_x = logits_x_all[:n_x]
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
elif args.lam_u >= epsilon: #lam_clr == 0: pseudo-labeling only
lbs_u, valid_u, mask_u = lb_guessor(model, ims_u_weak, args.balance, args.delT)
ims_u_strong = ims_u_strong[valid_u]
n_x, n_u = ims_x_weak.size(0), ims_u_strong.size(0)
if n_u != 0:
ims_x_u = torch.cat([ims_x_weak, ims_u_strong], dim=0).detach()
logits_x_u, _, _ = model(ims_x_u)
logits_x, logits_u = logits_x_u[:n_x], logits_x_u[n_x:]
loss_x = criteria_x(logits_x, lbs_x)
if args.balance == 2 or args.balance == 3:
loss_u = (F.cross_entropy(logits_u, lbs_u, reduction='none') * mask_u).mean()
else:
loss_u = criteria_u(logits_u, lbs_u)
else: # n_u == 0
logits_x, _, _ = model(ims_x_weak)
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
else: #lam_u == 0: contrastive learning only
n_x, n_all = ims_x_weak.size(0), ims_all_1.size(0)
ims_x_all_1 = torch.cat([ims_x_weak, ims_all_1], dim=0).detach()
ims_x_all_2 = torch.cat([ims_x_weak, ims_all_2], dim=0).detach()
logits_x_all_1, fv_1, z_1 = model(ims_x_all_1)
logits_x_all_2, fv_2, z_2 = model(ims_x_all_2)
logits_x_all = (logits_x_all_1 + logits_x_all_2) / 2
logits_x = logits_x_all[:n_x]
loss_x = criteria_x(logits_x, lbs_x)
loss_u = torch.tensor(0)
if args.lam_clr >= epsilon:
#compute l_clr
fv_1 = fv_1[(n_x + n_u):]
fv_2 = fv_2[(n_x + n_u):]
z_1 = z_1[(n_x + n_u):]
z_2 = z_2[(n_x + n_u):]
#[2*muc*B, D]
z = torch.cat([z_1, z_2], dim=0)
#[2*muc*B, 2*muc*B]
sim_matrix = torch.exp(torch.mm(z, z.t().contiguous()) / args.temperature) #denominator
#[2*muc*B, 2*muc*B]
# mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device)).bool()
mask = (torch.ones_like(sim_matrix) - torch.eye(2 * args.mu_c * args.batchsize, device=sim_matrix.device))
mask = mask > 0
#[2*muc*B, 2*muc*B - 1]
sim_matrix = sim_matrix.masked_select(mask).view(2 * args.mu_c * args.batchsize, -1)
#[muc*B]
pos_sim = torch.exp(torch.sum(z_1 * z_2, dim=-1) / args.temperature) #numerator
#[2*muc*B]
pos_sim = torch.cat([pos_sim, pos_sim], dim=0)
loss_clr = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()
#compute loss
loss = args.lam_x * loss_x + args.lam_u * loss_u + args.lam_clr * loss_clr
optim.zero_grad()
loss.backward()
optim.step()
ema.update_params()
lr_schdlr.step()
loss_x_avg.append(loss_x.item())
loss_u_avg.append(loss_u.item())
loss_clr_avg.append(loss_clr.item())
loss_avg.append(loss.item())
ema.update_buffer()
def evaluate(ema):
ema.apply_shadow()
ema.model.eval()
ema.model.cuda()
dlval = get_val_loader(batch_size=128, num_workers=0)
matches = []
for ims, lbs in dlval:
ims = ims.cuda()
lbs = lbs.cuda()
with torch.no_grad():
logits, _, _ = ema.model(ims)
scores = torch.softmax(logits, dim=1)
_, preds = torch.max(scores, dim=1)
match = lbs == preds
matches.append(match)
matches = torch.cat(matches, dim=0).float()
acc = torch.mean(matches)
ema.restore()
return acc
def test(model, memory_data_loader, test_data_loader, c, epoch):
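    # Weighted kNN evaluation: build a feature bank from the memory (train) loader, then predict each
    # test label by temperature-scaled similarity voting over its k nearest bank features.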
model.eval()
total_top1, total_top5, total_num, feature_bank, feature_labels = 0.0, 0.0, 0, [], []
with torch.no_grad():
# generate feature bank
for data, _, _ in tqdm(memory_data_loader, desc='Feature extracting'):
logits, feature, _ = model(data.cuda(non_blocking=True))
feature_bank.append(feature)
feature_labels.append(torch.tensor(torch.argmax(logits,dim=1),dtype=torch.int64))
# [D, N]
feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
# [N]
feature_labels = torch.cat(feature_labels, dim=0).contiguous().cpu()
# loop test data to predict the label by weighted knn search
test_bar = tqdm(test_data_loader)
for data, _, target in test_bar:
# data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
data = data.cuda(non_blocking=True)
_, feature, _ = model(data)
total_num += data.size(0)
# compute cos similarity between each feature vector and feature bank ---> [B, N]
sim_matrix = torch.mm(feature, feature_bank)
# [B, K]
sim_weight, sim_indices = sim_matrix.topk(k=args.k, dim=-1)
# [B, K]
# sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)
sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices.cpu())
sim_weight = (sim_weight / args.temperature).exp()
# counts for each class
one_hot_label = torch.zeros(data.size(0) * args.k, c, device=sim_labels.device)
# [B*K, C]
one_hot_label = one_hot_label.scatter(-1, sim_labels.view(-1, 1), 1.0)
# weighted score ---> [B, C]
pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.cpu().unsqueeze(dim=-1), dim=1)
pred_labels = pred_scores.argsort(dim=-1, descending=True)
total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}%'
.format(epoch, args.n_epochs, total_top1 / total_num * 100))
return total_top1 / total_num * 100
def get_random_string(length):
letters = string.ascii_lowercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
def sort_unlabeled(ema,numPerClass):
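    # Rank all training samples by prediction confidence and keep the numPerClass most confident per
    # predicted class; the selected indices are saved to a seed .npy file whose name is returned.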
ema.apply_shadow()
ema.model.eval()
ema.model.cuda()
n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
_, _, dltrain_all = get_train_loader(args.batchsize, 1, 1, n_iters_per_epoch, L=args.n_classes*numPerClass, seed=args.seed)
predicted = []
labels = []
for ims_w, _, _, _, lbs in dltrain_all:
ims = ims_w.cuda()
labels.append(lbs)
with torch.no_grad():
logits, _, _ = ema.model(ims)
scores = torch.softmax(logits, dim=1)
predicted.append(scores.cpu())
print( "labels ",len(labels))
labels = np.concatenate(labels, axis=0)
print( "labels ",len(labels))
predicted = np.concatenate( predicted, axis=0)
preds = predicted.argmax(1)
probs = predicted.max(1)
top = np.argsort(-probs,axis=0)
del dltrain_all, logits
labeledSize =args.n_classes * numPerClass
unique_train_pseudo_labels, unique_train_counts = np.unique(preds, return_counts=True)
print("Number of training pseudo-labels in each class: ", unique_train_counts," for classes: ", unique_train_pseudo_labels)
sortByClass = np.random.randint(0,high=len(top), size=(args.n_classes, numPerClass), dtype=int)
indx = np.zeros([args.n_classes], dtype=int)
matches = np.zeros([args.n_classes, numPerClass], dtype=int)
labls = preds[top]
samples = top
for i in range(len(top)):
if indx[labls[i]] < numPerClass:
sortByClass[labls[i], indx[labls[i]]] = samples[i]
if labls[i] == labels[top[i]]:
matches[labls[i], indx[labls[i]]] = 1
indx[labls[i]] += 1
if min(indx) < numPerClass:
print("Counts of at least one class ", indx, " is lower than ", numPerClass)
name = "dataset/seeds/size"+str(labeledSize)+"." + get_random_string(8) + ".npy"
np.save(name, sortByClass[0:args.n_classes, :numPerClass])
classAcc = 100*np.sum(matches, axis=1)/numPerClass
print("Accuracy of the predicted pseudo-labels: top ", labeledSize, ", ", np.mean(classAcc), classAcc )
ema.restore()
return name
def train():
n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize
n_iters_all = n_iters_per_epoch * args.n_epochs #/ args.mu_c
epsilon = 0.000001
model, criteria_x, criteria_u = set_model()
lb_guessor = LabelGuessor(thresh=args.thr)
ema = EMA(model, args.ema_alpha)
wd_params, non_wd_params = [], []
for param in model.parameters():
if len(param.size()) == 1:
non_wd_params.append(param)
else:
wd_params.append(param)
param_list = [{'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
optim = torch.optim.SGD(param_list, lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum, nesterov=True)
lr_schdlr = WarmupCosineLrScheduler(optim, max_iter=n_iters_all, warmup_iter=0)
dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
L=args.n_labeled, seed=args.seed)
train_args = dict(
model=model,
criteria_x=criteria_x,
criteria_u=criteria_u,
optim=optim,
lr_schdlr=lr_schdlr,
ema=ema,
dltrain_x=dltrain_x,
dltrain_u=dltrain_u,
dltrain_all=dltrain_all,
lb_guessor=lb_guessor,
)
n_labeled = int(args.n_labeled / args.n_classes)
best_acc, top1 = -1, -1
results = {'top 1 acc': [], 'best_acc': []}
b_schedule = [args.n_epochs/2, 3*args.n_epochs/4]
if args.boot_schedule == 1:
step = int(args.n_epochs/3)
b_schedule = [step, 2*step]
elif args.boot_schedule == 2:
step = int(args.n_epochs/4)
b_schedule = [step, 2*step, 3*step]
for e in range(args.n_epochs):
if args.bootstrap > 1 and (e in b_schedule):
seed = 99
n_labeled *= args.bootstrap
name = sort_unlabeled(ema, n_labeled)
print("Bootstrap at epoch ", e," Name = ",name)
dltrain_x, dltrain_u, dltrain_all = get_train_loader(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch,
L=10*n_labeled, seed=seed, name=name)
train_args = dict(
model=model,
criteria_x=criteria_x,
criteria_u=criteria_u,
optim=optim,
lr_schdlr=lr_schdlr,
ema=ema,
dltrain_x=dltrain_x,
dltrain_u=dltrain_u,
dltrain_all=dltrain_all,
lb_guessor=lb_guessor,
)
model.train()
train_one_epoch(**train_args)
torch.cuda.empty_cache()
if args.test == 0 or args.lam_clr < epsilon:
top1 = evaluate(ema) * 100
elif args.test == 1:
memory_data = utils.CIFAR10Pair(root='dataset', train=True, transform=utils.test_transform, download=False)
memory_data_loader = DataLoader(memory_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
test_data = utils.CIFAR10Pair(root='dataset', train=False, transform=utils.test_transform, download=False)
test_data_loader = DataLoader(test_data, batch_size=args.batchsize, shuffle=False, num_workers=16, pin_memory=True)
c = len(memory_data.classes) #10
top1 = test(model, memory_data_loader, test_data_loader, c, e)
best_acc = top1 if best_acc < top1 else best_acc
results['top 1 acc'].append('{:.4f}'.format(top1))
results['best_acc'].append('{:.4f}'.format(best_acc))
data_frame = pd.DataFrame(data=results)
data_frame.to_csv(result_dir + '/' + save_name_pre + '.accuracy.csv', index_label='epoch')
log_msg = [
'epoch: {}'.format(e + 1),
'top 1 acc: {:.4f}'.format(top1),
'best_acc: {:.4f}'.format(best_acc)]
print(', '.join(log_msg))
if __name__ == '__main__':
train()
| [
"torch.nn.CrossEntropyLoss",
"torch.max",
"numpy.argsort",
"lr_scheduler.WarmupCosineLrScheduler",
"torch.softmax",
"torch.sum",
"utils.CIFAR10Pair",
"numpy.save",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"torch.mean",
"torch.eye",
"os.mkdir",
"numpy.concatenate",
"pandas.DataFrame",
"ema.EMA",
"torch.argmax",
"torch.optim.SGD",
"random.choice",
"torch.ones_like",
"label_guessor.LabelGuessor",
"cifar.get_val_loader",
"time.time",
"torch.cuda.empty_cache",
"torch.cat",
"numpy.unique",
"model.WideResnet",
"tqdm.tqdm",
"torch.mm",
"numpy.sum",
"numpy.zeros",
"torch.tensor",
"torch.nn.functional.cross_entropy",
"torch.utils.data.DataLoader",
"torch.no_grad",
"cifar.get_train_loader"
]
| [((454, 511), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""" FixMatch Training"""'}), "(description=' FixMatch Training')\n", (477, 511), False, 'import argparse\n'), ((3352, 3363), 'time.time', 'time.time', ([], {}), '()\n', (3361, 3363), False, 'import time\n'), ((3430, 3456), 'os.path.exists', 'os.path.exists', (['result_dir'], {}), '(result_dir)\n', (3444, 3456), False, 'import os\n'), ((3462, 3482), 'os.mkdir', 'os.mkdir', (['result_dir'], {}), '(result_dir)\n', (3470, 3482), False, 'import os\n'), ((3513, 3610), 'model.WideResnet', 'WideResnet', (['args.n_classes'], {'k': 'args.wresnet_k', 'n': 'args.wresnet_n', 'feature_dim': 'args.feature_dim'}), '(args.n_classes, k=args.wresnet_k, n=args.wresnet_n, feature_dim=\n args.feature_dim)\n', (3523, 3610), False, 'from model import WideResnet\n'), ((4198, 4231), 'tqdm.tqdm', 'tqdm', (['dl_all'], {'desc': '"""Training ..."""'}), "(dl_all, desc='Training ...')\n", (4202, 4231), False, 'from tqdm import tqdm\n'), ((9834, 9879), 'cifar.get_val_loader', 'get_val_loader', ([], {'batch_size': '(128)', 'num_workers': '(0)'}), '(batch_size=128, num_workers=0)\n', (9848, 9879), False, 'from cifar import get_train_loader, get_val_loader\n'), ((10270, 10289), 'torch.mean', 'torch.mean', (['matches'], {}), '(matches)\n', (10280, 10289), False, 'import torch\n'), ((13106, 13215), 'cifar.get_train_loader', 'get_train_loader', (['args.batchsize', '(1)', '(1)', 'n_iters_per_epoch'], {'L': '(args.n_classes * numPerClass)', 'seed': 'args.seed'}), '(args.batchsize, 1, 1, n_iters_per_epoch, L=args.n_classes *\n numPerClass, seed=args.seed)\n', (13122, 13215), False, 'from cifar import get_train_loader, get_val_loader\n'), ((13557, 13587), 'numpy.concatenate', 'np.concatenate', (['labels'], {'axis': '(0)'}), '(labels, axis=0)\n', (13571, 13587), True, 'import numpy as np\n'), ((13638, 13671), 'numpy.concatenate', 'np.concatenate', (['predicted'], {'axis': '(0)'}), '(predicted, axis=0)\n', (13652, 13671), True, 'import numpy as np\n'), ((13744, 13770), 'numpy.argsort', 'np.argsort', (['(-probs)'], {'axis': '(0)'}), '(-probs, axis=0)\n', (13754, 13770), True, 'import numpy as np\n'), ((13924, 13960), 'numpy.unique', 'np.unique', (['preds'], {'return_counts': '(True)'}), '(preds, return_counts=True)\n', (13933, 13960), True, 'import numpy as np\n'), ((14200, 14237), 'numpy.zeros', 'np.zeros', (['[args.n_classes]'], {'dtype': 'int'}), '([args.n_classes], dtype=int)\n', (14208, 14237), True, 'import numpy as np\n'), ((14252, 14302), 'numpy.zeros', 'np.zeros', (['[args.n_classes, numPerClass]'], {'dtype': 'int'}), '([args.n_classes, numPerClass], dtype=int)\n', (14260, 14302), True, 'import numpy as np\n'), ((14816, 14874), 'numpy.save', 'np.save', (['name', 'sortByClass[0:args.n_classes, :numPerClass]'], {}), '(name, sortByClass[0:args.n_classes, :numPerClass])\n', (14823, 14874), True, 'import numpy as np\n'), ((15307, 15336), 'label_guessor.LabelGuessor', 'LabelGuessor', ([], {'thresh': 'args.thr'}), '(thresh=args.thr)\n', (15319, 15336), False, 'from label_guessor import LabelGuessor\n'), ((15347, 15373), 'ema.EMA', 'EMA', (['model', 'args.ema_alpha'], {}), '(model, args.ema_alpha)\n', (15350, 15373), False, 'from ema import EMA\n'), ((15674, 15788), 'torch.optim.SGD', 'torch.optim.SGD', (['param_list'], {'lr': 'args.lr', 'weight_decay': 'args.weight_decay', 'momentum': 'args.momentum', 'nesterov': '(True)'}), '(param_list, lr=args.lr, weight_decay=args.weight_decay,\n momentum=args.momentum, nesterov=True)\n', (15689, 15788), 
False, 'import torch\n'), ((15801, 15868), 'lr_scheduler.WarmupCosineLrScheduler', 'WarmupCosineLrScheduler', (['optim'], {'max_iter': 'n_iters_all', 'warmup_iter': '(0)'}), '(optim, max_iter=n_iters_all, warmup_iter=0)\n', (15824, 15868), False, 'from lr_scheduler import WarmupCosineLrScheduler\n'), ((15910, 16020), 'cifar.get_train_loader', 'get_train_loader', (['args.batchsize', 'args.mu', 'args.mu_c', 'n_iters_per_epoch'], {'L': 'args.n_labeled', 'seed': 'args.seed'}), '(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch, L=\n args.n_labeled, seed=args.seed)\n', (15926, 16020), False, 'from cifar import get_train_loader, get_val_loader\n'), ((10505, 10520), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10518, 10520), False, 'import torch\n'), ((10580, 10631), 'tqdm.tqdm', 'tqdm', (['memory_data_loader'], {'desc': '"""Feature extracting"""'}), "(memory_data_loader, desc='Feature extracting')\n", (10584, 10631), False, 'from tqdm import tqdm\n'), ((11104, 11126), 'tqdm.tqdm', 'tqdm', (['test_data_loader'], {}), '(test_data_loader)\n', (11108, 11126), False, 'from tqdm import tqdm\n'), ((15010, 15027), 'numpy.mean', 'np.mean', (['classAcc'], {}), '(classAcc)\n', (15017, 15027), True, 'import numpy as np\n'), ((17740, 17764), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (17762, 17764), False, 'import torch\n'), ((18719, 18745), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'results'}), '(data=results)\n', (18731, 18745), True, 'import pandas as pd\n'), ((3673, 3694), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3692, 3694), True, 'import torch.nn as nn\n'), ((3719, 3740), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3738, 3740), True, 'import torch.nn as nn\n'), ((8427, 8455), 'torch.cat', 'torch.cat', (['[z_1, z_2]'], {'dim': '(0)'}), '([z_1, z_2], dim=0)\n', (8436, 8455), False, 'import torch\n'), ((9185, 9221), 'torch.cat', 'torch.cat', (['[pos_sim, pos_sim]'], {'dim': '(0)'}), '([pos_sim, pos_sim], dim=0)\n', (9194, 9221), False, 'import torch\n'), ((9988, 10003), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10001, 10003), False, 'import torch\n'), ((10068, 10096), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (10081, 10096), False, 'import torch\n'), ((10120, 10144), 'torch.max', 'torch.max', (['scores'], {'dim': '(1)'}), '(scores, dim=1)\n', (10129, 10144), False, 'import torch\n'), ((10226, 10251), 'torch.cat', 'torch.cat', (['matches'], {'dim': '(0)'}), '(matches, dim=0)\n', (10235, 10251), False, 'import torch\n'), ((11515, 11546), 'torch.mm', 'torch.mm', (['feature', 'feature_bank'], {}), '(feature, feature_bank)\n', (11523, 11546), False, 'import torch\n'), ((12845, 12867), 'random.choice', 'random.choice', (['letters'], {}), '(letters)\n', (12858, 12867), False, 'import random\n'), ((13357, 13372), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13370, 13372), False, 'import torch\n'), ((13438, 13466), 'torch.softmax', 'torch.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (13451, 13466), False, 'import torch\n'), ((14895, 14918), 'numpy.sum', 'np.sum', (['matches'], {'axis': '(1)'}), '(matches, axis=1)\n', (14901, 14918), True, 'import numpy as np\n'), ((17098, 17214), 'cifar.get_train_loader', 'get_train_loader', (['args.batchsize', 'args.mu', 'args.mu_c', 'n_iters_per_epoch'], {'L': '(10 * n_labeled)', 'seed': 'seed', 'name': 'name'}), '(args.batchsize, args.mu, args.mu_c, n_iters_per_epoch, L=\n 10 * n_labeled, 
seed=seed, name=name)\n', (17114, 17214), False, 'from cifar import get_train_loader, get_val_loader\n'), ((6509, 6524), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (6521, 6524), False, 'import torch\n'), ((8140, 8155), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (8152, 8155), False, 'import torch\n'), ((8767, 8794), 'torch.ones_like', 'torch.ones_like', (['sim_matrix'], {}), '(sim_matrix)\n', (8782, 8794), False, 'import torch\n'), ((8797, 8864), 'torch.eye', 'torch.eye', (['(2 * args.mu_c * args.batchsize)'], {'device': 'sim_matrix.device'}), '(2 * args.mu_c * args.batchsize, device=sim_matrix.device)\n', (8806, 8864), False, 'import torch\n'), ((17913, 18011), 'utils.CIFAR10Pair', 'utils.CIFAR10Pair', ([], {'root': '"""dataset"""', 'train': '(True)', 'transform': 'utils.test_transform', 'download': '(False)'}), "(root='dataset', train=True, transform=utils.\n test_transform, download=False)\n", (17930, 18011), False, 'import utils\n'), ((18040, 18142), 'torch.utils.data.DataLoader', 'DataLoader', (['memory_data'], {'batch_size': 'args.batchsize', 'shuffle': '(False)', 'num_workers': '(16)', 'pin_memory': '(True)'}), '(memory_data, batch_size=args.batchsize, shuffle=False,\n num_workers=16, pin_memory=True)\n', (18050, 18142), False, 'from torch.utils.data import DataLoader\n'), ((18163, 18262), 'utils.CIFAR10Pair', 'utils.CIFAR10Pair', ([], {'root': '"""dataset"""', 'train': '(False)', 'transform': 'utils.test_transform', 'download': '(False)'}), "(root='dataset', train=False, transform=utils.\n test_transform, download=False)\n", (18180, 18262), False, 'import utils\n'), ((18289, 18390), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': 'args.batchsize', 'shuffle': '(False)', 'num_workers': '(16)', 'pin_memory': '(True)'}), '(test_data, batch_size=args.batchsize, shuffle=False, num_workers\n =16, pin_memory=True)\n', (18299, 18390), False, 'from torch.utils.data import DataLoader\n'), ((4328, 4343), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4340, 4343), False, 'import torch\n'), ((4352, 4367), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4364, 4367), False, 'import torch\n'), ((4376, 4391), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4388, 4391), False, 'import torch\n'), ((4420, 4435), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4432, 4435), False, 'import torch\n'), ((4444, 4459), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (4456, 4459), False, 'import torch\n'), ((7509, 7524), 'torch.tensor', 'torch.tensor', (['(0)'], {}), '(0)\n', (7521, 7524), False, 'import torch\n'), ((9080, 9108), 'torch.sum', 'torch.sum', (['(z_1 * z_2)'], {'dim': '(-1)'}), '(z_1 * z_2, dim=-1)\n', (9089, 9108), False, 'import torch\n'), ((10790, 10817), 'torch.argmax', 'torch.argmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (10802, 10817), False, 'import torch\n'), ((5243, 5298), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_u_strong, ims_all_1]'], {'dim': '(0)'}), '([ims_x_weak, ims_u_strong, ims_all_1], dim=0)\n', (5252, 5298), False, 'import torch\n'), ((5340, 5395), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_u_strong, ims_all_2]'], {'dim': '(0)'}), '([ims_x_weak, ims_u_strong, ims_all_2], dim=0)\n', (5349, 5395), False, 'import torch\n'), ((6058, 6099), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_1]'], {'dim': '(0)'}), '([ims_x_weak, ims_all_1], dim=0)\n', (6067, 6099), False, 'import torch\n'), ((6139, 6180), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_2]'], {'dim': 
'(0)'}), '([ims_x_weak, ims_all_2], dim=0)\n', (6148, 6180), False, 'import torch\n'), ((7717, 7758), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_1]'], {'dim': '(0)'}), '([ims_x_weak, ims_all_1], dim=0)\n', (7726, 7758), False, 'import torch\n'), ((7794, 7835), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_all_2]'], {'dim': '(0)'}), '([ims_x_weak, ims_all_2], dim=0)\n', (7803, 7835), False, 'import torch\n'), ((10877, 10907), 'torch.cat', 'torch.cat', (['feature_bank'], {'dim': '(0)'}), '(feature_bank, dim=0)\n', (10886, 10907), False, 'import torch\n'), ((10964, 10996), 'torch.cat', 'torch.cat', (['feature_labels'], {'dim': '(0)'}), '(feature_labels, dim=0)\n', (10973, 10996), False, 'import torch\n'), ((6882, 6926), 'torch.cat', 'torch.cat', (['[ims_x_weak, ims_u_strong]'], {'dim': '(0)'}), '([ims_x_weak, ims_u_strong], dim=0)\n', (6891, 6926), False, 'import torch\n'), ((5847, 5897), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_u', 'lbs_u'], {'reduction': '"""none"""'}), "(logits_u, lbs_u, reduction='none')\n", (5862, 5897), True, 'import torch.nn.functional as F\n'), ((7200, 7250), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits_u', 'lbs_u'], {'reduction': '"""none"""'}), "(logits_u, lbs_u, reduction='none')\n", (7215, 7250), True, 'import torch.nn.functional as F\n')] |
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
data = pd.read_csv("data.csv")
data.info()
"""
Data columns (total 33 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 569 non-null int64
.
.
.
32 Unnamed: 32 0 non-null float64
"""
data.drop(["Unnamed: 32", "id"], axis = 1, inplace = True)
# data.head(10)
data.diagnosis = [1 if each == "M" else 0 for each in data.diagnosis]
y = data.diagnosis.values
x_data = data.drop(["diagnosis"], axis = 1)
# %% Normalization
x_normalized = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x_data.head()
"""
x_data.head()
Out[9]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 17.99 10.38 ... 0.4601 0.11890
1 20.57 17.77 ... 0.2750 0.08902
2 19.69 21.25 ... 0.3613 0.08758
3 11.42 20.38 ... 0.6638 0.17300
4 20.29 14.34 ... 0.2364 0.07678
"""
x_normalized.head()
"""
x_normalized.head()
Out[10]:
radius_mean texture_mean ... symmetry_worst fractal_dimension_worst
0 0.521037 0.022658 ... 0.598462 0.418864
1 0.643144 0.272574 ... 0.233590 0.222878
2 0.601496 0.390260 ... 0.403706 0.213433
3 0.210090 0.360839 ... 1.000000 0.773711
4 0.629893 0.156578 ... 0.157500 0.142595
"""
# %% train test split
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x_normalized,y,test_size = 0.25, random_state = 42)
# test size & random state can be changed; test size can be chosen as 0.2 or 0.18
# sklearn splits randomly; with a given random state the data will be split with the same pattern.
# rows as features
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
# %% Parameter Initialize
"""
If all the weights were initialized to zero,
backpropagation would not work as expected because the gradients for the intermediate
and starting neurons would die out (become zero) and never update.
"""
def initialize_weights_and_bias(dimension):
w = np.full((dimension,1), 0.01) # init 0.01
b = np.zeros(1)
return w,b
def sigmoid(n):
y_hat = 1 / (1 + np.exp(-n))
return y_hat
# %%
def forward_backward_propagation(w,b,x_train,y_train):
# forward propagation
z = np.dot(w.T,x_train) + b
#y_train = y_train.T.reshape(-1,1)
y_hat = sigmoid(z)
loss = -(y_train*np.log(y_hat)+(1-y_train)*np.log(1-y_hat))
cost = (np.sum(loss))/x_train.shape[1] # x_train.shape[1] is for scaling
# Once cost is calculated, forward prop. is completed.
# backward propagation
derivative_weight = (np.dot(x_train,((y_hat-y_train).T)))/x_train.shape[1] # x_train.shape[1] is for scaling
derivative_bias = np.sum(y_hat-y_train)/x_train.shape[1] # x_train.shape[1] is for scaling
# x_train.shape[1] = 426
gradients = {"derivative_weight": derivative_weight,"derivative_bias": derivative_bias}
return cost,gradients
# Updating(learning) parameters
def update(w, b, x_train, y_train, learning_rate,number_of_iteration):
cost_list = []
cost_list2 = []
index = []
# updating(learning) parameters is number_of_iterarion times
for i in range(number_of_iteration):
# make forward and backward propagation and find cost and gradients
cost,gradients = forward_backward_propagation(w,b,x_train,y_train)
cost_list.append(cost)
# lets update
w = w - learning_rate * gradients["derivative_weight"]
b = b - learning_rate * gradients["derivative_bias"]
if i % 100 == 0: # that's arbitrary, you can set it differently
cost_list2.append(cost)
index.append(i)
print ("Cost after iteration %i: %f" %(i, cost))
# we update(learn) parameters weights and bias
parameters = {"weight": w,"bias": b}
plt.plot(index,cost_list2)
plt.xticks(index,rotation='vertical')
plt.xlabel("Number of Iteration")
plt.ylabel("Cost")
plt.legend()
plt.show()
return parameters, gradients, cost_list
# prediction
def predict(w,b,x_test):
# x_test is a input for forward propagation
z = sigmoid(np.dot(w.T,x_test)+b)
Y_prediction = np.zeros((1,x_test.shape[1]))
    # if z is bigger than 0.5, our prediction is one - true (y_hat=1),
    # if z is smaller than or equal to 0.5, our prediction is zero - false (y_hat=0),
for i in range(z.shape[1]):
if z[0,i]<= 0.5:
Y_prediction[0,i] = 0
else:
Y_prediction[0,i] = 1
return Y_prediction
#implementing logistic regression
def logistic_regression(x_train, y_train, x_test, y_test, learning_rate , num_iterations):
# initialize
dimension = x_train.shape[0]
w,b = initialize_weights_and_bias(dimension)
# do not change learning rate
parameters, gradients, cost_list = update(w, b, x_train, y_train, learning_rate,num_iterations)
y_prediction_test = predict(parameters["weight"],parameters["bias"],x_test)
y_pred_train = predict(parameters["weight"],parameters["bias"],x_train)
# Print accuracy
print("test accuracy: {} %".format(100 - np.mean(np.abs(y_prediction_test - y_test)) * 100))
print("train accuracy: {} %".format(100 - np.mean(np.abs(y_pred_train - y_train)) * 100))
# %% Hyperparameter tuning
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.153169
Cost after iteration 200: 0.121662
Cost after iteration 300: 0.107146
Cost after iteration 400: 0.098404
Cost after iteration 500: 0.092401
Cost after iteration 600: 0.087937
Cost after iteration 700: 0.084435
Cost after iteration 800: 0.081582
Cost after iteration 900: 0.079191
Cost after iteration 1000: 0.077143
Cost after iteration 1100: 0.075359
Cost after iteration 1200: 0.073784
Cost after iteration 1300: 0.072378
Cost after iteration 1400: 0.071111
No handles with labels found to put in legend.
test accuracy: 98.6013986013986 %
train accuracy: 98.35680751173709 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 1, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.226383
Cost after iteration 200: 0.176670
Cost after iteration 300: 0.153585
Cost after iteration 400: 0.139306
Cost after iteration 500: 0.129319
Cost after iteration 600: 0.121835
Cost after iteration 700: 0.115963
Cost after iteration 800: 0.111204
Cost after iteration 900: 0.107248
No handles with labels found to put in legend.
Cost after iteration 1000: 0.103893
Cost after iteration 1100: 0.101001
Cost after iteration 1200: 0.098474
Cost after iteration 1300: 0.096240
Cost after iteration 1400: 0.094247
test accuracy: 97.9020979020979 %
train accuracy: 98.12206572769954 %
"""
logistic_regression(x_train, y_train, x_test, y_test,learning_rate = 0.3, num_iterations = 1500)
"""
Cost after iteration 0: 0.693035
Cost after iteration 100: 0.357455
Cost after iteration 200: 0.274917
Cost after iteration 300: 0.235865
Cost after iteration 400: 0.212165
Cost after iteration 500: 0.195780
Cost after iteration 600: 0.183524
Cost after iteration 700: 0.173868
Cost after iteration 800: 0.165980
Cost after iteration 900: 0.159363
Cost after iteration 1000: 0.153700
Cost after iteration 1100: 0.148775
Cost after iteration 1200: 0.144439
Cost after iteration 1300: 0.140581
Cost after iteration 1400: 0.137119
No handles with labels found to put in legend.
test accuracy: 97.9020979020979 %
train accuracy: 96.94835680751174 %
"""
# %% Sklearn
from sklearn.linear_model import LogisticRegression
x_train = x_train.T
x_test = x_test.T
y_train = y_train.T
y_test = y_test.T
logreg = LogisticRegression(random_state = 42,max_iter= 1500)
print("test accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_test, y_test)))
print("train accuracy: {} ".format(logreg.fit(x_train, y_train).score(x_train, y_train)))
"""
test accuracy: 0.986013986013986
train accuracy: 0.9671361502347418
"""
# %%
| [
"numpy.abs",
"pandas.read_csv",
"matplotlib.pyplot.xticks",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.log",
"sklearn.linear_model.LogisticRegression",
"numpy.max",
"numpy.sum",
"numpy.zeros",
"numpy.dot",
"numpy.exp",
"numpy.min",
"numpy.full",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
]
| [((101, 124), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (112, 124), True, 'import pandas as pd\n'), ((1836, 1902), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_normalized', 'y'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(x_normalized, y, test_size=0.25, random_state=42)\n', (1852, 1902), False, 'from sklearn.model_selection import train_test_split\n'), ((8143, 8193), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(42)', 'max_iter': '(1500)'}), '(random_state=42, max_iter=1500)\n', (8161, 8193), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2473, 2502), 'numpy.full', 'np.full', (['(dimension, 1)', '(0.01)'], {}), '((dimension, 1), 0.01)\n', (2480, 2502), True, 'import numpy as np\n'), ((2522, 2533), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2530, 2533), True, 'import numpy as np\n'), ((4282, 4309), 'matplotlib.pyplot.plot', 'plt.plot', (['index', 'cost_list2'], {}), '(index, cost_list2)\n', (4290, 4309), True, 'import matplotlib.pyplot as plt\n'), ((4313, 4351), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index'], {'rotation': '"""vertical"""'}), "(index, rotation='vertical')\n", (4323, 4351), True, 'import matplotlib.pyplot as plt\n'), ((4355, 4388), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Iteration"""'], {}), "('Number of Iteration')\n", (4365, 4388), True, 'import matplotlib.pyplot as plt\n'), ((4393, 4411), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (4403, 4411), True, 'import matplotlib.pyplot as plt\n'), ((4416, 4428), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4426, 4428), True, 'import matplotlib.pyplot as plt\n'), ((4433, 4443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4441, 4443), True, 'import matplotlib.pyplot as plt\n'), ((4632, 4662), 'numpy.zeros', 'np.zeros', (['(1, x_test.shape[1])'], {}), '((1, x_test.shape[1]))\n', (4640, 4662), True, 'import numpy as np\n'), ((662, 676), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (668, 676), True, 'import numpy as np\n'), ((2712, 2732), 'numpy.dot', 'np.dot', (['w.T', 'x_train'], {}), '(w.T, x_train)\n', (2718, 2732), True, 'import numpy as np\n'), ((2874, 2886), 'numpy.sum', 'np.sum', (['loss'], {}), '(loss)\n', (2880, 2886), True, 'import numpy as np\n'), ((3061, 3097), 'numpy.dot', 'np.dot', (['x_train', '(y_hat - y_train).T'], {}), '(x_train, (y_hat - y_train).T)\n', (3067, 3097), True, 'import numpy as np\n'), ((3172, 3195), 'numpy.sum', 'np.sum', (['(y_hat - y_train)'], {}), '(y_hat - y_train)\n', (3178, 3195), True, 'import numpy as np\n'), ((681, 695), 'numpy.max', 'np.max', (['x_data'], {}), '(x_data)\n', (687, 695), True, 'import numpy as np\n'), ((698, 712), 'numpy.min', 'np.min', (['x_data'], {}), '(x_data)\n', (704, 712), True, 'import numpy as np\n'), ((2587, 2597), 'numpy.exp', 'np.exp', (['(-n)'], {}), '(-n)\n', (2593, 2597), True, 'import numpy as np\n'), ((4591, 4610), 'numpy.dot', 'np.dot', (['w.T', 'x_test'], {}), '(w.T, x_test)\n', (4597, 4610), True, 'import numpy as np\n'), ((2819, 2832), 'numpy.log', 'np.log', (['y_hat'], {}), '(y_hat)\n', (2825, 2832), True, 'import numpy as np\n'), ((2845, 2862), 'numpy.log', 'np.log', (['(1 - y_hat)'], {}), '(1 - y_hat)\n', (2851, 2862), True, 'import numpy as np\n'), ((5571, 5605), 'numpy.abs', 'np.abs', (['(y_prediction_test - y_test)'], {}), '(y_prediction_test - y_test)\n', (5577, 5605), True, 'import numpy as np\n'), ((5669, 5699), 
'numpy.abs', 'np.abs', (['(y_pred_train - y_train)'], {}), '(y_pred_train - y_train)\n', (5675, 5699), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
import argparse
from hparams import create_hparams
from model import lcm
from train import load_model
from torch.utils.data import DataLoader
from reader import TextMelIDLoader, TextMelIDCollate, id2sp
from inference_utils import plot_data
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--checkpoint_path', type=str,
help='directory to save checkpoints')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
checkpoint_path=args.checkpoint_path
hparams = create_hparams(args.hparams)
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'], strict=False)
_ = model.eval()
def gen_embedding(speaker):
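    # Run the trained speaker encoder over every utterance of the given speaker and save the
    # stacked embeddings (and a plot) under outdir/embeddings/.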
training_list = hparams.training_list
train_set_A = TextMelIDLoader(training_list, hparams.mel_mean_std, hparams.speaker_A,
hparams.speaker_B,
shuffle=False,pids=[speaker])
collate_fn = TextMelIDCollate(lcm(hparams.n_frames_per_step_encoder,
hparams.n_frames_per_step_decoder))
train_loader_A = DataLoader(train_set_A, num_workers=1, shuffle=False,
sampler=None,
batch_size=1, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
with torch.no_grad():
speaker_embeddings = []
for i,batch in enumerate(train_loader_A):
#print i
x, y = model.parse_batch(batch)
text_input_padded, mel_padded, text_lengths, mel_lengths, speaker_id = x
speaker_id, speaker_embedding = model.speaker_encoder.inference(mel_padded)
speaker_embedding = speaker_embedding.data.cpu().numpy()
speaker_embeddings.append(speaker_embedding)
speaker_embeddings = np.vstack(speaker_embeddings)
print(speaker_embeddings.shape)
if not os.path.exists('outdir/embeddings'):
os.makedirs('outdir/embeddings')
np.save('outdir/embeddings/%s.npy'%speaker, speaker_embeddings)
plot_data([speaker_embeddings],
'outdir/embeddings/%s.pdf'%speaker)
print('Generating embedding of %s ...'%hparams.speaker_A)
gen_embedding(hparams.speaker_A)
print('Generating embedding of %s ...'%hparams.speaker_B)
gen_embedding(hparams.speaker_B)
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"train.load_model",
"torch.load",
"model.lcm",
"hparams.create_hparams",
"numpy.vstack",
"torch.utils.data.DataLoader",
"torch.no_grad",
"reader.TextMelIDLoader",
"inference_utils.plot_data",
"numpy.save"
]
| [((293, 318), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (316, 318), False, 'import argparse\n'), ((638, 666), 'hparams.create_hparams', 'create_hparams', (['args.hparams'], {}), '(args.hparams)\n', (652, 666), False, 'from hparams import create_hparams\n'), ((676, 695), 'train.load_model', 'load_model', (['hparams'], {}), '(hparams)\n', (686, 695), False, 'from train import load_model\n'), ((884, 1009), 'reader.TextMelIDLoader', 'TextMelIDLoader', (['training_list', 'hparams.mel_mean_std', 'hparams.speaker_A', 'hparams.speaker_B'], {'shuffle': '(False)', 'pids': '[speaker]'}), '(training_list, hparams.mel_mean_std, hparams.speaker_A,\n hparams.speaker_B, shuffle=False, pids=[speaker])\n', (899, 1009), False, 'from reader import TextMelIDLoader, TextMelIDCollate, id2sp\n'), ((1202, 1344), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set_A'], {'num_workers': '(1)', 'shuffle': '(False)', 'sampler': 'None', 'batch_size': '(1)', 'pin_memory': '(False)', 'drop_last': '(True)', 'collate_fn': 'collate_fn'}), '(train_set_A, num_workers=1, shuffle=False, sampler=None,\n batch_size=1, pin_memory=False, drop_last=True, collate_fn=collate_fn)\n', (1212, 1344), False, 'from torch.utils.data import DataLoader\n'), ((2116, 2181), 'numpy.save', 'np.save', (["('outdir/embeddings/%s.npy' % speaker)", 'speaker_embeddings'], {}), "('outdir/embeddings/%s.npy' % speaker, speaker_embeddings)\n", (2123, 2181), True, 'import numpy as np\n'), ((2184, 2253), 'inference_utils.plot_data', 'plot_data', (['[speaker_embeddings]', "('outdir/embeddings/%s.pdf' % speaker)"], {}), "([speaker_embeddings], 'outdir/embeddings/%s.pdf' % speaker)\n", (2193, 2253), False, 'from inference_utils import plot_data\n'), ((718, 745), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (728, 745), False, 'import torch\n'), ((1077, 1150), 'model.lcm', 'lcm', (['hparams.n_frames_per_step_encoder', 'hparams.n_frames_per_step_decoder'], {}), '(hparams.n_frames_per_step_encoder, hparams.n_frames_per_step_decoder)\n', (1080, 1150), False, 'from model import lcm\n'), ((1447, 1462), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1460, 1462), False, 'import torch\n'), ((1943, 1972), 'numpy.vstack', 'np.vstack', (['speaker_embeddings'], {}), '(speaker_embeddings)\n', (1952, 1972), True, 'import numpy as np\n'), ((2029, 2064), 'os.path.exists', 'os.path.exists', (['"""outdir/embeddings"""'], {}), "('outdir/embeddings')\n", (2043, 2064), False, 'import os\n'), ((2074, 2106), 'os.makedirs', 'os.makedirs', (['"""outdir/embeddings"""'], {}), "('outdir/embeddings')\n", (2085, 2106), False, 'import os\n')] |
import re
import math
class KVPart():
"""docstring for KVPart"""
def __init__(self, name, tab_count = 0):
#super(KVPart, self).__init__()
self.name = name
self.values = []
self.tab_count = tab_count
self.parent = None
self.master = False
def add_simple_value(self, value):
self.values.append(value)
def add_KVPart(self, name):
if self.master == False:
new_KVPart = KVPart(name, self.tab_count + 1)
else:
new_KVPart = KVPart(name, self.tab_count)
new_KVPart.set_parent(self)
self.values.append(new_KVPart)
return new_KVPart
def add_KVPart_finished(self, part):
if not part is None:
part.set_tab_count(self.tab_count + 1)
self.values.append(part)
def add_KVComment(self, text):
new_KVComment = KVComment(text)
self.values.append(new_KVComment)
def is_empty(self):
if len(self.values) == 0:
return True
return False
def set_parent(self, parent):
self.parent = parent
def get_parent(self):
return self.parent
def has_parent(self):
if self.parent is not None:
return True
return False
def get_name(self):
return self.name
def set_master(self, boolean):
self.master = boolean
def get_values(self):
return self.values
def has_KV_child(self):
return any(isinstance(x, KVPart) for x in self.values)
def set_tab_count(self, count):
self.tab_count = count
def items(self):
return self.name, self.values[0]
def __str__(self):
if self.master == False:
string = self.fTab(self.tab_count) + "\"" + self.name + "\""
if any(isinstance(x, KVPart) for x in self.values):
string += "\n" + self.fTab(self.tab_count) + "{\n"
else:
count = self.get_normal_space(string)
string += self.get_normal_space(string)
for x in self.values:
if type(x) is KVPart:
string += str(x)
elif type(x) is KVComment:
string += self.fTab(self.tab_count + 1) + str(x) + "\n"
else:
string += "\"" + str(x) + "\"\n"
if any(isinstance(x, KVPart) for x in self.values):
string += self.fTab(self.tab_count) + "}\n"
return string
else:
if len(self.values) > 1:
string = ""
for x in self.values:
string += str(x) + "\n"
return string
else:
return ""
def __repr__(self):
return "<|" + self.name + "|>"
def fTab(self, count):
string = ""
for x in range(count):
string += "\t"
return string
def get_normal_space(self, text):
lines = text.splitlines()
last_line = lines[len(lines) - 1]
new_position = last_line.rfind("\"")
tab_count = math.floor((40 - new_position) / 5)
space_count = ((40 - new_position) % 5) + 1
string = ""
for x in range(space_count):
string += " "
string += self.fTab(tab_count)
return string
class KVComment():
"""docstring for KVComment"""
def __init__(self, text):
#super(KVComment, self).__init__()
self.text = text
def __str__(self):
return self.text
def read_file(path):
#path = input("Please enter the path of the KV File:")
#path = "C:\\Steam\\steamapps\\common\\dota 2 beta\\game\\dota_addons\\heataria\\scripts\\npc\\abilities\\heataria_blaze_path.txt"
	try:
		with open(path, "r") as file:
			text = file.read()
	except FileNotFoundError:
		# ask for a valid path and retry
		path = input("File not found. Please enter the path of the KV File:")
		return read_file(path)
	master = KVPart("master")
	master.set_master(True)
	progress_text(text, master)
	return master
#processes a KV textfile into a KV_Part structure
def progress_text(text, last_KVPart = None):
if last_KVPart is not None:
#search patterns to check structure
quote_pattern = r'\"(.*?)\"'
open_pattern = r'.*{'
close_pattern = r'.*}'
comment_pattern = r'//.*'
quote_match = re.search(quote_pattern, text)
open_match = re.search(open_pattern, text)
close_match = re.search(close_pattern, text)
comment_match = re.search(comment_pattern, text)
#cancel if there are no more quotes left
if quote_match is not None:
quote_start = quote_match.start()
else:
return
#if there are no brackets left, give them a placeholder value
if open_match is not None:
open_start = open_match.start()
else:
open_start = len(text)
if close_match is not None:
close_start = close_match.start()
else:
close_start = len(text)
if comment_match is not None:
comment_start = comment_match.start()
else:
comment_start = len(text)
string = quote_match.group(1)
#print("SEACH: q." + str(quote_start) + " o." + str(open_start) + " cl." + str(close_start) + " co." + str(comment_start))
if comment_start < quote_start and comment_start < open_start and comment_start < close_start:
string = comment_match.group()
text = text[comment_match.end() + 1:]
last_KVPart.add_KVComment(string)
progress_text(text, last_KVPart)
#no bracktes before next quote -> simply add to current KV_Part
elif quote_start < open_start and quote_start < close_start:
#check if its a value or key
if last_KVPart.is_empty() and not last_KVPart.get_name() == "master":
last_KVPart.add_simple_value(string)
new_KVPart = last_KVPart.get_parent()
else:
new_KVPart = last_KVPart.add_KVPart(string)
text = text[quote_match.end() + 1:]
progress_text(text, new_KVPart)
#closing bracket -> remove bracket and move to parent KV_Part
elif close_start < quote_start:
text = text[close_match.end() + 1:]
if last_KVPart.has_parent():
temp_KVPart = last_KVPart.get_parent()
else:
temp_KVPart = last_KVPart
progress_text(text, temp_KVPart)
#opening bracket -> creates a new child KV_Part
elif open_start < quote_start:
new_KVPart = last_KVPart.add_KVPart(string)
text = text[quote_match.end() + 1:]
progress_text(text, new_KVPart)
| [
"re.search",
"math.floor"
]
| [((2506, 2541), 'math.floor', 'math.floor', (['((40 - new_position) / 5)'], {}), '((40 - new_position) / 5)\n', (2516, 2541), False, 'import math\n'), ((3583, 3613), 're.search', 're.search', (['quote_pattern', 'text'], {}), '(quote_pattern, text)\n', (3592, 3613), False, 'import re\n'), ((3629, 3658), 're.search', 're.search', (['open_pattern', 'text'], {}), '(open_pattern, text)\n', (3638, 3658), False, 'import re\n'), ((3675, 3705), 're.search', 're.search', (['close_pattern', 'text'], {}), '(close_pattern, text)\n', (3684, 3705), False, 'import re\n'), ((3724, 3756), 're.search', 're.search', (['comment_pattern', 'text'], {}), '(comment_pattern, text)\n', (3733, 3756), False, 'import re\n')] |
#!/usr/bin/python
#
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2016 <NAME>, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
import argparse
import os
import shutil
#--------------------------------------------------------------------------------------------------
# Utility functions.
#--------------------------------------------------------------------------------------------------
def safe_mkdir(dir):
if not os.path.exists(dir):
os.mkdir(dir)
def walk(directory, recursive):
if recursive:
for dirpath, dirnames, filenames in os.walk(directory):
yield dirpath, dirnames, filenames
else:
    yield next(os.walk(directory))
#--------------------------------------------------------------------------------------------------
# Update reference images in a given test suite directory.
#--------------------------------------------------------------------------------------------------
def update_ref_images(parent_dir):
renders_dir = os.path.join(parent_dir, "renders")
ref_dir = os.path.join(parent_dir, "ref")
safe_mkdir(ref_dir)
for filename in os.listdir(renders_dir):
if os.path.splitext(filename)[1] == ".png":
src_path = os.path.join(renders_dir, filename)
dst_path = os.path.join(ref_dir, filename)
print(" copying {0} to {1}...".format(src_path, dst_path))
shutil.copyfile(src_path, dst_path)
#--------------------------------------------------------------------------------------------------
# Entry point.
#--------------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="update functional test suite reference images.")
parser.add_argument("-r", "--recursive", action='store_true', dest="recursive",
help="scan the specified directory and all its subdirectories")
parser.add_argument("directory", nargs='?', default=".", help="directory to scan")
args = parser.parse_args()
for dirpath, dirnames, filenames in walk(args.directory, args.recursive):
if "renders" in dirnames:
update_ref_images(dirpath)
if __name__ == '__main__':
main()
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"os.path.splitext",
"shutil.copyfile",
"os.mkdir",
"os.walk"
]
| [((2218, 2253), 'os.path.join', 'os.path.join', (['parent_dir', '"""renders"""'], {}), "(parent_dir, 'renders')\n", (2230, 2253), False, 'import os\n'), ((2268, 2299), 'os.path.join', 'os.path.join', (['parent_dir', '"""ref"""'], {}), "(parent_dir, 'ref')\n", (2280, 2299), False, 'import os\n'), ((2346, 2369), 'os.listdir', 'os.listdir', (['renders_dir'], {}), '(renders_dir)\n', (2356, 2369), False, 'import os\n'), ((2900, 2990), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""update functional test suite reference images."""'}), "(description=\n 'update functional test suite reference images.')\n", (2923, 2990), False, 'import argparse\n'), ((1648, 1667), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (1662, 1667), False, 'import os\n'), ((1677, 1690), 'os.mkdir', 'os.mkdir', (['dir'], {}), '(dir)\n', (1685, 1690), False, 'import os\n'), ((1786, 1804), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1793, 1804), False, 'import os\n'), ((2446, 2481), 'os.path.join', 'os.path.join', (['renders_dir', 'filename'], {}), '(renders_dir, filename)\n', (2458, 2481), False, 'import os\n'), ((2505, 2536), 'os.path.join', 'os.path.join', (['ref_dir', 'filename'], {}), '(ref_dir, filename)\n', (2517, 2536), False, 'import os\n'), ((2621, 2656), 'shutil.copyfile', 'shutil.copyfile', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (2636, 2656), False, 'import shutil\n'), ((2382, 2408), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (2398, 2408), False, 'import os\n'), ((1877, 1895), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1884, 1895), False, 'import os\n')] |
from typing import List, Any
import time
from discord import Embed, Reaction
from utils import uniquify
# EMOJIS regional_indicator_A to regional_indicator_T
reaction_emojies = ['\U0001F1E6',
'\U0001F1E7',
'\U0001F1E8',
'\U0001F1E9',
'\U0001F1EA',
'\U0001F1EB',
'\U0001F1EC',
'\U0001F1ED',
'\U0001F1EE',
'\U0001F1EF',
'\U0001F1F0',
'\U0001F1F1',
'\U0001F1F2',
'\U0001F1F3',
'\U0001F1F4',
'\U0001F1F5',
'\U0001F1F6',
'\U0001F1F7',
'\U0001F1F8',
'\U0001F1F9']
number_emojies = {'rq_plus_one': 1, 'rq_plus_two': 2, 'rq_plus_three': 3, 'rq_plus_four': 4}
class PollCreationException(Exception):
pass
class Poll(object):
"""
A Poll object.
"""
def __init__(self, poll_id: str, poll_title: str, options: List[Any], is_immortal=False, updated_since_start=True):
if options is None:
options = []
self.poll_id = poll_id
self.creation_time = time.time()
self.last_update = time.time()
self.poll_title = poll_title
self.options = uniquify(options)
self.reaction_to_option = {reaction_emojies[k]: options[k] for k in range(len(options))}
self.option_to_reaction = {options[k]: reaction_emojies[k] for k in range(len(options))}
self.participants = dict()
self.option_to_participants = {key: [] for key in options}
self.sent_message = None
self.received_message = None
self.is_immortal = is_immortal
self.is_enabled = True
self.updated_since_start = updated_since_start
async def full_update(self, reactions: List[Reaction], bot_user_id: int):
if self.updated_since_start:
return
self.reaction_to_option = {reaction_emojies[k]: self.options[k] for k in range(len(self.options))}
self.option_to_reaction = {self.options[k]: reaction_emojies[k] for k in range(len(self.options))}
self.participants = dict()
self.option_to_participants = {key: [] for key in self.options}
for reaction in reactions:
async for user in reaction.users():
if bot_user_id != user.id:
self.process_reaction(reaction=reaction, user=user, add=True)
self.updated_since_start = True
def process_reaction(self, reaction, user, add):
# get users + reaction emoji
if hasattr(user, 'nick') and user.nick is not None:
nick = user.nick
else:
nick = user.display_name
if reaction.emoji in self.reaction_to_option:
# set list of users for the option the reaction belongs to.
option = self.reaction_to_option[reaction.emoji]
if add and nick not in self.option_to_participants[option]:
self.option_to_participants[option].append(nick)
elif not add:
self.option_to_participants[option].remove(nick)
if nick not in self.participants:
self.participants[nick] = 1
if hasattr(reaction.emoji, 'name') and reaction.emoji.name in number_emojies:
amount = number_emojies[reaction.emoji.name]
self.participants[nick] += (amount if add else -1 * amount)
def to_discord(self):
msg = f'Poll for **{self.poll_title}**'
embed = Embed(color=0xbb1c1c)
for option, participants in self.option_to_participants.items():
reaction = self.option_to_reaction[option]
name = f'{reaction} {option}'
value = ', '.join(
sorted([f'{x} [{self.participants[x]}]' for x in participants])) if participants else '-'
field_counters = [self.participants[x] for x in participants]
total = sum(field_counters)
embed.add_field(name=f'{name} [{total}]', value=value, inline=False)
embed.set_footer(text=f'ID: {self.poll_id}')
return msg, embed
| [
"utils.uniquify",
"discord.Embed",
"time.time"
]
| [((1275, 1286), 'time.time', 'time.time', ([], {}), '()\n', (1284, 1286), False, 'import time\n'), ((1314, 1325), 'time.time', 'time.time', ([], {}), '()\n', (1323, 1325), False, 'import time\n'), ((1386, 1403), 'utils.uniquify', 'uniquify', (['options'], {}), '(options)\n', (1394, 1403), False, 'from utils import uniquify\n'), ((3633, 3654), 'discord.Embed', 'Embed', ([], {'color': '(12262428)'}), '(color=12262428)\n', (3638, 3654), False, 'from discord import Embed, Reaction\n')] |
import email.utils as em
import re
class Main():
def __init__(self):
self.n = int(input())
for i in range(self.n):
self.s = em.parseaddr(input())
if re.match(r'^[a-zA-Z](\w|-|\.|_)+@[a-zA-Z]+\.[a-zA-Z]{0,3}$', self.s[1]):
print(em.formataddr(self.s))
if __name__ == '__main__':
obj = Main()
| [
"re.match",
"email.utils.formataddr"
]
| [((216, 289), 're.match', 're.match', (['"""^[a-zA-Z](\\\\w|-|\\\\.|_)+@[a-zA-Z]+\\\\.[a-zA-Z]{0,3}$"""', 'self.s[1]'], {}), "('^[a-zA-Z](\\\\w|-|\\\\.|_)+@[a-zA-Z]+\\\\.[a-zA-Z]{0,3}$', self.s[1])\n", (224, 289), False, 'import re\n'), ((311, 332), 'email.utils.formataddr', 'em.formataddr', (['self.s'], {}), '(self.s)\n', (324, 332), True, 'import email.utils as em\n')] |
# Author: <NAME> <<EMAIL>>
import numpy as np
from bolero.representation import BlackBoxBehavior
from bolero.representation import DMPBehavior as DMPBehaviorImpl
class DMPBehavior(BlackBoxBehavior):
"""Dynamical Movement Primitive.
Parameters
----------
execution_time : float, optional (default: 1)
Execution time of the DMP in seconds.
dt : float, optional (default: 0.01)
Time between successive steps in seconds.
n_features : int, optional (default: 50)
Number of RBF features for each dimension of the DMP.
configuration_file : string, optional (default: None)
Name of a configuration file that should be used to initialize the DMP.
If it is set all other arguments will be ignored.
"""
def __init__(self, execution_time=1.0, dt=0.01, n_features=50,
configuration_file=None):
self.dmp = DMPBehaviorImpl(execution_time, dt, n_features,
configuration_file)
def init(self, n_inputs, n_outputs):
"""Initialize the behavior.
Parameters
----------
n_inputs : int
number of inputs
n_outputs : int
number of outputs
"""
self.dmp.init(3 * n_inputs, 3 * n_outputs)
self.n_joints = n_inputs
self.x = np.empty(3 * self.n_joints)
self.x[:] = np.nan
def reset(self):
self.dmp.reset()
self.x[:] = 0.0
def set_inputs(self, inputs):
self.x[:self.n_joints] = inputs[:]
def can_step(self):
return self.dmp.can_step()
def step(self):
self.dmp.set_inputs(self.x)
self.dmp.step()
self.dmp.get_outputs(self.x)
def get_outputs(self, outputs):
outputs[:] = self.x[:self.n_joints]
def get_n_params(self):
return self.dmp.get_n_params()
def get_params(self):
return self.dmp.get_params()
def set_params(self, params):
self.dmp.set_params(params)
def set_meta_parameters(self, keys, values):
self.dmp.set_meta_parameters(keys, values)
def trajectory(self):
return self.dmp.trajectory()
class DMPBehaviorWithGoalParams(DMPBehavior):
def __init__(self, goal, execution_time=1.0, dt=0.01, n_features=50,
configuration_file=None):
super(DMPBehaviorWithGoalParams, self).__init__(
execution_time, dt, n_features, configuration_file)
self.params = np.copy(goal)
def set_meta_parameters(self, keys, values):
self.dmp.set_meta_parameters(keys, values)
self.set_params(self.params)
def get_n_params(self):
return len(self.params)
def get_params(self):
return self.params
def set_params(self, params):
self.params[:] = params
self.dmp.set_meta_parameters(["g"], [self.params])
| [
"numpy.copy",
"numpy.empty",
"bolero.representation.DMPBehavior"
]
| [((900, 967), 'bolero.representation.DMPBehavior', 'DMPBehaviorImpl', (['execution_time', 'dt', 'n_features', 'configuration_file'], {}), '(execution_time, dt, n_features, configuration_file)\n', (915, 967), True, 'from bolero.representation import DMPBehavior as DMPBehaviorImpl\n'), ((1340, 1367), 'numpy.empty', 'np.empty', (['(3 * self.n_joints)'], {}), '(3 * self.n_joints)\n', (1348, 1367), True, 'import numpy as np\n'), ((2478, 2491), 'numpy.copy', 'np.copy', (['goal'], {}), '(goal)\n', (2485, 2491), True, 'import numpy as np\n')] |
import logging.config
import tornado
from bitstampws import Client as Websocket
import lib.configs.logging
from lib.subscribers import SimpleLoggerSubscriber
logging.config.dictConfig(lib.configs.logging.d)
if __name__ == '__main__':
with Websocket() as client:
with SimpleLoggerSubscriber(client):
client.connect()
try:
tornado.ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
client.close()
| [
"bitstampws.Client",
"lib.subscribers.SimpleLoggerSubscriber",
"tornado.ioloop.IOLoop.instance"
]
| [((248, 259), 'bitstampws.Client', 'Websocket', ([], {}), '()\n', (257, 259), True, 'from bitstampws import Client as Websocket\n'), ((284, 314), 'lib.subscribers.SimpleLoggerSubscriber', 'SimpleLoggerSubscriber', (['client'], {}), '(client)\n', (306, 314), False, 'from lib.subscribers import SimpleLoggerSubscriber\n'), ((378, 410), 'tornado.ioloop.IOLoop.instance', 'tornado.ioloop.IOLoop.instance', ([], {}), '()\n', (408, 410), False, 'import tornado\n')] |
"""Generated client library for serviceuser version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.serviceuser.v1 import serviceuser_v1_messages as messages
class ServiceuserV1(base_api.BaseApiClient):
"""Generated client library for service serviceuser version v1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://serviceuser.googleapis.com/'
_PACKAGE = u'serviceuser'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/service.management']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'ServiceuserV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new serviceuser handle."""
url = url or self.BASE_URL
super(ServiceuserV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_services = self.ProjectsServicesService(self)
self.projects = self.ProjectsService(self)
self.services = self.ServicesService(self)
class ProjectsServicesService(base_api.BaseApiService):
"""Service class for the projects_services resource."""
_NAME = u'projects_services'
def __init__(self, client):
super(ServiceuserV1.ProjectsServicesService, self).__init__(client)
self._upload_configs = {
}
def Disable(self, request, global_params=None):
r"""Disable a service so it can no longer be used with a.
project. This prevents unintended usage that may cause unexpected billing
charges or security leaks.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesDisableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Disable')
return self._RunMethod(
config, request, global_params=global_params)
Disable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.disable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:disable',
request_field=u'disableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesDisableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Enable(self, request, global_params=None):
r"""Enable a service so it can be used with a project.
See [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for
more information.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesEnableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Enable')
return self._RunMethod(
config, request, global_params=global_params)
Enable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.enable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:enable',
request_field=u'enableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesEnableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""List enabled services for the specified consumer.
Args:
request: (ServiceuserProjectsServicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListEnabledServicesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.projects.services.list',
ordered_params=[u'projectsId'],
path_params=[u'projectsId'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/projects/{projectsId}/services',
request_field='',
request_type_name=u'ServiceuserProjectsServicesListRequest',
response_type_name=u'ListEnabledServicesResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(ServiceuserV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
class ServicesService(base_api.BaseApiService):
"""Service class for the services resource."""
_NAME = u'services'
def __init__(self, client):
super(ServiceuserV1.ServicesService, self).__init__(client)
self._upload_configs = {
}
def Search(self, request, global_params=None):
r"""Search available services.
When no filter is specified, returns all accessible services. For
authenticated users, also returns all services the calling user has
"servicemanagement.services.bind" permission for.
Args:
request: (ServiceuserServicesSearchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SearchServicesResponse) The response message.
"""
config = self.GetMethodConfig('Search')
return self._RunMethod(
config, request, global_params=global_params)
Search.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.services.search',
ordered_params=[],
path_params=[],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/services:search',
request_field='',
request_type_name=u'ServiceuserServicesSearchRequest',
response_type_name=u'SearchServicesResponse',
supports_download=False,
)
| [
"apitools.base.py.base_api.ApiMethodInfo"
]
| [((2818, 3288), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'http_method': 'u"""POST"""', 'method_id': 'u"""serviceuser.projects.services.disable"""', 'ordered_params': "[u'projectsId', u'servicesId']", 'path_params': "[u'projectsId', u'servicesId']", 'query_params': '[]', 'relative_path': 'u"""v1/projects/{projectsId}/services/{servicesId}:disable"""', 'request_field': 'u"""disableServiceRequest"""', 'request_type_name': 'u"""ServiceuserProjectsServicesDisableRequest"""', 'response_type_name': 'u"""Operation"""', 'supports_download': '(False)'}), "(http_method=u'POST', method_id=\n u'serviceuser.projects.services.disable', ordered_params=[u'projectsId',\n u'servicesId'], path_params=[u'projectsId', u'servicesId'],\n query_params=[], relative_path=\n u'v1/projects/{projectsId}/services/{servicesId}:disable',\n request_field=u'disableServiceRequest', request_type_name=\n u'ServiceuserProjectsServicesDisableRequest', response_type_name=\n u'Operation', supports_download=False)\n", (2840, 3288), False, 'from apitools.base.py import base_api\n'), ((3995, 4462), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'http_method': 'u"""POST"""', 'method_id': 'u"""serviceuser.projects.services.enable"""', 'ordered_params': "[u'projectsId', u'servicesId']", 'path_params': "[u'projectsId', u'servicesId']", 'query_params': '[]', 'relative_path': 'u"""v1/projects/{projectsId}/services/{servicesId}:enable"""', 'request_field': 'u"""enableServiceRequest"""', 'request_type_name': 'u"""ServiceuserProjectsServicesEnableRequest"""', 'response_type_name': 'u"""Operation"""', 'supports_download': '(False)'}), "(http_method=u'POST', method_id=\n u'serviceuser.projects.services.enable', ordered_params=[u'projectsId',\n u'servicesId'], path_params=[u'projectsId', u'servicesId'],\n query_params=[], relative_path=\n u'v1/projects/{projectsId}/services/{servicesId}:enable', request_field\n =u'enableServiceRequest', request_type_name=\n u'ServiceuserProjectsServicesEnableRequest', response_type_name=\n u'Operation', supports_download=False)\n", (4017, 4462), False, 'from apitools.base.py import base_api\n'), ((5042, 5464), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'http_method': 'u"""GET"""', 'method_id': 'u"""serviceuser.projects.services.list"""', 'ordered_params': "[u'projectsId']", 'path_params': "[u'projectsId']", 'query_params': "[u'pageSize', u'pageToken']", 'relative_path': 'u"""v1/projects/{projectsId}/services"""', 'request_field': '""""""', 'request_type_name': 'u"""ServiceuserProjectsServicesListRequest"""', 'response_type_name': 'u"""ListEnabledServicesResponse"""', 'supports_download': '(False)'}), "(http_method=u'GET', method_id=\n u'serviceuser.projects.services.list', ordered_params=[u'projectsId'],\n path_params=[u'projectsId'], query_params=[u'pageSize', u'pageToken'],\n relative_path=u'v1/projects/{projectsId}/services', request_field='',\n request_type_name=u'ServiceuserProjectsServicesListRequest',\n response_type_name=u'ListEnabledServicesResponse', supports_download=False)\n", (5064, 5464), False, 'from apitools.base.py import base_api\n'), ((6751, 7117), 'apitools.base.py.base_api.ApiMethodInfo', 'base_api.ApiMethodInfo', ([], {'http_method': 'u"""GET"""', 'method_id': 'u"""serviceuser.services.search"""', 'ordered_params': '[]', 'path_params': '[]', 'query_params': "[u'pageSize', u'pageToken']", 'relative_path': 'u"""v1/services:search"""', 'request_field': '""""""', 'request_type_name': 
'u"""ServiceuserServicesSearchRequest"""', 'response_type_name': 'u"""SearchServicesResponse"""', 'supports_download': '(False)'}), "(http_method=u'GET', method_id=\n u'serviceuser.services.search', ordered_params=[], path_params=[],\n query_params=[u'pageSize', u'pageToken'], relative_path=\n u'v1/services:search', request_field='', request_type_name=\n u'ServiceuserServicesSearchRequest', response_type_name=\n u'SearchServicesResponse', supports_download=False)\n", (6773, 7117), False, 'from apitools.base.py import base_api\n')] |
#!/usr/bin/env python3
# adapted from wav2letter/src/feature/test/MfccTest.cpp
import itertools as it
import os
import sys
from wav2letter.feature import FeatureParams, Mfcc
def load_data(filename):
path = os.path.join(data_path, filename)
path = os.path.abspath(path)
with open(path) as f:
return [float(x) for x in it.chain.from_iterable(line.split() for line in f)]
if __name__ == "__main__":
if len(sys.argv) != 2:
print(f"usage: {sys.argv[0]} feature_test_data_path", file=sys.stderr)
print(" (usually: <wav2letter_root>/src/feature/test/data)", file=sys.stderr)
sys.exit(1)
data_path = sys.argv[1]
wavinput = load_data("sa1.dat")
# golden features to compare
htkfeatures = load_data("sa1-mfcc.htk")
assert len(wavinput) > 0
assert len(htkfeatures) > 0
params = FeatureParams()
# define parameters of the featurization
params.sampling_freq = 16000
params.low_freq_filterbank = 0
params.high_freq_filterbank = 8000
params.num_filterbank_chans = 20
params.num_cepstral_coeffs = 13
params.use_energy = False
params.zero_mean_frame = False
params.use_power = False
# apply MFCC featurization
mfcc = Mfcc(params)
features = mfcc.apply(wavinput)
# check that obtained features are the same as golden one
assert len(features) == len(htkfeatures)
assert len(features) % 39 == 0
numframes = len(features) // 39
featurescopy = features.copy()
for f in range(numframes):
for i in range(1, 39):
features[f * 39 + i - 1] = features[f * 39 + i]
features[f * 39 + 12] = featurescopy[f * 39 + 0]
features[f * 39 + 25] = featurescopy[f * 39 + 13]
features[f * 39 + 38] = featurescopy[f * 39 + 26]
differences = [abs(x[0] - x[1]) for x in zip(features, htkfeatures)]
print(f"max_diff={max(differences)}")
print(f"avg_diff={sum(differences)/len(differences)}")
| [
"os.path.join",
"wav2letter.feature.FeatureParams",
"sys.exit",
"os.path.abspath",
"wav2letter.feature.Mfcc"
]
| [((215, 248), 'os.path.join', 'os.path.join', (['data_path', 'filename'], {}), '(data_path, filename)\n', (227, 248), False, 'import os\n'), ((260, 281), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (275, 281), False, 'import os\n'), ((855, 870), 'wav2letter.feature.FeatureParams', 'FeatureParams', ([], {}), '()\n', (868, 870), False, 'from wav2letter.feature import FeatureParams, Mfcc\n'), ((1233, 1245), 'wav2letter.feature.Mfcc', 'Mfcc', (['params'], {}), '(params)\n', (1237, 1245), False, 'from wav2letter.feature import FeatureParams, Mfcc\n'), ((624, 635), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (632, 635), False, 'import sys\n')] |
#!/usr/bin/env python3
from urdf2optcontrol import optimizer
from matplotlib import pyplot as plt
import pathlib
# URDF options
urdf_path = pathlib.Path(__file__).parent.joinpath('urdf', 'rrbot.urdf').absolute()
root = "link1"
end = "link3"
in_cond = [0] * 4
def my_cost_func(q, qd, qdd, ee_pos, u, t):
return u.T @ u
def my_constraint1(q, qd, qdd, ee_pos, u, t):
return [-30, -30], u, [30, 30]
def my_constraint2(q, qd, qdd, ee_pos, u, t):
return [-4, -4], qd, [4, 4]
my_constraints = [my_constraint1, my_constraint2]
def my_final_constraint1(q, qd, qdd, ee_pos, u):
return [3.14 / 2, 0], q, [3.14 / 2, 0]
def my_final_constraint2(q, qd, qdd, ee_pos, u):
return [0, 0], qd, [0, 0]
my_final_constraints = [my_final_constraint1, my_final_constraint2]
time_horizon = 2.0
steps = 40
# Load the urdf and calculate the differential equations
optimizer.load_robot(urdf_path, root, end)
# Loading the problem conditions
optimizer.load_problem(
my_cost_func,
steps,
in_cond,
time_horizon=time_horizon,
constraints=my_constraints,
final_constraints=my_final_constraints,
max_iter=500
)
# Solving the non linear problem
res = optimizer.solve()
print('u = ', res['u'][0])
print('q = ', res['q'][0])
# Print the results!
fig = optimizer.plot_result(show=True)
| [
"urdf2optcontrol.optimizer.solve",
"urdf2optcontrol.optimizer.load_robot",
"pathlib.Path",
"urdf2optcontrol.optimizer.plot_result",
"urdf2optcontrol.optimizer.load_problem"
]
| [((875, 917), 'urdf2optcontrol.optimizer.load_robot', 'optimizer.load_robot', (['urdf_path', 'root', 'end'], {}), '(urdf_path, root, end)\n', (895, 917), False, 'from urdf2optcontrol import optimizer\n'), ((952, 1123), 'urdf2optcontrol.optimizer.load_problem', 'optimizer.load_problem', (['my_cost_func', 'steps', 'in_cond'], {'time_horizon': 'time_horizon', 'constraints': 'my_constraints', 'final_constraints': 'my_final_constraints', 'max_iter': '(500)'}), '(my_cost_func, steps, in_cond, time_horizon=\n time_horizon, constraints=my_constraints, final_constraints=\n my_final_constraints, max_iter=500)\n', (974, 1123), False, 'from urdf2optcontrol import optimizer\n'), ((1184, 1201), 'urdf2optcontrol.optimizer.solve', 'optimizer.solve', ([], {}), '()\n', (1199, 1201), False, 'from urdf2optcontrol import optimizer\n'), ((1284, 1316), 'urdf2optcontrol.optimizer.plot_result', 'optimizer.plot_result', ([], {'show': '(True)'}), '(show=True)\n', (1305, 1316), False, 'from urdf2optcontrol import optimizer\n'), ((142, 164), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (154, 164), False, 'import pathlib\n')] |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Mean stddev box coder.
This box coder use the following coding schema to encode boxes:
rel_code = (box_corner - anchor_corner_mean) / anchor_corner_stddev.
"""
from object_detection.core import box_coder
from object_detection.core import box_list
class MeanStddevBoxCoder(box_coder.BoxCoder):
"""Mean stddev box coder."""
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of N anchors. We assume that anchors has an associated
stddev field.
Returns:
a tensor representing N anchor-encoded boxes
Raises:
ValueError: if the anchors BoxList does not have a stddev field
"""
if not anchors.has_field('stddev'):
raise ValueError('anchors must have a stddev field')
box_corners = boxes.get()
means = anchors.get()
stddev = anchors.get_field('stddev')
return (box_corners - means) / stddev
def _decode(self, rel_codes, anchors):
"""Decode.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors. We assume that anchors has an associated
stddev field.
Returns:
boxes: BoxList holding N bounding boxes
Raises:
ValueError: if the anchors BoxList does not have a stddev field
"""
if not anchors.has_field('stddev'):
raise ValueError('anchors must have a stddev field')
means = anchors.get()
stddevs = anchors.get_field('stddev')
box_corners = rel_codes * stddevs + means
return box_list.BoxList(box_corners)
| [
"object_detection.core.box_list.BoxList"
]
| [((2403, 2432), 'object_detection.core.box_list.BoxList', 'box_list.BoxList', (['box_corners'], {}), '(box_corners)\n', (2419, 2432), False, 'from object_detection.core import box_list\n')] |
from django.http import Http404
from django.shortcuts import render, redirect, reverse
from django.views.generic import ListView
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.renderers import TemplateHTMLRenderer
from .models import Schema, SchemaColumn, SchemaResponse, SchemaUrl
from .forms import SchemaResponseForm, ResponseUpdateForm
from .serializers import SchemaResponseSerializer
from .prepare_data import getcolumns
import pytz
class SchemaIndexView(LoginRequiredMixin, ListView):
# login_url = '/accounts/login.html/'
template_name = 'dynamic_schemas/index.html'
context_object_name = 'all_schemas'
def get_queryset(self):
return Schema.objects.all()
@login_required
def form_view(request, pk):
schema = Schema.objects.get(pk=pk)
urls = schema.help_field.all()
if request.method == 'POST':
form = SchemaResponseForm(schema, request.POST)
if form.is_valid():
instance = form.save(commit=False)
instance.user = request.user
instance.save()
return redirect(reverse('dynamic_schemas:schema_view',
kwargs={'pk': pk}))
else:
form = SchemaResponseForm(schema)
return render(request, f'dynamic_schemas/create-form.html', \
{
'form': form,
'schema': schema,
'help_urls': urls,
})
@login_required
def form_update_view(request, pk, r_pk):
schema = Schema.objects.get(pk=pk)
instance = SchemaResponse.objects.get(schema=schema, pk=r_pk)
columns = SchemaColumn.objects.filter(schema=schema)
###################################################
# This little snippet checks if the responses can be edited. If they can
# the submit button will be provided. There is no restriction on
# has_been_edited, but since the data cant be saved we're good for now.
load_button = False
aggr_editables = [c.is_editable_once for c in columns]
if True in aggr_editables:
load_button = True
###################################################
form = ResponseUpdateForm(instance, pk)
if request.method == 'POST':
form = ResponseUpdateForm(instance, pk, request.POST or None)
if form.is_valid():
form.update()
return redirect(reverse('dynamic_schemas:schema_view',
kwargs={'pk': pk}))
return render(request, f'dynamic_schemas/update-form.html',
{'form_update': form,
'load_button': load_button}
)
""" API Views """
class MakeDataPrettyMixin:
def _make_date_tz(self, instance=None, tz=None):
""" Takes an instance, and sets its timezone.
TODO:
Should this be a classmethod? Will a classmethod complicate the
view in its context?
"""
# Can this be moved to SETTINGS instead? Same for _make_date_readable.
# Problem is probably that the UTC format gets overridden.
if instance:
if tz:
tz = pytz.timezone(tz)
return instance.pub_date.astimezone(tz)
return
def _make_date_readable(self, instances):
"""
Helper function to change the dates to a format pleasing to the
eyes, takes a bundle of instances and converts their time.
How extensible do we want this?
Function is kept private for now, since in Denmark the timezone is CET.
"""
for instance in instances:
inst_as_cet = self._make_date_tz(
instance=instance
# tz='Europe/Copenhagen'
)
instance.pub_date = inst_as_cet \
.strftime('%d-%m/%Y %H:%M:%S')
return instances
def _make_user_readable(self, serializer):
""" Helper to return the correct attributes to the front-end
"""
for data in serializer.data:
# import ipdb; ipdb.set_trace()
user = data['user']
instance = User.objects.get(id=user)
user = instance.first_name + instance.last_name
if instance.first_name == '':
user = instance.username
data['user'] = user
# __import__('ipdb').set_trace()
# import ipdb; ipdb.set_trace()
return serializer
def _make_intruction_links_readable(self, serializer):
for data in serializer.data:
instr = data['instruction']
instance = SchemaUrl.objects.get(id=instr)
instr = '<a href="'+ instance.url +'">'+ instance.name +'</a>'
data['instruction'] = instr
return serializer
class ResponseList(MakeDataPrettyMixin, APIView):
"""
Lists responses according to schema.
Purely for APIView for now. Not being used in the actual rendering af the
tables.
"""
default_order = [
('desc', '-'),
('asc', ''),
]
def get_orderprefix(self, order):
for tup in self.default_order:
if order in tup:
return tup[1]
def get(self, request, pk, format=None, *args):
req = request.GET
# Amount of data to fetch each pull
start = int(req.get('start', 0))
length = int(req.get('length', 30))
end = start + length;
order = req.get('order[0][dir]')
order_column = req.get('order[0][column]')
order_by_pre = self.get_orderprefix(order)
order_column_name = req.get('columns['+order_column+'][data]')
# __import__('ipdb').set_trace()
order_str = order_by_pre + order_column_name
draw = req.get('draw')
# TODO Gonna require some thinking. Also need to user recordsFiltered.
# search = req.get('search[value]')
schema = Schema.objects.get(pk=pk)
responses_count = SchemaResponse.objects.filter(schema=schema).count()
responses = SchemaResponse \
.objects \
.filter(schema=schema) \
.order_by(order_str)[start:end]
# __import__('ipdb').set_trace()
responses = self._make_date_readable(responses)
serializer = SchemaResponseSerializer(responses, many=True)
serializer = self._make_user_readable(serializer)
serializer = self._make_intruction_links_readable(serializer)
return_data = {
'draw': int(draw),
'recordsTotal': responses_count,
'recordsFiltered': responses_count,
'data': serializer.data,
}
# __import__('ipdb').set_trace()
return Response(return_data)
class ResponseColumns(APIView):
def get(self, request, pk, format=None, *args):
req = request.GET
schema = Schema.objects.get(pk=pk)
sr = SchemaResponse.objects.filter(schema=schema).first()
columns = getcolumns(sr).getvalue()
return Response(columns)
class SchemaView(LoginRequiredMixin, APIView):
"""
Fetches the FIRST object from ResponseList. Makes it availabe for
as a template for the table in main.html
Excludes schema.id, and the placeholder qa_set in the template.
"""
renderer_classes = [TemplateHTMLRenderer]
template_name = 'dynamic_schemas/table_dev.html'
def get_object(self, pk):
try:
schema = Schema.objects.get(pk=pk)
if SchemaColumn.objects.filter(schema=schema).count() != 0:
all_responses = SchemaResponse.objects.filter(schema=schema)
single_response = all_responses.first()
serializer = SchemaResponseSerializer(single_response)
return serializer.data
except single_response.DoesNotExist:
raise Http404
def get(self, request, pk):
schema = Schema.objects.get(pk=pk)
schema_help_urls = schema.help_field.all()
schema_obsolete = schema.obsolete.all()
schema_new = schema.new.all()
all_responses = SchemaResponse.objects.filter(schema=schema)
# self._make_date_readable(all_responses)
serializer = SchemaResponseSerializer(all_responses, many=True)
data = {'single_response': self.get_object(pk),
'all_responses': serializer.data,
'pk': pk,
'schema': schema,
'help_urls': schema_help_urls,
'schema_obsolete': schema_obsolete,
'schema_new': schema_new,
}
# __import__('ipdb').set_trace()
return Response(data)
| [
"django.shortcuts.render",
"pytz.timezone",
"rest_framework.response.Response",
"django.shortcuts.reverse",
"django.contrib.auth.models.User.objects.get"
]
| [((1493, 1602), 'django.shortcuts.render', 'render', (['request', 'f"""dynamic_schemas/create-form.html"""', "{'form': form, 'schema': schema, 'help_urls': urls}"], {}), "(request, f'dynamic_schemas/create-form.html', {'form': form,\n 'schema': schema, 'help_urls': urls})\n", (1499, 1602), False, 'from django.shortcuts import render, redirect, reverse\n'), ((2689, 2796), 'django.shortcuts.render', 'render', (['request', 'f"""dynamic_schemas/update-form.html"""', "{'form_update': form, 'load_button': load_button}"], {}), "(request, f'dynamic_schemas/update-form.html', {'form_update': form,\n 'load_button': load_button})\n", (2695, 2796), False, 'from django.shortcuts import render, redirect, reverse\n'), ((6939, 6960), 'rest_framework.response.Response', 'Response', (['return_data'], {}), '(return_data)\n', (6947, 6960), False, 'from rest_framework.response import Response\n'), ((7241, 7258), 'rest_framework.response.Response', 'Response', (['columns'], {}), '(columns)\n', (7249, 7258), False, 'from rest_framework.response import Response\n'), ((8891, 8905), 'rest_framework.response.Response', 'Response', (['data'], {}), '(data)\n', (8899, 8905), False, 'from rest_framework.response import Response\n'), ((1338, 1395), 'django.shortcuts.reverse', 'reverse', (['"""dynamic_schemas:schema_view"""'], {'kwargs': "{'pk': pk}"}), "('dynamic_schemas:schema_view', kwargs={'pk': pk})\n", (1345, 1395), False, 'from django.shortcuts import render, redirect, reverse\n'), ((2570, 2627), 'django.shortcuts.reverse', 'reverse', (['"""dynamic_schemas:schema_view"""'], {'kwargs': "{'pk': pk}"}), "('dynamic_schemas:schema_view', kwargs={'pk': pk})\n", (2577, 2627), False, 'from django.shortcuts import render, redirect, reverse\n'), ((4337, 4362), 'django.contrib.auth.models.User.objects.get', 'User.objects.get', ([], {'id': 'user'}), '(id=user)\n', (4353, 4362), False, 'from django.contrib.auth.models import User\n'), ((3344, 3361), 'pytz.timezone', 'pytz.timezone', (['tz'], {}), '(tz)\n', (3357, 3361), False, 'import pytz\n')] |
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
def Dm(driver,user,message):
''' This function is used to direct message a single user/group '''
driver.get('https://www.instagram.com/direct/inbox/')
send_message_button = WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div/div[3]/div/button'))).click()
search_user = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[1]/div/div[2]/input')))
search_user.send_keys(user)
selector = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[2]/div[2]/div/div/div[3]/button/span'))).click()
next_button = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div'))).click()
try:
text = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')))
text.send_keys(message)
send = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button'))).click()
driver.get('https://www.instagram.com/direct/inbox/')
except:
print('No message sent to '+user)
        driver.get('https://www.instagram.com/direct/inbox/')
| [
"selenium.webdriver.support.wait.WebDriverWait",
"selenium.webdriver.support.expected_conditions.element_to_be_clickable"
]
| [((678, 780), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (["(By.XPATH, '/html/body/div[5]/div/div/div[2]/div[1]/div/div[2]/input')"], {}), "((By.XPATH,\n '/html/body/div[5]/div/div/div[2]/div[1]/div/div[2]/input'))\n", (704, 780), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((646, 671), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (659, 671), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((1206, 1354), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH,\n \'//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea\'\n )'], {}), '((By.XPATH,\n \'//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea\'\n ))\n', (1232, 1354), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((494, 622), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH,\n \'//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div/div[3]/div/button\'\n )'], {}), '((By.XPATH,\n \'//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div/div[3]/div/button\'\n ))\n', (520, 622), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((862, 974), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (["(By.XPATH, '/html/body/div[5]/div/div/div[2]/div[2]/div/div/div[3]/button/span'\n )"], {}), "((By.XPATH,\n '/html/body/div[5]/div/div/div[2]/div[2]/div/div/div[3]/button/span'))\n", (888, 974), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1035, 1139), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (["(By.XPATH, '/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div')"], {}), "((By.XPATH,\n '/html/body/div[5]/div/div/div[1]/div/div[2]/div/button/div'))\n", (1061, 1139), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1174, 1199), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (1187, 1199), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((462, 487), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(20)'], {}), '(driver, 20)\n', (475, 487), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((830, 855), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (843, 855), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((1003, 1028), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (1016, 1028), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((1426, 1572), 'selenium.webdriver.support.expected_conditions.element_to_be_clickable', 'EC.element_to_be_clickable', (['(By.XPATH,\n \'//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button\'\n )'], {}), '((By.XPATH,\n \'//*[@id="react-root"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button\'\n ))\n', (1452, 1572), True, 'from selenium.webdriver.support import expected_conditions as EC\n'), ((1394, 1419), 
'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['driver', '(10)'], {}), '(driver, 10)\n', (1407, 1419), False, 'from selenium.webdriver.support.wait import WebDriverWait\n')] |
# Script for data augmentation functions
import numpy as np
from collections import deque
from PIL import Image
import cv2
import torch  # used by image_to_tensor below
from data.config import *
def imread_cv2(image_path):
"""
Read image_path with cv2 format (H, W, C)
if image is '.gif' outputs is a numpy array of {0,1}
"""
image_format = image_path[-3:]
if image_format == 'jpg':
image = cv2.imread(image_path)
else:
image = np.array(Image.open(image_path))
return image
def resize_cv2(image, heigh=1280, width=1918):
return cv2.resize(image, (width, heigh), cv2.INTER_LINEAR)
def image_to_tensor(image, mean=0, std=1.):
"""Transform image (input is numpy array, read in by cv2) """
if len(image.shape) == 2:
image = image.reshape(image.shape[0], image.shape[1], 1)
image = image.astype(np.float32)
image = (image-mean)/std
image = image.transpose((2,0,1))
tensor = torch.from_numpy(image)
return tensor
# --- Data Augmentation functions --- #
# A lot of functions can be found here:
# https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py#L223
# transform image and label
def randomHorizontalFlip(image, mask, p=0.5):
"""Do a random horizontal flip with probability p"""
if np.random.random() < p:
image = np.fliplr(image)
mask = np.fliplr(mask)
return image, mask
def randomVerticalFlip(image, mask, p=0.5):
"""Do a random vertical flip with probability p"""
if np.random.random() < p:
image = np.flipud(image)
mask = np.flipud(mask)
return image, mask
def randomHorizontalShift(image, mask, max_shift=0.05, p=0.5):
"""Do random horizontal shift with max proportion shift and with probability p
Elements that roll beyond the last position are re-introduced at the first."""
max_shift_pixels = int(max_shift*image.shape[1])
shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
if np.random.random() < p:
image = np.roll(image, shift, axis=1)
mask = np.roll(mask, shift, axis=1)
return image, mask
def randomVerticalShift(image, mask, max_shift=0.05, p=0.5):
"""Do random vertical shift with max proportion shift and probability p
Elements that roll beyond the last position are re-introduced at the first."""
max_shift_pixels = int(max_shift*image.shape[0])
shift = np.random.choice(np.arange(-max_shift_pixels, max_shift_pixels+1))
if np.random.random() < p:
image = np.roll(image, shift, axis=0)
mask = np.roll(mask, shift, axis=0)
return image, mask
def randomInvert(image, mask, p=0.5):
"""Randomly invert image with probability p"""
if np.random.random() < p:
image = 255 - image
mask = mask
return image, mask
def randomBrightness(image, mask, p=0.75):
"""With probability p, randomly increase or decrease brightness.
See https://stackoverflow.com/questions/37822375/python-opencv-increasing-image-brightness-without-overflowing-uint8-array"""
if np.random.random() < p:
max_value = np.percentile(255-image, q=25) # avoid burning out white cars, so take image-specific maximum
value = np.random.choice(np.arange(-max_value, max_value))
if value > 0:
image = np.where((255 - image) < value,255,image+value).astype(np.uint8)
else:
image = np.where(image < -value,0,image+value).astype(np.uint8)
return image, mask
def randomHue(image, mask, p=0.25, max_value=75):
"""With probability p, randomly increase or decrease hue.
See https://stackoverflow.com/questions/32609098/how-to-fast-change-image-brightness-with-python-opencv"""
if np.random.random() < p:
value = np.random.choice(np.arange(-max_value, max_value))
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hsv[:,:,0] = hsv[:,:,0] + value
hsv = np.clip(hsv, a_min=0, a_max=255).astype(np.uint8)
image = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return image, mask
def GaussianBlur(image, mask, kernel=(1, 1),sigma=1, p=0.5):
"""With probability p, apply Gaussian blur"""
# TODO
return image, mask
def randomRotate(image, mask, max_angle, p=0.5):
"""Perform random rotation with max_angle and probability p"""
# TODO
return(image, mask)
| [
"numpy.clip",
"PIL.Image.open",
"numpy.roll",
"numpy.flipud",
"numpy.random.random",
"numpy.fliplr",
"numpy.where",
"cv2.cvtColor",
"numpy.percentile",
"cv2.resize",
"cv2.imread",
"numpy.arange"
]
| [((538, 589), 'cv2.resize', 'cv2.resize', (['image', '(width, heigh)', 'cv2.INTER_LINEAR'], {}), '(image, (width, heigh), cv2.INTER_LINEAR)\n', (548, 589), False, 'import cv2\n'), ((379, 401), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (389, 401), False, 'import cv2\n'), ((1257, 1275), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1273, 1275), True, 'import numpy as np\n'), ((1297, 1313), 'numpy.fliplr', 'np.fliplr', (['image'], {}), '(image)\n', (1306, 1313), True, 'import numpy as np\n'), ((1329, 1344), 'numpy.fliplr', 'np.fliplr', (['mask'], {}), '(mask)\n', (1338, 1344), True, 'import numpy as np\n'), ((1475, 1493), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1491, 1493), True, 'import numpy as np\n'), ((1515, 1531), 'numpy.flipud', 'np.flipud', (['image'], {}), '(image)\n', (1524, 1531), True, 'import numpy as np\n'), ((1547, 1562), 'numpy.flipud', 'np.flipud', (['mask'], {}), '(mask)\n', (1556, 1562), True, 'import numpy as np\n'), ((1898, 1948), 'numpy.arange', 'np.arange', (['(-max_shift_pixels)', '(max_shift_pixels + 1)'], {}), '(-max_shift_pixels, max_shift_pixels + 1)\n', (1907, 1948), True, 'import numpy as np\n'), ((1955, 1973), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1971, 1973), True, 'import numpy as np\n'), ((1995, 2024), 'numpy.roll', 'np.roll', (['image', 'shift'], {'axis': '(1)'}), '(image, shift, axis=1)\n', (2002, 2024), True, 'import numpy as np\n'), ((2040, 2068), 'numpy.roll', 'np.roll', (['mask', 'shift'], {'axis': '(1)'}), '(mask, shift, axis=1)\n', (2047, 2068), True, 'import numpy as np\n'), ((2395, 2445), 'numpy.arange', 'np.arange', (['(-max_shift_pixels)', '(max_shift_pixels + 1)'], {}), '(-max_shift_pixels, max_shift_pixels + 1)\n', (2404, 2445), True, 'import numpy as np\n'), ((2452, 2470), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2468, 2470), True, 'import numpy as np\n'), ((2496, 2525), 'numpy.roll', 'np.roll', (['image', 'shift'], {'axis': '(0)'}), '(image, shift, axis=0)\n', (2503, 2525), True, 'import numpy as np\n'), ((2545, 2573), 'numpy.roll', 'np.roll', (['mask', 'shift'], {'axis': '(0)'}), '(mask, shift, axis=0)\n', (2552, 2573), True, 'import numpy as np\n'), ((2694, 2712), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2710, 2712), True, 'import numpy as np\n'), ((3039, 3057), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3055, 3057), True, 'import numpy as np\n'), ((3083, 3115), 'numpy.percentile', 'np.percentile', (['(255 - image)'], {'q': '(25)'}), '(255 - image, q=25)\n', (3096, 3115), True, 'import numpy as np\n'), ((3696, 3714), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3712, 3714), True, 'import numpy as np\n'), ((3801, 3839), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2HSV'], {}), '(image, cv2.COLOR_BGR2HSV)\n', (3813, 3839), False, 'import cv2\n'), ((3960, 3996), 'cv2.cvtColor', 'cv2.cvtColor', (['hsv', 'cv2.COLOR_HSV2BGR'], {}), '(hsv, cv2.COLOR_HSV2BGR)\n', (3972, 3996), False, 'import cv2\n'), ((437, 459), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (447, 459), False, 'from PIL import Image\n'), ((3210, 3242), 'numpy.arange', 'np.arange', (['(-max_value)', 'max_value'], {}), '(-max_value, max_value)\n', (3219, 3242), True, 'import numpy as np\n'), ((3753, 3785), 'numpy.arange', 'np.arange', (['(-max_value)', 'max_value'], {}), '(-max_value, max_value)\n', (3762, 3785), True, 'import numpy as np\n'), ((3894, 3926), 'numpy.clip', 'np.clip', 
'np.clip', (['hsv'], {'a_min': '(0)', 'a_max': '(255)'}), '(hsv, a_min=0, a_max=255)\n', (3901, 3926), True, 'import numpy as np\n'), ((3286, 3335), 'numpy.where', 'np.where', (['(255 - image < value)', '(255)', '(image + value)'], {}), '(255 - image < value, 255, image + value)\n', (3294, 3335), True, 'import numpy as np\n'), ((3385, 3427), 'numpy.where', 'np.where', (['(image < -value)', '(0)', '(image + value)'], {}), '(image < -value, 0, image + value)\n', (3393, 3427), True, 'import numpy as np\n')]
### Load necessary libraries ###
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow import keras
from sklearn.metrics import ConfusionMatrixDisplay
model = get_network()
model.summary()
### Train and evaluate via 10-Folds cross-validation ###
accuracies = []
folds = np.array(['fold1','fold2','fold3','fold4',
'fold5','fold6','fold7','fold8',
'fold9','fold10'])
load_dir = "UrbanSounds8K/processed/"
kf = KFold(n_splits=10)
for train_index, test_index in kf.split(folds):
x_train, y_train = [], []
for ind in train_index:
# read features or segments of an audio file
train_data = np.load("{0}/{1}.npz".format(load_dir,folds[ind]),
allow_pickle=True)
# for training stack all the segments so that they are treated as an example/instance
features = np.concatenate(train_data["features"], axis=0)
labels = np.concatenate(train_data["labels"], axis=0)
x_train.append(features)
y_train.append(labels)
# stack x,y pairs of all training folds
x_train = np.concatenate(x_train, axis = 0).astype(np.float32)
y_train = np.concatenate(y_train, axis = 0).astype(np.float32)
# for testing we will make predictions on each segment and average them to
# produce single label for an entire sound clip.
test_data = np.load("{0}/{1}.npz".format(load_dir,
folds[test_index][0]), allow_pickle=True)
x_test = test_data["features"]
y_test = test_data["labels"]
log_dir="logs/fit/" + folds[test_index][0]
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
model = get_network()
model.fit(x_train, y_train, epochs = 20, batch_size = 64, verbose = 1, validation_split=0.2,
use_multiprocessing=True, workers=8, callbacks=[tensorboard_callback])
# evaluate on test set/fold
y_true, y_pred = [], []
for x, y in zip(x_test, y_test):
# average predictions over segments of a sound clip
avg_p = np.argmax(np.mean(model.predict(x), axis = 0))
y_pred.append(avg_p)
# pick single label via np.unique for a sound clip
y_true.append(np.unique(y)[0])
accuracies.append(accuracy_score(y_true, y_pred))
print("Fold n accuracy: {0}".format(accuracy_score(y_true, y_pred)))
cm = ConfusionMatrixDisplay.from_predictions(y_true, y_pred)
cm.figure_.savefig('conf_mat_' + str(test_index) + '_acc_' + str(accuracy_score(y_true, y_pred)) + '.png',dpi=1000)
print("Average 10 Folds Accuracy: {0}".format(np.mean(accuracies)))
| [
"numpy.mean",
"sklearn.metrics.ConfusionMatrixDisplay.from_predictions",
"tensorflow.keras.callbacks.TensorBoard",
"numpy.unique",
"numpy.array",
"numpy.concatenate",
"sklearn.model_selection.KFold",
"sklearn.metrics.accuracy_score"
]
| [((365, 470), 'numpy.array', 'np.array', (["['fold1', 'fold2', 'fold3', 'fold4', 'fold5', 'fold6', 'fold7', 'fold8',\n 'fold9', 'fold10']"], {}), "(['fold1', 'fold2', 'fold3', 'fold4', 'fold5', 'fold6', 'fold7',\n 'fold8', 'fold9', 'fold10'])\n", (373, 470), True, 'import numpy as np\n'), ((539, 557), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (544, 557), False, 'from sklearn.model_selection import KFold\n'), ((1696, 1761), 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': 'log_dir', 'histogram_freq': '(1)'}), '(log_dir=log_dir, histogram_freq=1)\n', (1726, 1761), True, 'import tensorflow as tf\n'), ((2468, 2523), 'sklearn.metrics.ConfusionMatrixDisplay.from_predictions', 'ConfusionMatrixDisplay.from_predictions', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2507, 2523), False, 'from sklearn.metrics import ConfusionMatrixDisplay\n'), ((945, 991), 'numpy.concatenate', 'np.concatenate', (["train_data['features']"], {'axis': '(0)'}), "(train_data['features'], axis=0)\n", (959, 991), True, 'import numpy as np\n'), ((1010, 1054), 'numpy.concatenate', 'np.concatenate', (["train_data['labels']"], {'axis': '(0)'}), "(train_data['labels'], axis=0)\n", (1024, 1054), True, 'import numpy as np\n'), ((2349, 2379), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2363, 2379), False, 'from sklearn.metrics import accuracy_score\n'), ((2695, 2714), 'numpy.mean', 'np.mean', (['accuracies'], {}), '(accuracies)\n', (2702, 2714), True, 'import numpy as np\n'), ((1179, 1210), 'numpy.concatenate', 'np.concatenate', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (1193, 1210), True, 'import numpy as np\n'), ((1246, 1277), 'numpy.concatenate', 'np.concatenate', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (1260, 1277), True, 'import numpy as np\n'), ((2425, 2455), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2439, 2455), False, 'from sklearn.metrics import accuracy_score\n'), ((2309, 2321), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2318, 2321), True, 'import numpy as np\n'), ((2593, 2623), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (2607, 2623), False, 'from sklearn.metrics import accuracy_score\n')] |
import os
import pprint
import subprocess
import time
from typing import Dict, List
from kubernetes.client import (
V1EnvVar,
V1EnvVarSource,
V1ObjectFieldSelector,
V1ResourceFieldSelector,
)
from metaflow import FlowSpec, step, environment, resources, current
def get_env_vars(env_resources: Dict[str, str]) -> List[V1EnvVar]:
res = []
for name, resource in env_resources.items():
res.append(
V1EnvVar(
# this is used by some functions of operator-sdk
# it uses this environment variable to get the pods
name=name,
value_from=V1EnvVarSource(
resource_field_ref=V1ResourceFieldSelector(
container_name="main",
resource=resource,
divisor="1m" if "cpu" in resource else "1",
)
),
)
)
return res
kubernetes_vars = get_env_vars(
{
"LOCAL_STORAGE": "requests.ephemeral-storage",
"LOCAL_STORAGE_LIMIT": "limits.ephemeral-storage",
"CPU": "requests.cpu",
"CPU_LIMIT": "limits.cpu",
"MEMORY": "requests.memory",
"MEMORY_LIMIT": "limits.memory",
}
)
kubernetes_vars.append(
V1EnvVar(
name="MY_POD_NAME",
value_from=V1EnvVarSource(
field_ref=V1ObjectFieldSelector(field_path="metadata.name")
),
)
)
annotations = {
"metaflow.org/flow_name": "MF_NAME",
"metaflow.org/step": "MF_STEP",
"metaflow.org/run_id": "MF_RUN_ID",
"metaflow.org/experiment": "MF_EXPERIMENT",
"metaflow.org/tag_metaflow_test": "MF_TAG_METAFLOW_TEST",
"metaflow.org/tag_test_t1": "MF_TAG_TEST_T1",
}
for annotation, env_name in annotations.items():
kubernetes_vars.append(
V1EnvVar(
name=env_name,
value_from=V1EnvVarSource(
field_ref=V1ObjectFieldSelector(
field_path=f"metadata.annotations['{annotation}']"
)
),
)
)
labels = {
"aip.zillowgroup.net/kfp-pod-default": "KF_POD_DEFAULT",
"tags.ledger.zgtools.net/ai-flow-name": "AI_FLOW_NAME",
"tags.ledger.zgtools.net/ai-step-name": "AI_STEP_NAME",
"tags.ledger.zgtools.net/ai-experiment-name": "AI_EXPERIMENT_NAME",
}
for label, env_name in labels.items():
kubernetes_vars.append(
V1EnvVar(
name=env_name,
value_from=V1EnvVarSource(
field_ref=V1ObjectFieldSelector(
field_path=f"metadata.labels['{label}']"
)
),
)
)
class ResourcesFlow(FlowSpec):
@resources(
local_storage="242",
cpu="0.6",
memory="1G",
)
@environment( # pylint: disable=E1102
vars={"MY_ENV": "value"}, kubernetes_vars=kubernetes_vars
)
@step
def start(self):
pprint.pprint(dict(os.environ))
print("=====")
# test simple environment var
assert os.environ.get("MY_ENV") == "value"
# test kubernetes_vars
assert "resourcesflow" in os.environ.get("MY_POD_NAME")
assert os.environ.get("CPU") == "600"
assert os.environ.get("CPU_LIMIT") == "600"
assert os.environ.get("LOCAL_STORAGE") == "242000000"
assert os.environ.get("LOCAL_STORAGE_LIMIT") == "242000000"
assert os.environ.get("MEMORY") == "1000000000"
assert os.environ.get("MEMORY_LIMIT") == "1000000000"
assert os.environ.get("MF_NAME") == current.flow_name
assert os.environ.get("MF_STEP") == current.step_name
assert os.environ.get("MF_RUN_ID") == current.run_id
assert os.environ.get("MF_EXPERIMENT") == "metaflow_test"
assert os.environ.get("MF_TAG_METAFLOW_TEST") == "true"
assert os.environ.get("MF_TAG_TEST_T1") == "true"
assert os.environ.get("KF_POD_DEFAULT") == "true"
assert os.environ.get("AI_FLOW_NAME") == current.flow_name
assert os.environ.get("AI_STEP_NAME") == current.step_name
assert os.environ.get("AI_EXPERIMENT_NAME") == "metaflow_test"
self.items = [1, 2]
self.next(self.foreach_step, foreach="items")
@environment(vars={"MY_ENV": "value"}) # pylint: disable=E1102
@resources(volume="11G")
@step
def foreach_step(self):
# test simple environment var
assert os.environ.get("MY_ENV") == "value"
output = subprocess.check_output(
"df -h | grep /opt/metaflow_volume", shell=True
)
assert "11G" in str(output)
self.next(self.join_step)
@resources(volume="12G")
@step
def join_step(self, inputs):
output = subprocess.check_output(
"df -h | grep /opt/metaflow_volume", shell=True
)
assert "12G" in str(output)
self.next(self.split_step)
@step
def split_step(self):
self.items = [1, 2]
self.next(self.shared_volume_foreach_step, foreach="items")
@resources(volume="13G", volume_mode="ReadWriteMany")
@step
def shared_volume_foreach_step(self):
output = subprocess.check_output(
"df -h | grep /opt/metaflow_volume", shell=True
)
assert "13G" in str(output)
file_path = "/opt/metaflow_volume/test.txt"
message = "hello world!"
# validate the volume is shared across the foreach splits
if self.input == 1:
with open(file_path, "w") as f:
f.write(message)
else:
while not os.path.exists(file_path):
time.sleep(1)
print(".")
with open(file_path, "r") as f:
read_lines = f.readlines()
print("read_lines", read_lines)
assert message == read_lines[0]
self.next(self.shared_volume_join_step)
@step
def shared_volume_join_step(self, inputs):
self.next(self.end)
@step
def end(self):
print("All done.")
if __name__ == "__main__":
ResourcesFlow()
| [
"subprocess.check_output",
"os.path.exists",
"os.environ.get",
"time.sleep",
"kubernetes.client.V1ObjectFieldSelector",
"metaflow.resources",
"kubernetes.client.V1ResourceFieldSelector",
"metaflow.environment"
]
| [((2696, 2750), 'metaflow.resources', 'resources', ([], {'local_storage': '"""242"""', 'cpu': '"""0.6"""', 'memory': '"""1G"""'}), "(local_storage='242', cpu='0.6', memory='1G')\n", (2705, 2750), False, 'from metaflow import FlowSpec, step, environment, resources, current\n'), ((2787, 2857), 'metaflow.environment', 'environment', ([], {'vars': "{'MY_ENV': 'value'}", 'kubernetes_vars': 'kubernetes_vars'}), "(vars={'MY_ENV': 'value'}, kubernetes_vars=kubernetes_vars)\n", (2798, 2857), False, 'from metaflow import FlowSpec, step, environment, resources, current\n'), ((4251, 4288), 'metaflow.environment', 'environment', ([], {'vars': "{'MY_ENV': 'value'}"}), "(vars={'MY_ENV': 'value'})\n", (4262, 4288), False, 'from metaflow import FlowSpec, step, environment, resources, current\n'), ((4319, 4342), 'metaflow.resources', 'resources', ([], {'volume': '"""11G"""'}), "(volume='11G')\n", (4328, 4342), False, 'from metaflow import FlowSpec, step, environment, resources, current\n'), ((4660, 4683), 'metaflow.resources', 'resources', ([], {'volume': '"""12G"""'}), "(volume='12G')\n", (4669, 4683), False, 'from metaflow import FlowSpec, step, environment, resources, current\n'), ((5049, 5101), 'metaflow.resources', 'resources', ([], {'volume': '"""13G"""', 'volume_mode': '"""ReadWriteMany"""'}), "(volume='13G', volume_mode='ReadWriteMany')\n", (5058, 5101), False, 'from metaflow import FlowSpec, step, environment, resources, current\n'), ((4488, 4560), 'subprocess.check_output', 'subprocess.check_output', (['"""df -h | grep /opt/metaflow_volume"""'], {'shell': '(True)'}), "('df -h | grep /opt/metaflow_volume', shell=True)\n", (4511, 4560), False, 'import subprocess\n'), ((4744, 4816), 'subprocess.check_output', 'subprocess.check_output', (['"""df -h | grep /opt/metaflow_volume"""'], {'shell': '(True)'}), "('df -h | grep /opt/metaflow_volume', shell=True)\n", (4767, 4816), False, 'import subprocess\n'), ((5171, 5243), 'subprocess.check_output', 'subprocess.check_output', (['"""df -h | grep /opt/metaflow_volume"""'], {'shell': '(True)'}), "('df -h | grep /opt/metaflow_volume', shell=True)\n", (5194, 5243), False, 'import subprocess\n'), ((3045, 3069), 'os.environ.get', 'os.environ.get', (['"""MY_ENV"""'], {}), "('MY_ENV')\n", (3059, 3069), False, 'import os\n'), ((3147, 3176), 'os.environ.get', 'os.environ.get', (['"""MY_POD_NAME"""'], {}), "('MY_POD_NAME')\n", (3161, 3176), False, 'import os\n'), ((3192, 3213), 'os.environ.get', 'os.environ.get', (['"""CPU"""'], {}), "('CPU')\n", (3206, 3213), False, 'import os\n'), ((3238, 3265), 'os.environ.get', 'os.environ.get', (['"""CPU_LIMIT"""'], {}), "('CPU_LIMIT')\n", (3252, 3265), False, 'import os\n'), ((3290, 3321), 'os.environ.get', 'os.environ.get', (['"""LOCAL_STORAGE"""'], {}), "('LOCAL_STORAGE')\n", (3304, 3321), False, 'import os\n'), ((3352, 3389), 'os.environ.get', 'os.environ.get', (['"""LOCAL_STORAGE_LIMIT"""'], {}), "('LOCAL_STORAGE_LIMIT')\n", (3366, 3389), False, 'import os\n'), ((3420, 3444), 'os.environ.get', 'os.environ.get', (['"""MEMORY"""'], {}), "('MEMORY')\n", (3434, 3444), False, 'import os\n'), ((3476, 3506), 'os.environ.get', 'os.environ.get', (['"""MEMORY_LIMIT"""'], {}), "('MEMORY_LIMIT')\n", (3490, 3506), False, 'import os\n'), ((3539, 3564), 'os.environ.get', 'os.environ.get', (['"""MF_NAME"""'], {}), "('MF_NAME')\n", (3553, 3564), False, 'import os\n'), ((3601, 3626), 'os.environ.get', 'os.environ.get', (['"""MF_STEP"""'], {}), "('MF_STEP')\n", (3615, 3626), False, 'import os\n'), ((3663, 3690), 'os.environ.get', 
'os.environ.get', (['"""MF_RUN_ID"""'], {}), "('MF_RUN_ID')\n", (3677, 3690), False, 'import os\n'), ((3724, 3755), 'os.environ.get', 'os.environ.get', (['"""MF_EXPERIMENT"""'], {}), "('MF_EXPERIMENT')\n", (3738, 3755), False, 'import os\n'), ((3790, 3828), 'os.environ.get', 'os.environ.get', (['"""MF_TAG_METAFLOW_TEST"""'], {}), "('MF_TAG_METAFLOW_TEST')\n", (3804, 3828), False, 'import os\n'), ((3854, 3886), 'os.environ.get', 'os.environ.get', (['"""MF_TAG_TEST_T1"""'], {}), "('MF_TAG_TEST_T1')\n", (3868, 3886), False, 'import os\n'), ((3913, 3945), 'os.environ.get', 'os.environ.get', (['"""KF_POD_DEFAULT"""'], {}), "('KF_POD_DEFAULT')\n", (3927, 3945), False, 'import os\n'), ((3972, 4002), 'os.environ.get', 'os.environ.get', (['"""AI_FLOW_NAME"""'], {}), "('AI_FLOW_NAME')\n", (3986, 4002), False, 'import os\n'), ((4039, 4069), 'os.environ.get', 'os.environ.get', (['"""AI_STEP_NAME"""'], {}), "('AI_STEP_NAME')\n", (4053, 4069), False, 'import os\n'), ((4106, 4142), 'os.environ.get', 'os.environ.get', (['"""AI_EXPERIMENT_NAME"""'], {}), "('AI_EXPERIMENT_NAME')\n", (4120, 4142), False, 'import os\n'), ((4434, 4458), 'os.environ.get', 'os.environ.get', (['"""MY_ENV"""'], {}), "('MY_ENV')\n", (4448, 4458), False, 'import os\n'), ((5596, 5621), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (5610, 5621), False, 'import os\n'), ((5639, 5652), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5649, 5652), False, 'import time\n'), ((1386, 1435), 'kubernetes.client.V1ObjectFieldSelector', 'V1ObjectFieldSelector', ([], {'field_path': '"""metadata.name"""'}), "(field_path='metadata.name')\n", (1407, 1435), False, 'from kubernetes.client import V1EnvVar, V1EnvVarSource, V1ObjectFieldSelector, V1ResourceFieldSelector\n'), ((1938, 2011), 'kubernetes.client.V1ObjectFieldSelector', 'V1ObjectFieldSelector', ([], {'field_path': 'f"""metadata.annotations[\'{annotation}\']"""'}), '(field_path=f"metadata.annotations[\'{annotation}\']")\n', (1959, 2011), False, 'from kubernetes.client import V1EnvVar, V1EnvVarSource, V1ObjectFieldSelector, V1ResourceFieldSelector\n'), ((2525, 2588), 'kubernetes.client.V1ObjectFieldSelector', 'V1ObjectFieldSelector', ([], {'field_path': 'f"""metadata.labels[\'{label}\']"""'}), '(field_path=f"metadata.labels[\'{label}\']")\n', (2546, 2588), False, 'from kubernetes.client import V1EnvVar, V1EnvVarSource, V1ObjectFieldSelector, V1ResourceFieldSelector\n'), ((694, 808), 'kubernetes.client.V1ResourceFieldSelector', 'V1ResourceFieldSelector', ([], {'container_name': '"""main"""', 'resource': 'resource', 'divisor': "('1m' if 'cpu' in resource else '1')"}), "(container_name='main', resource=resource, divisor=\n '1m' if 'cpu' in resource else '1')\n", (717, 808), False, 'from kubernetes.client import V1EnvVar, V1EnvVarSource, V1ObjectFieldSelector, V1ResourceFieldSelector\n')] |