ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py | 1a4ce6ea921951fdbc98deb27d32d19ae58bbd4e | import _pickle as pickle
from keras.models import load_model
class BaseModel(object):
def __init__(self, model_size):
self.model_size = model_size
self.model = None
def save(self, filename):
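# Keras models are not reliably picklable, so the network itself is written to a
# separate '<filename>.model' file and removed from the pickled attribute dict below.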
if self.model is not None:
self.model.save(filename + '.model')
d = dict(self.__dict__)
d.pop('model')
f = open(filename, 'wb')
pickle.dump(d, f)
f.close()
@classmethod
def load(cls, filename):
model = load_model(filename + '.model')
f = open(filename, 'rb')
attrs = pickle.load(f)
f.close()
obj = cls(attrs['model_size'])
for key, value in attrs.items():
setattr(obj, key, value)
obj.model = model
return obj |
py | 1a4ce71545cae645765802f53e1b40472684147e | # Copyright (c) 2013, Helio de Jesus and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import msgprint, _
def execute(filters=None):
return _execute(filters)
def _execute(filters, additional_table_columns=None, additional_query_columns=None):
if not filters: filters = frappe._dict({})
if filters.get("company"):
invoice_list = get_invoices(filters, additional_query_columns)
print('Invoice List')
print(invoice_list)
print(additional_query_columns)
columns = get_columns(invoice_list, additional_table_columns)
if not invoice_list:
msgprint(_("No record found"))
return columns, invoice_list
company_currency = frappe.db.get_value("Company", filters.get("company"), "default_currency")
mes2_ = 0
data = []
for inv in invoice_list:
# resolve the numeric month into its (Portuguese) name
if inv.Mes == 1: mes2_ = 'Janeiro'
if inv.Mes == 2: mes2_ = 'Fevereiro'
if inv.Mes == 3: mes2_ = 'Marco'
if inv.Mes == 4: mes2_ = 'Abril'
if inv.Mes == 5: mes2_ = 'Maio'
if inv.Mes == 6: mes2_ = 'Junho'
if inv.Mes == 7: mes2_ = 'Julho'
if inv.Mes == 8: mes2_ = 'Agosto'
if inv.Mes == 9: mes2_ = 'Setembro'
if inv.Mes == 10: mes2_ = 'Outubro'
if inv.Mes == 11: mes2_ = 'Novembro'
if inv.Mes == 12: mes2_ = 'Dezembro'
#print mes2_.encode('utf-8')
row = [
inv.Ano, mes2_ , inv.Total, inv.Selo
]
if additional_query_columns:
for col in additional_query_columns:
row.append(inv.get(col))
# total tax, grand total, outstanding amount & rounded total
row += [inv.Total]
data.append(row)
return columns, data
else:
frappe.throw(_("Selecione a Empresa primeiro."))
def get_columns(invoice_list, additional_table_columns):
"""return columns based on filters"""
columns = [
_("Ano") + "::80", _("Mes") + "::80"
]
columns = columns + [_("Total") + ":Currency/currency:120"] + [_("Imp. Selo 1%") + ":Currency/currency:120"]
return columns
def get_conditions(filters):
conditions = ""
if filters.get("company"): conditions += " and company=%(company)s"
if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s"
if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s"
return conditions
def get_invoices(filters, additional_query_columns):
if additional_query_columns:
additional_query_columns = ', ' + ', '.join(additional_query_columns)
conditions = get_conditions(filters)
#Wrong should be by Payment Entry/Recibo
#return frappe.db.sql(""" select year(posting_date) as Ano, month(posting_date) as Mes, sum(base_grand_total) as Total, sum(base_grand_total*1/100) as Selo from `tabSales Invoice` where docstatus =1 and outstanding_amount = 0 %s group by month(posting_date) order by year(posting_date), month(posting_date)""".format(additional_query_columns or '') %
# conditions, filters, as_dict=1)
#added POS invoices to report
Facturas = frappe.db.sql(""" select year(posting_date) as Ano, month(posting_date) as Mes, sum(paid_amount) as Total, sum(paid_amount*1/100) as Selo from `tabPayment Entry` where payment_type='receive' and docstatus=1 and paid_amount <> 0 %s group by month(posting_date) order by year(posting_date), month(posting_date)""".format(additional_query_columns or '') % conditions, filters, as_dict=1)
FacturasPOS = frappe.db.sql(""" select year(posting_date) as Ano, month(posting_date) as Mes, sum(paid_amount) as Total, sum(paid_amount*1/100) as Selo from `tabSales Invoice` where is_pos = 1 and docstatus=1 and paid_amount <> 0 %s group by month(posting_date) order by year(posting_date), month(posting_date)""".format(additional_query_columns or '') % conditions, filters, as_dict=1)
# Facturas1 = frappe.db.sql(""" select year(x.posting_date) as Ano, month(x.posting_date) as Mes, sum(x.paid_amount) as Total, sum(x.paid_amount*1/100) as Selo from (select posting_date, paid_amount from `tabPayment Entry` where payment_type='receive' and docstatus=1 and paid_amount <> 0 and company=%(company)s and posting_date >= %(from_date)s and posting_date <= %(to_date)s UNION ALL select posting_date, paid_amount from `tabSales Invoice` where is_pos = 1 and docstatus=1 and paid_amount <> 0 and company=%(company)s and posting_date >= %(from_date)s and posting_date <= %(to_date)s ) as x GROUP BY Mes ORDER BY Ano, Mes""", filters, as_dict=1)
# Facturas1 = frappe.db.sql(""" select year(posting_date) as Ano, month(posting_date) as Mes, sum(credit) as Selo from `tabGL Entry` where account like '3471%%' and docstatus = 1 and company=%(company)s and posting_date >= %(from_date)s and posting_date <= %(to_date)s GROUP BY Mes ORDER BY Ano, Mes """, filters, as_dict=1)
# Facturas1 = frappe.db.sql(""" select year(gl.posting_date) as Ano, month(gl.posting_date) as Mes, sum(pe.paid_amount) as Total, sum(gl.credit) as Selo from `tabGL Entry` gl join (select name,paid_amount from `tabPayment Entry`) pe on pe.name = gl.voucher_no where gl.account like '3471%%' and gl.docstatus = 1 and gl.company=%(company)s and gl.posting_date >= %(from_date)s and gl.posting_date <= %(to_date)s and gl.credit != 0 GROUP BY Mes order by year(posting_date), month(posting_date) """, filters, as_dict=1)
#return Facturas1
return Facturas + FacturasPOS
|
py | 1a4ce744a8092ff27273396d9dc25c485144a88e | from numpy.distutils.core import setup
from numpy.distutils.misc_util import Configuration
def configuration(parent_package='', top_path=None):
config = Configuration('delaunay', parent_package, top_path)
config.add_extension("_delaunay",
sources=["_delaunay.cpp", "VoronoiDiagramGenerator.cpp",
"delaunay_utils.cpp", "natneighbors.cpp"],
include_dirs=['.'],
)
return config
if __name__ == '__main__':
setup(**configuration(top_path='').todict())
|
py | 1a4ce7798781196ce7b95d4e90e93baedec7df70 | """Provides a sensor to track various status aspects of a UPS."""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
CONF_HOST, CONF_PORT, CONF_NAME, CONF_USERNAME, CONF_PASSWORD,
TEMP_CELSIUS, CONF_RESOURCES, CONF_ALIAS, ATTR_STATE, STATE_UNKNOWN,
POWER_WATT)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'NUT UPS'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 3493
KEY_STATUS = 'ups.status'
KEY_STATUS_DISPLAY = 'ups.status.display'
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
SENSOR_TYPES = {
'ups.status.display': ['Status', '', 'mdi:information-outline'],
'ups.status': ['Status Data', '', 'mdi:information-outline'],
'ups.alarm': ['Alarms', '', 'mdi:alarm'],
'ups.time': ['Internal Time', '', 'mdi:calendar-clock'],
'ups.date': ['Internal Date', '', 'mdi:calendar'],
'ups.model': ['Model', '', 'mdi:information-outline'],
'ups.mfr': ['Manufacturer', '', 'mdi:information-outline'],
'ups.mfr.date': ['Manufacture Date', '', 'mdi:calendar'],
'ups.serial': ['Serial Number', '', 'mdi:information-outline'],
'ups.vendorid': ['Vendor ID', '', 'mdi:information-outline'],
'ups.productid': ['Product ID', '', 'mdi:information-outline'],
'ups.firmware': ['Firmware Version', '', 'mdi:information-outline'],
'ups.firmware.aux': ['Firmware Version 2', '', 'mdi:information-outline'],
'ups.temperature': ['UPS Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'ups.load': ['Load', '%', 'mdi:gauge'],
'ups.load.high': ['Overload Setting', '%', 'mdi:gauge'],
'ups.id': ['System identifier', '', 'mdi:information-outline'],
'ups.delay.start': ['Load Restart Delay', 's', 'mdi:timer'],
'ups.delay.reboot': ['UPS Reboot Delay', 's', 'mdi:timer'],
'ups.delay.shutdown': ['UPS Shutdown Delay', 's', 'mdi:timer'],
'ups.timer.start': ['Load Start Timer', 's', 'mdi:timer'],
'ups.timer.reboot': ['Load Reboot Timer', 's', 'mdi:timer'],
'ups.timer.shutdown': ['Load Shutdown Timer', 's', 'mdi:timer'],
'ups.test.interval': ['Self-Test Interval', 's', 'mdi:timer'],
'ups.test.result': ['Self-Test Result', '', 'mdi:information-outline'],
'ups.test.date': ['Self-Test Date', '', 'mdi:calendar'],
'ups.display.language': ['Language', '', 'mdi:information-outline'],
'ups.contacts': ['External Contacts', '', 'mdi:information-outline'],
'ups.efficiency': ['Efficiency', '%', 'mdi:gauge'],
'ups.power': ['Current Apparent Power', 'VA', 'mdi:flash'],
'ups.power.nominal': ['Nominal Power', 'VA', 'mdi:flash'],
'ups.realpower': ['Current Real Power', POWER_WATT, 'mdi:flash'],
'ups.realpower.nominal': ['Nominal Real Power', POWER_WATT, 'mdi:flash'],
'ups.beeper.status': ['Beeper Status', '', 'mdi:information-outline'],
'ups.type': ['UPS Type', '', 'mdi:information-outline'],
'ups.watchdog.status': ['Watchdog Status', '', 'mdi:information-outline'],
'ups.start.auto': ['Start on AC', '', 'mdi:information-outline'],
'ups.start.battery': ['Start on Battery', '', 'mdi:information-outline'],
'ups.start.reboot': ['Reboot on Battery', '', 'mdi:information-outline'],
'ups.shutdown': ['Shutdown Ability', '', 'mdi:information-outline'],
'battery.charge': ['Battery Charge', '%', 'mdi:gauge'],
'battery.charge.low': ['Low Battery Setpoint', '%', 'mdi:gauge'],
'battery.charge.restart': ['Minimum Battery to Start', '%', 'mdi:gauge'],
'battery.charge.warning': ['Warning Battery Setpoint', '%', 'mdi:gauge'],
'battery.charger.status':
['Charging Status', '', 'mdi:information-outline'],
'battery.voltage': ['Battery Voltage', 'V', 'mdi:flash'],
'battery.voltage.nominal': ['Nominal Battery Voltage', 'V', 'mdi:flash'],
'battery.voltage.low': ['Low Battery Voltage', 'V', 'mdi:flash'],
'battery.voltage.high': ['High Battery Voltage', 'V', 'mdi:flash'],
'battery.capacity': ['Battery Capacity', 'Ah', 'mdi:flash'],
'battery.current': ['Battery Current', 'A', 'mdi:flash'],
'battery.current.total': ['Total Battery Current', 'A', 'mdi:flash'],
'battery.temperature':
['Battery Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'battery.runtime': ['Battery Runtime', 's', 'mdi:timer'],
'battery.runtime.low': ['Low Battery Runtime', 's', 'mdi:timer'],
'battery.runtime.restart':
['Minimum Battery Runtime to Start', 's', 'mdi:timer'],
'battery.alarm.threshold':
['Battery Alarm Threshold', '', 'mdi:information-outline'],
'battery.date': ['Battery Date', '', 'mdi:calendar'],
'battery.mfr.date': ['Battery Manuf. Date', '', 'mdi:calendar'],
'battery.packs': ['Number of Batteries', '', 'mdi:information-outline'],
'battery.packs.bad':
['Number of Bad Batteries', '', 'mdi:information-outline'],
'battery.type': ['Battery Chemistry', '', 'mdi:information-outline'],
'input.sensitivity':
['Input Power Sensitivity', '', 'mdi:information-outline'],
'input.transfer.low': ['Low Voltage Transfer', 'V', 'mdi:flash'],
'input.transfer.high': ['High Voltage Transfer', 'V', 'mdi:flash'],
'input.transfer.reason':
['Voltage Transfer Reason', '', 'mdi:information-outline'],
'input.voltage': ['Input Voltage', 'V', 'mdi:flash'],
'input.voltage.nominal': ['Nominal Input Voltage', 'V', 'mdi:flash'],
'input.frequency': ['Input Line Frequency', 'hz', 'mdi:flash'],
'input.frequency.nominal':
['Nominal Input Line Frequency', 'hz', 'mdi:flash'],
'input.frequency.status':
['Input Frequency Status', '', 'mdi:information-outline'],
'output.current': ['Output Current', 'A', 'mdi:flash'],
'output.current.nominal':
['Nominal Output Current', 'A', 'mdi:flash'],
'output.voltage': ['Output Voltage', 'V', 'mdi:flash'],
'output.voltage.nominal':
['Nominal Output Voltage', 'V', 'mdi:flash'],
'output.frequency': ['Output Frequency', 'hz', 'mdi:flash'],
'output.frequency.nominal':
['Nominal Output Frequency', 'hz', 'mdi:flash'],
}
STATE_TYPES = {
'OL': 'Online',
'OB': 'On Battery',
'LB': 'Low Battery',
'HB': 'High Battery',
'RB': 'Battery Needs Replaced',
'CHRG': 'Battery Charging',
'DISCHRG': 'Battery Discharging',
'BYPASS': 'Bypass Active',
'CAL': 'Runtime Calibration',
'OFF': 'Offline',
'OVER': 'Overloaded',
'TRIM': 'Trimming Voltage',
'BOOST': 'Boosting Voltage',
'FSD': 'Forced Shutdown',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ALIAS): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Required(CONF_RESOURCES):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NUT sensors."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
alias = config.get(CONF_ALIAS)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
data = PyNUTData(host, port, alias, username, password)
if data.status is None:
_LOGGER.error("NUT Sensor has no data, unable to set up")
raise PlatformNotReady
_LOGGER.debug('NUT Sensors Available: %s', data.status)
entities = []
for resource in config[CONF_RESOURCES]:
sensor_type = resource.lower()
# Display status is a special case that falls back to the status value
# of the UPS instead.
if sensor_type in data.status or (sensor_type == KEY_STATUS_DISPLAY
and KEY_STATUS in data.status):
entities.append(NUTSensor(name, data, sensor_type))
else:
_LOGGER.warning(
"Sensor type: %s does not appear in the NUT status "
"output, cannot add", sensor_type)
try:
data.update(no_throttle=True)
except data.pynuterror as err:
_LOGGER.error("Failure while testing NUT status retrieval. "
"Cannot continue setup: %s", err)
raise PlatformNotReady
add_entities(entities, True)
class NUTSensor(Entity):
"""Representation of a sensor entity for NUT status values."""
def __init__(self, name, data, sensor_type):
"""Initialize the sensor."""
self._data = data
self.type = sensor_type
self._name = "{} {}".format(name, SENSOR_TYPES[sensor_type][0])
self._unit = SENSOR_TYPES[sensor_type][1]
self._state = None
@property
def name(self):
"""Return the name of the UPS sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return entity state from ups."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit
@property
def device_state_attributes(self):
"""Return the sensor attributes."""
attr = dict()
attr[ATTR_STATE] = self.display_state()
return attr
def display_state(self):
"""Return UPS display state."""
if self._data.status is None:
return STATE_TYPES['OFF']
try:
return " ".join(
STATE_TYPES[state]
for state in self._data.status[KEY_STATUS].split())
except KeyError:
return STATE_UNKNOWN
def update(self):
"""Get the latest status and use it to update our sensor state."""
if self._data.status is None:
self._state = None
return
# In case of the display status sensor, keep a human-readable form
# as the sensor state.
if self.type == KEY_STATUS_DISPLAY:
self._state = self.display_state()
elif self.type not in self._data.status:
self._state = None
else:
self._state = self._data.status[self.type]
class PyNUTData:
"""Stores the data retrieved from NUT.
For each entity to use, acts as the single point responsible for fetching
updates from the server.
"""
def __init__(self, host, port, alias, username, password):
"""Initialize the data object."""
from pynut2.nut2 import PyNUTClient, PyNUTError
self._host = host
self._port = port
self._alias = alias
self._username = username
self._password = password
self.pynuterror = PyNUTError
# Establish client with persistent=False to open/close connection on
# each update call. This is more reliable with async.
self._client = PyNUTClient(self._host, self._port,
self._username, self._password, 5, False)
self._status = None
@property
def status(self):
"""Get latest update if throttle allows. Return status."""
self.update()
return self._status
def _get_alias(self):
"""Get the ups alias from NUT."""
try:
return next(iter(self._client.list_ups()))
except self.pynuterror as err:
_LOGGER.error("Failure getting NUT ups alias, %s", err)
return None
def _get_status(self):
"""Get the ups status from NUT."""
if self._alias is None:
self._alias = self._get_alias()
try:
return self._client.list_vars(self._alias)
except (self.pynuterror, ConnectionResetError) as err:
_LOGGER.debug(
"Error getting NUT vars for host %s: %s", self._host, err)
return None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self, **kwargs):
"""Fetch the latest status from NUT."""
self._status = self._get_status()
|
py | 1a4ce81930346e5e08d68ee071d4c73375ca8940 | import psycopg2
import sys
import os
def main():
is_prod = os.environ.get('IS_HEROKU', None)
print("is prod?? ", is_prod),
if is_prod:
#Define our connection string
host = os.environ['DB_HOST']
dbname = os.environ['DB_NAME']
user = os.environ['DB_USER']
password = os.environ['DB_PASSWORD']
conn_string = "host='{}' dbname='{}' user='{}' password='{}'".format(host,dbname,user,password)
# print the connection string we will use to connect
print("Connecting to database\n ->%s" % (conn_string))
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string)
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
print("Connected!\n")
else:
host = 'localhost'
dbname = 'arsenal_bot'
user = 'nic'
#Define our connection string
conn_string = "host='{}' dbname='{}' user='{}'".format(host,dbname,user)
# print the connection string we will use to connect
print("Connecting to database\n ->%s" % (conn_string))
# get a connection, if a connect cannot be made an exception will be raised here
conn = psycopg2.connect(conn_string)
# conn.cursor will return a cursor object, you can use this cursor to perform queries
cursor = conn.cursor()
print("Connected!\n")
if __name__ == "__main__":
main() |
py | 1a4ce87f212d80a075acaffe18faf3b8be1f7cc0 | from xml.etree import ElementTree
try:
import yaml
except ImportError:
yaml = None
from galaxy.util.submodules import import_submodules
def plugins_dict(module, plugin_type_identifier):
""" Walk through all classes in submodules of module and find ones labelled
with specified plugin_type_identifier and throw in a dictionary to allow
constructions from plugins by these types later on.
"""
plugin_dict = {}
for plugin_module in import_submodules(module, ordered=True):
# FIXME: this is not how one is supposed to use __all__; why did you do
# this, past John?
for clazz in getattr(plugin_module, "__all__", []):
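# entries in __all__ may be either attribute names (strings) or classes themselves;
# getattr resolves the names, and the TypeError fallback keeps already-resolved classes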
try:
clazz = getattr(plugin_module, clazz)
except TypeError:
clazz = clazz
plugin_type = getattr(clazz, plugin_type_identifier, None)
if plugin_type:
plugin_dict[plugin_type] = clazz
return plugin_dict
def load_plugins(plugins_dict, plugin_source, extra_kwds={}):
source_type, source = plugin_source
if source_type == "xml":
return __load_plugins_from_element(plugins_dict, source, extra_kwds)
else:
return __load_plugins_from_dicts(plugins_dict, source, extra_kwds)
def __load_plugins_from_element(plugins_dict, plugins_element, extra_kwds):
plugins = []
for plugin_element in plugins_element:
plugin_type = plugin_element.tag
plugin_kwds = dict(plugin_element.items())
plugin_kwds.update(extra_kwds)
try:
plugin_klazz = plugins_dict[plugin_type]
except KeyError:
template = "Failed to find plugin of type [%s] in available plugin types %s"
message = template % (plugin_type, str(plugins_dict.keys()))
raise Exception(message)
plugin = plugin_klazz(**plugin_kwds)
plugins.append(plugin)
return plugins
def __load_plugins_from_dicts(plugins_dict, configs, extra_kwds):
plugins = []
for config in configs:
plugin_type = config["type"]
plugin_kwds = config
plugin_kwds.update(extra_kwds)
plugin = plugins_dict[plugin_type](**plugin_kwds)
plugins.append(plugin)
return plugins
def plugin_source_from_path(path):
if path.endswith(".yaml") or path.endswith(".yml") or path.endswith(".yaml.sample") or path.endswith(".yml.sample"):
return ('dict', __read_yaml(path))
else:
return ('xml', ElementTree.parse(path).getroot())
def __read_yaml(path):
if yaml is None:
raise ImportError("Attempting to read YAML configuration file - but PyYAML dependency unavailable.")
with open(path, "rb") as f:
return yaml.safe_load(f)
|
py | 1a4cec022707c035dda1845f7de31bb1bd62c393 | from forge_symposia.server import env
from eve_sqlalchemy.config import DomainConfig, ResourceConfig
from forge_symposia.server.models import indexDB
RESOURCE_METHODS = ['GET']
DOMAIN = DomainConfig({
}).render()
JWT_SECRET_KEY = 'python-starter-secret-key'
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + env.INDEX_DB
SQLALCHEMY_BINDS = {
'app_db': "sqlite:///" + env.APP_DB
} |
py | 1a4cec6cde6401cc76bd87e935e157efbaec3a75 | from sstcam_sandbox import get_plot
from CHECLabPy.plotting.setup import Plotter
from CHECOnsky.utils.astri_database import ASTRISQLQuerier
import pandas as pd
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
class Uptime(Plotter):
def plot(self, sql, start, end, title):
start_day = start.floor("D")
end_day = end.ceil("D")
df = sql.get_table_between_datetimes(
"TCU_ELACTPOS", start_day, end_day
)
df = df.set_index('timestamp')
df = df.resample('1h').count()
idx = pd.date_range(start_day, end_day, freq='h')
df_c = df.loc[start_day:end_day].reindex(idx, fill_value=0)
date = df_c.resample('d').count().index
time = pd.date_range("2019-10-09 00:00", "2019-10-10 00:00", freq='h')
count = df_c.iloc[:-1]['value'].values.reshape(
(date.size - 1, time.size - 1))
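# collapse the hourly sample counts into three levels: 0 = no data,
# 1 = partial coverage, 2 = a full hour of samples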
count_3 = count.copy()
count_3[count == 0] = 0
count_3[(count > 0) & (count < count.max())] = 1
count_3[count == count.max()] = 2
weeks = mdates.WeekdayLocator(byweekday=mdates.MO)
hours = mdates.HourLocator()
hours_fmt = mdates.DateFormatter('%H:%M')
cmap = mcolors.LinearSegmentedColormap.from_list("", [
"red", "yellow", "green"
])
self.ax.pcolor(
date, time, count_3.T, cmap=cmap, edgecolors='k', linewidths=0.5
)
self.fig.autofmt_xdate()
self.ax.xaxis.set_major_locator(weeks)
self.ax.yaxis.set_major_locator(hours)
self.ax.yaxis.set_major_formatter(hours_fmt)
self.ax.yaxis.set_tick_params(direction='out', which='both')
self.ax.yaxis.set_tick_params(which='minor', left=False, right=False)
self.ax.xaxis.set_tick_params(direction='out', which='both')
self.ax.xaxis.set_tick_params(which='minor', left=False, right=False)
self.ax.set_title(title)
def main():
sql = ASTRISQLQuerier()
p_uptime = Uptime()
start = pd.Timestamp("2019-04-29 00:00")
end = pd.Timestamp("2019-05-13 00:00")
p_uptime.plot(sql, start, end, None)
start = pd.Timestamp("2019-06-10 00:00")
end = pd.Timestamp("2019-06-16 00:00")
p_uptime.plot(sql, start, end, "ASTRI Pointing Database Uptime")
p_uptime.save(get_plot("d191009_astri_db_uptime/campaign_all.pdf"))
if __name__ == '__main__':
main()
|
py | 1a4cec7ee0a6a795cd0cd9936dde8c8c3b5285b4 | agenda = dict()
td = list()
nomes = list()
cont = 0
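# 'agenda' holds the contact currently being typed in, 'td' stores every saved
# contact as a dict, and 'nomes' keeps the lower-cased names for index lookups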
print(' AGENDA TELEFONICA ')
while True:
menu = int(input('[0] Mostrar agenda\n'
'[1] Novo contato\n'
'[2] Pesquisar contato\n'
'[3] Remover ou fazer alteração do contato\n:'))
while menu not in (0, 1, 2, 3):
print('----Digite um número válido----')
menu = int(input(f'[1] Novo contato\n'
f'[2] Pesquisar contato\n'
f'[3] Remover ou fazer alteração do contato\n:'))
if menu == 0:
print(' ', end='')
for k, v in agenda.items():
print(f'{k}', 17 * ' ', end='')
print()
for k, v in enumerate(td):
if cont != len(td) + 1:
print(f'{k} {td[cont]["nome"]:<22}'
f'{td[cont]["telefone"]:<26}'
f'{td[cont]["email"]:<23}'
f'{td[cont]["twitter"]:<25}'
f'{td[cont]["instagram"]:<5}')
cont += 1
cont = 0
print()
# MENU==1
elif menu == 1:
agenda['nome'] = input(str('Nome: ')).lower()
try:
agenda['telefone'] = int(input('Telefone: '))
except:
print('\033[31mDigite somente números!!!\033[m')
agenda['telefone'] = int(input('Telefone: '))
agenda['email'] = input('Email: ')
agenda['twitter'] = input('twitter: ')
agenda['instagram'] = input('Instagram: ')
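# append a copy so later edits to 'agenda' do not mutate the stored contact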
td.append(agenda.copy())
nomes.append(agenda.copy()['nome'])
print(f'Contato "{agenda["nome"]}" adicionado na agenda!')
print(menu)
# MENU==2
elif menu == 2:
try:
pesquisar = input(f'Pesquisar nome: ')
num = (nomes.index(pesquisar.lower()))
print(f'{num} - {td[num]}')
except:
print('O item não foi encontrado.')
print()
# MENU==3
elif menu == 3:
opcao = int(input('[1] Remover contato\n[2] Fazer alteração\n:'))
while opcao not in (1, 2):
print('----Digite um número válido----')
opcao = int(input('[1] Remover contato\n[2] Fazer alteração\n:'))
# OPCAO=1
if opcao == 1:
try:
remcont = input('Nome do contato que deseja remover: ').lower()
num2 = (nomes.index(remcont.lower()))
td.pop(num2)
nomes.pop(num2)
print(f'Contato {remcont} excluido')
except:
print('O item não foi encontrado.')
elif opcao == 2:
try:
altcont = input('Nome do contato que deseja fazer alteração: ').lower()
num2 = (nomes.index(altcont.lower()))
qual = int(input('Em qual setor deseja fazer alteração:\n'
'[1] Nome\n[2] Telefone\n[3] Email\n[4] Twitter\n[5] Instagram\n:'))
while qual not in (1, 2, 3, 4, 5):
print('----Digite um número válido----')
qual = int(input('Em qual setor deseja fazer alteração:\n'
'[1] Nome\n[2] Telefone\n[3] Email\n[4] Twitter\n[5] Instagram\n:'))
if qual == 1:
novnom = input('Novo nome do contato: ')
td[num2] = {**td[num2], 'nome': novnom}
nomes[num2] = novnom.lower()
print(f'contato alterado!\n{td[num2]}')
elif qual == 2:
novtel = input('Novo telefone do contato: ')
td[num2] = {**td[num2], 'telefone': novtel}
print(f'contato alterado!\n{td[num2]}')
elif qual == 3:
novema = input('Novo email do contato: ')
td[num2] = {**td[num2], 'email': novema}
print(f'contato alterado!\n{td[num2]}')
elif qual == 4:
novtwi = input('Novo twitter do contato: ')
td[num2] = {**td[num2], 'twitter': novtwi}
print(f'contato alterado!\n{td[num2]}')
elif qual == 5:
novinsta = input("Novo instagram do contato: ")
td[num2] = {**td[num2], 'instagram': novinsta}
print(f'contato alterado!\n{td[num2]}')
except:
print('O item não foi encontrado.')
|
py | 1a4cece5c4d0eb49623a32f98c5ac87b0ffda091 | # By Kami Bigdely
# Split temp variable
def save_into_db(info):
print("Saved into database.")
username_input = input('Please enter your username: ')
save_into_db(username_input)
age_input = int(input('Please enter your birth year: '))
age = 2022 - age_input
print("You turn", age, "years old this year.")
|
py | 1a4ced61263b11129974dbb9df3929af290e3f95 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from pathlib import Path
import subprocess
from setuptools import setup, find_packages
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = Path()
# Get the long description from the README file
with open((here / "README.md"), encoding="utf-8") as f:
long_description = f.read()
# get all the git tags from the cmd line that follow our versioning pattern
git_tags = subprocess.Popen(['git', 'tag', '--list', 'v*[0-9]', '--sort=version:refname'], stdout=subprocess.PIPE)
# get the most recent tag after it's been sorted 👆
latest_git_tag = subprocess.Popen(['tail', '-1'], stdin=git_tags.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
git_tags.stdout.close()
latest_version = latest_git_tag.communicate()[0]
# PEP 440 won't accept the v in front, so here we remove it, strip the new line and decode the byte stream
VERSION_FROM_GIT_TAG = latest_version[1:].strip().decode("utf-8")
with open((here / "requirements.txt"), encoding="utf-8") as f:
install_requires = f.read().splitlines()
# removes comments in the requirements file
dependencies = [dependency for dependency in install_requires if (dependency[0] != "#")]
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='statistical-clear-sky', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION_FROM_GIT_TAG,
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
description='Statistical estimation of a clear sky signal from PV system power data', # Optional
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type='text/markdown', # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/bmeyers/StatisticalClearSky', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='SLAC National Accelerator Laboratory - Bennet Meyers', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='[email protected]', # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
# Pick your license as you wish
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
# These classifiers are *not* checked by 'pip install'. See instead
# 'python_requires' below.
#'Programming Language :: Python :: 2',
#'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
#'Programming Language :: Python :: 3.4',
#'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a string of words separated by whitespace, not a list.
keywords='solar pv photovoltaic', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(exclude=['tests', 'contrib', 'docs', 'clearsky', 'dataviewer', 'notebooks']), # Required
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. If you
# do not support Python 2, you can simplify this to '>=3.5' or similar, see
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
#python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4',
python_requires='>=3.6, <4',
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=dependencies,
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
extras_require={ # Optional
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here.
#
# If using Python 2.6 or earlier, then these have to be included in
# MANIFEST.in as well.
#package_data={ # Optional
# 'sample': ['package_data.dat'],
#},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
#data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
#entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
#},
entry_points={
'console_scripts': [
'statistical_clear_sky=statistical_clear_sky.command_line:main',
],
},
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
project_urls={ # Optional
'Bug Reports': 'https://github.com/bmeyers/StatisticalClearSky/issues',
},
)
|
py | 1a4cef05670248f01f509eeccd5a4d7ac1c796d1 | #%%
import os
# import warnings
# warnings.filterwarnings('ignore')  # note: where this line is placed also changes the behaviour - truly curious code
#
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import numpy as np
from matplotlib import pyplot as plt
import cv2
from detection.datasets import myDataset,data_generator
from detection.models import faster_rcnn2
import tensorflow as tf
from tensorflow import keras
tf.random.set_seed(22)
np.random.seed(22)
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
train_dataset = myDataset.myDataSet(flip_ratio=0.5,scale=(768, 768))
num_classes = len(train_dataset.get_categories())
train_generator = data_generator.DataGenerator(train_dataset)
train_tf_dataset = tf.data.Dataset.from_generator(train_generator, (tf.float32, tf.float32, tf.float32, tf.int32))
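# batch size 2; prefetch up to 100 batches; note shuffle() here follows batch(),
# so whole batches (not individual samples) are shuffled with a 100-batch buffer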
train_tf_dataset = train_tf_dataset.batch(2).prefetch(100).shuffle(100)
model = faster_rcnn2.FasterRCNN(num_classes=num_classes)
# optimizer = keras.optimizers.SGD(1e-3, momentum=0.9, nesterov=True)
optimizer = keras.optimizers.Adam(0.0001)
print([var.name for var in model.trainable_variables])
#####################all Test################################
for i in range(20):
img, img_meta, bboxes, labels = train_dataset[i]
batch_imgs = tf.convert_to_tensor(np.expand_dims(img.astype(np.float32), 0))
batch_metas = tf.convert_to_tensor(np.expand_dims(img_meta.astype(np.float32), 0))
batch_bboxes = tf.convert_to_tensor(np.expand_dims(bboxes.astype(np.float32), 0))
batch_labels = tf.convert_to_tensor(np.expand_dims(labels.astype(np.int), 0))
#%%
if i == 0:
_ = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
model.load_weights('weights/faster_rcnn0_4.h5', by_name=True)
_ = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
# tf.keras.utils.plot_model(model.rpn_head, show_shapes=True, show_layer_names=True)
#%%
########################test#################################
# rois_list = model((batch_imgs, batch_metas),training=False)
rois_list,rois_list2 = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True,rec=2)
import imgTest
print(rois_list)
image = batch_imgs[0].numpy()
bboxs = rois_list[0].numpy()
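# draw every proposed region from the first stage as a rectangle on the image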
for i in range(bboxs.shape[0]):
# if bboxs[i][4] < 0.9:
# continue
bbox = bboxs[i]
image = cv2.rectangle(image, (int(float(bbox[0])),
int(float(bbox[1]))),
(int(float(bbox[2])),
int(float(bbox[3]))), (255, 0, 0), 2)
cv2.imshow('img', image)
img2 = imgTest.showLabRpn(batch_imgs, batch_metas, batch_bboxes, None)
cv2.imshow('img2', img2)
print(rois_list2)
image = batch_imgs[0].numpy()
bboxs = rois_list2[0].numpy()
for i in range(bboxs.shape[0]):
# if bboxs[i][4] < 0.9:
# continue
bbox = bboxs[i]
image = cv2.rectangle(image, (int(float(bbox[0])),
int(float(bbox[1]))),
(int(float(bbox[2])),
int(float(bbox[3]))), (255, 0, 0), 2)
cv2.imshow('img3', image)
cv2.waitKey(0)
# #####################RPN Test################################
# for i in range(20):
# img, img_meta, bboxes, labels = train_dataset[i]
# batch_imgs = tf.convert_to_tensor(np.expand_dims(img.astype(np.float32), 0))
# batch_metas = tf.convert_to_tensor(np.expand_dims(img_meta.astype(np.float32), 0))
# batch_bboxes = tf.convert_to_tensor(np.expand_dims(bboxes.astype(np.float32), 0))
# batch_labels = tf.convert_to_tensor(np.expand_dims(labels.astype(np.int), 0))
# #%%
# if i == 0:
# _ = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
# model.load_weights('weights/faster_rcnn0_4.h5', by_name=True)
#
# # tf.keras.utils.plot_model(model.rpn_head, show_shapes=True, show_layer_names=True)
# #%%
# ########################test#################################
# rpn_class_logits, rpn_probs = model((batch_imgs, batch_metas),training=False)
#
# import imgTest
#
# img1 = imgTest.showRunRpn(batch_imgs, batch_metas,rpn_class_logits, rpn_probs,100)
# img2 = imgTest.showLabRpn(batch_imgs, batch_metas,batch_bboxes, None)
# cv2.imshow('img1', img1)
# cv2.imshow('img2', img2)
# cv2.waitKey(0)
########################train#################################
# for (batch, inputs) in enumerate(train_tf_dataset):
# batch_imgs, batch_metas, batch_bboxes, batch_labels = inputs
# rpn_class_loss, rpn_bbox_loss, rcnn_class_loss,rcnn_bbox_loss = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True)
# model.load_weights('weights/faster_rcnn0_4.h5', by_name=True)
# break
#
#
# for epoch in range(100):
# loss_history = []
# for (batch, inputs) in enumerate(train_tf_dataset):
# batch_imgs, batch_metas, batch_bboxes, batch_labels = inputs
# with tf.GradientTape() as tape:
# rpn_class_loss, rpn_bbox_loss, rcnn_class_loss,rcnn_bbox_loss = model((batch_imgs, batch_metas, batch_bboxes, batch_labels), training=True) # , rcnn_class_loss, rcnn_bbox_loss
#
# loss_value = rpn_class_loss + rpn_bbox_loss + rcnn_class_loss + rcnn_bbox_loss
#
# grads = tape.gradient(loss_value, model.trainable_variables)
# optimizer.apply_gradients(zip(grads, model.trainable_variables))
#
# loss_history.append(loss_value.numpy())
#
# if batch % 100 == 0:
# print(rpn_class_loss, rpn_bbox_loss, rcnn_class_loss, rcnn_bbox_loss) #
# print('epoch', epoch, batch, np.mean(loss_history))
# model.save_weights('weights/faster_rcnn0_4.h5')
|
py | 1a4cef7e91022e6c6c0ef3940a20c71d63fc4a8e | class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix or len(matrix[0]) <= 0:
return 0
h = len(matrix)
w = len(matrix[0])
memo_arr = [[0 for _ in range(w)] for _ in range(h)]
max_len = 0
def in_or_not(x, y):
return h > x >= 0 and w > y >= 0
def bfs(i, j, matrix):
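# despite the name, this is a recursive DFS from (i, j) that memoizes the longest
# increasing path length per cell in memo_arr so each cell is expanded only once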
# bfs i, j, and memorize
nonlocal memo_arr, max_len
dir_x = [1, -1, 0, 0]
dir_y = [0, 0, 1, -1]
ret = []
if not in_or_not(i, j):
return 0
for t, x in enumerate(dir_x):
if in_or_not(i+x, j+dir_y[t]) and matrix[i][j] > matrix[i+x][j+dir_y[t]]:
if memo_arr[i+x][j+dir_y[t]] != 0:
ret.append(memo_arr[i+x][j+dir_y[t]])
else:
ret.append(bfs(i+x, j+dir_y[t], matrix))
else:
ret.append(0)
memo_arr[i][j] = max(ret) + 1
max_len = max(max_len, memo_arr[i][j])
return max(ret) + 1
for i in range(h):
for j in range(w):
bfs(i, j, matrix)
return max_len
# Sol-2: memoized DFS, accepted; time complexity O(n*m)
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
# corner case
if len(matrix) == 0 or len(matrix[0]) == 0:
return 0
directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
cache = [[1 for _ in range(len(matrix[0]))] for _ in range(len(matrix))]
def dfs(i, j, cache):
if cache[i][j] != 1:
return cache[i][j]
for d in directions:
x, y = i + d[0], j + d[1]
if self.inposition(x, y, len(matrix), len(matrix[0])) and matrix[i][j] < matrix[x][y]:
cache[i][j] = max(cache[i][j], dfs(x, y, cache) + 1)
return cache[i][j]
longest = 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
longest = max(longest, dfs(i, j, cache))
return longest
def inposition(self, i, j, m, n):
return 0 <= i < m and 0 <= j < n
# Sol-3: DP over cells sorted by decreasing value (original idea from Hua Hua)
class Solution(object):
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
dp = [[1 for _ in range(len(matrix[0]))] for _ in range(len(matrix))]
positions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
longest = 0
tmp = list()
for i in range(len(matrix)):
for j in range(len(matrix[0])):
tmp.append((matrix[i][j], i, j))
tmp.sort(key=lambda x: -x[0])
for t in tmp:
(num, i, j) = t
for p in positions:
x = i + p[0]
y = j + p[1]
if 0 <= x < len(matrix) and 0 <= y < len(matrix[0]) and matrix[i][j] < matrix[x][y]:
dp[i][j] = max(dp[i][j], dp[x][y] + 1)
longest = max(longest, dp[i][j])
return longest
|
py | 1a4cf00f81703330963f15fdef01659bf323e353 | """
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
import environ
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# Get environment
env = environ.Env(DEBUG=(bool, False))
environ.Env.read_env()
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env("SECRET_KEY", default="insecure-secret-key")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env("DEBUG")
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
# packages
"allauth",
"allauth.account",
"allauth.socialaccount",
"ckeditor",
"ckeditor_uploader",
"crispy_forms",
# local
"projects",
"blog",
"marketing",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "portfolio.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(BASE_DIR, "portfolio", "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
# Django allauth
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
]
SITE_ID = 1
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "none"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_LOGOUT_REDIRECT_URL = "/"
LOGIN_REDIRECT_URL = "/" # default is '/account/profile'
WSGI_APPLICATION = "portfolio.wsgi.application"
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
"sqlite": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": BASE_DIR / env("DATABASE_SQLITE_NAME", default="db.sqlite3"),
},
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": env("DATABASE_NAME", default="postgres"),
"USER": env("DATABASE_USER", default="postgres"),
"PASSWORD": env("DATABASE_PASSWORD", default="postgres"),
"HOST": env("DATABASE_HOST", default="localhost"),
"PORT": env("DATABASE_PORT", default=5432),
},
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", # noqa
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", # noqa
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", # noqa
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# ckeditor settings
CKEDITOR_CONFIGS = {
"default": {
"toolbar": "basic",
"skin": "moono-lisa",
"extraPlugins": ",".join(
[
"autolink",
"codesnippet",
]
),
},
"post": {
"toolbar": "basic",
"skin": "moono-lisa",
"width": "100%",
"extraPlugins": ",".join(
[
"uploadimage", # the upload image feature
# your extra plugins here
"autolink",
"codesnippet",
]
),
},
"comment": {
"toolbar": "comment",
"skin": "moono-lisa",
"extraPlugins": ",".join(
[
"autolink",
"codesnippet",
]
),
"toolbar_comment": [
["Bold", "Italic", "Underline"],
["Link", "Unlink"],
["RemoveFormat", "Source"],
["CodeSnippet"],
],
},
}
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
VENV_PATH = BASE_DIR
STATIC_ROOT = os.path.join(VENV_PATH, "static_root")
MEDIA_ROOT = os.path.join(VENV_PATH, "media_root")
CKEDITOR_UPLOAD_PATH = "uploads/"
# Crispy settings
CRISPY_TEMPLATE_PACK = "bootstrap4"
|
py | 1a4cf0e97ea05a8e94b19b8e6d00f3cda1007607 | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import codecs
import gzip
import json
import os
import random
import socket
import time
import zlib
from collections import defaultdict
import jsonschema
import pytest
from pytest_localserver.http import ContentServer
from werkzeug.wrappers import Request, Response
import elasticapm
from elasticapm.base import Client
from elasticapm.conf.constants import SPAN
from elasticapm.traces import execution_context
from elasticapm.transport.http_base import HTTPTransportBase
from elasticapm.utils import compat
from elasticapm.utils.threading import ThreadManager
try:
from urllib.request import pathname2url
except ImportError:
# Python 2
from urllib import pathname2url
cur_dir = os.path.dirname(os.path.realpath(__file__))
ERRORS_SCHEMA = os.path.join(cur_dir, ".schemacache", "errors", "error.json")
TRANSACTIONS_SCHEMA = os.path.join(cur_dir, ".schemacache", "transactions", "transaction.json")
SPAN_SCHEMA = os.path.join(cur_dir, ".schemacache", "spans", "span.json")
METADATA_SCHEMA = os.path.join(cur_dir, ".schemacache", "metadata.json")
assert os.path.exists(ERRORS_SCHEMA) and os.path.exists(
TRANSACTIONS_SCHEMA
), 'JSON Schema files not found. Run "make update-json-schema" to download'
with codecs.open(ERRORS_SCHEMA, encoding="utf8") as errors_json, codecs.open(
TRANSACTIONS_SCHEMA, encoding="utf8"
) as transactions_json, codecs.open(SPAN_SCHEMA, encoding="utf8") as span_json, codecs.open(
METADATA_SCHEMA, encoding="utf8"
) as metadata_json:
VALIDATORS = {
"error": jsonschema.Draft4Validator(
json.load(errors_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(ERRORS_SCHEMA), referrer="file:" + pathname2url(ERRORS_SCHEMA)
),
),
"transaction": jsonschema.Draft4Validator(
json.load(transactions_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(TRANSACTIONS_SCHEMA),
referrer="file:" + pathname2url(TRANSACTIONS_SCHEMA),
),
),
"span": jsonschema.Draft4Validator(
json.load(span_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(SPAN_SCHEMA), referrer="file:" + pathname2url(SPAN_SCHEMA)
),
),
"metadata": jsonschema.Draft4Validator(
json.load(metadata_json),
resolver=jsonschema.RefResolver(
base_uri="file:" + pathname2url(METADATA_SCHEMA), referrer="file:" + pathname2url(METADATA_SCHEMA)
),
),
}
class ValidatingWSGIApp(ContentServer):
def __init__(self, **kwargs):
self.skip_validate = kwargs.pop("skip_validate", False)
super(ValidatingWSGIApp, self).__init__(**kwargs)
self.payloads = []
self.responses = []
def __call__(self, environ, start_response):
content = self.content
request = Request(environ)
self.requests.append(request)
data = request.data
if request.content_encoding == "deflate":
data = zlib.decompress(data)
elif request.content_encoding == "gzip":
with gzip.GzipFile(fileobj=compat.BytesIO(data)) as f:
data = f.read()
data = data.decode(request.charset)
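# the APM server protocol sends newline-delimited JSON; split it into one dict per event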
if request.content_type == "application/x-ndjson":
data = [json.loads(line) for line in data.split("\n") if line]
self.payloads.append(data)
code = 202
success = 0
fail = 0
if not self.skip_validate:
for line in data:
item_type, item = list(line.items())[0]
validator = VALIDATORS[item_type]
try:
validator.validate(item)
success += 1
except jsonschema.ValidationError as e:
fail += 1
content += "/".join(map(compat.text_type, e.absolute_schema_path)) + ": " + e.message + "\n"
code = 202 if not fail else 400
response = Response(status=code)
response.headers.clear()
response.headers.extend(self.headers)
response.data = content
self.responses.append({"code": code, "content": content})
return response(environ, start_response)
@pytest.fixture()
def elasticapm_client(request):
client_config = getattr(request, "param", {})
client_config.setdefault("service_name", "myapp")
client_config.setdefault("secret_token", "test_key")
client_config.setdefault("central_config", "false")
client_config.setdefault("include_paths", ("*/tests/*",))
client_config.setdefault("span_frames_min_duration", -1)
client_config.setdefault("metrics_interval", "0ms")
client = TempStoreClient(**client_config)
yield client
client.close()
# clear any execution context that might linger around
execution_context.set_transaction(None)
execution_context.set_span(None)
@pytest.fixture()
def waiting_httpserver(httpserver):
wait_for_http_server(httpserver)
return httpserver
@pytest.fixture
def httpsserver_custom(request):
"""The returned ``httpsserver`` (note the additional S!) provides a
threaded HTTP server instance similar to funcarg ``httpserver`` but with
SSL encryption.
"""
from pytest_localserver import https
config = getattr(request, "param", {})
key = os.path.join(cur_dir, "ca", config.get("key", "server.pem"))
server = https.SecureContentServer(key=key, cert=key)
server.start()
request.addfinalizer(server.stop)
return server
@pytest.fixture()
def waiting_httpsserver(httpsserver_custom):
wait_for_http_server(httpsserver_custom)
return httpsserver_custom
@pytest.fixture()
def validating_httpserver(request):
config = getattr(request, "param", {})
app = config.pop("app", ValidatingWSGIApp)
server = app(**config)
server.start()
wait_for_http_server(server)
request.addfinalizer(server.stop)
return server
@pytest.fixture()
def sending_elasticapm_client(request, validating_httpserver):
validating_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"})
client_config = getattr(request, "param", {})
client_config.setdefault("server_url", validating_httpserver.url)
client_config.setdefault("service_name", "myapp")
client_config.setdefault("secret_token", "test_key")
client_config.setdefault("transport_class", "elasticapm.transport.http.Transport")
client_config.setdefault("span_frames_min_duration", -1)
client_config.setdefault("include_paths", ("*/tests/*",))
client_config.setdefault("metrics_interval", "0ms")
client_config.setdefault("central_config", "false")
client = Client(**client_config)
client.httpserver = validating_httpserver
yield client
client.close()
# clear any execution context that might linger around
execution_context.set_transaction(None)
execution_context.set_span(None)
class DummyTransport(HTTPTransportBase):
def __init__(self, url, *args, **kwargs):
super(DummyTransport, self).__init__(url, *args, **kwargs)
self.events = defaultdict(list)
def queue(self, event_type, data, flush=False):
self._flushed.clear()
data = self._process_event(event_type, data)
self.events[event_type].append(data)
self._flushed.set()
def start_thread(self, pid=None):
# don't call the parent method, but the one from ThreadManager
ThreadManager.start_thread(self, pid=pid)
def stop_thread(self):
pass
def get_config(self, current_version=None, keys=None):
return False, None, 30
class TempStoreClient(Client):
def __init__(self, **inline):
inline.setdefault("transport_class", "tests.fixtures.DummyTransport")
super(TempStoreClient, self).__init__(**inline)
@property
def events(self):
return self._transport.events
def spans_for_transaction(self, transaction):
"""Test helper method to get all spans of a specific transaction"""
return [span for span in self.events[SPAN] if span["transaction_id"] == transaction["id"]]
@pytest.fixture()
def not_so_random():
old_state = random.getstate()
random.seed(42)
yield
random.setstate(old_state)
@pytest.fixture()
def instrument():
elasticapm.instrument()
yield
elasticapm.uninstrument()
def wait_for_http_server(httpserver, timeout=30):
start_time = time.time()
while True:
try:
sock = socket.create_connection(httpserver.server_address, timeout=0.1)
sock.close()
break
except socket.error:
if time.time() - start_time > timeout:
raise TimeoutError()
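# --- usage sketch (editor's illustration, not part of the original fixtures) ---
# A minimal example of how the fixtures above are typically consumed in a test.
# The test body is an assumption: it relies on the public begin_transaction /
# end_transaction API of elasticapm.Client and on the TempStoreClient.events
# mapping defined above; "transaction" is assumed to be the event-type key
# used by the transport.
def test_client_usage_sketch(elasticapm_client):
    elasticapm_client.begin_transaction("request")
    elasticapm_client.end_transaction("GET /example", "HTTP 2xx")
    assert len(elasticapm_client.events["transaction"]) == 1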
|
py | 1a4cf179953a44432def4819e4265ca455404390 | import tensorflowjs as tfjs
import tensorflow as tf
model = tf.keras.models.load_model("model.h5")
tfjs.converters.save_keras_model(model, "tfjs")
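# Optional sanity check (editor's sketch, assuming the conversion above ran):
# the output directory should now contain a model.json plus one or more binary
# weight shards; the exact shard file names depend on the model size.
import os
print(sorted(os.listdir("tfjs")))  # e.g. ['group1-shard1of1.bin', 'model.json']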
|
py | 1a4cf17e265016e823076c32fbe120d2e50e3274 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from sentry.testutils import TestCase
from sentry.utils.data_scrubber import SensitiveDataFilter
VARS = {
'foo': 'bar',
'password': 'hello',
'the_secret': 'hello',
'a_password_here': 'hello',
'api_key': 'secret_key',
'apiKey': 'secret_key',
}
class SensitiveDataFilterTest(TestCase):
def _check_vars_sanitized(self, vars, proc):
"""
Helper to check that keys have been sanitized.
"""
self.assertTrue('foo' in vars)
self.assertEquals(vars['foo'], 'bar')
self.assertTrue('password' in vars)
self.assertEquals(vars['password'], proc.MASK)
self.assertTrue('the_secret' in vars)
self.assertEquals(vars['the_secret'], proc.MASK)
self.assertTrue('a_password_here' in vars)
self.assertEquals(vars['a_password_here'], proc.MASK)
self.assertTrue('api_key' in vars)
self.assertEquals(vars['api_key'], proc.MASK)
self.assertTrue('apiKey' in vars)
self.assertEquals(vars['apiKey'], proc.MASK)
def test_stacktrace(self):
data = {
'sentry.interfaces.Stacktrace': {
'frames': [{'vars': VARS}],
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Stacktrace' in data)
stack = data['sentry.interfaces.Stacktrace']
self.assertTrue('frames' in stack)
self.assertEquals(len(stack['frames']), 1)
frame = stack['frames'][0]
self.assertTrue('vars' in frame)
self._check_vars_sanitized(frame['vars'], proc)
def test_http(self):
data = {
'sentry.interfaces.Http': {
'data': VARS,
'env': VARS,
'headers': VARS,
'cookies': VARS,
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
for n in ('data', 'env', 'headers', 'cookies'):
self.assertTrue(n in http)
self._check_vars_sanitized(http[n], proc)
def test_extra(self):
data = {
'extra': VARS
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('extra' in data)
self._check_vars_sanitized(data['extra'], proc)
def test_querystring_as_string(self):
data = {
'sentry.interfaces.Http': {
'query_string': 'foo=bar&password=hello&the_secret=hello'
'&a_password_here=hello&api_key=secret_key',
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
self.assertEquals(
http['query_string'],
'foo=bar&password=%(m)s&the_secret=%(m)s'
'&a_password_here=%(m)s&api_key=%(m)s' % dict(m=proc.MASK))
def test_querystring_as_string_with_partials(self):
data = {
'sentry.interfaces.Http': {
'query_string': 'foo=bar&password&baz=bar',
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
self.assertEquals(http['query_string'], 'foo=bar&password&baz=bar')
def test_sanitize_additional_sensitive_fields(self):
additional_sensitive_dict = {
'fieldy_field': 'value',
'moar_other_field': 'another value'
}
data = {
'extra': dict(VARS.items() + additional_sensitive_dict.items())
}
proc = SensitiveDataFilter(additional_sensitive_dict.keys())
proc.apply(data)
for field in additional_sensitive_dict.keys():
self.assertEquals(data['extra'][field], proc.MASK)
self._check_vars_sanitized(data['extra'], proc)
def test_sanitize_credit_card(self):
proc = SensitiveDataFilter()
result = proc.sanitize('foo', '4242424242424242')
self.assertEquals(result, proc.MASK)
def test_sanitize_credit_card_amex(self):
# AMEX numbers are 15 digits, not 16
proc = SensitiveDataFilter()
result = proc.sanitize('foo', '424242424242424')
self.assertEquals(result, proc.MASK)
def test_sanitize_credit_card_within_value(self):
proc = SensitiveDataFilter()
result = proc.sanitize('foo', "'4242424242424242'")
self.assertEquals(result, proc.MASK)
proc = SensitiveDataFilter()
result = proc.sanitize('foo', "foo 4242424242424242")
self.assertEquals(result, proc.MASK)
def test_sanitize_url(self):
proc = SensitiveDataFilter()
result = proc.sanitize('foo', 'pg://matt:pass@localhost/1')
self.assertEquals(result, 'pg://matt:%s@localhost/1' % proc.MASK)
# Make sure we don't mess up any other url.
# This url specifically if passed through urlunsplit(urlsplit()),
# it'll change the value.
result = proc.sanitize('foo', 'postgres:///path')
self.assertEquals(result, 'postgres:///path')
result = proc.sanitize('foo', "foo 'redis://redis:foo@localhost:6379/0' bar")
self.assertEquals(result, "foo 'redis://redis:%s@localhost:6379/0' bar" % proc.MASK)
result = proc.sanitize('foo', "'redis://redis:foo@localhost:6379/0'")
self.assertEquals(result, "'redis://redis:%s@localhost:6379/0'" % proc.MASK)
result = proc.sanitize('foo', "foo redis://redis:foo@localhost:6379/0 bar")
self.assertEquals(result, "foo redis://redis:%s@localhost:6379/0 bar" % proc.MASK)
result = proc.sanitize('foo', "foo redis://redis:foo@localhost:6379/0 bar pg://matt:foo@localhost/1")
self.assertEquals(result, "foo redis://redis:%s@localhost:6379/0 bar pg://matt:%s@localhost/1" % (proc.MASK, proc.MASK))
def test_sanitize_http_body(self):
data = {
'sentry.interfaces.Http': {
'data': '{"email":"[email protected]","password":"zzzzz"}',
},
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
self.assertEquals(http['data'], proc.MASK)
def test_does_not_fail_on_non_string(self):
data = {
'extra': {
'foo': 1,
},
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertEquals(data['extra'], {'foo': 1})
|
py | 1a4cf1a900dc0575454b96c724e877229da06b97 | import randopt as ro
def loss(x):
return x**2
e = ro.Experiment('myexp', {
'alpha': ro.Gaussian(mean=0.0, std=1.0, dtype='float'),
})
# Sampling parameters
for i in range(100):
e.sample('alpha')
res = loss(e.alpha)
print('Result: ', res)
e.add_result(res)
# Manually setting parameters
e.alpha = 0.00001
res = loss(e.alpha)
e.add_result(res)
# Search over all experiments results, including ones from previous runs
opt = e.minimum()
print('Best result: ', opt.result, ' with params: ', opt.params) |
py | 1a4cf2ce8152da7d190251eaeeddcb9550ef1ba8 | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="pypodo",
version="3.0.3",
description="pypodo is a todolist tool which works with a .todo file at the root of the home directory. It has a mecanism of indexes and tags.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/thib1984/pypodo",
author="thib1984",
author_email="[email protected]",
license="mit",
packages=["pypodo"],
install_requires=["setuptools", "termcolor", "cryptography"],
zip_safe=False,
entry_points={
"console_scripts": ["pypodo=pypodo.__pypodo__:pypodo"],
},
)
|
py | 1a4cf45d4dd96e4bbfcc71b0629b786dc7090a0b | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webui.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
py | 1a4cf57684c9b0cacf1c0bdf16dd38c4db4ec495 | import tkinter as tk
from tkinter import messagebox
import math
e = None
e2 = None
sign = None
f_num = None
frame = None
num = None
def sin():
global sign
global num
sign = "sin"
num = float(e.get())
e.delete(0, len(e.get()))
e2.delete(0, len(e2.get()))
e2.insert(0, "sin(" + str(num) + ")")
e.insert(0, "sin(" + str(num) + ")")
def cos():
global sign
global num
sign = "cos"
num = float(e.get())
e.delete(0, len(e.get()))
e2.delete(0, len(e2.get()))
e2.insert(0, "cos(" + str(num) + ")")
e.insert(0, "cos(" + str(num) + ")")
def tan():
global sign
global num
sign = "tan"
num = float(e.get())
e2.delete(0, len(e2.get()))
e.delete(0, len(e.get()))
e2.insert(0, "tan(" + str(num) + ")")
e.insert(0, "tan(" + str(num) + ")")
def tan_inv():
global sign
global num
sign = "tan_inv"
num = float(e.get())
e.delete(0, len(e.get()))
e2.delete(0, len(e2.get()))
e2.insert(0, "tan^-1(" + str(num) + ")")
e.insert(0, "tan^-1(" + str(num) + ")")
def cos_inv():
global sign
global num
sign = "cos_inv"
num = float(e.get())
e.delete(0, len(e.get()))
e2.delete(0, len(e2.get()))
e2.insert(0, "cos^-1(" + str(num) + ")")
e.insert(0, "cos^-1(" + str(num) + ")")
def sin_inv():
global sign
global num
sign = "sin_inv"
num = float(e.get())
e.delete(0, len(e.get()))
e2.delete(0, len(e2.get()))
e2.insert(0, "sin^-1(" + str(num) + ")")
e.insert(0, "sin^-1(" + str(num) + ")")
def exp():
global num
num = e.get()
global sign
sign = "exp"
e.delete(0, len(e.get()))
e2.delete(0, len(e2.get()))
e2.insert(0, "E" + num)
e.insert(0, "E" + num)
def fact():
global num
global sign
num = int(e.get())
sign = "fact"
e2.insert(len(e.get()), "!")
e.insert(len(e.get()), "!")
def sqrt():
global num
num = float(e.get())
global sign
sign = "sqrt"
e2.delete(0, len(e2.get()))
e2.insert(0, "sqrt(" + str(num) + ")")
e.delete(0, len(e.get()))
e.insert(0, "sqrt(" + str(num) + ")")
def cube():
global num
num = float(e.get())
global sign
sign = "cube"
e2.delete(0, len(e2.get()))
e2.insert(len(e.get()), str(num) + "^3")
e.delete(0, len(e.get()))
e.insert(len(e.get()), "^3")
def n_pow():
global f_num
f_num = e.get()
global sign
sign = "n_pow"
e2.delete(0, len(e2.get()))
e.delete(0, len(e.get()))
e2.insert(0, str(f_num) + " ^ ")
def inv():
global num
global sign
sign = "inv"
num = float(e.get())
e2.insert(len(e2.get()), "^(-1)")
e.insert(len(e.get()), "^(-1)")
def rem():
global f_num
e2.delete(0, len(e2.get()))
f_num = float(e.get())
global sign
sign = "rem"
e2.insert(0, str(f_num) + " % ")
e.delete(0, len(e.get()))
def show(number):
current = e.get()
current1 = e2.get()
e2.delete(0, len(current1))
e.delete(0, len(current))
e.insert(0, str(current) + str(number))
e2.insert(0, str(current1) + str(number))
def addition():
first_num = e.get()
e2.delete(0, len(e2.get()))
global sign
sign = "addition"
global f_num
f_num = float(first_num)
e2.insert(0, str(f_num) + " + ")
e.delete(0, len(e.get()))
def subtraction():
first_num = e.get()
e2.delete(0, len(e2.get()))
global sign
sign = "subtraction"
global f_num
f_num = float(first_num)
e2.insert(0, str(f_num) + " - ")
e.delete(0, len(e.get()))
def multiplication():
first_num = e.get()
e2.delete(0, len(e2.get()))
global sign
sign = "multiplication"
global f_num
f_num = float(first_num)
e2.insert(0, str(f_num) + " * ")
e.delete(0, len(e.get()))
def division():
first_num = e.get()
e2.delete(0, len(e2.get()))
global sign
sign = "division"
global f_num
f_num = float(first_num)
e2.insert(0, str(f_num) + " / ")
e.delete(0, len(e.get()))
def equal():
try:
second_num = e.get()
e.delete(0, len(second_num))
# e2.insert(len(e2.get()), second_num)
if sign == "addition":
result = f_num + float(second_num)
e.insert(0, result)
e2.insert(len(e2.get()), " = " + str(result))
if sign == "subtraction":
result = f_num - float(second_num)
e.insert(0, result)
e2.insert(len(e2.get()), " = " + str(result))
if sign == "multiplication":
result = f_num * float(second_num)
e.insert(0, result)
e2.insert(len(e2.get()), " = " + str(result))
if sign == "division":
if float(second_num) == 0:
e.insert(0, "NaN")
e2.insert(len(e2.get()), " = NaN")
else:
result = f_num / float(second_num)
e.insert(0, result)
e2.insert(len(e2.get()), " = " + str(result))
if sign == "sin":
# print(math.sin(num))
result = math.sin(math.radians(num))
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(0, "sin(" + str(num) + ") = " + str(result))
if sign == "cos":
result = math.cos(math.radians(num))
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(0, "cos(" + str(num) + ") = " + str(result))
if sign == "tan":
result = math.tan(math.radians(num))
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(0, "tan(" + str(num) + ") = " + str(result))
if sign == "n_pow":
result = float(f_num) ** int(second_num)
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(
0,
str(f_num) + " ^ " + str(second_num) + " = " + str(result)
)
if sign == "exp":
result = math.exp(float(num))
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(0, "E" + num + " = " + str(result))
if sign == "fact":
result = math.factorial(num)
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(0, str(num) + "! = " + str(result))
if sign == "sqrt":
result = math.sqrt(num)
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(0, "sqrt(" + str(num) + ") = " + str(result))
if sign == "inv":
result = 1 / num
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(0, str(num) + "^(-1) = " + str(result))
if sign == "rem":
result = f_num % int(second_num)
e.insert(0, result)
e2.delete(0, len(e2.get()))
e2.insert(
0,
str(f_num) + " % " + str(second_num) + " = " + str(result)
)
if sign == "sin_inv":
result = math.degrees(math.asin(num))
e.insert(0, str(result) + " (degree)")
e2.delete(0, len(e2.get()))
e2.insert(0, "sin^-1(" + str(num) + ") = " + str(result))
if sign == "cos_inv":
result = math.degrees(math.acos(num))
e.insert(0, str(result) + " (degree)")
e2.delete(0, len(e2.get()))
e2.insert(0, "cos^-1(" + str(num) + ") = " + str(result))
if sign == "tan_inv":
result = math.degrees(math.atan(num))
e.insert(0, str(result) + " (degree)")
e2.delete(0, len(e2.get()))
e2.insert(0, "tan^-1(" + str(num) + ") = " + str(result))
if sign == "cube":
result = num**3
e.insert(0, str(result))
e2.delete(0, len(e2.get()))
e2.insert(0, str(num) + "^3 = " + str(result))
except Exception as ex:
print(ex)
messagebox.showerror("Value Error", "Math Error")
def delete():
current = e.get()
# e.insert(0, current)
e.delete(len(current) - 1, len(current))
def clear():
e.delete(0, len(e.get()))
e2.delete(0, len(e2.get()))
def create_frame(mode):
global frame
try:
frame.destroy()
except Exception as ex:
print(ex)
pass
frame = tk.Label(root, background="sky blue")
frame.grid(row=1, column=0, columnspan=3, padx=10, pady=10)
global e
global e2
e2 = tk.Entry(
frame, font=("Helvetica", 9), width=40, borderwidth=2
)
e2.grid(row=0, column=0, columnspan=3, padx=10, pady=10, ipady=3)
e = tk.Entry(
frame, font=("Helvetica", 9, "bold"), width=40, borderwidth=5
)
e.grid(row=1, column=0, columnspan=3, padx=10, pady=10, ipady=8)
e.focus()
if mode == 'n':
button_1 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="1", padx=35, pady=5, command=lambda: show(1)
)
button_2 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="2", padx=35, pady=5, command=lambda: show(2)
)
button_3 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="3", padx=35, pady=5, command=lambda: show(3)
)
button_4 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="4", padx=35, pady=5, command=lambda: show(4)
)
button_5 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="5", padx=35, pady=5, command=lambda: show(5)
)
button_6 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="6", padx=35, pady=5, command=lambda: show(6)
)
button_7 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="7", padx=35, pady=5, command=lambda: show(7)
)
button_8 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="8", padx=35, pady=5, command=lambda: show(8)
)
button_9 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="9", padx=35, pady=5, command=lambda: show(9)
)
button_0 = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="0", padx=35, pady=5, command=lambda: show(0)
)
button_del = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="<--", padx=31, pady=5, command=delete
)
button_add = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="+", padx=35, pady=5, command=addition
)
button_clear = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="Clear", padx=77, pady=5, command=clear
)
button_equal = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="=", padx=89, pady=5, command=equal
)
button_dot = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text=".", padx=37, pady=5, command=lambda: show(".")
)
button_sqrt = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="sqrt", padx=27, pady=5, command=sqrt
)
button_sub = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="-", padx=37, pady=5, command=subtraction
)
button_mul = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="*", padx=37, pady=5, command=multiplication
)
button_div = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="/", padx=37, pady=5, command=division
)
button_1.grid(row=2, column=0, pady=5)
button_2.grid(row=2, column=1, pady=5)
button_3.grid(row=2, column=2, pady=5)
button_4.grid(row=3, column=0, pady=5)
button_5.grid(row=3, column=1, pady=5)
button_6.grid(row=3, column=2, pady=5)
button_7.grid(row=4, column=0, pady=5)
button_8.grid(row=4, column=1, pady=5)
button_9.grid(row=4, column=2, pady=5)
button_0.grid(row=5, column=0, pady=5)
button_sqrt.grid(row=6, column=0, pady=5)
button_clear.grid(row=6, column=1, columnspan=2, pady=5)
button_equal.grid(row=8, column=1, columnspan=2, pady=5)
button_del.grid(row=5, column=2)
button_dot.grid(row=5, column=1)
button_add.grid(row=8, column=0)
button_sub.grid(row=7, column=0, pady=5)
button_mul.grid(row=7, column=1, pady=5)
button_div.grid(row=7, column=2, pady=5)
else:
button_1 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="1", padx=35, pady=5, command=lambda: show(1)
)
button_2 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="2", padx=36, pady=5, command=lambda: show(2)
)
button_3 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="3", padx=35, pady=5, command=lambda: show(3)
)
button_4 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="4", padx=35, pady=5, command=lambda: show(4)
)
button_5 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="5", padx=36, pady=5, command=lambda: show(5)
)
button_6 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="6", padx=35, pady=5, command=lambda: show(6)
)
button_7 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="7", padx=35, pady=5, command=lambda: show(7)
)
button_8 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="8", padx=36, pady=5, command=lambda: show(8)
)
button_9 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="9", padx=35, pady=5, command=lambda: show(9)
)
button_0 = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="0", padx=35, pady=5, command=lambda: show(0)
)
button_add = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="+", padx=35, pady=5, command=addition)
button_clear = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="<--", padx=31, pady=5, command=delete)
button_dot = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text=".", padx=37, pady=5, command=lambda: show("."))
button_equal = tk.Button(
frame, background="light gray",
font=("Helvetica", 9, "bold"),
text="=", padx=89, pady=5, command=equal)
button_sin = tk.Button(
frame,
background="light gray",
padx=29, pady=5,
font=("Helvetica", 9, "bold"), text="sin", command=sin
)
button_cos = tk.Button(
frame,
background="light gray",
padx=30, pady=5,
font=("Helvetica", 9, "bold"), text="cos", command=cos
)
button_tan = tk.Button(
frame,
background="light gray",
padx=31, pady=5,
font=("Helvetica", 9, "bold"), text="tan", command=tan
)
button_cot = tk.Button(
frame,
background="light gray",
padx=20, pady=5,
font=("Helvetica", 9, "bold"), text="sin^-1", command=sin_inv
)
button_cosec = tk.Button(
frame,
background="light gray",
padx=21, pady=5,
font=("Helvetica", 9, "bold"), text="cos^-1", command=cos_inv
)
button_sec = tk.Button(
frame,
background="light gray",
padx=22, pady=5,
font=("Helvetica", 9, "bold"), text="tan^-1", command=tan_inv
)
button_fact = tk.Button(
frame,
background="light gray",
padx=35, pady=5,
font=("Helvetica", 9, "bold"), text="X!", command=fact
)
button_sqrt = tk.Button(
frame,
background="light gray",
padx=26, pady=5,
font=("Helvetica", 9, "bold"), text="sqrt", command=sqrt
)
button_exp = tk.Button(
frame,
background="light gray",
padx=30, pady=5,
font=("Helvetica", 9, "bold"), text="exp", command=exp
)
button_cube = tk.Button(
frame,
background="light gray",
padx=27, pady=5,
font=("Helvetica", 9, "bold"), text="X^3", command=cube
)
button_n_pow = tk.Button(
frame,
background="light gray",
padx=30, pady=5,
font=("Helvetica", 9, "bold"), text="X^n", command=n_pow
)
button_inv = tk.Button(
frame,
background="light gray",
padx=27, pady=5,
font=("Helvetica", 9, "bold"), text="X^-1", command=inv
)
button_rem = tk.Button(
frame,
background="light gray",
padx=26, pady=5,
font=("Helvetica", 9, "bold"), text="X%n", command=rem
)
button_e = tk.Button(
frame,
background="light gray",
padx=78, pady=5,
font=("Helvetica", 9, "bold"), text="Clear", command=clear
)
button_sub = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="-", padx=37, pady=5, command=subtraction
)
button_mul = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="*", padx=37, pady=5, command=multiplication
)
button_div = tk.Button(
frame,
background="light gray",
font=("Helvetica", 9, "bold"),
text="/", padx=37, pady=5, command=division
)
button_1.grid(row=7, column=0, pady=5)
button_2.grid(row=7, column=1, pady=5)
button_3.grid(row=7, column=2, pady=5)
button_4.grid(row=8, column=0, pady=5)
button_5.grid(row=8, column=1, pady=5)
button_6.grid(row=8, column=2, pady=5)
button_7.grid(row=9, column=0, pady=5)
button_8.grid(row=9, column=1, pady=5)
button_9.grid(row=9, column=2, pady=5)
button_0.grid(row=10, column=0, pady=5)
button_add.grid(row=11, column=0, pady=5)
button_clear.grid(row=10, column=1, pady=5)
button_dot.grid(row=10, column=2, pady=5)
button_equal.grid(row=1, column=1, columnspan=2, pady=5)
button_sub.grid(row=12, column=0, pady=5)
button_mul.grid(row=12, column=1, pady=5)
button_div.grid(row=12, column=2, pady=5)
button_sin.grid(row=2, column=0, pady=5)
button_cos.grid(row=2, column=1, pady=5)
button_tan.grid(row=2, column=2, pady=5)
button_cot.grid(row=3, column=0, pady=5)
button_cosec.grid(row=3, column=1, pady=5)
button_sec.grid(row=3, column=2, pady=5)
button_sqrt.grid(row=4, column=0, pady=5)
button_fact.grid(row=4, column=1, pady=5)
button_inv.grid(row=4, column=2, pady=5)
button_rem.grid(row=5, column=0, pady=5)
button_n_pow.grid(row=5, column=1, pady=5)
button_exp.grid(row=5, column=2, pady=5)
button_cube.grid(row=6, column=0, pady=5)
button_e.grid(row=6, column=1, columnspan=2, pady=5)
if __name__ == "__main__":
root = tk.Tk()
root.title("Calculator")
root.configure(background="sky blue")
"""
global frame
frame = tk.Label(root)
frame.grid(row=1, column=0, columnspan=2)
"""
mode = tk.StringVar()
tk.Radiobutton(
root,
background="light gray",
variable=mode,
font=("Helvetica", 9, "bold"),
text="Scientific", value="s"
).grid(
row=0, column=0, padx=5, pady=5, ipadx=10
)
tk.Radiobutton(
root,
background="light gray",
variable=mode,
font=("Helvetica", 9, "bold"),
text="Simple", value="n"
).grid(
row=0, column=1, padx=5, pady=5, ipadx=10
)
choose_btn = tk.Button(
root, background="light gray",
width=10, font=("Helvetica", 9, "bold"),
text="Select", command=lambda: create_frame(mode.get())
)
choose_btn.grid(row=0, column=2, padx=5, pady=5)
root.mainloop()
|
py | 1a4cf587af5742de6720b7709ad25d505e241425 | from django.db import models
class imgs(models.Model):
title = models.CharField(max_length=50)
img = models.ImageField(upload_to='langs/')
|
py | 1a4cf62f212d93dac5aedcbda62cc408aba2af42 | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper to manipulate GCP git repository."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import errno
import os
import re
import subprocess
import textwrap
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.util import encoding
from googlecloudsdk.core.util import files
from googlecloudsdk.core.util import platforms
import six
import uritemplate
# This is the minimum version of git required to use credential helpers.
_HELPER_MIN = (2, 0, 1)
_WINDOWS_HELPER_MIN = (2, 15, 0)
class Error(exceptions.Error):
"""Exceptions for this module."""
class UnknownRepositoryAliasException(Error):
"""Exception to be thrown when a repository alias provided cannot be found."""
class CannotInitRepositoryException(Error):
"""Exception to be thrown when a repository cannot be created."""
class CannotFetchRepositoryException(Error):
"""Exception to be thrown when a repository cannot be fetched."""
class GitVersionException(Error):
"""Exceptions for when git version is too old."""
def __init__(self, fmtstr, cur_version, min_version):
self.cur_version = cur_version
super(GitVersionException, self).__init__(
fmtstr.format(cur_version=cur_version, min_version=min_version))
class InvalidGitException(Error):
"""Exceptions for when git version is empty or invalid."""
class GcloudIsNotInPath(Error):
"""Exception for when the gcloud cannot be found."""
def CheckGitVersion(version_lower_bound=None):
"""Returns true when version of git is >= min_version.
Args:
version_lower_bound: (int,int,int), The lowest allowed version, or None to
just check for the presence of git.
Returns:
True if version >= min_version.
Raises:
GitVersionException: if `git` was found, but the version is incorrect.
InvalidGitException: if `git` was found, but the output of `git version` is
not as expected.
NoGitException: if `git` was not found.
"""
try:
cur_version = encoding.Decode(subprocess.check_output(['git', 'version']))
if not cur_version:
raise InvalidGitException('The git version string is empty.')
if not cur_version.startswith('git version '):
raise InvalidGitException(('The git version string must start with '
'git version .'))
match = re.search(r'(\d+)\.(\d+)\.(\d+)', cur_version)
if not match:
raise InvalidGitException('The git version string must contain a '
'version number.')
current_version = tuple([int(item) for item in match.group(1, 2, 3)])
if version_lower_bound and current_version < version_lower_bound:
min_version = '.'.join(six.text_type(i) for i in version_lower_bound)
raise GitVersionException(
'Your git version {cur_version} is older than the minimum version '
'{min_version}. Please install a newer version of git.',
cur_version=cur_version, min_version=min_version)
except OSError as e:
if e.errno == errno.ENOENT:
raise NoGitException()
raise
return True
class NoGitException(Error):
"""Exceptions for when git is not available."""
def __init__(self):
super(NoGitException, self).__init__(
textwrap.dedent("""\
Cannot find git. Please install git and try again.
You can find git installers at [http://git-scm.com/downloads], or use
your favorite package manager to install it on your computer. Make sure
it can be found on your system PATH.
"""))
def _GetRepositoryURI(project, alias):
"""Get the URI for a repository, given its project and alias.
Args:
project: str, The project name.
alias: str, The repository alias.
Returns:
str, The repository URI.
"""
return uritemplate.expand(
'https://source.developers.google.com/p/{project}/r/{alias}',
{'project': project, 'alias': alias})
def _GetGcloudScript(full_path=False):
"""Get name of the gcloud script.
Args:
full_path: boolean, True if the gcloud full path should be used if free
of spaces.
Returns:
str, command to use to execute gcloud
Raises:
GcloudIsNotInPath: if gcloud is not found in the path
"""
if (platforms.OperatingSystem.Current() ==
platforms.OperatingSystem.WINDOWS):
gcloud_ext = '.cmd'
else:
gcloud_ext = ''
gcloud_name = 'gcloud'
gcloud = files.FindExecutableOnPath(gcloud_name, pathext=[gcloud_ext])
if not gcloud:
raise GcloudIsNotInPath(
'Could not verify that gcloud is in the PATH. '
'Please make sure the Cloud SDK bin folder is in PATH.')
if full_path:
if not re.match(r'[-a-zA-Z0-9_/]+$', gcloud):
log.warning(
textwrap.dedent("""\
You specified the option to use the full gcloud path in the git
credential.helper, but the path contains non-alphanumeric characters
so the credential helper may not work correctly."""))
return gcloud
else:
return gcloud_name + gcloud_ext
def _GetCredHelperCommand(uri, full_path=False, min_version=_HELPER_MIN):
"""Returns the gcloud credential helper command for a remote repository.
The command will be of the form '!gcloud auth git-helper --account=EMAIL
--ignore-unknown $@`. See https://git-scm.com/docs/git-config. If the
installed version of git or the remote repository does not support
the gcloud credential helper, then returns None.
Args:
uri: str, The uri of the remote repository.
full_path: bool, If true, use the full path to gcloud.
min_version: minimum git version; if found git is earlier than this, warn
and return None
Returns:
str, The credential helper command if it is available.
"""
credentialed_hosts = ['source.developers.google.com']
extra = properties.VALUES.core.credentialed_hosted_repo_domains.Get()
if extra:
credentialed_hosts.extend(extra.split(','))
if any(
uri.startswith('https://' + host + '/') for host in credentialed_hosts):
try:
CheckGitVersion(min_version)
except GitVersionException as e:
helper_min_str = '.'.join(six.text_type(i) for i in min_version)
log.warning(
textwrap.dedent("""\
You are using a Google-hosted repository with a
{current} which is older than {min_version}. If you upgrade
to {min_version} or later, gcloud can handle authentication to
this repository. Otherwise, to authenticate, use your Google
account and the password found by running the following command.
$ gcloud auth print-access-token""".format(
current=e.cur_version, min_version=helper_min_str)))
return None
# Use git alias "!shell command" syntax so we can configure
# the helper with options. Also git-credential is not
# prefixed when it starts with "!".
return '!{0} auth git-helper --account={1} --ignore-unknown $@'.format(
_GetGcloudScript(full_path),
properties.VALUES.core.account.Get(required=True))
return None
class Git(object):
"""Represents project git repo."""
def __init__(self, project_id, repo_name, uri=None):
"""Constructor.
Args:
project_id: str, The name of the project that has a repository associated
with it.
repo_name: str, The name of the repository to clone.
uri: str, The URI of the repository, or None if it will be inferred from
the name.
Raises:
UnknownRepositoryAliasException: If the repo name is not known to be
associated with the project.
"""
self._project_id = project_id
self._repo_name = repo_name
self._uri = uri or _GetRepositoryURI(project_id, repo_name)
if not self._uri:
raise UnknownRepositoryAliasException()
def GetName(self):
return self._repo_name
def Clone(self, destination_path, dry_run=False, full_path=False):
"""Clone a git repository into a gcloud workspace.
If the resulting clone does not have a .gcloud directory, create one. Also,
sets the credential.helper to use the gcloud credential helper.
Args:
destination_path: str, The relative path for the repository clone.
dry_run: bool, If true do not run but print commands instead.
full_path: bool, If true use the full path to gcloud.
Returns:
str, The absolute path of cloned repository.
Raises:
CannotInitRepositoryException: If there is already a file or directory in
the way of creating this repository.
CannotFetchRepositoryException: If there is a problem fetching the
repository from the remote host, or if the repository is otherwise
misconfigured.
"""
abs_repository_path = os.path.abspath(destination_path)
if os.path.exists(abs_repository_path):
CheckGitVersion() # Do this here, before we start running git commands
if os.listdir(abs_repository_path):
# Raise exception if dir is not empty and not a git repository
raise CannotInitRepositoryException(
'Directory path specified exists and is not empty')
# Make a brand new repository if directory does not exist or
# clone if directory exists and is empty
try:
# If this is a Google-hosted repo, clone with the cred helper.
cmd = ['git', 'clone', self._uri, abs_repository_path]
min_git = _HELPER_MIN
if (platforms.OperatingSystem.Current() ==
platforms.OperatingSystem.WINDOWS):
min_git = _WINDOWS_HELPER_MIN
cred_helper_command = _GetCredHelperCommand(
self._uri, full_path=full_path, min_version=min_git)
if cred_helper_command:
cmd += [
'--config',
'credential.https://source.developers.google.com/.helper=',
'--config',
'credential.https://source.developers.google.com/.helper=' +
cred_helper_command
]
self._RunCommand(cmd, dry_run)
except subprocess.CalledProcessError as e:
raise CannotFetchRepositoryException(e)
return abs_repository_path
def _RunCommand(self, cmd, dry_run):
log.debug('Executing %s', cmd)
if dry_run:
log.out.Print(' '.join(cmd))
else:
subprocess.check_call(cmd)
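# --- usage sketch (editor's illustration, not part of the original module) ---
# Shows how the Git wrapper above is typically driven. The project and
# repository names are placeholders; dry_run=True makes Clone() print the git
# command instead of executing it.
def _example_clone():
    repo = Git(project_id='my-project', repo_name='default')
    path = repo.Clone('default', dry_run=True)
    log.debug('Dry-run clone target: %s', path)
    return path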
|
py | 1a4cf64bf28978fc7e6cc3d2760eea4f7358a3a5 | from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.exceptions import ValidationError
from django.core.mail import EmailMultiAlternatives
from django.db import models
from django.template import (
Context,
Template,
TemplateDoesNotExist,
TemplateSyntaxError
)
from django.utils.translation import ugettext_lazy as _lazy
from . import helpers
from .settings import (
ADD_EXTRA_HEADERS,
VALIDATE_ON_SAVE,
CONTEXT_PROCESSORS
)
class EmailTemplateQuerySet(models.query.QuerySet):
def active(self):
"""Returns active templates only."""
return self.filter(is_active=True)
def current(self, name, language=settings.LANGUAGE_CODE):
"""Returns the latest version of a template."""
return self.active().filter(name=name, language=language).order_by('version').last()
def version(self, name, version, language=settings.LANGUAGE_CODE):
"""Returns a specific version of a template."""
return self.active().get(name=name, language=language, version=version)
class EmailTemplate(models.Model):
"""
Email template. Contains HTML and plain text variants.
Each Template object has a unique name:language.version combination, which
means that localisation of templates is managed through having multiple
objects with the same name - there is no inheritance model. This is to
keep it simple:
order-confirmation:en.0
order-confirmation:de.0
order-confirmation:fr.0
Templates contain HTML and plain text content.
"""
CONTENT_TYPE_PLAIN = 'text/plain'
CONTENT_TYPE_HTML = 'text/html'
CONTENT_TYPES = (CONTENT_TYPE_PLAIN, CONTENT_TYPE_HTML)
name = models.CharField(
_lazy('Template name'),
max_length=100,
help_text=_lazy("Template name - must be unique for a given language/version combination."),
db_index=True
)
description = models.CharField(
_lazy('Description'),
max_length=100,
help_text=_lazy("Optional description. e.g. used to differentiate variants ('new header')."), # noqa
blank=True
)
# language is free text and not a choices field as we make no assumption
# as to how the end user is storing / managing languages.
language = models.CharField(
_lazy('Language'),
max_length=20,
default=settings.LANGUAGE_CODE,
help_text=_lazy(
"Used to support localisation of emails, defaults to `settings.LANGUAGE_CODE`, "
"but can be any string, e.g. 'London', 'NYC'."
),
db_index=True
)
version = models.IntegerField(
_lazy('Version (or variant)'),
default=0,
help_text=_lazy("Integer value - can be used for versioning or A/B testing."),
db_index=True
)
subject = models.CharField(
_lazy('Subject line template'),
max_length=100,
help_text=_lazy("Email subject line (may contain template variables)."),
)
body_text = models.TextField(
_lazy('Plain text template'),
help_text=_lazy("Plain text content (may contain template variables)."),
)
body_html = models.TextField(
_lazy('HTML template'),
help_text=_lazy("HTML content (may contain template variables)."),
)
test_context = JSONField(
default=dict,
blank=True,
help_text=_lazy("Dummy JSON used for test rendering (set automatically on first save).")
)
is_active = models.BooleanField(
_lazy("Active (live)"),
help_text=_lazy("Set to False to remove from `current` queryset."),
default=True
)
from_email = models.CharField(
_lazy("Sender"),
max_length=254,
help_text=_lazy(
"Default sender address if none specified. Verbose form is accepted."
),
default=settings.DEFAULT_FROM_EMAIL
)
reply_to = models.CharField(
_lazy("Reply-To"),
max_length=254,
help_text=_lazy("Comma separated list of Reply-To recipients."),
default=settings.DEFAULT_FROM_EMAIL
)
objects = EmailTemplateQuerySet.as_manager()
class Meta:
unique_together = ("name", "language", "version")
def __str__(self):
return "'{}' ({})".format(self.name, self.language)
def __repr__(self):
return (
"<EmailTemplate id={} name='{}' language='{}' version={}>".format(
self.id, self.name, self.language, self.version
)
)
@property
def extra_headers(self):
return {
'X-Appmail-Template': (
'name=%s; language=%s; version=%s' % (self.name, self.language, self.version)
)
}
@property
def reply_to_list(self):
"""Convert the reply_to field to a list."""
return [a.strip() for a in self.reply_to.split(',')]
def save(self, *args, **kwargs):
"""Update dummy context on first save and validate template contents.
Kwargs:
validate: set to False to bypass template validation; defaults
to settings.VALIDATE_ON_SAVE.
"""
if self.pk is None:
self.test_context = helpers.get_context(
self.subject +
self.body_text +
self.body_html
)
validate = kwargs.pop('validate', VALIDATE_ON_SAVE)
if validate:
self.clean()
super(EmailTemplate, self).save(*args, **kwargs)
return self
def clean(self):
"""Validate model - specifically that the template can be rendered."""
validation_errors = {}
validation_errors.update(self._validate_body(EmailTemplate.CONTENT_TYPE_PLAIN))
validation_errors.update(self._validate_body(EmailTemplate.CONTENT_TYPE_HTML))
validation_errors.update(self._validate_subject())
if validation_errors:
raise ValidationError(validation_errors)
def render_subject(self, context, processors=CONTEXT_PROCESSORS):
"""Render subject line."""
ctx = Context(helpers.patch_context(context, processors))
return Template(self.subject).render(ctx)
def _validate_subject(self):
"""Try rendering the body template and capture any errors."""
try:
self.render_subject({})
except TemplateDoesNotExist as ex:
return {'subject': _lazy("Template does not exist: {}".format(ex))}
except TemplateSyntaxError as ex:
return {'subject': str(ex)}
else:
return {}
def render_body(self, context, content_type=CONTENT_TYPE_PLAIN, processors=CONTEXT_PROCESSORS):
"""Render email body in plain text or HTML format."""
assert content_type in EmailTemplate.CONTENT_TYPES, _lazy("Invalid content type.")
ctx = Context(helpers.patch_context(context, processors))
if content_type == EmailTemplate.CONTENT_TYPE_PLAIN:
return Template(self.body_text).render(ctx)
if content_type == EmailTemplate.CONTENT_TYPE_HTML:
return Template(self.body_html).render(ctx)
def _validate_body(self, content_type):
"""Try rendering the body template and capture any errors."""
assert content_type in EmailTemplate.CONTENT_TYPES, _lazy("Invalid content type.")
if content_type == EmailTemplate.CONTENT_TYPE_PLAIN:
field_name = 'body_text'
if content_type == EmailTemplate.CONTENT_TYPE_HTML:
field_name = 'body_html'
try:
self.render_body({}, content_type=content_type)
except TemplateDoesNotExist as ex:
return {field_name: _lazy("Template does not exist: {}".format(ex))}
except TemplateSyntaxError as ex:
return {field_name: str(ex)}
else:
return {}
def create_message(self, context, **email_kwargs):
"""
Return populated EmailMultiAlternatives object.
This function is a helper that will render the template subject and
plain text / html content, as well as populating all of the standard
EmailMultiAlternatives properties.
>>> template = EmailTemplate.objects.get_latest('order_summary')
>>> context = {'first_name': "Bruce", 'last_name'="Lee"}
>>> email = template.create_message(context, to=['[email protected]'])
>>> email.send()
The function supports all of the standard EmailMultiAlternatives
constructor kwargs except for 'subject', 'body' and 'alternatives' - as
these are set from the template (subject, body_text and body_html).
"""
for kw in ('subject', 'body', 'alternatives'):
assert kw not in email_kwargs, _lazy("Invalid create_message kwarg: '{}'".format(kw))
subject = self.render_subject(context)
body = self.render_body(context, content_type=EmailTemplate.CONTENT_TYPE_PLAIN)
html = self.render_body(context, content_type=EmailTemplate.CONTENT_TYPE_HTML)
email_kwargs['reply_to'] = email_kwargs.get('reply_to') or self.reply_to_list
email_kwargs['from_email'] = email_kwargs.get('from_email') or self.from_email
if ADD_EXTRA_HEADERS:
email_kwargs['headers'] = email_kwargs.get('headers', {})
email_kwargs['headers'].update(self.extra_headers)
# alternatives is a list of (content, mimetype) tuples
# https://github.com/django/django/blob/master/django/core/mail/message.py#L435
return EmailMultiAlternatives(
subject=subject,
body=body,
alternatives=[(html, EmailTemplate.CONTENT_TYPE_HTML)],
**email_kwargs
)
def clone(self):
"""Create a copy of the current object, increase version by 1."""
self.pk = None
self.version += 1
return self.save()
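# --- usage sketch (editor's illustration, not part of the original module) ---
# Fetches the latest active version of a named template and sends it. The
# template name, recipient and context are placeholders; the template must
# already exist in the database for this to work.
def _send_order_confirmation(recipient_email, order_context):
    template = EmailTemplate.objects.current('order-confirmation')
    message = template.create_message(order_context, to=[recipient_email])
    message.send()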
|
py | 1a4cf7223c6a2da0180711f4bf5bdafdae4898b8 | # Desafio 030 -> Crie um programa que leia um numero inteiro e diga se ele é par ou impar
import math
num = int(input('Digite um número inteiro: '))
numd = num % 2
print ('{} é par'.format(num) if numd == 0 else '{} é ímpar'.format(num))
# print('{} é par'.format(num))
#else:
# print('{} é ímpar'.format(num))
|
py | 1a4cf7c4e516431d42a887e1eeb6dd6907979b58 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from distutils.core import setup
setup(
name='PyMine',
version='1.0.0',
packages=['mine', ],
license='Apache License, Version 2.0 Copyright © 2017 Florents Tselai.',
description='Python Implementation of the MINE family of statistics',
long_description=open('README', 'r').read(),
author='Florents Tselai',
author_email='[email protected]',
url='https://github.com/Florents-Tselai/PyMine',
)
|
py | 1a4cf7e7f559d36465bf89159ce67b8319ce8a1b | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class AncpVlanRange(Base):
"""
The AncpVlanRange class encapsulates a required ancpVlanRange resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'ancpVlanRange'
_SDM_ATT_MAP = {
'Enabled': 'enabled',
'FirstId': 'firstId',
'IdIncrMode': 'idIncrMode',
'Increment': 'increment',
'IncrementStep': 'incrementStep',
'InnerEnable': 'innerEnable',
'InnerFirstId': 'innerFirstId',
'InnerIncrement': 'innerIncrement',
'InnerIncrementStep': 'innerIncrementStep',
'InnerPriority': 'innerPriority',
'InnerTpid': 'innerTpid',
'InnerUniqueCount': 'innerUniqueCount',
'Name': 'name',
'ObjectId': 'objectId',
'Priority': 'priority',
'Tpid': 'tpid',
'UniqueCount': 'uniqueCount',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(AncpVlanRange, self).__init__(parent, list_op)
@property
def VlanIdInfo(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_24a1e909d863687c686fff601ea5317d.VlanIdInfo): An instance of the VlanIdInfo class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.vlanidinfo_24a1e909d863687c686fff601ea5317d import VlanIdInfo
if self._properties.get('VlanIdInfo', None) is not None:
return self._properties.get('VlanIdInfo')
else:
return VlanIdInfo(self)
@property
def Enabled(self):
# type: () -> bool
"""
Returns
-------
- bool: Disabled ranges won't be configured nor validated.
"""
return self._get_attribute(self._SDM_ATT_MAP['Enabled'])
@Enabled.setter
def Enabled(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['Enabled'], value)
@property
def FirstId(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: The first ID to be used for the first VLAN tag.
"""
return self._get_attribute(self._SDM_ATT_MAP['FirstId'])
@FirstId.setter
def FirstId(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FirstId'], value)
@property
def IdIncrMode(self):
# type: () -> int
"""
Returns
-------
- number: Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
"""
return self._get_attribute(self._SDM_ATT_MAP['IdIncrMode'])
@IdIncrMode.setter
def IdIncrMode(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['IdIncrMode'], value)
@property
def Increment(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
"""
return self._get_attribute(self._SDM_ATT_MAP['Increment'])
@Increment.setter
def Increment(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Increment'], value)
@property
def IncrementStep(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['IncrementStep'])
@IncrementStep.setter
def IncrementStep(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['IncrementStep'], value)
@property
def InnerEnable(self):
# type: () -> bool
"""DEPRECATED
Returns
-------
- bool: Enable the inner VLAN.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerEnable'])
@InnerEnable.setter
def InnerEnable(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerEnable'], value)
@property
def InnerFirstId(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: The first ID to be used for the inner VLAN tag.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerFirstId'])
@InnerFirstId.setter
def InnerFirstId(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerFirstId'], value)
@property
def InnerIncrement(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerIncrement'])
@InnerIncrement.setter
def InnerIncrement(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerIncrement'], value)
@property
def InnerIncrementStep(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerIncrementStep'])
@InnerIncrementStep.setter
def InnerIncrementStep(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerIncrementStep'], value)
@property
def InnerPriority(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: The 802.1Q priority to be used for the inner VLAN tag.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerPriority'])
@InnerPriority.setter
def InnerPriority(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerPriority'], value)
@property
def InnerTpid(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str: The TPID value in the inner VLAN Tag.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerTpid'])
@InnerTpid.setter
def InnerTpid(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerTpid'], value)
@property
def InnerUniqueCount(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: Number of unique inner VLAN IDs to use.
"""
return self._get_attribute(self._SDM_ATT_MAP['InnerUniqueCount'])
@InnerUniqueCount.setter
def InnerUniqueCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['InnerUniqueCount'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of range
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ObjectId(self):
# type: () -> str
"""
Returns
-------
- str: Unique identifier for this object
"""
return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])
@property
def Priority(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: The 802.1Q priority to be used for the outer VLAN tag.
"""
return self._get_attribute(self._SDM_ATT_MAP['Priority'])
@Priority.setter
def Priority(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['Priority'], value)
@property
def Tpid(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str: The TPID value in the outer VLAN Tag.
"""
return self._get_attribute(self._SDM_ATT_MAP['Tpid'])
@Tpid.setter
def Tpid(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Tpid'], value)
@property
def UniqueCount(self):
# type: () -> int
"""DEPRECATED
Returns
-------
- number: Number of unique first VLAN IDs to use.
"""
return self._get_attribute(self._SDM_ATT_MAP['UniqueCount'])
@UniqueCount.setter
def UniqueCount(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['UniqueCount'], value)
def update(self, Enabled=None, FirstId=None, IdIncrMode=None, Increment=None, IncrementStep=None, InnerEnable=None, InnerFirstId=None, InnerIncrement=None, InnerIncrementStep=None, InnerPriority=None, InnerTpid=None, InnerUniqueCount=None, Name=None, Priority=None, Tpid=None, UniqueCount=None):
# type: (bool, int, int, int, int, bool, int, int, int, int, str, int, str, int, str, int) -> AncpVlanRange
"""Updates ancpVlanRange resource on the server.
Args
----
- Enabled (bool): Disabled ranges won't be configured nor validated.
- FirstId (number): The first ID to be used for the first VLAN tag.
- IdIncrMode (number): Method used to increment VLAN IDs. May take the following values: 0 (First VLAN first), 1 (Last VLAN first), 2 (All).
- Increment (number): Amount of increment per increment step for first VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
- IncrementStep (number): Frequency of first VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
- InnerEnable (bool): Enable the inner VLAN.
- InnerFirstId (number): The first ID to be used for the inner VLAN tag.
- InnerIncrement (number): Amount of increment per increment step for Inner VLAN. E.g. increment step = 10 and increment = 2 means increment VLAN ID by 2 for every 10 IPs
- InnerIncrementStep (number): Frequency of inner VLAN ID increment. E.g., value of 10 means increment VLAN ID once for every 10 IP addresses.
- InnerPriority (number): The 802.1Q priority to be used for the inner VLAN tag.
- InnerTpid (str): The TPID value in the inner VLAN Tag.
- InnerUniqueCount (number): Number of unique inner VLAN IDs to use.
- Name (str): Name of range
- Priority (number): The 802.1Q priority to be used for the outer VLAN tag.
- Tpid (str): The TPID value in the outer VLAN Tag.
- UniqueCount (number): Number of unique first VLAN IDs to use.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def CustomProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the customProtocolStack operation on the server.
Create custom protocol stack under /vport/protocolStack
customProtocolStack(Arg2=list, Arg3=enum, async_operation=bool)
---------------------------------------------------------------
- Arg2 (list(str)): List of plugin types to be added in the new custom stack
- Arg3 (str(kAppend | kMerge | kOverwrite)): Append, merge or overwrite existing protocol stack
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('customProtocolStack', payload=payload, response_object=None)
def DisableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the disableProtocolStack operation on the server.
Disable a protocol under protocolStack using the class name
disableProtocolStack(Arg2=string, async_operation=bool)string
-------------------------------------------------------------
- Arg2 (str): Protocol class name to disable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('disableProtocolStack', payload=payload, response_object=None)
def EnableProtocolStack(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the enableProtocolStack operation on the server.
Enable a protocol under protocolStack using the class name
enableProtocolStack(Arg2=string, async_operation=bool)string
------------------------------------------------------------
- Arg2 (str): Protocol class name to enable
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Status of the exec
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('enableProtocolStack', payload=payload, response_object=None)
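# Hypothetical usage sketch (not part of the generated API): given an instance of this
# class obtained from an ixnetwork_restpy-style session (here called `vlan_range`), the
# exec methods documented above could be driven as follows. The function name and the
# argument values are illustrative only; see the docstrings above for exact signatures.
def _example_toggle_protocol_stack(vlan_range, protocol_class_name):
    vlan_range.EnableProtocolStack(Arg2=protocol_class_name, async_operation=False)
    status = vlan_range.DisableProtocolStack(Arg2=protocol_class_name)
    return status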
|
py | 1a4cfd5a373b20f56c3eb93e07359680c9c68e5f | from requests import Session
from m3u8 import loads
import os
from m3u8.model import SegmentList, Segment, find_key
class XET(object):
APPID = '' # APPid
XIAOEID = '' # Cookie XIAOEID
RESOURCEID = '' # Resource ID; here resourceid represents the course id
sessionid = '' # Cookie laravel_session
session = Session()
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
'Referer': '',
'Origin': 'http://pc-shop.xiaoe-tech.com',
'Content-Type': 'application/x-www-form-urlencoded'
}
cookie = {
'XIAOEID': XIAOEID,
'laravel_session': sessionid
}
def get_lesson_list(self):
url = 'https://pc-shop.xiaoe-tech.com/{appid}/open/column.resourcelist.get/2.0'.format(appid=self.APPID)
body = {
'data[page_index]': '0',
'data[page_size]': '1000',
'data[order_by]': 'start_at:desc',
'data[resource_id]': self.RESOURCEID,
'data[state]': '0'
}
# Get the current course's information
self.header['Referer'] = 'https://pc-shop.xiaoe-tech.com/{appid}/'.format(appid=self.APPID)
resp = self.session.post(url, data=body, headers=self.header, cookies=self.cookie)
if resp.status_code != 200:
raise Exception('Failed to get the lesson list')
try:
# Assemble lesson id, title and resource type
data = [{'id': lesson['id'], 'name': lesson['title'], 'resource_type': lesson['resource_type']} for lesson
in resp.json()['data']]
except Exception as e:
print("Failed to get the lesson list")
exit(1)
# Return the lesson list
return data
def get_lesson_hls(self, resource):
'''
:param resource: here resource represents the id (and type) of a specific lesson under the current course
:return:
'''
resource_type = {'2': 'audio.detail.get', '3': 'video.detail.get'}
url = 'https://pc-shop.xiaoe-tech.com/{appid}/open/{resource}/1.0'.format(appid=self.APPID,
resource=resource_type[
str(resource['resource_type'])])
body = {
'data[resource_id]': resource['id']
}
self.header['Referer'] = 'https://pc-shop.xiaoe-tech.com/{appid}/video_details?id={resourceid}'.format(
appid=self.APPID, resourceid=self.RESOURCEID)
resp = self.session.post(url, data=body, headers=self.header, cookies=self.cookie)
if resp.status_code != 200:
raise Exception('Failed to get the lesson info')
# Return the current lesson's info
hls = resp.json()['data']
return hls
def video(self, url, media_dir, title, playurl):
'''
:param url: the HLS (m3u8) playlist file of the video stream
:param media_dir: directory where downloads are saved
:param title: video title
:param playurl: base URL of the ts segment files
:return:
'''
resp = self.session.get(url, headers=self.header)
media = loads(resp.text)
# Build the list of ts segment URLs
playlist = ["{playurl}{uri}".format(playurl=playurl, uri=uri) for uri in media.segments.uri]
n = 0
new_segments = []
# get ts file list
for url in playlist:
ts_file = os.path.join(media_dir, title, 'm_{num}.ts'.format(num=n))
ts_path = os.path.join(title, 'm_{num}.ts'.format(num=n))
media.data['segments'][n]['uri'] = ts_path
new_segments.append(media.data.get('segments')[n])
# Download the ts file
resp = self.session.get(url, headers=self.header, cookies=self.cookie)
if resp.status_code != 200:
print('Error: {title} {tsfile}'.format(title=title, tsfile=ts_file))
# Save the ts file if it does not exist locally or its size differs from the size reported by the server
if not os.path.exists(ts_file) or os.stat(ts_file).st_size != int(resp.headers['content-length']):
with open(ts_file, 'wb') as ts:
ts.write(resp.content)
n += 1
# change m3u8 data
media.data['segments'] = new_segments
# Update the m3u8 playlist metadata
segments = SegmentList(
[Segment(base_uri=None, keyobject=find_key(segment.get('key', {}), media.keys), **segment)
for segment in
media.data.get('segments', [])])
media.segments = segments
# save m3u8 file
m3u8_file = os.path.join(media_dir, '{title}.m3u8'.format(title=title))
if not os.path.exists(m3u8_file):
with open(m3u8_file, 'w', encoding='utf8') as f:
f.write(media.dumps())
def audio(self, url, media_dir, title):
# Download the audio file
resp = self.session.get(url, headers=self.header, stream=True)
if resp.status_code != 200:
print('Error: {title}'.format(title=title))
else:
audio_file = os.path.join(media_dir, title, '{title}.mp3'.format(title=title))
if not os.path.exists(audio_file):
with open(audio_file, 'wb') as f:
f.write(resp.content)
def download(self):
# Set the download directory
media_dir = 'media'
# Get the info for each lesson
for resourceid in self.get_lesson_list():
# Skip lessons whose resource type is 1 or 6
if resourceid['resource_type'] == 1 or resourceid['resource_type'] == 6:
continue
data = self.get_lesson_hls(resourceid)
title = data['title']
# Make sure the media directory exists
if not os.path.exists(media_dir):
os.mkdir(media_dir)
# Resource type 2 is audio, which can be downloaded directly
if resourceid['resource_type'] == 2:
playurl = data['audio_url']
if not os.path.exists(os.path.join(media_dir, title)):
try:
os.mkdir(os.path.join(media_dir, title))
except OSError as e:
title = title.replace('|', '丨')
os.mkdir(os.path.join(media_dir, title))
self.audio(playurl, media_dir, title)
# Resource type 3 is video; its ts segments must be stitched together manually after download
elif resourceid['resource_type'] == 3:
url = data['video_hls']
playurl = url.split('v.f230')[0]
# mkdir media directory
if not os.path.exists(os.path.join(media_dir, title)):
os.mkdir(os.path.join(media_dir, title))
self.video(url, media_dir, title, playurl)
if __name__ == '__main__':
XET().download()
|
py | 1a4cfdd919f97f22ec1dffd6b92d7fc71fd64d5f | from .base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!!)*)$191cwf++(pmovz8e*-39^f+ye3_$^(i#otyb5*^r*^$4'
# SECURITY WARNING: define the correct hosts in production!
ALLOWED_HOSTS = ['*']
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
try:
from .local import *
except ImportError:
pass
|
py | 1a4cfe52e64e1cbc2af980058707dd3747218637 | # config.py
import os
class Config(object):
"""Parent configuration class."""
DEBUG = False
TESTING = False
# SECRET_KEY = os.getenv('SECRET_KEY')
SECRET_KEY="some-very-long-string-of-random-characters-CHANGE-TO-YOUR-LIKING"
RESTPLUS_VALIDATE = True
ERROR_404_HELP = False
# db_name = os.getenv("DATABASE")
# host = os.getenv("HOST")
# role = os.getenv("ROLE")
# pwd = os.getenv("PASSWORD")
# port = os.getenv("PORT")
class DevelopmentConfig(Config):
"""Configurations for Development."""
DEBUG = True
MODE="development"
class TestingConfig(Config):
"""Configurations for Testing, with a separate test database."""
TESTING = True
DEBUG = True
# db_name = os.getenv("TEST_DB")
MODE="testing"
class ProductionConfig(Config):
"""Configurations for Production."""
DEBUG = False
app_config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
} |
py | 1a4cfe813194f0ac69d4da14946381ef9af2896d | import logging
import os
import platform
import subprocess
import sys
import warnings
from unittest import skipIf
from pytest import raises, mark
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial import unittest
import scrapy
from scrapy.crawler import Crawler, CrawlerRunner, CrawlerProcess
from scrapy.settings import Settings, default_settings
from scrapy.spiderloader import SpiderLoader
from scrapy.utils.log import configure_logging, get_scrapy_root_handler
from scrapy.utils.spider import DefaultSpider
from scrapy.utils.misc import load_object
from scrapy.extensions.throttle import AutoThrottle
from scrapy.extensions import telnet
from scrapy.utils.test import get_testenv
class BaseCrawlerTest(unittest.TestCase):
def assertOptionIsDefault(self, settings, key):
self.assertIsInstance(settings, Settings)
self.assertEqual(settings[key], getattr(default_settings, key))
class CrawlerTestCase(BaseCrawlerTest):
def setUp(self):
self.crawler = Crawler(DefaultSpider, Settings())
def test_populate_spidercls_settings(self):
spider_settings = {'TEST1': 'spider', 'TEST2': 'spider'}
project_settings = {'TEST1': 'project', 'TEST3': 'project'}
class CustomSettingsSpider(DefaultSpider):
custom_settings = spider_settings
settings = Settings()
settings.setdict(project_settings, priority='project')
crawler = Crawler(CustomSettingsSpider, settings)
self.assertEqual(crawler.settings.get('TEST1'), 'spider')
self.assertEqual(crawler.settings.get('TEST2'), 'spider')
self.assertEqual(crawler.settings.get('TEST3'), 'project')
self.assertFalse(settings.frozen)
self.assertTrue(crawler.settings.frozen)
def test_crawler_accepts_dict(self):
crawler = Crawler(DefaultSpider, {'foo': 'bar'})
self.assertEqual(crawler.settings['foo'], 'bar')
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_accepts_None(self):
crawler = Crawler(DefaultSpider)
self.assertOptionIsDefault(crawler.settings, 'RETRY_ENABLED')
def test_crawler_rejects_spider_objects(self):
with raises(ValueError):
Crawler(DefaultSpider())
class SpiderSettingsTestCase(unittest.TestCase):
def test_spider_custom_settings(self):
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'AUTOTHROTTLE_ENABLED': True
}
crawler = Crawler(MySpider, {})
enabled_exts = [e.__class__ for e in crawler.extensions.middlewares]
self.assertIn(AutoThrottle, enabled_exts)
class CrawlerLoggingTestCase(unittest.TestCase):
def test_no_root_handler_installed(self):
handler = get_scrapy_root_handler()
if handler is not None:
logging.root.removeHandler(handler)
class MySpider(scrapy.Spider):
name = 'spider'
Crawler(MySpider, {})
assert get_scrapy_root_handler() is None
def test_spider_custom_settings_log_level(self):
log_file = self.mktemp()
class MySpider(scrapy.Spider):
name = 'spider'
custom_settings = {
'LOG_LEVEL': 'INFO',
'LOG_FILE': log_file,
# disable telnet if not available to avoid an extra warning
'TELNETCONSOLE_ENABLED': telnet.TWISTED_CONCH_AVAILABLE,
}
configure_logging()
self.assertEqual(get_scrapy_root_handler().level, logging.DEBUG)
crawler = Crawler(MySpider, {})
self.assertEqual(get_scrapy_root_handler().level, logging.INFO)
info_count = crawler.stats.get_value('log_count/INFO')
logging.debug('debug message')
logging.info('info message')
logging.warning('warning message')
logging.error('error message')
with open(log_file, 'rb') as fo:
logged = fo.read().decode('utf8')
self.assertNotIn('debug message', logged)
self.assertIn('info message', logged)
self.assertIn('warning message', logged)
self.assertIn('error message', logged)
self.assertEqual(crawler.stats.get_value('log_count/ERROR'), 1)
self.assertEqual(crawler.stats.get_value('log_count/WARNING'), 1)
self.assertEqual(
crawler.stats.get_value('log_count/INFO') - info_count, 1)
self.assertEqual(crawler.stats.get_value('log_count/DEBUG', 0), 0)
class SpiderLoaderWithWrongInterface:
def unneeded_method(self):
pass
class CustomSpiderLoader(SpiderLoader):
pass
class CrawlerRunnerTestCase(BaseCrawlerTest):
def test_spider_manager_verify_interface(self):
settings = Settings({
'SPIDER_LOADER_CLASS': 'tests.test_crawler.SpiderLoaderWithWrongInterface'
})
with warnings.catch_warnings(record=True) as w:
self.assertRaises(AttributeError, CrawlerRunner, settings)
self.assertEqual(len(w), 1)
self.assertIn("SPIDER_LOADER_CLASS", str(w[0].message))
self.assertIn("scrapy.interfaces.ISpiderLoader", str(w[0].message))
def test_crawler_runner_accepts_dict(self):
runner = CrawlerRunner({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_runner_accepts_None(self):
runner = CrawlerRunner()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_deprecated_attribute_spiders(self):
with warnings.catch_warnings(record=True) as w:
runner = CrawlerRunner(Settings())
spiders = runner.spiders
self.assertEqual(len(w), 1)
self.assertIn("CrawlerRunner.spiders", str(w[0].message))
self.assertIn("CrawlerRunner.spider_loader", str(w[0].message))
sl_cls = load_object(runner.settings['SPIDER_LOADER_CLASS'])
self.assertIsInstance(spiders, sl_cls)
class CrawlerProcessTest(BaseCrawlerTest):
def test_crawler_process_accepts_dict(self):
runner = CrawlerProcess({'foo': 'bar'})
self.assertEqual(runner.settings['foo'], 'bar')
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
def test_crawler_process_accepts_None(self):
runner = CrawlerProcess()
self.assertOptionIsDefault(runner.settings, 'RETRY_ENABLED')
class ExceptionSpider(scrapy.Spider):
name = 'exception'
@classmethod
def from_crawler(cls, crawler, *args, **kwargs):
raise ValueError('Exception in from_crawler method')
class NoRequestsSpider(scrapy.Spider):
name = 'no_request'
def start_requests(self):
return []
@mark.usefixtures('reactor_pytest')
class CrawlerRunnerHasSpider(unittest.TestCase):
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_successful_for_several(self):
runner = CrawlerRunner()
yield runner.crawl(NoRequestsSpider)
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, False)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
self.assertEqual(runner.bootstrap_failed, True)
@defer.inlineCallbacks
def test_crawler_runner_bootstrap_failed_for_several(self):
runner = CrawlerRunner()
try:
yield runner.crawl(ExceptionSpider)
except ValueError:
pass
else:
self.fail('Exception should be raised from spider')
yield runner.crawl(NoRequestsSpider)
self.assertEqual(runner.bootstrap_failed, True)
def test_crawler_runner_asyncio_enabled_true(self):
if self.reactor_pytest == 'asyncio':
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
CrawlerRunner(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_crawler_process_asyncio_enabled_true(self):
with LogCapture(level=logging.DEBUG) as log:
if self.reactor_pytest == 'asyncio':
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
yield runner.crawl(NoRequestsSpider)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
else:
msg = r"The installed reactor \(.*?\) does not match the requested one \(.*?\)"
with self.assertRaisesRegex(Exception, msg):
runner = CrawlerProcess(settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
})
@defer.inlineCallbacks
def test_crawler_process_asyncio_enabled_false(self):
runner = CrawlerProcess(settings={"TWISTED_REACTOR": None})
with LogCapture(level=logging.DEBUG) as log:
yield runner.crawl(NoRequestsSpider)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", str(log))
class ScriptRunnerMixin:
def run_script(self, script_name):
script_path = os.path.join(self.script_dir, script_name)
args = (sys.executable, script_path)
p = subprocess.Popen(args, env=get_testenv(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return stderr.decode('utf-8')
class CrawlerProcessSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerProcess')
def test_simple(self):
log = self.run_script('simple.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_no_reactor(self):
log = self.run_script('asyncio_enabled_no_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_asyncio_enabled_reactor(self):
log = self.run_script('asyncio_enabled_reactor.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_ipv6_default_name_resolver(self):
log = self.run_script('default_name_resolver.py')
self.assertIn('Spider closed (finished)', log)
self.assertIn("'downloader/exception_type_count/twisted.internet.error.DNSLookupError': 1,", log)
self.assertIn(
"twisted.internet.error.DNSLookupError: DNS lookup failed: no results for hostname lookup: ::1.",
log)
def test_ipv6_alternative_name_resolver(self):
log = self.run_script('alternative_name_resolver.py')
self.assertIn('Spider closed (finished)', log)
self.assertNotIn("twisted.internet.error.DNSLookupError", log)
def test_reactor_select(self):
log = self.run_script("twisted_reactor_select.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.selectreactor.SelectReactor", log)
@mark.skipif(platform.system() == 'Windows', reason="PollReactor is not supported on Windows")
def test_reactor_poll(self):
log = self.run_script("twisted_reactor_poll.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.pollreactor.PollReactor", log)
# https://twistedmatrix.com/trac/ticket/9766
@skipIf(platform.system() == 'Windows' and sys.version_info >= (3, 8),
"the asyncio reactor is broken on Windows when running Python ≥ 3.8")
def test_reactor_asyncio(self):
log = self.run_script("twisted_reactor_asyncio.py")
self.assertIn("Spider closed (finished)", log)
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
class CrawlerRunnerSubprocess(ScriptRunnerMixin, unittest.TestCase):
script_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'CrawlerRunner')
def test_response_ip_address(self):
log = self.run_script("ip_address.py")
self.assertIn("INFO: Spider closed (finished)", log)
self.assertIn("INFO: Host: not.a.real.domain", log)
self.assertIn("INFO: Type: <class 'ipaddress.IPv4Address'>", log)
self.assertIn("INFO: IP address: 127.0.0.1", log)
|
py | 1a4cfeaada7ef66db564997a094e015c35976d31 | from . import common, models
def test_basic(init_dist, batch_size, iteration):
print("test_basic")
common.run(models.BasicModel(), batch_size, iteration, gather_inputs=False)
def test_small_param(init_dist, batch_size, iteration):
print("test_small_param")
common.run(models.SmallParamModel(), batch_size, iteration, gather_inputs=False)
def test_fork_join(init_dist, batch_size, iteration):
print("test_fork_join")
common.run(models.ForkJoinModel(), batch_size, iteration, gather_inputs=False)
def test_shared_param(init_dist, batch_size, iteration):
print("test_shared_param")
common.run(models.SharedParamModel(), batch_size, iteration, gather_inputs=False)
def test_one_op(init_dist, batch_size, iteration):
print("test_one_op")
common.run(models.OneOpModel(), batch_size, iteration, gather_inputs=False)
def test_tensor_mul(init_dist, batch_size, iteration):
print("test_tensor_mul")
common.run(models.TensorMulModel(), batch_size, iteration, gather_inputs=False)
def test_emb(init_dist, batch_size, iteration):
print("test_emb")
common.run(models.EmbeddingModel(), batch_size, iteration, get_dataset=models.EmbeddingModel.get_dataset, gather_inputs=False)
def test_function(init_dist, batch_size, iteration):
print("test_function")
common.run(models.FunctionModel(), batch_size, iteration, get_dataset=models.FunctionModel.get_dataset, gather_inputs=False)
def test_loss_out(init_dist, batch_size, iteration):
print("test_loss_out")
common.run_loss(models.LossOutModel(), batch_size, iteration, gather_inputs=False)
|
py | 1a4cff6b1473d5bc67dac7695e9d48d2e6241ae5 | #!python
# coding=utf-8
import logging
from typing import List, Tuple
from itertools import zip_longest, filterfalse
from avro import schema
from confluent_kafka import Producer
from confluent_kafka.avro import AvroProducer, CachedSchemaRegistryClient
# Monkey patch to get hashable avro schemas
# https://issues.apache.org/jira/browse/AVRO-1737
# https://github.com/confluentinc/confluent-kafka-python/issues/122
def hash_func(self):
return hash(str(self))
schema.EnumSchema.__hash__ = hash_func
schema.RecordSchema.__hash__ = hash_func
schema.PrimitiveSchema.__hash__ = hash_func
schema.ArraySchema.__hash__ = hash_func
schema.FixedSchema.__hash__ = hash_func
schema.MapSchema.__hash__ = hash_func
L = logging.getLogger('easyavro')
L.propagate = False
L.addHandler(logging.NullHandler())
def grouper(iterable, batch_size, fillend=False, fillvalue=None):
# Modified from https://docs.python.org/3/library/itertools.html#recipes
# to remove None values
# grouper('ABCDEFG', 3, fillend=True, fillvalue='x') --> ABC DEF Gxx"
# grouper('ABCDEFG', 3, fillend=False) --> ABC DEF G"
"Collect data into fixed-length chunks or blocks"
args = [iter(iterable)] * batch_size
if fillend is False:
return ( tuple(filterfalse(lambda x: x is None, g)) for g in zip_longest(*args, fillvalue=None) )
else:
return zip_longest(*args, fillvalue=fillvalue)
def on_delivery(err, msg):
if err:
L.error(err)
else:
L.debug('Delivered to {} at offset {}'.format(msg.topic(), msg.offset()))
class BaseProducer:
def produce(self, records: List[Tuple], batch=None, flush_timeout=60) -> None:
batch = batch or len(records)
for g, group in enumerate(grouper(records, batch)):
for i, r in enumerate(group):
super().produce(
topic=self.kafka_topic,
key=r[0],
value=r[1],
on_delivery=on_delivery
)
L.debug("{}/{} messages queued".format(i + 1, len(records)))
L.debug("Flushing...")
remaining = self.flush(timeout=flush_timeout)
sent = len(group) - remaining
L.info("Batch {} finished: {} sent, {} pending".format(g, sent, remaining))
self.flush(timeout=flush_timeout)
L.info("Done producing")
class EasyProducer(BaseProducer, Producer):
def __init__(self,
kafka_brokers: List[str],
kafka_topic: str,
debug: bool = False,
kafka_conf: dict = None,
py_conf: dict = None) -> None:
self.kafka_topic = kafka_topic
conf = {
'bootstrap.servers': ','.join(kafka_brokers),
'client.id': self.__class__.__name__,
'api.version.request': 'true',
}
if debug is True:
conf['debug'] = 'msg'
kafka_conf = kafka_conf or {}
py_conf = py_conf or {}
super().__init__(
{**conf, **kafka_conf},
**py_conf
)
class EasyAvroProducer(BaseProducer, AvroProducer):
def __init__(self,
schema_registry_url: str,
kafka_brokers: List[str],
kafka_topic: str,
value_schema: schema.Schema = None,
key_schema: schema.Schema = None,
debug: bool = False,
kafka_conf: dict = None,
py_conf: dict = None) -> None:
self.kafka_topic = kafka_topic
self._client = CachedSchemaRegistryClient(dict(
url=schema_registry_url
))
# Value Schema
if value_schema is None:
vs_name = '{}-value'.format(self.kafka_topic)
_, value_schema, _ = self._client.get_latest_schema(vs_name)
if value_schema is None:
raise ValueError('Schema "{}" not found in registry'.format(vs_name))
# Key Schema
if key_schema is None:
ks_name = '{}-key'.format(self.kafka_topic)
_, key_schema, _ = self._client.get_latest_schema(ks_name)
if key_schema is None:
raise ValueError('Schema "{}" not found in registry'.format(ks_name))
conf = {
'bootstrap.servers': ','.join(kafka_brokers),
'schema.registry.url': schema_registry_url,
'client.id': self.__class__.__name__,
'api.version.request': 'true',
}
if debug is True:
conf['debug'] = 'msg'
kafka_conf = kafka_conf or {}
py_conf = py_conf or {}
super().__init__(
{**conf, **kafka_conf},
default_value_schema=value_schema,
default_key_schema=key_schema,
**py_conf
)
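# Usage sketch (assumptions: a Kafka broker reachable at localhost:9092 and an existing
# topic named "example-topic"; EasyAvroProducer would additionally require a schema
# registry with "example-topic-key"/"example-topic-value" schemas registered).
def _example_produce():
    producer = EasyProducer(
        kafka_brokers=["localhost:9092"],
        kafka_topic="example-topic",
    )
    # Records are (key, value) tuples; `batch` caps how many are queued before each flush.
    producer.produce([("k1", "value one"), ("k2", "value two")], batch=2)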
|
py | 1a4d00162086087dc2a48f82f547b00cbcc4da05 | """FastAPI framework, high performance, easy to learn, fast to code, ready for production"""
__version__ = "0.77.1"
from starlette import status as status
from .applications import FastAPI as FastAPI
from .background import BackgroundTasks as BackgroundTasks
from .datastructures import UploadFile as UploadFile
from .exceptions import HTTPException as HTTPException
from .param_functions import Body as Body
from .param_functions import Cookie as Cookie
from .param_functions import Depends as Depends
from .param_functions import File as File
from .param_functions import Form as Form
from .param_functions import Header as Header
from .param_functions import Path as Path
from .param_functions import Query as Query
from .param_functions import Security as Security
from .requests import Request as Request
from .responses import Response as Response
from .routing import APIRouter as APIRouter
from .websockets import WebSocket as WebSocket
from .websockets import WebSocketDisconnect as WebSocketDisconnect
|
py | 1a4d01f7c77cebf44b5e57813a48de798d5b4bf5 | import asyncio
import logging
from aiohttp import ClientError, ClientSession
from gios import ApiError, InvalidSensorsData, Gios, NoStationError
GIOS_STATION_ID = 117
logging.basicConfig(level=logging.DEBUG)
async def main():
try:
async with ClientSession() as websession:
gios = Gios(GIOS_STATION_ID, websession)
await gios.update()
except (ApiError, NoStationError, InvalidSensorsData, ClientError) as error:
print(f"{error}")
return
data = gios.data
latitude = gios.latitude
longitude = gios.longitude
station_name = gios.station_name
print(f"Longitude: {longitude}")
print(f"Latitude: {latitude}")
print(f"Station name: {station_name}")
print(data)
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
|
py | 1a4d02104577b295a613e36a2ae1a0ef99a68ef7 | import os
import tensorflow as tf
def assign_to_gpu(gpu=0, ps_dev="/device:CPU:0"):
def _assign(op):
node_def = op if isinstance(op, tf.compat.v1.NodeDef) else op.node_def
if node_def.op == "Variable":
return ps_dev
else:
return "/gpu:%d" % gpu
return _assign
def average_grads_and_vars(tower_grads_and_vars):
def average_dense(grad_and_vars):
if len(grad_and_vars) == 1:
return grad_and_vars[0][0]
grad = grad_and_vars[0][0]
for g, _ in grad_and_vars[1:]:
grad += g
return grad / len(grad_and_vars)
def average_sparse(grad_and_vars):
if len(grad_and_vars) == 1:
return grad_and_vars[0][0]
indices = []
values = []
for g, _ in grad_and_vars:
indices += [g.indices]
values += [g.values]
indices = tf.concat(indices, 0)
values = tf.concat(values, 0) / len(grad_and_vars)
return tf.IndexedSlices(values, indices, grad_and_vars[0][0].dense_shape)
average_grads_and_vars = []
for grad_and_vars in zip(*tower_grads_and_vars):
if grad_and_vars[0][0] is None:
grad = None
elif isinstance(grad_and_vars[0][0], tf.IndexedSlices):
grad = average_sparse(grad_and_vars)
else:
grad = average_dense(grad_and_vars)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads_and_vars.append(grad_and_var)
return average_grads_and_vars
def load_from_checkpoint(saver, logdir):
sess = tf.get_default_session()
ckpt = tf.train.get_checkpoint_state(logdir)
if ckpt and ckpt.model_checkpoint_path:
if os.path.isabs(ckpt.model_checkpoint_path):
# Restores from checkpoint with absolute path.
saver.restore(sess, ckpt.model_checkpoint_path)
else:
# Restores from checkpoint with relative path.
saver.restore(sess, os.path.join(logdir, ckpt.model_checkpoint_path))
return True
return False
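# Minimal multi-GPU sketch showing how the helpers above are typically combined
# (assumes TF1-style graph mode; `build_loss` and `optimizer` are illustrative
# placeholders supplied by the caller, not part of this module).
def _example_tower_gradients(build_loss, optimizer, num_gpus=2):
    tower_grads_and_vars = []
    for gpu in range(num_gpus):
        # Place ops on each GPU while keeping Variables on the CPU.
        with tf.compat.v1.device(assign_to_gpu(gpu)):
            loss = build_loss()
            tower_grads_and_vars.append(optimizer.compute_gradients(loss))
    # Average the per-tower gradients and apply them once.
    averaged = average_grads_and_vars(tower_grads_and_vars)
    return optimizer.apply_gradients(averaged)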
|
py | 1a4d02a0f648109d452e84d08df89d612ff142ea | # coding: utf-8
"""
TextMagic API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import TextMagic
from TextMagic.models.get_surveys_paginated_response import GetSurveysPaginatedResponse # noqa: E501
from TextMagic.rest import ApiException
class TestGetSurveysPaginatedResponse(unittest.TestCase):
"""GetSurveysPaginatedResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetSurveysPaginatedResponse(self):
"""Test GetSurveysPaginatedResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = TextMagic.models.get_surveys_paginated_response.GetSurveysPaginatedResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a4d02e51710191111628dd78c71df93b66c3bf8 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import asyncio
import signal
import sys
import functools
from aiotinder.controllers.tinder import Tinder
facebook_id = ""
facebook_token = ""
async def shutdown(loop: asyncio.events.AbstractEventLoop) -> None:
await asyncio.sleep(0.1)
loop.close()
async def result(tinder: Tinder) -> None:
users = await tinder.prospective_matches()
for user in users:
print("Name: {0}, Age: {1}".format(user.name, user.age))
def main() -> None:
loop = asyncio.get_event_loop()
loop.set_debug(True)
loop.add_signal_handler(signal.SIGINT, functools.partial(shutdown, loop))
tinder = Tinder(facebook_id, facebook_token)
loop.run_until_complete(result(tinder))
sys.exit(1)
if __name__ == "__main__":
main()
|
py | 1a4d039bb4d1778ca085960599a45a2a47d2a5a8 | '''
Voice metadata definition.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
class Voice(object):
def __init__(self, id, name=None, languages=[], gender=None, age=None):
self.id = id
self.name = name
self.languages = languages
self.gender = gender
self.age = age
def __str__(self):
return '''<Voice id=%(id)s
name=%(name)s
languages=%(languages)s
gender=%(gender)s
age=%(age)s>''' % self.__dict__ |
py | 1a4d03bdd0f89a80510865be702e296cf2b12fbf | from enum import IntEnum
import functools
import usb.core
import usb.util
from traffic_light.error import TrafficLightError, MultipleTrafficLightsError
CTRL_ENDPOINT = 0x02
ID_VENDOR = 0x0d50
ID_PRODUCT = 0x0008
INTERFACE = 0
class Color(IntEnum):
RED = 0x10
YELLOW = 0x11
GREEN = 0x12
class State(IntEnum):
OFF = 0x0
ON = 0x1
class ClewareTrafficLight:
def __init__(self, address=None):
if address:
self.address = address
self.device = usb.core.find(
address=address,
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
elif len(list(ClewareTrafficLight.find_devices())) > 1:
raise MultipleTrafficLightsError(
"No address is given and there are multiple devices conected! "
"Use 'print_devices' to see a list of connected devices."
)
else:
self.device = usb.core.find(
idVendor=ID_VENDOR,
idProduct=ID_PRODUCT)
if self.device is None:
raise TrafficLightError('Cleware traffic light not found!')
self.reattach = False
def attach(self):
"""Attaches the device back to the kernel"""
usb.util.dispose_resources(self.device)
if self.reattach:
self.device.attach_kernel_driver(INTERFACE)
def detach(self):
"""Detaches the device from to kernel so it can be used"""
if self.device.is_kernel_driver_active(INTERFACE):
self.device.detach_kernel_driver(INTERFACE)
self.reattach = True
@staticmethod
def find_devices():
"""Returns the raw iterator of all found traffic lights"""
devices = usb.core.find(find_all=True, idVendor=ID_VENDOR, idProduct=ID_PRODUCT)
if devices:
return devices
return []
@staticmethod
def print_devices():
"""Prints a list of all connected traffic lights"""
devices = ClewareTrafficLight.get_devices()
for device in devices:
print(device)
@staticmethod
def get_devices():
"""Returns a list of ClewareTrafficLight instances"""
usb_devices = ClewareTrafficLight.find_devices()
return [ClewareTrafficLight(d.address) for d in usb_devices]
def set_led(self, color, value, timeout=1000):
"""Sets the given state and color of the attached traffic light
Arguments:
color -- the color to set, as the Color enum. E.g. Color.RED
value -- the state to set, as the State enum. E.g. State.ON
timeout -- USB write timeout in milliseconds
"""
try:
self.detach()
self.device.write(CTRL_ENDPOINT, [0x00, color, value], timeout=timeout)
except Exception as exc:
raise TrafficLightError(str(exc)) from exc
finally:
self.attach()
def __getattr__(self, name):
"""Parses attribute calls such as red_on into a set_led call"""
args = name.split('_')
try:
color = Color[args[0].upper()]
state = State[args[1].upper()]
except Exception as exc:
raise TrafficLightError("Either the given color or state could not be parsed! Exc: {}"
.format(exc))
return functools.partial(self.set_led, color, state)
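# Usage sketch: thanks to __getattr__ above, attribute calls are parsed into
# (Color, State) pairs, e.g. (assuming exactly one Cleware light is connected):
#   light = ClewareTrafficLight()
#   light.red_on()      # -> set_led(Color.RED, State.ON)
#   light.green_off()   # -> set_led(Color.GREEN, State.OFF)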
def __str__(self):
"""Converts the instance into a string with important information"""
return ("== Cleware Traffic Light ==\n"
"Address: {} \n"
"IdVendor: {} \n"
"IdProduct: {}".format(self.address, ID_VENDOR, ID_PRODUCT))
|
py | 1a4d041316918ddf992a478e3fcec3ff23573c57 | from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack
from b_elasticsearch_layer.layer import Layer as ElasticsearchLayer
class TestingInfrastructure(Stack):
def __init__(self, scope: Stack):
super().__init__(
scope=scope,
id=f'TestingStack',
stack_name=f'TestingStack'
)
Function(
scope=self,
id='TestingFunction',
code=Code.from_inline(
'def handler(): return "Hello World!"'
),
handler='index.handler',
runtime=Runtime.PYTHON_3_6,
layers=[ElasticsearchLayer(self, 'TestingElasticsearchLayer')]
)
|
py | 1a4d0480ce17c3561ca579bc049192f54ba19075 | import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--results", type=str, required=True)
parser.add_argument("--claims", type=str, required=True)
parser.add_argument("--t5_output_ids", type=str, required=True)
parser.add_argument("--t5_output", type=str, required=True)
args = parser.parse_args()
label_map = {"weak": "NOT_ENOUGH_INFO", "false": "CONTRADICT", "true": "SUPPORT"}
format1_ids = open(args.t5_output_ids, "r")
format1_label = open(args.t5_output, "r")
format1_eval = open(args.results, "w")
labels = [line.split()[0] for line in format1_label.readlines()]
id_lines = format1_ids.readlines()
claim_labels_dict = {}
for idx, line in enumerate(id_lines):
info = line.split()
if info[0] not in claim_labels_dict:
claim_labels_dict[str(info[0])] = {}
claim_labels_dict[info[0]][info[1]] = {"label": label_map[labels[idx]]}
claims_f = open(args.claims, "r").readlines()
all_ids = []
for line in claims_f:
info = json.loads(line)
claim_id = info["id"]
all_ids.append(claim_id)
for key in all_ids:
if str(key) in claim_labels_dict:
format1_eval.write(json.dumps({"claim_id": int(key), "labels": claim_labels_dict[str(key)]})+"\n")
else:
format1_eval.write(json.dumps({"claim_id": int(key), "labels": {}}) + "\n")
format1_eval.close()
format1_label.close()
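# Example invocation (the script and file names are illustrative only):
#   python format_results.py --results results.jsonl --claims claims.jsonl \
#       --t5_output_ids t5_ids.txt --t5_output t5_predictions.txt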
|
py | 1a4d054d01e231a58bd8cc887d2dbb9ee3dc1c64 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pogoprotos/networking/requests/messages/get_download_urls_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='pogoprotos/networking/requests/messages/get_download_urls_message.proto',
package='pogoprotos.networking.requests.messages',
syntax='proto3',
serialized_pb=_b('\nGpogoprotos/networking/requests/messages/get_download_urls_message.proto\x12\'pogoprotos.networking.requests.messages\"*\n\x16GetDownloadUrlsMessage\x12\x10\n\x08\x61sset_id\x18\x01 \x03(\tb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_GETDOWNLOADURLSMESSAGE = _descriptor.Descriptor(
name='GetDownloadUrlsMessage',
full_name='pogoprotos.networking.requests.messages.GetDownloadUrlsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='asset_id', full_name='pogoprotos.networking.requests.messages.GetDownloadUrlsMessage.asset_id', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=158,
)
DESCRIPTOR.message_types_by_name['GetDownloadUrlsMessage'] = _GETDOWNLOADURLSMESSAGE
GetDownloadUrlsMessage = _reflection.GeneratedProtocolMessageType('GetDownloadUrlsMessage', (_message.Message,), dict(
DESCRIPTOR = _GETDOWNLOADURLSMESSAGE,
__module__ = 'pogoprotos.networking.requests.messages.get_download_urls_message_pb2'
# @@protoc_insertion_point(class_scope:pogoprotos.networking.requests.messages.GetDownloadUrlsMessage)
))
_sym_db.RegisterMessage(GetDownloadUrlsMessage)
# @@protoc_insertion_point(module_scope)
|
py | 1a4d05778557b175bc47fd9254fc804a018634cc | #!/usr/bin/env python3
import uuid
import typing
import aiohttp
from aiohttp import web_exceptions
class MumbleException(Exception):
def __init__(self, message: str):
super().__init__()
self.message = message
def ApiSession(timeout: int) -> typing.AsyncContextManager[aiohttp.ClientSession]:
skip_headers = ['User-Agent']
client_timeout = aiohttp.ClientTimeout(total=timeout)
return aiohttp.ClientSession(timeout=client_timeout, skip_auto_headers=skip_headers)
class Spotiflag:
PORT = 17171
def __init__(self, hostname: str, timeout: int):
self.url = f'http://{hostname}:{self.PORT}'
self.timeout = timeout
async def ping(self, answer: str='pong') -> None:
url = f'{self.url}/api/ping/'
async with ApiSession(self.timeout) as session:
async with session.get(url) as response:
if response.status != web_exceptions.HTTPOk.status_code:
raise MumbleException(f'/api/ping/: returns {response.status}')
try:
data = await response.text()
except Exception:
raise MumbleException(f'/api/ping/: can\'t get text')
if data != answer:
raise MumbleException(f'/api/ping/: incorrect answer')
async def list(self) -> typing.List[str]:
url = f'{self.url}/api/list/'
async with ApiSession(self.timeout) as session:
async with session.get(url) as response:
if response.status != web_exceptions.HTTPOk.status_code:
raise MumbleException(f'/api/list/: returns {response.status}')
try:
data = await response.json()
except Exception:
raise MumbleException(f'/api/list/: can\'t get json')
if not isinstance(data, list) or not all(isinstance(x, str) for x in data):
raise MumbleException(f'/api/list/: invalid json structure')
return data
async def generate(self, description: bytes) -> uuid.UUID:
url = f'{self.url}/api/generate/'
async with ApiSession(self.timeout) as session:
async with session.post(url, data=description) as response:
if response.status != web_exceptions.HTTPOk.status_code:
raise MumbleException(f'/api/generate/: returns {response.status}')
try:
data = await response.text()
except Exception:
raise MumbleException(f'/api/generate/: can\'t get id')
try:
id = uuid.UUID(data.strip(), version=4)
except Exception:
raise MumbleException(f'/api/generate/: invalid id format')
return id
async def listen(
self,
id: uuid.UUID,
chunks_count: int,
expected_chunk_size: int=64 * 1024
) -> typing.Optional[bytes]:
url = f'{self.url}/api/listen/{id}/'
chunks = []
async with ApiSession(self.timeout) as session:
offset = 0
for i in range(chunks_count):
session.headers.update({
'Range': f'bytes={offset}-'
})
async with session.get(url) as response:
if response.status == web_exceptions.HTTPNotFound.status_code:
return None
if response.status != web_exceptions.HTTPPartialContent.status_code:
raise MumbleException(f'/api/listen/: returns {response.status}')
try:
chunk = await response.read()
except Exception:
raise MumbleException(f'/api/listen/: can\'t get content')
if len(chunk) != expected_chunk_size:
raise MumbleException(f'/api/listen/: incorrect content size')
chunks.append(chunk)
offset += len(chunk)
return b''.join(chunks)
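# Usage sketch (hostname and payload are illustrative): generate a track from a
# description, then stream it back in three 64 KiB chunks.
async def _example_roundtrip(hostname="10.10.10.10"):
    api = Spotiflag(hostname, timeout=10)
    await api.ping()
    track_id = await api.generate(b"example description")
    return await api.listen(track_id, chunks_count=3)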
|
py | 1a4d057e0d3597bd5d2ac0d892a8c3923b483908 | import argparse
import json
import os
import subprocess
from bids import BIDSLayout
import datetime
from collections import OrderedDict
from shutil import copy as fileCopy
from shutil import rmtree
def isTrue(arg):
return arg is not None and (arg == 'Y' or arg == '1' or arg == 'True')
def logtext(logfile, textstr):
stamp=datetime.datetime.now().strftime("%m-%d-%y %H:%M:%S%p")
textstring = str(stamp) + ' ' + str(textstr)
print(textstring)
logfile.write(textstring + '\n')
def createDatasetDescription(bidsDir,project):
datasetjson=os.path.join(bidsDir,'dataset_description.json');
if not os.path.exists(datasetjson):
print("Constructing BIDS dataset description")
dataset_description=OrderedDict()
dataset_description['Name'] =project
dataset_description['BIDSVersion']=BIDSVERSION
dataset_description['License']=""
dataset_description['ReferencesAndLinks']=""
with open(datasetjson,'w') as datasetjson:
json.dump(dataset_description,datasetjson)
BIDSVERSION = "1.0.0"
parser = argparse.ArgumentParser(description="Run dcm2niix on every file in a session")
parser.add_argument("--subject", help="Subject Label", required=True)
parser.add_argument("--session_label", help="session Label", nargs='?', required=False)
parser.add_argument("--proc_steps", help="additional proc steps", nargs='?', required=False)
parser.add_argument("--dicomdir", help="Root output directory for DICOM files", required=True)
parser.add_argument("--niftidir", help="Root output directory for NIFTI files", required=True)
parser.add_argument("--workdir", help="working directory for temporary files", required=False,default="/tmp")
parser.add_argument("--bidsconfig", help="path to BIDS config file", required=True)
parser.add_argument("--bidsaction", help="path to BIDS action file", required=False)
parser.add_argument("--overwrite", help="Overwrite NIFTI files if they exist")
parser.add_argument("--cleanup", help="Attempt to clean up temporary files")
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
args, unknown_args = parser.parse_known_args()
subject = args.subject
session_label = args.session_label
if session_label is None:
session_label='nosession'
if not session_label:
session_label='nosession'
overwrite = isTrue(args.overwrite)
cleanup = isTrue(args.cleanup)
dicomdir = args.dicomdir
niftidir = args.niftidir
workdir = args.workdir
logdir = niftidir + "/logs"
bidsactionfile = args.bidsaction
if bidsactionfile is None:
bidsactionfile=''
dcm2bids_config = args.bidsconfig
proc_steps = args.proc_steps
if proc_steps is None:
proc_steps = ''
if not proc_steps:
proc_steps = 'bids'
# Set up working directory
if not os.access(niftidir, os.R_OK):
os.mkdir(niftidir)
if not os.access(logdir, os.R_OK):
os.mkdir(logdir)
# set up log file
TIMESTAMP = datetime.datetime.now().strftime("%m%d%y%H%M%S%p")
LOGFILENAME = 'xnatSession_' + TIMESTAMP + '.log'
LOGFILENAME = os.path.join(logdir,LOGFILENAME)
LOGFILE = open(LOGFILENAME,'w+')
# Download and convert Dicoms to BIDS format
if 'bids' in proc_steps:
os.chdir(workdir)
# find step-specific parameters
step_info=''
proc_steps_list=proc_steps.split(",");
for step_item in proc_steps_list:
if 'bids:' in step_item:
step_info = step_item
break
resourceExists = os.listdir(niftidir)
if not resourceExists or overwrite:
if overwrite:
if session_label == "nosession":
dcm2bids_command = "dcm2bids -d {} -p {} -c {} -o {} --clobber".format(dicomdir, subject, dcm2bids_config, niftidir ).split()
else:
dcm2bids_command = "dcm2bids -d {} -p {} -s {} -c {} -o {} --clobber".format(dicomdir, subject, session_label, dcm2bids_config, niftidir ).split()
else:
if session_label == "nosession":
dcm2bids_command = "dcm2bids -d {} -p {} -c {} -o {}".format(dicomdir, subject, dcm2bids_config, niftidir ).split()
else:
dcm2bids_command = "dcm2bids -d {} -p {} -s {} -c {} -o {}".format(dicomdir, subject, session_label, dcm2bids_config, niftidir ).split()
logtext(LOGFILE, ' '.join(dcm2bids_command))
logtext(LOGFILE, str(subprocess.check_output(dcm2bids_command)))
#delete temporary folder
tmpBidsDir=os.path.join(niftidir,'tmp_dcm2bids')
if cleanup:
try:
logtext(LOGFILE,'Cleaning up %s directory.' % tmpBidsDir)
rmtree(tmpBidsDir)
except OSError:
logtext(LOGFILE, 'problem deleting tmp_dcm2bids directory due to OS error. Please delete manually')
# perform deface
createDatasetDescription(niftidir, "PROJECTNAME")
layout = BIDSLayout(niftidir)
T1w=layout.get(subject=subject, suffix='T1w', extension='nii.gz')
for t1w in T1w:
t1wpath=t1w.path
deface_command = "pydeface --force {}".format(t1wpath).split()
logtext(LOGFILE,"Executing command: " + " ".join(deface_command))
logtext(LOGFILE,subprocess.check_output(deface_command))
logtext (LOGFILE,"Get project BIDS bidsaction map")
if os.path.exists(bidsactionfile):
with open(bidsactionfile) as f:
action = json.load(f)
try:
copyitems = action['copy']
except KeyError:
copyitems = []
logtext (LOGFILE, 'No copy items provided.')
for item in copyitems:
entities={}
entities['extension']=['nii','nii.gz']
try:
dataType = item["dataType"]
entities['datatype']=dataType
except KeyError:
dataType = None
try:
modalityLabel = item["modalityLabel"]
entities['suffix']=modalityLabel
except KeyError:
modalityLabel = None
try:
customLabels = item["customLabels"]
labels = customLabels.split("_")
subjectbids=list(filter(lambda x: "sub-" in x, labels))
if subjectbids:
subjectValue=subjectbids[0].split('-')[1]
entities['subject']=subjectValue
else:
entities['subject']=subject
sessionbids=list(filter(lambda x: "ses-" in x, labels))
if sessionbids:
sessionValue=sessionbids[0].split('-')[1]
entities['session']=sessionValue
elif session_label != "nosession":
entities['session']=session_label
task=list(filter(lambda x: "task-" in x, labels))
if task:
taskValue=task[0].split('-')[1]
entities['task']=taskValue
acquisition=list(filter(lambda x: "acq-" in x, labels))
if acquisition:
acquisitionValue=acquisition[0].split('-')[1]
entities['acquisition']=acquisitionValue
run=list(filter(lambda x: "run-" in x, labels))
if run:
runValue=run[0].split('-')[1]
entities['run']=runValue
except KeyError:
customLabels= None
entities['subject']=subject
if session_label != "nosession":
entities['session']=session_label
files = layout.get(return_type='file', **entities)
if files:
sourcefile = files[0]
entities = layout.parse_file_entities(sourcefile)
entities['extension'] = 'json'
files = layout.get(return_type='file', **entities)
if files:
sourcejson = files[0]
else:
sourcejson = None
else:
sourcefile = None
try:
destination = item["destination"]
except KeyError:
destination = []
logtext (LOGFILE, 'No Destination provided for copy')
if destination and sourcefile and sourcejson:
entities['subject']=subject
try:
dataType = destination["dataType"]
entities['datatype']=dataType
except KeyError:
dataType = None
try:
modalityLabel = destination["modalityLabel"]
entities['suffix']=modalityLabel
except KeyError:
modalityLabel = None
try:
customLabels = destination["customLabels"]
labels = customLabels.split("_")
sessionbids=list(filter(lambda x: "ses-" in x, labels))
if sessionbids:
sessionValue=sessionbids[0].split('-')[1]
entities['session']=sessionValue
task=list(filter(lambda x: "task-" in x, labels))
if task:
taskValue=task[0].split('-')[1]
entities['task']=taskValue
else:
entities.pop('task', None)
acquisition=list(filter(lambda x: "acq-" in x, labels))
if acquisition:
acquisitionValue=acquisition[0].split('-')[1]
entities['acquisition']=acquisitionValue
else:
entities.pop('acquisition', None)
run=list(filter(lambda x: "run-" in x, labels))
if run:
runValue=run[0].split('-')[1]
entities['run']=runValue
else:
entities.pop('run', None)
entities['extension']='nii.gz'
outputfile=os.path.join(niftidir, layout.build_path(entities))
if os.path.exists(sourcefile):
logtext (LOGFILE, "copying %s to %s" %(sourcefile, outputfile))
subprocess.check_output(['cp',sourcefile,outputfile])
else:
logtext (LOGFILE, "ERROR: %s cannot be found. Check bidsaction file logic." % sourcefile)
entities['extension']='json'
outputjson=os.path.join(niftidir, layout.build_path(entities))
if os.path.exists(sourcejson):
logtext (LOGFILE, "copying %s to %s" %(sourcejson, outputjson))
subprocess.check_output(['cp',sourcejson, outputjson])
else:
logtext (LOGFILE, "ERROR: %s cannot be found. Check bidsaction file logic." % sourcejson)
except KeyError:
customLabels= None
else:
logtext (LOGFILE,"Destination or source file could not be found - skipping")
else:
logtext (LOGFILE,"Could not read project BIDS action file - continuing with upload")
##########
LOGFILE.flush()
else:
message = 'Looks like Dcm2bids has already been run. If you want to rerun then set overwrite flag to True.'
logtext (LOGFILE, message)
logtext (LOGFILE, 'All done with session processing.')
LOGFILE.close()
|
py | 1a4d061d0e99a4312f91147a21c4bff9d9050561 | import os
import requests
import json
import pandas as pd
from datetime import datetime, timedelta
ENV = "sandbox" #Use "sandbox" when testing, and "api" if you have an account at Tradier
API_TOKEN = "" #Fill in your Tradier API Token here
###
#Script starts here
###
def main():
#Get list of symbols from file
filename_in = "symbols.csv"
listOfSymbols = importCSV(filename_in)
#Find Cash Secured Puts
#Parameters: Symbols, min DTE, max DTE
findCashSecuredPuts(listOfSymbols, 10, 47)
###
#API Functions
###
#Get Data from Tradier API
def getAPIData(url):
bearer_token = f"Bearer {API_TOKEN}"
headers={'Authorization': bearer_token, 'Accept': 'application/json'}
response = requests.get(url, headers=headers)
if response.status_code == 200:
return json.loads(response.content.decode('utf-8'))
#Get all the upcoming expirations for given symbol
def getOptionExpirations(symbol):
url = f"https://{ENV}.tradier.com/v1/markets/options/expirations?symbol={symbol}"
expirations_data = getAPIData(url)
expirations = []
if (expirations_data['expirations']):
expirations = expirations_data['expirations']['date']
return expirations
#Retrieve the options chain for given symbol and expiration
def getOptionsChain(symbol, expiration):
url = f"https://{ENV}.tradier.com/v1/markets/options/chains?symbol={symbol}&expiration={expiration}&greeks=true"
options_chain_data = getAPIData(url)
options_chain = []
if (options_chain_data['options']):
options_chain = options_chain_data['options']['option']
return options_chain
#Retrieves latest stock price from Tradier Market API
def getLastStockPrice(symbol):
url = f"https://{ENV}.tradier.com/v1/markets/quotes?symbols={symbol}"
quote_data = getAPIData(url)
last_price = -1
if ('quote' in quote_data['quotes']):
last_price = quote_data['quotes']['quote']['last']
return last_price
###
#Utility functions
###
#Import CSV files using Pandas library
def importCSV(filename_in):
data = pd.read_csv(filename_in)
symbols = data['Symbol'].to_list()
return symbols
#Limit expirations of symbol to provided min_dte (Min Days Until Expiration) and max_dte (Max Days Until Expiration)
def listOfLimitedExpirations(symbol, min_dte, max_dte):
#Get option expirations for symbol
expirations_list = getOptionExpirations(symbol)
expirations = []
if(isinstance(expirations_list, str)):
return []
for expiration_date in expirations_list:
#Extract dates within set DTE
date_object = datetime.strptime(expiration_date,"%Y-%m-%d")
expiration_min_date = datetime.now() + timedelta(min_dte)
expiration_max_date = datetime.now() + timedelta(max_dte)
if (date_object <= expiration_min_date):
continue
if (date_object >= expiration_max_date):
continue
expirations.append(expiration_date)
return expirations
def exportToFile(data, filename_out):
output = pd.DataFrame(data, columns=['Symbol','Expiration','Strike','Bid','Ask','Volume','Delta','Premium'])
output.to_csv(filename_out,index=False)
#Creates a new dictionary with options data
def gatherOptionData(option):
option_data = {}
option_data['symbol'] = option['underlying']
option_data['type'] = option['option_type']
option_data['expiration'] = option['expiration_date']
option_data['strike'] = option['strike']
option_data['bid'] = option['bid']
option_data['ask'] = option['ask']
option_data['volume'] = option['volume']
option_data['open_int'] = option['open_interest']
#Add necessary greeks here
option_greeks = option.get('greeks',None)
if (option_greeks):
option_data['delta'] = option_greeks['delta']
option_data['theta'] = option_greeks['theta']
option_data['gamma'] = option_greeks['gamma']
return option_data
###
# Main function for filtering the PUT options we are looking for
# You will have to set your own critera
# Generally, for minimum critera, you want:
# tight bid/ask spreads (under .15)
# Some liquidity (Looking for volume greater than 0)
# Certain delta, minium premium, etc.
###
def findCashSecuredPuts(ListOfSymbols, minDays, maxDays):
#Adjust these according to your criteria
MAX_BID_ASK_SPREAD = .15
MIN_PRICE = 10
MAX_PRICE = 70
MIN_PREM = .30
MAX_DELTA = -.2
matching_options = []
data_frame = []
for symbol in ListOfSymbols:
print(f"Processing {symbol}...")
#Depending on your list of symbols, you may want to filter by current price, since you will need buying power
last_price = getLastStockPrice(symbol)
if (last_price <= MIN_PRICE or last_price >= MAX_PRICE):
continue
#We only want options expiring within a certain timeframe
expirations_list = listOfLimitedExpirations(symbol, minDays, maxDays)
numOptions = 0
for expiration in expirations_list:
#First we need the options chain
options = getOptionsChain(symbol, expiration)
for option_item in options:
#This will just gather data from option into a more useful dictionary
option = gatherOptionData(option_item)
#Start filtering by your criteria here
#Make sure there is a bid/ask, otherwise there's probably no liquidity
if (option['bid'] is None or option['ask'] is None):
continue
#Estimated premium (this goes by the approx mid price)
premium = round((option['bid'] + option['ask']) / 2,2)
#Check for delta if it exists
delta = -999
if ('delta' in option):
delta = option['delta']
#Filter out the options we actually want
if (option['type'] == "put"
and option['bid'] > 0
and delta >= MAX_DELTA
and premium >= MIN_PREM
and (option['ask'] - option['bid']) <= MAX_BID_ASK_SPREAD
and option['volume'] > 0
):
#Format the output
option_output = '{}, {}, BID:{}, ASK:{}, {}, {}(D), Premium: {}'\
.format(
option['expiration'],
option['strike'],
option['bid'],
option['ask'],
option['volume'],
delta,
premium)
#Separate by symbol
if (numOptions == 0):
matching_options.append(f"Symbol: {symbol}")
numOptions += 1
#Print the screen when a match is found
print(f"Wheel: {option_output}")
#Add data to Pandas DataFrame
data_frame.append([symbol,
option['expiration'],
option['strike'],
option['bid'],
option['ask'],
option['volume'],
delta,
premium])
#Export results to a new csv file
exportToFile(data_frame, "output_cash_secured_puts.csv")
if __name__ == '__main__':
main()
|
py | 1a4d06d1e0016ccf0ffcde6034d12f0d0ac85d75 | '''
# Functions
'''
import cv2
import numpy as np
import platform
import time
import sys
from rmracerlib import config as cfg
# NOTE: the constants below are assumed defaults needed by contours_detection; the
# original project presumably defines them (e.g. in rmracerlib.config), so adjust as needed.
font = cv2.FONT_HERSHEY_SIMPLEX
AREA_SIZE = getattr(cfg, "AREA_SIZE", 300)
def contours_detection(mask, frame):
# find shapes
# contours detection
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contours:
area = cv2.contourArea(cnt)
approx = cv2.approxPolyDP(cnt, 0.01 * cv2.arcLength(cnt, True), True)
if area > AREA_SIZE:
if len(approx) == 8: # eight vertices -> octagon, i.e. a stop sign
cv2.drawContours(frame, [approx], 0, (0,0,0), 5)
x = approx.ravel()[0]
y = approx.ravel()[1]
cv2.putText(frame, "STOP", (x,y), font, 1, (0,0,0))
return "stop"
# nothing
return None
###
###
### HELPER FUNCTIONS
###
###
def valid_range(x, y, w, h, frame):
'''
    Return True if the given ROI lies in a valid (acceptable) part of the frame.
    This filters out regions near the frame edges, which tend to contain reflections.
'''
left_buf = 10
right_buf = 40
top_buf = 10
centre_buf = 25
height, width = frame.shape[:2]
h_top = int(height / cfg.VR_TOP) # previously h1
h_bot = int(height / cfg.VR_BOTTOM) # previously horizon
v0 = left_buf # furthest left width
v1 = int(width/3) # 1/3rd width
v2 = v1*2 # 2/3rd width
v3 = width - right_buf # furthest right width
if cfg.DRAW_RANGE:
cv2.line(frame, (0, h_top ), (width, h_top ), (255,0,255))
cv2.line(frame, (0, h_bot ), (width, h_bot ), (0,255,255))
    # width and height checks both start False and are only set True when satisfied
    cw = False
    ch = False
if ( (v0 < x < v1) or (v2 < x < v3) ) and ( (v0 < x+w < v1) or (v2 < x+w < v3) ):
cw = True
if (h_top < y < h_bot) and (h_top < y+h < h_bot): #h0 < y < h2:
ch = True
if ch and cw:
return True
else:
return False
def is_squarish(height, width):
# calculate ratio of sides - anything not square is not worth checking
a = height / width
b = width / height
if (0.5 < a < 2.0) and (0.5 < b < 2.0):
return True
else:
return False
def sign_direction(img):
"""
Turning Sign Detection Part 1
Reads in a ROI and outputs either: right, left or None
"""
# sharpen the ROI so it is clearer for detection
sharpen = cv2.GaussianBlur(img, (3,3), 3)
sharpen = cv2.addWeighted(img, 1.5, sharpen, -0.5, 0)
# convert image to binary
grey = cv2.cvtColor(sharpen, cv2.COLOR_BGR2GRAY)
thresh, binary = cv2.threshold(grey, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
# get picture shape information for selecting a smaller ROI
height, width = binary.shape[:2]
# CHECK 1 - calculate ratio of sides - anything not square is not worth checking
a = height / width
b = width / height
if (0.5 < a < 2.0) and (0.5 < b < 2.0):
pass
else:
return None
# CHECK 2 - check the mix of white and black pixels to eliminate false detections
# calculate total number of pixels (TODO: faster way)
total = height * width
# calculate ratios
n_white_pix = np.sum(binary == 255)
w_ratio = int(n_white_pix / total * 100)
n_black_pix = np.sum(binary == 0)
b_ratio = int(n_black_pix / total * 100)
# check
if ( ( 40 <= w_ratio <= 60 ) and ( 40 <= b_ratio <= 60 ) ):
# run the sign detection algorithm
result = direction_check(binary)
if result is not None:
return result
# if we fail any tests, return None
return None
def direction_check(binary):
"""
    Turning Sign Detection Part 2
Checks the sign direction based on relevant information in the image
"""
# extract image information
height, width = binary.shape[:2]
# set up our regions at 25%, 50% and 75% marks
# we are only going to look at the center of the binary image
h1 = int(height/4) # was 0
h2 = int(height/2)
h3 = int(h1+h2) # was height
v1 = int(width/4) # was 0
v2 = int(width/2)
v3 = int(v1+v2) # was width
# quadrants / regions
q1Block = binary[h1:h2, v2:v3]
q2Block = binary[h1:h2, v1:v2]
q3Block = binary[h2:h3, v1:v2]
q4Block = binary[h2:h3, v2:v3]
# add up the number of white pixels in each quadrant.
q1Sum = np.sum(q1Block == 255)
q2Sum = np.sum(q2Block == 255)
q3Sum = np.sum(q3Block == 255)
q4Sum = np.sum(q4Block == 255)
# information search - check which region has the most white pixels and then
# determine if the sign is left or right.
if q4Sum > q3Sum: #and q1Sum < q2Sum:
#print("guess: left")
return "left"
elif q4Sum < q3Sum: #and q1Sum > q2Sum:
#print("guess: right")
return "right"
else:
return None
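
# Minimal usage sketch (not part of the original module): load a cropped candidate
# sign ROI from disk and run the two-stage turning-sign check. The file name
# "sign_roi.png" is an illustrative assumption.
if __name__ == "__main__":
    roi = cv2.imread("sign_roi.png")
    if roi is None:
        print("sign_roi.png not found")
    else:
        print("detected direction:", sign_direction(roi))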
|
py | 1a4d06e000aa3df3f10614b5a109a20f7318be3a | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open("README.rst") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst") as history_file:
history = history_file.read()
requirements = ["Click>=6.0",
"watchdog",
"requests",
"pytesseract",
"pdf2image",
"PyPDF2",
"unidecode",
]
setup_requirements = []
test_requirements = []
setup(
author="Justin Keller",
author_email="[email protected]",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
description="REST API Uploader",
entry_points={"console_scripts": ["rest_uploader=rest_uploader.cli:main"]},
install_requires=requirements,
license="MIT license",
long_description=readme + "\n\n" + history,
include_package_data=True,
keywords=["rest_uploader", "joplin", "rest-uploader"],
name="rest_uploader",
packages=find_packages(include=["rest_uploader"]),
setup_requires=setup_requirements,
test_suite="tests",
tests_require=test_requirements,
url="https://github.com/kellerjustin/rest-uploader",
version="0.3.0",
zip_safe=False,
)
|
py | 1a4d06f2ab29926a76bf6772b5564393704bc779 | from subprocess import run, PIPE
from pydub import AudioSegment
from platform import system
from os import path as pth
from os import remove
sys = system()
supported = ["Windows", "Linux", "Darwin"]
if sys not in supported:
raise RuntimeError("Invalid operating system. pyARSS only supports Windows, MacOS and Linux")
# Set ARSS run command for the correct platform
modpth = pth.abspath(__file__)
moddir = pth.dirname(modpth)
if sys == "Windows":
    cmd = pth.join(moddir, "bin_windows", "arss")
elif sys == "Linux":
    cmd = pth.join(moddir, "bin_linux", "arss")  # POSIX path separators on Linux
else:
cmd = "arss" # MacOS requires ARSS to be directly installed via the installer.
# Encodes audio file `input_path` into image file `output_path` using ARSS.
def Encode(input_path : str, output_path : str,
min_frequency = 27,
max_frequency = 20000,
pps = 100,
bpo = 48):
# Change paths to absolute paths to avoid errors with ARSS.
input_path = pth.abspath(input_path)
output_path = pth.abspath(output_path)
temp = False
if not input_path.endswith(".wav"):
# Assume we need to convert the file.
if not input_path.endswith(".mp3"):
# Validate input_path input.
# Raise an error if the user inputted an invalid file type.
if "." in input_path:
raise ValueError("The input_path must be a WAV file or an MP3 file.")
# Raise an error if the user inputted a directory.
elif input_path[-1] in ["\\", "/"]:
raise ValueError("The input_path must be a file path, not a directory.")
# Raise generic error.
else:
raise ValueError("The input_path must contain a path to an MP3 or WAV file.")
# Generate temporary WAV file from MP3.
new = AudioSegment.from_mp3(input_path)
new.export("temp.wav", format="wav")
temp = True
# Validate output_path input.
if not output_path.endswith(".bmp"):
# Raise an error if the user inputted an invalid file type.
if "." in output_path:
raise ValueError("The output_path must be a BMP file.")
# Raise an error if the user inputted a directory.
elif output_path[-1] in ["\\", "/"]:
raise ValueError("The output_path must be a file path, not a directory.")
# Raise generic error.
else:
raise ValueError("The output_path must contain a path to the new BMP file.")
# Run the main ARSS executable.
result = run([
cmd, "-q",
pth.abspath("temp.wav") if temp \ # Input file
else input_path,
output_path, # Output file
"--analysis", # Type
"--min-freq", str(min_frequency), # Minimum frequency
"--max-freq", str(max_frequency), # Maximum frequency
"--pps", str(pps), # Time resolution (pixels per second)
"--bpo", str(bpo) # Frequency resolution (bands per octave)
], stderr=PIPE, universal_newlines=True)
# Remove temporary WAV file.
if temp:
remove("temp.wav")
# Check and raise ARSS errors.
if result.returncode != 0:
raise RuntimeError(result.stderr)
# Decodes image file `input_path` into audio file `output_path` using ARSS.
def Decode(input_path : str, output_path : str,
min_frequency = 27,
max_frequency = 20000,
sample_rate = 44100,
sine = True,
pps = 100,
bpo = 48):
# Change paths to absolute paths to avoid errors with ARSS.
input_path = pth.abspath(input_path)
output_path = pth.abspath(output_path)
# Validate input_path input.
if not input_path.endswith(".bmp"):
# Raise an error if the user inputted an invalid file type.
if "." in input_path:
raise ValueError("The input_path must be a BMP file.")
# Raise an error if the user inputted a directory.
elif input_path[-1] in ["\\", "/"]:
raise ValueError("The input_path must be a file path, not a directory.")
# Raise generic error.
else:
raise ValueError("The input_path must contain a path to the BMP file.")
# Validate output_path input.
if not output_path.endswith(".wav") and not output_path.endswith(".mp3"):
# Raise an error if the user inputted an invalid file type.
if "." in output_path:
raise ValueError("The output_path must be a WAV file or an MP3 file.")
# Raise an error if the user inputted a directory.
elif output_path[-1] in ["\\", "/"]:
raise ValueError("The output_path must be a file path, not a directory.")
# Raise generic error.
else:
raise ValueError("The output_path must contain a path to an MP3 or WAV file.")
# Should pyARSS create a temporary WAV file?
# ARSS only supports waveform files.
if not output_path.endswith(".wav"):
temp = True
else:
temp = False
# Run the main ARSS executable.
result = run([
cmd, "-q",
input_path, # Input file
pth.abspath("temp.wav") if temp \
else output_path, # Output file
"--sine" if sine else "--noise", # Type
"--sample-rate", str(sample_rate), # Sample rate
"--min-freq", str(min_frequency), # Minimum frequency
# "--max-freq", str(max_frequency), # Maximum frequency -- TODO: ARSS: "You have set one parameter too many"
"--pps", str(pps), # Time resolution (pixels per second)
"--bpo", str(bpo) # Frequency resolution (bands per octave)
], stderr=PIPE, universal_newlines=True)
# Raise error if ARSS failed.
if result.returncode != 0:
try:
# Attempt to remove the temporary WAV file if it was generated.
remove("temp.wav")
except:
pass
raise RuntimeError(result.stderr)
# Convert the file if required.
if temp:
# Load WAV and convert MP3.
new = AudioSegment.from_wav("temp.wav")
new.export(output_path, format="mp3")
# Remove temporary WAV file.
remove("temp.wav")
|
py | 1a4d0798c883892cced5903b03df1b7831dc9f67 | from pyspark.sql.types import *
from optimus import Optimus
from optimus.helpers.json import json_enconding
from optimus.helpers.functions import deep_sort
import unittest
from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector
import numpy as np
nan = np.nan
import datetime
from pyspark.sql import functions as F
op = Optimus(master='local')
source_df=op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
class Test_df_cols(unittest.TestCase):
maxDiff = None
@staticmethod
def test_cols_abs():
actual_df =source_df.cols.abs('height(ft)')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", 28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_abs_all_columns():
actual_df =source_df.cols.abs('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", 28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_add():
actual_df =source_df.cols.add(['height(ft)','rank'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('sum', FloatType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, -18.0), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 24.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 33.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 21.0), ('Megatron', None, 'None', 10.0, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 308.0), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_add_all_columns():
actual_df =source_df.cols.add('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', FloatType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('sum', FloatType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000.0, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 4999986.5), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000.0, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 5000026.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000.0, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 5000037.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000.0, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 5000023.0), ('Megatron', None, 'None', 10.0, 5000000.0, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000.0, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_append_number():
actual_df =source_df.cols.append('new col',1)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('new col', IntegerType(), False)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 1), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 1), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 1), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 1), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, 1), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 1), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, 1)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_cast():
actual_df =source_df.cols.cast('function','string')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_cast_all_columns():
actual_df =source_df.cols.cast('*','string')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', StringType(), True),('function', StringType(), True),('rank', StringType(), True),('age', StringType(), True),('weight(t)', StringType(), True),('japanese name', StringType(), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', StringType(), True),('Date Type', StringType(), True),('timestamp', StringType(), True),('Cybertronian', StringType(), True),('function(binary)', StringType(), True),('NullType', StringType(), True)], [("Optim'us", '-28', 'Leader', '10', '5000000', '4.3', '[Inochi, Convoy]', '19.442735,-99.201111', '1980/04/10', '2016/09/10', '[8.5344, 4300.0]', '2016-09-10', '2014-06-24 00:00:00', 'true', 'Leader', None), ('bumbl#ebéé ', '17', 'Espionage', '7', '5000000', '2.0', '[Bumble, Goldback]', '10.642707,-71.612534', '1980/04/10', '2015/08/10', '[5.334, 2000.0]', '2015-08-10', '2014-06-24 00:00:00', 'true', 'Espionage', None), ('ironhide&', '26', 'Security', '7', '5000000', '4.0', '[Roadbuster]', '37.789563,-122.400356', '1980/04/10', '2014/07/10', '[7.9248, 4000.0]', '2014-06-24', '2014-06-24 00:00:00', 'true', 'Security', None), ('Jazz', '13', 'First Lieutenant', '8', '5000000', '1.8', '[Meister]', '33.670666,-117.841553', '1980/04/10', '2013/06/10', '[3.9624, 1800.0]', '2013-06-24', '2014-06-24 00:00:00', 'true', 'First Lieutenant', None), ('Megatron', None, 'None', '10', '5000000', '5.7', '[Megatron]', None, '1980/04/10', '2012/05/10', '[, 5700.0]', '2012-05-10', '2014-06-24 00:00:00', 'true', 'None', None), ('Metroplex_)^$', '300', 'Battle Station', '8', '5000000', None, '[Metroflex]', None, '1980/04/10', '2011/04/10', '[91.44,]', '2011-04-10', '2014-06-24 00:00:00', 'true', 'Battle Station', None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_clip():
actual_df =source_df.cols.clip('rank',3,5)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', IntegerType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 5, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 5, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 5, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 5, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 5, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 5, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_clip_all_columns():
actual_df =source_df.cols.clip('*',3,5)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', IntegerType(), True),('function', StringType(), True),('rank', IntegerType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", 3, 'Leader', 5, 5, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 5, 'Espionage', 5, 5, 3.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 5, 'Security', 5, 5, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 5, 'First Lieutenant', 5, 5, 3.0, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 5, 5, 5.0, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 5, 'Battle Station', 5, 5, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_count():
actual_df =source_df.cols.count()
actual_df =json_enconding(actual_df)
expected_value =json_enconding(16)
assert (expected_value == actual_df)
def test_cols_count_by_dtypes(self):
source_df=op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
actual_df =source_df.cols.count_by_dtypes('*',infer=False)
expected_value ={'names': {'null': 1, 'missing': 0, 'string': 6}, 'height(ft)': {'null': 2, 'missing': 0, 'int': 5}, 'function': {'null': 1, 'missing': 0, 'string': 6}, 'rank': {'null': 1, 'missing': 0, 'int': 6}, 'age': {'null': 1, 'missing': 0, 'int': 6}, 'weight(t)': {'null': 2, 'missing': 0, 'decimal': 5}, 'japanese name': {'null': 1, 'missing': 0, 'array': 6}, 'last position seen': {'null': 3, 'missing': 0, 'string': 4}, 'date arrival': {'null': 1, 'missing': 0, 'string': 6}, 'last date seen': {'null': 1, 'missing': 0, 'string': 6}, 'attributes': {'null': 1, 'missing': 0, 'array': 6}, 'Date Type': {'null': 1, 'missing': 0, 'date': 6}, 'timestamp': {'null': 1, 'missing': 0, 'date': 6}, 'Cybertronian': {'null': 1, 'missing': 0, 'boolean': 6}, 'function(binary)': {'null': 1, 'missing': 0, 'binary': 6}, 'NullType': {'null': 7, 'missing': 0}}
self.assertDictEqual(deep_sort(expected_value), deep_sort(actual_df))
def test_cols_count_by_dtypes_infer(self):
source_df=op.create.df([('col 1', StringType(), True),('col 2', StringType(), True),('col 3', IntegerType(), True)], [('male', 'male', 1), ('optimus', 'bumblebee', 1), ('3', '4.1', 1), ('true', 'False', 1), ('[1,2,3,4]', '(1,2,3,4)', 1), ('{1,2,3,4}', "{'key1' :1 , 'key2':2}", 1), ('1.1.1.1', '123.123.123.123', 1), ('http://hi-optimuse.com', 'https://hi-bumblebee.com', 1), ('[email protected]', '[email protected]', 1), ('5123456789123456', '373655783158306', 1), ('11529', '30345', 1), ('04/10/1980', '04/10/1980', 1), ('null', 'Null', 1), ('', '', 1), (None, None, 1)])
actual_df =source_df.cols.count_by_dtypes('*',infer=True)
expected_value ={'col 1': {'gender': 1, 'string': 1, 'int': 1, 'boolean': 1, 'array': 1, 'object': 1, 'ip': 1, 'url': 1, 'email': 1, 'credit_card_number': 1, 'zip_code': 1, 'date': 1, 'null': 2, 'missing': 1, 'decimal': 0}, 'col 2': {'gender': 1, 'string': 1, 'decimal': 1, 'boolean': 1, 'array': 1, 'object': 1, 'ip': 1, 'url': 1, 'email': 1, 'credit_card_number': 1, 'zip_code': 1, 'date': 1, 'null': 2, 'missing': 1, 'int': 0}, 'col 3': {'int': 15}}
self.assertDictEqual(deep_sort(expected_value), deep_sort(actual_df))
def test_cols_count_by_dtypes_mismatch(self):
source_df=op.create.df([('names', StringType(), True),('height(ft)', IntegerType(), True),('function', StringType(), True),('rank', IntegerType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('DateType', DateType(), True),('Timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), False),('NullType', NullType(), True)], [('31/12/2019', 28, '1978-12-20', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), ('1', 2, '3', 4, 5, 6.0, ['7'], '8', '1980/04/10', '2011/04/10', [11.0], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'15'), None)])
actual_df =source_df.cols.count_by_dtypes('*',infer=False,mismatch={'names': 'dd/mm/yyyy', 'height(ft)': '^([0-2][0-9]|(3)[0-1])(\\/)(((0)[0-9])|((1)[0-2]))(\\/)\\d{4}$', 'function': 'yyyy-mm-dd'})
expected_value ={'names': {'null': 0, 'missing': 0, 'string': 7, 'mismatch': 6}, 'height(ft)': {'null': 1, 'missing': 0, 'int': 6, 'mismatch': 0}, 'function': {'null': 0, 'missing': 0, 'string': 7, 'mismatch': 6}, 'rank': {'null': 0, 'missing': 0, 'int': 7, 'mismatch': 0}, 'age': {'null': 0, 'missing': 0, 'int': 7, 'mismatch': 0}, 'weight(t)': {'null': 1, 'missing': 0, 'decimal': 6, 'mismatch': 0}, 'japanese name': {'null': 0, 'missing': 0, 'array': 7, 'mismatch': 0}, 'last position seen': {'null': 2, 'missing': 0, 'string': 5, 'mismatch': 0}, 'date arrival': {'null': 0, 'missing': 0, 'string': 7, 'mismatch': 0}, 'last date seen': {'null': 0, 'missing': 0, 'string': 7, 'mismatch': 0}, 'attributes': {'null': 0, 'missing': 0, 'array': 7, 'mismatch': 0}, 'DateType': {'null': 0, 'missing': 0, 'date': 7, 'mismatch': 0}, 'Timestamp': {'null': 0, 'missing': 0, 'date': 7, 'mismatch': 0}, 'Cybertronian': {'null': 0, 'missing': 0, 'boolean': 7, 'mismatch': 0}, 'function(binary)': {'null': 0, 'missing': 0, 'binary': 7, 'mismatch': 0}, 'NullType': {'null': 7, 'missing': 0, 'mismatch': 0}}
self.assertDictEqual(deep_sort(expected_value), deep_sort(actual_df))
@staticmethod
def test_cols_count_na():
actual_df =source_df.cols.count_na('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(2)
assert (expected_value == actual_df)
@staticmethod
def test_cols_count_na_all_columns():
actual_df =source_df.cols.count_na('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': 1, 'height(ft)': 2, 'function': 1, 'rank': 1, 'age': 1, 'weight(t)': 2, 'japanese name': 1, 'last position seen': 3, 'date arrival': 1, 'last date seen': 1, 'attributes': 1, 'Date Type': 1, 'timestamp': 1, 'Cybertronian': 1, 'function(binary)': 1, 'NullType': 7})
assert (expected_value == actual_df)
@staticmethod
def test_cols_count_uniques():
actual_df =source_df.cols.count_uniques('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(5)
assert (expected_value == actual_df)
@staticmethod
def test_cols_count_uniques_all_columns():
actual_df =source_df.cols.count_uniques('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': 5, 'height(ft)': 5, 'function': 6, 'rank': 3, 'age': 1, 'weight(t)': 5, 'japanese name': 6, 'last position seen': 4, 'date arrival': 1, 'last date seen': 6, 'attributes': 6, 'Date Type': 6, 'timestamp': 1, 'Cybertronian': 1, 'function(binary)': 6, 'NullType': 0})
assert (expected_value == actual_df)
@staticmethod
def test_cols_count_zeros():
actual_df =source_df.cols.count_zeros('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(0)
assert (expected_value == actual_df)
@staticmethod
def test_cols_count_zeros_all_columns():
actual_df =source_df.cols.count_zeros('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': 0, 'height(ft)': 0, 'function': 0, 'rank': 0, 'age': 0, 'weight(t)': 0, 'last position seen': 0, 'date arrival': 0, 'last date seen': 0})
assert (expected_value == actual_df)
@staticmethod
def test_cols_date_transform():
actual_df =source_df.cols.date_transform('date arrival','yyyy/MM/dd','dd-MM-YYYY')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '10-04-1980', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '10-04-1980', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '10-04-1980', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '10-04-1980', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '10-04-1980', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '10-04-1980', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_date_transform_all_columns():
actual_df =source_df.cols.date_transform(['date arrival','last date seen'],'yyyy/MM/dd','dd-MM-YYYY')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '10-04-1980', '10-09-2016', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '10-04-1980', '10-08-2015', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '10-04-1980', '10-07-2014', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '10-04-1980', '10-06-2013', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '10-04-1980', '10-05-2012', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '10-04-1980', '10-04-2011', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_div():
actual_df =source_df.cols.div(['height(ft)','rank'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('div', DoubleType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, -2.8), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 2.4285714285714284), ('ironhide&', 26.0, 'Security', 7.0, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 3.7142857142857144), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 1.625), ('Megatron', None, 'None', 10.0, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 37.5), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_div_all_columns():
actual_df =source_df.cols.div('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', FloatType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('div', DoubleType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000.0, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, -1.302325523628167e-07), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000.0, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 2.428571428571428e-07), ('ironhide&', 26.0, 'Security', 7.0, 5000000.0, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 1.8571428571428572e-07), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000.0, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 1.8055556033864447e-07), ('Megatron', None, 'None', 10.0, 5000000.0, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000.0, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_drop():
actual_df =source_df.cols.drop('rank')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_dtypes():
actual_df =source_df.cols.dtypes('rank')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'rank': 'tinyint'})
assert (expected_value == actual_df)
@staticmethod
def test_cols_dtypes_all_columns():
actual_df =source_df.cols.dtypes('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': 'string', 'height(ft)': 'smallint', 'function': 'string', 'rank': 'tinyint', 'age': 'int', 'weight(t)': 'float', 'japanese name': 'array<string>', 'last position seen': 'string', 'date arrival': 'string', 'last date seen': 'string', 'attributes': 'array<float>', 'Date Type': 'date', 'timestamp': 'timestamp', 'Cybertronian': 'boolean', 'function(binary)': 'binary', 'NullType': 'null'})
assert (expected_value == actual_df)
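# cols.fill_na replaces nulls with the given value; note in the expected DataFrames below that
# filling a numeric column with a string such as '1' or '2' casts the filled columns to DoubleType.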
@staticmethod
def test_cols_fill_na():
actual_df =source_df.cols.fill_na('height(ft)','1')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', DoubleType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28.0, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17.0, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26.0, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13.0, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', 1.0, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300.0, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, 1.0, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_fill_na_all_columns():
actual_df =source_df.cols.fill_na(['names','height(ft)','function','rank','age'],'2')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', DoubleType(), True),('function', StringType(), True),('rank', DoubleType(), True),('age', DoubleType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000.0, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000.0, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26.0, 'Security', 7.0, 5000000.0, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000.0, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', 2.0, 'None', 10.0, 5000000.0, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000.0, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), ('2', 2.0, '2', 2.0, 2.0, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_fill_na_array():
actual_df =source_df.cols.fill_na('japanese name',['1','2'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),False), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, ['1', '2'], None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_fill_na_bool():
actual_df =source_df.cols.fill_na('Cybertronian',False)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, False, None, None)])
assert (expected_df.collect() == actual_df.collect())
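# cols.hist buckets a numeric column into the requested number of bins; for a date column it
# returns separate histograms for years, months, weekdays, hours, minutes and seconds.
# Despite its name, test_cols_hist_all_columns below exercises hist on the 'Date Type' column only.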
@staticmethod
def test_cols_hist():
actual_df =source_df.cols.hist(['height(ft)','rank'],4)
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': {'hist': [{'count': 4.0, 'lower': -28.0, 'upper': 54.0}, {'count': 0.0, 'lower': 54.0, 'upper': 136.0}, {'count': 0.0, 'lower': 136.0, 'upper': 218.0}, {'count': 0.0, 'lower': 218.0, 'upper': 300.0}]}, 'rank': {'hist': [{'count': 2.0, 'lower': 7.0, 'upper': 7.75}, {'count': 2.0, 'lower': 7.75, 'upper': 8.5}, {'count': 0.0, 'lower': 8.5, 'upper': 9.25}, {'count': 0.0, 'lower': 9.25, 'upper': 10.0}]}})
assert (expected_value == actual_df)
@staticmethod
def test_cols_hist_all_columns():
actual_df =source_df.cols.hist('Date Type',4)
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'Date Type': {'hist': {'hours': [{'count': 6.0, 'lower': 0.0, 'upper': 1.0}, {'count': 0.0, 'lower': 1.0, 'upper': 2.0}, {'count': 0.0, 'lower': 2.0, 'upper': 3.0}, {'count': 0.0, 'lower': 3.0, 'upper': 4.0}, {'count': 0.0, 'lower': 4.0, 'upper': 5.0}, {'count': 0.0, 'lower': 5.0, 'upper': 6.0}, {'count': 0.0, 'lower': 6.0, 'upper': 7.0}, {'count': 0.0, 'lower': 7.0, 'upper': 8.0}, {'count': 0.0, 'lower': 8.0, 'upper': 9.0}, {'count': 0.0, 'lower': 9.0, 'upper': 10.0}, {'count': 0.0, 'lower': 10.0, 'upper': 11.0}, {'count': 0.0, 'lower': 11.0, 'upper': 12.0}, {'count': 0.0, 'lower': 12.0, 'upper': 13.0}, {'count': 0.0, 'lower': 13.0, 'upper': 14.0}, {'count': 0.0, 'lower': 14.0, 'upper': 15.0}, {'count': 0.0, 'lower': 15.0, 'upper': 16.0}, {'count': 0.0, 'lower': 16.0, 'upper': 17.0}, {'count': 0.0, 'lower': 17.0, 'upper': 18.0}, {'count': 0.0, 'lower': 18.0, 'upper': 19.0}, {'count': 0.0, 'lower': 19.0, 'upper': 20.0}, {'count': 0.0, 'lower': 20.0, 'upper': 21.0}, {'count': 0.0, 'lower': 21.0, 'upper': 22.0}, {'count': 0.0, 'lower': 22.0, 'upper': 23.0}], 'seconds': [{'count': 6.0, 'lower': 0.0, 'upper': 1.0}, {'count': 0.0, 'lower': 1.0, 'upper': 2.0}, {'count': 0.0, 'lower': 2.0, 'upper': 3.0}, {'count': 0.0, 'lower': 3.0, 'upper': 4.0}, {'count': 0.0, 'lower': 4.0, 'upper': 5.0}, {'count': 0.0, 'lower': 5.0, 'upper': 6.0}, {'count': 0.0, 'lower': 6.0, 'upper': 7.0}, {'count': 0.0, 'lower': 7.0, 'upper': 8.0}, {'count': 0.0, 'lower': 8.0, 'upper': 9.0}, {'count': 0.0, 'lower': 9.0, 'upper': 10.0}, {'count': 0.0, 'lower': 10.0, 'upper': 11.0}, {'count': 0.0, 'lower': 11.0, 'upper': 12.0}, {'count': 0.0, 'lower': 12.0, 'upper': 13.0}, {'count': 0.0, 'lower': 13.0, 'upper': 14.0}, {'count': 0.0, 'lower': 14.0, 'upper': 15.0}, {'count': 0.0, 'lower': 15.0, 'upper': 16.0}, {'count': 0.0, 'lower': 16.0, 'upper': 17.0}, {'count': 0.0, 'lower': 17.0, 'upper': 18.0}, {'count': 0.0, 'lower': 18.0, 'upper': 19.0}, {'count': 0.0, 'lower': 19.0, 'upper': 20.0}, {'count': 0.0, 'lower': 20.0, 'upper': 21.0}, {'count': 0.0, 'lower': 21.0, 'upper': 22.0}, {'count': 0.0, 'lower': 22.0, 'upper': 23.0}, {'count': 0.0, 'lower': 23.0, 'upper': 24.0}, {'count': 0.0, 'lower': 24.0, 'upper': 25.0}, {'count': 0.0, 'lower': 25.0, 'upper': 26.0}, {'count': 0.0, 'lower': 26.0, 'upper': 27.0}, {'count': 0.0, 'lower': 27.0, 'upper': 28.0}, {'count': 0.0, 'lower': 28.0, 'upper': 29.0}, {'count': 0.0, 'lower': 29.0, 'upper': 30.0}, {'count': 0.0, 'lower': 30.0, 'upper': 31.0}, {'count': 0.0, 'lower': 31.0, 'upper': 32.0}, {'count': 0.0, 'lower': 32.0, 'upper': 33.0}, {'count': 0.0, 'lower': 33.0, 'upper': 34.0}, {'count': 0.0, 'lower': 34.0, 'upper': 35.0}, {'count': 0.0, 'lower': 35.0, 'upper': 36.0}, {'count': 0.0, 'lower': 36.0, 'upper': 37.0}, {'count': 0.0, 'lower': 37.0, 'upper': 38.0}, {'count': 0.0, 'lower': 38.0, 'upper': 39.0}, {'count': 0.0, 'lower': 39.0, 'upper': 40.0}, {'count': 0.0, 'lower': 40.0, 'upper': 41.0}, {'count': 0.0, 'lower': 41.0, 'upper': 42.0}, {'count': 0.0, 'lower': 42.0, 'upper': 43.0}, {'count': 0.0, 'lower': 43.0, 'upper': 44.0}, {'count': 0.0, 'lower': 44.0, 'upper': 45.0}, {'count': 0.0, 'lower': 45.0, 'upper': 46.0}, {'count': 0.0, 'lower': 46.0, 'upper': 47.0}, {'count': 0.0, 'lower': 47.0, 'upper': 48.0}, {'count': 0.0, 'lower': 48.0, 'upper': 49.0}, {'count': 0.0, 'lower': 49.0, 'upper': 50.0}, {'count': 0.0, 'lower': 50.0, 'upper': 51.0}, {'count': 0.0, 'lower': 51.0, 'upper': 52.0}, {'count': 0.0, 'lower': 52.0, 'upper': 53.0}, {'count': 0.0, 
'lower': 53.0, 'upper': 54.0}, {'count': 0.0, 'lower': 54.0, 'upper': 55.0}, {'count': 0.0, 'lower': 55.0, 'upper': 56.0}, {'count': 0.0, 'lower': 56.0, 'upper': 57.0}, {'count': 0.0, 'lower': 57.0, 'upper': 58.0}, {'count': 0.0, 'lower': 58.0, 'upper': 59.0}, {'count': 0.0, 'lower': 59.0, 'upper': 60.0}], 'months': [{'count': 0.0, 'lower': 1.0, 'upper': 2.0}, {'count': 0.0, 'lower': 2.0, 'upper': 3.0}, {'count': 0.0, 'lower': 3.0, 'upper': 4.0}, {'count': 1.0, 'lower': 4.0, 'upper': 5.0}, {'count': 1.0, 'lower': 5.0, 'upper': 6.0}, {'count': 2.0, 'lower': 6.0, 'upper': 7.0}, {'count': 0.0, 'lower': 7.0, 'upper': 8.0}, {'count': 1.0, 'lower': 8.0, 'upper': 9.0}, {'count': 1.0, 'lower': 9.0, 'upper': 10.0}, {'count': 0.0, 'lower': 10.0, 'upper': 11.0}, {'count': 0.0, 'lower': 11.0, 'upper': 12.0}], 'years': [{'count': 0.0, 'lower': 1950.0, 'upper': 1951.0}, {'count': 0.0, 'lower': 1951.0, 'upper': 1952.0}, {'count': 0.0, 'lower': 1952.0, 'upper': 1953.0}, {'count': 0.0, 'lower': 1953.0, 'upper': 1954.0}, {'count': 0.0, 'lower': 1954.0, 'upper': 1955.0}, {'count': 0.0, 'lower': 1955.0, 'upper': 1956.0}, {'count': 0.0, 'lower': 1956.0, 'upper': 1957.0}, {'count': 0.0, 'lower': 1957.0, 'upper': 1958.0}, {'count': 0.0, 'lower': 1958.0, 'upper': 1959.0}, {'count': 0.0, 'lower': 1959.0, 'upper': 1960.0}, {'count': 0.0, 'lower': 1960.0, 'upper': 1961.0}, {'count': 0.0, 'lower': 1961.0, 'upper': 1962.0}, {'count': 0.0, 'lower': 1962.0, 'upper': 1963.0}, {'count': 0.0, 'lower': 1963.0, 'upper': 1964.0}, {'count': 0.0, 'lower': 1964.0, 'upper': 1965.0}, {'count': 0.0, 'lower': 1965.0, 'upper': 1966.0}, {'count': 0.0, 'lower': 1966.0, 'upper': 1967.0}, {'count': 0.0, 'lower': 1967.0, 'upper': 1968.0}, {'count': 0.0, 'lower': 1968.0, 'upper': 1969.0}, {'count': 0.0, 'lower': 1969.0, 'upper': 1970.0}, {'count': 0.0, 'lower': 1970.0, 'upper': 1971.0}, {'count': 0.0, 'lower': 1971.0, 'upper': 1972.0}, {'count': 0.0, 'lower': 1972.0, 'upper': 1973.0}, {'count': 0.0, 'lower': 1973.0, 'upper': 1974.0}, {'count': 0.0, 'lower': 1974.0, 'upper': 1975.0}, {'count': 0.0, 'lower': 1975.0, 'upper': 1976.0}, {'count': 0.0, 'lower': 1976.0, 'upper': 1977.0}, {'count': 0.0, 'lower': 1977.0, 'upper': 1978.0}, {'count': 0.0, 'lower': 1978.0, 'upper': 1979.0}, {'count': 0.0, 'lower': 1979.0, 'upper': 1980.0}, {'count': 0.0, 'lower': 1980.0, 'upper': 1981.0}, {'count': 0.0, 'lower': 1981.0, 'upper': 1982.0}, {'count': 0.0, 'lower': 1982.0, 'upper': 1983.0}, {'count': 0.0, 'lower': 1983.0, 'upper': 1984.0}, {'count': 0.0, 'lower': 1984.0, 'upper': 1985.0}, {'count': 0.0, 'lower': 1985.0, 'upper': 1986.0}, {'count': 0.0, 'lower': 1986.0, 'upper': 1987.0}, {'count': 0.0, 'lower': 1987.0, 'upper': 1988.0}, {'count': 0.0, 'lower': 1988.0, 'upper': 1989.0}, {'count': 0.0, 'lower': 1989.0, 'upper': 1990.0}, {'count': 0.0, 'lower': 1990.0, 'upper': 1991.0}, {'count': 0.0, 'lower': 1991.0, 'upper': 1992.0}, {'count': 0.0, 'lower': 1992.0, 'upper': 1993.0}, {'count': 0.0, 'lower': 1993.0, 'upper': 1994.0}, {'count': 0.0, 'lower': 1994.0, 'upper': 1995.0}, {'count': 0.0, 'lower': 1995.0, 'upper': 1996.0}, {'count': 0.0, 'lower': 1996.0, 'upper': 1997.0}, {'count': 0.0, 'lower': 1997.0, 'upper': 1998.0}, {'count': 0.0, 'lower': 1998.0, 'upper': 1999.0}, {'count': 0.0, 'lower': 1999.0, 'upper': 2000.0}, {'count': 0.0, 'lower': 2000.0, 'upper': 2001.0}, {'count': 0.0, 'lower': 2001.0, 'upper': 2002.0}, {'count': 0.0, 'lower': 2002.0, 'upper': 2003.0}, {'count': 0.0, 'lower': 2003.0, 'upper': 2004.0}, {'count': 0.0, 'lower': 2004.0, 
'upper': 2005.0}, {'count': 0.0, 'lower': 2005.0, 'upper': 2006.0}, {'count': 0.0, 'lower': 2006.0, 'upper': 2007.0}, {'count': 0.0, 'lower': 2007.0, 'upper': 2008.0}, {'count': 0.0, 'lower': 2008.0, 'upper': 2009.0}, {'count': 0.0, 'lower': 2009.0, 'upper': 2010.0}, {'count': 0.0, 'lower': 2010.0, 'upper': 2011.0}, {'count': 1.0, 'lower': 2011.0, 'upper': 2012.0}, {'count': 1.0, 'lower': 2012.0, 'upper': 2013.0}, {'count': 1.0, 'lower': 2013.0, 'upper': 2014.0}, {'count': 1.0, 'lower': 2014.0, 'upper': 2015.0}, {'count': 1.0, 'lower': 2015.0, 'upper': 2016.0}, {'count': 1.0, 'lower': 2016.0, 'upper': 2017.0}, {'count': 0.0, 'lower': 2017.0, 'upper': 2018.0}, {'count': 0.0, 'lower': 2018.0, 'upper': 2019.0}], 'weekdays': [{'count': 1.0, 'lower': 1.0, 'upper': 1.97}, {'count': 2.0, 'lower': 1.97, 'upper': 2.94}, {'count': 1.0, 'lower': 2.94, 'upper': 3.9}, {'count': 0.0, 'lower': 3.9, 'upper': 4.87}, {'count': 1.0, 'lower': 4.87, 'upper': 5.84}, {'count': 0.0, 'lower': 5.84, 'upper': 6.81}, {'count': 1.0, 'lower': 6.81, 'upper': 7.77}, {'count': 0.0, 'lower': 7.77, 'upper': 8.74}, {'count': 0.0, 'lower': 8.74, 'upper': 9.71}, {'count': 0.0, 'lower': 9.71, 'upper': 10.68}, {'count': 0.0, 'lower': 10.68, 'upper': 11.65}, {'count': 0.0, 'lower': 11.65, 'upper': 12.61}, {'count': 0.0, 'lower': 12.61, 'upper': 13.58}, {'count': 0.0, 'lower': 13.58, 'upper': 14.55}, {'count': 0.0, 'lower': 14.55, 'upper': 15.52}, {'count': 0.0, 'lower': 15.52, 'upper': 16.48}, {'count': 0.0, 'lower': 16.48, 'upper': 17.45}, {'count': 0.0, 'lower': 17.45, 'upper': 18.42}, {'count': 0.0, 'lower': 18.42, 'upper': 19.39}, {'count': 0.0, 'lower': 19.39, 'upper': 20.35}, {'count': 0.0, 'lower': 20.35, 'upper': 21.32}, {'count': 0.0, 'lower': 21.32, 'upper': 22.29}, {'count': 0.0, 'lower': 22.29, 'upper': 23.26}, {'count': 0.0, 'lower': 23.26, 'upper': 24.23}, {'count': 0.0, 'lower': 24.23, 'upper': 25.19}, {'count': 0.0, 'lower': 25.19, 'upper': 26.16}, {'count': 0.0, 'lower': 26.16, 'upper': 27.13}, {'count': 0.0, 'lower': 27.13, 'upper': 28.1}, {'count': 0.0, 'lower': 28.1, 'upper': 29.06}, {'count': 0.0, 'lower': 29.06, 'upper': 30.03}, {'count': 0.0, 'lower': 30.03, 'upper': 31.0}], 'minutes': [{'count': 6.0, 'lower': 0.0, 'upper': 1.0}, {'count': 0.0, 'lower': 1.0, 'upper': 2.0}, {'count': 0.0, 'lower': 2.0, 'upper': 3.0}, {'count': 0.0, 'lower': 3.0, 'upper': 4.0}, {'count': 0.0, 'lower': 4.0, 'upper': 5.0}, {'count': 0.0, 'lower': 5.0, 'upper': 6.0}, {'count': 0.0, 'lower': 6.0, 'upper': 7.0}, {'count': 0.0, 'lower': 7.0, 'upper': 8.0}, {'count': 0.0, 'lower': 8.0, 'upper': 9.0}, {'count': 0.0, 'lower': 9.0, 'upper': 10.0}, {'count': 0.0, 'lower': 10.0, 'upper': 11.0}, {'count': 0.0, 'lower': 11.0, 'upper': 12.0}, {'count': 0.0, 'lower': 12.0, 'upper': 13.0}, {'count': 0.0, 'lower': 13.0, 'upper': 14.0}, {'count': 0.0, 'lower': 14.0, 'upper': 15.0}, {'count': 0.0, 'lower': 15.0, 'upper': 16.0}, {'count': 0.0, 'lower': 16.0, 'upper': 17.0}, {'count': 0.0, 'lower': 17.0, 'upper': 18.0}, {'count': 0.0, 'lower': 18.0, 'upper': 19.0}, {'count': 0.0, 'lower': 19.0, 'upper': 20.0}, {'count': 0.0, 'lower': 20.0, 'upper': 21.0}, {'count': 0.0, 'lower': 21.0, 'upper': 22.0}, {'count': 0.0, 'lower': 22.0, 'upper': 23.0}, {'count': 0.0, 'lower': 23.0, 'upper': 24.0}, {'count': 0.0, 'lower': 24.0, 'upper': 25.0}, {'count': 0.0, 'lower': 25.0, 'upper': 26.0}, {'count': 0.0, 'lower': 26.0, 'upper': 27.0}, {'count': 0.0, 'lower': 27.0, 'upper': 28.0}, {'count': 0.0, 'lower': 28.0, 'upper': 29.0}, {'count': 0.0, 'lower': 29.0, 
'upper': 30.0}, {'count': 0.0, 'lower': 30.0, 'upper': 31.0}, {'count': 0.0, 'lower': 31.0, 'upper': 32.0}, {'count': 0.0, 'lower': 32.0, 'upper': 33.0}, {'count': 0.0, 'lower': 33.0, 'upper': 34.0}, {'count': 0.0, 'lower': 34.0, 'upper': 35.0}, {'count': 0.0, 'lower': 35.0, 'upper': 36.0}, {'count': 0.0, 'lower': 36.0, 'upper': 37.0}, {'count': 0.0, 'lower': 37.0, 'upper': 38.0}, {'count': 0.0, 'lower': 38.0, 'upper': 39.0}, {'count': 0.0, 'lower': 39.0, 'upper': 40.0}, {'count': 0.0, 'lower': 40.0, 'upper': 41.0}, {'count': 0.0, 'lower': 41.0, 'upper': 42.0}, {'count': 0.0, 'lower': 42.0, 'upper': 43.0}, {'count': 0.0, 'lower': 43.0, 'upper': 44.0}, {'count': 0.0, 'lower': 44.0, 'upper': 45.0}, {'count': 0.0, 'lower': 45.0, 'upper': 46.0}, {'count': 0.0, 'lower': 46.0, 'upper': 47.0}, {'count': 0.0, 'lower': 47.0, 'upper': 48.0}, {'count': 0.0, 'lower': 48.0, 'upper': 49.0}, {'count': 0.0, 'lower': 49.0, 'upper': 50.0}, {'count': 0.0, 'lower': 50.0, 'upper': 51.0}, {'count': 0.0, 'lower': 51.0, 'upper': 52.0}, {'count': 0.0, 'lower': 52.0, 'upper': 53.0}, {'count': 0.0, 'lower': 53.0, 'upper': 54.0}, {'count': 0.0, 'lower': 54.0, 'upper': 55.0}, {'count': 0.0, 'lower': 55.0, 'upper': 56.0}, {'count': 0.0, 'lower': 56.0, 'upper': 57.0}, {'count': 0.0, 'lower': 57.0, 'upper': 58.0}, {'count': 0.0, 'lower': 58.0, 'upper': 59.0}, {'count': 0.0, 'lower': 59.0, 'upper': 60.0}]}}})
assert (expected_value == actual_df)
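# cols.impute fills nulls with the column mean by default ('rank' becomes FloatType with the
# missing value replaced by ~8.33). The "_all_columns" variant below actually runs categorical
# imputation on 'names', where the null is replaced with the string 'None'.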
@staticmethod
def test_cols_impute():
actual_df =source_df.cols.impute('rank')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10.0, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7.0, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7.0, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8.0, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10.0, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8.0, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, 8.333333015441895, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_impute_all_columns():
actual_df =source_df.cols.impute('names','categorical')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), ('None', None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
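# cols.iqr returns the interquartile range of a numeric column, or a dict of IQRs for every
# numeric column when called with '*'.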
@staticmethod
def test_cols_iqr():
actual_df =source_df.cols.iqr('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(13)
assert (expected_value == actual_df)
@staticmethod
def test_cols_iqr_all_columns():
actual_df =source_df.cols.iqr('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': 13, 'rank': 3, 'age': 0, 'weight(t)': 2.3000001907348633})
assert (expected_value == actual_df)
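# cols.is_na converts the selected column(s) to booleans flagging null values; with '*' every
# column becomes a non-nullable BooleanType column.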
@staticmethod
def test_cols_is_na():
actual_df =source_df.cols.is_na('height(ft)')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', BooleanType(), False),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", False, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', False, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', False, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', False, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', True, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', False, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, True, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_is_na_all_columns():
actual_df =source_df.cols.is_na('*')
expected_df = op.create.df([('names', BooleanType(), False),('height(ft)', BooleanType(), False),('function', BooleanType(), False),('rank', BooleanType(), False),('age', BooleanType(), False),('weight(t)', BooleanType(), False),('japanese name', BooleanType(), False),('last position seen', BooleanType(), False),('date arrival', BooleanType(), False),('last date seen', BooleanType(), False),('attributes', BooleanType(), False),('Date Type', BooleanType(), False),('timestamp', BooleanType(), False),('Cybertronian', BooleanType(), False),('function(binary)', BooleanType(), False),('NullType', BooleanType(), False)], [(False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True), (False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True), (False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True), (False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True), (False, True, False, False, False, False, False, True, False, False, False, False, False, False, False, True), (False, False, False, False, False, True, False, True, False, False, False, False, False, False, False, True), (True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True)])
assert (expected_df.collect() == actual_df.collect())
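# cols.keep drops everything except the requested column.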
@staticmethod
def test_cols_keep():
actual_df =source_df.cols.keep('rank')
expected_df = op.create.df([('rank', ByteType(), True)], [(10,), (7,), (7,), (8,), (10,), (8,), (None,)])
assert (expected_df.collect() == actual_df.collect())
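# cols.kurt computes kurtosis; with '*' it returns a dict for every numeric column, and the
# constant 'age' column yields nan (this assumes nan is imported at module level).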
@staticmethod
def test_cols_kurt():
actual_df =source_df.cols.kurt('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(0.13863)
assert (expected_value == actual_df)
@staticmethod
def test_cols_kurt_all_columns():
actual_df =source_df.cols.kurt('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': 0.13863, 'rank': -1.5, 'age': nan, 'weight(t)': -1.43641})
assert (expected_value == actual_df)
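# cols.lower lower-cases string columns; non-string columns are left unchanged.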
@staticmethod
def test_cols_lower():
actual_df =source_df.cols.lower('function')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'first lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'none', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'battle station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_lower_all_columns():
actual_df =source_df.cols.lower('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("optim'us", -28, 'leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('jazz', 13, 'first lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('megatron', None, 'none', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('metroplex_)^$', 300, 'battle station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
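# cols.mad returns the median absolute deviation for one or all numeric columns.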
@staticmethod
def test_cols_mad():
actual_df =source_df.cols.mad('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(9)
assert (expected_value == actual_df)
@staticmethod
def test_cols_mad_all_columns():
actual_df =source_df.cols.mad('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': 9, 'rank': 1, 'age': 0, 'weight(t)': 1.6999998092651367})
assert (expected_value == actual_df)
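# The next block checks the simple column aggregations: max, mean, median, min and mode,
# each for a single column and for '*' (all columns).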
@staticmethod
def test_cols_max():
actual_df =source_df.cols.max('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(300)
assert (expected_value == actual_df)
@staticmethod
def test_cols_max_all_columns():
actual_df =source_df.cols.max('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': 'ironhide&', 'height(ft)': 300, 'function': 'Security', 'rank': 10, 'age': 5000000, 'weight(t)': 5.7, 'japanese name': ['Roadbuster'], 'last position seen': '37.789563,-122.400356', 'date arrival': '1980/04/10', 'last date seen': '2016/09/10', 'attributes': [91.44000244140625, None], 'Date Type': '2016-09-10', 'timestamp': '2014-06-24 00:00:00', 'Cybertronian': 1, 'function(binary)': None, 'NullType': None})
assert (expected_value == actual_df)
@staticmethod
def test_cols_mean():
actual_df =source_df.cols.mean('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(65.6)
assert (expected_value == actual_df)
@staticmethod
def test_cols_mean_all_columns():
actual_df =source_df.cols.mean('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': 65.6, 'rank': 8.33333, 'age': 5000000.0, 'weight(t)': 3.56})
assert (expected_value == actual_df)
@staticmethod
def test_cols_median():
actual_df =source_df.cols.median('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(17)
assert (expected_value == actual_df)
@staticmethod
def test_cols_median_all_columns():
actual_df =source_df.cols.median('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': 17, 'rank': 8, 'age': 5000000, 'weight(t)': 4.0})
assert (expected_value == actual_df)
@staticmethod
def test_cols_min():
actual_df =source_df.cols.min('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(-28)
assert (expected_value == actual_df)
@staticmethod
def test_cols_min_all_columns():
actual_df =source_df.cols.min('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': 'Jazz', 'height(ft)': -28, 'function': 'Battle Station', 'rank': 7, 'age': 5000000, 'weight(t)': 1.8, 'japanese name': ['Bumble', 'Goldback'], 'last position seen': '10.642707,-71.612534', 'date arrival': '1980/04/10', 'last date seen': '2011/04/10', 'attributes': [None, 5700.0], 'Date Type': '2011-04-10', 'timestamp': '2014-06-24 00:00:00', 'Cybertronian': 1, 'function(binary)': None, 'NullType': None})
assert (expected_value == actual_df)
@staticmethod
def test_cols_mode():
actual_df =source_df.cols.mode('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(None)
assert (expected_value == actual_df)
@staticmethod
def test_cols_mode_all_columns():
actual_df =source_df.cols.mode('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding([{'names': None}, {'height(ft)': None}, {'function': None}, {'rank': [8, 7, 10]}, {'age': 5000000}, {'weight(t)': None}, {'japanese name': None}, {'last position seen': None}, {'date arrival': '1980/04/10'}, {'last date seen': None}, {'attributes': None}, {'Date Type': None}, {'timestamp': datetime.datetime(2014, 6, 24, 0, 0)}, {'Cybertronian': True}, {'function(binary)': None}, {'NullType': None}])
assert (expected_value == actual_df)
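# cols.move repositions a column relative to another column ('after'/'before') or to the
# 'beginning'/'end' of the DataFrame; the data values themselves are unchanged.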
@staticmethod
def test_cols_move_after():
actual_df =source_df.cols.move('rank','after','attributes')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('rank', ByteType(), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], 10, datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], 7, datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], 7, datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], 8, datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], 10, datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], 8, datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_move_before():
actual_df =source_df.cols.move('rank','before','attributes')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('rank', ByteType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', 10, [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', 7, [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', 7, [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', 8, [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', 10, [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', 8, [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_move_beginning():
actual_df =source_df.cols.move('rank','beginning')
expected_df = op.create.df([('rank', ByteType(), True),('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [(10, "Optim'us", -28, 'Leader', 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), (7, 'bumbl#ebéé ', 17, 'Espionage', 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), (7, 'ironhide&', 26, 'Security', 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), (8, 'Jazz', 13, 'First Lieutenant', 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), (10, 'Megatron', None, 'None', 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), (8, 'Metroplex_)^$', 300, 'Battle Station', 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_move_end():
actual_df =source_df.cols.move('rank','end')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('rank', ByteType(), True)], [("Optim'us", -28, 'Leader', 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 10), ('bumbl#ebéé ', 17, 'Espionage', 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 7), ('ironhide&', 26, 'Security', 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 7), ('Jazz', 13, 'First Lieutenant', 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 8), ('Megatron', None, 'None', 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, 10), ('Metroplex_)^$', 300, 'Battle Station', 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 8), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
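# cols.mul appends a 'mul' column with the row-wise product of the selected numeric columns
# (cast to FloatType); rows where any operand is null produce a null product.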
@staticmethod
def test_cols_mul():
actual_df =source_df.cols.mul(['height(ft)','rank'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('mul', FloatType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, -280.0), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 119.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 182.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 104.0), ('Megatron', None, 'None', 10.0, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 2400.0), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_mul_all_columns():
actual_df =source_df.cols.mul('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', FloatType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('mul', FloatType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000.0, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, -6020000256.0), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000.0, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 1190000000.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000.0, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 3640000000.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000.0, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 936000000.0), ('Megatron', None, 'None', 10.0, 5000000.0, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000.0, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_names():
actual_df =source_df.cols.names()
actual_df =json_enconding(actual_df)
expected_value =json_enconding(['names', 'height(ft)', 'function', 'rank', 'age', 'weight(t)', 'japanese name', 'last position seen', 'date arrival', 'last date seen', 'attributes', 'Date Type', 'timestamp', 'Cybertronian', 'function(binary)', 'NullType'])
assert (expected_value == actual_df)
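# cols.nest combines several columns into one output column: as a separator-joined string,
# as an array, or (in test_cols_nest_vector) as a vector. The vector test rebuilds source_df
# locally without the NullType column and the all-null row, presumably because Spark's
# vector assembly cannot handle them.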
@staticmethod
def test_cols_nest():
actual_df =source_df.cols.nest(['height(ft)','rank'],separator=' ',output_col='new col')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('new col', StringType(), False)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '-28 10'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '17 7'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '26 7'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '13 8'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '10'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '300 8'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, '')])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_nest_array():
actual_df =source_df.cols.nest(['height(ft)','rank','rank'],shape='array',output_col='new col')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', StringType(), True),('function', StringType(), True),('rank', StringType(), True),('age', StringType(), True),('weight(t)', StringType(), True),('japanese name', StringType(), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', StringType(), True),('Date Type', StringType(), True),('timestamp', StringType(), True),('Cybertronian', StringType(), True),('function(binary)', StringType(), True),('NullType', StringType(), True),('new col', ArrayType(StringType(),True), False)], [("Optim'us", '-28', 'Leader', '10', '5000000', '4.3', '[Inochi, Convoy]', '19.442735,-99.201111', '1980/04/10', '2016/09/10', '[8.5344, 4300.0]', '2016-09-10', '2014-06-24 00:00:00', 'true', 'Leader', None, ['-28', '10', '10']), ('bumbl#ebéé ', '17', 'Espionage', '7', '5000000', '2.0', '[Bumble, Goldback]', '10.642707,-71.612534', '1980/04/10', '2015/08/10', '[5.334, 2000.0]', '2015-08-10', '2014-06-24 00:00:00', 'true', 'Espionage', None, ['17', '7', '7']), ('ironhide&', '26', 'Security', '7', '5000000', '4.0', '[Roadbuster]', '37.789563,-122.400356', '1980/04/10', '2014/07/10', '[7.9248, 4000.0]', '2014-06-24', '2014-06-24 00:00:00', 'true', 'Security', None, ['26', '7', '7']), ('Jazz', '13', 'First Lieutenant', '8', '5000000', '1.8', '[Meister]', '33.670666,-117.841553', '1980/04/10', '2013/06/10', '[3.9624, 1800.0]', '2013-06-24', '2014-06-24 00:00:00', 'true', 'First Lieutenant', None, ['13', '8', '8']), ('Megatron', None, 'None', '10', '5000000', '5.7', '[Megatron]', None, '1980/04/10', '2012/05/10', '[, 5700.0]', '2012-05-10', '2014-06-24 00:00:00', 'true', 'None', None, [None, '10', '10']), ('Metroplex_)^$', '300', 'Battle Station', '8', '5000000', None, '[Metroflex]', None, '1980/04/10', '2011/04/10', '[91.44,]', '2011-04-10', '2014-06-24 00:00:00', 'true', 'Battle Station', None, ['300', '8', '8']), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, [None, None, None])])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_nest_vector():
source_df=op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader')), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage')), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security')), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'))])
actual_df =source_df.cols.nest(['rank','rank'],shape='vector',output_col='new col')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('new col', VectorUDT(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), DenseVector([10.0])), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), DenseVector([7.0])), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), DenseVector([7.0])), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), DenseVector([8.0]))])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_nest_vector_all_columns():
source_df=op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader')), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage')), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security')), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'))])
actual_df =source_df.cols.nest(['rank','rank'],shape='vector',output_col='new col')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('new col', VectorUDT(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), DenseVector([10.0])), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), DenseVector([7.0])), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), DenseVector([7.0])), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), DenseVector([8.0]))])
assert (expected_df.collect() == actual_df.collect())
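# cols.percentile: percentile values for numeric columns at the requested probabilities (with a relative-error argument).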
@staticmethod
def test_cols_percentile():
actual_df =source_df.cols.percentile('height(ft)',[0.05,0.25],1)
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': {'percentile': {'0.05': -28, '0.25': -28}}})
assert (expected_value == actual_df)
@staticmethod
def test_cols_percentile_all_columns():
actual_df =source_df.cols.percentile('*',[0.05,0.25],1)
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': {'percentile': {'0.05': -28, '0.25': -28}}, 'rank': {'percentile': {'0.05': 7, '0.25': 7}}, 'age': {'percentile': {'0.05': 5000000, '0.25': 5000000}}, 'weight(t)': {'percentile': {'0.05': 1.7999999523162842, '0.25': 1.7999999523162842}}})
assert (expected_value == actual_df)
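# cols.qcut: quantile-based discretization; numeric columns get a new '*_qcut' bucket column.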
@staticmethod
def test_cols_qcut():
actual_df =source_df.cols.qcut('rank',4)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('rank_qcut', DoubleType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 3.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 1.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 1.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 2.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, 3.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 2.0)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_qcut_all_columns():
actual_df =source_df.cols.qcut('*',4)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('height(ft)_qcut', DoubleType(), True),('rank_qcut', DoubleType(), True),('age_qcut', DoubleType(), True),('weight(t)_qcut', DoubleType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 0.0, 2.0, 1.0, 3.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 2.0, 1.0, 1.0, 2.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 3.0, 1.0, 1.0, 3.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 1.0, 2.0, 1.0, 1.0)])
assert (expected_df.collect() == actual_df.collect())
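# cols.range: minimum and maximum value per column.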
@staticmethod
def test_cols_range():
actual_df =source_df.cols.range('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': {'range': {'max': 300, 'min': -28}}})
assert (expected_value == actual_df)
@staticmethod
def test_cols_range_all_columns():
actual_df =source_df.cols.range('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': {'range': {'max': 'ironhide&', 'min': 'Jazz'}}, 'height(ft)': {'range': {'max': 300, 'min': -28}}, 'function': {'range': {'max': 'Security', 'min': 'Battle Station'}}, 'rank': {'range': {'max': 10, 'min': 7}}, 'age': {'range': {'max': 5000000, 'min': 5000000}}, 'weight(t)': {'range': {'max': 5.699999809265137, 'min': 1.7999999523162842}}, 'japanese name': {'range': {'max': ['Roadbuster'], 'min': ['Bumble', 'Goldback']}}, 'last position seen': {'range': {'max': '37.789563,-122.400356', 'min': '10.642707,-71.612534'}}, 'date arrival': {'range': {'max': '1980/04/10', 'min': '1980/04/10'}}, 'last date seen': {'range': {'max': '2016/09/10', 'min': '2011/04/10'}}, 'attributes': {'range': {'max': [91.44000244140625, None], 'min': [None, 5700.0]}}, 'Date Type': {'range': {'max': '2016-09-10', 'min': '2011-04-10'}}, 'timestamp': {'range': {'max': '2014-06-24 00:00:00', 'min': '2014-06-24 00:00:00'}}, 'Cybertronian': {'range': {'max': True, 'min': True}}, 'function(binary)': {'range': {'max': None, 'min': None}}, 'NullType': {'range': {'max': None, 'min': None}}})
assert (expected_value == actual_df)
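# cols.remove / remove_accents / remove_special_chars / remove_white_spaces: string clean-up of substrings, accents, special characters and whitespace.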
@staticmethod
def test_cols_remove():
actual_df =source_df.cols.remove('function','i')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Esponage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Securty', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'Frst Leutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Staton', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_accents():
actual_df =source_df.cols.remove_accents('function')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, 'None', None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_accents_all_columns():
actual_df =source_df.cols.remove_accents('function')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, 'None', None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_list():
actual_df =source_df.cols.remove('function',['a','i','Es'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leder', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'ponge', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Securty', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'Frst Leutennt', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Bttle Stton', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_list_output():
actual_df =source_df.cols.remove('function',['a','i','Es'],output_cols='function_new')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('function_new', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 'Leder'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 'ponge'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 'Securty'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 'Frst Leutennt'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, 'None'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 'Bttle Stton'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_special_chars():
actual_df =source_df.cols.remove_special_chars('function')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_special_chars_all_columns():
actual_df =source_df.cols.remove_special_chars('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [('Optimus', -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '1944273599201111', '19800410', '20160910', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumblebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '1064270771612534', '19800410', '20150810', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37789563122400356', '19800410', '20140710', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33670666117841553', '19800410', '20130610', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '19800410', '20120510', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '19800410', '20110410', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_white_spaces():
actual_df =source_df.cols.remove_white_spaces('function')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'FirstLieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'BattleStation', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_remove_white_spaces_all_columns():
actual_df =source_df.cols.remove_white_spaces('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', StringType(), True),('function', StringType(), True),('rank', StringType(), True),('age', StringType(), True),('weight(t)', StringType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', StringType(), True),('timestamp', TimestampType(), True),('Cybertronian', StringType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", '-28', 'Leader', '10', '5000000', '4.3', ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], '2016-09-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Leader'), None), ('bumbl#ebéé', '17', 'Espionage', '7', '5000000', '2.0', ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], '2015-08-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Espionage'), None), ('ironhide&', '26', 'Security', '7', '5000000', '4.0', ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], '2014-06-24', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Security'), None), ('Jazz', '13', 'FirstLieutenant', '8', '5000000', '1.8', ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], '2013-06-24', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', '10', '5000000', '5.7', ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], '2012-05-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'None'), None), ('Metroplex_)^$', '300', 'BattleStation', '8', '5000000', None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], '2011-04-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
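# cols.rename: renaming columns by name pair, by list, or by applying a function (e.g. str.upper) to every name.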
@staticmethod
def test_cols_rename():
actual_df =source_df.cols.rename('rank','rank(old)')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank(old)', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_rename_function():
actual_df =source_df.cols.rename(str.upper)
expected_df = op.create.df([('NAMES', StringType(), True),('HEIGHT(FT)', ShortType(), True),('FUNCTION', StringType(), True),('RANK', ByteType(), True),('AGE', IntegerType(), True),('WEIGHT(T)', FloatType(), True),('JAPANESE NAME', ArrayType(StringType(),True), True),('LAST POSITION SEEN', StringType(), True),('DATE ARRIVAL', StringType(), True),('LAST DATE SEEN', StringType(), True),('ATTRIBUTES', ArrayType(FloatType(),True), True),('DATE TYPE', DateType(), True),('timestamp', TimestampType(), True),('CYBERTRONIAN', BooleanType(), True),('FUNCTION(BINARY)', BinaryType(), True),('NULLTYPE', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_rename_list():
actual_df =source_df.cols.rename(['height(ft)','height(ft)(tons)','rank','rank(old)'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
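# cols.replace: substitutes a list of search values in a column with a single replacement value.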
@staticmethod
def test_cols_replace():
actual_df =source_df.cols.replace('function',['Security','Leader'],'Match')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'Match', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Match', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
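# cols.reverse: reverses the characters of string columns.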
@staticmethod
def test_cols_reverse():
actual_df =source_df.cols.reverse('function')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'redaeL', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'eganoipsE', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'ytiruceS', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'tnanetueiL tsriF', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'enoN', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'noitatS elttaB', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_reverse_all_columns():
actual_df =source_df.cols.reverse('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("su'mitpO", -28, 'redaeL', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '111102.99-,537244.91', '01/40/0891', '01/90/6102', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), (' éébe#lbmub', 17, 'eganoipsE', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '435216.17-,707246.01', '01/40/0891', '01/80/5102', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('&edihnori', 26, 'ytiruceS', 7, 5000000, 4.0, ['Roadbuster'], '653004.221-,365987.73', '01/40/0891', '01/70/4102', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('zzaJ', 13, 'tnanetueiL tsriF', 8, 5000000, 1.7999999523162842, ['Meister'], '355148.711-,666076.33', '01/40/0891', '01/60/3102', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('nortageM', None, 'enoN', 10, 5000000, 5.699999809265137, ['Megatron'], None, '01/40/0891', '01/50/2102', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('$^)_xelporteM', 300, 'noitatS elttaB', 8, 5000000, None, ['Metroflex'], None, '01/40/0891', '01/40/1102', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
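# cols.schema_dtype and cols.select / select_by_dtypes: inspecting Spark types and selecting columns by index, name, regex or data type.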
@staticmethod
def test_cols_schema_dtype():
actual_df =source_df.cols.schema_dtype('rank')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(ByteType)
assert (expected_value == actual_df)
@staticmethod
def test_cols_select():
actual_df =source_df.cols.select(0,'height(ft)')
expected_df = op.create.df([('names', StringType(), True)], [("Optim'us",), ('bumbl#ebéé ',), ('ironhide&',), ('Jazz',), ('Megatron',), ('Metroplex_)^$',), (None,)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_select_by_dtypes_array():
actual_df =source_df.cols.select_by_dtypes('array')
expected_df = op.create.df([('japanese name', ArrayType(StringType(),True), True),('attributes', ArrayType(FloatType(),True), True)], [(['Inochi', 'Convoy'], [8.53439998626709, 4300.0]), (['Bumble', 'Goldback'], [5.334000110626221, 2000.0]), (['Roadbuster'], [7.924799919128418, 4000.0]), (['Meister'], [3.962399959564209, 1800.0]), (['Megatron'], [None, 5700.0]), (['Metroflex'], [91.44000244140625, None]), (None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_select_by_dtypes_float():
actual_df =source_df.cols.select_by_dtypes('float')
expected_df = op.create.df([('weight(t)', FloatType(), True)], [(4.300000190734863,), (2.0,), (4.0,), (1.7999999523162842,), (5.699999809265137,), (None,), (None,)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_select_by_dtypes_int():
actual_df =source_df.cols.select_by_dtypes('int')
expected_df = op.create.df([('age', IntegerType(), True)], [(5000000,), (5000000,), (5000000,), (5000000,), (5000000,), (5000000,), (None,)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_select_by_dtypes_str():
actual_df =source_df.cols.select_by_dtypes('str')
expected_df = op.create.df([('names', StringType(), True),('function', StringType(), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True)], [("Optim'us", 'Leader', '19.442735,-99.201111', '1980/04/10', '2016/09/10'), ('bumbl#ebéé ', 'Espionage', '10.642707,-71.612534', '1980/04/10', '2015/08/10'), ('ironhide&', 'Security', '37.789563,-122.400356', '1980/04/10', '2014/07/10'), ('Jazz', 'First Lieutenant', '33.670666,-117.841553', '1980/04/10', '2013/06/10'), ('Megatron', 'None', None, '1980/04/10', '2012/05/10'), ('Metroplex_)^$', 'Battle Station', None, '1980/04/10', '2011/04/10'), (None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_select_regex():
actual_df =source_df.cols.select('n.*',regex=True)
expected_df = op.create.df([('names', StringType(), True)], [("Optim'us",), ('bumbl#ebéé ',), ('ironhide&',), ('Jazz',), ('Megatron',), ('Metroplex_)^$',), (None,)])
assert (expected_df.collect() == actual_df.collect())
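# cols.skewness: skewness statistic for numeric columns.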
@staticmethod
def test_cols_skewness():
actual_df =source_df.cols.skewness('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(1.4049)
assert (expected_value == actual_df)
@staticmethod
def test_cols_skewness_all_columns():
actual_df =source_df.cols.skewness('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': 1.4049, 'rank': 0.3818, 'age': nan, 'weight(t)': 0.06521})
assert (expected_value == actual_df)
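# cols.sort: reorders the DataFrame columns alphabetically (ascending by default, or descending).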
@staticmethod
def test_cols_sort():
actual_df =source_df.cols.sort()
expected_df = op.create.df([('age', IntegerType(), True),('attributes', ArrayType(FloatType(),True), True),('Cybertronian', BooleanType(), True),('date arrival', StringType(), True),('Date Type', DateType(), True),('function', StringType(), True),('function(binary)', BinaryType(), True),('height(ft)', ShortType(), True),('japanese name', ArrayType(StringType(),True), True),('last date seen', StringType(), True),('last position seen', StringType(), True),('names', StringType(), True),('NullType', NullType(), True),('rank', ByteType(), True),('timestamp', TimestampType(), True),('weight(t)', FloatType(), True)], [(5000000, [8.53439998626709, 4300.0], True, '1980/04/10', datetime.date(2016, 9, 10), 'Leader', bytearray(b'Leader'), -28, ['Inochi', 'Convoy'], '2016/09/10', '19.442735,-99.201111', "Optim'us", None, 10, datetime.datetime(2014, 6, 24, 0, 0), 4.300000190734863), (5000000, [5.334000110626221, 2000.0], True, '1980/04/10', datetime.date(2015, 8, 10), 'Espionage', bytearray(b'Espionage'), 17, ['Bumble', 'Goldback'], '2015/08/10', '10.642707,-71.612534', 'bumbl#ebéé ', None, 7, datetime.datetime(2014, 6, 24, 0, 0), 2.0), (5000000, [7.924799919128418, 4000.0], True, '1980/04/10', datetime.date(2014, 6, 24), 'Security', bytearray(b'Security'), 26, ['Roadbuster'], '2014/07/10', '37.789563,-122.400356', 'ironhide&', None, 7, datetime.datetime(2014, 6, 24, 0, 0), 4.0), (5000000, [3.962399959564209, 1800.0], True, '1980/04/10', datetime.date(2013, 6, 24), 'First Lieutenant', bytearray(b'First Lieutenant'), 13, ['Meister'], '2013/06/10', '33.670666,-117.841553', 'Jazz', None, 8, datetime.datetime(2014, 6, 24, 0, 0), 1.7999999523162842), (5000000, [None, 5700.0], True, '1980/04/10', datetime.date(2012, 5, 10), 'None', bytearray(b'None'), None, ['Megatron'], '2012/05/10', None, 'Megatron', None, 10, datetime.datetime(2014, 6, 24, 0, 0), 5.699999809265137), (5000000, [91.44000244140625, None], True, '1980/04/10', datetime.date(2011, 4, 10), 'Battle Station', bytearray(b'Battle Station'), 300, ['Metroflex'], '2011/04/10', None, 'Metroplex_)^$', None, 8, datetime.datetime(2014, 6, 24, 0, 0), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_sort_asc():
actual_df = source_df.cols.sort('asc')
expected_df = op.create.df([('age', IntegerType(), True),('attributes', ArrayType(FloatType(),True), True),('Cybertronian', BooleanType(), True),('date arrival', StringType(), True),('Date Type', DateType(), True),('function', StringType(), True),('function(binary)', BinaryType(), True),('height(ft)', ShortType(), True),('japanese name', ArrayType(StringType(),True), True),('last date seen', StringType(), True),('last position seen', StringType(), True),('names', StringType(), True),('NullType', NullType(), True),('rank', ByteType(), True),('timestamp', TimestampType(), True),('weight(t)', FloatType(), True)], [(5000000, [8.53439998626709, 4300.0], True, '1980/04/10', datetime.date(2016, 9, 10), 'Leader', bytearray(b'Leader'), -28, ['Inochi', 'Convoy'], '2016/09/10', '19.442735,-99.201111', "Optim'us", None, 10, datetime.datetime(2014, 6, 24, 0, 0), 4.300000190734863), (5000000, [5.334000110626221, 2000.0], True, '1980/04/10', datetime.date(2015, 8, 10), 'Espionage', bytearray(b'Espionage'), 17, ['Bumble', 'Goldback'], '2015/08/10', '10.642707,-71.612534', 'bumbl#ebéé ', None, 7, datetime.datetime(2014, 6, 24, 0, 0), 2.0), (5000000, [7.924799919128418, 4000.0], True, '1980/04/10', datetime.date(2014, 6, 24), 'Security', bytearray(b'Security'), 26, ['Roadbuster'], '2014/07/10', '37.789563,-122.400356', 'ironhide&', None, 7, datetime.datetime(2014, 6, 24, 0, 0), 4.0), (5000000, [3.962399959564209, 1800.0], True, '1980/04/10', datetime.date(2013, 6, 24), 'First Lieutenant', bytearray(b'First Lieutenant'), 13, ['Meister'], '2013/06/10', '33.670666,-117.841553', 'Jazz', None, 8, datetime.datetime(2014, 6, 24, 0, 0), 1.7999999523162842), (5000000, [None, 5700.0], True, '1980/04/10', datetime.date(2012, 5, 10), 'None', bytearray(b'None'), None, ['Megatron'], '2012/05/10', None, 'Megatron', None, 10, datetime.datetime(2014, 6, 24, 0, 0), 5.699999809265137), (5000000, [91.44000244140625, None], True, '1980/04/10', datetime.date(2011, 4, 10), 'Battle Station', bytearray(b'Battle Station'), 300, ['Metroflex'], '2011/04/10', None, 'Metroplex_)^$', None, 8, datetime.datetime(2014, 6, 24, 0, 0), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_sort_desc():
actual_df = source_df.cols.sort('desc')
expected_df = op.create.df([('weight(t)', FloatType(), True),('timestamp', TimestampType(), True),('rank', ByteType(), True),('NullType', NullType(), True),('names', StringType(), True),('last position seen', StringType(), True),('last date seen', StringType(), True),('japanese name', ArrayType(StringType(),True), True),('height(ft)', ShortType(), True),('function(binary)', BinaryType(), True),('function', StringType(), True),('Date Type', DateType(), True),('date arrival', StringType(), True),('Cybertronian', BooleanType(), True),('attributes', ArrayType(FloatType(),True), True),('age', IntegerType(), True)], [(4.300000190734863, datetime.datetime(2014, 6, 24, 0, 0), 10, None, "Optim'us", '19.442735,-99.201111', '2016/09/10', ['Inochi', 'Convoy'], -28, bytearray(b'Leader'), 'Leader', datetime.date(2016, 9, 10), '1980/04/10', True, [8.53439998626709, 4300.0], 5000000), (2.0, datetime.datetime(2014, 6, 24, 0, 0), 7, None, 'bumbl#ebéé ', '10.642707,-71.612534', '2015/08/10', ['Bumble', 'Goldback'], 17, bytearray(b'Espionage'), 'Espionage', datetime.date(2015, 8, 10), '1980/04/10', True, [5.334000110626221, 2000.0], 5000000), (4.0, datetime.datetime(2014, 6, 24, 0, 0), 7, None, 'ironhide&', '37.789563,-122.400356', '2014/07/10', ['Roadbuster'], 26, bytearray(b'Security'), 'Security', datetime.date(2014, 6, 24), '1980/04/10', True, [7.924799919128418, 4000.0], 5000000), (1.7999999523162842, datetime.datetime(2014, 6, 24, 0, 0), 8, None, 'Jazz', '33.670666,-117.841553', '2013/06/10', ['Meister'], 13, bytearray(b'First Lieutenant'), 'First Lieutenant', datetime.date(2013, 6, 24), '1980/04/10', True, [3.962399959564209, 1800.0], 5000000), (5.699999809265137, datetime.datetime(2014, 6, 24, 0, 0), 10, None, 'Megatron', None, '2012/05/10', ['Megatron'], None, bytearray(b'None'), 'None', datetime.date(2012, 5, 10), '1980/04/10', True, [None, 5700.0], 5000000), (None, datetime.datetime(2014, 6, 24, 0, 0), 8, None, 'Metroplex_)^$', None, '2011/04/10', ['Metroflex'], 300, bytearray(b'Battle Station'), 'Battle Station', datetime.date(2011, 4, 10), '1980/04/10', True, [91.44000244140625, None], 5000000), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_std():
actual_df = source_df.cols.std('height(ft)')
actual_df = json_enconding(actual_df)
expected_value = json_enconding(132.66612)
assert (expected_value == actual_df)
@staticmethod
def test_cols_std_all_columns():
actual_df = source_df.cols.std('*')
actual_df = json_enconding(actual_df)
expected_value = json_enconding({'height(ft)': 132.66612, 'rank': 1.36626, 'age': 0.0, 'weight(t)': 1.64712})
assert (expected_value == actual_df)
@staticmethod
def test_cols_string_to_index():
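# this test builds its own six-row source_df (no NullType column, no all-null row); string_to_index('rank') appends a DoubleType 'rank***INDEX_TO_STRING' column holding the fitted index values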
source_df=op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader')), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage')), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security')), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant')), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None')), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'))])
actual_df = source_df.cols.string_to_index('rank')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('rank***INDEX_TO_STRING', DoubleType(), False)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), 2.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), 1.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), 1.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), 0.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), 2.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), 0.0)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_sub():
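# the new 'sub' column holds height(ft) - rank; both input columns are cast to FloatType in the expected schema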
actual_df = source_df.cols.sub(['height(ft)', 'rank'])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('sub', FloatType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, -38.0), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 10.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 19.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 5.0), ('Megatron', None, 'None', 10.0, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 292.0), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_sub_all_columns():
actual_df = source_df.cols.sub('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', FloatType(), True),('function', StringType(), True),('rank', FloatType(), True),('age', FloatType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('sub', FloatType(), True)], [("Optim'us", -28.0, 'Leader', 10.0, 5000000.0, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, -5000042.5), ('bumbl#ebéé ', 17.0, 'Espionage', 7.0, 5000000.0, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, -4999992.0), ('ironhide&', 26.0, 'Security', 7.0, 5000000.0, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, -4999985.0), ('Jazz', 13.0, 'First Lieutenant', 8.0, 5000000.0, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, -4999997.0), ('Megatron', None, 'None', 10.0, 5000000.0, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None), ('Metroplex_)^$', 300.0, 'Battle Station', 8.0, 5000000.0, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_sum():
actual_df = source_df.cols.sum('height(ft)')
actual_df = json_enconding(actual_df)
expected_value = json_enconding(328)
assert (expected_value == actual_df)
@staticmethod
def test_cols_sum_all_columns():
actual_df = source_df.cols.sum('*')
actual_df = json_enconding(actual_df)
expected_value = json_enconding({'height(ft)': 328, 'rank': 50, 'age': 30000000, 'weight(t)': 17.8})
assert (expected_value == actual_df)
@staticmethod
def test_cols_trim():
actual_df = source_df.cols.trim('height(ft)')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', StringType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", '-28', 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', '17', 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', '26', 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', '13', 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', '300', 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_trim_all_columns():
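# trim('*') casts the scalar non-string columns to StringType in the expected output (numbers, dates and booleans show up as strings) and strips the trailing space from 'bumbl#ebéé '; arrays, binaries and the timestamp keep their types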
actual_df = source_df.cols.trim('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', StringType(), True),('function', StringType(), True),('rank', StringType(), True),('age', StringType(), True),('weight(t)', StringType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', StringType(), True),('timestamp', TimestampType(), True),('Cybertronian', StringType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", '-28', 'Leader', '10', '5000000', '4.3', ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], '2016-09-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Leader'), None), ('bumbl#ebéé', '17', 'Espionage', '7', '5000000', '2.0', ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], '2015-08-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Espionage'), None), ('ironhide&', '26', 'Security', '7', '5000000', '4.0', ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], '2014-06-24', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Security'), None), ('Jazz', '13', 'First Lieutenant', '8', '5000000', '1.8', ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], '2013-06-24', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', '10', '5000000', '5.7', ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], '2012-05-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'None'), None), ('Metroplex_)^$', '300', 'Battle Station', '8', '5000000', None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], '2011-04-10', datetime.datetime(2014, 6, 24, 0, 0), 'true', bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unique():
actual_df = source_df.cols.unique('height(ft)')
actual_df = json_enconding(actual_df)
expected_value = json_enconding({'height(ft)': [300, 26, None, 13, 17, -28]})
assert (expected_value == actual_df)
@staticmethod
def test_cols_unique_all_columns():
actual_df = source_df.cols.unique('*')
actual_df = json_enconding(actual_df)
expected_value =json_enconding({'names': ['Jazz', None, 'bumbl#ebéé ', 'ironhide&', "Optim'us", 'Megatron', 'Metroplex_)^$'], 'height(ft)': [300, 26, None, 13, 17, -28], 'function': ['Leader', 'First Lieutenant', 'None', 'Security', None, 'Espionage', 'Battle Station'], 'rank': [None, 8, 7, 10], 'age': [None, 5000000], 'weight(t)': [5.699999809265137, None, 2.0, 1.7999999523162842, 4.0, 4.300000190734863], 'japanese name': [['Metroflex'], ['Bumble', 'Goldback'], None, ['Inochi', 'Convoy'], ['Megatron'], ['Meister'], ['Roadbuster']], 'last position seen': [None, '37.789563,-122.400356', '19.442735,-99.201111', '33.670666,-117.841553', '10.642707,-71.612534'], 'date arrival': [None, '1980/04/10'], 'last date seen': ['2011/04/10', None, '2012/05/10', '2013/06/10', '2015/08/10', '2014/07/10', '2016/09/10'], 'attributes': [[3.962399959564209, 1800.0], [None, 5700.0], None, [8.53439998626709, 4300.0], [7.924799919128418, 4000.0], [91.44000244140625, None], [5.334000110626221, 2000.0]], 'Date Type': ['2012-05-10', '2015-08-10', None, '2011-04-10', '2013-06-24', '2014-06-24', '2016-09-10'], 'timestamp': ['2014-06-24 00:00:00', None], 'Cybertronian': [None, True], 'function(binary)': [None, None, None, None, None, None, None], 'NullType': [None]})
assert (expected_value == actual_df)
@staticmethod
def test_cols_unnest_array():
actual_df = source_df.cols.unnest('attributes')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('attributes_0', FloatType(), True),('attributes_1', FloatType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 8.53439998626709, 4300.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 5.334000110626221, 2000.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 7.924799919128418, 4000.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 3.962399959564209, 1800.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None, 5700.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 91.44000244140625, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_array_all_columns():
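# note: despite the name, this generated test unnests only the 'attributes' column with index=[1, 2], the same call as the multi-index tests below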
actual_df = source_df.cols.unnest('attributes', index=[1, 2])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('attributes_0', FloatType(), True),('attributes_1', FloatType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 8.53439998626709, 4300.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 5.334000110626221, 2000.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 7.924799919128418, 4000.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 3.962399959564209, 1800.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None, 5700.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 91.44000244140625, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_array_index():
actual_df = source_df.cols.unnest('attributes', index=2)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('attributes_1', FloatType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 4300.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 2000.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 4000.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 1800.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, 5700.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_array_mmulti_index():
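# body is identical to test_cols_unnest_array_multi_index below (same unnest call and expected frame); the 'mmulti' spelling appears to come from the test generator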
actual_df = source_df.cols.unnest('attributes', index=[1, 2])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('attributes_0', FloatType(), True),('attributes_1', FloatType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 8.53439998626709, 4300.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 5.334000110626221, 2000.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 7.924799919128418, 4000.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 3.962399959564209, 1800.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None, 5700.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 91.44000244140625, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_array_multi_index():
actual_df = source_df.cols.unnest('attributes', index=[1, 2])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('attributes_0', FloatType(), True),('attributes_1', FloatType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 8.53439998626709, 4300.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 5.334000110626221, 2000.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 7.924799919128418, 4000.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 3.962399959564209, 1800.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, None, 5700.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, 91.44000244140625, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string():
actual_df = source_df.cols.unnest('date arrival', '/', splits=3, output_cols=[('year', 'month', 'day')])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('year', StringType(), True),('month', StringType(), True),('day', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '1980', '04', '10'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '1980', '04', '10'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '1980', '04', '10'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '1980', '04', '10'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '1980', '04', '10'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '1980', '04', '10'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string_all_columns():
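# note: despite the name, this generated test unnests the 'attributes' array at index=2, the same call as test_cols_unnest_array_index above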
actual_df = source_df.cols.unnest('attributes', index=2)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('attributes_1', FloatType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, 4300.0), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, 2000.0), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, 4000.0), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, 1800.0), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, 5700.0), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string_index():
actual_df = source_df.cols.unnest('date arrival', '/', splits=3, index=[1, 2, 3])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('date arrival_1', StringType(), True),('date arrival_2', StringType(), True),('date arrival_3', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '1980', '04', '10'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '1980', '04', '10'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '1980', '04', '10'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '1980', '04', '10'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '1980', '04', '10'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '1980', '04', '10'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string_infer_split():
actual_df = source_df.cols.unnest('date arrival', '/')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('date arrival_0', StringType(), True),('date arrival_1', StringType(), True),('date arrival_2', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '1980', '04', '10'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '1980', '04', '10'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '1980', '04', '10'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '1980', '04', '10'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '1980', '04', '10'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '1980', '04', '10'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string_multi_column_multi_output():
actual_df = source_df.cols.unnest(['date arrival', 'last date seen'], '/', output_cols=[('year1', 'month1'), ('year2', 'month2')])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('year1', StringType(), True),('month1', StringType(), True),('year2', StringType(), True),('month2', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '1980', '04', '2016', '09'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '1980', '04', '2015', '08'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '1980', '04', '2014', '07'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '1980', '04', '2013', '06'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '1980', '04', '2012', '05'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '1980', '04', '2011', '04'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string_multi_index():
actual_df = source_df.cols.unnest('date arrival', '/', splits=3, index=[1, 2])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('date arrival_0', StringType(), True),('date arrival_1', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '1980', '04'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '1980', '04'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '1980', '04'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '1980', '04'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '1980', '04'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '1980', '04'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string_no_index():
actual_df = source_df.cols.unnest('date arrival', '/', splits=3)
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('date arrival_0', StringType(), True),('date arrival_1', StringType(), True),('date arrival_2', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '1980', '04', '10'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '1980', '04', '10'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '1980', '04', '10'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '1980', '04', '10'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '1980', '04', '10'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '1980', '04', '10'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_unnest_string_output_columns():
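# same unnest call and expected frame as test_cols_unnest_string above; output_cols names the three resulting columns year/month/day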
actual_df = source_df.cols.unnest('date arrival', '/', splits=3, output_cols=[('year', 'month', 'day')])
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True),('year', StringType(), True),('month', StringType(), True),('day', StringType(), True)], [("Optim'us", -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None, '1980', '04', '10'), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None, '1980', '04', '10'), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None, '1980', '04', '10'), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None, '1980', '04', '10'), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None, '1980', '04', '10'), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None, '1980', '04', '10'), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_upper():
actual_df = source_df.cols.upper('function')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", -28, 'LEADER', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'ESPIONAGE', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'SECURITY', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'FIRST LIEUTENANT', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'NONE', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'BATTLE STATION', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_upper_all_columns():
actual_df =source_df.cols.upper('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("OPTIM'US", -28, 'LEADER', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('BUMBL#EBÉÉ ', 17, 'ESPIONAGE', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('IRONHIDE&', 26, 'SECURITY', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('JAZZ', 13, 'FIRST LIEUTENANT', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('MEGATRON', None, 'NONE', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('METROPLEX_)^$', 300, 'BATTLE STATION', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_value_counts():
actual_df =source_df.cols.value_counts('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': [{'value': 300, 'count': 1}, {'value': 26, 'count': 1}, {'value': 13, 'count': 1}, {'value': 17, 'count': 1}, {'value': -28, 'count': 1}, {'value': None, 'count': 2}]})
assert(expected_value == actual_df)
@staticmethod
def test_cols_value_counts_all_columns():
actual_df =source_df.cols.value_counts('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'names': [{'value': 'Jazz', 'count': 1}, {'value': None, 'count': 1}, {'value': 'bumbl#ebéé ', 'count': 1}, {'value': 'ironhide&', 'count': 1}, {'value': "Optim'us", 'count': 1}, {'value': 'Megatron', 'count': 1}, {'value': 'Metroplex_)^$', 'count': 1}], 'height(ft)': [{'value': 300, 'count': 1}, {'value': 26, 'count': 1}, {'value': 13, 'count': 1}, {'value': 17, 'count': 1}, {'value': -28, 'count': 1}, {'value': None, 'count': 2}], 'function': [{'value': 'Leader', 'count': 1}, {'value': 'First Lieutenant', 'count': 1}, {'value': 'None', 'count': 1}, {'value': 'Security', 'count': 1}, {'value': None, 'count': 1}, {'value': 'Espionage', 'count': 1}, {'value': 'Battle Station', 'count': 1}], 'rank': [{'value': None, 'count': 1}, {'value': 8, 'count': 2}, {'value': 7, 'count': 2}, {'value': 10, 'count': 2}], 'age': [{'value': None, 'count': 1}, {'value': 5000000, 'count': 6}], 'weight(t)': [{'value': 5.699999809265137, 'count': 1}, {'value': 2.0, 'count': 1}, {'value': 1.7999999523162842, 'count': 1}, {'value': 4.0, 'count': 1}, {'value': 4.300000190734863, 'count': 1}, {'value': None, 'count': 2}], 'japanese name': [{'value': ['Metroflex'], 'count': 1}, {'value': ['Bumble', 'Goldback'], 'count': 1}, {'value': None, 'count': 1}, {'value': ['Inochi', 'Convoy'], 'count': 1}, {'value': ['Megatron'], 'count': 1}, {'value': ['Meister'], 'count': 1}, {'value': ['Roadbuster'], 'count': 1}], 'last position seen': [{'value': '37.789563,-122.400356', 'count': 1}, {'value': '19.442735,-99.201111', 'count': 1}, {'value': '33.670666,-117.841553', 'count': 1}, {'value': '10.642707,-71.612534', 'count': 1}, {'value': None, 'count': 3}], 'date arrival': [{'value': None, 'count': 1}, {'value': '1980/04/10', 'count': 6}], 'last date seen': [{'value': '2011/04/10', 'count': 1}, {'value': None, 'count': 1}, {'value': '2012/05/10', 'count': 1}, {'value': '2013/06/10', 'count': 1}, {'value': '2015/08/10', 'count': 1}, {'value': '2014/07/10', 'count': 1}, {'value': '2016/09/10', 'count': 1}], 'attributes': [{'value': [3.962399959564209, 1800.0], 'count': 1}, {'value': [None, 5700.0], 'count': 1}, {'value': None, 'count': 1}, {'value': [8.53439998626709, 4300.0], 'count': 1}, {'value': [7.924799919128418, 4000.0], 'count': 1}, {'value': [91.44000244140625, None], 'count': 1}, {'value': [5.334000110626221, 2000.0], 'count': 1}], 'Date Type': [{'value': '2012-05-10', 'count': 1}, {'value': '2015-08-10', 'count': 1}, {'value': None, 'count': 1}, {'value': '2011-04-10', 'count': 1}, {'value': '2013-06-24', 'count': 1}, {'value': '2014-06-24', 'count': 1}, {'value': '2016-09-10', 'count': 1}], 'timestamp': [{'value': None, 'count': 1}, {'value': '2014-06-24 00:00:00', 'count': 6}], 'Cybertronian': [{'value': None, 'count': 1}, {'value': True, 'count': 6}], 'function(binary)': [{'value': None, 'count': 1}, {'value': None, 'count': 1}, {'value': None, 'count': 1}, {'value': None, 'count': 1}, {'value': None, 'count': 1}, {'value': None, 'count': 1}, {'value': None, 'count': 1}], 'NullType': [{'value': None, 'count': 7}]})
assert (expected_value == actual_df)
@staticmethod
def test_cols_variance():
actual_df =source_df.cols.variance('height(ft)')
actual_df =json_enconding(actual_df)
expected_value =json_enconding(17600.3)
assert (expected_value == actual_df)
@staticmethod
def test_cols_variance_all_columns():
actual_df =source_df.cols.variance('*')
actual_df =json_enconding(actual_df)
expected_value =json_enconding({'height(ft)': 17600.3, 'rank': 1.86667, 'age': 0.0, 'weight(t)': 2.713})
assert (expected_value == actual_df)
@staticmethod
def test_cols_z_score():
actual_df =source_df.cols.z_score('height(ft)')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', DoubleType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", 0.7055305454022474, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 0.366333167805013, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 0.29849369228556616, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 0.39648404581365604, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 1.7668414513064827, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
@staticmethod
def test_cols_z_score_all_columns():
actual_df =source_df.cols.z_score('*')
expected_df = op.create.df([('names', StringType(), True),('height(ft)', DoubleType(), True),('function', StringType(), True),('rank', DoubleType(), True),('age', DoubleType(), True),('weight(t)', DoubleType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [("Optim'us", 0.7055305454022474, 'Leader', 1.2198776221217045, None, 0.4492691429494289, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 0.366333167805013, 'Espionage', 0.9758977061467071, None, 0.9471076788576425, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 0.29849369228556616, 'Security', 0.9758977061467071, None, 0.2671329350624119, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 0.39648404581365604, 'First Lieutenant', 0.24397259672390328, None, 1.0685317691994, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 1.2198776221217045, None, 1.2992373410954494, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 1.7668414513064827, 'Battle Station', 0.24397259672390328, None, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
assert (expected_df.collect() == actual_df.collect())
|
py | 1a4d082f21f8f681eab1caf147ccf47f50bed664 | from .base import * # flake8: noqa
#env.bool('DJANGO_DEBUG', default=False)
DEBUG = env('DEBUG')
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
SECRET_KEY = env('DJANGO_SECRET_KEY')
# Compress static files offline
# http://django-compressor.readthedocs.org/en/latest/settings/#django.conf.settings.COMPRESS_OFFLINE
# Turning this on causes the server to return 500
# According to the docs, if this is set to True you also need to run the compress management command
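# (for django-compressor that is typically `python manage.py compress`)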
#COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
]
ALLOWED_HOSTS = env('DJANGO_ALLOWED_HOSTS')
INSTALLED_APPS += (
'wagtail.contrib.wagtailfrontendcache',
'gunicorn',
)
# Run long-running tasks through Celery so that they don't block web server threads
# (requires the django-celery package):
# http://celery.readthedocs.org/en/latest/configuration.html
import djcelery
djcelery.setup_loader()
CELERY_SEND_TASK_ERROR_EMAILS = True
BROKER_URL = 'redis://'
# Use Redis as the cache backend for extra performance
# (requires the django-redis-cache package):
# http://wagtail.readthedocs.org/en/latest/howto/performance.html#cache
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': '{0}/{1}'.format(env('REDIS_URL', default='redis://127.0.0.1:6379'), 0),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'IGNORE_EXCEPTIONS': True,
}
}
}
DEFAULT_FROM_EMAIL = env('EMAIL_FROM')
EMAIL_USE_TLS = True
EMAIL_HOST = env("EMAIL_HOST")
EMAIL_HOST_USER = env('EMAIL_USER')
EMAIL_HOST_PASSWORD = env('EMAIL_PASSWD')
EMAIL_PORT = 587
# LOGGING CONFIGURATION
# Sends an email to site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
|
py | 1a4d09aa09e2665f13f249c5708fdd7192001e99 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-06 16:44
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('publication_date', models.DateTimeField(blank=True, help_text='Date at which the article will become visible', null=True, verbose_name='Publication date')),
('expiration_date', models.DateTimeField(blank=True, help_text='Date at which the article will become invisible. Can be left blank if the article should not expire.', null=True, verbose_name='Expiration date')),
('title', models.CharField(help_text='The full title, displayed in lists and before content', max_length=500, verbose_name='Title')),
('content', models.TextField(help_text="The article's content. Support markdown.", verbose_name='Content')),
],
options={
'ordering': ('publication_date',),
},
),
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Visible author name', max_length=200, unique=True, verbose_name='Author name')),
('twitter', models.CharField(blank=True, help_text='Twitter handle for this author identity', max_length=30, null=True, verbose_name='Twitter handle')),
('user', models.ManyToManyField(help_text='List of users that can use this identity to publish articles', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name',),
},
),
migrations.AddField(
model_name='article',
name='author',
field=models.ForeignKey(help_text='The identity under which the article is published', on_delete=django.db.models.deletion.CASCADE, to='blog.Author'),
),
]
|
py | 1a4d0a00bfd64ab2af7aaf38d39e8ff87f884631 | # -*- coding: utf-8 -*-
import os
import re
import time
from bs4 import BeautifulSoup
import requests
import httpx
def rrss(test_str):
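    # Linkify raw tweet text: @mentions become Twitter profile links, #hashtags
    # become hashtag search links, and bare URLs become anchor tags.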
regex = r"(@(\w+))"
subst = "<a rel='nofolow norefer' href=\"https://twitter.com/\\2\" target=\"_blank\">\\1</a>"
result = re.sub(regex, subst, test_str, 0, re.IGNORECASE | re.UNICODE)
if result:
test_str = result
regex = r"(#(\w+))"
subst = "<a href=\"https://twitter.com/hashtag/\\2\" target=\"_blank\">\\1</a>"
result = re.sub(regex, subst, test_str, 0, re.IGNORECASE | re.UNICODE)
if result:
test_str = result
regex = r"[^\'\"](https?:\/\/((www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6})\b([-a-zA-Z0-9()@:%_\+.~#?&//=]*))"
subst = " <a href=\"\\1\" target=\"_blank\" rel='nofollow norefer'>\\2</a>"
result = re.sub(regex, subst, test_str, 0, re.IGNORECASE | re.UNICODE)
if result:
test_str = result
return test_str
def href(name):
return "-".join(name.split(" ")).lower()
def id_tuit_text_plain(text,lis):
regex = r"https:\/\/twitter.com/\w+/status/(\d+)"
m = re.search(regex, text)
if m:
lis.append(m.group(1))
def down_e(url,pined=False):
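    # Scrape a single article page: collect the title, embedded tweet ids,
    # image URLs, publication date, categories, iframe links and body
    # paragraphs, and return them as a dict ready for later rendering.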
h = {"User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.116 Mobile Safari/537.36"}
r = requests.get(url,headers=h)
bs = BeautifulSoup(r.content.decode("utf8"), "html.parser")
content = bs.find("main",{"class":"content"})
title = content.find("h1", {"class":"entry-title", "itemprop":"headline"}).get_text()
tuit = content.find_all("a",{"href":re.compile(r"https:\/\/t\.co\/(\w+)")})
img = content.find_all("img",limit=4)
img_link = []
tuit_links = []
link_tuit = []
if tuit:
for a in tuit:
tuit_links.append(a.attrs["href"])
tmp = [requests.get(link).url for link in tuit_links]
for i in tmp:
id_tuit_text_plain(i, link_tuit)
if img:
for i in img:
img_link.append(i.attrs["src"])
date = content.find("time", {"class":"entry-time"}).get_text()
categorias = [i.text for i in content.find("span",{"class":"entry-categories"}).find_all("a")]
external = content.find_all("iframe")
contento = []
external_link = []
if external:
try:
for i in external:
external_link.append(i.attrs["src"])
except Exception:
print("Error into exernals links")
# for i in content.find_all("p"):
# if len(i.text) > 6 and i.em == None :
# contento.append(i.text
for p in bs.find_all("p"):
if p.attrs.get("class") == "entry-meta":
continue
elif p.img:
continue
elif p.get_text().startswith("Fuente") and p.a:
continue
elif p.em:
pass
elif p.get_text().startswith("Copyright") and p.a:
break
else:
contento.append(p.get_text())
print("flush")
contento = [rrss(cnt) for cnt in contento]
contento = "*#*".join(contento)
contento = "{} *$* {}".format(title, contento)
return dict(content=contento, categorias=categorias, date=date, img=img_link, external=external_link, link=href(title), tuit=link_tuit, pined=pined) |
py | 1a4d0b37f9db7442b1dc3e42ebdb25b7fac2c020 | """
Python re-implementation of "Exploiting the Circulant Structure of
Tracking-by-detection with Kernels"
@book{Henriques2012Exploiting,
title={Exploiting the Circulant Structure of Tracking-by-Detection with Kernels},
author={Henriques, João F. and Rui, Caseiro and Martins, Pedro and Batista, Jorge},
year={2012},
}
"""
import numpy as np
import cv2
from .base import BaseCF
from lib.utils import gaussian2d_labels,cos_window
from lib.fft_tools import fft2,ifft2
class CSK(BaseCF):
def __init__(self, interp_factor=0.075, sigma=0.2, lambda_=0.01):
        super(CSK, self).__init__()
self.interp_factor = interp_factor
self.sigma = sigma
self.lambda_ = lambda_
def init(self,first_frame,bbox):
if len(first_frame.shape)==3:
assert first_frame.shape[2]==3
first_frame=cv2.cvtColor(first_frame,cv2.COLOR_BGR2GRAY)
first_frame=first_frame.astype(np.float32)
bbox=np.array(bbox).astype(np.int64)
x,y,w,h=tuple(bbox)
self._center=(x+w/2,y+h/2)
self.w,self.h=w,h
self._window=cos_window((int(round(2*w)),int(round(2*h))))
self.crop_size=(int(round(2*w)),int(round(2*h)))
self.x=cv2.getRectSubPix(first_frame,(int(round(2*w)),int(round(2*h))),self._center)/255-0.5
self.x=self.x*self._window
s=np.sqrt(w*h)/16
self.y=gaussian2d_labels((int(round(2*w)),int(round(2*h))),s)
self._init_response_center=np.unravel_index(np.argmax(self.y,axis=None),self.y.shape)
self.alphaf=self._training(self.x,self.y)
def update(self,current_frame,idx,vis=False):
if len(current_frame.shape)==3:
assert current_frame.shape[2]==3
current_frame=cv2.cvtColor(current_frame,cv2.COLOR_BGR2GRAY)
current_frame=current_frame.astype(np.float32)
z=cv2.getRectSubPix(current_frame,(int(round(2*self.w)),int(round(2*self.h))),self._center)/255-0.5
z=z*self._window
self.z=z
responses=self._detection(self.alphaf,self.x,z)
if vis is True:
self.score=responses
curr=np.unravel_index(np.argmax(responses,axis=None),responses.shape)
dy=curr[0]-self._init_response_center[0]
dx=curr[1]-self._init_response_center[1]
x_c, y_c = self._center
x_c -= dx
y_c -= dy
self._center = (x_c, y_c)
new_x=cv2.getRectSubPix(current_frame,(2*self.w,2*self.h),self._center)/255-0.5
new_x=new_x*self._window
self.alphaf=self.interp_factor*self._training(new_x,self.y)+(1-self.interp_factor)*self.alphaf
self.x=self.interp_factor*new_x+(1-self.interp_factor)*self.x
return [self._center[0]-self.w/2,self._center[1]-self.h/2,self.w,self.h]
def _dgk(self, x1, x2):
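        # Dense Gaussian kernel between all cyclic shifts of x1 and x2, computed
        # via FFT cross-correlation:
        #   k = exp(-(||x1||^2 + ||x2||^2 - 2*F^-1(F(x1) .* conj(F(x2)))) / (sigma^2 * N))
        # with negative distances clipped to zero for numerical safety.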
c = np.fft.fftshift(ifft2(fft2(x1)* np.conj(fft2(x2))))
d = np.dot(x1.flatten().conj(), x1.flatten()) + np.dot(x2.flatten().conj(), x2.flatten()) - 2 * c
k = np.exp(-1 / self.sigma ** 2 * np.clip(d,a_min=0,a_max=None) / np.size(x1))
return k
def _training(self, x, y):
k = self._dgk(x, x)
alphaf = fft2(y) / (fft2(k) + self.lambda_)
return alphaf
def _detection(self, alphaf, x, z):
k = self._dgk(x, z)
responses = np.real(ifft2(alphaf * fft2(k)))
return responses
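# Minimal usage sketch (not part of the original implementation; the synthetic
# frames and the initial bounding box below are illustrative assumptions, in
# practice the frames would come from a video capture):
if __name__ == '__main__':
    frames = [np.random.randint(0, 255, (240, 320, 3), dtype=np.uint8)
              for _ in range(5)]
    tracker = CSK()
    tracker.init(frames[0], bbox=(100, 80, 40, 40))  # (x, y, w, h)
    for idx, frame in enumerate(frames[1:], start=1):
        x, y, w, h = tracker.update(frame, idx)
        print('frame {}: bbox=({:.1f}, {:.1f}, {:.1f}, {:.1f})'.format(idx, x, y, w, h))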
|
py | 1a4d0c0acb7361922a49a1548c08887fff49174e | from io import BytesIO
from PIL import Image, ImageDraw
from flask import send_file
from utils.endpoint import Endpoint, setup
from utils.textutils import auto_text_size, render_text_with_emoji
@setup
class KnowYourLocation(Endpoint):
params = ['text']
def generate(self, avatars, text, usernames, kwargs):
base = Image.open(self.assets.get('assets/knowyourlocation/knowyourlocation.bmp')).convert('RGBA')
# We need a text layer here for the rotation
canv = ImageDraw.Draw(base)
text = text.split(', ')
if len(text) != 2:
text = ["Separate the items with a", "comma followed by a space"]
top, bottom = text
top_font, top_text = auto_text_size(top, self.assets.get_font('assets/fonts/sans.ttf'), 630)
bottom_font, bottom_text = auto_text_size(bottom,
self.assets.get_font('assets/fonts/sans.ttf'),
539)
render_text_with_emoji(base, canv, (64, 131), top_text, top_font, 'black')
render_text_with_emoji(base, canv, (120, 450), bottom_text, bottom_font, 'black')
base = base.convert('RGB')
b = BytesIO()
base.save(b, format='jpeg')
b.seek(0)
return send_file(b, mimetype='image/jpeg')
|
py | 1a4d0c661fa478bb6df23b17b0587826faa312c3 | #!/Users/atreya/Desktop/junction-2020-wip/.venv/bin/python3
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
|
py | 1a4d0de0468bd9722a8fb5dd996896f4573baf4a | # Copyright 2017 The Wallaroo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import errno
import io
import logging
import threading
import time
import socket
import struct
from .errors import TimeoutError
from .logger import INFO2
from .stoppable_thread import StoppableThread
from wallaroo.experimental.connectors import (BaseIter,
BaseSource,
MultiSourceConnector)
try:
basestring
except:
basestring = (str, bytes)
class SingleSocketReceiver(StoppableThread):
"""
Read length or newline encoded data from a socket and append it to an
accumulator list.
Multiple SingleSocketReceivers may write to the same accumulator safely,
so long as they perform atomic writes (e.g. each append adds a single,
complete entry).
"""
__base_name__ = 'SingleSocketReceiver'
def __init__(self, sock, accumulator, mode='framed', header_fmt='>I',
name=None):
super(SingleSocketReceiver, self).__init__()
self.sock = sock
self.accumulator = accumulator
self.mode = mode
self.header_fmt = header_fmt
self.header_length = struct.calcsize(self.header_fmt)
if name:
self.name = '{}:{}:{}'.format(self.__base_name__, name,
sock.fileno())
else:
self.name = '{}:{}'.format(self.__base_name__, sock.fileno())
def try_recv(self, bs, flags=0):
"""
Try to to run `sock.recv(bs)` and return None if error
"""
try:
return self.sock.recv(bs, flags)
except:
return None
def append(self, bs):
if self.mode == 'framed':
self.accumulator.append(bs)
else:
self.accumulator.append(bs + b'\n')
def run(self):
if self.mode == 'framed':
self.run_framed()
else:
self.run_newlines()
def run_newlines(self):
data = []
while not self.stopped():
buf = self.try_recv(1024)
if not buf:
self.stop()
if data:
self.append(b''.join(data))
break
# We must be careful not to accidentally join two separate lines
# nor split a line
split = buf.split(b'\n') # '\n' show as '' in list after split
s0 = split.pop(0)
if s0:
if data:
data.append(s0)
self.append(b''.join(data))
data = []
else:
self.append(s0)
else:
# s0 is '', so first line is a '\n', and overflow is a
# complete message if it isn't empty
if data:
self.append(b''.join(data))
data = []
for s in split[:-1]:
self.append(s)
if split: # not an empty list
if split[-1]: # not an empty string, i.e. it wasn't a '\n'
data.append(split[-1])
time.sleep(0.000001)
def run_framed(self):
while not self.stopped():
header = self.try_recv(self.header_length, socket.MSG_WAITALL)
if not header:
self.stop()
continue
expect = struct.unpack(self.header_fmt, header)[0]
data = self.try_recv(expect, socket.MSG_WAITALL)
if not data:
self.stop()
else:
self.append(b''.join((header, data)))
time.sleep(0.000001)
def stop(self, *args, **kwargs):
super(self.__class__, self).stop(*args, **kwargs)
self.sock.close()
class MultiClientStreamView(object):
def __init__(self, initial_streams, blocking=True):
self.streams = {s.name: s.accumulator for s in initial_streams}
self.positions = {s.name: 0 for s in initial_streams}
self.keys = list(self.positions.keys())
self.key_position = 0
self.blocking = blocking
def add_stream(self, stream):
if stream.name in self.streams:
raise KeyError("Stream {} already in view!".format(stream.name))
self.streams[stream.name] = stream.accumulator
self.positions[stream.name] = 0
self.keys.append(stream.name)
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
# sleep condition
origin = self.key_position
while True:
# get current key
cur = self.keys[self.key_position]
# set key for next iteration
self.key_position = (self.key_position + 1) % len(self.keys)
# Can we read from current key?
if self.positions[cur] < len(self.streams[cur]):
# read next value
val = self.streams[cur][self.positions[cur]]
# Increment position
self.positions[cur] += 1
return val
elif self.key_position == origin:
if self.blocking:
# sleep after a full round on all keys produces no value
time.sleep(0.001)
else:
time.sleep(0.001)
return None
# implicit: continue
class TCPReceiver(StoppableThread):
"""
Listen on a (host,port) pair and write any incoming data to an accumulator.
If `port` is 0, an available port will be chosen by the operation system.
`get_connection_info` may be used to obtain the (host, port) pair after
`start()` is called.
`max_connections` specifies the number of total concurrent connections
supported.
    `mode` specifies how the receiver handles parsing the network stream
into records. `'newlines'` will split on newlines, and `'framed'` will
use a length-encoded framing, along with the `header_fmt` value (default
mode is `'framed'` with `header_fmt='>I'`).
You can read any data saved to the accumulator (a list) at any time
by reading the `data` attribute of the receiver, although this attribute
is only guaranteed to stop growing after `stop()` has been called.
"""
__base_name__ = 'TCPReceiver'
def __init__(self, host, port=0, max_connections=1000, mode='framed',
split_streams=False, header_fmt='>I'):
"""
Listen on a (host, port) pair for up to max_connections connections.
Each connection is handled by a separate client thread.
"""
super(TCPReceiver, self).__init__()
self.host = host
self.port = port
self.address = '{}.{}'.format(host, port)
self.max_connections = max_connections
self.mode = mode
self.split_streams = split_streams
self.header_fmt = header_fmt
self.header_length = struct.calcsize(self.header_fmt)
# use an in-memory byte buffer
self.data = {}
# Create a socket and start listening
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.clients = []
self.err = None
self.event = threading.Event()
self.start_time = None
self.views = []
def __len__(self):
return sum(map(len, self.data.values()))
def bytes_received(self):
        return sum(sum(map(len, acc)) for acc in self.data.values())
def get_connection_info(self, timeout=10):
is_connected = self.event.wait(timeout)
if not is_connected:
raise TimeoutError("{} Couldn't get connection info after {}"
" seconds".format(self.__base_name__, timeout))
return self.sock.getsockname()
def run(self):
self.start_time = datetime.datetime.now()
try:
self.sock.bind((self.host, self.port))
self.sock.listen(self.max_connections)
self.host, self.port = self.sock.getsockname()
self.event.set()
while not self.stopped():
try:
(clientsocket, address) = self.sock.accept()
except Exception as err:
try:
if self.stopped():
break
else:
raise err
except OSError as err:
if err.errno == errno.ECONNABORTED:
# [ECONNABORTED] A connection arrived, but it was
# closed while waiting on the listen queue.
# This happens on macOS during normal
# harness shutdown.
return
else:
logging.error("socket accept errno {}"
.format(err.errno))
self.err = err
raise
if self.split_streams:
# Use a counter to identify unique streams
client_accumulator = self.data.setdefault(len(self.data),
[])
else:
# use * to identify the "everything" stream
client_accumulator = self.data.setdefault('*', [])
cl = SingleSocketReceiver(clientsocket,
client_accumulator,
self.mode,
self.header_fmt,
name='{}-{}'.format(
self.__base_name__,
len(self.clients)))
logging.debug("{}:{} accepting connection from ({}, {}) on "
"port {}."
.format(self.__base_name__, self.name, self.host,
self.port, address[1]))
self.clients.append(cl)
if self.views:
for v in self.views:
v.add_stream(cl)
cl.start()
except Exception as err:
self.err = err
raise
def stop(self, *args, **kwargs):
if not self.stopped():
super(TCPReceiver, self).stop(*args, **kwargs)
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError as err:
if err.errno == errno.ENOTCONN:
# [ENOTCONN] Connection is already closed or unopened
# and can't be shutdown.
pass
else:
raise
self.sock.close()
for cl in self.clients:
cl.stop()
def view(self, blocking=True):
view = MultiClientStreamView(self.clients, blocking=blocking)
self.views.append(view)
return view
def save(self, path):
files = []
if self.split_streams:
# Save streams separately
for stream, data in self.data.items():
base, suffix = path.rsplit('.', 1)
new_path = '{}.{}.{}'.format(base, stream, suffix)
logging.debug("Saving stream {} to path {}".format(
stream, new_path))
with open(new_path, 'wb') as f:
files.append(new_path)
for item in data:
f.write(item)
f.flush()
else:
# only have stream '*' to save
logging.debug("Saving stream * to path {}".format(path))
with open(path, 'wb') as f:
files.append(path)
for item in self.data['*']:
f.write(item)
f.flush()
return files
class Metrics(TCPReceiver):
__base_name__ = 'Metrics'
class Sink(TCPReceiver):
__base_name__ = 'Sink'
class Sender(StoppableThread):
"""
Send length framed data to a destination (addr).
`address` is the full address in the host:port format
`reader` is a Reader instance
`batch_size` denotes how many records to send at once (default=1)
`interval` denotes the minimum delay between transmissions, in seconds
(default=0.001)
`header_length` denotes the byte length of the length header
`header_fmt` is the format to use for encoding the length using
`struct.pack`
`reconnect` is a boolean denoting whether sender should attempt to
reconnect after a connection is lost.
"""
def __init__(self, address, reader, batch_size=1, interval=0.001,
header_fmt='>I', reconnect=False):
logging.info("Sender({address}, {reader}, {batch_size}, {interval},"
" {header_fmt}, {reconnect}) created".format(
address=address, reader=reader, batch_size=batch_size,
interval=interval, header_fmt=header_fmt,
reconnect=reconnect))
super(Sender, self).__init__()
self.daemon = True
self.reader = reader
self.batch_size = batch_size
self.batch = []
self.interval = interval
self.header_fmt = header_fmt
self.header_length = struct.calcsize(self.header_fmt)
self.address = address
(host, port) = address.split(":")
self.host = host
self.port = int(port)
self.name = 'Sender'
self.error = None
self._bytes_sent = 0
self.reconnect = reconnect
self.pause_event = threading.Event()
self.data = []
self.start_time = None
def pause(self):
self.pause_event.set()
def paused(self):
return self.pause_event.is_set()
def resume(self):
self.pause_event.clear()
def send(self, bs):
try:
self.sock.sendall(bs)
except OSError as err:
if err.errno == 104 or err.errno == 54:
# ECONNRESET on Linux or macOS, respectively
is_econnreset = True
else:
is_econnreset = False
logging.info("socket errno {} ECONNRESET {} stopped {}"
.format(err.errno, is_econnreset, self.stopped()))
self.data.append(bs)
self._bytes_sent += len(bs)
def bytes_sent(self):
return self._bytes_sent
def batch_append(self, bs):
self.batch.append(bs)
def batch_send(self):
if len(self.batch) >= self.batch_size:
self.batch_send_final()
time.sleep(self.interval)
def batch_send_final(self):
if self.batch:
self.send(b''.join(self.batch))
self.batch = []
def run(self):
self.start_time = datetime.datetime.now()
while not self.stopped():
try:
logging.info("Sender connecting to ({}, {})."
.format(self.host, self.port))
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
while not self.stopped():
while self.paused():
# make sure to empty the send buffer before
#entering pause state!
self.batch_send_final()
time.sleep(0.001)
header = self.reader.read(self.header_length)
if not header:
self.maybe_stop()
break
expect = struct.unpack(self.header_fmt, header)[0]
body = self.reader.read(expect)
if not body:
self.maybe_stop()
break
self.batch_append(header + body)
self.batch_send()
time.sleep(0.000000001)
self.batch_send_final()
self.sock.close()
except KeyboardInterrupt:
logging.info("KeyboardInterrupt received.")
self.stop()
break
except Exception as err:
self.error = err
logging.error(err)
if not self.reconnect:
break
if not self.stopped():
logging.info("Waiting 1 second before retrying...")
time.sleep(1)
self.sock.close()
def maybe_stop(self):
if not self.batch:
self.stop()
def stop(self, *args, **kwargs):
if not self.stopped():
logging.log(INFO2, "Sender received stop instruction.")
super(Sender, self).stop(*args, **kwargs)
if self.batch:
logging.warning("Sender stopped, but send buffer size is {}"
.format(len(self.batch)))
def last_sent(self):
if isinstance(self.reader.gen, MultiSequenceGenerator):
return self.reader.gen.last_sent()
else:
raise ValueError("Can only use last_sent on a sender with "
"a MultiSequenceGenerator, or an ALOSender.")
class NoNonzeroError(ValueError):
pass
def first_nonzero_index(seq):
idx = 0
for item in seq:
if item == 0:
idx += 1
else:
return idx
else:
raise NoNonzeroError("No nonzero values found in list")
class Sequence(object):
def __init__(self, index, val=0):
self.index = '{index:07d}'.format(index = index)
self.key = self.index.encode()
self.val = val
def __next__(self):
self.val += 1
return (self.key, self.val)
def __iter__(self):
return self
def next(self):
return self.__next__()
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
class MultiSequenceGenerator(object):
"""
A growable collection of sequence generators.
- Each new generator has its own partition
- Messages are emitted in a round-robin fashion over the generator list
- When a new generator joins, it takes over all new messages until it
catches up
- At stoppage time, all generators are allowed to reach the same final
value
"""
def __init__(self, base_index=0, initial_partitions=1, base_value=0):
self._base_value = base_value
self._next_index = base_index + initial_partitions
self.seqs = [Sequence(x, self._base_value)
for x in range(base_index, self._next_index)]
# self.seqs stores the last value sent for each sequence
self._idx = 0 # the idx of the last sequence sent
self._remaining = []
self.lock = threading.Lock()
def format_value(self, value, partition):
return struct.pack('>IQ{}s'.format(len(partition)), 8+len(partition),
value, partition)
def _next_value_(self):
# Normal operation next value: round robin through the sets
if self._idx >= len(self.seqs):
self._idx = 0
next_seq = self.seqs[self._idx]
self._idx += 1
return next(next_seq)
def _next_catchup_value(self):
# After stop() was called: all sets catch up to current max
try:
idx = first_nonzero_index(self._remaining)
next_seq = self.seqs[idx]
self._remaining[idx] -= 1
return next(next_seq)
except NoNonzeroError:
# reset self._remaining so it can be reused
if not self.max_val:
self._remaining = []
logging.debug("MultiSequenceGenerator: Stop condition "
"reached. Final values are: {}".format(
self.seqs))
self.throw()
def add_sequence(self):
if not self._remaining:
logging.debug("MultiSequenceGenerator: adding new sequence")
self.seqs.append(Sequence(self._next_index, self._base_value))
self._next_index += 1
def stop(self):
logging.info("MultiSequenceGenerator: stop called")
logging.debug("seqs are: {}".format(self.seqs))
with self.lock:
self.max_val = max([seq.val for seq in self.seqs])
self._remaining = [self.max_val - seq.val for seq in self.seqs]
logging.debug("_remaining: {}".format(self._remaining))
def last_sent(self):
return [('{}'.format(key), val) for (key,val) in
[(seq.index, seq.val) for seq in self.seqs]]
def send(self, ignored_arg):
with self.lock:
if self._remaining:
idx, val = self._next_catchup_value()
else:
idx, val = self._next_value_()
return self.format_value(val, idx)
def throw(self, type=None, value=None, traceback=None):
raise StopIteration
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
return self.send(None)
def close(self):
"""Raise GeneratorExit inside generator.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError("generator ignored GeneratorExit")
def sequence_generator(stop=1000, start=0, header_fmt='>I', partition=''):
"""
Generate a sequence of integers, encoded as big-endian U64.
`stop` denotes the maximum value of the sequence (inclusive)
`start` denotes the starting value of the sequence (exclusive)
`header_length` denotes the byte length of the length header
`header_fmt` is the format to use for encoding the length using
`struct.pack`
`partition` is a string representing the optional partition key. It is
empty by default.
"""
partition = partition.encode()
size = 8 + len(partition)
fmt = '>Q{}s'.format(len(partition)) if partition else '>Q'
for x in range(start+1, stop+1):
yield struct.pack(header_fmt, size)
if partition:
yield struct.pack(fmt, x, partition)
else:
yield struct.pack(fmt, x)
def iter_generator(items,
to_bytes=lambda s: s.encode()
if isinstance(s, basestring) else str(s).encode(),
header_fmt='>I',
on_next=None):
"""
Generate a sequence of length encoded binary records from an iterator.
`items` is the iterator of items to encode
`to_bytes` is a function for converting items to a bytes
(default:`lambda s: s.encode() if isinstance(s, basestring) else
str(s).encode()`)
`header_fmt` is the format to use for encoding the length using
`struct.pack`
"""
for val in items:
if on_next:
on_next(val)
bs = to_bytes(val)
yield struct.pack(header_fmt, len(bs))
yield bs
def files_generator(files, mode='framed', header_fmt='>I', on_next=None):
"""
Generate a sequence of binary data stubs from a set of files.
- `files`: either a single filepath or a list of filepaths.
The same filepath may be provided multiple times, in which case it will
be read that many times from start to finish.
- `mode`: 'framed' or 'newlines'. If 'framed' is used, `header_fmt` is
used to determine how many bytes to read each time. Default: 'framed'
- `header_fmt`: the format of the length encoding header used in the files
Default: '>I'
"""
if isinstance(files, basestring):
files = [files]
for path in files:
if mode == 'newlines':
for l in newline_file_generator(path):
if on_next:
on_next(l)
yield l
elif mode == 'framed':
for l in framed_file_generator(path, header_fmt):
if on_next:
on_next(l)
yield l
else:
raise ValueError("`mode` must be either 'framed' or 'newlines'")
def newline_file_generator(filepath, header_fmt='>I', on_next=None):
"""
Generate length-encoded strings from a newline-delimited file.
"""
with open(filepath, 'rb') as f:
f.seek(0, 2)
fin = f.tell()
f.seek(0)
while f.tell() < fin:
o = f.readline().strip(b'\n')
if o:
if on_next:
on_next(o)
yield struct.pack(header_fmt, len(o))
yield o
def framed_file_generator(filepath, header_fmt='>I', on_next=None):
"""
Generate length encoded records from a length-framed binary file.
"""
header_length = struct.calcsize(header_fmt)
with open(filepath, 'rb') as f:
while True:
header = f.read(header_length)
if not header:
break
expect = struct.unpack(header_fmt, header)[0]
body = f.read(expect)
if not body:
break
if on_next:
on_next(header + body)
yield header
yield body
class Reader(object):
"""
A BufferedReader interface over a bytes generator
"""
def __init__(self, generator):
self.gen = generator
self.overflow = b''
def read(self, num):
remaining = num
out = io.BufferedWriter(io.BytesIO())
remaining -= out.write(self.overflow)
while remaining > 0:
try:
remaining -= out.write(next(self.gen))
except StopIteration:
break
# first num bytes go to return, remainder to overflow
out.seek(0)
r = out.raw.read(num)
self.overflow = out.raw.read()
return r
class ALOSequenceGenerator(BaseIter, BaseSource):
"""
A sequence generator with a resettable position.
Starts at 1, and stops aftering sending `stop`.
Usage: `ALOSequenceGenerator(partition, stop=1000, data=None)`
if `data` is a list, data generated is appended to it in order
as (position, value) tuples.
"""
def __init__(self, key, stop=None, start=0):
self.partition = key
self.name = key.encode()
self.key = key.encode()
self.position = start
self._stop = stop
self.start = start
self.stopped = False
self.paused = False
def __str__(self):
return ("ALOSequenceGenerator(partition: {}, stopped: {}, point_of_ref: {})"
.format(self.name, self.stopped, self.point_of_ref()))
def point_of_ref(self):
return self.position
def reset(self, pos=None):
if pos is None:
pos = self.start
self.position = pos
def __next__(self):
# This has to be before the increment, otherwise point_of_ref()
# doesn't return the previous position!
if self.stopped:
raise StopIteration
if self._stop is not None:
if self.position >= self._stop:
raise StopIteration
if self.paused:
return (None, self.position)
self.position += 1
val, pos, key = (self.position, self.position, self.key)
payload = struct.pack('>Q{}s'.format(len(key)), val, key)
return (payload, pos)
def close(self):
self.closed = True
def stop(self):
self.stopped = True
def pause(self):
self.paused = True
def resume(self):
self.paused = False
class ALOSender(StoppableThread):
"""
A wrapper for MultiSourceConnector to look like a regular TCP Sender
"""
def __init__(self, sources, version, cookie, program_name, instance_name,
addr):
super(ALOSender, self).__init__()
host, port = addr.split(':')
port = int(port)
self.client = client = MultiSourceConnector(
version,
cookie,
program_name,
instance_name,
host, port)
self.name = "ALOSender_{}".format("-".join(
[source.partition for source in sources]))
self.sources = sources
logging.debug("ALO: sources = {}".format(sources))
self.data = []
self.client.data = self.data
for source in self.sources:
source.data = self.data
self.host = host
self.port = port
self.start_time = None
self.error = None
self.batch = [] # for compatibility with Sender during validations
def run(self):
self.start_time = datetime.datetime.now()
self.client.connect()
for source in self.sources:
self.client.add_source(source)
self.error = self.client.join()
def stop(self, error=None):
logging.debug("ALOSender stop")
for source in self.sources:
logging.debug("source to stop: {}".format(source))
source.stop()
if error is not None:
self.client.shutdown(error=error)
def pause(self):
logging.debug("ALOSender pause: pausing {} sources"
.format(len(self.sources)))
for source in self.sources:
source.pause()
def resume(self):
logging.debug("ALOSender resume: resuming {} sources"
.format(len(self.sources)))
for source in self.sources:
source.resume()
def last_sent(self):
return [(source.partition, source.position) for source in self.sources]
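# End-to-end usage sketch (illustrative, not part of the original harness): a
# framed Sink paired with a Sender that streams a bounded integer sequence.
if __name__ == '__main__':
    sink = Sink('127.0.0.1')
    sink.start()
    host, port = sink.get_connection_info()
    sender = Sender('{}:{}'.format(host, port),
                    Reader(sequence_generator(stop=100)),
                    batch_size=10, interval=0.001)
    sender.start()
    sender.join()
    time.sleep(0.5)  # give the receiver a moment to drain the socket
    sink.stop()
    print('sink received {} records'.format(len(sink)))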
|
py | 1a4d0f443ab4af01fca9b80a22514075b7d2d9a8 | import unittest
from src.api import Settings
class SettingsTestCase(unittest.TestCase):
"""Tests the Settings class."""
def setUp(self):
self.settings = Settings(800, 600, 60, "3D Engine", use_antialiasing=False)
def test_keyword_arguments(self):
"""Check that the keyword arguments are being parsed correctly."""
self.assertTrue(hasattr(self.settings, 'use_antialiasing'))
def test_as_dict(self):
"""Check that the as_dict() method is working correctly."""
self.assertEqual(self.settings.as_dict(), self.settings.__dict__)
def tearDown(self):
del self.settings
if __name__ == '__main__':
unittest.main()
|
py | 1a4d104171184cbb22ce921680fcc95e82955c20 | #!/usr/bin/env python
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Update the prebuilt clang from the build server."""
from __future__ import print_function
import argparse
import inspect
import os
import shutil
import subprocess
import sys
THIS_DIR = os.path.realpath(os.path.dirname(__file__))
ANDROID_DIR = os.path.realpath(os.path.join(THIS_DIR, '../..'))
BRANCH = 'aosp-llvm'
def android_path(*args):
return os.path.join(ANDROID_DIR, *args)
class ArgParser(argparse.ArgumentParser):
def __init__(self):
super(ArgParser, self).__init__(
description=inspect.getdoc(sys.modules[__name__]))
self.add_argument(
'build', metavar='BUILD',
help='Build number to pull from the build server.')
self.add_argument(
'-b', '--bug', type=int,
help='Bug to reference in commit message.')
self.add_argument(
'--use-current-branch', action='store_true',
help='Do not repo start a new branch for the update.')
def host_to_build_host(host):
"""Gets the build host name for an NDK host tag.
The Windows builds are done from Linux.
"""
return {
'darwin': 'mac',
'linux': 'linux',
'windows': 'linux',
}[host]
def build_name(host):
"""Gets the build name for a given host.
The build name is either "linux" or "darwin", with any Windows builds
coming from "linux".
"""
return {
'darwin': 'darwin',
'linux': 'linux',
'windows': 'linux',
}[host]
def package_name(build_number, host):
"""Returns the file name for a given package configuration.
>>> package_name('1234', 'linux')
'clang-1234-linux-x86.tar.bz2'
"""
return 'clang-{}-{}-x86.tar.bz2'.format(build_number, host)
def download_build(host, build_number, download_dir):
url_base = 'https://android-build-uber.corp.google.com'
path = 'builds/{branch}-{build_host}-{build_name}/{build_num}'.format(
branch=BRANCH,
build_host=host_to_build_host(host),
build_name=build_name(host),
build_num=build_number)
pkg_name = package_name(build_number, host)
url = '{}/{}/{}'.format(url_base, path, pkg_name)
TIMEOUT = '60' # In seconds.
out_file_path = os.path.join(download_dir, pkg_name)
with open(out_file_path, 'w') as out_file:
print('Downloading {} to {}'.format(url, out_file_path))
subprocess.check_call(
['sso_client', '--location', '--request_timeout', TIMEOUT, url],
stdout=out_file)
return out_file_path
def extract_package(package, install_dir):
cmd = ['tar', 'xf', package, '-C', install_dir]
print('Extracting {}...'.format(package))
subprocess.check_call(cmd)
def update_clang(host, build_number, use_current_branch, download_dir, bug):
host_tag = host + '-x86'
prebuilt_dir = android_path('prebuilts/clang/host', host_tag)
os.chdir(prebuilt_dir)
if not use_current_branch:
subprocess.check_call(
['repo', 'start', 'update-clang-{}'.format(build_number), '.'])
package = download_build(host, build_number, download_dir)
install_subdir = 'clang-' + build_number
extract_package(package, prebuilt_dir)
print('Adding files to index...')
subprocess.check_call(['git', 'add', install_subdir])
version_file_path = os.path.join(install_subdir, 'AndroidVersion.txt')
with open(version_file_path) as version_file:
version = version_file.read().strip()
print('Committing update...')
message_lines = [
'Update prebuilt Clang to build {}.'.format(build_number),
'',
'Built from version {}.'.format(version),
]
if bug is not None:
message_lines.append('')
message_lines.append('Bug: http://b/{}'.format(bug))
message = '\n'.join(message_lines)
subprocess.check_call(['git', 'commit', '-m', message])
def main():
args = ArgParser().parse_args()
download_dir = os.path.realpath('.download')
if os.path.isdir(download_dir):
shutil.rmtree(download_dir)
os.makedirs(download_dir)
try:
hosts = ('darwin', 'linux', 'windows')
for host in hosts:
update_clang(host, args.build, args.use_current_branch,
download_dir, args.bug)
finally:
shutil.rmtree(download_dir)
if __name__ == '__main__':
main()
|
py | 1a4d108f9d785fceb66f992314037080555a9d09 | """
WSGI config for coolnsight project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'coolnsight.settings')
application = get_wsgi_application()
|
py | 1a4d11adcfa468423a2e345aa7aa2cfa0f4a862e | # Generated by Django 3.2.6 on 2021-09-05 17:33
import django.contrib.postgres.fields
from django.db import migrations, models
import django.db.models.deletion
import jobs.models
import moni.utils.funcs
class Migration(migrations.Migration):
initial = True
dependencies = [
('notifiers', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Jobs',
fields=[
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('uuid', models.CharField(default=moni.utils.funcs.get_str_uuid, max_length=40, primary_key=True, serialize=False)),
('url', models.URLField(unique=True)),
('title', models.CharField(max_length=50)),
('state', models.BooleanField(default=True)),
('headers', models.JSONField(default=dict)),
('verify_ssl', models.BooleanField(default=True)),
('interval', models.PositiveIntegerField(default=15)),
('success_status', django.contrib.postgres.fields.ArrayField(base_field=models.PositiveIntegerField(), default=jobs.models.default_success_status, size=None)),
('check_redirect', models.BooleanField(default=True)),
('notifiers', models.ManyToManyField(db_column='uuid', related_name='jobs_notification', to='notifiers.Notifiers')),
],
options={
'verbose_name': 'Jobs',
'verbose_name_plural': 'Jobs',
},
),
migrations.CreateModel(
name='JobsHistory',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timestamp', models.DateTimeField(auto_now_add=True)),
('status_code', models.IntegerField(null=True)),
('success', models.BooleanField()),
('response_time', models.FloatField(null=True)),
('error', models.TextField(null=True)),
('uuid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='jobs_history_uuid', to='jobs.jobs')),
],
options={
'verbose_name': 'Jobs History',
'verbose_name_plural': 'Jobs History',
},
),
migrations.AddIndex(
model_name='jobshistory',
index=models.Index(fields=['uuid'], name='jobs_jobshi_uuid_id_a30d59_idx'),
),
migrations.AddIndex(
model_name='jobshistory',
index=models.Index(fields=['-timestamp', 'uuid'], name='jobs_jobshi_timesta_69c060_idx'),
),
migrations.AddIndex(
model_name='jobs',
index=models.Index(fields=['uuid'], name='jobs_jobs_uuid_076788_idx'),
),
]
|
py | 1a4d12fec3732bb42630c32ad0c46b4314b7f06c | # Generated by Django 2.1.5 on 2019-03-27 16:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0002_remove_profile_following'),
]
operations = [
migrations.AddField(
model_name='profile',
name='activation_key',
field=models.CharField(blank=True, max_length=120, null=True),
),
]
|
py | 1a4d134fad33f577b3b10b20383a5a8013736124 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from models import Attraction, Country
@admin.register(Attraction)
class AttractionAdmin(admin.ModelAdmin):
list_display = ('name', 'score', 'country', 'review_summary')
admin.site.register(Country)
|
py | 1a4d13fb4e7b60bb88d700ac0a48cfd83cd5232f | from ..base import ShopifyResource
from ..resources import Metafield
from six.moves import urllib
import base64
import re
class Image(ShopifyResource):
_prefix_source = "/products/$product_id/"
@classmethod
def _prefix(cls, options={}):
product_id = options.get("product_id")
if product_id:
return "%s/products/%s" % (cls.site, product_id)
else:
return cls.site
def __getattr__(self, name):
if name in ["pico", "icon", "thumb", "small", "compact", "medium", "large", "grande", "original"]:
return re.sub(r"/(.*)\.(\w{2,4})", r"/\1_%s.\2" % (name), self.src)
else:
return super(Image, self).__getattr__(name)
def attach_image(self, data, filename=None):
self.attributes["attachment"] = base64.b64encode(data).decode()
if filename:
self.attributes["filename"] = filename
def metafields(self):
if self.is_new():
return []
query_params = {"metafield[owner_id]": self.id, "metafield[owner_resource]": "product_image"}
return Metafield.find(
from_="%s/metafields.json?%s" % (ShopifyResource.site, urllib.parse.urlencode(query_params))
)
def save(self):
if "product_id" not in self._prefix_options:
self._prefix_options["product_id"] = self.product_id
return super(ShopifyResource, self).save()
|
py | 1a4d1495aec7fb4bf4759ff5e580d3b892d1755c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CNN completely definable via command line arguments.
Provides create().
Author: Jan Schlüter
"""
import re
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import PickDictKey, PutDictKey, ReceptiveField
from .layers import (nonlinearity, SpatialLogMeanExp, Shift, Crop, Squeeze,
ShakeShake)
class Cat(nn.ModuleList):
"""
Modules applied to the same input and concatenated along the channels.
"""
def forward(self, x):
return torch.cat([module(x) for module in self], dim=1)
class Add(nn.ModuleList):
"""
Modules applied to the same input and then added up.
"""
def forward(self, x):
modules = iter(self)
first = next(modules)
return sum((module(x) for module in modules), first(x))
class Mul(nn.ModuleList):
"""
Modules applied to the same input and then multiplied.
"""
def forward(self, x):
modules = iter(self)
y = next(modules)(x)
for module in modules:
y = y * module(x)
return y
def custom_cnn(input_channels, specification, input_name='input',
output_name='output', default_nonlin='relu', batch_norm=False):
"""
Creates a CNN for the given number of input channels, with an architecture
defined as a comma-separated string of layer definitions. Supported layer
definitions are (with variables in <>, and optional parts in []):
- pad1d:<method>@<size>
- pad2d:<method>@<size>
- crop1d:<size>
- crop2d:<size>
- conv1d:<channels>@<size>[s<stride>][p<pad>][d<dilation>][g<groups>]
- conv2d:<channels>@<size0>x<size1>[s<stride>][p<pad>][d<dilation>][g<groups>]
    - pool1d:<method>@<size>[s<stride>][p<pad>][d<dilation>]
- pool2d:<method>@<size0>x<size1>[s<stride>][p<pad>][d<dilation>]
- globalpool1d:<method>
- globalpool2d:<method>
- globallmepool:<alpha>[t<trainable>][c<channelwise>][e<exponentiated>]
- bn1d
- bn2d
- groupnorm:<groups>
- dropout:<drop_probability>
- relu
- lrelu
- sigm
- swish
- mish
- bipol:<nonlin>
- shift:<amount>
- bypass (does nothing)
- squeeze:<dim>
- cat[layers1|layers2|...] (apply stacks to same input, then concat)
- add[layers1|layers2|...] (apply stacks to same input, then add)
- shake[layers1|layers2|...] (apply stacks to same input, then shake-shake)
If there is a batch normalization one or two layers after a convolution,
the convolution will not have a bias term.
"""
def read_layers(s):
"""
Yields all layer definitions (as separated by , | [ or ]) as tuples
of the definition string and the following delimiter.
"""
pos = 0
for match in re.finditer(r'[,|[\]]', s):
yield s[pos:match.start()], s[match.start():match.end()]
pos = match.end()
yield s[pos:], None
def read_size(s, t=int, expect_remainder=True):
"""
Read and parse a size (e.g., 1, 1x1, 1x1x1) at the beginning of `s`,
with elements of type `t`. If `expect_remainder`, returns the
remainder, otherwise tries to parse the complete `s` as a size.
"""
if expect_remainder:
# yes, we could use a precompiled regular expression...
p = next((i for i, c in enumerate(s) if c not in '0123456789x'),
len(s))
remainder = s[p:]
s = s[:p]
size = tuple(map(t, s.split('x')))
if len(size) == 1:
size = size[0]
if expect_remainder:
return size, remainder
else:
return size
def size_string(size):
"""
Convert a size integer or tuple back into its string form.
"""
try:
return 'x'.join(map(str, size))
except TypeError:
return str(size)
def read_extra_sizes(s, prefixes, t=int):
"""
Read and parse any extra size definitions prefixed by any of the
allowed prefixes, and returns them as a dictionary. If `prefixes` is
a dictionary, the prefixes (keys) will be translated to the expanded
names (values) in the returned dictionary. Values will be converted
from strings to `t`.
"""
if not isinstance(prefixes, dict):
prefixes = {prefix: prefix for prefix in prefixes}
result = {}
while s:
for prefix, return_key in prefixes.items():
if s.startswith(prefix):
size, s = read_size(s[len(prefix):], t)
result[return_key] = size
break
else:
raise ValueError("unrecognized part in layer definition: "
"%r" % s)
return result
stack = []
layers = []
if input_name:
layers = [PickDictKey(input_name)]
# track receptive field for the full network
receptive_field = ReceptiveField()
# split specification string into definition, delimiter tuples
specification = list(read_layers(specification))
# iterate over it (in a way that allows us to expand macro definitions)
while specification:
layer_def, delim = specification.pop(0)
layer_def = layer_def.split(':')
kind = layer_def[0]
if kind in ('pad1d', 'pad2d'):
method, size = layer_def[1].split('@')
size = read_size(size, expect_remainder=False)
cls = {'reflectpad1d': nn.ReflectionPad1d,
'reflectpad2d': nn.ReflectionPad2d}[method + kind]
layers.append(cls(size))
receptive_field *= ReceptiveField(padding=size)
elif kind in ('crop1d', 'crop2d'):
size = int(layer_def[1])
dimensionality = int(kind[-2])
layers.append(Crop(dimensionality, size))
receptive_field *= ReceptiveField(padding=-size)
elif kind in ('conv1d', 'conv2d'):
channels, remainder = layer_def[1].split('@')
channels = int(channels)
size, remainder = read_size(remainder)
params = dict(stride=1, padding=0, dilation=1, groups=1)
params.update(read_extra_sizes(
remainder, dict(s='stride', p='padding', d='dilation',
g='groups')))
cls = {'conv1d': nn.Conv1d, 'conv2d': nn.Conv2d}[kind]
layers.append(cls(input_channels, channels, size, **params))
input_channels = channels
# effective kernel size:
size = (np.array(size) - 1) * params['dilation'] + 1
receptive_field *= ReceptiveField(size, params['stride'],
params['padding'])
elif kind in ('pool1d', 'pool2d'):
method, size = layer_def[1].split('@')
size, remainder = read_size(size)
params = dict(stride=None, padding=0, dilation=1)
params.update(read_extra_sizes(
remainder, dict(s='stride', p='padding', d='dilation')))
cls = {'maxpool1d': nn.MaxPool1d, 'meanpool1d': nn.AvgPool1d,
'maxpool2d': nn.MaxPool2d, 'meanpool2d': nn.AvgPool2d}[method + kind]
layers.append(cls(size, **params))
# effective kernel size:
size = (np.array(size) - 1) * params['dilation'] + 1
if params['stride'] is None:
params['stride'] = size
receptive_field *= ReceptiveField(size, params['stride'],
params['padding'])
elif kind in ('globalpool1d', 'globalpool2d'):
method = layer_def[1]
cls = {'maxglobalpool1d': nn.AdaptiveMaxPool1d,
'meanglobalpool1d': nn.AdaptiveAvgPool1d,
'maxglobalpool2d': nn.AdaptiveMaxPool2d,
'meanglobalpool2d': nn.AdaptiveAvgPool2d}[method + kind]
layers.append(cls(output_size=1))
# we do not adjust the receptive field; it spans the whole input
elif kind == 'globallmepool':
alpha, remainder = read_size(layer_def[1], float)
params = read_extra_sizes(
remainder, dict(t='trainable', c='per_channel', e='exp'),
t=lambda s: bool(int(s)))
layers.append(SpatialLogMeanExp(alpha, in_channels=input_channels,
keepdim=True, **params))
# we do not adjust the receptive field; it spans the whole input
elif kind == 'bn1d':
if len(layers) >= 1 and hasattr(layers[-1], 'bias'):
layers[-1].register_parameter('bias', None)
            elif len(layers) >= 2 and hasattr(layers[-2], 'bias'):
layers[-2].register_parameter('bias', None)
layers.append(nn.BatchNorm1d(input_channels))
elif kind == 'bn2d':
if len(layers) >= 1 and hasattr(layers[-1], 'bias'):
layers[-1].register_parameter('bias', None)
elif len(layers) >= 2 and hasattr(layers[-2], 'bias'):
layers[-2].register_parameter('bias', None)
layers.append(nn.BatchNorm2d(input_channels))
elif kind == 'groupnorm':
groups = int(layer_def[1])
layers.append(nn.GroupNorm(groups, input_channels))
elif kind == 'dropout':
p = float(layer_def[1])
layers.append(nn.Dropout(p))
elif kind == 'squeeze':
dim = int(layer_def[1])
layers.append(Squeeze(dim))
elif kind == 'shift':
amount = float(layer_def[1])
layers.append(Shift(amount))
elif kind == 'bypass':
layers.append(nn.Identity())
elif kind == 'cat':
stack.append((layers, input_channels, receptive_field))
stack.append((Cat(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == 'add':
stack.append((layers, input_channels, receptive_field))
stack.append((Add(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == 'mul':
stack.append((layers, input_channels, receptive_field))
stack.append((Mul(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == 'shake':
stack.append((layers, input_channels, receptive_field))
stack.append((ShakeShake(), input_channels, receptive_field))
layers = []
receptive_field = ReceptiveField()
elif kind == '':
pass
elif kind == 'mbconv2d':
# mobile inverted bottleneck convolution layer from MobileNetV2
channels, remainder = layer_def[1].split('@')
channels = int(channels)
size, remainder = read_size(remainder)
params = dict(stride=1, dilation=1, groups=1, expansion=1,
size=size, channels=channels)
params.update(read_extra_sizes(
remainder, dict(s="stride", d="dilation", g="groups",
e="expansion")))
hidden_channels = int(input_channels * params['expansion'])
# define layers
macro = []
# 1x1 channel expansion
if hidden_channels != input_channels:
macro.append('conv2d:%d@1x1g%d' %
(hidden_channels, params['groups']))
if batch_norm:
macro.append('bn2d')
macro.append(default_nonlin)
# channelwise convolution
macro.append('conv2d:%d@%ss%sd%sg%d' %
(hidden_channels, size_string(size),
size_string(params['stride']),
size_string(params['dilation']),
hidden_channels))
if batch_norm:
macro.append('bn2d')
macro.append(default_nonlin)
# linear projection
macro.append('conv2d:%d@1x1g%d' % (channels, params['groups']))
# residual shortcut, if applicable
macro = ','.join(macro)
if params['stride'] == 1 and channels == input_channels:
crop = ((np.array(size) - 1) * params['dilation'] + 1) // 2
macro = 'add[%s|%s]' % ('crop2d:%d' % crop[0], macro)
# push to beginning of remaining layer specifications
specification[:0] = read_layers(macro)
elif kind == 'bipol':
layers.append(nonlinearity('bipol:' + layer_def[1]))
else:
try:
layers.append(nonlinearity(kind))
except KeyError:
raise ValueError('Unknown layer type "%s"' % kind)
if delim is not None and delim in '|]':
if isinstance(layers, list):
layers = nn.Sequential(*layers) if len(layers) > 1 else layers[0]
layers.receptive_field = receptive_field
layers.out_channels = input_channels
# append layers to Cat() or Add()
stack[-1][0].append(layers)
if delim == '|':
# reset input_channels to match input of Cat() or Add()
input_channels = stack[-1][1]
# we expect another set of layers
layers = []
receptive_field = ReceptiveField()
elif delim == ']':
# take the Cat() or Add() from the stack
layers, _, receptive_field = stack.pop()
# append it to what we were building before
stack[-1][0].append(layers)
# and continue there
if isinstance(layers, Cat):
input_channels = sum(path.out_channels for path in layers)
receptive_field *= sum(path.receptive_field for path in layers)
layers, _, _ = stack.pop()
if stack:
raise ValueError('There seems to be a missing "]" bracket.')
if output_name:
layers.append(PutDictKey(output_name))
if isinstance(layers, list):
layers = nn.Sequential(*layers)
layers.receptive_field = receptive_field
layers.out_channels = input_channels
return layers
def create(cfg, shapes, dtypes, num_classes):
"""
Instantiates a Model for the given data shapes and dtypes.
"""
input_channels = shapes['input'][0]
specification = cfg['model.arch']
num_outputs = 1 if num_classes == 2 else num_classes
specification = specification.replace('C', str(num_outputs))
input_name = cfg.get('model.input_name', 'input')
output_name = cfg.get('model.output_name', 'output')
return custom_cnn(input_channels, specification, input_name, output_name,
default_nonlin=cfg.get('model.nonlin', 'relu'),
batch_norm=cfg.get('model.batch_norm', False))
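# --- Usage sketch (not part of the original module; illustrative only) -------
# The specification string below is an arbitrary example of the mini-language
# documented in custom_cnn(), not one of the original configurations. With
# input_name/output_name disabled, the result is a plain nn.Sequential that
# maps a tensor to a tensor.
if __name__ == "__main__":
    net = custom_cnn(
        input_channels=1,
        specification='conv2d:16@3x3p1,relu,pool2d:max@2x2,'
                      'conv2d:32@3x3p1,relu,globalpool2d:mean',
        input_name=None, output_name=None)
    out = net(torch.randn(8, 1, 64, 64))
    print(out.shape)            # torch.Size([8, 32, 1, 1])
    print(net.out_channels)     # 32
    print(net.receptive_field)  # overall receptive field of the stack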
|
py | 1a4d157fb730eee97988d538eacd6b215cf22307 | from pprint import pprint
from atlassian import Crowd
import os
import logging
log = logging.getLogger()
log.setLevel(logging.DEBUG)
CROWD_URL = os.environ.get('CROWD_URL', 'http://localhost:8085/crowd')
CROWD_APPLICATION = os.environ.get('CROWD_APPLICATION', 'bamboo')
CROWD_APPLICATION_PASSWORD = os.environ.get('CROWD_APPLICATION_PASSWORD', 'admin')
crowd = Crowd(
url=CROWD_URL,
username=CROWD_APPLICATION,
password=CROWD_APPLICATION_PASSWORD)
user_details = crowd.user('xdfjklm')
pprint(user_details)
|
py | 1a4d15de4d08039d3270dd279ad43f4d22796214 | import os
def run(**args):
print('[*] In dir_lister module.')
files = os.listdir('.')
return str(files)
|
py | 1a4d15e1a348f8d3b2aeac23a8fe9068a9544e10 |
# coding: utf-8
# In[1]:
import autograd.numpy as np
import autograd.numpy.random as npr
npr.seed(0)
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib.gridspec import GridSpec
import seaborn as sns
sns.set_style("white")
sns.set_context("talk")
color_names = ["windows blue",
"red",
"amber",
"faded green",
"dusty purple",
"orange",
"clay",
"pink",
"greyish",
"mint",
"light cyan",
"steel blue",
"forest green",
"pastel purple",
"salmon",
"dark brown"]
colors = sns.xkcd_palette(color_names)
import ssm
from ssm.variational import SLDSMeanFieldVariationalPosterior, SLDSTriDiagVariationalPosterior
from ssm.util import random_rotation, find_permutation
# Specify whether or not to save figures
save_figures = True
# In[2]:
# Set the parameters of the HMM
T = 200 # number of time bins
K = 5 # number of discrete states
D = 2 # number of latent dimensions
N = 10 # number of observed dimensions
# In[3]:
# Make an LDS with the somewhat interesting dynamics parameters
true_lds = ssm.LDS(N, D, emissions="gaussian")
A0 = .99 * random_rotation(D, theta=np.pi/20)
# S = (1 + 3 * npr.rand(D))
S = np.arange(1, D+1)
R = np.linalg.svd(npr.randn(D, D))[0] * S
A = R.dot(A0).dot(np.linalg.inv(R))
b = npr.randn(D)
true_lds.dynamics.As[0] = A
true_lds.dynamics.bs[0] = b
_, x, y = true_lds.sample(T)
# In[4]:
# Plot the dynamics vector field
xmins = x.min(axis=0)
xmaxs = x.max(axis=0)
npts = 20
true_lds.dynamics.As[0] = A
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
XY = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
dx = XY.dot(A.T) + b - XY
plt.figure(figsize=(6, 6))
plt.quiver(XX, YY, dx[:,0], dx[:,1], color=colors[0])
plt.plot(x[:,0], x[:,1], '-k', lw=3)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Simulated Latent States")
plt.tight_layout()
if save_figures:
plt.savefig("lds_1.pdf")
# In[5]:
# Plot the dynamics vector field
plt.figure(figsize=(8, 6))
gs = GridSpec(2, 1, height_ratios=(1, N/D))
# Plot the continuous latent states
lim = abs(x).max()
plt.subplot(gs[0])
for d in range(D):
plt.plot(x[:, d] + lim * d, '-k')
plt.yticks(np.arange(D) * lim, ["$x_{}$".format(d+1) for d in range(D)])
plt.xticks([])
plt.xlim(0, T)
plt.title("Simulated Latent States")
lim = abs(y).max()
plt.subplot(gs[1])
for n in range(N):
plt.plot(y[:, n] - lim * n, '-k')
plt.yticks(-np.arange(N) * lim, ["$y_{{ {} }}$".format(n+1) for n in range(N)])
plt.xlabel("time")
plt.xlim(0, T)
plt.title("Simulated Observations")
plt.tight_layout()
if save_figures:
plt.savefig("lds_2.pdf")
# In[6]:
print("Fitting LDS with SVI")
# Create the model and initialize its parameters
lds = ssm.LDS(N, D, emissions="gaussian_orthog")
lds.initialize(y)
# Create a variational posterior
q_mf = SLDSMeanFieldVariationalPosterior(lds, y)
q_mf_elbos = lds.fit(q_mf, y, num_iters=1000, initialize=False)
# Get the posterior mean of the continuous states
q_mf_x = q_mf.mean[0]
# In[7]:
# Smooth the data under the variational posterior
q_mf_y = lds.smooth(q_mf_x, y)
# In[8]:
print("Fitting LDS with SVI using structured variational posterior")
lds = ssm.LDS(N, D, emissions="gaussian_orthog")
lds.initialize(y)
q_struct = SLDSTriDiagVariationalPosterior(lds, y)
q_struct_elbos = lds.fit(q_struct, y, num_iters=1000, initialize=False)
# Get the posterior mean of the continuous states
q_struct_x = q_struct.mean[0]
# Smooth the data under the variational posterior
q_struct_y = lds.smooth(q_struct_x, y)
# In[9]:
# Plot the ELBOs
plt.plot(q_mf_elbos, label="MF")
plt.plot(q_struct_elbos, label="LDS")
plt.xlabel("Iteration")
plt.ylabel("ELBO")
plt.legend()
# In[10]:
plt.figure(figsize=(8,4))
plt.plot(x + 4 * np.arange(D), '-k')
for d in range(D):
plt.plot(q_mf_x[:,d] + 4 * d, '-', color=colors[0], label="MF" if d==0 else None)
plt.plot(q_struct_x[:,d] + 4 * d, '-', color=colors[1], label="Struct" if d==0 else None)
plt.ylabel("$x$")
plt.legend()
# In[11]:
# Plot the smoothed observations
plt.figure(figsize=(8,4))
for n in range(N):
plt.plot(y[:, n] + 4 * n, '-k', label="True" if n == 0 else None)
plt.plot(q_mf_y[:, n] + 4 * n, '--', color=colors[0], label="MF" if n == 0 else None)
plt.plot(q_struct_y[:, n] + 4 * n, ':', color=colors[1], label="Struct" if n == 0 else None)
plt.legend()
plt.xlabel("time")
# # Fit an HMM to the LDS states
# In[13]:
from ssm.models import HMM
N_iters = 50
K = 15
hmm = ssm.HMM(K, D, observations="gaussian")
hmm_lls = hmm.fit(x, method="em", num_em_iters=N_iters)
z = hmm.most_likely_states(x)
# In[14]:
plt.plot(hmm_lls, label="EM")
plt.xlabel("EM Iteration")
plt.ylabel("Log Probability")
plt.legend(loc="lower right")
# In[15]:
# Plot the observation distributions
from hips.plotting.colormaps import white_to_color_cmap
xmins = x.min(axis=0)
xmaxs = x.max(axis=0)
npts = 100
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
data = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
input = np.zeros((data.shape[0], 0))
mask = np.ones_like(data, dtype=bool)
tag = None
lls = hmm.observations.log_likelihoods(data, input, mask, tag)
plt.figure(figsize=(6, 6))
for k in range(K):
plt.contour(XX, YY, np.exp(lls[:,k]).reshape(XX.shape),
cmap=white_to_color_cmap(colors[k % len(colors)]))
plt.plot(x[z==k, 0], x[z==k, 1], 'o', mfc=colors[k], mec='none', ms=4)
plt.plot(x[:,0], x[:,1], '-k', lw=2, alpha=.5)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Observation Distributions")
plt.tight_layout()
if save_figures:
plt.savefig("lds_3.pdf")
# In[16]:
# Simulate from the HMM fit
smpls = [hmm.sample(T-1, prefix=(z[:1], x[:1])) for _ in range(1)]
# In[17]:
plt.figure(figsize=(8, 6))
lim = abs(x).max()
for d in range(D):
plt.plot(x[:,d] - d * lim, '-k', lw=4)
for i, (_, x_smpl) in enumerate(smpls):
x_smpl = np.concatenate((x[:1], x_smpl))
plt.plot(x_smpl[:,d] - d*lim, '-', lw=1, color=colors[i])
plt.yticks(-np.arange(D) * lim, ["$x_{}$".format(d+1) for d in range(D)])
plt.xlabel("time")
plt.xlim(0, T)
plt.title("True LDS States and Fitted HMM Simulations")
plt.tight_layout()
if save_figures:
plt.savefig("lds_4.pdf")
# In[18]:
# Plot the observation distributions
from hips.plotting.colormaps import white_to_color_cmap
xmins = x.min(axis=0)
xmaxs = x.max(axis=0)
npts = 100
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
data = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
input = np.zeros((data.shape[0], 0))
mask = np.ones_like(data, dtype=bool)
tag = None
lls = hmm.observations.log_likelihoods(data, input, mask, tag)
plt.figure(figsize=(6, 6))
for k in range(K):
plt.contour(XX, YY, np.exp(lls[:,k]).reshape(XX.shape),
cmap=white_to_color_cmap(colors[k % len(colors)]))
plt.plot(x[z==k, 0], x[z==k, 1], 'o', mfc=colors[k], mec='none', ms=4)
plt.plot(x[:,0], x[:,1], '-k', lw=2, alpha=.5)
for i, (_, x_smpl) in enumerate(smpls):
x_smpl = np.concatenate((x[:1], x_smpl))
plt.plot(x_smpl[:,0], x_smpl[:,1], '-', lw=1, color=colors[i])
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Observation Distributions")
plt.tight_layout()
if save_figures:
plt.savefig("lds_5.pdf")
# # Simulate Poisson data from an Poisson LDS with the same dynamics
# In[19]:
import copy
plds = ssm.LDS(N, D, emissions="poisson_orthog", emission_kwargs=dict(link="softplus"))
plds.dynamics.params = copy.deepcopy(true_lds.dynamics.params)
plds.emissions.ds = 0 * np.ones(N)
_, x_plds, y_plds = plds.sample(T)
# In[20]:
# Plot the dynamics vector field
xmins = x_plds.min(axis=0)
xmaxs = x_plds.max(axis=0)
npts = 20
true_lds.dynamics.As[0] = A
XX, YY = np.meshgrid(np.linspace(xmins[0], xmaxs[0], npts), np.linspace(xmins[1], xmaxs[1], npts))
XY = np.column_stack((XX.ravel(), YY.ravel(), np.zeros((npts**2, D-2))))
dx = XY.dot(A.T) + b - XY
plt.figure(figsize=(6, 6))
plt.quiver(XX, YY, dx[:,0], dx[:,1], color=colors[0])
plt.plot(x_plds[:,0], x_plds[:,1], '-k', lw=3)
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.title("Simulated Latent States")
plt.tight_layout()
if save_figures:
plt.savefig("lds_6.pdf")
# In[21]:
# Plot the dynamics vector field
plt.figure(figsize=(8, 6))
gs = GridSpec(2, 1, height_ratios=(1, N/D))
# Plot the continuous latent states
lim = abs(x).max()
plt.subplot(gs[0])
for d in range(D):
plt.plot(x[:, d] + lim * d, '-k')
plt.yticks(np.arange(D) * lim, ["$x_{}$".format(d+1) for d in range(D)])
plt.xticks([])
plt.xlim(0, T)
plt.title("Simulated Latent States")
lim = abs(y).max()
plt.subplot(gs[1])
plt.imshow(y_plds.T, cmap="Greys", aspect="auto")
plt.yticks(np.arange(N), ["$y_{{ {} }}$".format(n+1) for n in range(N)])
plt.xlabel("time")
plt.xlim(0, T)
plt.title("Simulated Poisson Observations")
plt.colorbar()
plt.tight_layout()
if save_figures:
plt.savefig("lds_7.pdf")
|
py | 1a4d16dbfd06429c16dc51a587ef5b251e9b2ac5 | # Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import os
import random
import time
import warnings
import fixtures
import mock
import netaddr
from neutron_lib import constants
from neutron_lib.tests import tools
from neutron_lib.utils import helpers
from neutron_lib.utils import net
from oslo_utils import netutils
from oslo_utils import timeutils
import unittest2
from neutron.services.logapi.common import constants as log_const
# NOTE(yamahata): from neutron-lib 1.9.1, callback priority was added and
# priority_group module was added for constants of priority.
# test the existence of the module of priority_group to check if
# callback priority is supported or not.
_CALLBACK_PRIORITY_SUPPORTED = True
try:
from neutron_lib.callbacks import priority_group # noqa
except ImportError as e:
_CALLBACK_PRIORITY_SUPPORTED = False
class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
warning_types = (
DeprecationWarning, PendingDeprecationWarning, ImportWarning
)
def _setUp(self):
self.addCleanup(warnings.resetwarnings)
for wtype in self.warning_types:
warnings.filterwarnings(
"once", category=wtype, module='^neutron\\.')
class OpenFixture(fixtures.Fixture):
"""Mock access to a specific file while preserving open for others."""
def __init__(self, filepath, contents=''):
self.path = filepath
self.contents = contents
def _setUp(self):
self.mock_open = mock.mock_open(read_data=self.contents)
self._orig_open = open
def replacement_open(name, *args, **kwargs):
if name == self.path:
return self.mock_open(name, *args, **kwargs)
return self._orig_open(name, *args, **kwargs)
self._patch = mock.patch('six.moves.builtins.open',
new=replacement_open)
self._patch.start()
self.addCleanup(self._patch.stop)
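# Usage sketch (not part of the original helpers; illustrative only): while the
# fixture is active, reads of the patched path return the canned contents and
# every other path falls through to the real open().
def _open_fixture_demo():
    fixture = OpenFixture('/fake/path/fake.conf', 'canned contents')
    with fixture:
        with open('/fake/path/fake.conf') as f:
            assert f.read() == 'canned contents'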
class SafeCleanupFixture(fixtures.Fixture):
"""Catch errors in daughter fixture cleanup."""
def __init__(self, fixture):
self.fixture = fixture
def _setUp(self):
def cleanUp():
try:
self.fixture.cleanUp()
except Exception:
pass
self.fixture.setUp()
self.addCleanup(cleanUp)
def setup_mock_calls(mocked_call, expected_calls_and_values):
"""A convenient method to setup a sequence of mock calls.
expected_calls_and_values is a list of (expected_call, return_value):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]),
None),
(mock.call(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=gre"]),
None),
....
]
* expected_call should be mock.call(expected_arg, ....)
* return_value is passed to side_effect of a mocked call.
A return value or an exception can be specified.
"""
return_values = [call[1] for call in expected_calls_and_values]
mocked_call.side_effect = return_values
def verify_mock_calls(mocked_call, expected_calls_and_values,
any_order=False):
"""A convenient method to setup a sequence of mock calls.
expected_calls_and_values is a list of (expected_call, return_value):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]),
None),
(mock.call(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=gre"]),
None),
....
]
* expected_call should be mock.call(expected_arg, ....)
* return_value is passed to side_effect of a mocked call.
A return value or an exception can be specified.
"""
expected_calls = [call[0] for call in expected_calls_and_values]
mocked_call.assert_has_calls(expected_calls, any_order=any_order)
def _make_magic_method(method_mock):
# NOTE(yamahata): new environment needs to be created to keep actual
# method_mock for each callables.
def __call__(*args, **kwargs):
value_mock = method_mock._orig___call__(*args, **kwargs)
value_mock.__json__ = lambda: {}
return value_mock
def _get_child_mock(**kwargs):
value_mock = method_mock._orig__get_child_mock(**kwargs)
value_mock.__json__ = lambda: {}
return value_mock
return __call__, _get_child_mock
def make_mock_plugin_json_encodable(plugin_instance_mock):
# NOTE(yamahata): Make return value of plugin method json encodable
# e.g. the return value of plugin_instance.create_network() needs
# to be json encodable
# plugin instance -> method -> return value
# Mock MagicMock Mock
# plugin_instance_mock method_mock value_mock
#
# From v1.3 of pecan, pecan.jsonify uses json.Encoder unconditionally.
# pecan v1.2 uses simplejson.Encoder which accidentally encodes
# Mock as {} due to check of '_asdict' attributes.
# pecan.jsonify uses __json__ magic method for encoding when
# it's defined, so add __json__ method to return {}
for method_mock in plugin_instance_mock._mock_children.values():
if not callable(method_mock):
continue
method_mock._orig___call__ = method_mock.__call__
method_mock._orig__get_child_mock = method_mock._get_child_mock
__call__, _get_child_mock = _make_magic_method(method_mock)
method_mock.__call__ = __call__
method_mock._get_child_mock = _get_child_mock
def get_subscribe_args(*args):
# NOTE(yamahata): from neutron-lib 1.9.1, callback priority was added.
# old signature: (callback, resource, event)
# new signature: (callback, resource, event, priority=PRIORITY_DEFAULT)
if len(args) == 3 and _CALLBACK_PRIORITY_SUPPORTED:
args = list(args) # don't modify original list
args.append(priority_group.PRIORITY_DEFAULT)
return args
def fail(msg=None):
"""Fail immediately, with the given message.
This method is equivalent to TestCase.fail without requiring a
testcase instance (usefully for reducing coupling).
"""
raise unittest2.TestCase.failureException(msg)
def get_random_string_list(i=3, n=5):
return [helpers.get_random_string(n) for _ in range(0, i)]
def get_random_boolean():
return bool(random.getrandbits(1))
def get_random_datetime(start_time=None,
end_time=None):
start_time = start_time or timeutils.utcnow()
end_time = end_time or (start_time + datetime.timedelta(days=1))
# calculate the seconds difference between start and end time
delta_seconds_difference = int(timeutils.delta_seconds(start_time,
end_time))
# get a random time_delta_seconds between 0 and
# delta_seconds_difference
random_time_delta = random.randint(0, delta_seconds_difference)
# generate a random datetime between start and end time
return start_time + datetime.timedelta(seconds=random_time_delta)
def get_random_integer(range_begin=0, range_end=1000):
return random.randint(range_begin, range_end)
def get_random_prefixlen(version=4):
maxlen = constants.IPv4_BITS
if version == 6:
maxlen = constants.IPv6_BITS
return random.randint(0, maxlen)
def get_random_port(start=constants.PORT_RANGE_MIN):
return random.randint(start, constants.PORT_RANGE_MAX)
def get_random_vlan():
return random.randint(constants.MIN_VLAN_TAG, constants.MAX_VLAN_TAG)
def get_random_ip_version():
return random.choice(constants.IP_ALLOWED_VERSIONS)
def get_random_EUI():
return netaddr.EUI(
net.get_random_mac(['fe', '16', '3e', '00', '00', '00'])
)
def get_random_ip_network(version=4):
return netaddr.IPNetwork(tools.get_random_cidr(version=version))
def get_random_ip_address(version=4):
if version == 4:
ip_string = '10.%d.%d.%d' % (random.randint(3, 254),
random.randint(3, 254),
random.randint(3, 254))
return netaddr.IPAddress(ip_string)
else:
ip = netutils.get_ipv6_addr_by_EUI64(
'2001:db8::/64',
net.get_random_mac(['fe', '16', '3e', '00', '00', '00'])
)
return ip
def get_random_router_status():
return random.choice(constants.VALID_ROUTER_STATUS)
def get_random_floatingip_status():
return random.choice(constants.VALID_FLOATINGIP_STATUS)
def get_random_flow_direction():
return random.choice(constants.VALID_DIRECTIONS)
def get_random_ha_states():
return random.choice(constants.VALID_HA_STATES)
def get_random_ether_type():
return random.choice(constants.VALID_ETHERTYPES)
def get_random_ipam_status():
return random.choice(constants.VALID_IPAM_ALLOCATION_STATUSES)
def get_random_ip_protocol():
return random.choice(list(constants.IP_PROTOCOL_MAP.keys()))
def get_random_port_binding_statuses():
return random.choice(constants.PORT_BINDING_STATUSES)
def get_random_network_segment_range_network_type():
return random.choice([constants.TYPE_VLAN,
constants.TYPE_VXLAN,
constants.TYPE_GRE,
constants.TYPE_GENEVE])
def reset_random_seed():
# reset random seed to make sure other processes extracting values from RNG
# don't get the same results (useful especially when you then use the
# random values to allocate system resources from global pool, like ports
# to listen). Use both current time and pid to make sure no tests started
# at the same time get the same values from RNG
seed = time.time() + os.getpid()
random.seed(seed)
def get_random_ipv6_mode():
return random.choice(constants.IPV6_MODES)
def get_random_security_event():
return random.choice(log_const.LOG_EVENTS)
|
py | 1a4d17588d8a5e627cb76fcbe64e1904bad43a00 | """rent_site URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib.auth.decorators import login_required
from django.contrib import admin
from django.urls import path
from rentapp.views import home_view, SignupView, profile, ProfileUpdateView, ToolUserAddView, Inbox, \
Directs, NewConversation, SendDirect, UserSearch, my_tools_view, ToolDetailView, RentPropositionView, RequestsView, \
DeleteRequestView, ApproveRequestView, LendedView, RentedView, MyToolUpdateView, MyRequestsView, RejectRequestView, \
CancelRequestView, OwnerToolReturnView, UserToolReturnView, HideView, SearchToolView
from django.contrib.auth import views as auth_views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
# path('accounts/', include('allauth.urls')),
path('', home_view, name="home"),
path('profile/my_tools', my_tools_view, name="my_tools"),
path('signup/', SignupView.as_view(), name="signup"),
path('login/', auth_views.LoginView.as_view(template_name='login.html'), name='login'),
path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
path('reset_password/',
auth_views.PasswordResetView.as_view(template_name="password_reset.html"),
name="reset_password"),
path('reset_password_sent/',
auth_views.PasswordResetDoneView.as_view(template_name="password_reset_sent.html"),
name="password_reset_done"),
path('reset/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(template_name="password_reset_form.html"),
name="password_reset_confirm"),
path('reset_password_complete/',
auth_views.PasswordResetCompleteView.as_view(template_name="password_reset_done.html"),
name="password_reset_complete"),
path('change-password/',
auth_views.PasswordChangeView.as_view(template_name='change_password.html', success_url='/'),
name='change_password'),
path('profile/', profile, name='profile'),
path('profile/settings', login_required(ProfileUpdateView.as_view()),
name='profile_update'),
path('profile/add_tool/', login_required(ToolUserAddView.as_view()), name='add_user_tool'),
path('inbox/', Inbox, name='inbox'),
path('directs/<username>', Directs, name='directs'),
path('new/', UserSearch, name='usersearch'),
path('new/<username>', NewConversation, name='newconversation'),
path('send/', SendDirect, name='send_direct'),
path('tool_detail/<int:tool_id>', login_required(ToolDetailView.as_view()), name='tool_detail'),
path('rent_this_elektro/<int:elektro_id>', login_required(RentPropositionView.as_view()), name='rent_this_elektro'),
path('profile/requests', RequestsView.as_view(), name='requests'),
path('profile/my_requests', login_required(MyRequestsView.as_view()), name='my_requests'),
path('reject/<int:req_id>', login_required(RejectRequestView.as_view()), name='reject'),
path('cancel/<int:req_id>', login_required(CancelRequestView.as_view()), name='reject'),
path('approve/<int:req_id>', login_required(ApproveRequestView.as_view()), name='approve'),
path('profile/my_tool/update/<int:my_tool_id>', login_required(MyToolUpdateView.as_view()), name='tool_update'),
path('profile/lended/', login_required(LendedView.as_view()), name='lended'),
path('profile/rented/', login_required(RentedView.as_view()), name='rented'),
path('ownertoolreturn/<int:req_id>', login_required(OwnerToolReturnView.as_view()), name='owner_tool_return'),
path('usertoolreturn/<int:req_id>', login_required(UserToolReturnView.as_view()), name='user_tool_return'),
path('hide/<int:req_id>', login_required(HideView.as_view()), name='hide'),
path('search/tool', login_required(SearchToolView.as_view()), name='search')
]
if settings.DEBUG is True:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
py | 1a4d182615ce8d45a67d358a343c678e05704207 | import sympy as sp
import numpy as np
from devito import (Eq, Operator, VectorTimeFunction, TimeFunction, Function, NODE,
div, grad, Inc)
from examples.seismic import PointSource, Receiver
def src_rec(p, model, geometry, **kwargs):
"""
Forward case: Source injection and receiver interpolation
Adjoint case: Receiver injection and source interpolation
"""
dt = model.grid.time_dim.spacing
m = model.m
# Source symbol with input wavelet
src = PointSource(name="src", grid=model.grid, time_range=geometry.time_axis,
npoint=geometry.nsrc)
rec = Receiver(name='rec', grid=model.grid, time_range=geometry.time_axis,
npoint=geometry.nrec)
forward = kwargs.get('forward', True)
time_order = p.time_order
if forward:
# The source injection term
if(time_order == 1):
src_term = src.inject(field=p.forward, expr=src * dt)
else:
src_term = src.inject(field=p.forward, expr=src * dt**2 / m)
# Create interpolation expression for receivers
rec_term = rec.interpolate(expr=p)
else:
# Construct expression to inject receiver values
if(time_order == 1):
rec_term = rec.inject(field=p.backward, expr=rec * dt)
else:
rec_term = rec.inject(field=p.backward, expr=rec * dt**2 / m)
# Create interpolation expression for the adjoint-source
src_term = src.interpolate(expr=p)
return src_term + rec_term
def sls_1st_order(model, geometry, p, **kwargs):
"""
Implementation of the 1st order viscoacoustic wave-equation
from Blanch and Symes (1995) / Dutta and Schuster (2014).
https://library.seg.org/doi/pdf/10.1190/1.1822695
https://library.seg.org/doi/pdf/10.1190/geo2013-0414.1
Parameters
----------
p : TimeFunction
Pressure field.
"""
forward = kwargs.get('forward', True)
space_order = p.space_order
save = kwargs.get('save', False)
save_t = geometry.nt if save else None
s = model.grid.stepping_dim.spacing
b = model.b
vp = model.vp
damp = model.damp
qp = model.qp
f0 = geometry._f0
# Particle Velocity
v = kwargs.pop('v')
# The stress relaxation parameter
t_s = (sp.sqrt(1.+1./qp**2)-1./qp)/f0
# The strain relaxation parameter
t_ep = 1./(f0**2*t_s)
# The relaxation time
tt = (t_ep/t_s)-1.
# Density
rho = 1. / b
# Bulk modulus
bm = rho * vp**2
# Attenuation Memory variable.
r = TimeFunction(name="r", grid=model.grid, time_order=1, space_order=space_order,
save=save_t, staggered=NODE)
if forward:
# Define PDE
pde_v = v - s * b * grad(p)
u_v = Eq(v.forward, damp * pde_v)
pde_r = r - s * (1. / t_s) * r - s * (1. / t_s) * tt * bm * div(v.forward)
u_r = Eq(r.forward, damp * pde_r)
pde_p = p - s * bm * (tt + 1.) * div(v.forward) - s * r.forward
u_p = Eq(p.forward, damp * pde_p)
return [u_v, u_r, u_p]
else:
# Define PDE
pde_r = r - s * (1. / t_s) * r - s * p
u_r = Eq(r.backward, damp * pde_r)
pde_v = v + s * grad(bm * (1. + tt) * p) + s * \
grad((1. / t_s) * bm * tt * r.backward)
u_v = Eq(v.backward, damp * pde_v)
pde_p = p + s * div(b * v.backward)
u_p = Eq(p.backward, damp * pde_p)
return [u_r, u_v, u_p]
def sls_2nd_order(model, geometry, p, r=None, **kwargs):
"""
Implementation of the 2nd order viscoacoustic wave-equation from Bai (2014).
https://library.seg.org/doi/10.1190/geo2013-0030.1
Parameters
----------
p : TimeFunction
Pressure field.
r : TimeFunction
Attenuation Memory variable.
"""
forward = kwargs.get('forward', True)
space_order = p.space_order
save = kwargs.get('save', False)
save_t = geometry.nt if save else None
s = model.grid.stepping_dim.spacing
b = model.b
vp = model.vp
damp = model.damp
qp = model.qp
f0 = geometry._f0
q = kwargs.get('q', 0)
# The stress relaxation parameter
t_s = (sp.sqrt(1.+1./qp**2)-1./qp)/f0
# The strain relaxation parameter
t_ep = 1./(f0**2*t_s)
# The relaxation time
tt = (t_ep/t_s)-1.
# Density
rho = 1. / b
# Bulk modulus
bm = rho * vp**2
# Attenuation Memory variable.
r = r or TimeFunction(name="r", grid=model.grid, time_order=2,
space_order=space_order, save=save_t, staggered=NODE)
if forward:
pde_r = r + s * (tt / t_s) * rho * div(b * grad(p, shift=.5), shift=-.5) - \
s * (1. / t_s) * r
u_r = Eq(r.forward, damp * pde_r)
pde_p = 2. * p - damp * p.backward + s**2 * bm * (1. + tt) * \
div(b * grad(p, shift=.5), shift=-.5) - s**2 * vp**2 * \
r.forward + s**2 * vp**2 * q
u_p = Eq(p.forward, damp * pde_p)
return [u_r, u_p]
else:
pde_r = r + s * (tt / t_s) * p - s * (1. / t_s) * r
u_r = Eq(r.backward, damp * pde_r)
pde_p = 2. * p - damp * p.forward + s**2 * vp**2 * \
div(b * grad((1. + tt) * rho * p, shift=.5), shift=-.5) - s**2 * vp**2 * \
div(b * grad(rho * r.backward, shift=.5), shift=-.5)
u_p = Eq(p.backward, damp * pde_p)
return [u_r, u_p]
def ren_1st_order(model, geometry, p, **kwargs):
"""
Implementation of the 1st order viscoacoustic wave-equation from Ren et al. (2014).
https://academic.oup.com/gji/article/197/2/948/616510
Parameters
----------
p : TimeFunction
Pressure field.
"""
forward = kwargs.get('forward', True)
s = model.grid.stepping_dim.spacing
f0 = geometry._f0
vp = model.vp
b = model.b
qp = model.qp
damp = model.damp
# Particle velocity
v = kwargs.pop('v')
# Angular frequency
w0 = 2. * np.pi * f0
# Density
rho = 1. / b
eta = vp**2 / (w0 * qp)
# Bulk modulus
bm = rho * vp**2
if forward:
# Define PDE
pde_v = v - s * b * grad(p)
u_v = Eq(v.forward, damp * pde_v)
pde_p = p - s * bm * div(v.forward) + \
s * ((vp**2 * rho) / (w0 * qp)) * div(b * grad(p, shift=.5), shift=-.5)
u_p = Eq(p.forward, damp * pde_p)
return [u_v, u_p]
else:
pde_v = v + s * grad(bm * p)
u_v = Eq(v.backward, pde_v * damp)
pde_p = p + s * div(b * grad(rho * eta * p, shift=.5), shift=-.5) + \
s * div(b * v.backward)
u_p = Eq(p.backward, pde_p * damp)
return [u_v, u_p]
def ren_2nd_order(model, geometry, p, **kwargs):
"""
Implementation of the 2nd order viscoacoustic wave-equation from Ren et al. (2014).
https://library.seg.org/doi/pdf/10.1190/1.2714334
Parameters
----------
p : TimeFunction
Pressure field.
"""
forward = kwargs.get('forward', True)
s = model.grid.stepping_dim.spacing
f0 = geometry._f0
vp = model.vp
b = model.b
qp = model.qp
damp = model.damp
# Angular frequency
w0 = 2. * np.pi * f0
# Density
rho = 1. / b
eta = vp**2 / (w0 * qp)
# Bulk modulus
bm = rho * vp**2
if forward:
pde_p = 2. * p - damp * p.backward + s**2 * bm * \
div(b * grad(p, shift=.5), shift=-.5) + s**2 * eta * rho * \
div(b * grad(p - p.backward, shift=.5) / s, shift=-.5)
u_p = Eq(p.forward, damp * pde_p)
return [u_p]
else:
pde_p = 2. * p - damp * p.forward + s**2 * \
div(b * grad(bm * p, shift=.5), shift=-.5) - s**2 * \
div(b * grad(((p.forward - p) / s) * rho * eta, shift=.5), shift=-.5)
u_p = Eq(p.backward, damp * pde_p)
return [u_p]
def deng_1st_order(model, geometry, p, **kwargs):
"""
Implementation of the 1st order viscoacoustic wave-equation
from Deng and McMechan (2007).
https://library.seg.org/doi/pdf/10.1190/1.2714334
Parameters
----------
p : TimeFunction
Pressure field.
"""
forward = kwargs.get('forward', True)
s = model.grid.stepping_dim.spacing
f0 = geometry._f0
vp = model.vp
b = model.b
qp = model.qp
damp = model.damp
# Particle velocity
v = kwargs.pop('v')
# Angular frequency
w0 = 2. * np.pi * f0
# Density
rho = 1. / b
# Bulk modulus
bm = rho * vp**2
if forward:
# Define PDE
pde_v = v - s * b * grad(p)
u_v = Eq(v.forward, damp * pde_v)
pde_p = p - s * bm * div(v.forward) - s * (w0 / qp) * p
u_p = Eq(p.forward, damp * pde_p)
return [u_v, u_p]
else:
pde_v = v + s * grad(bm * p)
u_v = Eq(v.backward, pde_v * damp)
pde_p = p + s * div(b * v.backward) - s * (w0 / qp) * p
u_p = Eq(p.backward, pde_p * damp)
return [u_v, u_p]
def deng_2nd_order(model, geometry, p, **kwargs):
"""
Implementation of the 2nd order viscoacoustic wave-equation
from Deng and McMechan (2007).
https://library.seg.org/doi/pdf/10.1190/1.2714334
Parameters
----------
p : TimeFunction
Pressure field.
"""
forward = kwargs.get('forward', True)
s = model.grid.stepping_dim.spacing
f0 = geometry._f0
vp = model.vp
b = model.b
qp = model.qp
damp = model.damp
# Angular frequency
w0 = 2. * np.pi * f0
# Density
rho = 1. / b
bm = rho * vp**2
if forward:
pde_p = 2. * p - damp*p.backward + s**2 * bm * \
div(b * grad(p, shift=.5), shift=-.5) - s**2 * w0/qp * (p - p.backward)/s
u_p = Eq(p.forward, damp * pde_p)
return [u_p]
else:
pde_p = 2. * p - damp * p.forward + s**2 * w0 / qp * (p.forward - p) / s + \
s * s * div(b * grad(bm * p, shift=.5), shift=-.5)
u_p = Eq(p.backward, damp * pde_p)
return [u_p]
def sls(model, geometry, p, forward=True, **kwargs):
"""
Implementation of the 1st order viscoacoustic wave-equation
from Blanch and Symes (1995) / Dutta and Schuster (2014) and
Implementation of the 2nd order viscoacoustic wave-equation from Bai (2014).
https://library.seg.org/doi/pdf/10.1190/1.1822695
https://library.seg.org/doi/pdf/10.1190/geo2013-0414.1
https://library.seg.org/doi/10.1190/geo2013-0030.1
Parameters
----------
p : TimeFunction
Pressure field.
"""
time_order = p.time_order
eq_stencil = stencils[('sls', time_order)]
eqn = eq_stencil(model, geometry, p, forward=forward, **kwargs)
return eqn
def ren(model, geometry, p, forward=True, **kwargs):
"""
Implementation of the 1st and 2nd order viscoacoustic wave-equation from
Ren et al. (2014).
https://academic.oup.com/gji/article/197/2/948/616510
https://library.seg.org/doi/pdf/10.1190/1.2714334
Parameters
----------
p : TimeFunction
Pressure field.
"""
time_order = p.time_order
eq_stencil = stencils[('ren', time_order)]
eqn = eq_stencil(model, geometry, p, forward=forward, **kwargs)
return eqn
def deng_mcmechan(model, geometry, p, forward=True, **kwargs):
"""
Implementation of the 1st order viscoacoustic wave-equation and 2nd order
viscoacoustic wave-equation from Deng and McMechan (2007).
https://library.seg.org/doi/pdf/10.1190/1.2714334
Parameters
----------
p : TimeFunction
Pressure field.
"""
time_order = p.time_order
eq_stencil = stencils[('deng_mcmechan', time_order)]
eqn = eq_stencil(model, geometry, p, forward=forward, **kwargs)
return eqn
def ForwardOperator(model, geometry, space_order=4, kernel='sls', time_order=2,
save=False, **kwargs):
"""
Construct method for the forward modelling operator in a viscoacoustic medium.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
kernel : string, optional
selects a viscoacoustic equation from the options below:
sls (Standard Linear Solid) :
1st order - Blanch and Symes (1995) / Dutta and Schuster (2014)
viscoacoustic equation
2nd order - Bai et al. (2014) viscoacoustic equation
ren - Ren et al. (2014) viscoacoustic equation
deng_mcmechan - Deng and McMechan (2007) viscoacoustic equation
Defaults to sls 2nd order.
save : int or Buffer
Saving flag, True saves all time steps, False saves three buffered
indices (last three time steps). Defaults to False.
"""
# Create symbols for forward wavefield, particle velocity, source and receivers
save_t = geometry.nt if save else None
if time_order == 1:
v = VectorTimeFunction(name="v", grid=model.grid, time_order=time_order,
space_order=space_order, save=save_t)
kwargs.update({'v': v})
p = TimeFunction(name="p", grid=model.grid, time_order=time_order,
space_order=space_order, save=save_t, staggered=NODE)
# Equations kernels
eq_kernel = kernels[kernel]
eqn = eq_kernel(model, geometry, p, save=save, **kwargs)
srcrec = src_rec(p, model, geometry)
# Substitute spacing terms to reduce flops
return Operator(eqn + srcrec, subs=model.spacing_map,
name='Forward', **kwargs)
def AdjointOperator(model, geometry, space_order=4, kernel='sls', time_order=2, **kwargs):
"""
Construct an adjoint modelling operator in a viscoacoustic medium.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
kernel : selects a visco-acoustic equation from the options below:
sls (Standard Linear Solid) :
1st order - Blanch and Symes (1995) / Dutta and Schuster (2014)
viscoacoustic equation
2nd order - Bai et al. (2014) viscoacoustic equation
ren - Ren et al. (2014) viscoacoustic equation
deng_mcmechan - Deng and McMechan (2007) viscoacoustic equation
Defaults to sls 2nd order.
"""
if time_order == 1:
va = VectorTimeFunction(name="va", grid=model.grid, time_order=time_order,
space_order=space_order)
kwargs.update({'v': va})
pa = TimeFunction(name="pa", grid=model.grid, time_order=time_order,
space_order=space_order, staggered=NODE)
# Equations kernels
eq_kernel = kernels[kernel]
eqn = eq_kernel(model, geometry, pa, forward=False, **kwargs)
srcrec = src_rec(pa, model, geometry, forward=False)
# Substitute spacing terms to reduce flops
return Operator(eqn + srcrec, subs=model.spacing_map, name='Adjoint', **kwargs)
def GradientOperator(model, geometry, space_order=4, kernel='sls', time_order=2,
save=True, **kwargs):
"""
Construct a gradient operator in an acoustic media.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
save : int or Buffer, optional
Option to store the entire (unrolled) wavefield.
kernel : selects a visco-acoustic equation from the options below:
sls (Standard Linear Solid) :
1st order - Blanch and Symes (1995) / Dutta and Schuster (2014)
viscoacoustic equation
2nd order - Bai et al. (2014) viscoacoustic equation
ren - Ren et al. (2014) viscoacoustic equation
deng_mcmechan - Deng and McMechan (2007) viscoacoustic equation
Defaults to sls 2nd order.
"""
# Gradient symbol and wavefield symbols
save_t = geometry.nt if save else None
grad = Function(name='grad', grid=model.grid)
p = TimeFunction(name='p', grid=model.grid, time_order=2, space_order=space_order,
save=save_t, staggered=NODE)
pa = TimeFunction(name='pa', grid=model.grid, time_order=time_order,
space_order=space_order, staggered=NODE)
# Equations kernels
eq_kernel = kernels[kernel]
eqn = eq_kernel(model, geometry, pa, forward=False, save=False, **kwargs)
gradient_update = Inc(grad, - p.dt2 * pa)
# Add expression for receiver injection
_, recterm = src_rec(pa, model, geometry, forward=False)
# Substitute spacing terms to reduce flops
return Operator(eqn + recterm + [gradient_update], subs=model.spacing_map,
name='Gradient', **kwargs)
def BornOperator(model, geometry, space_order=4, kernel='sls', time_order=2, **kwargs):
"""
Construct an Linearized Born operator in an acoustic media.
Parameters
----------
model : Model
Object containing the physical parameters.
geometry : AcquisitionGeometry
Geometry object that contains the source (SparseTimeFunction) and
receivers (SparseTimeFunction) and their position.
space_order : int, optional
Space discretization order.
kernel : str, optional
Type of discretization, centered or shifted.
"""
# Create wavefields and a dm field
p = TimeFunction(name='p', grid=model.grid, time_order=time_order,
space_order=space_order, staggered=NODE)
P = TimeFunction(name='P', grid=model.grid, time_order=time_order,
space_order=space_order, staggered=NODE)
rp = TimeFunction(name="rp", grid=model.grid, time_order=time_order,
space_order=space_order, staggered=NODE)
rP = TimeFunction(name="rP", grid=model.grid, time_order=time_order,
space_order=space_order, staggered=NODE)
dm = Function(name='dm', grid=model.grid, space_order=0)
# Equations kernels
eq_kernel = kernels[kernel]
eqn1 = eq_kernel(model, geometry, p, r=rp, **kwargs)
s = model.grid.stepping_dim.spacing
q = -dm * (p.forward - 2 * p + p.backward) / (s**2)
eqn2 = eq_kernel(model, geometry, P, r=rP, q=q, **kwargs)
# Add source term expression for p
src_term, _ = src_rec(p, model, geometry)
# Create receiver interpolation expression from P
_, rec_term = src_rec(P, model, geometry)
# Substitute spacing terms to reduce flops
return Operator(eqn1 + src_term + rec_term + eqn2, subs=model.spacing_map,
name='Born', **kwargs)
kernels = {'sls': sls, 'ren': ren, 'deng_mcmechan': deng_mcmechan}
stencils = {('sls', 1): sls_1st_order, ('sls', 2): sls_2nd_order,
('deng_mcmechan', 1): deng_1st_order,
('deng_mcmechan', 2): deng_2nd_order,
('ren', 1): ren_1st_order, ('ren', 2): ren_2nd_order}
|
py | 1a4d1a3ac2b11adc0acb85bb8391fdd9e0755048 | # Copyright 2019 Axis Communications AB.
#
# For a full list of individual contributors, please see the commit history.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import graphene
from ..base import EiffelObjectType
from ..lib.generic import json_schema_to_graphql, load
class ActivityCanceled(EiffelObjectType):
data = json_schema_to_graphql(
"ActivityCanceledData",
load("EiffelActivityCanceledEvent.json").get("data").get("properties"))
mongo = None
def __init__(self, mongo):
self.mongo = mongo
class ActivityCanceledConnection(graphene.Connection):
class Meta:
node = ActivityCanceled
|
py | 1a4d1ac8d2265cb08d0136c585047ad206433c09 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message strcutures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization."""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70914 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def ser_uint64(u):
rs = b""
for i in range(2):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress():
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv():
typemap = {
0: "Error",
1: "TX",
2: "Block",
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator():
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint():
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn():
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut():
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CTransaction():
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
    # Witness data is not used in this codebase, so regular serialization
    # simply delegates to serialize_without_witness().
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
    def calc_sha256(self, with_witness=False):
        if self.sha256 is None:
            self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
            self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
        return self.sha256
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader():
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nAccumulatorCheckpoint = header.nAccumulatorCheckpoint
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nAccumulatorCheckpoint = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nAccumulatorCheckpoint = deser_uint256(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
r += ser_uint256(self.nAccumulatorCheckpoint)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
# VERS Uniqueness
def get_uniqueness(self, prevout):
r = b""
r += struct.pack("<I", prevout.n)
r += ser_uint256(prevout.hash)
return r
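    # Proof-of-stake "mining": for each candidate prevout, hash the stake modifier, the
    # staked output's block time, its uniqueness (or zPoS serial hash) and this header's
    # nTime, and keep bumping nTime until the hash meets the value-weighted target.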
def solve_stake(self, prevouts):
target0 = uint256_from_compact(self.nBits)
loop = True
while loop:
for prevout in prevouts:
nvalue, txBlockTime, stakeModifier, hashStake = prevouts[prevout]
target = int(target0 * nvalue / 100) % 2**256
data = b""
data += ser_uint64(stakeModifier)
data += struct.pack("<I", txBlockTime)
# prevout for zPoS is serial hashes hex strings
if isinstance(prevout, COutPoint):
data += self.get_uniqueness(prevout)
else:
data += ser_uint256(uint256_from_str(bytes.fromhex(hashStake)[::-1]))
data += struct.pack("<I", self.nTime)
posHash = uint256_from_str(hash256(data))
if posHash <= target:
self.prevoutStake = prevout
loop = False
break
if loop:
self.nTime += 1
return True
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
if hasattr(self, 'vchBlockSig'):
r += ser_string(self.vchBlockSig)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
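    # Proof-of-work solve: increment nNonce until the header hash is at or below the nBits target.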
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def sign_block(self, key, low_s=True):
data = b""
data += struct.pack("<i", self.nVersion)
data += ser_uint256(self.hashPrevBlock)
data += ser_uint256(self.hashMerkleRoot)
data += struct.pack("<I", self.nTime)
data += struct.pack("<I", self.nBits)
data += struct.pack("<I", self.nNonce)
data += ser_uint256(self.nAccumulatorCheckpoint)
sha256NoSig = hash256(data)
self.vchBlockSig = key.sign(sha256NoSig, low_s=low_s)
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class PrefilledTransaction():
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs():
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs():
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest():
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions():
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree():
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
self.fBad = False
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock():
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version():
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack():
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr():
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv():
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata():
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks():
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx():
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block():
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize(with_witness=False)
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic():
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr():
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping():
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong():
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool():
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders():
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders():
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers():
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject():
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter():
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct():
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock():
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn():
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn():
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=False)
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
|
py | 1a4d1b1d7abaa9494d049332b082a4dec1d8a5b2 | import pytest
from datetime import time, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
from pandas import (Series, Timedelta, to_timedelta, isna,
TimedeltaIndex)
from pandas._libs.tslib import iNaT
class TestTimedeltas(object):
_multiprocess_can_split_ = True
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert (to_timedelta('1 days 06:05:01.00003', box=False) ==
conv(d1 + np.timedelta64(6 * 3600 + 5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
assert (to_timedelta('15.5us', box=False) ==
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
assert result.astype('int64') == iNaT
result = to_timedelta(['', ''])
assert isna(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
assert result == expected
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
# Test with lists as input when box=false
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
tm.assert_raises_regex(ValueError, msg, to_timedelta,
['foo'], errors='never')
# these will error
pytest.raises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
pytest.raises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
pytest.raises(ValueError, lambda: to_timedelta(time(second=1)))
assert to_timedelta(time(second=1), errors='coerce') is pd.NaT
pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
# gh-13613: these should not error because errors='ignore'
invalid_data = 'apple'
assert invalid_data == to_timedelta(invalid_data, errors='ignore')
invalid_data = ['apple', '1 days']
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors='ignore'))
invalid_data = pd.Index(['apple', '1 days'])
tm.assert_index_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = Series(['apple', '1 days'])
tm.assert_series_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
assert actual.value == timedelta_NaT.astype('int64')
actual = pd.to_timedelta(pd.NaT)
assert actual.value == timedelta_NaT.astype('int64')
def test_to_timedelta_on_nanoseconds(self):
# GH 9273
result = Timedelta(nanoseconds=100)
expected = Timedelta('100ns')
assert result == expected
result = Timedelta(days=1, hours=1, minutes=1, weeks=1, seconds=1,
milliseconds=1, microseconds=1, nanoseconds=1)
expected = Timedelta(694861001001001)
assert result == expected
result = Timedelta(microseconds=1) + Timedelta(nanoseconds=1)
expected = Timedelta('1us1ns')
assert result == expected
result = Timedelta(microseconds=1) - Timedelta(nanoseconds=1)
expected = Timedelta('999ns')
assert result == expected
result = Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2)
expected = Timedelta('990ns')
assert result == expected
pytest.raises(TypeError, lambda: Timedelta(nanoseconds='abc'))
|
py | 1a4d1d2e553a85dba815f39e496e8cb6d048d3dc | # -*- coding: utf-8 -*-
"""Development settings and globals."""
from __future__ import absolute_import
from os.path import join, normpath
from .base import *
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## EMAIL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': normpath(join(DJANGO_ROOT, 'default.db')),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## CACHE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
########## END CACHE CONFIGURATION
########## TOOLBAR CONFIGURATION
# See: http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION
LOCAL_SETTINGS = True
|
py | 1a4d1dbe9909310957174327004978afa1ae695d | from django import forms
from django.db.models import Q
from common.models import User, Attachments, Comment
from contacts.models import Contact
from events.models import Event
from teams.models import Teams
class EventForm(forms.ModelForm):
WEEKDAYS = (('Monday', 'Monday'),
('Tuesday', 'Tuesday'),
('Wednesday', 'Wednesday'),
('Thursday', 'Thursday'),
('Friday', 'Friday'),
('Saturday', 'Saturday'),
('Sunday', 'Sunday'))
recurring_days = forms.MultipleChoiceField(
required=False, choices=WEEKDAYS)
teams_queryset = []
teams = forms.MultipleChoiceField(choices=teams_queryset)
def __init__(self, *args, **kwargs):
request_user = kwargs.pop('request_user', None)
self.obj_instance = kwargs.get('instance', None)
super(EventForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
if request_user.role == 'ADMIN' or request_user.is_superuser:
self.fields['assigned_to'].queryset = User.objects.filter(is_active=True)
self.fields["contacts"].queryset = Contact.objects.filter()
self.fields['assigned_to'].required = True
self.fields["teams"].choices = [(team.get('id'), team.get('name')) for team in
Teams.objects.all().values('id', 'name')]
# elif request_user.google.all():
# self.fields['assigned_to'].queryset = User.objects.none()
# self.fields["contacts"].queryset = Contact.objects.filter(
# Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
# self.fields['assigned_to'].required = False
elif request_user.role == 'USER':
self.fields['assigned_to'].queryset = User.objects.filter(
role='ADMIN')
self.fields["contacts"].queryset = Contact.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
self.fields['assigned_to'].required = True
else:
pass
if self.obj_instance:
# self.fields['name'].widget.attrs['readonly'] = True
self.fields['start_date'].widget.attrs['readonly'] = True
self.fields['end_date'].widget.attrs['readonly'] = True
self.fields["teams"].required = False
self.fields['name'].required = True
self.fields['event_type'].required = True
self.fields['contacts'].required = True
self.fields['start_date'].required = True
self.fields['start_time'].required = True
self.fields['end_date'].required = True
self.fields['end_time'].required = True
self.fields['description'].required = False
    def clean_recurring_days(self):
        recurring_days = self.cleaned_data.get('recurring_days')
        if not self.obj_instance:
            if self.cleaned_data.get('event_type') == 'Recurring':
                if len(recurring_days) < 1:
                    raise forms.ValidationError('Choose at least one recurring day')
        return recurring_days
def clean_name(self):
name = self.cleaned_data.get('name')
if not self.obj_instance:
if Event.objects.filter(name=name).exclude(id=self.instance.id).exists():
raise forms.ValidationError(
'Event with this name already exists.')
return name
def clean_event_type(self):
""" This Validation Is For Keeping The Field Readonly While Editing or Updating"""
event_type = self.cleaned_data.get('event_type')
if self.obj_instance:
return self.obj_instance.event_type
else:
return event_type
def clean_start_date(self):
start_date = self.cleaned_data.get('start_date')
if start_date:
if self.obj_instance:
return self.obj_instance.start_date
else:
return start_date
else:
raise forms.ValidationError('Enter a valid Start date.')
def clean_end_date(self):
end_date = self.cleaned_data.get('end_date')
event_type = self.cleaned_data.get('event_type')
if event_type == 'Recurring':
if self.clean_start_date() == end_date:
raise forms.ValidationError(
'Start Date and End Date cannot be equal for recurring events')
if self.clean_start_date() > end_date:
raise forms.ValidationError(
'End Date cannot be less than start date')
return end_date
def clean_end_time(self):
end_time = self.cleaned_data.get('end_time')
if not self.cleaned_data.get('start_time'):
raise forms.ValidationError('Enter a valid start time.')
if self.cleaned_data.get('start_time') > end_time:
raise forms.ValidationError(
'End Time cannot be less than Start Time')
return end_time
class Meta:
model = Event
fields = (
'name', 'event_type', 'contacts', 'assigned_to', 'start_date', 'start_time',
'end_date', 'end_time', 'description',
)
class EventCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=255, required=True)
class Meta:
model = Comment
fields = ('comment', 'event', 'commented_by')
class EventAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ('attachment', 'event')
|
py | 1a4d1f0eb76f6ebbce8641704a6b20140c07caf4 | from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd.function import InplaceFunction
import torch.optim as optim
from torch.autograd import Variable
from config import Config
import numpy as np
from lib import actions as actionslib
from lib.util import to_cuda, to_variable
import imgaug as ia
import random
ANGLE_BIN_SIZE = 5
GPU = 0
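# Small CNN that classifies a steering-wheel crop (forward_image expects an HxWxC image,
# apparently 32x64 RGB given the fc1 sizing) into 360 / ANGLE_BIN_SIZE angle bins.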
class SteeringWheelTrackerCNNModel(nn.Module):
def __init__(self):
super(SteeringWheelTrackerCNNModel, self).__init__()
self.c1 = nn.Conv2d(3, 32, kernel_size=7, padding=3, stride=1)
self.fc1 = nn.Linear(32*(32//4)*(64//4), 16)
self.fc2 = nn.Linear(16, 360//ANGLE_BIN_SIZE)
def forward(self, inputs, softmax=False):
x = inputs
x = F.relu(self.c1(x))
x = F.avg_pool2d(x, 4)
x = x.view(-1, 32*(32//4)*(64//4))
x = F.relu(self.fc1(x))
x = self.fc2(x)
if softmax:
x = F.softmax(x)
return x
def forward_image(self, subimg, softmax=False, volatile=False, requires_grad=True, gpu=GPU):
subimg = np.float32([subimg/255]).transpose((0, 3, 1, 2))
        subimg = to_cuda(to_variable(subimg, volatile=volatile, requires_grad=requires_grad), gpu)
return self.forward(subimg, softmax=softmax)
|
py | 1a4d1faf57d30189b0c039c3dddcc1779bb7f447 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from google.cloud.texttospeech_v1 import types
from google.cloud.texttospeech_v1.gapic import enums
from google.cloud.texttospeech_v1.gapic import text_to_speech_client
class TextToSpeechClient(text_to_speech_client.TextToSpeechClient):
__doc__ = text_to_speech_client.TextToSpeechClient.__doc__
enums = enums
__all__ = (
'enums',
'types',
'TextToSpeechClient',
)
|
py | 1a4d200bce429789783448ef6d3996dad2b8c283 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))
import files
import strings
import table
def main(argv):
blosom62 = table.scoring(argv[0])
s, t = files.read_lines(argv[1])
edge = strings.middle_edge(s, t, blosom62)
print edge[0], edge[1]
if __name__ == "__main__":
main(sys.argv[1:])
|
py | 1a4d20a1abbd47385f93cc45bd3d841e7eca83b5 | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
import numpy as np
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 200 # Number of waypoints we will publish. You can change this number
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.pose = None
self.base_waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.loop()
    def loop(self):
        # Keep publishing at 50 Hz until the node shuts down.
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            if self.pose and self.base_waypoints:
                closest_waypoint_idx = self.get_closest_waypoint_idx()
                self.publish_waypoints(closest_waypoint_idx)
            rate.sleep()
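    # The KDTree query returns the nearest waypoint; the dot-product check below makes sure
    # the chosen waypoint lies ahead of the vehicle rather than behind it.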
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query([x,y], 1)[1]
closest_coor = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx-1]
cl_vect = np.array(closest_coor)
prev_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
def pose_cb(self, msg):
# TODO: Implement
self.pose = msg
def waypoints_cb(self, waypoints):
        self.base_waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
    def publish_waypoints(self, closest_idx):
        lane = Lane()
        lane.header = self.base_waypoints.header
        lane.waypoints = self.base_waypoints.waypoints[closest_idx:closest_idx + LOOKAHEAD_WPS]
        self.final_waypoints_pub.publish(lane)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
pass
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity(self, waypoint):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity(self, waypoints, waypoint, velocity):
waypoints[waypoint].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rospy.ROSInterruptException:
rospy.logerr('Could not start waypoint updater node.') |
py | 1a4d20c99b41240e752ad7f2f4cf6b0f41341cf1 | #!/usr/bin/env python3
# Develop a program that finds all the genes in a bacterial genome.
# Program reads FASTA file of genome sequence
# Genes begin with ATG and end with stop codon
# Genes are at least X amino acids long (default 100)
# Genes may be on either strand
# Genes must be given unique names
# Genes must be reported in a FASTA file as their protein sequence
# Also create a genome report containing the following information
# Size of the genome in bp
# Number of genes
# Percentage of genome that is coding
# Number of genes on the positive strand
# Number of genes on the negative strand
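# Example invocation (illustrative; the script and FASTA file names are placeholders):
#   python3 genefinder.py --file genome.fasta --minorf 300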
import random
import argparse
import biotools as bt
parser = argparse.ArgumentParser(
description='Prokaryotic gene finder.')
parser.add_argument('--file', required=True, type=str,
metavar='<str>', help='FASTA file')
parser.add_argument('--minorf', required=False, type=int, default=300,
metavar='<int>', help='minimum open reading frame length [%(default)i]')
arg = parser.parse_args()
gcode = {
'AAA' : 'K', 'AAC' : 'N', 'AAG' : 'K', 'AAT' : 'N',
'ACA' : 'T', 'ACC' : 'T', 'ACG' : 'T', 'ACT' : 'T',
'AGA' : 'R', 'AGC' : 'S', 'AGG' : 'R', 'AGT' : 'S',
'ATA' : 'I', 'ATC' : 'I', 'ATG' : 'M', 'ATT' : 'I',
'CAA' : 'Q', 'CAC' : 'H', 'CAG' : 'Q', 'CAT' : 'H',
'CCA' : 'P', 'CCC' : 'P', 'CCG' : 'P', 'CCT' : 'P',
'CGA' : 'R', 'CGC' : 'R', 'CGG' : 'R', 'CGT' : 'R',
'CTA' : 'L', 'CTC' : 'L', 'CTG' : 'L', 'CTT' : 'L',
'GAA' : 'E', 'GAC' : 'D', 'GAG' : 'E', 'GAT' : 'D',
'GCA' : 'A', 'GCC' : 'A', 'GCG' : 'A', 'GCT' : 'A',
'GGA' : 'G', 'GGC' : 'G', 'GGG' : 'G', 'GGT' : 'G',
'GTA' : 'V', 'GTC' : 'V', 'GTG' : 'V', 'GTT' : 'V',
'TAA' : '*', 'TAC' : 'Y', 'TAG' : '*', 'TAT' : 'Y',
'TCA' : 'S', 'TCC' : 'S', 'TCG' : 'S', 'TCT' : 'S',
'TGA' : '*', 'TGC' : 'C', 'TGG' : 'W', 'TGT' : 'C',
'TTA' : 'L', 'TTC' : 'F', 'TTG' : 'L', 'TTT' : 'F',
}
def anti(seq):
forward = 'ACGTRYMKWSBDHV'
reverse = 'TGCAYRKMWSVHBD'
table = seq.maketrans(forward, reverse)
return seq.translate(table)[::-1]
# Returns the reverse complement of the sequence
def get_orfs(seq, min):
orfs = []
stop_used = {}
for i in range(len(seq) - 2):
codon = seq[i:i+3]
if codon == 'ATG':
atg = i
for j in range(atg + 3, len(seq) - 2, 3):
codon = seq[j:j+3]
if codon == 'TAG' or codon == 'TAA' or codon == 'TGA':
break
stp = j + 2
if stp - atg + 1 > min and stp not in stop_used:
stop_used[stp] = True
orfs.append(seq[atg:stp +1])
return orfs
# Scans the genome and collects ORFs: each ORF starts at an ATG and runs to the first in-frame
# stop codon, and each stop codon is allowed to terminate only one ORF
def translate(orf):
pro = []
for i in range(0, len(orf), 3):
codon = orf[i:i+3]
if codon in gcode: pro.append(gcode[codon])
else: pro.append('X')
return ''.join(pro)
#looks at each orf and translates it into amino acids
def comp(seq):
A = seq.count('A')
C = seq.count('C')
G = seq.count('G')
T = seq.count('T')
total = A + C + G + T
return A/total, C/total, G/total, T/total
#tells us what nt frequency is in actual genome
def randseq(length, a, c, g, t):
pool = int(a * 100) * "A" + int(c * 100) * "C" + int(g * 100) * "G" + int(t * 100) * "T"
seq = []
for i in range(length):
seq.append(random.choice(pool))
return ''.join(seq)
#uses nt frequency to make random genome
n = 0
len_orfs = 0
for name, seq in bt.read_fasta(arg.file):
orfs1 = get_orfs(seq, arg.minorf)
orfs2 = get_orfs(anti(seq), arg.minorf)
for orf in orfs1:
n += 1
len_orfs += len(orf)
print(f'>Protein+{n}')
print(translate(orf))
for orf in orfs2:
n += 1
len_orfs += len(orf)
print(f'>Protein-{n}')
print(translate(orf))
print(f'Number of + genes: {len(orfs1)}')
print(f'Number of - genes: {len(orfs2)}')
print(f'Number of genes: {len(orfs1 + orfs2)}')
print(f'Genome size: {len(seq)}')
print(f'Coding nts: {len_orfs}')
    print(f'Percentage genome coding: {100 * len_orfs / len(seq):.2f}%')
a, c, g, t = comp(seq)
#count of real genome
seq = randseq(int(10000), a, c, g, t)
count = 0
for orf in get_orfs(seq, arg.minorf):
count += 1
for orf in get_orfs(anti(seq), arg.minorf):
count += 1
#counts/prints how many orfs are in the random sequence
print(f'A: {a}, C: {c}, G: {g}, T: {t}')
print(f'Random orfs: {count}')
add_bp = 0
for bp in seq:
add_bp += 1
print(f'Rand_genome size: {add_bp}')
"""
Size of the genome in bp
Number of genes
Percentage of genome that is coding
Number of genes on the positive strand
Number of genes on the negative strand
"""
|
py | 1a4d21ba530c4d08efb2aa8dcaca4a29ddc08af9 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import collections
import os
import re
import time
import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
# In[2]:
def build_dataset(words, n_words, atleast=1):
count = [["PAD", 0], ["GO", 1], ["EOS", 2], ["UNK", 3]]
counter = collections.Counter(words).most_common(n_words)
counter = [i for i in counter if i[1] >= atleast]
count.extend(counter)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
index = dictionary.get(word, 0)
if index == 0:
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
# In[3]:
lines = open("movie_lines.txt", encoding="utf-8", errors="ignore").read().split("\n")
conv_lines = open("movie_conversations.txt", encoding="utf-8", errors="ignore").read().split("\n")
id2line = {}
for line in lines:
_line = line.split(" +++$+++ ")
if len(_line) == 5:
id2line[_line[0]] = _line[4]
convs = []
for line in conv_lines[:-1]:
_line = line.split(" +++$+++ ")[-1][1:-1].replace("'", "").replace(" ", "")
convs.append(_line.split(","))
questions = []
answers = []
for conv in convs:
for i in range(len(conv) - 1):
questions.append(id2line[conv[i]])
answers.append(id2line[conv[i + 1]])
def clean_text(text):
text = text.lower()
text = re.sub(r"i'm", "i am", text)
text = re.sub(r"he's", "he is", text)
text = re.sub(r"she's", "she is", text)
text = re.sub(r"it's", "it is", text)
text = re.sub(r"that's", "that is", text)
text = re.sub(r"what's", "that is", text)
text = re.sub(r"where's", "where is", text)
text = re.sub(r"how's", "how is", text)
text = re.sub(r"\'ll", " will", text)
text = re.sub(r"\'ve", " have", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"\'d", " would", text)
text = re.sub(r"\'re", " are", text)
text = re.sub(r"won't", "will not", text)
text = re.sub(r"can't", "cannot", text)
text = re.sub(r"n't", " not", text)
text = re.sub(r"n'", "ng", text)
text = re.sub(r"'bout", "about", text)
text = re.sub(r"'til", "until", text)
text = re.sub(r"[-()\"#/@;:<>{}`+=~|.!?,]", "", text)
return " ".join([i.strip() for i in filter(None, text.split())])
clean_questions = []
for question in questions:
clean_questions.append(clean_text(question))
clean_answers = []
for answer in answers:
clean_answers.append(clean_text(answer))
min_line_length = 2
max_line_length = 5
short_questions_temp = []
short_answers_temp = []
i = 0
for question in clean_questions:
if len(question.split()) >= min_line_length and len(question.split()) <= max_line_length:
short_questions_temp.append(question)
short_answers_temp.append(clean_answers[i])
i += 1
short_questions = []
short_answers = []
i = 0
for answer in short_answers_temp:
if len(answer.split()) >= min_line_length and len(answer.split()) <= max_line_length:
short_answers.append(answer)
short_questions.append(short_questions_temp[i])
i += 1
question_test = short_questions[500:550]
answer_test = short_answers[500:550]
short_questions = short_questions[:500]
short_answers = short_answers[:500]
# In[4]:
concat_from = " ".join(short_questions + question_test).split()
vocabulary_size_from = len(list(set(concat_from)))
data_from, count_from, dictionary_from, rev_dictionary_from = build_dataset(
concat_from, vocabulary_size_from
)
print("vocab from size: %d" % (vocabulary_size_from))
print("Most common words", count_from[4:10])
print("Sample data", data_from[:10], [rev_dictionary_from[i] for i in data_from[:10]])
print("filtered vocab size:", len(dictionary_from))
print("% of vocab used: {}%".format(round(len(dictionary_from) / vocabulary_size_from, 4) * 100))
# In[5]:
concat_to = " ".join(short_answers + answer_test).split()
vocabulary_size_to = len(list(set(concat_to)))
data_to, count_to, dictionary_to, rev_dictionary_to = build_dataset(concat_to, vocabulary_size_to)
print("vocab from size: %d" % (vocabulary_size_to))
print("Most common words", count_to[4:10])
print("Sample data", data_to[:10], [rev_dictionary_to[i] for i in data_to[:10]])
print("filtered vocab size:", len(dictionary_to))
print("% of vocab used: {}%".format(round(len(dictionary_to) / vocabulary_size_to, 4) * 100))
# In[6]:
GO = dictionary_from["GO"]
PAD = dictionary_from["PAD"]
EOS = dictionary_from["EOS"]
UNK = dictionary_from["UNK"]
# In[7]:
for i in range(len(short_answers)):
short_answers[i] += " EOS"
# In[8]:
class Chatbot:
def __init__(
self,
size_layer,
num_layers,
embedded_size,
from_dict_size,
to_dict_size,
learning_rate,
batch_size,
):
def cells(reuse=False):
return tf.nn.rnn_cell.GRUCell(size_layer, reuse=reuse)
self.X = tf.placeholder(tf.int32, [None, None])
self.Y = tf.placeholder(tf.int32, [None, None])
self.X_seq_len = tf.placeholder(tf.int32, [None])
self.Y_seq_len = tf.placeholder(tf.int32, [None])
batch_size = tf.shape(self.X)[0]
encoder_embeddings = tf.Variable(tf.random_uniform([from_dict_size, embedded_size], -1, 1))
decoder_embeddings = tf.Variable(tf.random_uniform([to_dict_size, embedded_size], -1, 1))
encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
main = tf.strided_slice(self.X, [0, 0], [batch_size, -1], [1, 1])
decoder_input = tf.concat([tf.fill([batch_size, 1], GO), main], 1)
decoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, decoder_input)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units=size_layer, memory=encoder_embedded
)
rnn_cells = tf.contrib.seq2seq.AttentionWrapper(
cell=tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)]),
attention_mechanism=attention_mechanism,
attention_layer_size=size_layer,
)
_, last_state = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype=tf.float32)
last_state = tuple(last_state[0][-1] for _ in range(num_layers))
with tf.variable_scope("decoder"):
rnn_cells_dec = tf.nn.rnn_cell.MultiRNNCell([cells() for _ in range(num_layers)])
outputs, _ = tf.nn.dynamic_rnn(
rnn_cells_dec, decoder_embedded, initial_state=last_state, dtype=tf.float32
)
self.logits = tf.layers.dense(outputs, to_dict_size)
masks = tf.sequence_mask(self.Y_seq_len, tf.reduce_max(self.Y_seq_len), dtype=tf.float32)
self.cost = tf.contrib.seq2seq.sequence_loss(
logits=self.logits, targets=self.Y, weights=masks
)
self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)
y_t = tf.argmax(self.logits, axis=2)
y_t = tf.cast(y_t, tf.int32)
self.prediction = tf.boolean_mask(y_t, masks)
mask_label = tf.boolean_mask(self.Y, masks)
correct_pred = tf.equal(self.prediction, mask_label)
correct_index = tf.cast(correct_pred, tf.float32)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# In[9]:
size_layer = 256
num_layers = 2
embedded_size = 128
learning_rate = 0.001
batch_size = 16
epoch = 20
# In[10]:
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Chatbot(
size_layer,
num_layers,
embedded_size,
len(dictionary_from),
len(dictionary_to),
learning_rate,
batch_size,
)
sess.run(tf.global_variables_initializer())
# In[11]:
def str_idx(corpus, dic):
X = []
for i in corpus:
ints = []
for k in i.split():
ints.append(dic.get(k, UNK))
X.append(ints)
return X
# In[12]:
X = str_idx(short_questions, dictionary_from)
Y = str_idx(short_answers, dictionary_to)
X_test = str_idx(question_test, dictionary_from)
Y_test = str_idx(answer_test, dictionary_from)
# In[13]:
maxlen_question = max([len(x) for x in X]) * 2
maxlen_answer = max([len(y) for y in Y]) * 2
maxlen_question, maxlen_answer
# In[14]:
def pad_sentence_batch(sentence_batch, pad_int, maxlen):
padded_seqs = []
seq_lens = []
max_sentence_len = maxlen
for sentence in sentence_batch:
padded_seqs.append(sentence + [pad_int] * (max_sentence_len - len(sentence)))
seq_lens.append(maxlen)
return padded_seqs, seq_lens
# In[15]:
for i in range(epoch):
total_loss, total_accuracy = 0, 0
X, Y = shuffle(X, Y)
for k in range(0, len(short_questions), batch_size):
index = min(k + batch_size, len(short_questions))
batch_x, seq_x = pad_sentence_batch(X[k:index], PAD, maxlen_answer)
batch_y, seq_y = pad_sentence_batch(Y[k:index], PAD, maxlen_answer)
predicted, accuracy, loss, _ = sess.run(
[tf.argmax(model.logits, 2), model.accuracy, model.cost, model.optimizer],
feed_dict={
model.X: batch_x,
model.Y: batch_y,
model.X_seq_len: seq_x,
model.Y_seq_len: seq_y,
},
)
total_loss += loss
total_accuracy += accuracy
total_loss /= len(short_questions) / batch_size
total_accuracy /= len(short_questions) / batch_size
print("epoch: %d, avg loss: %f, avg accuracy: %f" % (i + 1, total_loss, total_accuracy))
# In[16]:
for i in range(len(batch_x)):
print("row %d" % (i + 1))
print(
"QUESTION:", " ".join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0, 1, 2, 3]])
)
print(
"REAL ANSWER:",
" ".join([rev_dictionary_to[n] for n in batch_y[i] if n not in [0, 1, 2, 3]]),
)
print(
"PREDICTED ANSWER:",
" ".join([rev_dictionary_to[n] for n in predicted[i] if n not in [0, 1, 2, 3]]),
"\n",
)
# In[17]:
batch_x, seq_x = pad_sentence_batch(X_test[:batch_size], PAD, maxlen_answer)
batch_y, seq_y = pad_sentence_batch(Y_test[:batch_size], PAD, maxlen_answer)
predicted = sess.run(
tf.argmax(model.logits, 2), feed_dict={model.X: batch_x, model.X_seq_len: seq_x}
)
for i in range(len(batch_x)):
print("row %d" % (i + 1))
print(
"QUESTION:", " ".join([rev_dictionary_from[n] for n in batch_x[i] if n not in [0, 1, 2, 3]])
)
print(
"REAL ANSWER:",
" ".join([rev_dictionary_to[n] for n in batch_y[i] if n not in [0, 1, 2, 3]]),
)
print(
"PREDICTED ANSWER:",
" ".join([rev_dictionary_to[n] for n in predicted[i] if n not in [0, 1, 2, 3]]),
"\n",
)
# In[ ]:
|
py | 1a4d21c2d8505ae9f4e1ab1a2292155ad993e6c9 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017-2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import unittest
import os
from tern.classes.image_layer import ImageLayer
from tern.classes.package import Package
from tern.classes.file_data import FileData
from tern.utils import rootfs
from test_fixtures import TestTemplate1
from test_fixtures import TestTemplate2
class TestClassImageLayer(unittest.TestCase):
def setUp(self):
self.layer = ImageLayer('123abc', 'path/to/tar')
rootfs.set_working_dir()
def tearDown(self):
del self.layer
def testInstance(self):
self.assertEqual(self.layer.diff_id, '123abc')
self.assertEqual(self.layer.tar_file, 'path/to/tar')
self.assertFalse(self.layer.packages)
self.assertFalse(self.layer.created_by)
self.assertRaises(AttributeError, setattr, self.layer,
'diff_id', '456def')
self.assertRaises(AttributeError, setattr, self.layer, 'tar_file',
'some/other/path')
self.layer.created_by = 'some string'
self.assertEqual(self.layer.created_by, 'some string')
self.layer.pkg_format = 'rpm'
self.assertEqual(self.layer.pkg_format, 'rpm')
self.layer.os_guess = 'operating system'
self.assertEqual(self.layer.os_guess, 'operating system')
self.assertFalse(self.layer.files_analyzed)
self.layer.files_analyzed = True
self.assertTrue(self.layer.files_analyzed)
self.assertRaises(ValueError, setattr, self.layer,
'files_analyzed', 'some string')
self.assertEqual("", self.layer.analyzed_output)
self.layer.analyzed_output = 'some string'
self.assertEqual(self.layer.analyzed_output, 'some string')
self.assertRaises(ValueError, setattr, self.layer,
'analyzed_output', 123)
def testAddPackage(self):
err = "Object type String, should be Package"
p1 = Package('x')
self.layer.add_package(p1)
self.assertEqual(len(self.layer.packages), 1)
with self.assertRaises(TypeError, msg=err):
self.layer.add_package("not_a_package")
def testRemovePackage(self):
p1 = Package('x')
p2 = Package('y')
self.layer.add_package(p1)
self.layer.add_package(p2)
self.assertTrue(self.layer.remove_package('y'))
self.assertFalse(self.layer.remove_package('y'))
def testAddFile(self):
err = "Object type String, should be FileData"
file1 = FileData('file1', 'path/to/file1')
self.layer.add_file(file1)
self.assertEqual(len(self.layer.files), 1)
with self.assertRaises(TypeError, msg=err):
self.layer.add_file("afile")
def testRemoveFile(self):
file1 = FileData('file1', 'path/to/file1')
self.layer.add_file(file1)
self.assertFalse(self.layer.remove_file('file1'))
self.assertTrue(self.layer.remove_file('path/to/file1'))
self.assertFalse(self.layer.remove_file('path/to/file1'))
def testToDict(self):
p1 = Package('x')
f1 = FileData('file1', 'path/to/file1')
self.layer.add_package(p1)
self.layer.add_file(f1)
a_dict = self.layer.to_dict()
self.assertEqual(a_dict['diff_id'], '123abc')
self.assertEqual(len(a_dict['packages']), 1)
self.assertEqual(a_dict['packages'][0]['name'], 'x')
self.assertEqual(len(a_dict['files']), 1)
self.assertEqual(a_dict['files'][0]['name'], 'file1')
self.assertEqual(a_dict['files'][0]['path'], 'path/to/file1')
self.assertEqual(a_dict['tar_file'], 'path/to/tar')
def testToDictTemplate(self):
template1 = TestTemplate1()
template2 = TestTemplate2()
p1 = Package('x')
self.layer.add_package(p1)
f1 = FileData('file1', 'path/to/file1')
self.layer.add_file(f1)
dict1 = self.layer.to_dict(template1)
dict2 = self.layer.to_dict(template2)
self.assertEqual(len(dict1.keys()), 4)
self.assertEqual(dict1['layer.diff'], '123abc')
self.assertEqual(dict1['layer.tarfile'], 'path/to/tar')
self.assertEqual(len(dict1['layer.packages']), 1)
self.assertEqual(len(dict1['layer.files']), 1)
self.assertEqual(len(dict2.keys()), 5)
self.assertFalse(dict2['notes'])
self.assertFalse(dict2['layer.packages'][0]['notes'])
self.assertFalse(dict2['layer.files'][0]['notes'])
def testGetPackageNames(self):
p1 = Package('x')
self.layer.add_package(p1)
pkgs = self.layer.get_package_names()
self.assertEqual(pkgs[0], 'x')
def testGetFilePaths(self):
f1 = FileData('file1', 'path/to/file1')
f2 = FileData('file2', 'path/to/file2')
self.layer.add_file(f1)
self.layer.add_file(f2)
file_paths = self.layer.get_file_paths()
self.assertEqual(file_paths, ['path/to/file1', 'path/to/file2'])
def testSetChecksum(self):
self.layer.set_checksum('sha256', '12345abcde')
self.assertEqual(self.layer.checksum_type, 'sha256')
self.assertEqual(self.layer.checksum, '12345abcde')
def testAddChecksums(self):
self.layer.add_checksums({'SHA1': '12345abcde',
'MD5': '1ff38cc592c4c5d0c8e3ca38be8f1eb1'})
self.assertEqual(self.layer.checksums,
{'sha1': '12345abcde',
'md5': '1ff38cc592c4c5d0c8e3ca38be8f1eb1'})
def testSetExtensionInfo(self):
self.layer.extension_info = {"header": set({"Test Header"})}
self.assertIsInstance(self.layer.extension_info, dict)
self.assertIsNotNone(
self.layer.extension_info.get("header", None), None)
self.assertIsInstance(
self.layer.extension_info.get("header", None), set)
header = self.layer.extension_info.get("header").pop()
self.assertEqual(header, "Test Header")
def testGetUntarDir(self):
self.layer.image_layout = "oci"
self.assertEqual(self.layer.image_layout, "oci")
self.layer.image_layout = "docker"
self.assertEqual(self.layer.image_layout, "docker")
self.layer.image_layout = ""
self.assertEqual(self.layer.image_layout, "oci")
self.layer.layer_index = 1
self.assertEqual(self.layer.layer_index, "1")
expected_path = os.path.join(rootfs.get_working_dir(),
'1/contents')
self.assertEqual(self.layer.get_untar_dir(), expected_path)
self.layer.image_layout = "docker"
expected_path = os.path.join(rootfs.get_working_dir(),
'path/to/contents')
self.assertEqual(self.layer.get_untar_dir(), expected_path)
# Kaniko image format test
self.layer = ImageLayer('123abc', 'some_layer_tar_file.tar.gz')
self.layer.image_layout = "docker"
expected_path = os.path.join(rootfs.get_working_dir(),
'some_layer_tar_file/contents')
self.assertEqual(self.layer.get_untar_dir(), expected_path)
if __name__ == '__main__':
unittest.main()
|
py | 1a4d227753b6347f9e30013bee1ca117e3019aad | try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
import sys, platform
sys.path.append('python')
extra_compile_args = ['-DHAVE_KALLOC']
include_dirs = ["."]
if platform.machine() in ["aarch64", "arm64"]:
include_dirs.append("sse2neon/")
extra_compile_args.extend(['-ftree-vectorize', '-DKSW_SSE2_ONLY', '-D__SSE2__'])
else:
extra_compile_args.append('-msse4.1') # WARNING: ancient x86_64 CPUs don't have SSE4
def readme():
with open('python/README.rst') as f:
return f.read()
setup(
name = 'mappy',
version = '2.21',
url = 'https://github.com/lh3/minimap2',
description = 'Minimap2 python binding',
long_description = readme(),
author = 'Heng Li',
author_email = '[email protected]',
license = 'MIT',
keywords = 'sequence-alignment',
scripts = ['python/minimap2.py'],
ext_modules = [Extension('mappy',
sources = ['python/mappy.pyx', 'align.c', 'bseq.c', 'lchain.c', 'seed.c', 'format.c', 'hit.c', 'index.c', 'pe.c', 'options.c',
'ksw2_extd2_sse.c', 'ksw2_exts2_sse.c', 'ksw2_extz2_sse.c', 'ksw2_ll_sse.c',
'kalloc.c', 'kthread.c', 'map.c', 'misc.c', 'sdust.c', 'sketch.c', 'esterr.c', 'splitidx.c'],
depends = ['minimap.h', 'bseq.h', 'kalloc.h', 'kdq.h', 'khash.h', 'kseq.h', 'ksort.h',
'ksw2.h', 'kthread.h', 'kvec.h', 'mmpriv.h', 'sdust.h',
'python/cmappy.h', 'python/cmappy.pxd'],
extra_compile_args = extra_compile_args,
include_dirs = include_dirs,
libraries = ['z', 'm', 'pthread'])],
classifiers = [
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Cython',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics'],
setup_requires=["cython"])
|
py | 1a4d2376c7b81ba48e59e66b52f95a132162da2f | from fontTools.designspaceLib import DesignSpaceDocument
from fontTools.pens.pointPen import PointToSegmentPen
from fontTools.varLib.models import VariationModel, allEqual, normalizeLocation
from ufoLib2 import Font as UFont
from .objects import Component, Glyph, MathDict
from .utils import makeTransformVarCo, tuplifyLocation
class VarCoGlyph(Glyph):
@classmethod
def loadFromUFOs(cls, ufos, locations, glyphName, axes):
uglyph = ufos[0][glyphName]
self = cls.loadFromGlyphObject(uglyph)
self.axes = axes
self._postParse(ufos, locations)
return self
def _postParse(self, ufos, locations):
# Filter out and collect component info from the outline
self.outline, components = self.outline.splitComponents()
# Build Component objects
vcComponentData = self.lib.get("varco.components", [])
if vcComponentData:
assert len(components) == len(vcComponentData), (
self.name,
len(components),
len(vcComponentData),
components,
)
else:
vcComponentData = [None] * len(components)
assert len(self.components) == 0
for (baseGlyph, affine), vcCompo in zip(components, vcComponentData):
if vcCompo is None:
xx, xy, yx, yy, dx, dy = affine
assert xy == 0, "rotation and skew are not implemented"
assert yx == 0, "rotation and skew are not implemented"
coord = {}
transform = MathDict(
x=dx,
y=dy,
rotation=0,
scalex=xx,
scaley=yy,
skewx=0,
skewy=0,
tcenterx=0,
tcentery=0,
)
else:
assert affine[:4] == (1, 0, 0, 1)
x, y = affine[4:]
coord = vcCompo["coord"]
transformDict = vcCompo["transform"]
transform = MathDict(
x=affine[4],
y=affine[5],
rotation=transformDict.get("rotation", 0),
scalex=transformDict.get("scalex", 1),
scaley=transformDict.get("scaley", 1),
skewx=transformDict.get("skewx", 0),
skewy=transformDict.get("skewy", 0),
tcenterx=transformDict.get("tcenterx", 0),
tcentery=transformDict.get("tcentery", 0),
)
self.components.append(Component(baseGlyph, MathDict(coord), transform))
assert len(self.variations) == 0
if ufos:
assert len(ufos) == len(locations)
for ufo, location in zip(ufos[1:], locations[1:]):
if self.name not in ufo:
continue
for axisName, axisValue in location.items():
assert -1 <= axisValue <= 1, (axisName, axisValue)
varGlyph = self.__class__.loadFromGlyphObject(ufo[self.name])
varGlyph._postParse([], [])
varGlyph.location = location
self.variations.append(varGlyph)
if self.variations:
locations = [{}] + [variation.location for variation in self.variations]
self.model = VariationModel(locations)
class VarCoFont:
def __init__(self, designSpacePath):
doc = DesignSpaceDocument.fromfile(designSpacePath)
self.axes, self.ufos, self.locations = unpackDesignSpace(doc)
self.varcoGlyphs = {}
def drawGlyph(self, pen, glyphName, location):
self.drawPointsGlyph(PointToSegmentPen(pen), glyphName, location)
def drawPointsGlyph(self, pen, glyphName, location, transform=None):
varGlyph = self[glyphName]
instanceGlyph = varGlyph.instantiate(location)
outline = instanceGlyph.outline
if transform is not None:
outline = outline.transform(transform)
outline.drawPoints(pen)
for component in instanceGlyph.components:
t = makeTransformVarCo(**component.transform)
if transform is not None:
t = transform.transform(t)
self.drawPointsGlyph(pen, component.name, component.coord, t)
def keys(self):
return self.ufos[0].keys()
def __contains__(self, glyphName):
return glyphName in self.ufos[0]
def __len__(self):
return len(self.ufos[0])
def __iter__(self):
return iter(self.ufos[0].keys())
def __getitem__(self, glyphName):
varcoGlyph = self.varcoGlyphs.get(glyphName)
if varcoGlyph is None:
varcoGlyph = VarCoGlyph.loadFromUFOs(
self.ufos, self.locations, glyphName, self.axes
)
self.varcoGlyphs[glyphName] = varcoGlyph
return varcoGlyph
def get(self, glyphName, default=None):
try:
glyph = self[glyphName]
except KeyError:
glyph = default
return glyph
def extractVarCoData(self, globalAxisNames, neutralOnly=False):
allLocations = set()
vcData = {}
neutralGlyphNames = []
for glyphName in sorted(self.keys()):
glyph = self[glyphName]
axisTags = {axisTag for v in glyph.variations for axisTag in v.location}
if neutralOnly and not axisTags - globalAxisNames:
masters = [glyph]
neutralGlyphNames.append(glyphName)
else:
masters = [glyph] + glyph.variations
if not glyph.outline.isEmpty() and glyph.components:
assert not any(
c.coord for c in glyph.components
), "can't mix outlines and variable components"
# ensure only the offset may vary across masters
for attr in [
"rotation",
"scalex",
"scaley",
"skewx",
"skewy",
"tcenterx",
"tcentery",
]:
values = {c.transform[attr] for m in masters for c in m.components}
assert len(values) == 1, f"classic component varies {attr}"
# This glyph mixes outlines and classic components, it will be
# flattened upon TTF compilation, so should not be part of the VarC table
continue
locations = [m.location for m in masters]
allLocations.update(tuplifyLocation(loc) for loc in locations)
components = []
for i in range(len(glyph.components)):
assert allEqual([m.components[i].name for m in masters])
coords = [m.components[i].coord for m in masters]
fillMissingFromNeutral(coords)
transforms = [
# Filter out x and y, as they'll be in glyf and gvar
{
_transformFieldMapping[k]: v
for k, v in m.components[i].transform.items()
if k not in {"x", "y"}
}
for m in masters
]
components.append(list(zip(coords, transforms)))
if components:
vcData[glyphName] = components, locations
allLocations = [dict(items) for items in sorted(allLocations)]
return vcData, allLocations, neutralGlyphNames
def fillMissingFromNeutral(coords):
# This ensures that all variation coord dicts contain all the
# keys from the neutral coord dict. If missing, the value from
# the neutral coord is used. This is crucial for the variation
# building mechanism.
firstCoord = coords[0]
for coord in coords[1:]:
for k, v in firstCoord.items():
coord.setdefault(k, v)
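# For example, fillMissingFromNeutral([{"wght": 1, "wdth": 0}, {"wght": 2}])
# leaves the first (neutral) coord untouched and fills the second in place to
# {"wght": 2, "wdth": 0} (axis names here are purely illustrative).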
def unpackDesignSpace(doc):
axisTagMapping = {axis.name: axis.tag for axis in doc.axes}
axes = {axis.tag: (axis.minimum, axis.default, axis.maximum) for axis in doc.axes}
# We want the default source to be the first in the list; the rest of
# the order is not important
sources = sorted(doc.sources, key=lambda src: src != doc.default)
ufos = []
locations = []
_loaded = {}
for src in sources:
loc = src.location
loc = {
axisTagMapping[axisName]: axisValue for axisName, axisValue in loc.items()
}
loc = normalizeLocation(loc, axes)
loc = {
axisName: axisValue for axisName, axisValue in loc.items() if axisValue != 0
}
locations.append(loc)
ufo = _loaded.get(src.path)
if ufo is None:
ufo = UFont(src.path)
_loaded[src.path] = ufo
if src.layerName is None:
ufo.layers.defaultLayer
else:
ufo = ufo.layers[src.layerName]
ufos.append(ufo)
userAxes = {
axis.tag: (axis.minimum, axis.default, axis.maximum)
for axis in doc.axes
if not axis.hidden
}
return userAxes, ufos, locations
_transformFieldMapping = {
"rotation": "Rotation",
"scalex": "ScaleX",
"scaley": "ScaleY",
"skewx": "SkewX",
"skewy": "SkewY",
"tcenterx": "TCenterX",
"tcentery": "TCenterY",
}
if __name__ == "__main__":
import sys
ufoPath = sys.argv[1]
vcFont = VarCoFont(ufoPath)
g = vcFont["DC_5927_03"]
print(g.components)
print(g.axes)
x = g + 0.5 * (g.variations[0] - g)
print(g.components[-1].transform)
print(x.components[-1].transform)
print(g.variations[0].components[-1].transform)
print(list(vcFont.keys())[:100])
print("AE_PieZhe" in vcFont)
# for x in vcFont:
# print(x)
|
py | 1a4d260ea51e35458b237706a7283e6abed7c732 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#! /usr/bin/env python2
import os
import numpy as np
from matplotlib import pyplot
import re
from argparse import Namespace
# the directory used in run_on_cluster.bash
basedir = '/mnt/vol/gfsai-east/ai-group/users/matthijs/bench_all_ivf/'
logdir = basedir + 'logs/'
# which plot to output
db = 'bigann1B'
code_size = 8
def unitsize(indexkey):
""" size of one vector in the index """
mo = re.match('.*,PQ(\\d+)', indexkey)
if mo:
return int(mo.group(1))
if indexkey.endswith('SQ8'):
bits_per_d = 8
elif indexkey.endswith('SQ4'):
bits_per_d = 4
elif indexkey.endswith('SQfp16'):
bits_per_d = 16
else:
assert False
mo = re.match('PCAR(\\d+),.*', indexkey)
if mo:
return bits_per_d * int(mo.group(1)) / 8
mo = re.match('OPQ\\d+_(\\d+),.*', indexkey)
if mo:
return bits_per_d * int(mo.group(1)) / 8
mo = re.match('RR(\\d+),.*', indexkey)
if mo:
return bits_per_d * int(mo.group(1)) / 8
assert False
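# Illustrative values (bytes per encoded vector), assuming these index keys:
# unitsize('OPQ32_128,IVF65536,PQ32') -> 32   (PQ code size in bytes)
# unitsize('OPQ16_64,IVF65536,SQ8')   -> 64   (8 bits per dimension on 64 dims)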
def dbsize_from_name(dbname):
sufs = {
'1B': 10**9,
'100M': 10**8,
'10M': 10**7,
'1M': 10**6,
}
for s in sufs:
if dbname.endswith(s):
return sufs[s]
else:
assert False
def keep_latest_stdout(fnames):
fnames = [fname for fname in fnames if fname.endswith('.stdout')]
fnames.sort()
n = len(fnames)
fnames2 = []
for i, fname in enumerate(fnames):
if i + 1 < n and fnames[i + 1][:-8] == fname[:-8]:
continue
fnames2.append(fname)
return fnames2
def parse_result_file(fname):
# print fname
st = 0
res = []
keys = []
stats = {}
stats['run_version'] = fname[-8]
for l in open(fname):
if st == 0:
if l.startswith('CHRONOS_JOB_INSTANCE_ID'):
stats['CHRONOS_JOB_INSTANCE_ID'] = l.split()[-1]
if l.startswith('index size on disk:'):
stats['index_size'] = int(l.split()[-1])
if l.startswith('current RSS:'):
stats['RSS'] = int(l.split()[-1])
if l.startswith('precomputed tables size:'):
stats['tables_size'] = int(l.split()[-1])
if l.startswith('Setting nb of threads to'):
stats['n_threads'] = int(l.split()[-1])
if l.startswith(' add in'):
stats['add_time'] = float(l.split()[-2])
if l.startswith('args:'):
args = eval(l[l.find(' '):])
indexkey = args.indexkey
elif 'R@1 R@10 R@100' in l:
st = 1
elif 'index size on disk:' in l:
index_size = int(l.split()[-1])
elif st == 1:
st = 2
elif st == 2:
fi = l.split()
keys.append(fi[0])
res.append([float(x) for x in fi[1:]])
return indexkey, np.array(res), keys, stats
# run parsing
allres = {}
allstats = {}
nts = []
missing = []
versions = {}
fnames = keep_latest_stdout(os.listdir(logdir))
# print fnames
# filenames are in the form <key>.x.stdout
# where x is a version number (from a to z)
# keep only latest version of each name
for fname in fnames:
if not ('db' + db in fname and fname.endswith('.stdout')):
continue
indexkey, res, _, stats = parse_result_file(logdir + fname)
if res.size == 0:
missing.append(fname)
errorline = open(
logdir + fname.replace('.stdout', '.stderr')).readlines()
if len(errorline) > 0:
errorline = errorline[-1]
else:
errorline = 'NO STDERR'
print fname, stats['CHRONOS_JOB_INSTANCE_ID'], errorline
else:
if indexkey in allres:
if allstats[indexkey]['run_version'] > stats['run_version']:
# don't use this run
continue
n_threads = stats.get('n_threads', 1)
nts.append(n_threads)
allres[indexkey] = res
allstats[indexkey] = stats
assert len(set(nts)) == 1
n_threads = nts[0]
def plot_tradeoffs(allres, code_size, recall_rank):
dbsize = dbsize_from_name(db)
recall_idx = int(np.log10(recall_rank))
bigtab = []
names = []
for k,v in sorted(allres.items()):
if v.ndim != 2: continue
us = unitsize(k)
if us != code_size: continue
perf = v[:, recall_idx]
times = v[:, 3]
bigtab.append(
np.vstack((
np.ones(times.size, dtype=int) * len(names),
perf, times
))
)
names.append(k)
bigtab = np.hstack(bigtab)
perm = np.argsort(bigtab[1, :])
bigtab = bigtab[:, perm]
times = np.minimum.accumulate(bigtab[2, ::-1])[::-1]
selection = np.where(bigtab[2, :] == times)
selected_methods = [names[i] for i in
np.unique(bigtab[0, selection].astype(int))]
not_selected = list(set(names) - set(selected_methods))
print "methods without an optimal OP: ", not_selected
nq = 10000
pyplot.title('database ' + db + ' code_size=%d' % code_size)
# grayed out lines
for k in not_selected:
v = allres[k]
if v.ndim != 2: continue
us = unitsize(k)
if us != code_size: continue
linestyle = (':' if 'PQ' in k else
'-.' if 'SQ4' in k else
'--' if 'SQ8' in k else '-')
pyplot.semilogy(v[:, recall_idx], v[:, 3], label=None,
linestyle=linestyle,
marker='o' if 'HNSW' in k else '+',
color='#cccccc', linewidth=0.2)
# important methods
for k in selected_methods:
v = allres[k]
if v.ndim != 2: continue
us = unitsize(k)
if us != code_size: continue
stats = allstats[k]
tot_size = stats['index_size'] + stats['tables_size']
id_size = 8 # 64 bit
addt = ''
if 'add_time' in stats:
add_time = stats['add_time']
if add_time > 7200:
add_min = add_time / 60
addt = ', %dh%02d' % (add_min / 60, add_min % 60)
else:
add_sec = int(add_time)
addt = ', %dm%02d' % (add_sec / 60, add_sec % 60)
label = k + ' (size+%.1f%%%s)' % (
tot_size / float((code_size + id_size) * dbsize) * 100 - 100,
addt)
linestyle = (':' if 'PQ' in k else
'-.' if 'SQ4' in k else
'--' if 'SQ8' in k else '-')
pyplot.semilogy(v[:, recall_idx], v[:, 3], label=label,
linestyle=linestyle,
marker='o' if 'HNSW' in k else '+')
if len(not_selected) == 0:
om = ''
else:
om = '\nomitted:'
nc = len(om)
for m in not_selected:
if nc > 80:
om += '\n'
nc = 0
om += ' ' + m
nc += len(m) + 1
pyplot.xlabel('1-recall at %d %s' % (recall_rank, om) )
pyplot.ylabel('search time per query (ms, %d threads)' % n_threads)
pyplot.legend()
pyplot.grid()
pyplot.savefig('figs/tradeoffs_%s_cs%d_r%d.png' % (
db, code_size, recall_rank))
return selected_methods, not_selected
pyplot.gcf().set_size_inches(15, 10)
plot_tradeoffs(allres, code_size=code_size, recall_rank=1)
|
py | 1a4d26322f53f5d01cc2f8701ec6c9745b8d3107 | info = {
"name": "pa-Arab",
"date_order": "DMY",
"january": [
"جنوری"
],
"february": [
"فروری"
],
"march": [
"مارچ"
],
"april": [
"اپریل"
],
"may": [
"مئ"
],
"june": [
"جون"
],
"july": [
"جولائی"
],
"august": [
"اگست"
],
"september": [
"ستمبر"
],
"october": [
"اکتوبر"
],
"november": [
"نومبر"
],
"december": [
"دسمبر"
],
"monday": [
"پیر"
],
"tuesday": [
"منگل"
],
"wednesday": [
"بُدھ"
],
"thursday": [
"جمعرات"
],
"friday": [
"جمعہ"
],
"saturday": [
"ہفتہ"
],
"sunday": [
"اتوار"
],
"am": [
"am"
],
"pm": [
"pm"
],
"year": [
"ورھا"
],
"month": [
"مہينا"
],
"week": [
"ہفتہ"
],
"day": [
"دئن"
],
"hour": [
"گھنٹا"
],
"minute": [
"منٹ"
],
"second": [
"second"
],
"relative-type": {
"0 day ago": [
"today"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"this month"
],
"0 second ago": [
"now"
],
"0 week ago": [
"this week"
],
"0 year ago": [
"this year"
],
"1 day ago": [
"yesterday"
],
"1 month ago": [
"last month"
],
"1 week ago": [
"last week"
],
"1 year ago": [
"last year"
],
"in 1 day": [
"tomorrow"
],
"in 1 month": [
"next month"
],
"in 1 week": [
"next week"
],
"in 1 year": [
"next year"
]
},
"locale_specific": {},
"skip": [
" ",
".",
",",
";",
"-",
"/",
"'",
"|",
"@",
"[",
"]",
","
]
}
|
py | 1a4d2671b79a2b87ade11d6a4ec9200544f7b303 | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.batch_jobs.validation_jobs."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core.platform import models
import feconf
from jobs import job_test_utils
from jobs.batch_jobs import validation_jobs
from jobs.transforms.validation import base_validation
from jobs.types import base_validation_errors
from jobs.types import model_property
(auth_models, base_models, user_models) = models.Registry.import_models(
[models.NAMES.auth, models.NAMES.base_model, models.NAMES.user])
class AuditAllStorageModelsJobTests(job_test_utils.JobTestBase):
JOB_CLASS = validation_jobs.AuditAllStorageModelsJob
VALID_USER_ID = 'uid_%s' % ('a' * feconf.USER_ID_RANDOM_PART_LENGTH)
def test_empty_storage(self):
self.assert_job_output_is_empty()
def test_base_validation(self):
base_model_with_invalid_id = self.create_model(
base_models.BaseModel, id='123@?!*', deleted=False)
base_model_with_invalid_timestamps = self.create_model(
base_models.BaseModel, id='124', deleted=False,
created_on=self.NOW, last_updated=self.YEAR_LATER)
base_model_with_inconsistent_timestamps = self.create_model(
base_models.BaseModel, id='125', deleted=False,
created_on=self.YEAR_LATER, last_updated=self.YEAR_AGO)
expired_base_model = self.create_model(
base_models.BaseModel, id='126', deleted=True)
valid_base_model = self.create_model(
base_models.BaseModel, id='127', deleted=False)
self.put_multi([
base_model_with_invalid_id,
base_model_with_invalid_timestamps,
base_model_with_inconsistent_timestamps,
expired_base_model,
valid_base_model,
])
self.assert_job_output_is([
base_validation_errors.ModelIdRegexError(
base_model_with_invalid_id,
base_validation.BASE_MODEL_ID_PATTERN),
base_validation_errors.ModelMutatedDuringJobError(
base_model_with_invalid_timestamps),
base_validation_errors.InconsistentTimestampsError(
base_model_with_inconsistent_timestamps),
base_validation_errors.ModelExpiredError(expired_base_model),
])
def test_user_audits(self):
user_settings_model_with_invalid_id = self.create_model(
user_models.UserSettingsModel,
id='128', email='[email protected]')
user_settings_model_with_valid_id = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID, email='[email protected]')
self.put_multi([
user_settings_model_with_invalid_id,
user_settings_model_with_valid_id,
])
self.assert_job_output_is([
base_validation_errors.ModelIdRegexError(
user_settings_model_with_invalid_id, feconf.USER_ID_REGEX),
])
def test_reports_error_when_id_property_target_does_not_exist(self):
self.put_multi([
# UserEmailPreferencesModel.id -> UserSettingsModel.id.
self.create_model(
user_models.UserEmailPreferencesModel, id=self.VALID_USER_ID),
# UserSettingsModel missing.
])
self.assert_job_output_is([
base_validation_errors.ModelRelationshipError(
model_property.ModelProperty(
user_models.UserEmailPreferencesModel,
user_models.UserEmailPreferencesModel.id),
self.VALID_USER_ID, 'UserSettingsModel', self.VALID_USER_ID),
])
def test_empty_when_id_property_target_exists(self):
self.put_multi([
self.create_model(
user_models.UserEmailPreferencesModel, id=self.VALID_USER_ID),
self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID, email='[email protected]'),
])
self.assert_job_output_is_empty()
def test_empty_when_web_of_id_property_targets_exist(self):
self.put_multi([
self.create_model(
auth_models.UserAuthDetailsModel,
id=self.VALID_USER_ID, firebase_auth_id='abc', gae_id='123'),
self.create_model(
auth_models.UserIdByFirebaseAuthIdModel,
id='abc', user_id=self.VALID_USER_ID),
self.create_model(
auth_models.UserIdentifiersModel,
id='123', user_id=self.VALID_USER_ID),
])
self.assert_job_output_is_empty()
def test_reports_missing_id_property_target_even_if_sibling_property_is_valid(self): # pylint: disable=line-too-long
self.put_multi([
self.create_model(
auth_models.UserAuthDetailsModel, id=self.VALID_USER_ID,
# Value is not None, so UserIdentifiersModel must exist.
gae_id='abc',
# Value is None, so missing UserIdByFirebaseAuthIdModel is OK.
firebase_auth_id=None),
self.create_model(
auth_models.UserIdentifiersModel, user_id=self.VALID_USER_ID,
# Should be gae_id='abc', so error will occur.
id='123'),
])
self.assert_job_output_is([
base_validation_errors.ModelRelationshipError(
model_property.ModelProperty(
auth_models.UserAuthDetailsModel,
auth_models.UserAuthDetailsModel.gae_id),
self.VALID_USER_ID, 'UserIdentifiersModel', 'abc'),
])
|
py | 1a4d267eeebc286ef052602199a6e118c697bd17 | # Generated by Django 2.2 on 2020-04-27 22:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | 1a4d27663e305c33ad1fb304200436b12c9929e4 | import argparse
import logging
import os
import platform
from yaml import safe_load
configdata = None
if platform.system() == "Windows":
APPDATA = os.environ["APPDATA"]
CONFIGFILE = os.path.join(APPDATA, "svtplay-dl", "svtplay-dl.yaml")
else:
CONFIGFILE = os.path.expanduser("~/.svtplay-dl.yaml")
class Options:
"""
Options used when invoking the script from another Python script.
Simple container class used when calling get_media() from another Python
    script. The variables correspond to the command line parameters parsed
in main() when the script is called directly.
When called from a script there are a few more things to consider:
* Logging is done to 'log'. main() calls setup_log() which sets the
logging to either stdout or stderr depending on the silent level.
A user calling get_media() directly can either also use setup_log()
or configure the log manually.
* Progress information is printed to 'progress_stream' which defaults to
sys.stderr but can be changed to any stream.
    * Many errors result in calls to sys.exit(), so catch 'SystemExit'
    exceptions to prevent the entire application from exiting if that happens.
"""
def __init__(self):
self.default = {}
def set(self, key, value):
self.default[key] = value
def get(self, key):
if key in self.default:
return self.default[key]
def get_variable(self):
return self.default
def set_variable(self, value):
self.default = value
def gen_parser(version="unknown"):
parser = argparse.ArgumentParser(prog="svtplay-dl")
general = parser.add_argument_group()
general.add_argument("--version", action="version", version=f"%(prog)s {version}")
general.add_argument("-o", "--output", metavar="output", default=None, help="outputs to the given filename or folder")
general.add_argument(
"--subfolder",
action="store_true",
default=False,
help="Create a subfolder titled as the show, non-series gets in folder movies",
)
general.add_argument("--config", dest="configfile", metavar="configfile", default=CONFIGFILE, help="Specify configuration file")
general.add_argument("-f", "--force", action="store_true", dest="force", default=False, help="overwrite if file exists already")
general.add_argument("-r", "--resume", action="store_true", dest="resume", default=False, help="resume a download (RTMP obsolete)")
general.add_argument("-l", "--live", action="store_true", dest="live", default=False, help="enable for live streams (RTMP based ones)")
general.add_argument("-c", "--capture_time", default=-1, type=int, metavar="capture_time", help="define capture time in minutes of a live stream")
general.add_argument("-s", "--silent", action="store_true", dest="silent", default=False, help="be less verbose")
general.add_argument(
"--silent-semi",
action="store_true",
dest="silent_semi",
default=False,
help="only show a message when the file is downloaded",
)
general.add_argument("-u", "--username", default=None, help="username")
general.add_argument("-p", "--password", default=None, help="password")
general.add_argument(
"-t",
"--thumbnail",
action="store_true",
dest="thumbnail",
default=False,
help="download thumbnail from the site if available",
)
general.add_argument(
"-g",
"--get-url",
action="store_true",
dest="get_url",
default=False,
help="do not download any video, but instead print the URL.",
)
general.add_argument(
"--get-only-episode-url",
action="store_true",
dest="get_only_episode_url",
default=False,
help="do not get video URLs, only print the episode URL.",
)
general.add_argument(
"--dont-verify-ssl-cert",
action="store_false",
dest="ssl_verify",
default=True,
help="Don't attempt to verify SSL certificates.",
)
general.add_argument(
"--http-header",
dest="http_headers",
default=None,
metavar="header1=value;header2=value2",
help="A header to add to each HTTP request.",
)
general.add_argument(
"--cookies",
dest="cookies",
default=None,
metavar="cookie1=value;cookie2=value2",
help="A cookies to add to each HTTP request.",
)
general.add_argument("--remux", dest="remux", default=False, action="store_true", help="Remux from one container to mp4 using ffmpeg or avconv")
general.add_argument(
"--exclude",
dest="exclude",
default=None,
metavar="WORD1,WORD2,...",
help="exclude videos with the WORD(s) in the filename. comma separated.",
)
general.add_argument("--after-date", dest="after_date", default=None, metavar="yyyy-MM-dd", help="only videos published on or after this date")
general.add_argument(
"--proxy",
dest="proxy",
default=None,
metavar="proxy",
help="Use the specified HTTP/HTTPS/SOCKS proxy. To enable experimental "
"SOCKS proxy, specify a proper scheme. For example "
"socks5://127.0.0.1:1080/.",
)
general.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, help="explain what is going on")
general.add_argument("--nfo", action="store_true", dest="nfo", default=False, help="create a NFO file")
general.add_argument("--force-nfo", action="store_true", dest="force_nfo", default=False, help="download only NFO if used with --nfo")
general.add_argument(
"--only-audio",
action="store_true",
dest="only_audio",
default=False,
help="only download audio if audio and video is seperated",
)
general.add_argument(
"--only-video",
action="store_true",
dest="only_video",
default=False,
help="only download video if audio and video is seperated",
)
quality = parser.add_argument_group("Quality")
quality.add_argument(
"-q",
"--quality",
default=0,
metavar="quality",
help="choose what format to download based on bitrate / video resolution." "it will download the best format by default",
)
quality.add_argument(
"-Q",
"--flexible-quality",
default=0,
metavar="amount",
dest="flexibleq",
help="allow given quality (as above) to differ by an amount",
)
quality.add_argument("-P", "--preferred", default=None, metavar="preferred", help="preferred download method (dash, hls, hds, or http)")
quality.add_argument("--list-quality", dest="list_quality", action="store_true", default=False, help="list the quality for a video")
quality.add_argument(
"--stream-priority",
dest="stream_prio",
default=None,
metavar="dash,hls,hds,http",
help="If two streams have the same quality, choose the one you prefer",
)
quality.add_argument(
"--format-preferred",
dest="format_preferred",
default=None,
metavar="h264,h264-51",
help="Choose the format you prefer, --list-quality to show which one to choose from",
)
subtitle = parser.add_argument_group("Subtitle")
subtitle.add_argument(
"-S",
"--subtitle",
action="store_true",
dest="subtitle",
default=False,
help="download subtitle from the site if available",
)
subtitle.add_argument(
"-M",
"--merge-subtitle",
action="store_true",
dest="merge_subtitle",
default=False,
help="merge subtitle with video/audio file with corresponding ISO639-3 language code." "this invokes --remux automatically.",
)
subtitle.add_argument(
"--force-subtitle",
dest="force_subtitle",
default=False,
action="store_true",
help="download only subtitle if its used with -S",
)
subtitle.add_argument(
"--require-subtitle",
dest="require_subtitle",
default=False,
action="store_true",
help="download only if a subtitle is available",
)
subtitle.add_argument(
"--all-subtitles",
dest="get_all_subtitles",
default=False,
action="store_true",
help="Download all available subtitles for the video",
)
subtitle.add_argument(
"--raw-subtitles",
dest="get_raw_subtitles",
default=False,
action="store_true",
help="also download the subtitles in their native format",
)
subtitle.add_argument(
"--convert-subtitle-colors",
dest="convert_subtitle_colors",
default=False,
action="store_true",
help='converts the color information in subtitles, to <font color=""> tags',
)
alleps = parser.add_argument_group("All")
alleps.add_argument("-A", "--all-episodes", action="store_true", dest="all_episodes", default=False, help="try to download all episodes")
alleps.add_argument("--all-last", dest="all_last", default=-1, type=int, metavar="NN", help="get last NN episodes instead of all episodes")
alleps.add_argument("--include-clips", dest="include_clips", default=False, action="store_true", help="include clips from websites when using -A")
cmorep = parser.add_argument_group("C More")
cmorep.add_argument("--cmore-operatorlist", dest="cmoreoperatorlist", default=False, action="store_true", help="show operatorlist for cmore")
cmorep.add_argument("--cmore-operator", dest="cmoreoperator", default=None, metavar="operator")
parser.add_argument("urls", nargs="*")
return parser
def parser(version):
parser = gen_parser(version)
options = parser.parse_args()
return parser, options
def setup_defaults():
options = Options()
options.set("output", None)
options.set("subfolder", False)
options.set("configfile", CONFIGFILE)
options.set("resume", False)
options.set("live", False)
options.set("capture_time", -1)
options.set("silent", False)
options.set("force", False)
options.set("quality", 0)
options.set("flexibleq", 0)
options.set("list_quality", False)
options.set("other", None)
options.set("subtitle", False)
options.set("username", None)
options.set("password", None)
options.set("thumbnail", False)
options.set("all_episodes", False)
options.set("all_last", -1)
options.set("merge_subtitle", False)
options.set("force_subtitle", False)
options.set("require_subtitle", False)
options.set("get_all_subtitles", False)
options.set("get_raw_subtitles", False)
options.set("convert_subtitle_colors", False)
options.set("preferred", None)
options.set("verbose", False)
options.set("nfo", False)
options.set("force_nfo", False)
options.set("output_auto", False)
options.set("service", None)
options.set("cookies", None)
options.set("exclude", None)
options.set("after_date", None)
options.set("get_url", False)
options.set("get_only_episode_url", False)
options.set("ssl_verify", True)
options.set("http_headers", None)
options.set("format_preferred", None)
options.set("stream_prio", None)
options.set("remux", False)
options.set("silent_semi", False)
options.set("proxy", None)
options.set("include_clips", False)
options.set("cmoreoperatorlist", False)
options.set("filename", "{title}.s{season}e{episode}.{episodename}-{id}-{service}.{ext}")
options.set("only_audio", False)
options.set("only_video", False)
return _special_settings(options)
def parsertoconfig(config, parser):
config.set("output", parser.output)
config.set("subfolder", parser.subfolder)
config.set("configfile", parser.configfile)
config.set("resume", parser.resume)
config.set("live", parser.live)
config.set("capture_time", parser.capture_time)
config.set("silent", parser.silent)
config.set("force", parser.force)
config.set("quality", parser.quality)
config.set("flexibleq", parser.flexibleq)
config.set("list_quality", parser.list_quality)
config.set("subtitle", parser.subtitle)
config.set("merge_subtitle", parser.merge_subtitle)
config.set("silent_semi", parser.silent_semi)
config.set("username", parser.username)
config.set("password", parser.password)
config.set("thumbnail", parser.thumbnail)
config.set("all_episodes", parser.all_episodes)
config.set("all_last", parser.all_last)
config.set("force_subtitle", parser.force_subtitle)
config.set("require_subtitle", parser.require_subtitle)
config.set("preferred", parser.preferred)
config.set("verbose", parser.verbose)
config.set("nfo", parser.nfo)
config.set("force_nfo", parser.force_nfo)
config.set("exclude", parser.exclude)
config.set("after_date", parser.after_date)
config.set("get_url", parser.get_url)
config.set("get_only_episode_url", parser.get_only_episode_url)
config.set("ssl_verify", parser.ssl_verify)
config.set("http_headers", parser.http_headers)
config.set("cookies", parser.cookies)
config.set("format_preferred", parser.format_preferred)
config.set("stream_prio", parser.stream_prio)
config.set("remux", parser.remux)
config.set("get_all_subtitles", parser.get_all_subtitles)
config.set("get_raw_subtitles", parser.get_raw_subtitles)
config.set("convert_subtitle_colors", parser.convert_subtitle_colors)
config.set("include_clips", parser.include_clips)
config.set("cmoreoperatorlist", parser.cmoreoperatorlist)
config.set("cmoreoperator", parser.cmoreoperator)
config.set("proxy", parser.proxy)
config.set("only_audio", parser.only_audio)
config.set("only_video", parser.only_video)
return _special_settings(config)
def _special_settings(config):
if config.get("require_subtitle"):
if config.get("merge_subtitle"):
config.set("merge_subtitle", True)
else:
config.set("subtitle", True)
if config.get("merge_subtitle"):
config.set("remux", True)
config.set("subtitle", True)
if config.get("silent_semi"):
config.set("silent", True)
if config.get("proxy"):
config.set("proxy", config.get("proxy").replace("socks5", "socks5h", 1))
config.set("proxy", dict(http=config.get("proxy"), https=config.get("proxy")))
if config.get("get_only_episode_url"):
config.set("get_url", True)
return config
def merge(old, new):
if isinstance(new, list):
new = {list(i.keys())[0]: i[list(i.keys())[0]] for i in new}
config = setup_defaults()
if new:
for item in new:
if item in new:
if new[item] != config.get(item): # Check if new value is not a default one.
old[item] = new[item]
else:
old[item] = new[item]
options = Options()
options.set_variable(old)
return options
def readconfig(config, configfile, service=None, preset=None):
global configdata
if configfile and configdata is None:
try:
with open(configfile) as fd:
data = fd.read()
configdata = safe_load(data)
except PermissionError:
logging.error(f"Permission denied while reading config: {configfile}")
if configdata is None:
return config
if "default" in configdata:
config = merge(config.get_variable(), configdata["default"])
if service and "service" in configdata and service in configdata["service"]:
config = merge(config.get_variable(), configdata["service"][service])
if preset and "presets" in configdata and preset in configdata["presets"]:
config = merge(config.get_variable(), configdata["presets"][preset])
return config
|
py | 1a4d27aeb74da93719a789fd159b4795c67326b5 | import numpy as np
import numpy.random as rnd
import simple_optimise as mlopt
vec = rnd.randn(10)
mat = rnd.randn(10, 10)
mat += mat.T
# Single output, single input
def f1_1(x):
return x**2.0
def fD_1(x):
return vec * x
def f1_D(x):
return x.dot(mat.dot(x))
def f1_DD(x):
return vec.dot(x.dot(vec))
def fDD_DD(x):
return x * 3
fd1_1 = mlopt.finite_difference(f1_1, 3.0)
fdD_1 = mlopt.finite_difference(fD_1, 3.0)
fd1_D = mlopt.finite_difference(f1_D, rnd.randn(10))
fd1_DD = mlopt.finite_difference(f1_DD, rnd.randn(10, 10))
fdDD_DD = mlopt.finite_difference(fDD_DD, rnd.randn(10, 10)) |
py | 1a4d28122bb940e50fce2044e3a2bae6121af0ae | from django.conf.urls import include, url
from resources import views
from resources.api import ResourceResource, ResourceSubmissionResource
resource_resource = ResourceResource()
resource_submission_resource = ResourceSubmissionResource()
urlpatterns = [
url(r'^api/', include(resource_resource.urls)),
url(r'^api/', include(resource_submission_resource.urls)),
url(r'^$', views.index, name='resources.index'),
url(r'^create/$', views.create_resource, name='resources.views.create_resource'),
url(r'^(?P<resource_id>[^/]+)/comment/$', views.comment_on_resource,
name='resources.views.comment_on_resource'),
url(r'^(?P<resource_id>[^/]+)/$', views.detail, name='detail'),
]
|
py | 1a4d29b906090f4945cb73a867d43418024911ff | import pandas as pd
import pprint
newclient_diagnoses = pd.read_csv('2021_possible_diagnoses.csv')
print(newclient_diagnoses.columns)
newclient_primary = newclient_diagnoses.drop(columns=['Diagnosis Date ', 'Client Name ', 'PID ', 'External ID ', 'Age '])
nora_newclient_diagnoses = newclient_primary.groupby('Diagnosis Name ').count()
nora_newclient_gender = newclient_primary.groupby('Gender ').count()
print('NORA New Client Primary Diagnosis')
print('------------------------------------')
pprint.pprint(nora_newclient_diagnoses)
print('-------------------------------------')
print('NORA New Client Gender Breakdown')
print('-------------------------------------')
pprint.pprint(nora_newclient_gender)
print('------------------------------------') |
py | 1a4d29ba37d9aae0688af758d78c3786eba81b21 | from .gezagsverhouding import * # noqa
from .inschrijving import * # noqa
from .kiesrecht import * # noqa
from .kind import * # noqa
from .nationaliteit import * # noqa
from .ouder import * # noqa
from .overlijden import * # noqa
from .partnerschap import * # noqa
from .persoon import * # noqa
from .reisdocument import * # noqa
from .token import * # noqa
from .user import * # noqa
from .verblijfplaats import * # noqa
from .verblijfstitel import * # noqa
|
py | 1a4d29d892642731e56dffa9326ff7e0743e1781 | """Validate post-ETL Generators data from EIA 860."""
import logging
import pytest
import pudl
logger = logging.getLogger(__name__)
###############################################################################
# Tests validating data against physically reasonable boundary values:
###############################################################################
def test_capacity_bounds(pudl_out_eia, live_pudl_db):
"""Check that the distribution of coal heat content per unit is valid."""
if not live_pudl_db:
raise AssertionError("Data validation only works with a live PUDL DB.")
if pudl_out_eia.freq is not None:
pytest.skip("Test should only run on un-aggregated data.")
for args in pudl.validate.gens_eia860_vs_bound:
pudl.validate.vs_bounds(pudl_out_eia.gens_eia860(), **args)
def test_capacity_self(pudl_out_eia, live_pudl_db):
"""Check that the distribution of coal heat content per unit is valid."""
if not live_pudl_db:
raise AssertionError("Data validation only works with a live PUDL DB.")
if pudl_out_eia.freq is not None:
pytest.skip("Test should only run on un-aggregated data.")
for args in pudl.validate.gens_eia860_self:
pudl.validate.vs_self(pudl_out_eia.gens_eia860(), **args)
|
py | 1a4d29e0aa7c6cd818e4bb750d9412f4504557ad | #!/usr/bin/env python3
"""
Setup script that reads in the users.yml and courses.yml files in the ../data directory and then
creates the users and courses for the system. This is primarily used by Vagrant and Travis to
configure the environments easily, but it could be run pretty much anywhere, unless the courses
already exist, in which case the system will probably fail.
Usage: ./setup_sample_courses.py
./setup_sample_courses.py [course [course]]
./setup_sample_courses.py --help
The first will create all courses in courses.yml, while the second will only create the courses
specified (which is useful for something like Travis, where we don't need the "demo classes",
just the ones used for testing).
"""
from __future__ import print_function, division
import argparse
from collections import OrderedDict
from datetime import datetime, timedelta
from shutil import copyfile
import glob
import grp
import hashlib
import json
import os
import pwd
import random
import shutil
import subprocess
import uuid
import os.path
import string
import pdb
import docker
from tempfile import TemporaryDirectory
from submitty_utils import dateutils
from ruamel.yaml import YAML
from sqlalchemy import create_engine, Table, MetaData, bindparam, select, join, func
yaml = YAML(typ='safe')
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
SETUP_DATA_PATH = os.path.join(CURRENT_PATH, "..", "data")
SUBMITTY_INSTALL_DIR = "/usr/local/submitty"
SUBMITTY_DATA_DIR = "/var/local/submitty"
SUBMITTY_REPOSITORY = os.path.join(SUBMITTY_INSTALL_DIR, "GIT_CHECKOUT/Submitty")
MORE_EXAMPLES_DIR = os.path.join(SUBMITTY_INSTALL_DIR, "more_autograding_examples")
TUTORIAL_DIR = os.path.join(SUBMITTY_INSTALL_DIR, "GIT_CHECKOUT/Tutorial", "examples")
DB_HOST = "localhost"
with open(os.path.join(SUBMITTY_INSTALL_DIR, "config", "database.json")) as database_config:
database_config_json = json.load(database_config)
DB_USER = database_config_json["database_user"]
DB_PASS = database_config_json["database_password"]
DB_ONLY = False
NO_SUBMISSIONS = False
NO_GRADING = False
NOW = dateutils.get_current_time()
def main():
"""
    Main program execution. This gets us our command line arguments, reads in the data files,
and then sets us up to run the create methods for the users and courses.
"""
global DB_ONLY, NO_SUBMISSIONS, NO_GRADING
args = parse_args()
DB_ONLY = args.db_only
NO_SUBMISSIONS = args.no_submissions
NO_GRADING = args.no_grading
if not os.path.isdir(SUBMITTY_DATA_DIR):
raise SystemError("The following directory does not exist: " + SUBMITTY_DATA_DIR)
for directory in ["courses"]:
if not os.path.isdir(os.path.join(SUBMITTY_DATA_DIR, directory)):
raise SystemError("The following directory does not exist: " + os.path.join(
SUBMITTY_DATA_DIR, directory))
use_courses = args.course
    # We have to stop all running autograding and job handler daemon
    # processes, as otherwise those processes grab the homework files we are
    # inserting before we're ready (causing permission errors), which ends up
    # producing a ton of build failures. Better to wait on grading any
    # homework until we've done all steps of setting up a course.
    print("pausing the autograding and jobs handler daemons")
os.system("systemctl stop submitty_autograding_shipper")
os.system("systemctl stop submitty_autograding_worker")
os.system("systemctl stop submitty_daemon_jobs_handler")
os.system("systemctl stop submitty_websocket_server")
courses = {} # dict[str, Course]
users = {} # dict[str, User]
for course_file in sorted(glob.iglob(os.path.join(args.courses_path, '*.yml'))):
# only create the plagiarism course if we have a local LichenTestData repo
if os.path.basename(course_file) == "plagiarism.yml" and not os.path.isdir(os.path.join(SUBMITTY_INSTALL_DIR, "GIT_CHECKOUT", "LichenTestData")):
continue
course_json = load_data_yaml(course_file)
if len(use_courses) == 0 or course_json['code'] in use_courses:
course = Course(course_json)
courses[course.code] = course
create_group("submitty_course_builders")
for user_file in sorted(glob.iglob(os.path.join(args.users_path, '*.yml'))):
user = User(load_data_yaml(user_file))
if user.id in ['submitty_php', 'submitty_daemon', 'submitty_cgi', 'submitty_dbuser', 'vagrant', 'postgres'] or \
user.id.startswith("untrusted"):
continue
user.create()
users[user.id] = user
if user.courses is not None:
for course in user.courses:
if course in courses:
courses[course].users.append(user)
else:
for key in courses.keys():
courses[key].users.append(user)
# To make Rainbow Grades testing possible, need to seed random to have the same users each time
random.seed(10090542)
# we get the max number of extra students, and then create a list that holds all of them,
# which we then randomly choose from to add to a course
extra_students = 0
for course_id in sorted(courses.keys()):
course = courses[course_id]
tmp = course.registered_students + course.unregistered_students + \
course.no_rotating_students + \
course.no_registration_students
extra_students = max(tmp, extra_students)
extra_students = generate_random_users(extra_students, users)
submitty_engine = create_engine("postgresql://{}:{}@{}/submitty".format(DB_USER, DB_PASS, DB_HOST))
submitty_conn = submitty_engine.connect()
submitty_metadata = MetaData(bind=submitty_engine)
user_table = Table('users', submitty_metadata, autoload=True)
for user_id in sorted(users.keys()):
user = users[user_id]
submitty_conn.execute(user_table.insert(),
user_id=user.id,
user_numeric_id = user.numeric_id,
user_password=get_php_db_password(user.password),
user_firstname=user.firstname,
user_preferred_firstname=user.preferred_firstname,
user_lastname=user.lastname,
user_preferred_lastname=user.preferred_lastname,
user_email=user.email,
user_access_level=user.access_level,
last_updated=NOW.strftime("%Y-%m-%d %H:%M:%S%z"))
for user in extra_students:
submitty_conn.execute(user_table.insert(),
user_id=user.id,
user_numeric_id=user.numeric_id,
user_password=get_php_db_password(user.password),
user_firstname=user.firstname,
user_preferred_firstname=user.preferred_firstname,
user_lastname=user.lastname,
user_preferred_lastname=user.preferred_lastname,
user_email=user.email,
last_updated=NOW.strftime("%Y-%m-%d %H:%M:%S%z"))
# INSERT term into terms table, based on today's date.
today = datetime.today()
year = str(today.year)
if today.month < 7:
term_id = "s" + year[-2:]
term_name = "Spring " + year
term_start = "01/02/" + year
term_end = "06/30/" + year
else:
term_id = "f" + year[-2:]
term_name = "Fall " + year
term_start = "07/01/" + year
term_end = "12/23/" + year
terms_table = Table("terms", submitty_metadata, autoload=True)
submitty_conn.execute(terms_table.insert(),
term_id = term_id,
name = term_name,
start_date = term_start,
end_date = term_end)
submitty_conn.close()
for course_id in sorted(courses.keys()):
course = courses[course_id]
total_students = course.registered_students + course.no_registration_students + \
course.no_rotating_students + course.unregistered_students
students = extra_students[:total_students]
key = 0
for i in range(course.registered_students):
reg_section = (i % course.registration_sections) + 1
rot_section = (i % course.rotating_sections) + 1
students[key].courses[course.code] = {"registration_section": reg_section, "rotating_section": rot_section}
course.users.append(students[key])
key += 1
for i in range(course.no_rotating_students):
reg_section = (i % course.registration_sections) + 1
students[key].courses[course.code] = {"registration_section": reg_section, "rotating_section": None}
course.users.append(students[key])
key += 1
for i in range(course.no_registration_students):
rot_section = (i % course.rotating_sections) + 1
students[key].courses[course.code] = {"registration_section": None, "rotating_section": rot_section}
course.users.append(students[key])
key += 1
for i in range(course.unregistered_students):
students[key].courses[course.code] = {"registration_section": None, "rotating_section": None}
course.users.append(students[key])
key += 1
course.users.sort(key=lambda x: x.id)
for course in sorted(courses.keys()):
courses[course].instructor = users[courses[course].instructor]
courses[course].check_rotating(users)
courses[course].create()
if courses[course].make_customization:
courses[course].make_course_json()
# restart the autograding daemon
print("restarting the autograding and jobs handler daemons")
os.system("systemctl restart submitty_autograding_shipper")
os.system("systemctl restart submitty_autograding_worker")
os.system("systemctl restart submitty_daemon_jobs_handler")
os.system("systemctl restart submitty_websocket_server")
if not NO_GRADING:
# queue up all of the newly created submissions to grade!
os.system("/usr/local/submitty/bin/regrade.py --no_input /var/local/submitty/courses/")
def get_random_text_from_file(filename):
line = ""
with open(os.path.join(SETUP_DATA_PATH, 'random', filename)) as comment:
line = next(comment)
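        # Reservoir sampling: each subsequent line replaces the current pick
        # with probability 1/(num + 2), so every line in the file is equally
        # likely to be returned.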
for num, aline in enumerate(comment):
if random.randrange(num + 2):
continue
line = aline
return line.strip()
def generate_random_user_id(length=15):
return ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits) for _ in range(length))
def generate_random_ta_comment():
return get_random_text_from_file('TAComment.txt')
def generate_random_ta_note():
return get_random_text_from_file('TANote.txt')
def generate_random_student_note():
return get_random_text_from_file('StudentNote.txt')
def generate_random_marks(default_value, max_value):
with open(os.path.join(SETUP_DATA_PATH, 'random', 'marks.yml')) as f:
marks_yml = yaml.safe_load(f)
if default_value == max_value and default_value > 0:
key = 'count_down'
else:
key = 'count_up'
marks = []
mark_list = random.choice(marks_yml[key])
for i in range(len(mark_list)):
marks.append(Mark(mark_list[i], i))
return marks
def generate_versions_to_submit(num=3, original_value=3):
if num == 1:
return original_value
if random.random() < 0.3:
return generate_versions_to_submit(num-1, original_value)
else:
return original_value-(num-1)
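# Illustrative behavior: with num=3 and original_value=3 this returns 1 about 70% of
# the time, 2 about 21% of the time, and 3 about 9% of the time, so most generated
# users end up with only one or two submission versions.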
def generate_probability_space(probability_dict, default=0):
"""
This function takes in a dictionary whose keys are probabilities (decimals less than 1)
and whose values are the possible outcomes; it returns the outcome selected by a random
draw, or `default` if the draw is not covered by the accumulated probabilities.
"""
probability_counter = 0
target_random = random.random()
prev_random_counter = 0
for key in sorted(probability_dict.keys()):
value = probability_dict[key]
probability_counter += key
if probability_counter >= target_random and target_random > prev_random_counter:
return value
prev_random_counter = probability_counter
return default
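# Worked example (made-up outcomes): generate_probability_space({0.7: "A", 0.2: "B", 0.08: "C"})
# iterates the keys in sorted order (0.08, 0.2, 0.7), accumulating 0.08, 0.28, 0.98.
# A random draw of 0.5 lands in the (0.28, 0.98] interval, so "A" is returned; a draw
# above 0.98 falls through to the default.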
def generate_random_users(total, real_users):
"""
:param total: number of random users to generate
:param real_users: existing user ids that the generated ids must not collide with
:return: the list of generated users
:rtype: list[User]
"""
with open(os.path.join(SETUP_DATA_PATH, 'random', 'lastNames.txt')) as last_file, \
open(os.path.join(SETUP_DATA_PATH, 'random', 'maleFirstNames.txt')) as male_file, \
open(os.path.join(SETUP_DATA_PATH, 'random', 'womenFirstNames.txt')) as woman_file:
last_names = last_file.read().strip().split()
male_names = male_file.read().strip().split()
women_names = woman_file.read().strip().split()
users = []
user_ids = []
anon_ids = []
with open(os.path.join(SETUP_DATA_PATH, "random_users.txt"), "w") as random_users_file:
for i in range(total):
if random.random() < 0.5:
first_name = random.choice(male_names)
else:
first_name = random.choice(women_names)
last_name = random.choice(last_names)
user_id = last_name.replace("'", "")[:5] + first_name[0]
user_id = user_id.lower()
anon_id = generate_random_user_id(15)
# create a binary string for the numeric ID
numeric_id = '{0:09b}'.format(i)
while user_id in user_ids or user_id in real_users:
if user_id[-1].isdigit():
user_id = user_id[:-1] + str(int(user_id[-1]) + 1)
else:
user_id = user_id + "1"
if anon_id in anon_ids:
anon_id = generate_random_user_id()
new_user = User({"user_id": user_id,
"user_numeric_id": numeric_id,
"anon_id": anon_id,
"user_firstname": first_name,
"user_lastname": last_name,
"user_group": 4,
"courses": dict()})
new_user.create()
user_ids.append(user_id)
users.append(new_user)
anon_ids.append(anon_id)
random_users_file.write(user_id + "\n")
return users
def load_data_json(file_name):
"""
Loads json file from the .setup/data directory returning the parsed structure
:param file_name: name of file to load
:return: parsed JSON structure from loaded file
"""
file_path = os.path.join(SETUP_DATA_PATH, file_name)
if not os.path.isfile(file_path):
raise IOError("Missing the json file .setup/data/{}".format(file_name))
with open(file_path) as open_file:
json_file = json.load(open_file)
return json_file
def load_data_yaml(file_path):
"""
Loads yaml file from the .setup/data directory returning the parsed structure
:param file_path: name of file to load
:return: parsed YAML structure from loaded file
"""
if not os.path.isfile(file_path):
raise IOError("Missing the yaml file {}".format(file_path))
with open(file_path) as open_file:
yaml_file = yaml.safe_load(open_file)
return yaml_file
def user_exists(user):
"""
Checks to see if the user exists on the Linux file system. We can use this to delete and
recreate a user, which avoids the user having data left over from a previous run of
setting up the sample courses.
:param user: string to check if user exists
:return: boolean on if user exists or not
"""
try:
pwd.getpwnam(user)
return True
except KeyError:
return False
def group_exists(group):
"""
Checks to see if the group exists on the linux file system so that we don't try to create
groups that already exist.
:param group: string to check if group exists
:return: boolean on if group exists or not
"""
try:
grp.getgrnam(group)
return True
except KeyError:
return False
def create_group(group):
"""
Creates the group on the system if it does not already exist, so that users needed for
the system to function (and not defined within the users.yml file) can be added to it.
:param group: name of the group to create
"""
if not group_exists(group):
os.system("groupadd {}".format(group))
if group == "sudo":
return
def add_to_group(group, user_id):
"""
Adds the user to the specified group, creating the group if it does not exist.
:param group: name of the group to add the user to
:param user_id: id of the user to add to the group
"""
create_group(group)
os.system("usermod -a -G {} {}".format(group, user_id))
def get_php_db_password(password):
"""
Generates a password hash to be used within the site for database authentication. The
password_hash function (http://php.net/manual/en/function.password-hash.php) produces a
secure hash and takes care of things like salting for us.
:param password:
:return: password hash to be inserted into the DB for a user
"""
proc = subprocess.Popen(
["php", "-r", "print(password_hash('{}', PASSWORD_DEFAULT));".format(password)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = proc.communicate()
return out.decode('utf-8')
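# Note: this shells out to the `php` CLI, which therefore has to be on PATH; with
# PASSWORD_DEFAULT the returned value is currently a bcrypt-style hash (a string
# beginning with "$2y$") suitable for inserting into the users table.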
def get_current_semester():
"""
Given today's date, generates a three character code that represents the semester to use for
courses such that the first half of the year is considered "Spring" and the last half is
considered "Fall". The "Spring" semester gets an S as the first letter while "Fall" gets an
F. The next two characters are the last two digits in the current year.
:return:
"""
today = datetime.today()
semester = "f" + str(today.year)[-2:]
if today.month < 7:
semester = "s" + str(today.year)[-2:]
return semester
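# Example: a run in March 2024 returns "s24" (Spring), while a run in October 2024
# returns "f24" (Fall), matching the term codes inserted into the terms table above.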
def parse_args():
"""
Parses out the arguments that might be passed to this script as it's run as a commandline
application.
:return: parsed args from the argparse module
"""
parser = argparse.ArgumentParser(
description="Sets up the sample courses as well as creating the necessary users for the "
"course as needed. It reads in the courses.json and users.json files from the "
".setup/data directory to determine what courses/users are allowed and then "
"either adds all or just a few depending on what gets passed to this script")
parser.add_argument("--db_only", action='store_true', default=False)
parser.add_argument("--no_submissions", action='store_true', default=False)
parser.add_argument("--no_grading", action='store_true', default=False)
parser.add_argument("--users_path", default=os.path.join(SETUP_DATA_PATH, "users"),
help="Path to folder that contains .yml files to use for user creation. Defaults to "
"../data/users")
parser.add_argument("--submission_url", type=str, default="",help="top level url for the website")
parser.add_argument("--courses_path", default=os.path.join(SETUP_DATA_PATH, "courses"),
help="Path to the folder that contains .yml files to use for course creation. Defaults to "
"../data/courses")
parser.add_argument("course", nargs="*",
help="course code to build. If no courses are passed in, then it'll use "
"all courses in courses.json")
return parser.parse_args()
def create_user(user_id):
if not user_exists(user_id):
print("Creating user {}...".format(user_id))
os.system("useradd --home /tmp -c \'AUTH ONLY account\' "
"-M --shell /bin/false {}".format(user_id))
print("Setting password for user {}...".format(user_id))
os.system("echo {}:{} | chpasswd".format(user_id, user_id))
def create_gradeable_submission(src, dst):
"""
Given a source and a destination, copy the files from the source to the destination. First, before
copying, we check if the source is a directory, if it is, then we zip the contents of this to a temp
zip file (stored in /tmp) and store the path to this newly created zip as our new source.
At this point, (for all uploads), we check if our source is a zip (by just checking file extension is
a .zip), then we will extract the contents of the source (using shutil) to the destination, else we
just do a simple copy operation of the source file to the destination location.
At this point, if we created a zip file (as part of that first step), we remove it from the /tmp directory.
:param src: path of the file or directory we want to use for this submission
:type src: str
:param dst: path to the folder where we should copy the submission to
:type dst: str
"""
zip_dst = None
if os.path.isdir(src):
zip_dst = os.path.join("/tmp", str(uuid.uuid4()))
zip_dst = shutil.make_archive(zip_dst, 'zip', src)
src = zip_dst
if src[-3:] == "zip":
shutil.unpack_archive(src, dst)
else:
shutil.copy(src, dst)
if zip_dst is not None and isinstance(zip_dst, str):
os.remove(zip_dst)
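# Example usage (hypothetical paths): copying a plain file just drops it into the
# version directory, while passing a directory zips and re-extracts its contents:
#     create_gradeable_submission("/path/to/sample/solution.py", "/path/to/submissions/hw01/student/1")
#     create_gradeable_submission("/path/to/sample/solution_dir", "/path/to/submissions/hw01/student/2")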
class User(object):
"""
A basic object to contain the objects loaded from the users.json file. We use this to link
against the courses.
Attributes:
id
numeric_id
anon_id
password
firstname
lastname
email
group
preferred_firstname
preferred_lastname
access_level
registration_section
rotating_section
unix_groups
courses
"""
def __init__(self, user):
self.id = user['user_id']
self.numeric_id = user['user_numeric_id']
self.anon_id = user['anon_id']
self.password = self.id
self.firstname = user['user_firstname']
self.lastname = user['user_lastname']
self.email = self.id + "@example.com"
self.group = 4
self.preferred_firstname = None
self.preferred_lastname = None
self.access_level = 3
self.registration_section = None
self.rotating_section = None
self.grading_registration_section = None
self.unix_groups = None
self.courses = None
self.manual = False
self.sudo = False
if 'user_preferred_firstname' in user:
self.preferred_firstname = user['user_preferred_firstname']
if 'user_preferred_lastname' in user:
self.preferred_lastname = user['user_preferred_lastname']
if 'user_email' in user:
self.email = user['user_email']
if 'user_group' in user:
self.group = user['user_group']
if self.group < 1 or 4 < self.group:
raise SystemExit("ASSERT: user {}, user_group is not between 1 - 4. Check YML file.".format(self.id))
if 'user_access_level' in user:
self.access_level = user['user_access_level']
if self.access_level < 1 or 3 < self.access_level:
raise SystemExit("ASSERT: user {}, user_access_level is not between 1 - 3. Check YML file.".format(self.id))
if 'registration_section' in user:
self.registration_section = int(user['registration_section'])
if 'rotating_section' in user:
self.rotating_section = int(user['rotating_section'])
if 'grading_registration_section' in user:
self.grading_registration_section = user['grading_registration_section']
if 'unix_groups' in user:
self.unix_groups = user['unix_groups']
if 'manual_registration' in user:
self.manual = user['manual_registration'] is True
if 'courses' in user:
self.courses = {}
if isinstance(user['courses'], list):
for course in user['courses']:
self.courses[course] = {"user_group": self.group}
elif isinstance(user['courses'], dict):
self.courses = user['courses']
for course in self.courses:
if 'user_group' not in self.courses[course]:
self.courses[course]['user_group'] = self.group
else:
raise ValueError("Invalid type for courses key, it should either be list or dict")
if 'sudo' in user:
self.sudo = user['sudo'] is True
if 'user_password' in user:
self.password = user['user_password']
def create(self, force_ssh=False):
if not DB_ONLY:
if self.group > 2 and not force_ssh:
self._create_non_ssh()
else:
self._create_ssh()
if self.group <= 1:
add_to_group("submitty_course_builders", self.id)
if self.sudo:
add_to_group("sudo", self.id)
def _create_ssh(self):
if not user_exists(self.id):
print("Creating user {}...".format(self.id))
os.system("useradd -m -c 'First Last,RoomNumber,WorkPhone,HomePhone' {}".format(self.id))
self.set_password()
def _create_non_ssh(self):
if not DB_ONLY and not user_exists(self.id):
print("Creating user {}...".format(self.id))
os.system("useradd --home /tmp -c \'AUTH ONLY account\' "
"-M --shell /bin/false {}".format(self.id))
self.set_password()
def set_password(self):
print("Setting password for user {}...".format(self.id))
os.system("echo {}:{} | chpasswd".format(self.id, self.password))
def get_detail(self, course, detail):
if self.courses is not None and course in self.courses:
user_detail = "user_" + detail
if user_detail in self.courses[course]:
return self.courses[course][user_detail]
elif detail in self.courses[course]:
return self.courses[course][detail]
if detail in self.__dict__:
return self.__dict__[detail]
else:
return None
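# Lookup order (illustrative): get_detail("sample", "group") first checks the per-course
# override self.courses["sample"]["user_group"], then a plain "group" key in that same
# dict, and finally falls back to the User attribute itself, returning None if nothing
# matches.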
class Course(object):
"""
Object to represent the courses loaded from the courses.json file as well as the list of
users that are needed for this particular course (which is a list of User objects).
Attributes:
code
semester
instructor
gradeables
users
max_random_submissions
"""
def __init__(self, course):
self.semester = get_current_semester()
self.code = course['code']
self.instructor = course['instructor']
self.gradeables = []
self.make_customization = False
ids = []
if 'gradeables' in course:
for gradeable in course['gradeables']:
self.gradeables.append(Gradeable(gradeable))
assert self.gradeables[-1].id not in ids
ids.append(self.gradeables[-1].id)
self.users = []
self.registration_sections = 10
self.rotating_sections = 5
self.registered_students = 50
self.no_registration_students = 10
self.no_rotating_students = 10
self.unregistered_students = 10
if 'registration_sections' in course:
self.registration_sections = course['registration_sections']
if 'rotating_sections' in course:
self.rotating_sections = course['rotating_sections']
if 'registered_students' in course:
self.registered_students = course['registered_students']
if 'no_registration_students' in course:
self.no_registration_students = course['no_registration_students']
if 'no_rotating_students' in course:
self.no_rotating_students = course['no_rotating_students']
if 'unregistered_students' in course:
self.unregistered_students = course['unregistered_students']
if 'make_customization' in course:
self.make_customization = course['make_customization']
def create(self):
# Sort users and gradeables in the name of determinism
self.users.sort(key=lambda x: x.get_detail(self.code, "id"))
self.gradeables.sort(key=lambda x: x.id)
self.course_path = os.path.join(SUBMITTY_DATA_DIR, "courses", self.semester, self.code)
# To make Rainbow Grades testing possible, need to seed random
m = hashlib.md5()
m.update(bytes(self.code, 'utf-8'))
random.seed(int(m.hexdigest(), 16))
course_group = self.code + "_tas_www"
archive_group = self.code + "_archive"
create_group(self.code)
create_group(course_group)
create_group(archive_group)
add_to_group(self.code, self.instructor.id)
add_to_group(course_group, self.instructor.id)
add_to_group(archive_group, self.instructor.id)
add_to_group("submitty_course_builders", self.instructor.id)
add_to_group(course_group, "submitty_php")
add_to_group(course_group, "submitty_daemon")
add_to_group(course_group, "submitty_cgi")
os.system("{}/sbin/create_course.sh {} {} {} {}"
.format(SUBMITTY_INSTALL_DIR, self.semester, self.code, self.instructor.id,
course_group))
os.environ['PGPASSWORD'] = DB_PASS
database = "submitty_" + self.semester + "_" + self.code
print("Database created, now populating ", end="")
submitty_engine = create_engine("postgresql://{}:{}@{}/submitty".format(DB_USER, DB_PASS, DB_HOST))
submitty_conn = submitty_engine.connect()
submitty_metadata = MetaData(bind=submitty_engine)
print("(Master DB connection made, metadata bound)...")
engine = create_engine("postgresql://{}:{}@{}/{}".format(DB_USER, DB_PASS, DB_HOST, database))
self.conn = engine.connect()
self.metadata = MetaData(bind=engine)
print("(Course DB connection made, metadata bound)...")
print("Creating registration sections ", end="")
table = Table("courses_registration_sections", submitty_metadata, autoload=True)
print("(tables loaded)...")
for section in range(1, self.registration_sections+1):
print("Create section {}".format(section))
submitty_conn.execute(table.insert(), semester=self.semester, course=self.code, registration_section_id=str(section))
print("Creating rotating sections ", end="")
table = Table("sections_rotating", self.metadata, autoload=True)
print("(tables loaded)...")
for section in range(1, self.rotating_sections+1):
print("Create section {}".format(section))
self.conn.execute(table.insert(), sections_rotating_id=section)
print("Create users ", end="")
submitty_users = Table("courses_users", submitty_metadata, autoload=True)
users_table = Table("users", self.metadata, autoload=True)
reg_table = Table("grading_registration", self.metadata, autoload=True)
print("(tables loaded)...")
for user in self.users:
print("Creating user {} {} ({})...".format(user.get_detail(self.code, "firstname"),
user.get_detail(self.code, "lastname"),
user.get_detail(self.code, "id")))
reg_section = user.get_detail(self.code, "registration_section")
if reg_section is not None and reg_section > self.registration_sections:
reg_section = None
rot_section = user.get_detail(self.code, "rotating_section")
if rot_section is not None and rot_section > self.rotating_sections:
rot_section = None
if reg_section is not None:
reg_section=str(reg_section)
# We already have a row in submitty.users for this user,
# just need to add a row in courses_users which will put a
# row in the course specific DB, and off we go.
submitty_conn.execute(submitty_users.insert(),
semester=self.semester,
course=self.code,
user_id=user.get_detail(self.code, "id"),
user_group=user.get_detail(self.code, "group"),
registration_section=reg_section,
manual_registration=user.get_detail(self.code, "manual"))
update = users_table.update(values={
users_table.c.rotating_section: bindparam('rotating_section'),
users_table.c.anon_id: bindparam('anon_id')
}).where(users_table.c.user_id == bindparam('b_user_id'))
self.conn.execute(update, rotating_section=rot_section, anon_id=user.anon_id, b_user_id=user.id)
if user.get_detail(self.code, "grading_registration_section") is not None:
try:
grading_registration_sections = str(user.get_detail(self.code,"grading_registration_section"))
grading_registration_sections = [int(x) for x in grading_registration_sections.split(",")]
except ValueError:
grading_registration_sections = []
for grading_registration_section in grading_registration_sections:
self.conn.execute(reg_table.insert(),
user_id=user.get_detail(self.code, "id"),
sections_registration_id=str(grading_registration_section))
if user.unix_groups is None:
if user.get_detail(self.code, "group") <= 1:
add_to_group(self.code, user.id)
add_to_group(self.code + "_archive", user.id)
if user.get_detail(self.code, "group") <= 2:
add_to_group(self.code + "_tas_www", user.id)
gradeable_table = Table("gradeable", self.metadata, autoload=True)
electronic_table = Table("electronic_gradeable", self.metadata, autoload=True)
peer_assign = Table("peer_assign", self.metadata, autoload=True)
reg_table = Table("grading_rotating", self.metadata, autoload=True)
component_table = Table('gradeable_component', self.metadata, autoload=True)
mark_table = Table('gradeable_component_mark', self.metadata, autoload=True)
gradeable_data = Table("gradeable_data", self.metadata, autoload=True)
gradeable_component_data = Table("gradeable_component_data", self.metadata, autoload=True)
gradeable_component_mark_data = Table('gradeable_component_mark_data', self.metadata, autoload=True)
gradeable_data_overall_comment = Table('gradeable_data_overall_comment', self.metadata, autoload=True)
electronic_gradeable_data = Table("electronic_gradeable_data", self.metadata, autoload=True)
electronic_gradeable_version = Table("electronic_gradeable_version", self.metadata, autoload=True)
for gradeable in self.gradeables:
gradeable.create(self.conn, gradeable_table, electronic_table, peer_assign, reg_table, component_table, mark_table)
form = os.path.join(self.course_path, "config", "form", "form_{}.json".format(gradeable.id))
with open(form, "w") as open_file:
json.dump(gradeable.create_form(), open_file, indent=2)
os.system("chown -f submitty_php:{}_tas_www {}".format(self.code, os.path.join(self.course_path, "config", "form", "*")))
if not os.path.isfile(os.path.join(self.course_path, "ASSIGNMENTS.txt")):
os.system("touch {}".format(os.path.join(self.course_path, "ASSIGNMENTS.txt")))
os.system("chown {}:{}_tas_www {}".format(self.instructor.id, self.code,
os.path.join(self.course_path, "ASSIGNMENTS.txt")))
os.system("chmod -R g+w {}".format(self.course_path))
os.system("su {} -c '{}'".format("submitty_daemon", os.path.join(self.course_path,
"BUILD_{}.sh".format(self.code))))
#os.system("su {} -c '{}'".format(self.instructor.id, os.path.join(self.course_path,
# "BUILD_{}.sh".format(self.code))))
os.system("chown -R {}:{}_tas_www {}".format(self.instructor.id, self.code, os.path.join(self.course_path, "build")))
os.system("chown -R {}:{}_tas_www {}".format(self.instructor.id, self.code,
os.path.join(self.course_path, "test_*")))
# On python 3, replace with os.makedirs(..., exist_ok=True)
os.system("mkdir -p {}".format(os.path.join(self.course_path, "submissions")))
os.system('chown submitty_php:{}_tas_www {}'.format(self.code, os.path.join(self.course_path, 'submissions')))
for gradeable in self.gradeables:
# create_teams
if gradeable.team_assignment is True:
json_team_history = self.make_sample_teams(gradeable)
if gradeable.type == 0 and \
(len(gradeable.submissions) == 0 or
gradeable.sample_path is None or
gradeable.config_path is None):
# Make sure the electronic gradeable is valid
continue
# creating the folder containing all the submissions
gradeable_path = os.path.join(self.course_path, "submissions", gradeable.id)
submission_count = 0
max_submissions = gradeable.max_random_submissions
max_individual_submissions = gradeable.max_individual_submissions
# makes a section be ungraded if the gradeable is not electronic
ungraded_section = random.randint(1, max(1, self.registration_sections if gradeable.grade_by_registration else self.rotating_sections))
# This for loop adds submissions for users and teams(if applicable)
if not NO_SUBMISSIONS:
only_submit_plagiarized_users = gradeable.lichen_sample_path is not None and len(gradeable.plagiarized_user) > 0
for user in self.users:
if only_submit_plagiarized_users and user.id not in gradeable.plagiarized_user:
continue
submitted = False
team_id = None
if gradeable.team_assignment is True:
# If the gradeable is a team assignment, look up the user's team_id and make sure the team doesn't submit more than once
res = self.conn.execute("SELECT teams.team_id FROM teams INNER JOIN gradeable_teams\
ON teams.team_id = gradeable_teams.team_id where user_id='{}' and g_id='{}'".format(user.id, gradeable.id))
temp = res.fetchall()
if len(temp) != 0:
team_id = temp[0][0]
previous_submission = select([electronic_gradeable_version]).where(
electronic_gradeable_version.c['team_id'] == team_id)
res = self.conn.execute(previous_submission)
if res.rowcount > 0:
continue
submission_path = os.path.join(gradeable_path, team_id)
else:
continue
res.close()
else:
submission_path = os.path.join(gradeable_path, user.id)
if gradeable.type == 0 and gradeable.submission_open_date < NOW:
if user.id in gradeable.plagiarized_user:
# If the user is a bad and unethical student (plagiarized_user), then the version to submit is going to
# be the same as the number of assignments defined in users.yml in the lichen_submissions folder.
versions_to_submit = len(gradeable.plagiarized_user[user.id])
elif gradeable.lichen_sample_path is not None:
# if we have set a plagiarism configuration but no manually-specified submissions, submit the default number
versions_to_submit = gradeable.plagiarism_versions_per_user
else:
versions_to_submit = generate_versions_to_submit(max_individual_submissions, max_individual_submissions)
if ((gradeable.gradeable_config is not None
and (gradeable.has_due_date is True and (gradeable.submission_due_date < NOW or random.random() < 0.5))
and (random.random() < 0.9) and (max_submissions is None or submission_count < max_submissions))
or (gradeable.gradeable_config is not None and user.id in gradeable.plagiarized_user)):
# only create these directories if we're actually going to put something in them
if not os.path.exists(gradeable_path):
os.makedirs(gradeable_path)
os.system("chown -R submitty_php:{}_tas_www {}".format(self.code, gradeable_path))
if not os.path.exists(submission_path):
os.makedirs(submission_path)
# Reduce the probability to get a cancelled submission (active_version = 0)
# This is done by making other possibilities three times more likely
version_population = []
for version in range(1, versions_to_submit+1):
version_population.append((version, 3))
# disallow cancelled submission if this is a manually-specified user
if user.id not in gradeable.plagiarized_user:
version_population = [(0, 1)] + version_population
version_population = [ver for ver, freq in version_population for i in range(freq)]
active_version = random.choice(version_population)
if team_id is not None:
json_history = {"active_version": active_version, "history": [], "team_history": []}
else:
json_history = {"active_version": active_version, "history": []}
random_days = 1
if random.random() < 0.3:
random_days = random.choice(range(-3, 2))
for version in range(1, versions_to_submit+1):
os.system("mkdir -p " + os.path.join(submission_path, str(version)))
submitted = True
submission_count += 1
current_time_string = dateutils.write_submitty_date(gradeable.submission_due_date - timedelta(days=random_days+version/versions_to_submit))
if team_id is not None:
self.conn.execute(electronic_gradeable_data.insert(), g_id=gradeable.id, user_id=None,
team_id=team_id, g_version=version, submission_time=current_time_string)
if version == versions_to_submit:
self.conn.execute(electronic_gradeable_version.insert(), g_id=gradeable.id, user_id=None,
team_id=team_id, active_version=active_version)
json_history["team_history"] = json_team_history[team_id]
else:
self.conn.execute(electronic_gradeable_data.insert(), g_id=gradeable.id, user_id=user.id,
g_version=version, submission_time=current_time_string)
if version == versions_to_submit:
self.conn.execute(electronic_gradeable_version.insert(), g_id=gradeable.id, user_id=user.id,
active_version=active_version)
json_history["history"].append({"version": version, "time": current_time_string, "who": user.id, "type": "upload"})
with open(os.path.join(submission_path, str(version), ".submit.timestamp"), "w") as open_file:
open_file.write(current_time_string + "\n")
if user.id in gradeable.plagiarized_user:
# If the user is in the plagiarized folder, then only add those submissions
src = os.path.join(gradeable.lichen_sample_path, gradeable.plagiarized_user[user.id][version-1])
dst = os.path.join(submission_path, str(version))
create_gradeable_submission(src, dst)
elif gradeable.lichen_sample_path is not None:
if len(gradeable.plagiarism_submissions) > 0: # check to make sure we haven't run out of data
# if there were no specified plagiarized users but we have plagiarism submissions, grab a random submission
src = os.path.join(gradeable.lichen_sample_path, gradeable.plagiarism_submissions.pop())
dst = os.path.join(submission_path, str(version))
create_gradeable_submission(src, dst)
else:
if isinstance(gradeable.submissions, dict):
for key in sorted(gradeable.submissions.keys()):
os.system("mkdir -p " + os.path.join(submission_path, str(version), key))
submission = random.choice(gradeable.submissions[key])
src = os.path.join(gradeable.sample_path, submission)
dst = os.path.join(submission_path, str(version), key)
create_gradeable_submission(src, dst)
else:
submission = random.choice(gradeable.submissions)
if isinstance(submission, list):
submissions = submission
else:
submissions = [submission]
for submission in submissions:
src = os.path.join(gradeable.sample_path, submission)
dst = os.path.join(submission_path, str(version))
create_gradeable_submission(src, dst)
random_days -= 0.5
with open(os.path.join(submission_path, "user_assignment_settings.json"), "w") as open_file:
json.dump(json_history, open_file)
if gradeable.grade_start_date < NOW and os.path.exists(os.path.join(submission_path, str(versions_to_submit))):
if (gradeable.has_release_date is True and gradeable.grade_released_date < NOW) or (random.random() < 0.5 and (submitted or gradeable.type !=0)):
status = 1 if gradeable.type != 0 or submitted else 0
print("Inserting {} for {}...".format(gradeable.id, user.id))
# gd_overall_comment no longer does anything, and will be removed in a future update.
values = {'g_id': gradeable.id, 'gd_overall_comment' : ''}
overall_comment_values = {'g_id' : gradeable.id, 'goc_overall_comment': 'lorem ipsum lodar', 'goc_grader_id' : self.instructor.id}
if gradeable.team_assignment is True:
values['gd_team_id'] = team_id
overall_comment_values['goc_team_id'] = team_id
else:
values['gd_user_id'] = user.id
overall_comment_values['goc_user_id'] = user.id
if gradeable.grade_released_date < NOW and random.random() < 0.5:
values['gd_user_viewed_date'] = NOW.strftime('%Y-%m-%d %H:%M:%S%z')
ins = gradeable_data.insert().values(**values)
res = self.conn.execute(ins)
gd_id = res.inserted_primary_key[0]
if gradeable.type != 0 or gradeable.use_ta_grading:
skip_grading = random.random()
if skip_grading > 0.3 and random.random() > 0.01:
ins = gradeable_data_overall_comment.insert().values(**overall_comment_values)
res = self.conn.execute(ins)
for component in gradeable.components:
if random.random() < 0.01 and skip_grading < 0.3:
# This is used to simulate unfinished grading.
break
if status == 0 or random.random() < 0.4:
score = 0
else:
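# Pick a plausible TA score: usually somewhere up to the component max, sometimes up
# to the upper clamp (extra credit), rarely a negative penalty, and very rarely the
# -99999 sentinel.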
max_value_score = random.randint(component.lower_clamp * 2, component.max_value * 2) / 2
upper_clamp_score = random.randint(component.lower_clamp * 2, component.upper_clamp * 2) / 2
score = generate_probability_space({0.7: max_value_score, 0.2: upper_clamp_score, 0.08: -max_value_score, 0.02: -99999})
grade_time = gradeable.grade_start_date.strftime("%Y-%m-%d %H:%M:%S%z")
self.conn.execute(gradeable_component_data.insert(), gc_id=component.key, gd_id=gd_id,
gcd_score=score, gcd_component_comment=generate_random_ta_comment(),
gcd_grader_id=self.instructor.id, gcd_grade_time=grade_time, gcd_graded_version=versions_to_submit)
first = True
first_set = False
for mark in component.marks:
if (random.random() < 0.5 and not first_set and not first) or random.random() < 0.2:
self.conn.execute(gradeable_component_mark_data.insert(), gc_id=component.key, gd_id=gd_id, gcm_id=mark.key, gcd_grader_id=self.instructor.id)
if first:
first_set = True
first = False
if gradeable.type == 0 and os.path.isdir(submission_path):
os.system("chown -R submitty_php:{}_tas_www {}".format(self.code, submission_path))
if (gradeable.type != 0 and gradeable.grade_start_date < NOW and ((gradeable.has_release_date is True and gradeable.grade_released_date < NOW) or random.random() < 0.5) and
random.random() < 0.9 and (ungraded_section != (user.get_detail(self.code, 'registration_section') if gradeable.grade_by_registration else user.get_detail(self.code, 'rotating_section')))):
res = self.conn.execute(gradeable_data.insert(), g_id=gradeable.id, gd_user_id=user.id, gd_overall_comment='')
gd_id = res.inserted_primary_key[0]
skip_grading = random.random()
for component in gradeable.components:
if random.random() < 0.01 and skip_grading < 0.3:
break
if random.random() < 0.1:
continue
elif gradeable.type == 1:
score = generate_probability_space({0.2: 0, 0.1: 0.5}, 1)
else:
score = random.randint(component.lower_clamp * 2, component.upper_clamp * 2) / 2
grade_time = gradeable.grade_start_date.strftime("%Y-%m-%d %H:%M:%S%z")
self.conn.execute(gradeable_component_data.insert(), gc_id=component.key, gd_id=gd_id,
gcd_score=score, gcd_component_comment="", gcd_grader_id=self.instructor.id, gcd_grade_time=grade_time, gcd_graded_version=-1)
# This segment adds the sample data for features in the sample course only
if self.code == "sample":
self.add_sample_forum_data()
print('Added forum data to sample course.')
self.add_sample_polls_data()
print('Added polls data to sample course.')
self.add_sample_queue_data()
print('Added office hours queue data to sample course.')
self.conn.close()
submitty_conn.close()
os.environ['PGPASSWORD'] = ""
if self.code == 'sample':
student_image_folder = os.path.join(SUBMITTY_DATA_DIR, 'courses', self.semester, self.code, 'uploads', 'student_images')
zip_path = os.path.join(SUBMITTY_REPOSITORY, 'sample_files', 'user_photos', 'CSCI-1300-01.zip')
with TemporaryDirectory() as tmpdir:
shutil.unpack_archive(zip_path, tmpdir)
inner_folder = os.path.join(tmpdir, 'CSCI-1300-01')
for f in os.listdir(inner_folder):
shutil.move(os.path.join(inner_folder, f), os.path.join(student_image_folder, f))
if self.code == 'tutorial':
client = docker.from_env()
client.images.pull('submitty/tutorial:tutorial_18')
client.images.pull('submitty/tutorial:database_client')
def check_rotating(self, users):
for gradeable in self.gradeables:
for grading_rotating in gradeable.grading_rotating:
string = "Invalid user_id {} for rotating section for gradeable {}".format(
grading_rotating['user_id'], gradeable.id)
if grading_rotating['user_id'] not in users:
raise ValueError(string)
def getForumDataFromFile(self, filename):
forum_path = os.path.join(SETUP_DATA_PATH, "forum")
forum_data = []
for line in open(os.path.join(forum_path, filename)):
l = [x.replace("\\n", "\n").strip() for x in line.split("|")]
if(len(line) > 1):
forum_data.append(l)
return forum_data
def make_sample_teams(self, gradeable):
"""
:param gradeable: any team gradeable
This function adds teams to the database and gradeable.
:return: a json object filled with team information
"""
assert gradeable.team_assignment
json_team_history = {}
gradeable_teams_table = Table("gradeable_teams", self.metadata, autoload=True)
teams_table = Table("teams", self.metadata, autoload=True)
ucounter = self.conn.execute(select([func.count()]).select_from(gradeable_teams_table)).scalar()
for user in self.users:
# the unique team id is made up of 5 digits, an underscore, and the team creator's user id.
# example: 00001_aphacker
unique_team_id=str(ucounter).zfill(5)+"_"+user.get_detail(self.code, "id")
reg_section = user.get_detail(self.code, "registration_section")
if reg_section is None:
continue
# The teams are created based on the order of the users. As soon as the number of teammates
# exceeds the max team size, a new team is created within the same registration section
print("Adding team for " + unique_team_id + " in gradeable " + gradeable.id)
# adding json data for team history
teams_registration = select([gradeable_teams_table]).where(
(gradeable_teams_table.c['registration_section'] == str(reg_section)) &
(gradeable_teams_table.c['g_id'] == gradeable.id))
res = self.conn.execute(teams_registration)
added = False
if res.rowcount != 0:
# If the registration section already has a team, join it
for team_in_section in res:
members_in_team = select([teams_table]).where(
teams_table.c['team_id'] == team_in_section['team_id'])
res = self.conn.execute(members_in_team)
if res.rowcount < gradeable.max_team_size:
self.conn.execute(teams_table.insert(),
team_id=team_in_section['team_id'],
user_id=user.get_detail(self.code, "id"),
state=1)
json_team_history[team_in_section['team_id']].append({"action": "admin_create",
"time": dateutils.write_submitty_date(gradeable.submission_open_date),
"admin_user": "instructor",
"added_user": user.get_detail(self.code, "id")})
added = True
if not added:
# if every team in the section is full (or none exists yet), make a new team
self.conn.execute(gradeable_teams_table.insert(),
team_id=unique_team_id,
g_id=gradeable.id,
registration_section=str(reg_section),
rotating_section=str(random.randint(1, self.rotating_sections)))
self.conn.execute(teams_table.insert(),
team_id=unique_team_id,
user_id=user.get_detail(self.code, "id"),
state=1)
json_team_history[unique_team_id] = [{"action": "admin_create",
"time": dateutils.write_submitty_date(gradeable.submission_open_date),
"admin_user": "instructor",
"first_user": user.get_detail(self.code, "id")}]
ucounter += 1
res.close()
return json_team_history
def add_sample_forum_data(self):
# set sample course to have forum enabled by default
course_json_file = os.path.join(self.course_path, 'config', 'config.json')
with open(course_json_file, 'r+') as open_file:
course_json = json.load(open_file)
course_json['course_details']['forum_enabled'] = True
open_file.seek(0)
open_file.truncate()
json.dump(course_json, open_file, indent=2)
f_data = (self.getForumDataFromFile('posts.txt'), self.getForumDataFromFile('threads.txt'), self.getForumDataFromFile('categories.txt'))
forum_threads = Table("threads", self.metadata, autoload=True)
forum_posts = Table("posts", self.metadata, autoload=True)
forum_cat_list = Table("categories_list", self.metadata, autoload=True)
forum_thread_cat = Table("thread_categories", self.metadata, autoload=True)
for catData in f_data[2]:
self.conn.execute(forum_cat_list.insert(), category_desc=catData[0], rank=catData[1], color=catData[2])
for thread_id, threadData in enumerate(f_data[1], start = 1):
self.conn.execute(forum_threads.insert(),
title=threadData[0],
created_by=threadData[1],
pinned=True if threadData[2] == "t" else False,
deleted=True if threadData[3] == "t" else False,
merged_thread_id=threadData[4],
merged_post_id=threadData[5],
is_visible=True if threadData[6] == "t" else False)
self.conn.execute(forum_thread_cat.insert(), thread_id=thread_id, category_id=threadData[7])
counter = 1
for postData in f_data[0]:
if(postData[10] != "f" and postData[10] != ""):
# In posts.txt, if the 10th column is f or empty, then no attachment is added. If anything else is in the column, then it will be treated as the file name.
attachment_path = os.path.join(self.course_path, "forum_attachments", str(postData[0]), str(counter))
os.makedirs(attachment_path)
os.system("chown -R submitty_php:sample_tas_www {}".format(os.path.join(self.course_path, "forum_attachments", str(postData[0]))))
copyfile(os.path.join(SETUP_DATA_PATH, "forum", "attachments", postData[10]), os.path.join(attachment_path, postData[10]))
counter += 1
self.conn.execute(forum_posts.insert(),
thread_id=postData[0],
parent_id=postData[1],
author_user_id=postData[2],
content=postData[3],
timestamp=postData[4],
anonymous=True if postData[5] == "t" else False,
deleted=True if postData[6] == "t" else False,
endorsed_by=postData[7],
resolved = True if postData[8] == "t" else False,
type=postData[9],
has_attachment=True if postData[10] != "f" else False)
def add_sample_polls_data(self):
# set sample course to have polls enabled by default
course_json_file = os.path.join(self.course_path, 'config', 'config.json')
with open(course_json_file, 'r+') as open_file:
course_json = json.load(open_file)
course_json['course_details']['polls_enabled'] = True
open_file.seek(0)
open_file.truncate()
json.dump(course_json, open_file, indent=2)
# load the sample polls from input file
polls_data_path = os.path.join(SETUP_DATA_PATH, "polls", "polls_data.json")
with open(polls_data_path, 'r') as polls_file:
polls_data = json.load(polls_file)
# set some values that depend on current time
polls_data[0]["image_path"] = self.course_path + polls_data[0]["image_path"]
polls_data[2]["release_date"] = f"{datetime.today().date()}"
# add attached image
image_dir = os.path.dirname(polls_data[0]["image_path"])
if os.path.isdir(image_dir):
shutil.rmtree(image_dir)
os.makedirs(image_dir)
os.system(f"chown -R submitty_php:sample_tas_www {image_dir}")
copyfile(os.path.join(SETUP_DATA_PATH, "polls", "sea_animals.png"), polls_data[0]["image_path"])
# add polls to DB
polls_table = Table("polls", self.metadata, autoload=True)
poll_options_table = Table("poll_options", self.metadata, autoload=True)
poll_responses_table = Table("poll_responses", self.metadata, autoload=True)
for poll in polls_data:
self.conn.execute(polls_table.insert(),
name=poll["name"],
question=poll["question"],
status=poll["status"],
release_date=poll["release_date"],
image_path=poll["image_path"],
question_type=poll["question_type"],
release_histogram=poll["release_histogram"])
for i in range(len(poll["responses"])):
self.conn.execute(poll_options_table.insert(),
option_id=i,
order_id=i,
poll_id=poll["id"],
response=poll["responses"][i],
correct=(i in poll["correct_responses"]))
# generate responses to the polls
poll_responses_data = []
# poll1: for each self.users make a random number (0-5) of responses
poll1_response_ids = list(range(len(polls_data[0]['responses'])))
for user in self.users:
random_responses = random.sample(poll1_response_ids, random.randint(0, len(polls_data[0]['responses'])))
for response_id in random_responses:
poll_responses_data.append({
"poll_id": polls_data[0]["id"],
"student_id": user.id,
"option_id": response_id
})
# poll2: take a large portion of self.users and make each submit one random response
for user in self.users:
if random.random() < 0.8:
poll_responses_data.append({
"poll_id": polls_data[1]["id"],
"student_id": user.id,
"option_id": random.randint(0, len(polls_data[1]['responses']) - 1)
})
# add responses to DB
for response in poll_responses_data:
self.conn.execute(poll_responses_table.insert(),
poll_id=response["poll_id"],
student_id=response["student_id"],
option_id=response["option_id"])
def add_sample_queue_data(self):
# load the sample polls from input file
queue_data_path = os.path.join(SETUP_DATA_PATH, "queue", "queue_data.json")
with open(queue_data_path, 'r') as queue_file:
queue_data = json.load(queue_file)
# set sample course to have office hours queue enabled by default
course_json_file = os.path.join(self.course_path, 'config', 'config.json')
with open(course_json_file, 'r+') as open_file:
course_json = json.load(open_file)
course_json['course_details']['queue_enabled'] = True
course_json['course_details']['queue_message'] = queue_data["queue_message"]
course_json['course_details']['queue_announcement_message'] = queue_data["queue_announcement_message"]
open_file.seek(0)
open_file.truncate()
json.dump(course_json, open_file, indent=2)
# generate values that depend on current date and time
# helped for the first time today, done --- LAB queue
queue_data["queue_entries"][0]["time_in"] = datetime.now() - timedelta(minutes=25)
queue_data["queue_entries"][0]["time_out"] = datetime.now() - timedelta(minutes=19)
queue_data["queue_entries"][0]["time_help_start"] = datetime.now() - timedelta(minutes=24)
# helped, done --- LAB queue
queue_data["queue_entries"][1]["time_in"] = datetime.now() - timedelta(minutes=24)
queue_data["queue_entries"][1]["time_out"] = datetime.now() - timedelta(minutes=15)
queue_data["queue_entries"][1]["time_help_start"] = datetime.now() - timedelta(minutes=23)
# removed by self --- LAB queue
queue_data["queue_entries"][2]["time_in"] = datetime.now() - timedelta(minutes=22)
queue_data["queue_entries"][2]["time_out"] = datetime.now() - timedelta(minutes=21)
# being helped --- HW queue
queue_data["queue_entries"][3]["time_in"] = datetime.now() - timedelta(minutes=23)
queue_data["queue_entries"][3]["time_help_start"] = datetime.now() - timedelta(minutes=14)
# waiting for help for second time today --- LAB queue
queue_data["queue_entries"][4]["time_in"] = datetime.now() - timedelta(minutes=21)
queue_data["queue_entries"][4]["last_time_in_queue"] = queue_data["queue_entries"][0]["time_in"]
# paused --- HW queue
queue_data["queue_entries"][5]["time_in"] = datetime.now() - timedelta(minutes=20)
queue_data["queue_entries"][5]["time_paused_start"] = datetime.now() - timedelta(minutes=18)
# wait for the first time --- HW queue
queue_data["queue_entries"][6]["time_in"] = datetime.now() - timedelta(minutes=15)
# waiting for help for second time this week --- LAB queue
queue_data["queue_entries"][7]["time_in"] = datetime.now() - timedelta(minutes=10)
queue_data["queue_entries"][7]["last_time_in_queue"] = datetime.now() - timedelta(days=1, minutes=30)
queues_table = Table("queue_settings", self.metadata, autoload=True)
queue_entries_table = Table("queue", self.metadata, autoload=True)
# make two sample queues
self.conn.execute(queues_table.insert(),
open=True,
code="Lab Help",
token="lab")
self.conn.execute(queues_table.insert(),
open=True,
code="Homework Debugging",
token="hw_debug")
# add, help, remove, pause, etc. students in the queue
for queue_entry in queue_data["queue_entries"]:
self.conn.execute(queue_entries_table.insert(),
current_state=queue_entry["current_state"],
removal_type=queue_entry["removal_type"],
queue_code=queue_entry["queue_code"],
user_id=queue_entry["user_id"],
name=queue_entry["name"],
time_in=queue_entry["time_in"],
time_out=queue_entry["time_out"],
added_by=queue_entry["added_by"],
help_started_by=queue_entry["help_started_by"],
removed_by=queue_entry["removed_by"],
contact_info=queue_entry["contact_info"],
last_time_in_queue=queue_entry["last_time_in_queue"],
time_help_start=queue_entry["time_help_start"],
paused=queue_entry["paused"],
time_paused=queue_entry["time_paused"],
time_paused_start=queue_entry["time_paused_start"])
def make_course_json(self):
"""
This function generates customization_sample.json in case it has changed from the provided version in the test suite
within the Submitty repository. Ideally this function will be pulled out and made independent, or better yet when
the code for the web interface is done, that will become the preferred route and this function can be retired.
Keeping this function after the web interface would mean we have another place where we need to update code anytime
the expected format of customization.json changes.
Right now the code uses the Gradeable and Component classes, so to avoid code duplication the function lives inside
setup_sample_courses.py
:return:
"""
course_id = self.code
# Reseed to minimize the situations under which customization.json changes
m = hashlib.md5()
m.update(bytes(course_id, "utf-8"))
random.seed(int(m.hexdigest(), 16))
# Would be great if we could install directly to test_suite, but
# currently the test uses "clean" which will blow away test_suite
customization_path = os.path.join(SUBMITTY_INSTALL_DIR, ".setup")
print("Generating customization_{}.json".format(course_id))
gradeables = {}
gradeables_json_output = {}
# Create gradeables by syllabus bucket
for gradeable in self.gradeables:
if gradeable.syllabus_bucket not in gradeables:
gradeables[gradeable.syllabus_bucket] = []
gradeables[gradeable.syllabus_bucket].append(gradeable)
# Randomly generate the impact of each bucket on the overall grade
gradeables_percentages = []
gradeable_percentage_left = 100 - len(gradeables)
for i in range(len(gradeables)):
gradeables_percentages.append(random.randint(1, max(1, gradeable_percentage_left)) + 1)
gradeable_percentage_left -= (gradeables_percentages[-1] - 1)
if gradeable_percentage_left > 0:
gradeables_percentages[-1] += gradeable_percentage_left
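# Net effect (illustrative): every bucket is assigned at least 2%, the remainder is
# split randomly, and any percentage still left over is folded into the last bucket,
# so the weights normally sum to exactly 100%.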
# Compute totals and write out each syllabus bucket in the "gradeables" field of customization.json
bucket_no = 0
for bucket in sorted(gradeables.keys()):
g_list = gradeables[bucket]
bucket_json = {"type": bucket, "count": len(g_list), "percent": 0.01*gradeables_percentages[bucket_no],
"ids" : []}
g_list.sort(key=lambda x: x.id)
# Manually total up the non-penalty non-extra-credit max scores, and decide which gradeables are 'released'
for gradeable in g_list:
use_ta_grading = gradeable.use_ta_grading
g_type = gradeable.type
components = gradeable.components
g_id = gradeable.id
max_auto = 0
max_ta = 0
print_grades = True if g_type != 0 or (gradeable.submission_open_date < NOW) else False
release_grades = (gradeable.has_release_date is True) and (gradeable.grade_released_date < NOW)
gradeable_config_dir = os.path.join(SUBMITTY_DATA_DIR, "courses", get_current_semester(), "sample",
"config", "complete_config")
# For electronic gradeables there is a config file - read through to get the total
if os.path.isdir(gradeable_config_dir):
gradeable_config = os.path.join(gradeable_config_dir, "complete_config_" + g_id + ".json")
if os.path.isfile(gradeable_config):
try:
with open(gradeable_config, 'r') as gradeable_config_file:
gradeable_json = json.load(gradeable_config_file)
# Not every config has AUTO_POINTS, so have to parse through test cases
# Add points to max if not extra credit, and points>0 (not penalty)
if "testcases" in gradeable_json:
for test_case in gradeable_json["testcases"]:
if "extra_credit" in test_case:
continue
if "points" in test_case and test_case["points"] > 0:
max_auto += test_case["points"]
except EnvironmentError:
print("Failed to load JSON")
# For non-electronic gradeables, or electronic gradeables with TA grading, read through components
if use_ta_grading or g_type != 0:
for component in components:
if component.max_value >0:
max_ta += component.max_value
# Add the specific associative array for this gradeable in customization.json to the output string
max_points = max_auto + max_ta
if print_grades:
bucket_json["ids"].append({"id": g_id, "max": max_points})
if not release_grades:
bucket_json["ids"][-1]["released"] = False
# Close the bucket's array in customization.json
if "gradeables" not in gradeables_json_output:
gradeables_json_output["gradeables"] = []
gradeables_json_output["gradeables"].append(bucket_json)
bucket_no += 1
# Generate the section labels
section_ta_mapping = {}
for section in range(1, self.registration_sections + 1):
section_ta_mapping[section] = []
for user in self.users:
if user.get_detail(course_id, "grading_registration_section") is not None:
grading_registration_sections = str(user.get_detail(course_id, "grading_registration_section"))
grading_registration_sections = [int(x) for x in grading_registration_sections.split(",")]
for section in grading_registration_sections:
section_ta_mapping[section].append(user.id)
for section in section_ta_mapping:
if len(section_ta_mapping[section]) == 0:
section_ta_mapping[section] = "TBA"
else:
section_ta_mapping[section] = ", ".join(section_ta_mapping[section])
# Construct the rest of the JSON dictionary
benchmarks = ["a-", "b-", "c-", "d"]
gradeables_json_output["display"] = ["instructor_notes", "grade_summary", "grade_details"]
gradeables_json_output["display_benchmark"] = ["average", "stddev", "perfect"]
gradeables_json_output["benchmark_percent"] = {}
for i in range(len(benchmarks)):
gradeables_json_output["display_benchmark"].append("lowest_" + benchmarks[i])
gradeables_json_output["benchmark_percent"]["lowest_" + benchmarks[i]] = 0.9 - (0.1 * i)
gradeables_json_output["section"] = section_ta_mapping
messages = ["<b>{} Course</b>".format(course_id),
"Note: Please be patient with data entry/grade corrections for the most recent "
"lab, homework, and test.",
"Please contact your graduate lab TA if a grade remains missing or incorrect for more than a week."]
gradeables_json_output["messages"] = messages
# Attempt to write the customization.json file
try:
with open(os.path.join(customization_path, "customization_" + course_id + ".json"), 'w') as customization_file:
customization_file.write("/*\n"
"This JSON is based on the automatically generated customization for\n"
"the development course \"{}\" as of {}.\n"
"It is intended as a simple example, with additional documentation online.\n"
"*/\n".format(course_id,NOW.strftime("%Y-%m-%d %H:%M:%S%z")))
json.dump(gradeables_json_output,
open(os.path.join(customization_path, "customization_" + course_id + ".json"), 'a'),indent=2)
except EnvironmentError as e:
print("Failed to write to customization file: {}".format(e))
print("Wrote customization_{}.json".format(course_id))
class Gradeable(object):
"""
Attributes:
config_path
id
type
"""
def __init__(self, gradeable):
self.id = ""
self.gradeable_config = None
self.config_path = None
self.sample_path = None
self.lichen_sample_path = None
self.plagiarized_user = {}
self.title = ""
self.instructions_url = ""
self.overall_ta_instructions = ""
self.peer_grading = False
self.grade_by_registration = True
self.grader_assignment_method = 1
self.is_repository = False
self.subdirectory = ""
self.use_ta_grading = True
self.late_days = 2
self.precision = 0.5
self.syllabus_bucket = "none (for practice only)"
self.min_grading_group = 3
self.grading_rotating = []
self.submissions = []
self.max_random_submissions = None
self.max_individual_submissions = 3
self.team_assignment = False
self.max_team_size = 1
self.has_due_date = True
self.has_release_date = True
self.allow_custom_marks = True
self.plagiarism_submissions = []
self.plagiarism_versions_per_user = 1
if 'gradeable_config' in gradeable:
self.gradeable_config = gradeable['gradeable_config']
self.type = 0
if 'g_id' in gradeable:
self.id = gradeable['g_id']
else:
self.id = gradeable['gradeable_config']
if 'eg_max_random_submissions' in gradeable:
self.max_random_submissions = int(gradeable['eg_max_random_submissions'])
if 'eg_max_individual_submissions' in gradeable:
self.max_individual_submissions = int(gradeable['eg_max_individual_submissions'])
if 'config_path' in gradeable:
self.config_path = gradeable['config_path']
else:
examples_path = os.path.join(MORE_EXAMPLES_DIR, self.gradeable_config, "config")
tutorial_path = os.path.join(TUTORIAL_DIR, self.gradeable_config, "config")
if os.path.isdir(examples_path):
self.config_path = examples_path
elif os.path.isdir(tutorial_path):
self.config_path = tutorial_path
else:
self.config_path = None
examples_path = os.path.join(MORE_EXAMPLES_DIR, self.gradeable_config, "submissions")
tutorial_path = os.path.join(TUTORIAL_DIR, self.gradeable_config, "submissions")
if 'eg_lichen_sample_path' in gradeable:
self.lichen_sample_path = gradeable['eg_lichen_sample_path']
if 'eg_plagiarized_users' in gradeable:
for user in gradeable['eg_plagiarized_users']:
temp = user.split(" - ")
self.plagiarized_user[temp[0]] = temp[1:]
else: # if we weren't given a list of plagiarized users, make one
self.plagiarism_submissions = os.listdir(self.lichen_sample_path)
random.shuffle(self.plagiarism_submissions)
if 'eg_plagiarism_versions_per_user' in gradeable:
self.plagiarism_versions_per_user = gradeable['eg_plagiarism_versions_per_user']
if 'sample_path' in gradeable:
self.sample_path = gradeable['sample_path']
else:
if os.path.isdir(examples_path):
self.sample_path = examples_path
elif os.path.isdir(tutorial_path):
self.sample_path = tutorial_path
else:
self.sample_path = None
else:
self.id = gradeable['g_id']
self.type = int(gradeable['g_type'])
self.config_path = None
self.sample_path = None
# To make Rainbow Grades testing possible, need to seed random
m = hashlib.md5()
m.update(bytes(self.id, 'utf-8'))
random.seed(int(m.hexdigest(), 16))
if 'g_bucket' in gradeable:
self.syllabus_bucket = gradeable['g_bucket']
assert 0 <= self.type <= 2
if 'g_title' in gradeable:
self.title = gradeable['g_title']
else:
self.title = self.id.replace("_", " ").title()
if 'g_grader_assignment_method' in gradeable:
self.grade_by_registration = gradeable['g_grader_assignment_method'] == 1
self.grader_assignment_method = gradeable['g_grader_assignment_method']
if 'grading_rotating' in gradeable:
self.grading_rotating = gradeable['grading_rotating']
self.ta_view_date = dateutils.parse_datetime(gradeable['g_ta_view_start_date'])
self.grade_start_date = dateutils.parse_datetime(gradeable['g_grade_start_date'])
self.grade_due_date = dateutils.parse_datetime(gradeable['g_grade_due_date'])
self.grade_released_date = dateutils.parse_datetime(gradeable['g_grade_released_date'])
if self.type == 0:
self.submission_open_date = dateutils.parse_datetime(gradeable['eg_submission_open_date'])
self.submission_due_date = dateutils.parse_datetime(gradeable['eg_submission_due_date'])
self.team_lock_date = dateutils.parse_datetime(gradeable['eg_submission_due_date'])
self.grade_inquiry_start_date = dateutils.parse_datetime(gradeable['eg_grade_inquiry_start_date'])
self.grade_inquiry_due_date = dateutils.parse_datetime(gradeable['eg_grade_inquiry_due_date'])
self.student_view = True
self.student_submit = True
if 'eg_is_repository' in gradeable:
self.is_repository = gradeable['eg_is_repository'] is True
if self.is_repository and 'eg_subdirectory' in gradeable:
self.subdirectory = gradeable['eg_subdirectory']
if 'eg_peer_grading' in gradeable:
self.peer_grading = gradeable['eg_peer_grading']
if 'eg_use_ta_grading' in gradeable:
self.use_ta_grading = gradeable['eg_use_ta_grading'] is True
if 'eg_student_view' in gradeable:
self.student_view = gradeable['eg_student_view'] is True
if 'eg_student_submit' in gradeable:
self.student_submit = gradeable['eg_student_submit'] is True
if 'eg_late_days' in gradeable:
self.late_days = max(0, int(gradeable['eg_late_days']))
else:
self.late_days = random.choice(range(0, 3))
if 'eg_precision' in gradeable:
self.precision = float(gradeable['eg_precision'])
if 'eg_team_assignment' in gradeable:
self.team_assignment = gradeable['eg_team_assignment'] is True
if 'eg_max_team_size' in gradeable:
self.max_team_size = gradeable['eg_max_team_size']
if 'eg_team_lock_date' in gradeable:
            self.team_lock_date = dateutils.parse_datetime(gradeable['eg_team_lock_date'])
self.has_due_date = gradeable['eg_has_due_date'] if 'eg_has_due_date' in gradeable else True
self.has_release_date = gradeable['eg_has_release_date'] if 'eg_has_release_date' in gradeable else True
if self.config_path is None:
examples_path = os.path.join(MORE_EXAMPLES_DIR, self.id, "config")
tutorial_path = os.path.join(TUTORIAL_DIR, self.id, "config")
if os.path.isdir(examples_path):
self.config_path = examples_path
elif os.path.isdir(tutorial_path):
self.config_path = tutorial_path
else:
self.config_path = None
assert self.ta_view_date < self.submission_open_date
assert self.has_due_date is False or self.submission_open_date < self.submission_due_date
assert self.has_due_date is False or self.submission_due_date < self.grade_start_date
assert self.has_release_date is False or self.grade_released_date <= self.grade_inquiry_start_date
assert self.grade_inquiry_start_date < self.grade_inquiry_due_date
if self.gradeable_config is not None:
if self.sample_path is not None:
if os.path.isfile(os.path.join(self.sample_path, "submissions.yml")):
self.submissions = load_data_yaml(os.path.join(self.sample_path, "submissions.yml"))
else:
self.submissions = os.listdir(self.sample_path)
self.submissions = list(filter(lambda x: not x.startswith("."), self.submissions))
# Ensure we're not sensitive to directory traversal order
self.submissions.sort()
if isinstance(self.submissions, list):
for elem in self.submissions:
if isinstance(elem, dict):
raise TypeError("Cannot have dictionary inside of list for submissions "
"for {}".format(self.sample_path))
assert self.ta_view_date < self.grade_start_date
assert self.grade_start_date < self.grade_due_date
assert self.has_release_date is False or self.grade_due_date <= self.grade_released_date
self.components = []
for i in range(len(gradeable['components'])):
component = gradeable['components'][i]
if self.type >= 0:
component['gc_ta_comment'] = generate_random_ta_note()
component['gc_student_comment'] = generate_random_student_note()
component['gc_page'] = 0
if self.type == 1:
component['gc_lower_clamp'] = 0
component['gc_default'] = 0
component['gc_max_value'] = 1
component['gc_upper_clamp'] = 1
if self.type != 2:
component['gc_is_text'] = False
i -= 1
self.components.append(Component(component, i+1))
def create(self, conn, gradeable_table, electronic_table, peer_assign, reg_table, component_table, mark_table):
conn.execute(gradeable_table.insert(), g_id=self.id, g_title=self.title,
g_instructions_url=self.instructions_url,
g_overall_ta_instructions=self.overall_ta_instructions,
g_gradeable_type=self.type,
g_grader_assignment_method=self.grader_assignment_method,
g_ta_view_start_date=self.ta_view_date,
g_grade_start_date=self.grade_start_date,
g_grade_due_date=self.grade_due_date,
g_grade_released_date=self.grade_released_date,
g_syllabus_bucket=self.syllabus_bucket,
g_allow_custom_marks=self.allow_custom_marks,
g_min_grading_group=self.min_grading_group,
g_closed_date=None)
for rotate in self.grading_rotating:
conn.execute(reg_table.insert(), g_id=self.id, user_id=rotate['user_id'],
sections_rotating=rotate['section_rotating_id'])
if self.peer_grading is True:
with open(os.path.join(SETUP_DATA_PATH, 'random', 'graders.txt')) as graders, \
open(os.path.join(SETUP_DATA_PATH, 'random', 'students.txt')) as students:
graders = graders.read().strip().split()
students = students.read().strip().split()
length = len(graders)
for i in range(length):
conn.execute(peer_assign.insert(), g_id=self.id, grader_id=graders[i], user_id=students[i])
if self.type == 0:
conn.execute(electronic_table.insert(), g_id=self.id,
eg_submission_open_date=self.submission_open_date,
eg_submission_due_date=self.submission_due_date,
eg_is_repository=self.is_repository, eg_subdirectory=self.subdirectory,
eg_team_assignment=self.team_assignment,
eg_max_team_size=self.max_team_size,
eg_team_lock_date=self.team_lock_date,
eg_use_ta_grading=self.use_ta_grading,
eg_student_view=self.student_view,
eg_student_submit=self.student_submit,
eg_config_path=self.config_path,
eg_late_days=self.late_days, eg_precision=self.precision, eg_peer_grading=self.peer_grading,
eg_grade_inquiry_start_date=self.grade_inquiry_start_date,
eg_grade_inquiry_due_date=self.grade_inquiry_due_date)
for component in self.components:
component.create(self.id, conn, component_table, mark_table)
def create_form(self):
form_json = OrderedDict()
form_json['gradeable_id'] = self.id
if self.type == 0:
form_json['config_path'] = self.config_path
form_json['gradeable_title'] = self.title
form_json['gradeable_type'] = self.get_gradeable_type_text()
form_json['instructions_url'] = self.instructions_url
form_json['ta_view_date'] = dateutils.write_submitty_date(self.ta_view_date)
if self.type == 0:
form_json['date_submit'] = dateutils.write_submitty_date(self.submission_open_date)
form_json['date_due'] = dateutils.write_submitty_date(self.submission_due_date)
form_json['grade_inquiry_start_date'] = dateutils.write_submitty_date(self.grade_inquiry_start_date)
form_json['grade_inquiry_due_date'] = dateutils.write_submitty_date(self.grade_inquiry_due_date)
form_json['date_grade'] = dateutils.write_submitty_date(self.grade_start_date)
form_json['date_grade_due'] = dateutils.write_submitty_date(self.grade_due_date)
form_json['date_released'] = dateutils.write_submitty_date(self.grade_released_date)
if self.type == 0:
form_json['section_type'] = self.get_submission_type()
form_json['eg_late_days'] = self.late_days
form_json['upload_type'] = self.get_upload_type()
form_json['upload_repo'] = self.subdirectory
form_json['comment_title'] = []
form_json['points'] = []
form_json['eg_extra'] = []
form_json['ta_comment'] = []
form_json['student_comment'] = []
for i in range(len(self.components)):
component = self.components[i]
form_json['comment_title'].append(component.title)
# form_json['lower_clamp'].append(component.lower_clamp)
# form_json['default'].append(component.default)
form_json['points'].append(component.max_value)
# form_json['upper_clamp'].append(component.upper_clamp)
form_json['ta_comment'].append(component.ta_comment)
form_json['student_comment'].append(component.student_comment)
elif self.type == 1:
form_json['checkpoint_label'] = []
form_json['checkpoint_extra'] = []
for i in range(len(self.components)):
component = self.components[i]
form_json['checkpoint_label'].append(component.title)
else:
form_json['num_numeric_items'] = 0
form_json['numeric_labels'] = []
form_json['lower_clamp'] = []
form_json['default'] = []
form_json['max_score'] = []
form_json['upper_clamp'] = []
form_json['numeric_extra'] = []
form_json['num_text_items'] = 0
form_json['text_label'] = []
for i in range(len(self.components)):
component = self.components[i]
if component.is_text:
form_json['num_text_items'] += 1
form_json['text_label'].append(component.title)
else:
form_json['num_numeric_items'] += 1
form_json['numeric_labels'].append(component.title)
form_json['lower_clamp'].append(component.lower_clamp)
form_json['default'].append(component.default)
form_json['max_score'].append(component.max_value)
form_json['upper_clamp'].append(component.upper_clamp)
form_json['minimum_grading_group'] = self.min_grading_group
form_json['gradeable_buckets'] = self.syllabus_bucket
return form_json
def get_gradeable_type_text(self):
if self.type == 0:
return "Electronic File"
elif self.type == 1:
return "Checkpoints"
else:
return "Numeric"
def get_submission_type(self):
if self.grade_by_registration:
return "reg_section"
else:
return "rotating-section"
def get_upload_type(self):
if self.is_repository:
return "Repository"
else:
return "Upload File"
class Component(object):
def __init__(self, component, order):
self.title = component['gc_title']
self.ta_comment = ""
self.student_comment = ""
self.is_text = False
self.is_peer_component = False
self.page = 0
self.order = order
self.marks = []
if 'gc_ta_comment' in component:
self.ta_comment = component['gc_ta_comment']
if 'gc_is_peer' in component:
self.is_peer_component = component['gc_is_peer']
if 'gc_student_comment' in component:
self.student_comment = component['gc_student_comment']
if 'gc_is_text' in component:
self.is_text = component['gc_is_text'] is True
if 'gc_page' in component:
self.page = int(component['gc_page'])
if self.is_text:
self.lower_clamp = 0
self.default = 0
self.max_value = 0
self.upper_clamp = 0
else:
self.lower_clamp = float(component['gc_lower_clamp'])
self.default = float(component['gc_default'])
self.max_value = float(component['gc_max_value'])
self.upper_clamp = float(component['gc_upper_clamp'])
if 'marks' in component:
for i in range(len(component['marks'])):
mark = component['marks'][i]
self.marks.append(Mark(mark, i))
else:
self.marks = generate_random_marks(self.default, self.max_value)
self.key = None
def create(self, g_id, conn, table, mark_table):
ins = table.insert().values(g_id=g_id, gc_title=self.title, gc_ta_comment=self.ta_comment,
gc_student_comment=self.student_comment,
gc_lower_clamp=self.lower_clamp, gc_default=self.default, gc_max_value=self.max_value,
gc_upper_clamp=self.upper_clamp, gc_is_text=self.is_text,
gc_is_peer=self.is_peer_component, gc_order=self.order, gc_page=self.page)
res = conn.execute(ins)
self.key = res.inserted_primary_key[0]
for mark in self.marks:
mark.create(self.key, conn, mark_table)
class Mark(object):
def __init__(self, mark, order):
self.note = mark['gcm_note']
self.points = mark['gcm_points']
self.order = order
self.grader = 'instructor'
self.key = None
def create(self, gc_id, conn, table):
ins = table.insert().values(gc_id=gc_id, gcm_points=self.points, gcm_note=self.note,
gcm_order=self.order)
res = conn.execute(ins)
self.key = res.inserted_primary_key[0]
if __name__ == "__main__":
main()
|
py | 1a4d2a16f587486d977abf5a4528042cc3372b1e | """
A binary search tree (BST) is a sorted binary tree that supports searching in logarithmic time*. Unlike binary search
on an array, inserting and deleting nodes also take only logarithmic time*.
*Assuming the tree is relatively balanced such as an AVL tree or red-black tree. As an extreme example, the binary tree
below is essentially a linked list:
1
\
2
\
3
\
4
\
5
BSTs are binary trees where every node in node X's left subtree has a value less than X and every node in X's right
subtree has a value greater than X. By default they hold no duplicates (though a per-node count attribute could be
added to support them). An inorder traversal processes the nodes from lowest to highest value, as demonstrated by the
illustrative inorder() method added to the BST class below.
"""
import random
class Node:
def __init__(self, val=None):
self.val = val
self.left = None
self.right = None
class BST:
def __init__(self):
self.root = None
def insert(self, val):
if not self.root:
self.root = Node(val)
return True
curr = self.root
while True:
if val < curr.val:
if not curr.left:
curr.left = Node(val)
return True
curr = curr.left
elif val > curr.val:
if not curr.right:
curr.right = Node(val)
return True
curr = curr.right
else:
return False # No duplicates
def delete(self, val):
def newNode(node): # Node that will take the deleted node's spot
# At least one child missing: Replace the node with the other child
if not node.left:
return node.right
if not node.right:
return node.left
            # Two children: copy the inorder successor's value into this node,
            # then unlink the successor from its original position
            curr_node = node.right
            curr_parent = node
            while curr_node.left:
                curr_parent = curr_node
                curr_node = curr_node.left
            node.val = curr_node.val
            if curr_parent is node:
                curr_parent.right = curr_node.right  # successor was node's direct right child
            else:
                curr_parent.left = curr_node.right   # successor was the leftmost node in the right subtree
            return node
if not self.root:
return False
parent = None
curr = self.root
if curr.val == val: # Handles the root case
self.root = newNode(curr)
return True
while curr: # Find the node
if curr.val > val:
parent = curr
curr = curr.left
elif curr.val < val:
parent = curr
curr = curr.right
else: # Replace it with the new node
if parent.left == curr:
parent.left = newNode(curr)
else:
parent.right = newNode(curr)
return True
return False
def search(self, val):
curr = self.root
while curr:
            if val < curr.val:      # target is smaller -> go left
                curr = curr.left
            elif val > curr.val:    # target is larger -> go right
                curr = curr.right
else:
return True
return False
def minValue(self):
if self.root is None:
return None
curr = self.root
while True:
if not curr.left:
return curr.val
curr = curr.left
def maxValue(self):
if self.root is None:
return None
curr = self.root
while True:
if not curr.right:
return curr.val
curr = curr.right
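    # Illustrative addition, not part of the original class: a small sketch demonstrating the
    # docstring's claim that an inorder traversal visits the stored values in ascending order.
    def inorder(self):
        """Return all values in sorted order via an iterative inorder traversal."""
        result, stack, curr = [], [], self.root
        while stack or curr:
            while curr:              # walk as far left as possible
                stack.append(curr)
                curr = curr.left
            curr = stack.pop()
            result.append(curr.val)  # visit a node between its left and right subtrees
            curr = curr.right
        return result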
def print2D(self):
"""
Wrapper method for __print2D
Prints this binary tree in a 2-D representation
"""
self.__print2D(self.root)
def __print2D(self, root, space=0):
if not root:
return
space += 4
# Process right child first
self.__print2D(root.right, space)
# Print current node after a newline
print()
for spaces in range(4, space):
print(end=" ")
print(root.val)
# Process left child
self.__print2D(root.left, space)
if __name__ == "__main__":
bst = BST()
bst.insert(50)
nodes = [50]
for i in range(50):
value = random.randint(1, 100)
nodes.append(value)
bst.insert(value)
bst.print2D()
maximum = bst.maxValue()
assert maximum == max(nodes)
print(f"\nMaximum value: {maximum}")
minimum = bst.minValue()
assert minimum == min(nodes)
print(f"Minimum value: {minimum}")
for i in nodes:
bst.delete(i)
bst.print2D()
assert bst.root is None
|
py | 1a4d2b8fcb6c59cb6563a9f796ff1dc39268ee60 | import warnings
warnings.filterwarnings('ignore')
# data processing
import pandas as pd
import numpy as np
# image processing
from PIL import Image
# tf and keras
import tensorflow as tf
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Input, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose, Dropout, BatchNormalization
from keras.layers.core import Dense, Activation, Flatten
from keras.optimizers import SGD, Adam
from keras.utils import plot_model
from keras.callbacks import ModelCheckpoint
from keras import backend as K
# dataset processing, ml models and metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import scipy
import glob
from itertools import product
# Object Detection Metrics
import xml.etree.ElementTree as ET
__all__ = [
'SIMILARS_MAP',
'GREEKS',
'read_csv',
'remove_transparency',
'preprocess_img',
'populate_images',
'convert_to_one_hot_encode',
'one_hot_encode_to_char',
'one_hot_encode_to_char_list',
'convert_pred_list_ohe_to_labels',
'get_df_split',
'gen_x_y_train_test_stratified_1df',
'process_x_y_train_test_stratified_2df',
'process_x_y_train_test_stratified_ann',
'get_label_count_df',
'get_label_count_train_test_dfs',
'dir_',
'model_dir',
'data_dir',
'processed_data_dir',
'get_symbols',
'generate_eqns',
'read_annotation',
'get_iou',
'calculate_map',
]
dir_ = 'HASYv2/'
model_dir = 'trained_models/'
data_dir = 'data/'
processed_data_dir = 'processed_data/'
SIMILARS_MAP = [
(r'\times', 'x', 'X'),
('y', 'Y'),
('c', r'\subset', r'\subseteq'),
('g', '9'),
('o', 'O', '0'),
('s', '5'),
('z', 'Z', '2'),
]
GREEKS = [r'\sigma', r'\Sigma', r'\gamma', r'\delta', r'\Delta',
r'\eta', r'\theta', r'\epsilon', r'\lambda', r'\mu',
r'\Pi', r'\rho', r'\phi', r'\omega', r'\ohm']
def read_csv(path):
return pd.read_csv(path)
# Image Preprocessing
def remove_transparency(im, bg_colour=(255, 255, 255)):
# Only process if image has transparency
if im.mode in ('RGBA', 'LA') or (im.mode == 'P' and 'transparency' in im.info):
# Need to convert to RGBA if LA format due to a bug in PIL
alpha = im.convert('RGBA').split()[-1]
# Create a new background image of our matt color.
# Must be RGBA because paste requires both images have the same format
bg = Image.new("RGBA", im.size, bg_colour + (255,))
bg.paste(im, mask=alpha)
return bg
else:
return im
def preprocess_img(path):
# Open Image
im = Image.open(dir_ + path)
# Resize image to 32 by 32
if im.size != (32, 32):
im = im.resize((32, 32))
# Convert image to a single greyscale channel
im = remove_transparency(im).convert('L')
# Convert image to numpy array
I = np.asarray(im)
# Close image
im.close()
return I
def populate_images(dataset):
temp = []
for i in range(len(dataset)):
path = dataset.iloc[i]['path']
pathsplit = path.split('/')
if len(pathsplit) > 2:
path = '/'.join([pathsplit[-2], pathsplit[-1]])
img = preprocess_img(path)
temp.append(img)
dataset['img'] = [i for i in temp]
return dataset
def convert_to_one_hot_encode(data, no_categories):
data = np.array(data).reshape(-1)
print('len of dataset', len(data))
return np.eye(no_categories)[data]
# Converts a one-hot / probability output vector back to category indices.
# Returns the single highest-scoring index by default (get_max=True), or all indices whose
# score meets the threshold (default 0.5) when get_max=False.
def one_hot_encode_to_char(arr, threshold=0.5, get_max=True):
result = []
val = 0
for i in range(len(arr)):
if arr[i] >= threshold:
result.append((val, arr[i]))
val += 1
_max = []
high = 0
if get_max:
for i in result:
if i[1] > high:
_max = [i[0]]
high = i[1]
return _max
else:
return [i[0] for i in result]
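# Illustrative helper, not part of the original module: shows what one_hot_encode_to_char returns
# for a small made-up prediction vector.
def _demo_one_hot_encode_to_char():
    preds = [0.1, 0.7, 0.2]
    assert one_hot_encode_to_char(preds) == [1]                                    # highest-scoring index only
    assert one_hot_encode_to_char(preds, get_max=False) == [1]                     # all indices >= 0.5
    assert one_hot_encode_to_char(preds, threshold=0.15, get_max=False) == [1, 2]  # all indices >= 0.15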
def one_hot_encode_to_char_list(arr, threshold=0.5, get_max=True):
result = []
for i in range(len(arr)):
if arr[i] >= threshold:
result.append((i, arr[i]))
_max = []
result = sorted(result, key=lambda x: x[1], reverse=True)
if get_max:
return result[0]
return result
def convert_pred_list_ohe_to_labels(pred_data, threshold=0.5, get_max=True):
result = []
for i in range(len(pred_data)):
val = one_hot_encode_to_char(pred_data[i], threshold=threshold, get_max=get_max)
if len(val) > 0:
if get_max:
result.append(val[0])
else:
result.append(val)
else:
result.append(None)
print(":( :( :(")
return result
# Dataset Splitting
# Stratified Train Test Split (new function)
def get_df_split(ds, stratify_col, test_size=0.2):
_train, _test = train_test_split(ds, test_size=test_size, stratify=ds[stratify_col])
return _train, _test
# function to split whole dataset at once (old function)
def gen_x_y_train_test_stratified_1df(dataset, input_shape, test_size=0.2):
x = np.array(list(dataset['img']))
y = np.array(list(dataset['symbol_id_ohe']))
x = x.reshape((x.shape[0], 1, input_shape[1], input_shape[2]))
# Normalize data to 0-1
x = x.astype("float32") / 255.0
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=test_size, stratify=y)
return X_train, X_test, y_train, y_test
# function to process already split data
def process_x_y_train_test_stratified_2df(_tr, _ts, input_shape):
# train df
X_train = np.array(list(_tr['img']))
y_train = np.array(list(_tr['symbol_id_ohe']))
X_train = X_train.reshape((X_train.shape[0], 1, input_shape[1], input_shape[2]))
# Normalize data to 0-1
X_train = X_train.astype("float32") / 255.0
# test df
X_test = np.array(list(_ts['img']))
y_test = np.array(list(_ts['symbol_id_ohe']))
X_test = X_test.reshape((X_test.shape[0], 1, input_shape[1], input_shape[2]))
# Normalize data to 0-1
X_test = X_test.astype("float32") / 255.0
return X_train, X_test, y_train, y_test
def process_x_y_train_test_stratified_ann(_tr, _ts, input_shape):
X_train = np.array(list(_tr['img']))
y_train = np.array(list(_tr['symbol_id_ohe']))
X_train = X_train.reshape((X_train.shape[0], input_shape[0]))
# Normalize data to 0-1
X_train = X_train.astype("float32") / 255.0
# test df
X_test = np.array(list(_ts['img']))
y_test = np.array(list(_ts['symbol_id_ohe']))
X_test = X_test.reshape((X_test.shape[0], input_shape[0]))
# Normalize data to 0-1
X_test = X_test.astype("float32") / 255.0
return X_train, X_test, y_train, y_test
# Dataset metrics
# generate label counts for dataframe and list
def get_label_count_df(df_train, df_test, sym_list):
train_labels_count = {}
test_labels_count = {}
perc_labels_count = {}
for i in sym_list:
train_labels_count[i] = 0
test_labels_count[i] = 0
for i in range(len(df_train)):
train_labels_count[df_train.loc[i, 'symbol_id']] += 1
for i in range(len(df_test)):
test_labels_count[df_test.loc[i, 'symbol_id']] += 1
for i in sym_list:
perc = (train_labels_count[i] / (train_labels_count[i] + test_labels_count[i])) * 100
perc_labels_count[i] = (train_labels_count[i], test_labels_count[i], round(perc, 2))
return perc_labels_count
def get_label_count_train_test_dfs(df_train, df_test):
train_labels_count = {}
test_labels_count = {}
perc_labels_count = {}
train_syms = df_train['symbol_id'].unique()
test_syms = df_test['symbol_id'].unique()
sym_list = np.unique(np.concatenate([train_syms, test_syms], axis=0))
for i in sym_list:
train_labels_count[i] = 0
test_labels_count[i] = 0
for i in range(len(df_train)):
train_labels_count[df_train.loc[i, 'symbol_id']] += 1
for i in range(len(df_test)):
test_labels_count[df_test.loc[i, 'symbol_id']] += 1
for i in sym_list:
perc = (train_labels_count[i] / (train_labels_count[i] + test_labels_count[i])) * 100
perc_labels_count[i] = (train_labels_count[i], test_labels_count[i], round(perc, 2))
return perc_labels_count
def get_label_count_list(lst_data, sym_list):
labels_count = {}
for i in sym_list:
labels_count[i] = 0
for i in range(len(lst_data)):
j = one_hot_encode_to_char(lst_data[i])[0]
labels_count[j] += 1
return labels_count
# Error Handling before Syntactical Analysis
def get_symbols(syms_):
result_syms = []
for i in syms_:
sym_maps = None
for j in i:
#if sym_maps is not None:
# break
# ignore greeks for now since greeks are not included in lexer
if j[0] in GREEKS:
continue
for k in SIMILARS_MAP:
if j[0] in k:
sym_maps = k
break
break
if sym_maps is not None:
result_syms.append(sym_maps)
else:
for j in i:
if j[0] not in GREEKS:
result_syms.append((j[0],))
break
return result_syms
def generate_eqns(err_handled_symbols):
return [i for i in product(*err_handled_symbols)]
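# Illustrative helper, not part of the original module: shows how ambiguous symbols are expanded
# through SIMILARS_MAP and recombined into candidate equations. The (symbol, score) pairs are
# made-up placeholder predictions.
def _demo_error_handling():
    syms = [[('g', 0.9)], [('+', 0.8)], [('5', 0.7)]]       # one candidate list per detected character
    expanded = get_symbols(syms)
    assert expanded == [('g', '9'), ('+',), ('s', '5')]
    eqns = generate_eqns(expanded)
    assert eqns == [('g', '+', 's'), ('g', '+', '5'), ('9', '+', 's'), ('9', '+', '5')]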
# ************************************************************
# Object Detection metrics
# read xml file
def read_annotation(xml_file):
tree = ET.parse(xml_file)
root = tree.getroot()
all_boxes = []
for i in root.iter('object'):
ymin, xmin, ymax, xmax = None, None, None, None
for j in i.findall("bndbox"):
ymin = int(j.find("ymin").text)
xmin = int(j.find("xmin").text)
ymax = int(j.find("ymax").text)
xmax = int(j.find("xmax").text)
# bbox = [xmin, xmax, ymin, ymax]
bbox = {
'x1': xmin,
'x2': xmax,
'y1': ymin,
'y2': ymax
}
all_boxes.append(bbox)
return all_boxes
# calculate iou
def get_iou(bb1, bb2):
"""
Calculate the Intersection over Union (IoU) of two bounding boxes.
Parameters
----------
bb1 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
bb2 : dict
Keys: {'x1', 'x2', 'y1', 'y2'}
        The (x1, y1) position is at the top left corner,
the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
assert bb1['x1'] < bb1['x2']
assert bb1['y1'] < bb1['y2']
assert bb2['x1'] < bb2['x2']
assert bb2['y1'] < bb2['y2']
# determine the coordinates of the intersection rectangle
x_left = max(bb1['x1'], bb2['x1'])
y_top = max(bb1['y1'], bb2['y1'])
x_right = min(bb1['x2'], bb2['x2'])
y_bottom = min(bb1['y2'], bb2['y2'])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
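# Illustrative helper, not part of the original module: a quick sanity check of get_iou on two
# made-up axis-aligned boxes. They overlap in a 5x5 region, so intersection = 25 and
# union = 100 + 100 - 25 = 175, giving IoU = 25/175 (about 0.143).
def _demo_get_iou():
    bb_a = {'x1': 0, 'y1': 0, 'x2': 10, 'y2': 10}
    bb_b = {'x1': 5, 'y1': 5, 'x2': 15, 'y2': 15}
    assert abs(get_iou(bb_a, bb_b) - 25 / 175) < 1e-9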
def calculate_map(map_data):
"""
map_data: a list of tuples with each tuple containing (precision, recall)
"""
md = sorted(map_data, key=lambda x: x[1])
md = [(i, round(j, 1)) for i, j in md]
ap_11_precs = {str(round(k*0.1, 1)): None for k in range(11)}
for p_, r_ in md:
if not ap_11_precs[str(r_)] or p_ > ap_11_precs[str(r_)]:
ap_11_precs[str(r_)] = p_
ap_11_precs_list = list(ap_11_precs.values())
ap_11_precs_list = [z if z is not None else 0 for z in ap_11_precs_list]
mean_ap = np.mean(ap_11_precs_list)
return mean_ap
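# Illustrative helper, not part of the original module: calculate_map averages the best precision
# found at each of the 11 rounded recall bins (0.0 to 1.0); empty bins count as 0. With made-up
# points (precision=1.0, recall=0.1) and (precision=0.5, recall=0.5), only two bins are filled,
# so the result is (1.0 + 0.5) / 11.
def _demo_calculate_map():
    map_data = [(1.0, 0.1), (0.5, 0.5)]
    assert abs(calculate_map(map_data) - 1.5 / 11) < 1e-9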
# ********************************************************
|
py | 1a4d2c1a219a771d1806b678da93af991dad698d | import pymssql
from scrape_microcenter import ScrapeMicrocenter
from time import time
class MSSQL_Database:
def __init__(self, server, user, password, database, autocommit=True):
self._server = server
self._user = user
self._password = password
self._database = database
self._autocommit = autocommit
self._conn = pymssql.connect(server=server, port=1433, user=user,
password=password, database=database,
autocommit=autocommit)
self._tablecol = {}
def mssql_dec(func):
def execute(self, *args, **kwargs):
with self._conn.cursor() as cursor:
command = func(self, *args, **kwargs)
cursor.execute(command)
print(command)
return execute
def time_dec(func):
def wrapper(self, *args, **kwargs):
init_t = time()
ret = func(self, *args, **kwargs)
final_t = time()
print(func, args, kwargs, final_t-init_t)
return ret
return wrapper
@time_dec
@mssql_dec
def create_db(self, db_name):
command = """
IF NOT EXISTS(SELECT * FROM master.dbo.sysdatabases WHERE NAME = '{db_name}')
BEGIN
CREATE DATABASE [{db_name}]
END;""".format(db_name=db_name)
return command
@time_dec
@mssql_dec
def create_table(self, table_name, **kwargs):
self._tablecol = kwargs
command ="""
IF OBJECT_ID('{name}', 'U') IS NULL
CREATE TABLE {name} (
ID int IDENTITY(1,1) PRIMARY KEY,\n""".format(name=table_name)
if kwargs is not None:
for col, col_type in kwargs.items():
if col_type.upper() == 'VARCHAR':
command += "\t\t{col} {col_type}(255),\n".format(
col=col, col_type=col_type)
else:
command += "\t\t{col} {col_type},\n".format(
col=col, col_type=col_type)
command += "\t\t);"
return command
@time_dec
@mssql_dec
def insert_table(self, table_name, **kwargs):
assert kwargs is not None,"Product not passed. Check to see argument is passed"
command = "INSERT INTO {table_name} (".format(table_name=table_name)
for col_name in kwargs:
command += "{}, ".format(col_name)
command = command[0:-2]
command += ")\nVALUES ("
for col_name in kwargs:
kwargs[col_name] = kwargs[col_name].replace("\'", "\'\'")
if self._tablecol[col_name] in ('varchar', 'datetime'):
command += "'{}', ".format(kwargs[col_name])
else:
command += "{}, ".format(kwargs[col_name])
command = command[0:-2]
command += ");"
print(command)
return command
@time_dec
def get_tablecol(self):
return self._tablecol
@time_dec
def commit_command(self):
self._conn.commit()
@time_dec
def close_conn(self):
self._conn.close()
|
py | 1a4d2c74c2553e045ce8279f14967b0e9764b502 | import numpy as np
from src.strategize.game import Game
def prisoners_dilemma():
player_set = np.array(['Alice', 'Bob'])
action_set = np.array([['Cooperate', 'Defect'], ['Cooperate', 'Defect']])
utility_set = np.array([[[2, 2], [0, 3]], [[3, 0], [1, 1]]])
pd = Game(player_set, action_set, utility_set)
return pd
def common_payoff():
player_set = np.array(['Alice', 'Bob'])
action_set = np.array([['Left', 'Right'], ['Left', 'Right']])
utility_set = np.array([[[1, 1], [0, 0]], [[0, 0], [1, 1]]])
cp = Game(player_set, action_set, utility_set)
return cp
def zero_sum():
player_set = np.array(['Alice', 'Bob'])
action_set = np.array([['Heads', 'Tails'], ['Left', 'Right']])
utility_set = np.array([[[1, -1], [-1, 1]], [[-1, 1], [1, -1]]])
zs = Game(player_set, action_set, utility_set)
return zs
game = prisoners_dilemma()
print(game)
game.plot() |