filename (stringlengths 13-19) | text (stringlengths 134-1.04M) |
---|---|
the-stack_0_17215 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import time as _time
import datetime as _datetime
import requests as _requests
import pandas as _pd
import numpy as _np
from . import utils
# import json as _json
# import re as _re
# import sys as _sys
from . import shared
class TickerBase():
def __init__(self, ticker):
self.ticker = ticker.upper()
self._history = None
self._base_url = 'https://query1.finance.yahoo.com'
self._scrape_url = 'https://finance.yahoo.com/quote'
self._fundamentals = False
self._info = None
self._sustainability = None
self._recommendations = None
self._major_holders = None
self._institutional_holders = None
self._calendar = None
self._expirations = {}
self._earnings = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._financials = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._balancesheet = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._cashflow = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
def history(self, period="1mo", interval="1d",
start=None, end=None, prepost=False, actions=True,
auto_adjust=True, back_adjust=False,
proxy=None, rounding=True, tz=None, **kwargs):
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either use the period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
Intraday data cannot extend past the last 60 days
start: str
Download start date string (YYYY-MM-DD) or _datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or _datetime.
Default is now
prepost : bool
Include Pre and Post market data in results?
Default is False
auto_adjust: bool
Adjust all OHLC automatically? Default is True
back_adjust: bool
Back-adjust data to mimic true historical prices
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
Round values to the decimal precision suggested by Yahoo! (priceHint)?
Optional. Default is True
tz: str
Optional timezone locale for dates.
(default data is returned as non-localized dates)
**kwargs: dict
debug: bool
Optional. If passed as False, will suppress
error message printing to console.
"""
if start or period is None or period.lower() == "max":
if start is None:
start = -2208988800
elif isinstance(start, _datetime.datetime):
start = int(_time.mktime(start.timetuple()))
else:
start = int(_time.mktime(
_time.strptime(str(start), '%Y-%m-%d')))
if end is None:
end = int(_time.time())
elif isinstance(end, _datetime.datetime):
end = int(_time.mktime(end.timetuple()))
else:
end = int(_time.mktime(_time.strptime(str(end), '%Y-%m-%d')))
params = {"period1": start, "period2": end}
else:
period = period.lower()
params = {"range": period}
params["interval"] = interval.lower()
params["includePrePost"] = prepost
params["events"] = "div,splits"
# 1) fix weird bug with Yahoo! - returning 60m for 30m bars
if params["interval"] == "30m":
params["interval"] = "15m"
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
# Getting data from json
url = "{}/v8/finance/chart/{}".format(self._base_url, self.ticker)
data = _requests.get(url=url, params=params, proxies=proxy)
if "Will be right back" in data.text:
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
"Our engineers are working quickly to resolve "
"the issue. Thank you for your patience.")
data = data.json()
# Work with errors
debug_mode = True
if "debug" in kwargs and isinstance(kwargs["debug"], bool):
debug_mode = kwargs["debug"]
err_msg = "No data found for this date range, symbol may be delisted"
if "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
elif "chart" not in data or data["chart"]["result"] is None or \
not data["chart"]["result"]:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
# parse quotes
try:
quotes = utils.parse_quotes(data["chart"]["result"][0], tz)
except Exception:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
# 2) fix weird bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
quotes2 = quotes.resample('30T')
quotes = _pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
except Exception:
pass
try:
quotes['Stock Splits'] = quotes2['Stock Splits'].max()
except Exception:
pass
if auto_adjust:
quotes = utils.auto_adjust(quotes)
elif back_adjust:
quotes = utils.back_adjust(quotes)
if rounding:
quotes = _np.round(quotes, data[
"chart"]["result"][0]["meta"]["priceHint"])
quotes['Volume'] = quotes['Volume'].fillna(0).astype(_np.int64)
quotes.dropna(inplace=True)
# actions
dividends, splits = utils.parse_actions(data["chart"]["result"][0], tz)
# combine
df = _pd.concat([quotes, dividends, splits], axis=1, sort=True)
df["Dividends"].fillna(0, inplace=True)
df["Stock Splits"].fillna(0, inplace=True)
# index eod/intraday
df.index = df.index.tz_localize("UTC").tz_convert(
data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
if params["interval"][-1] == "m":
df.index.name = "Datetime"
else:
df.index = _pd.to_datetime(df.index.date)
if tz is not None:
df.index = df.index.tz_localize(tz)
df.index.name = "Date"
self._history = df.copy()
if not actions:
df.drop(columns=["Dividends", "Stock Splits"], inplace=True)
return df
# ------------------------
def _get_fundamentals(self, kind=None, proxy=None):
def cleanup(data):
df = _pd.DataFrame(data).drop(columns=['maxAge'])
for col in df.columns:
df[col] = _np.where(
df[col].astype(str) == '-', _np.nan, df[col])
df.set_index('endDate', inplace=True)
try:
df.index = _pd.to_datetime(df.index, unit='s')
except ValueError:
df.index = _pd.to_datetime(df.index)
df = df.T
df.columns.name = ''
df.index.name = 'Breakdown'
df.index = utils.camel2title(df.index)
return df
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
if self._fundamentals:
return
# get info and sustainability
url = '%s/%s' % (self._scrape_url, self.ticker)
data = utils.get_json(url, proxy)
# holders
holders = _pd.read_html(url + '/holders')
self._major_holders = holders[0]
self._institutional_holders = holders[1]
self._institutional_holders['Date Reported'] = _pd.to_datetime(
self._institutional_holders['Date Reported'])
self._institutional_holders['% Out'] = self._institutional_holders[
'% Out'].str.replace('%', '').astype(float)/100
# sustainability
d = {}
if isinstance(data.get('esgScores'), dict):
for item in data['esgScores']:
if not isinstance(data['esgScores'][item], (dict, list)):
d[item] = data['esgScores'][item]
s = _pd.DataFrame(index=[0], data=d)[-1:].T
s.columns = ['Value']
s.index.name = '%.f-%.f' % (
s[s.index == 'ratingYear']['Value'].values[0],
s[s.index == 'ratingMonth']['Value'].values[0])
self._sustainability = s[~s.index.isin(
['maxAge', 'ratingYear', 'ratingMonth'])]
# info (be nice to python 2)
self._info = {}
items = ['summaryProfile', 'summaryDetail', 'quoteType',
'defaultKeyStatistics', 'assetProfile', 'summaryDetail']
for item in items:
if isinstance(data.get(item), dict):
self._info.update(data[item])
self._info['regularMarketPrice'] = self._info['regularMarketOpen']
self._info['logo_url'] = ""
try:
domain = self._info['website'].split(
'://')[1].split('/')[0].replace('www.', '')
self._info['logo_url'] = 'https://logo.clearbit.com/%s' % domain
except Exception:
pass
# events
try:
cal = _pd.DataFrame(
data['calendarEvents']['earnings'])
cal['earningsDate'] = _pd.to_datetime(
cal['earningsDate'], unit='s')
self._calendar = cal.T
self._calendar.index = utils.camel2title(self._calendar.index)
self._calendar.columns = ['Value']
except Exception:
pass
# analyst recommendations
try:
rec = _pd.DataFrame(
data['upgradeDowngradeHistory']['history'])
rec['earningsDate'] = _pd.to_datetime(
rec['epochGradeDate'], unit='s')
rec.set_index('earningsDate', inplace=True)
rec.index.name = 'Date'
rec.columns = utils.camel2title(rec.columns)
self._recommendations = rec[[
'Firm', 'To Grade', 'From Grade', 'Action']].sort_index()
except Exception:
pass
# get fundamentals
data = utils.get_json(url+'/financials', proxy)
# generic patterns
for key in (
(self._cashflow, 'cashflowStatement', 'cashflowStatements'),
(self._balancesheet, 'balanceSheet', 'balanceSheetStatements'),
(self._financials, 'incomeStatement', 'incomeStatementHistory')
):
item = key[1] + 'History'
if isinstance(data.get(item), dict):
key[0]['yearly'] = cleanup(data[item][key[2]])
item = key[1]+'HistoryQuarterly'
if isinstance(data.get(item), dict):
key[0]['quarterly'] = cleanup(data[item][key[2]])
# earnings
if isinstance(data.get('earnings'), dict):
earnings = data['earnings']['financialsChart']
df = _pd.DataFrame(earnings['yearly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Year'
self._earnings['yearly'] = df
df = _pd.DataFrame(earnings['quarterly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Quarter'
self._earnings['quarterly'] = df
self._fundamentals = True
def get_recommendations(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy=proxy)
data = self._recommendations
if as_dict:
return data.to_dict()
return data
def get_calendar(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy=proxy)
data = self._calendar
if as_dict:
return data.to_dict()
return data
def get_major_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy=proxy)
data = self._major_holders
if as_dict:
return data.to_dict()
return data
def get_institutional_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy=proxy)
data = self._institutional_holders
if as_dict:
return data.to_dict()
return data
def get_info(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy=proxy)
data = self._info
if as_dict:
return dict(data)  # self._info is already a plain dict
return data
def get_sustainability(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy=proxy)
data = self._sustainability
if as_dict:
return data.to_dict()
return data
def get_earnings(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy=proxy)
data = self._earnings[freq]
if as_dict:
return data.to_dict()
return data
def get_financials(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy=proxy)
data = self._financials[freq]
if as_dict:
return data.to_dict()
return data
def get_balancesheet(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy=proxy)
data = self._balancesheet[freq]
if as_dict:
return data.to_dict()
return data
def get_balance_sheet(self, proxy=None, as_dict=False, freq="yearly"):
return self.get_balancesheet(proxy, as_dict, freq)
def get_cashflow(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy=proxy)
data = self._cashflow[freq]
if as_dict:
return data.to_dict()
return data
def get_dividends(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
dividends = self._history["Dividends"]
return dividends[dividends != 0]
def get_splits(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
splits = self._history["Stock Splits"]
return splits[splits != 0]
def get_actions(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
actions = self._history[["Dividends", "Stock Splits"]]
return actions[actions != 0].dropna(how='all').fillna(0)
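

# Illustrative usage (added; not part of the original module). The public
# yfinance package wraps TickerBase as ``yfinance.Ticker``; the symbol below is
# an arbitrary example, and the snippet assumes the module is imported as part
# of its package (it relies on the relative imports above).
#
#     tb = TickerBase("MSFT")
#     hist = tb.history(period="1mo", interval="1d", auto_adjust=True)
#     print(hist.tail())
#     print(tb.get_dividends().tail())
#     print(tb.get_institutional_holders())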
|
the-stack_0_17216 | # Copyright (c) 2016-2021 The Regents of the University of Michigan
# This file is part of the General Simulation Data (GSD) project, released under
# the BSD 2-Clause License.
"""Read and write HOOMD schema GSD files.
The :py:mod:`gsd.hoomd` module is a reference implementation of the
GSD schema ``hoomd``. It is a simple but high-performance, memory-efficient
reader and writer for the schema. See :ref:`hoomd-examples`
for full examples.
* `open` - Open a hoomd schema GSD file.
* `HOOMDTrajectory` - Read and write hoomd schema GSD files.
* `Snapshot` - Store the state of a single frame.
* `ConfigurationData` - Store configuration data in a snapshot.
* `ParticleData` - Store particle data in a snapshot.
* `BondData` - Store topology data in a snapshot.
"""
import numpy
from collections import OrderedDict
import logging
import json
try:
from gsd import fl
except ImportError:
fl = None
try:
import gsd
except ImportError:
gsd = None
logger = logging.getLogger('gsd.hoomd')
class ConfigurationData(object):
"""Store configuration data.
Use the `Snapshot.configuration` attribute of a snapshot to access the configuration.
Attributes:
step (int): Time step of this frame (:chunk:`configuration/step`).
dimensions (int): Number of dimensions
(:chunk:`configuration/dimensions`). When not set explicitly,
dimensions will default to different values based on the value of
:math:`L_z` in `box`. When :math:`L_z = 0` dimensions will default
to 2, otherwise 3. User set values always take precedence.
"""
_default_value = OrderedDict()
_default_value['step'] = numpy.uint64(0)
_default_value['dimensions'] = numpy.uint8(3)
_default_value['box'] = numpy.array([1, 1, 1, 0, 0, 0], dtype=numpy.float32)
def __init__(self):
self.step = None
self.dimensions = None
self._box = None
@property
def box(self):
"""((6, 1) `numpy.ndarray` of ``numpy.float32``): Box dimensions \
(:chunk:`configuration/box`).
[lx, ly, lz, xy, xz, yz].
"""
return self._box
@box.setter
def box(self, box):
self._box = box
try:
Lz = box[2]
except TypeError:
return
else:
if self.dimensions is None:
self.dimensions = 2 if Lz == 0 else 3
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ConfigurationData')
if self.box is not None:
self.box = numpy.ascontiguousarray(self.box, dtype=numpy.float32)
self.box = self.box.reshape([6])
class ParticleData(object):
"""Store particle data chunks.
Use the `Snapshot.particles` attribute of a snapshot to access the particles.
Instances resulting from file read operations will always store array
quantities in `numpy.ndarray` objects of the defined types. User created
snapshots may provide input data that can be converted to a `numpy.ndarray`.
Attributes:
N (int): Number of particles in the snapshot (:chunk:`particles/N`).
types (`typing.List` [str]):
Names of the particle types (:chunk:`particles/types`).
position ((*N*, 3) `numpy.ndarray` of ``numpy.float32``):
Particle position (:chunk:`particles/position`).
orientation ((*N*, 4) `numpy.ndarray` of ``numpy.float32``):
Particle orientation. (:chunk:`particles/orientation`).
typeid ((*N*, ) `numpy.ndarray` of ``numpy.uint32``):
Particle type id (:chunk:`particles/typeid`).
mass ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Particle mass (:chunk:`particles/mass`).
charge ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Particle charge (:chunk:`particles/charge`).
diameter ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Particle diameter (:chunk:`particles/diameter`).
body ((*N*, ) `numpy.ndarray` of ``numpy.int32``):
Particle body (:chunk:`particles/body`).
moment_inertia ((*N*, 3) `numpy.ndarray` of ``numpy.float32``):
Particle moment of inertia (:chunk:`particles/moment_inertia`).
velocity ((*N*, 3) `numpy.ndarray` of ``numpy.float32``):
Particle velocity (:chunk:`particles/velocity`).
angmom ((*N*, 4) `numpy.ndarray` of ``numpy.float32``):
Particle angular momentum (:chunk:`particles/angmom`).
image ((*N*, 3) `numpy.ndarray` of ``numpy.int32``):
Particle image (:chunk:`particles/image`).
type_shapes (`typing.List` [`typing.Dict`]): Shape specifications for
visualizing particle types (:chunk:`particles/type_shapes`).
"""
_default_value = OrderedDict()
_default_value['N'] = numpy.uint32(0)
_default_value['types'] = ['A']
_default_value['typeid'] = numpy.uint32(0)
_default_value['mass'] = numpy.float32(1.0)
_default_value['charge'] = numpy.float32(0)
_default_value['diameter'] = numpy.float32(1.0)
_default_value['body'] = numpy.int32(-1)
_default_value['moment_inertia'] = numpy.array([0, 0, 0],
dtype=numpy.float32)
_default_value['position'] = numpy.array([0, 0, 0], dtype=numpy.float32)
_default_value['orientation'] = numpy.array([1, 0, 0, 0],
dtype=numpy.float32)
_default_value['velocity'] = numpy.array([0, 0, 0], dtype=numpy.float32)
_default_value['angmom'] = numpy.array([0, 0, 0, 0], dtype=numpy.float32)
_default_value['image'] = numpy.array([0, 0, 0], dtype=numpy.int32)
_default_value['type_shapes'] = [{}]
def __init__(self):
self.N = 0
self.position = None
self.orientation = None
self.types = None
self.typeid = None
self.mass = None
self.charge = None
self.diameter = None
self.body = None
self.moment_inertia = None
self.velocity = None
self.angmom = None
self.image = None
self.type_shapes = None
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ParticleData')
if self.position is not None:
self.position = numpy.ascontiguousarray(self.position,
dtype=numpy.float32)
self.position = self.position.reshape([self.N, 3])
if self.orientation is not None:
self.orientation = numpy.ascontiguousarray(self.orientation,
dtype=numpy.float32)
self.orientation = self.orientation.reshape([self.N, 4])
if self.typeid is not None:
self.typeid = numpy.ascontiguousarray(self.typeid,
dtype=numpy.uint32)
self.typeid = self.typeid.reshape([self.N])
if self.mass is not None:
self.mass = numpy.ascontiguousarray(self.mass, dtype=numpy.float32)
self.mass = self.mass.reshape([self.N])
if self.charge is not None:
self.charge = numpy.ascontiguousarray(self.charge,
dtype=numpy.float32)
self.charge = self.charge.reshape([self.N])
if self.diameter is not None:
self.diameter = numpy.ascontiguousarray(self.diameter,
dtype=numpy.float32)
self.diameter = self.diameter.reshape([self.N])
if self.body is not None:
self.body = numpy.ascontiguousarray(self.body, dtype=numpy.int32)
self.body = self.body.reshape([self.N])
if self.moment_inertia is not None:
self.moment_inertia = numpy.ascontiguousarray(self.moment_inertia,
dtype=numpy.float32)
self.moment_inertia = self.moment_inertia.reshape([self.N, 3])
if self.velocity is not None:
self.velocity = numpy.ascontiguousarray(self.velocity,
dtype=numpy.float32)
self.velocity = self.velocity.reshape([self.N, 3])
if self.angmom is not None:
self.angmom = numpy.ascontiguousarray(self.angmom,
dtype=numpy.float32)
self.angmom = self.angmom.reshape([self.N, 4])
if self.image is not None:
self.image = numpy.ascontiguousarray(self.image, dtype=numpy.int32)
self.image = self.image.reshape([self.N, 3])
class BondData(object):
"""Store bond data chunks.
Use the `Snapshot.bonds`, `Snapshot.angles`, `Snapshot.dihedrals`,
`Snapshot.impropers`, and `Snapshot.pairs` attributes to access the bonds.
Instances resulting from file read operations will always store array
quantities in `numpy.ndarray` objects of the defined types. User created
snapshots may provide input data that can be converted to a `numpy.ndarray`.
Note:
*M* varies depending on the type of bond. `BondData` represents all
types of bonds.
======== ===
Type *M*
======== ===
Bond 2
Angle 3
Dihedral 4
Improper 4
Pair 2
======== ===
Attributes:
N (int): Number of bonds in the snapshot
(:chunk:`bonds/N`, :chunk:`angles/N`, :chunk:`dihedrals/N`,
:chunk:`impropers/N`, :chunk:`pairs/N`).
types (`typing.List` [str]): Names of the bond types
(:chunk:`bonds/types`, :chunk:`angles/types`,
:chunk:`dihedrals/types`, :chunk:`impropers/types`,
:chunk:`pairs/types`).
typeid ((*N*, ) `numpy.ndarray` of ``numpy.uint32``):
Bond type id (:chunk:`bonds/typeid`,
:chunk:`angles/typeid`, :chunk:`dihedrals/typeid`,
:chunk:`impropers/typeid`, :chunk:`pairs/typeid`).
group ((*N*, *M*) `numpy.ndarray` of ``numpy.uint32``):
Tags of the particles in the bond (:chunk:`bonds/group`,
:chunk:`angles/group`, :chunk:`dihedrals/group`,
:chunk:`impropers/group`, :chunk:`pairs/group`).
"""
def __init__(self, M):
self.M = M
self.N = 0
self.types = None
self.typeid = None
self.group = None
self._default_value = OrderedDict()
self._default_value['N'] = numpy.uint32(0)
self._default_value['types'] = []
self._default_value['typeid'] = numpy.uint32(0)
self._default_value['group'] = numpy.array([0] * M, dtype=numpy.int32)
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating BondData')
if self.typeid is not None:
self.typeid = numpy.ascontiguousarray(self.typeid,
dtype=numpy.uint32)
self.typeid = self.typeid.reshape([self.N])
if self.group is not None:
self.group = numpy.ascontiguousarray(self.group, dtype=numpy.int32)
self.group = self.group.reshape([self.N, self.M])
class ConstraintData(object):
"""Store constraint data chunks.
Use the `Snapshot.constraints` attribute to access the constraints.
Instances resulting from file read operations will always store array
quantities in `numpy.ndarray` objects of the defined types. User created
snapshots may provide input data that can be converted to a `numpy.ndarray`.
Attributes:
N (int): Number of constraints in the snapshot (:chunk:`constraints/N`).
value ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Constraint length (:chunk:`constraints/value`).
group ((*N*, *2*) `numpy.ndarray` of ``numpy.uint32``):
Tags of the particles in the constraint
(:chunk:`constraints/group`).
"""
def __init__(self):
self.M = 2
self.N = 0
self.value = None
self.group = None
self._default_value = OrderedDict()
self._default_value['N'] = numpy.uint32(0)
self._default_value['value'] = numpy.float32(0)
self._default_value['group'] = numpy.array([0] * self.M,
dtype=numpy.int32)
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ConstraintData')
if self.value is not None:
self.value = numpy.ascontiguousarray(self.value,
dtype=numpy.float32)
self.value = self.value.reshape([self.N])
if self.group is not None:
self.group = numpy.ascontiguousarray(self.group, dtype=numpy.int32)
self.group = self.group.reshape([self.N, self.M])
class Snapshot(object):
"""Snapshot of a system state.
Attributes:
configuration (`ConfigurationData`): Configuration data.
particles (`ParticleData`): Particles.
bonds (`BondData`): Bonds.
angles (`BondData`): Angles.
dihedrals (`BondData`): Dihedrals.
impropers (`BondData`): Impropers.
pairs (`BondData`): Special pairs.
constraints (`ConstraintData`): Distance constraints.
state (typing.Dict): State data.
log (typing.Dict): Logged data (values must be `numpy.ndarray` or
`array_like`)
"""
def __init__(self):
self.configuration = ConfigurationData()
self.particles = ParticleData()
self.bonds = BondData(2)
self.angles = BondData(3)
self.dihedrals = BondData(4)
self.impropers = BondData(4)
self.constraints = ConstraintData()
self.pairs = BondData(2)
self.state = {}
self.log = {}
self._valid_state = [
'hpmc/integrate/d',
'hpmc/integrate/a',
'hpmc/sphere/radius',
'hpmc/sphere/orientable',
'hpmc/ellipsoid/a',
'hpmc/ellipsoid/b',
'hpmc/ellipsoid/c',
'hpmc/convex_polyhedron/N',
'hpmc/convex_polyhedron/vertices',
'hpmc/convex_spheropolyhedron/N',
'hpmc/convex_spheropolyhedron/vertices',
'hpmc/convex_spheropolyhedron/sweep_radius',
'hpmc/convex_polygon/N',
'hpmc/convex_polygon/vertices',
'hpmc/convex_spheropolygon/N',
'hpmc/convex_spheropolygon/vertices',
'hpmc/convex_spheropolygon/sweep_radius',
'hpmc/simple_polygon/N',
'hpmc/simple_polygon/vertices',
]
def validate(self):
"""Validate all contained snapshot data."""
logger.debug('Validating Snapshot')
self.configuration.validate()
self.particles.validate()
self.bonds.validate()
self.angles.validate()
self.dihedrals.validate()
self.impropers.validate()
self.constraints.validate()
self.pairs.validate()
# validate HPMC state
if self.particles.types is not None:
NT = len(self.particles.types)
else:
NT = 1
if 'hpmc/integrate/d' in self.state:
self.state['hpmc/integrate/d'] = \
numpy.ascontiguousarray(self.state['hpmc/integrate/d'],
dtype=numpy.float64)
self.state['hpmc/integrate/d'] = \
self.state['hpmc/integrate/d'].reshape([1])
if 'hpmc/integrate/a' in self.state:
self.state['hpmc/integrate/a'] = \
numpy.ascontiguousarray(self.state['hpmc/integrate/a'],
dtype=numpy.float64)
self.state['hpmc/integrate/a'] = \
self.state['hpmc/integrate/a'].reshape([1])
if 'hpmc/sphere/radius' in self.state:
self.state['hpmc/sphere/radius'] = \
numpy.ascontiguousarray(self.state['hpmc/sphere/radius'],
dtype=numpy.float32)
self.state['hpmc/sphere/radius'] = \
self.state['hpmc/sphere/radius'].reshape([NT])
if 'hpmc/sphere/orientable' in self.state:
self.state['hpmc/sphere/orientable'] = \
numpy.ascontiguousarray(self.state['hpmc/sphere/orientable'],
dtype=numpy.uint8)
self.state['hpmc/sphere/orientable'] = \
self.state['hpmc/sphere/orientable'].reshape([NT])
if 'hpmc/ellipsoid/a' in self.state:
self.state['hpmc/ellipsoid/a'] = \
numpy.ascontiguousarray(self.state['hpmc/ellipsoid/a'],
dtype=numpy.float32)
self.state['hpmc/ellipsoid/a'] = \
self.state['hpmc/ellipsoid/a'].reshape([NT])
self.state['hpmc/ellipsoid/b'] = \
numpy.ascontiguousarray(self.state['hpmc/ellipsoid/b'],
dtype=numpy.float32)
self.state['hpmc/ellipsoid/b'] = \
self.state['hpmc/ellipsoid/b'].reshape([NT])
self.state['hpmc/ellipsoid/c'] = \
numpy.ascontiguousarray(self.state['hpmc/ellipsoid/c'],
dtype=numpy.float32)
self.state['hpmc/ellipsoid/c'] = \
self.state['hpmc/ellipsoid/c'].reshape([NT])
if 'hpmc/convex_polyhedron/N' in self.state:
self.state['hpmc/convex_polyhedron/N'] = \
numpy.ascontiguousarray(self.state['hpmc/convex_polyhedron/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_polyhedron/N'] = \
self.state['hpmc/convex_polyhedron/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_polyhedron/N'])
self.state['hpmc/convex_polyhedron/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_polyhedron/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_polyhedron/vertices'] = \
self.state['hpmc/convex_polyhedron/vertices'].reshape([sumN, 3])
if 'hpmc/convex_spheropolyhedron/N' in self.state:
self.state['hpmc/convex_spheropolyhedron/N'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolyhedron/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_spheropolyhedron/N'] = \
self.state['hpmc/convex_spheropolyhedron/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_spheropolyhedron/N'])
self.state['hpmc/convex_spheropolyhedron/sweep_radius'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolyhedron/sweep_radius'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolyhedron/sweep_radius'] = \
self.state[
'hpmc/convex_spheropolyhedron/sweep_radius'].reshape([NT])
self.state['hpmc/convex_spheropolyhedron/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolyhedron/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolyhedron/vertices'] = \
self.state[
'hpmc/convex_spheropolyhedron/vertices'].reshape([sumN, 3])
if 'hpmc/convex_polygon/N' in self.state:
self.state['hpmc/convex_polygon/N'] = \
numpy.ascontiguousarray(self.state['hpmc/convex_polygon/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_polygon/N'] = \
self.state['hpmc/convex_polygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_polygon/N'])
self.state['hpmc/convex_polygon/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_polygon/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_polygon/vertices'] = \
self.state['hpmc/convex_polygon/vertices'].reshape([sumN, 2])
if 'hpmc/convex_spheropolygon/N' in self.state:
self.state['hpmc/convex_spheropolygon/N'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolygon/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_spheropolygon/N'] = \
self.state['hpmc/convex_spheropolygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_spheropolygon/N'])
self.state['hpmc/convex_spheropolygon/sweep_radius'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolygon/sweep_radius'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolygon/sweep_radius'] = \
self.state[
'hpmc/convex_spheropolygon/sweep_radius'].reshape([NT])
self.state['hpmc/convex_spheropolygon/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolygon/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolygon/vertices'] = \
self.state[
'hpmc/convex_spheropolygon/vertices'].reshape([sumN, 2])
if 'hpmc/simple_polygon/N' in self.state:
self.state['hpmc/simple_polygon/N'] = \
numpy.ascontiguousarray(self.state['hpmc/simple_polygon/N'],
dtype=numpy.uint32)
self.state['hpmc/simple_polygon/N'] = \
self.state['hpmc/simple_polygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/simple_polygon/N'])
self.state['hpmc/simple_polygon/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/simple_polygon/vertices'],
dtype=numpy.float32)
self.state['hpmc/simple_polygon/vertices'] = \
self.state[
'hpmc/simple_polygon/vertices'].reshape([sumN, 2])
for k in self.state:
if k not in self._valid_state:
raise RuntimeError('Not a valid state: ' + k)
class _HOOMDTrajectoryIterable(object):
"""Iterable over a HOOMDTrajectory object."""
def __init__(self, trajectory, indices):
self._trajectory = trajectory
self._indices = indices
self._indices_iterator = iter(indices)
def __next__(self):
return self._trajectory[next(self._indices_iterator)]
next = __next__ # Python 2.7 compatibility
def __iter__(self):
return type(self)(self._trajectory, self._indices)
def __len__(self):
return len(self._indices)
class _HOOMDTrajectoryView(object):
"""A view of a HOOMDTrajectory object.
Enables the slicing and iteration over a subset of a trajectory
instance.
"""
def __init__(self, trajectory, indices):
self._trajectory = trajectory
self._indices = indices
def __iter__(self):
return _HOOMDTrajectoryIterable(self._trajectory, self._indices)
def __len__(self):
return len(self._indices)
def __getitem__(self, key):
if isinstance(key, slice):
return type(self)(self._trajectory, self._indices[key])
else:
return self._trajectory[self._indices[key]]
class HOOMDTrajectory(object):
"""Read and write hoomd gsd files.
Args:
file (`gsd.fl.GSDFile`): File to access.
Open hoomd GSD files with `open`.
"""
def __init__(self, file):
if file.mode == 'ab':
raise ValueError('Append mode not yet supported')
self._file = file
self._initial_frame = None
logger.info('opening HOOMDTrajectory: ' + str(self.file))
if self.file.schema != 'hoomd':
raise RuntimeError('GSD file is not a hoomd schema file: '
+ str(self.file))
valid = False
version = self.file.schema_version
if (version < (2, 0) and version >= (1, 0)):
valid = True
if not valid:
raise RuntimeError('Incompatible hoomd schema version '
+ str(version) + ' in: ' + str(self.file))
logger.info('found ' + str(len(self)) + ' frames')
@property
def file(self):
""":class:`gsd.fl.GSDFile`: The underlying file handle."""
return self._file
def __len__(self):
"""The number of frames in the trajectory."""
return self.file.nframes
def append(self, snapshot):
"""Append a snapshot to a hoomd gsd file.
Args:
snapshot (:py:class:`Snapshot`): Snapshot to append.
Write the given snapshot to the file at the current frame and increase
the frame counter. Do not write any fields that are ``None``. For all
non-``None`` fields, scan them and see if they match the initial frame
or the default value. If the given data differs, write it out to the
frame. If it is the same, do not write it out as it can be instantiated
either from the value at the initial frame or the default value.
"""
logger.debug('Appending snapshot to hoomd trajectory: '
+ str(self.file))
snapshot.validate()
# want the initial frame specified as a reference to detect if chunks
# need to be written
if self._initial_frame is None and len(self) > 0:
self.read_frame(0)
for path in [
'configuration',
'particles',
'bonds',
'angles',
'dihedrals',
'impropers',
'constraints',
'pairs',
]:
container = getattr(snapshot, path)
for name in container._default_value:
if self._should_write(path, name, snapshot):
logger.debug('writing data chunk: ' + path + '/' + name)
data = getattr(container, name)
if name == 'N':
data = numpy.array([data], dtype=numpy.uint32)
if name == 'step':
data = numpy.array([data], dtype=numpy.uint64)
if name == 'dimensions':
data = numpy.array([data], dtype=numpy.uint8)
if name in ('types', 'type_shapes'):
if name == 'type_shapes':
data = [
json.dumps(shape_dict) for shape_dict in data
]
wid = max(len(w) for w in data) + 1
b = numpy.array(data, dtype=numpy.dtype((bytes, wid)))
data = b.view(dtype=numpy.int8).reshape(len(b), wid)
self.file.write_chunk(path + '/' + name, data)
# write state data
for state, data in snapshot.state.items():
self.file.write_chunk('state/' + state, data)
# write log data
for log, data in snapshot.log.items():
self.file.write_chunk('log/' + log, data)
self.file.end_frame()
def truncate(self):
"""Remove all frames from the file."""
self.file.truncate()
self._initial_frame = None
def close(self):
"""Close the file."""
self.file.close()
del self._initial_frame
def _should_write(self, path, name, snapshot):
"""Test if we should write a given data chunk.
Args:
path (str): Path part of the data chunk.
name (str): Name part of the data chunk.
snapshot (:py:class:`Snapshot`): Snapshot data is from.
Returns:
False if the data matches that in the initial frame. False
if the data matches all default values. True otherwise.
"""
container = getattr(snapshot, path)
data = getattr(container, name)
if data is None:
return False
if self._initial_frame is not None:
initial_container = getattr(self._initial_frame, path)
initial_data = getattr(initial_container, name)
if numpy.array_equal(initial_data, data):
logger.debug('skipping data chunk, matches frame 0: ' + path
+ '/' + name)
return False
if numpy.array_equiv(data, container._default_value[name]):
logger.debug('skipping data chunk, default value: ' + path + '/'
+ name)
return False
return True
def extend(self, iterable):
"""Append each item of the iterable to the file.
Args:
iterable: An iterable object the provides :py:class:`Snapshot`
instances. This could be another HOOMDTrajectory, a generator
that modifies snapshots, or a simple list of snapshots.
"""
for item in iterable:
self.append(item)
def read_frame(self, idx):
"""Read the frame at the given index from the file.
Args:
idx (int): Frame index to read.
Returns:
`Snapshot` with the frame data
Replace any data chunks not present in the given frame with either data
from frame 0, or initialize from default values if not in frame 0. Cache
frame 0 data to avoid file read overhead. Return any default data as
non-writable numpy arrays.
"""
if idx >= len(self):
raise IndexError
logger.debug('reading frame ' + str(idx) + ' from: ' + str(self.file))
if self._initial_frame is None and idx != 0:
self.read_frame(0)
snap = Snapshot()
# read configuration first
if self.file.chunk_exists(frame=idx, name='configuration/step'):
step_arr = self.file.read_chunk(frame=idx,
name='configuration/step')
snap.configuration.step = step_arr[0]
else:
if self._initial_frame is not None:
snap.configuration.step = self._initial_frame.configuration.step
else:
snap.configuration.step = \
snap.configuration._default_value['step']
if self.file.chunk_exists(frame=idx, name='configuration/dimensions'):
dimensions_arr = self.file.read_chunk(
frame=idx, name='configuration/dimensions')
snap.configuration.dimensions = dimensions_arr[0]
else:
if self._initial_frame is not None:
snap.configuration.dimensions = \
self._initial_frame.configuration.dimensions
else:
snap.configuration.dimensions = \
snap.configuration._default_value['dimensions']
if self.file.chunk_exists(frame=idx, name='configuration/box'):
snap.configuration.box = self.file.read_chunk(
frame=idx, name='configuration/box')
else:
if self._initial_frame is not None:
snap.configuration.box = self._initial_frame.configuration.box
else:
snap.configuration.box = \
snap.configuration._default_value['box']
# then read all groups that have N, types, etc...
for path in [
'particles',
'bonds',
'angles',
'dihedrals',
'impropers',
'constraints',
'pairs',
]:
container = getattr(snap, path)
if self._initial_frame is not None:
initial_frame_container = getattr(self._initial_frame, path)
container.N = 0
if self.file.chunk_exists(frame=idx, name=path + '/N'):
N_arr = self.file.read_chunk(frame=idx, name=path + '/N')
container.N = N_arr[0]
else:
if self._initial_frame is not None:
container.N = initial_frame_container.N
# type names
if 'types' in container._default_value:
if self.file.chunk_exists(frame=idx, name=path + '/types'):
tmp = self.file.read_chunk(frame=idx, name=path + '/types')
tmp = tmp.view(dtype=numpy.dtype((bytes, tmp.shape[1])))
tmp = tmp.reshape([tmp.shape[0]])
container.types = list(a.decode('UTF-8') for a in tmp)
else:
if self._initial_frame is not None:
container.types = initial_frame_container.types
else:
container.types = container._default_value['types']
# type shapes
if ('type_shapes' in container._default_value
and path == 'particles'):
if self.file.chunk_exists(frame=idx,
name=path + '/type_shapes'):
tmp = self.file.read_chunk(frame=idx,
name=path + '/type_shapes')
tmp = tmp.view(dtype=numpy.dtype((bytes, tmp.shape[1])))
tmp = tmp.reshape([tmp.shape[0]])
container.type_shapes = \
list(json.loads(json_string.decode('UTF-8'))
for json_string in tmp)
else:
if self._initial_frame is not None:
container.type_shapes = \
initial_frame_container.type_shapes
else:
container.type_shapes = \
container._default_value['type_shapes']
for name in container._default_value:
if name in ('N', 'types', 'type_shapes'):
continue
# per particle/bond quantities
if self.file.chunk_exists(frame=idx, name=path + '/' + name):
container.__dict__[name] = self.file.read_chunk(
frame=idx, name=path + '/' + name)
else:
if (self._initial_frame is not None
and initial_frame_container.N == container.N):
# read default from initial frame
container.__dict__[name] = \
initial_frame_container.__dict__[name]
else:
# initialize from default value
tmp = numpy.array([container._default_value[name]])
s = list(tmp.shape)
s[0] = container.N
container.__dict__[name] = numpy.empty(shape=s,
dtype=tmp.dtype)
container.__dict__[name][:] = tmp
container.__dict__[name].flags.writeable = False
# read state data
for state in snap._valid_state:
if self.file.chunk_exists(frame=idx, name='state/' + state):
snap.state[state] = self.file.read_chunk(frame=idx,
name='state/' + state)
# read log data
logged_data_names = self.file.find_matching_chunk_names('log/')
for log in logged_data_names:
if self.file.chunk_exists(frame=idx, name=log):
snap.log[log[4:]] = self.file.read_chunk(frame=idx, name=log)
else:
if self._initial_frame is not None:
snap.log[log[4:]] = self._initial_frame.log[log[4:]]
# store initial frame
if self._initial_frame is None and idx == 0:
self._initial_frame = snap
return snap
def __getitem__(self, key):
"""Index trajectory frames.
The index can be a positive integer, negative integer, or slice and is
interpreted the same as `list` indexing.
Warning:
As you loop over frames, each frame is read from the file when it is
reached in the iteration. Multiple passes may lead to multiple disk
reads if the file does not fit in cache.
"""
if isinstance(key, slice):
return _HOOMDTrajectoryView(self, range(*key.indices(len(self))))
elif isinstance(key, int):
if key < 0:
key += len(self)
if key >= len(self) or key < 0:
raise IndexError()
return self.read_frame(key)
else:
raise TypeError
def __iter__(self):
"""Iterate over HOOMD trajectories."""
return _HOOMDTrajectoryIterable(self, range(len(self)))
def __enter__(self):
"""Enter the context manager."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Close the file when the context manager exits."""
self.file.close()
def open(name, mode='rb'):
"""Open a hoomd schema GSD file.
The return value of `open` can be used as a context manager.
Args:
name (str): File name to open.
mode (str): File open mode.
Returns:
An `HOOMDTrajectory` instance that accesses the file *name* with the
given mode.
Valid values for mode:
+------------------+---------------------------------------------+
| mode | description |
+==================+=============================================+
| ``'rb'`` | Open an existing file for reading. |
+------------------+---------------------------------------------+
| ``'rb+'`` | Open an existing file for reading and |
| | writing. |
+------------------+---------------------------------------------+
| ``'wb'`` | Open a file for writing. Creates the file |
| | if needed, or overwrites an existing file. |
+------------------+---------------------------------------------+
| ``'wb+'`` | Open a file for reading and writing. |
| | Creates the file if needed, or overwrites |
| | an existing file. |
+------------------+---------------------------------------------+
| ``'xb'`` | Create a gsd file exclusively and opens it |
| | for writing. |
| | Raise an :py:exc:`FileExistsError` |
| | exception if it already exists. |
+------------------+---------------------------------------------+
| ``'xb+'`` | Create a gsd file exclusively and opens it |
| | for reading and writing. |
| | Raise an :py:exc:`FileExistsError` |
| | exception if it already exists. |
+------------------+---------------------------------------------+
| ``'ab'`` | Open an existing file for writing. |
| | Does *not* create or overwrite existing |
| | files. |
+------------------+---------------------------------------------+
"""
if fl is None:
raise RuntimeError("file layer module is not available")
if gsd is None:
raise RuntimeError("gsd module is not available")
gsdfileobj = fl.open(name=str(name),
mode=mode,
application='gsd.hoomd ' + gsd.__version__,
schema='hoomd',
schema_version=[1, 4])
return HOOMDTrajectory(gsdfileobj)
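

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration): write a one-frame hoomd
    # schema file with the module-level `open` defined above, then read it
    # back. The file name is arbitrary and the snippet assumes the compiled
    # gsd.fl file layer is installed.
    snap = Snapshot()
    snap.particles.N = 2
    snap.particles.types = ['A']
    snap.particles.position = [[0, 0, 0], [1, 0, 0]]
    snap.configuration.box = [10, 10, 10, 0, 0, 0]
    with open('example.gsd', 'wb') as traj:
        traj.append(snap)
    with open('example.gsd', 'rb') as traj:
        frame = traj.read_frame(0)
        print(frame.particles.N, frame.particles.position)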
|
the-stack_0_17217 | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2016, Clearpath Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import subprocess
import sys
import threading
import time
try:
import queue
except ImportError:
import Queue as queue
CONCURRENT_DEFAULT = 16
def freeze_distribution_sources(dist, release_version=False, release_tag=False,
concurrent_ops=CONCURRENT_DEFAULT, quiet=False):
# Populate this queue with tuples of repositories instances to be updated,
# so that this work can be spread across multiple threads.
work_queue = queue.Queue()
for repo_name, repo in dist.repositories.items():
# Only manipulate distribution entries with a source repo listed.
if repo.source_repository:
# Decide which git ref string we'll be using as the replacement match.
if repo.release_repository and (release_version or release_tag):
version = repo.release_repository.version.split('-')[0]
else:
version = repo.source_repository.version
work_queue.put((repo.source_repository, version, release_tag))
total_items = work_queue.qsize()
for i in range(concurrent_ops):
threading.Thread(target=_worker, args=[work_queue]).start()
# Wait until the threads have done all the work and exited.
while not work_queue.empty():
time.sleep(0.1)
if not quiet:
sys.stdout.write("Updating source repo versions (%d/%d) \r" %
(total_items - work_queue.qsize(), total_items))
sys.stdout.flush()
work_queue.join()
# Clear past the updating line.
if not quiet:
print("")
# Get the repo commit information
def _get_repo_info(url, retry=2, retry_period=1):
cmd = ['git', 'ls-remote', url]
try:
return subprocess.check_output(cmd).splitlines()
except subprocess.CalledProcessError as err:
if not retry:
raise
print(' Non-zero return code for: %s, retrying in %f seconds' %
(' '.join(cmd), retry_period), file=sys.stderr)
# brief delay in case it's an intermittent issue with infrastructure
time.sleep(retry_period)
return _get_repo_info(url, retry=retry - 1, retry_period=retry_period * 2)
def _worker(work_queue):
while True:
try:
source_repo, freeze_version, freeze_to_tag = work_queue.get(block=False)
ls_remote_lines = _get_repo_info(source_repo.url)
for line in ls_remote_lines:
hash, ref = line.split('\t', 1)
if freeze_to_tag and ref == 'refs/tags/%s' % freeze_version:
source_repo.version = ref.split('refs/tags/')[1]
break
elif ref in ('refs/heads/%s' % freeze_version, 'refs/tags/%s' % freeze_version):
source_repo.version = hash
break
work_queue.task_done()
except subprocess.CalledProcessError as e:
print("No information could be retrieved for repo %s with error: %s" %
(source_repo.url, e), file=sys.stderr)
work_queue.task_done()
except queue.Empty:
break
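

if __name__ == '__main__':
    # Small illustration (added): the ref-matching logic used in _worker,
    # applied to a hand-written line in `git ls-remote` output format
    # (<hash>\t<ref>). The hash and tag below are made up.
    line = 'd670460b4b4aece5915caf5c68d12f560a9fe3e4\trefs/tags/1.2.3'
    commit_hash, ref = line.split('\t', 1)
    freeze_version = '1.2.3'
    if ref == 'refs/tags/%s' % freeze_version:
        # release_tag behaviour: freeze to the tag name itself
        print('frozen to tag:', ref.split('refs/tags/')[1])
    if ref in ('refs/heads/%s' % freeze_version, 'refs/tags/%s' % freeze_version):
        # default behaviour: freeze to the commit hash
        print('frozen to hash:', commit_hash)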
|
the-stack_0_17218 | import tensorflow as tf
import numpy as np
from .gradients import GradientAttribution
class IntegratedGradients(GradientAttribution):
def GetMask(self, x_value, feed_dict={}, x_baseline=None, x_steps=25):
if x_baseline is None:
x_baseline = np.zeros_like(x_value)
assert x_baseline.shape == x_value.shape
x_diff = x_value - x_baseline
total_gradients = np.zeros_like(x_value)
for alpha in np.linspace(0, 1, x_steps):
x_step = x_baseline + alpha * x_diff
total_gradients += super().GetMask(x_step, feed_dict)
return total_gradients * x_diff / x_steps
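

if __name__ == "__main__":
    # Self-contained check (added for illustration; independent of the class
    # above, and only runnable when the module is executed within its package):
    # the same Riemann approximation applied to f(x) = sum(x ** 2), whose
    # gradient 2 * x is known analytically. Integrated gradients should satisfy
    # the completeness property sum(attributions) ~= f(x) - f(baseline).
    x_value = np.array([1.0, 2.0, 3.0])
    x_baseline = np.zeros_like(x_value)
    x_steps = 100
    x_diff = x_value - x_baseline
    total_gradients = np.zeros_like(x_value)
    for alpha in np.linspace(0, 1, x_steps):
        x_step = x_baseline + alpha * x_diff
        total_gradients += 2.0 * x_step  # analytic gradient of sum(x ** 2)
    attributions = total_gradients * x_diff / x_steps
    print(attributions.sum(), (x_value ** 2).sum() - (x_baseline ** 2).sum())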
|
the-stack_0_17219 | """Tests for runway.cfngin.hooks.iam."""
import unittest
import boto3
from awacs.helpers.trust import get_ecs_assumerole_policy
from botocore.exceptions import ClientError
from moto import mock_iam
from runway.cfngin.hooks.iam import _get_cert_arn_from_response, create_ecs_service_role
from ..factories import mock_context, mock_provider
REGION = "us-east-1"
# No test for runway.cfngin.hooks.iam.ensure_server_cert_exists until an
# updated version of moto (https://github.com/spulec/moto/pull/679) is merged
class TestIAMHooks(unittest.TestCase):
"""Tests for runway.cfngin.hooks.iam."""
def setUp(self):
"""Run before tests."""
self.context = mock_context(namespace="fake")
self.provider = mock_provider(region=REGION)
def test_get_cert_arn_from_response(self):
"""Test get cert arn from response."""
arn = "fake-arn"
# Creation response
response = {"ServerCertificateMetadata": {"Arn": arn}}
self.assertEqual(_get_cert_arn_from_response(response), arn)
# Existing cert response
response = {"ServerCertificate": response}
self.assertEqual(_get_cert_arn_from_response(response), arn)
def test_create_service_role(self):
"""Test create service role."""
with mock_iam():
client = boto3.client("iam", region_name=REGION)
role_name = "ecsServiceRole"
with self.assertRaises(ClientError):
client.get_role(RoleName=role_name)
self.assertTrue(
create_ecs_service_role(context=self.context, provider=self.provider,)
)
role = client.get_role(RoleName=role_name)
self.assertIn("Role", role)
self.assertEqual(role_name, role["Role"]["RoleName"])
policy_name = "AmazonEC2ContainerServiceRolePolicy"
client.get_role_policy(RoleName=role_name, PolicyName=policy_name)
def test_create_service_role_already_exists(self):
"""Test create service role already exists."""
with mock_iam():
client = boto3.client("iam", region_name=REGION)
role_name = "ecsServiceRole"
client.create_role(
RoleName=role_name,
AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json(),
)
self.assertTrue(
create_ecs_service_role(context=self.context, provider=self.provider,)
)
role = client.get_role(RoleName=role_name)
self.assertIn("Role", role)
self.assertEqual(role_name, role["Role"]["RoleName"])
policy_name = "AmazonEC2ContainerServiceRolePolicy"
client.get_role_policy(RoleName=role_name, PolicyName=policy_name)
|
the-stack_0_17220 | from multiprocessing import Pool
import torch.utils
import torch.utils.data
from data_utils import indexed_dataset
import torch
import os
import re
import pdb
from data_utils.tokenization import BertWordPieceTokenizer
key_word = {
"…":"...",
"—":"-",
"“":"\"",
"”":"\"",
"‘":"'",
"’":"'"
}
SPECIAL_SIGNAL = "./';,\(\)\"\"'~`''“”《》<>"
def cut_sentence(paragraph):
paragraph = paragraph.replace(" ", "")
sentences = re.split('(。|!|\!|?|\?)', paragraph)  # keep the delimiters
if len(sentences) == 1:
return [sentences[0]]
new_sents = []
for i in range(int(len(sentences)/2)):
sent = sentences[2*i] + sentences[2*i+1]
if len(new_sents) != 0 and (sent[0] in SPECIAL_SIGNAL or len(new_sents[-1]) < 20):
new_sents[-1] += sent
else:
new_sents.append(sent)
sent = sentences[-1]
if len(sentences) % 2 == 1 and len(sent) > 0:
if len(new_sents) != 0 and (sent[0] in SPECIAL_SIGNAL or len(new_sents[-1]) < 20):
new_sents[-1] += sent
else:
new_sents.append(sent)
return new_sents
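# Behaviour note (added): cut_sentence drops spaces, splits on sentence-final
# punctuation (。 ! ?) while keeping the delimiters, and then merges a piece
# back into the previous sentence when it starts with a quote/punctuation
# character from SPECIAL_SIGNAL or when the previously accumulated sentence is
# still shorter than 20 characters.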
def replace_text(text):
for key,value in key_word.items():
text = re.sub(key, value, text)
return text
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
def read_split(
filename, tokenizer, worker_id, num_workers, type_doc, min_lens=10
):
with open(filename, 'r') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
result = []
line = f.readline()
while line:
line = replace_text(line)
ids = tokenizer.convert_text_to_ids(line)
ids = ids[:509]
if len(ids) >= min_lens:
ids = [type_doc]+ids
result.append(ids)
if f.tell() > end:
break
line = f.readline()
return result
def merge_multi_line(
filename, tokenizer, worker_id, num_workers, type_doc, min_lens=10
):
with open(filename, 'r') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
eos_id = tokenizer.eos()
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
result = []
line = f.readline()
tmp_ids = []
while line:
line = replace_text(line)
ids = tokenizer.convert_text_to_ids(line)+[eos_id]
# tmp_ids.extend(ids)
if len(tmp_ids) + len(ids) > 511:
ids_cur = tmp_ids[:511]
if ids_cur[0] == eos_id:
ids_cur[0] = type_doc
else:
ids_cur = [type_doc] + ids_cur
if ids_cur[-1] == eos_id:
ids_cur.pop()
ids_cur = ids_cur[:511]
result.append(ids_cur)
tmp_ids = tmp_ids[511:]
if len(tmp_ids) + len(ids) < 511:
tmp_ids += ids
else:
tmp_ids = ids[-511:]
else:
tmp_ids.extend(ids)
if f.tell() > end:
break
line = f.readline()
return result
def main_multi_task(args):
from argparse import ArgumentParser
parser = ArgumentParser()
# parser.add_argument("--tokenizer", type=str, help="where to load vocabulary")
parser.add_argument("--data", type=str)
parser.add_argument("--out", type=str, help="output path")
parser.add_argument("--prefix", type=str, default="train")
parser.add_argument("--workers", type=int, default=6)
parser.add_argument("--task", type=str, choices=['single', 'multi'], default="single")
args = parser.parse_args(args)
tokenizer = BertWordPieceTokenizer("bert-base-chinese", cache_dir="temp_cache_dir")
data_bin = os.path.join(args.out, "{}-CLM.bin".format(args.prefix))
data_idx = os.path.join(args.out, "{}-CLM.idx".format(args.prefix))
data_ds = indexed_dataset.IndexedDatasetBuilder(data_bin)
def consume(worker_result):
for ids in worker_result:
data_ds.add_item(torch.IntTensor(ids)
)
pool = Pool(processes=args.workers)
worker_result = []
if args.task == "single":
handle_func = read_split
elif args.task == "multi":
handle_func = merge_multi_line
for i in range(args.workers):
w = pool.apply_async(
handle_func,
(
args.data,
tokenizer,
i,
args.workers,
0,
10
),
callback=consume
)
worker_result.append(w)
pool.close()
pool.join()
data_ds.finalize(data_idx)
print("| write data into {}".format(args.out))
if __name__ == "__main__":
import sys
main_multi_task(sys.argv[1:])
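# Example invocation (added; the script and corpus file names are placeholders):
#   python preprocess_clm.py --data corpus.txt --out ./out --prefix train \
#       --workers 6 --task multi
# which tokenizes the corpus with the bert-base-chinese vocabulary and writes
# ./out/train-CLM.bin plus ./out/train-CLM.idx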
|
the-stack_0_17221 | # insertion sort practice
# exercises from introductions to algorithms 3rd edition
import numpy as np
import math
A = np.array([15, 26, 34, 31, 57, 93, 27])
# ascending order, 2.1
for j in range(1, np.size(A)):
key = A[j]
i = j - 1
while (i > -1) and (A[i] > key):
A[i+1] = A[i]
i = i-1
A[i+1] = key
print('Ascending order A is', A)
# descending order, exercise 2.1-2
for j in range(1, np.size(A)):
key = A[j]
i = j - 1
while (i > -1) and (A[i] < key):
A[i+1] = A[i]
i = i-1
A[i+1] = key
print('Descending order A is', A)
# find the index of a number v from A, exercise 2.1-3
v = 32
quitflag = 0
for j in range(np.size(A)):
key = v
if A[j] == key:
quitflag = 1
break
if quitflag == 0:
print('v is not in A')
else:
print('index is', j+1)
# or, if defined as a function, use return instead of quitflag (sketch below)
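
# Added sketch of the function variant suggested above: return the 1-based
# index of v in B, or None when v is absent.
def linear_search(B, v):
    for j in range(np.size(B)):
        if B[j] == v:
            return j + 1
    return None

print('linear_search(A, 57) ->', linear_search(A, 57))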
# sum of two n-bit binary integers, 2.1-4
A = [1, 0, 1, 0, 1, 0, 1]
B = [1, 1, 1, 0, 0, 1, 0]
C = []
n = len(A)
carry = 0
for i in range(n-1, -1, -1):
C.append((A[i] + B[i] + carry) % 2)
carry = math.floor((A[i] + B[i] + carry) / 2)
C.append(carry)
C.reverse()
print('C is', C)
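# quick check (added): interpret A, B, C as big-endian binary numbers and
# verify that the bitwise addition above is consistent
to_int = lambda bits: int(''.join(str(b) for b in bits), 2)
print('sum is consistent:', to_int(A) + to_int(B) == to_int(C))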
|
the-stack_0_17222 | import numpy
import os
from grocsvs import datasets as svdatasets
from grocsvs import step
from grocsvs import utilities
from grocsvs.stages import call_readclouds
CHUNKSIZE = 5e7
def chunks_for_chrom(options, chrom):
return int(numpy.ceil(options.reference.chrom_lengths[chrom]/CHUNKSIZE))
class WindowBarcodesStep(step.StepChunk):
"""
Build a list of all the fragment barcodes overlapping each
genomic window
Output files:
bcwindows.sample.dataset.chrom.pickle - dictionary:
- barcode_windows - a list of sets of barcode IDs
      - barcode_map - a dict of barcode->barcode ID
- window_size - the size of the genomic window used; eg 10,000 would
mean that window starts were range(0, chrom_length, 10000)
"""
@staticmethod
def get_steps(options):
for sample, dataset in options.iter_10xdatasets():
for chrom in options.reference.chroms:
for chunk in range(chunks_for_chrom(options, chrom)):
yield WindowBarcodesStep(
options, sample, dataset, chrom, chunk)
def __init__(self, options, sample, dataset, chrom, chunk):
self.options = options
self.sample = sample
self.dataset = dataset
self.chrom = chrom
self.chunk = chunk
assert isinstance(self.dataset, svdatasets.TenXDataset)
def __str__(self):
return ".".join([self.__class__.__name__,
self.sample.name,
self.dataset.id,
self.chrom,
str(self.chunk)])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
file_name = "bcwindows.{}.{}.{}.{}.pickle".format(
self.sample.name,
self.dataset.id,
self.chrom,
self.chunk)
paths = {
"bcwindows": os.path.join(directory, file_name)
}
return paths
def run(self):
import logging
logging.info("running!")
window_size = self.options.constants["window_size"]
outpath = self.outpaths(final=False)["bcwindows"]
self.logger.log("Loading barcode map...")
# call_readclouds_step = call_readclouds.FilterFragmentsStep(
input_step = call_readclouds.CombineReadcloudsStep(
self.options, self.sample, self.dataset)
barcode_map = utilities.pickle.load(
open(input_step.outpaths(final=True)["barcode_map"]))
chrom_length = self.options.reference.chrom_lengths[self.chrom]
start = int(self.chunk*CHUNKSIZE)
end = int(min((self.chunk+1)*CHUNKSIZE, chrom_length))
self.logger.log("Running chunk: {}:{:,}-{:,}".format(self.chrom, start, end))
fragments = call_readclouds.load_fragments(
self.options, self.sample, self.dataset,
self.chrom, start, end, min_reads_per_frag=0)
barcode_windows = get_barcode_windows(
fragments, barcode_map, window_size, chrom_length, start, end)
self.logger.log("Saving results...")
result = {
"barcode_windows": barcode_windows,
# "barcode_map": barcode_map,
"nbcs": len(barcode_map),
"window_size": window_size
}
utilities.pickle.dump(result, open(outpath, "w"), protocol=-1)
def get_barcode_windows(fragments, barcode_map, window_size, chrom_length, start, end):
window_starts = range(start, end, window_size)
barcode_windows = []
for start in window_starts:
end = start + window_size
overlap = utilities.frags_overlap_same_chrom(fragments, start, end)
barcodes = set(barcode_map[bc] for bc in overlap["bc"])
barcode_windows.append(barcodes)
return barcode_windows
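# Illustrative semantics (hypothetical numbers): with window_size=10000 and a chunk spanning
# start=0, end=50000, get_barcode_windows() returns 5 sets, one per window, each containing
# the integer barcode IDs of the fragments overlapping that window.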
|
the-stack_0_17225 | """Support for FRITZ!Box routers."""
from __future__ import annotations
import datetime
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN as DEVICE_TRACKER_DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
SOURCE_TYPE_ROUTER,
)
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from .common import (
FritzBoxTools,
FritzData,
FritzDevice,
FritzDeviceBase,
device_filter_out_from_trackers,
)
from .const import DATA_FRITZ, DOMAIN
_LOGGER = logging.getLogger(__name__)
YAML_DEFAULT_HOST = "169.254.1.1"
YAML_DEFAULT_USERNAME = "admin"
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HOST),
cv.deprecated(CONF_USERNAME),
cv.deprecated(CONF_PASSWORD),
PARENT_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=YAML_DEFAULT_HOST): cv.string,
vol.Optional(CONF_USERNAME, default=YAML_DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
}
),
)
async def async_get_scanner(hass: HomeAssistant, config: ConfigType) -> None:
"""Import legacy FRITZ!Box configuration."""
_LOGGER.debug("Import legacy FRITZ!Box configuration from YAML")
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DEVICE_TRACKER_DOMAIN],
)
)
_LOGGER.warning(
"Your Fritz configuration has been imported into the UI, "
"please remove it from configuration.yaml. "
"Loading Fritz via scanner setup is now deprecated"
)
return None
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up device tracker for FRITZ!Box component."""
_LOGGER.debug("Starting FRITZ!Box device tracker")
router: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
data_fritz: FritzData = hass.data[DATA_FRITZ]
@callback
def update_router() -> None:
"""Update the values of the router."""
_async_add_entities(router, async_add_entities, data_fritz)
entry.async_on_unload(
async_dispatcher_connect(hass, router.signal_device_new, update_router)
)
update_router()
@callback
def _async_add_entities(
router: FritzBoxTools,
async_add_entities: AddEntitiesCallback,
data_fritz: FritzData,
) -> None:
"""Add new tracker entities from the router."""
new_tracked = []
if router.unique_id not in data_fritz.tracked:
data_fritz.tracked[router.unique_id] = set()
for mac, device in router.devices.items():
if device_filter_out_from_trackers(mac, device, data_fritz.tracked.values()):
continue
new_tracked.append(FritzBoxTracker(router, device))
data_fritz.tracked[router.unique_id].add(mac)
if new_tracked:
async_add_entities(new_tracked)
class FritzBoxTracker(FritzDeviceBase, ScannerEntity):
"""This class queries a FRITZ!Box router."""
def __init__(self, router: FritzBoxTools, device: FritzDevice) -> None:
"""Initialize a FRITZ!Box device."""
super().__init__(router, device)
self._last_activity: datetime.datetime | None = device.last_activity
@property
def is_connected(self) -> bool:
"""Return device status."""
return self._router.devices[self._mac].is_connected
@property
def unique_id(self) -> str:
"""Return device unique id."""
return f"{self._mac}_tracker"
@property
def mac_address(self) -> str:
"""Return mac_address."""
return self._mac
@property
def icon(self) -> str:
"""Return device icon."""
if self.is_connected:
return "mdi:lan-connect"
return "mdi:lan-disconnect"
@property
def extra_state_attributes(self) -> dict[str, str]:
"""Return the attributes."""
attrs: dict[str, str] = {}
device = self._router.devices[self._mac]
self._last_activity = device.last_activity
if self._last_activity is not None:
attrs["last_time_reachable"] = self._last_activity.isoformat(
timespec="seconds"
)
if device.connected_to:
attrs["connected_to"] = device.connected_to
if device.connection_type:
attrs["connection_type"] = device.connection_type
if device.ssid:
attrs["ssid"] = device.ssid
return attrs
@property
def source_type(self) -> str:
"""Return tracker source type."""
return SOURCE_TYPE_ROUTER
|
the-stack_0_17226 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .deploy_stage_execution_progress import DeployStageExecutionProgress
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputeInstanceGroupCanaryDeployStageExecutionProgress(DeployStageExecutionProgress):
"""
Specifies the Instance Group Canary deployment stage.
"""
def __init__(self, **kwargs):
"""
Initializes a new ComputeInstanceGroupCanaryDeployStageExecutionProgress object with values from keyword arguments. The default value of the :py:attr:`~oci.devops.models.ComputeInstanceGroupCanaryDeployStageExecutionProgress.deploy_stage_type` attribute
of this class is ``COMPUTE_INSTANCE_GROUP_CANARY_DEPLOYMENT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param deploy_stage_display_name:
The value to assign to the deploy_stage_display_name property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_display_name: str
:param deploy_stage_type:
The value to assign to the deploy_stage_type property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_type: str
:param deploy_stage_id:
The value to assign to the deploy_stage_id property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_id: str
:param time_started:
The value to assign to the time_started property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type time_started: datetime
:param time_finished:
The value to assign to the time_finished property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type time_finished: datetime
:param status:
The value to assign to the status property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED", "ROLLBACK_IN_PROGRESS", "ROLLBACK_SUCCEEDED", "ROLLBACK_FAILED"
:type status: str
:param deploy_stage_predecessors:
The value to assign to the deploy_stage_predecessors property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_predecessors: oci.devops.models.DeployStagePredecessorCollection
:param deploy_stage_execution_progress_details:
The value to assign to the deploy_stage_execution_progress_details property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_execution_progress_details: list[oci.devops.models.DeployStageExecutionProgressDetails]
"""
self.swagger_types = {
'deploy_stage_display_name': 'str',
'deploy_stage_type': 'str',
'deploy_stage_id': 'str',
'time_started': 'datetime',
'time_finished': 'datetime',
'status': 'str',
'deploy_stage_predecessors': 'DeployStagePredecessorCollection',
'deploy_stage_execution_progress_details': 'list[DeployStageExecutionProgressDetails]'
}
self.attribute_map = {
'deploy_stage_display_name': 'deployStageDisplayName',
'deploy_stage_type': 'deployStageType',
'deploy_stage_id': 'deployStageId',
'time_started': 'timeStarted',
'time_finished': 'timeFinished',
'status': 'status',
'deploy_stage_predecessors': 'deployStagePredecessors',
'deploy_stage_execution_progress_details': 'deployStageExecutionProgressDetails'
}
self._deploy_stage_display_name = None
self._deploy_stage_type = None
self._deploy_stage_id = None
self._time_started = None
self._time_finished = None
self._status = None
self._deploy_stage_predecessors = None
self._deploy_stage_execution_progress_details = None
self._deploy_stage_type = 'COMPUTE_INSTANCE_GROUP_CANARY_DEPLOYMENT'
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
the-stack_0_17227 | from replit import clear
from art import logo
print(logo)
bids = {}
bidding_finished = False
def find_highest_bidder(bidding_record):
maximum = max(bidding_record, key=bidding_record.get)
print(f"The winner is {maximum} with a bid of ${bidding_record[maximum]}")
while True:
name = input("What is your name?: ")
price = int(input("What is your bid?: $"))
bids[name] = price
    should_continue = input(
        "Are there any other bidders? Type 'yes' or 'no'.\n")
if should_continue == "no":
break
elif should_continue == "yes":
clear()
find_highest_bidder(bids)
|
the-stack_0_17228 | import torch
import torch.nn as nn
from torch.nn.functional import relu
from torch.nn.utils.rnn import pad_sequence
class Regressor():
"""A PyTorch MLP model consisting of an MLP for each module type.
    The model is learned on single modules only.
The model takes as input the input power and the meta data of the
corresponding cascade. To predict the output power the model
simply cascades the different MLPs matching the input module cascade."""
def __init__(self):
super().__init__()
# Since the model need meta data present in the data
# we will only instantiate the model when calling the fit function
self.Model = PyTorchModel # PyTorch model class
self.model = None # PyTorch model instance
self.mod_id = None # Module IDs
def fit(self, X, y):
# Retrieve some information about the modules from the data
all_mods = set(
[(("type", mod[0]), ("nb_feat", len(mod[1]))) for seq, _, _ in X
for mod in seq])
mod_info = [dict(m) for m in all_mods]
self.mod_id = {mod["type"]: i for i, mod in enumerate(mod_info)}
# Instantiate the PyTorch model
self.model = self.Model(mod_info)
# Turn on training mode
self.model.train()
# Get data and create train data loaders
data_list = [{"mod_id_seq": torch.tensor(
[self.mod_id[mod] for mod, _ in mod_seq]),
"mod_feat_seq_list": [torch.tensor(feat).float() for
_, feat in mod_seq],
"input_power": torch.tensor(p_in).float(),
"output_power": torch.tensor(p_out).float()} for
(mod_seq, p_in, campaign_id), p_out in zip(X, y)]
train_loader = torch.utils.data.DataLoader(data_list, batch_size=128,
collate_fn=collate_fn)
# Instantiate criterion and optimizer
crit = torch.nn.MSELoss()
opt = torch.optim.Adam(self.model.parameters(), lr=0.0001)
# Training loop
for e in range(100):
for data in train_loader:
(mod_id_seq, mod_feat_seq, p_in), p_out = data
opt.zero_grad()
preds = self.model(mod_id_seq, mod_feat_seq, p_in)
# Since the evaluation is only done for on-channels it
# helps the optimization to only backpropagate through them.
on_chan = p_in != 0
on_preds = torch.mul(on_chan, preds)
on_p_out = torch.mul(on_chan, p_out)
loss = crit(on_preds, on_p_out)
# Since we are only looking at single modules
if loss.requires_grad:
loss.backward()
opt.step()
def predict(self, X):
# Turn on evaluation mode
self.model.eval()
# No ground truth when predicting, format input arguments
# Input powers
p_in = torch.stack([torch.tensor(p_in).float() for _, p_in, _ in X])
# Module features
mod_feat_seq = [[torch.tensor(feat).float() for _, feat in mod_seq]
for mod_seq, _, _ in X]
# Module IDs
mod_id_seq = [torch.tensor([self.mod_id[mod] for mod, _ in mod_seq])
for mod_seq, _, _ in X]
mod_id_seq = pad_sequence(mod_id_seq, batch_first=True,
padding_value=-1)
# Model prediction
preds = self.model(mod_id_seq, mod_feat_seq, p_in).detach().numpy()
return preds
class PyTorchModel(torch.nn.Module):
def __init__(self, mod_info):
super(PyTorchModel, self).__init__()
self.mod_info = mod_info
# Construct as many MLPs as modules present in the data
self.MLPs = torch.nn.ModuleList(
[MLP(m["nb_feat"]) for m in self.mod_info])
def forward(self, mod_id_seq, mod_feat_seq, p_in):
seq_len = torch.tensor(list(map(len, mod_feat_seq)))
p_out = p_in
max_nb_mod = max(seq_len)
for n in range(max_nb_mod):
for i, mlp in enumerate(self.MLPs):
msk = torch.mul(mod_id_seq[:, n] == i, seq_len > n)
if msk.any():
feats = torch.stack(
[f[n] for i, f in enumerate(mod_feat_seq) if msk[i]])
p_out[msk] = mlp(torch.cat([p_out[msk], feats], dim=-1))
# Return positive values when evaluating the model
return p_out if self.training else relu(p_out)
class MLP(torch.nn.Module):
"""A simple two layer MLP taking as input the
input powers and the features of the module"""
def __init__(self, feat_size):
super(MLP, self).__init__()
self.drop_layer = nn.Dropout(p=0.5)
        # Definition of the modules of the model
        # Four fully connected layers, with dropout applied after the first hidden layer
self.fc0 = torch.nn.Linear(32 + feat_size, 256)
self.fc1 = torch.nn.Linear(256, 256)
self.fc2 = torch.nn.Linear(256, 256)
self.fc3 = torch.nn.Linear(256, 32)
def forward(self, x):
        # Compute the output of the model using ReLU activations (and dropout)
p_out = self.drop_layer(self.fc1(relu(self.fc0(x))))
p_out = self.fc3(relu(self.fc2(p_out)))
return p_out
def collate_fn(batch):
# Power output
p_out = torch.stack([sample["output_power"] for sample in batch])
# Power input
p_in = torch.stack([sample["input_power"] for sample in batch])
# Module id
l_id_seq = [sample["mod_id_seq"] for sample in batch]
mod_id_seq = pad_sequence(l_id_seq, batch_first=True, padding_value=-1)
# Module features
mod_feat_seq = [sample["mod_feat_seq_list"] for sample in batch]
return (mod_id_seq, mod_feat_seq, p_in), p_out
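# Minimal usage sketch (toy data; the module name, features and values are illustrative only):
#   X = [([("amp", [1.0, 0.5])], np.random.rand(32).astype("float32"), 0)]   # (module seq, input power, campaign id)
#   y = [np.random.rand(32).astype("float32")]                               # output power per sample
#   reg = Regressor()
#   reg.fit(X, y)
#   preds = reg.predict(X)   # numpy array of shape (1, 32)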
|
the-stack_0_17229 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
import struct
# Reference: draft-krovetz-vmac-01.txt
# This only implements VMAC with AES as cipher and 64 bit tags.
BLOCKSIZE = 16 # block size of AES in bytes
L1KEYSIZE = 128 # size of the L1 key in bytes
MASK_POLY = 0x1FFFFFFF1FFFFFFF1FFFFFFF1FFFFFFF
P127 = 2 ** 127 - 1
P64 = 2 ** 64 - 257
PP = 2 ** 64 - 2 ** 32
def nh(k, m):
mask64 = 0xffffffffffffffff
res = 0
for i in range(0, len(m), 2):
res += ((m[i] + k[i]) & mask64) * ((m[i+1] + k[i+1]) & mask64)
return res % 2**126
class Vmac64:
def __init__(self, key: bytes):
self.cipher = self.create_cipher(key)
self.l1_keys = self.kdf_int(128, 0, L1KEYSIZE // 8)
self.l2_keys = self.kdf_int(192, 0, 2)
idx = 1
while True:
k0, k1 = self.kdf_int(224, 2 * (idx - 1), 2 * idx)
if (k0 < P64) and (k1 < P64):
self.l3_keys = k0, k1
break
idx += 1
def create_cipher(self, key):
if isinstance(key, bytearray):
key = bytes(key)
assert isinstance(key, bytes) and len(key) in (16, 24, 32)
return Cipher(algorithms.AES(key), modes.ECB(), default_backend())
def encrypt_block(self, ba) -> bytes:
encryptor = self.cipher.encryptor()
assert len(ba) == 16
if isinstance(ba, bytearray):
ba = bytes(ba)
return encryptor.update(ba) + encryptor.finalize()
def kdf(self, index: int, size: int) -> bytes:
if size % BLOCKSIZE > 0:
return self.kdf(index, size + (-size % BLOCKSIZE))[:size]
res = bytearray(size)
for i in range(size // BLOCKSIZE):
inp = bytes([index] + [0] * 14 + [i])
res[BLOCKSIZE * i : BLOCKSIZE * (i+1)] = self.encrypt_block(inp)
return bytes(res)
def kdf_int(self, index: int, start: int, stop: int):
ba = self.kdf(index, 8 * stop)
return struct.unpack('>%dQ' % (stop - start), ba[8 * start: 8 * stop])
def pdf(self, nonce: bytes) -> bytes:
index = nonce[-1] % 2
block = bytearray(BLOCKSIZE - len(nonce)) + nonce
block[-1] -= index
enc = self.encrypt_block(bytes(block))
return enc[8 * index : 8 * (index + 1)]
def l1_hash(self, m: bytes):
k = self.l1_keys
blocks = (len(m) + L1KEYSIZE - 1) // L1KEYSIZE
fullblocks = len(m) // L1KEYSIZE
y = [None] * blocks
cnt = L1KEYSIZE // 8
fmt = '<%dQ' % cnt
for i in range(fullblocks):
pos = i * L1KEYSIZE
hstr = struct.unpack_from(fmt, m, pos)
y[i] = nh(k, hstr)
if blocks > fullblocks:
pos = fullblocks * L1KEYSIZE
ba = m[pos : pos + L1KEYSIZE]
ba += bytes(-len(ba) % 16)
cnt = len(ba) // 8
hstr = struct.unpack('<%dQ' % cnt, ba)
y[fullblocks] = nh(k, hstr)
return y
def l2_hash(self, m: bytes, bitlength: int) -> int:
t0, t1 = self.l2_keys
k = ((t0 & MASK_POLY) << 64) | (t1 & MASK_POLY)
if len(m) == 0:
y = k
else:
y = 1
for v in m:
y = (y * k + v) % P127
return (y + ((bitlength % (L1KEYSIZE * 8)) << 64)) % P127
def l3_hash(self, m: int) -> int:
k0, k1 = self.l3_keys
m0, m1 = divmod(m, PP)
return ((k0 + m0) * (k1 + m1)) % P64
def vhash(self, m: bytes) -> int:
t1 = self.l1_hash(m)
t2 = self.l2_hash(t1, 8 * len(m))
return self.l3_hash(t2)
def mac(self, m: bytes, nonce: bytes):
if len(nonce) > 16:
raise ValueError("Nonce too long")
elif len(nonce) == 16 and nonce[0] >= 128:
raise ValueError("Nonce must be smaller than 128-bits")
v = self.vhash(m)
m = struct.unpack('>Q', self.pdf(nonce))[0]
tag = (m + v) % 2 ** 64
return struct.pack('>Q', tag)
def tag(self, m: bytes, nonce: bytes) -> str:
return self.mac(m, nonce)
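# Example usage (illustrative key and nonce values):
#   v = Vmac64(b"\x00" * 16)          # 128-bit AES key
#   t = v.tag(b"message", b"\x01")    # 8-byte (64-bit) VMAC tag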
|
the-stack_0_17230 | import re
from collections import defaultdict
with open('input.txt') as file:
data = file.read()
ALLERGENS = defaultdict(list)
FOOD = []
KNOWN = []
for line in data.splitlines():
if m := re.search(r'(.+) \(contains (.+)\)', line):
food = m[1].split(' ')
allergens = m[2].split(', ')
FOOD.append(food)
for a in allergens:
ALLERGENS[a].append(food)
else:
        assert False, "bad regex?"
def learn(alg):
# generate common ingredients
common = set.intersection(*map(set, ALLERGENS[alg]))
# remove known ingredients
for known_ing, _ in KNOWN:
try:
common.remove(known_ing)
except:
pass
# if 1 left, add to known
if len(common) == 1:
KNOWN.append((*common, alg))
while len(KNOWN) < len(ALLERGENS):
for alg in ALLERGENS:
learn(alg)
known_ing = set(ing for ing, alg in KNOWN)
cnt = sum(len(set(food) - known_ing) for food in FOOD)
print(f"Part 1: {cnt}")
result = ','.join(ing for ing, _ in sorted(KNOWN, key=lambda x: x[1]))
print(f"Part 2: {result}") |
the-stack_0_17231 | import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="scattergl", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
            only if the hover label text spans two or
            more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
""",
),
**kwargs
)
|
the-stack_0_17232 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: sherlock
@contact: [email protected]
"""
import logging
import os
import sys
sys.path.append('.')
from fastreid.config import get_cfg
from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from fastreid.utils.checkpoint import Checkpointer
from fastreid.engine import hooks
from partialreid import *
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, num_query, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return DsrEvaluator(cfg, num_query)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_partialreid_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
logger = logging.getLogger("fastreid.trainer")
cfg.defrost()
cfg.MODEL.BACKBONE.PRETRAIN = False
model = Trainer.build_model(cfg)
Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model
if cfg.TEST.PRECISE_BN.ENABLED and hooks.get_bn_modules(model):
prebn_cfg = cfg.clone()
prebn_cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
prebn_cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET]) # set dataset name for PreciseBN
logger.info("Prepare precise BN dataset")
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
model,
# Build a new data loader to not affect training
Trainer.build_train_loader(prebn_cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
).update_stats()
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
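# Example invocations (script and config paths are hypothetical; the flags come from default_argument_parser):
#   python train_net.py --config-file configs/partial_reid.yml --num-gpus 1
#   python train_net.py --config-file configs/partial_reid.yml --eval-only MODEL.WEIGHTS model_final.pth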
|
the-stack_0_17235 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import boilerplate
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = boilerplate.__version__
if sys.argv[-1] == 'publish':
os.system('cd docs && make html')
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
if sys.argv[-1] == 'test':
print("Running tests only on current environment.")
print("Use `tox` for testing multiple environments.")
os.system('python manage.py test')
sys.exit()
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
setup(
name='django-boilerplate',
version=version,
description="""What Django is missing""",
long_description=readme + '\n\n' + history,
author='Irving Kcam',
author_email='[email protected]',
url='https://github.com/cubope/django-boilerplate',
packages=[
'boilerplate',
],
include_package_data=True,
install_requires=['Pillow', 'six'],
license="Apache License 2.0",
zip_safe=False,
keywords='django-boilerplate',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
"Environment :: Web Environment",
"Framework :: Django",
],
)
|
the-stack_0_17236 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ReverseSequenceTest(XLATestCase):
def _testReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
expected_err_re=None):
with self.test_session():
p = array_ops.placeholder(dtypes.as_dtype(x.dtype))
lengths = array_ops.placeholder(dtypes.as_dtype(seq_lengths.dtype))
with self.test_scope():
ans = array_ops.reverse_sequence(
p, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=lengths)
if expected_err_re is None:
tf_ans = ans.eval(feed_dict={p: x, lengths: seq_lengths})
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval(feed_dict={p: x, lengths: seq_lengths})
def testSimple(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
expected = np.array([[1, 2, 3], [6, 5, 4], [8, 7, 9]], dtype=np.int32)
self._testReverseSequence(
x,
batch_axis=0,
seq_axis=1,
seq_lengths=np.array([1, 3, 2], np.int32),
truth=expected)
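  # In the simple case above, row i reverses only its first seq_lengths[i] elements along
  # axis 1: row 0 reverses 1 element (no change), row 1 reverses all 3 ([4,5,6] -> [6,5,4]),
  # and row 2 reverses the first 2 ([7,8] -> [8,7]) while 9 stays in place.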
def _testBasic(self, dtype, len_dtype):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]]
], # reverse 0:4 (all)
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
def testSeqLength(self):
for dtype in self.all_types:
for seq_dtype in self.int_types:
self._testBasic(dtype, seq_dtype)
if __name__ == "__main__":
test.main()
|
the-stack_0_17238 | from lib.utilities import get_truncated_normal, get_truncated_exponential
from lib import static
class DistributionsManager:
def __init__(self, yaml_parser):
self.favorite_subjects_per_user_distribution = self.build_favorite_subjects_distribution(yaml_parser)
self.subjects_distribution = self.build_subjects_distribution(yaml_parser)
self.age_distribution = self.build_age_distribution(yaml_parser)
self.likes_distribution = self.build_likes_distribution(yaml_parser)
self.followers_distribution = self.build_followers_distribution(yaml_parser)
self.salary_distribution_per_occupation = self.build_compensations_distribution(yaml_parser)
@staticmethod
def build_favorite_subjects_distribution(yaml_parser):
distribution_type = yaml_parser.favorite_subjects_per_user_distribution
        if distribution_type == static.NORMAL:
try:
s = get_truncated_normal(yaml_parser.favorite_subjects_per_user_mean,
yaml_parser.favorite_subjects_per_user_sd,
yaml_parser.favorite_subjects_per_user_lower_bound,
yaml_parser.favorite_subjects_per_user_upper_bound)
except:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_normal(static.NORMAL_DISTRIBUTION[1],
static.NORMAL_DISTRIBUTION[2],
static.NORMAL_DISTRIBUTION[3],
static.NORMAL_DISTRIBUTION[4])
return s
else:
try:
s = get_truncated_exponential(yaml_parser.favorite_subjects_per_user_upper_bound,
yaml_parser.favorite_subjects_per_user_lower_bound,
yaml_parser.favorite_subjects_scale)
except:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_exponential(static.EXPONENTIAL_DISTRIBUTION[4],
static.EXPONENTIAL_DISTRIBUTION[3],
static.EXPONENTIAL_DISTRIBUTION[5])
return s
@staticmethod
def build_subjects_distribution(yaml_parser):
distribution_type = yaml_parser.subjects_distribution
        if distribution_type == static.NORMAL:
try:
s = get_truncated_normal(yaml_parser.subjects_mean,
yaml_parser.subjects_sd,
yaml_parser.subjects_lower_bound,
yaml_parser.subjects_upper_bound)
except:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_normal(static.NORMAL_DISTRIBUTION[1],
static.NORMAL_DISTRIBUTION[2],
static.NORMAL_DISTRIBUTION[3],
static.NORMAL_DISTRIBUTION[4])
return s
else:
try:
s = get_truncated_exponential(yaml_parser.subjects_upper_bound,
yaml_parser.subjects_lower_bound,
yaml_parser.subjects_scale)
except:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_exponential(static.EXPONENTIAL_DISTRIBUTION[4],
static.EXPONENTIAL_DISTRIBUTION[3],
static.EXPONENTIAL_DISTRIBUTION[5])
return s
@staticmethod
def build_age_distribution(yaml_parser):
return get_truncated_normal(yaml_parser.age_mean,
yaml_parser.age_sd,
yaml_parser.age_lower_bound,
yaml_parser.age_upper_bound)
@staticmethod
def build_compensations_distribution(yaml_parser):
salary_distribution_per_occupation = {}
for occupation in yaml_parser.compensations_distribution:
prob = yaml_parser.compensations_distribution[occupation]
salary_distribution_per_occupation[occupation] = get_truncated_normal(prob[0], prob[1], prob[2], prob[3])
return salary_distribution_per_occupation
@staticmethod
def build_likes_distribution(yaml_parser):
return get_truncated_exponential(yaml_parser.likes_upper_bound, yaml_parser.likes_lower_bound,
yaml_parser.likes_scale)
@staticmethod
def build_followers_distribution(yaml_parser):
return get_truncated_exponential(yaml_parser.followers_upper_bound, yaml_parser.followers_lower_bound,
yaml_parser.followers_scale)
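# Illustrative config shape (hypothetical values): each entry of `compensations_distribution`
# maps an occupation to [mean, sd, lower_bound, upper_bound], consumed above as prob[0..3]:
#   compensations_distribution:
#     engineer: [70000, 15000, 30000, 200000]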
|
the-stack_0_17239 | import numpy as np
import scipy.io.wavfile
import sys
from aubio import source, pitch
win_s = 4096
hop_s = 512
your_file = "StarWars60.wav"
# read the WAV header once, only to obtain the sample rate for aubio's source()
samplerate, _ = scipy.io.wavfile.read(your_file, mmap=False)
s = source(your_file, samplerate, hop_s)
samplerate = s.samplerate
tolerance = 0.8
pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
pitches = []
confidences = []
total_frames = 0
while True:
samples, read = s()
    pitch_midi = pitch_o(samples)[0]  # avoid shadowing the imported `pitch` class
    pitches += [pitch_midi]
confidence = pitch_o.get_confidence()
confidences += [confidence]
total_frames += read
if read < hop_s: break
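# MIDI note numbers can be converted to frequency with f = 440 * 2 ** ((midi - 69) / 12), e.g.:
#   pitches_hz = 440.0 * 2 ** ((np.array(pitches) - 69) / 12)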
print("Average frequency = " + str(np.array(pitches).mean()) + " hz") |
the-stack_0_17240 | import logging
from typing import Dict, List, Optional, Tuple, Any
import aiosqlite
import zstd
from chia.consensus.block_record import BlockRecord
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.full_block import FullBlock
from chia.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32
from chia.util.lru_cache import LRUCache
log = logging.getLogger(__name__)
class BlockStore:
db: aiosqlite.Connection
block_cache: LRUCache
db_wrapper: DBWrapper
ses_challenge_cache: LRUCache
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
# All full blocks which have been added to the blockchain. Header_hash -> block
self.db_wrapper = db_wrapper
self.db = db_wrapper.db
if self.db_wrapper.db_version == 2:
# TODO: most data in block is duplicated in block_record. The only
# reason for this is that our parsing of a FullBlock is so slow,
# it's faster to store duplicate data to parse less when we just
# need the BlockRecord. Once we fix the parsing (and data structure)
# of FullBlock, this can use less space
await self.db.execute(
"CREATE TABLE IF NOT EXISTS full_blocks("
"header_hash blob PRIMARY KEY,"
"prev_hash blob,"
"height bigint,"
"sub_epoch_summary blob,"
"is_fully_compactified tinyint,"
"in_main_chain tinyint,"
"block blob,"
"block_record blob)"
)
# This is a single-row table containing the hash of the current
# peak. The "key" field is there to make update statements simple
await self.db.execute("CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on full_blocks(height)")
# Sub epoch segments for weight proofs
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3("
"ses_block_hash blob PRIMARY KEY,"
"challenge_segments blob)"
)
await self.db.execute(
"CREATE INDEX IF NOT EXISTS is_fully_compactified ON"
" full_blocks(is_fully_compactified, in_main_chain) WHERE in_main_chain=1"
)
await self.db.execute(
"CREATE INDEX IF NOT EXISTS main_chain ON full_blocks(height, in_main_chain) WHERE in_main_chain=1"
)
else:
await self.db.execute(
"CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
" is_block tinyint, is_fully_compactified tinyint, block blob)"
)
# Block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS block_records(header_hash "
"text PRIMARY KEY, prev_hash text, height bigint,"
"block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)"
)
# Sub epoch segments for weight proofs
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY,"
"challenge_segments blob)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
await self.db.execute(
"CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)"
)
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
if self.db_wrapper.allow_upgrades:
await self.db.execute("DROP INDEX IF EXISTS hh")
await self.db.execute("DROP INDEX IF EXISTS is_block")
await self.db.execute("DROP INDEX IF EXISTS peak")
await self.db.execute(
"CREATE INDEX IF NOT EXISTS is_peak_eq_1_idx on block_records(is_peak) where is_peak = 1"
)
else:
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak) where is_peak = 1")
await self.db.commit()
self.block_cache = LRUCache(1000)
self.ses_challenge_cache = LRUCache(50)
return self
def maybe_from_hex(self, field: Any) -> bytes:
if self.db_wrapper.db_version == 2:
return field
else:
return bytes.fromhex(field)
def maybe_to_hex(self, field: bytes) -> Any:
if self.db_wrapper.db_version == 2:
return field
else:
return field.hex()
def compress(self, block: FullBlock) -> bytes:
return zstd.compress(bytes(block))
def maybe_decompress(self, block_bytes: bytes) -> FullBlock:
if self.db_wrapper.db_version == 2:
return FullBlock.from_bytes(zstd.decompress(block_bytes))
else:
return FullBlock.from_bytes(block_bytes)
async def rollback(self, height: int) -> None:
if self.db_wrapper.db_version == 2:
await self.db.execute(
"UPDATE OR FAIL full_blocks SET in_main_chain=0 WHERE height>? AND in_main_chain=1", (height,)
)
async def set_in_chain(self, header_hashes: List[Tuple[bytes32]]) -> None:
if self.db_wrapper.db_version == 2:
await self.db.executemany(
"UPDATE OR FAIL full_blocks SET in_main_chain=1 WHERE header_hash=?", header_hashes
)
async def add_full_block(self, header_hash: bytes32, block: FullBlock, block_record: BlockRecord) -> None:
self.block_cache.put(header_hash, block)
if self.db_wrapper.db_version == 2:
ses: Optional[bytes] = (
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included)
)
await self.db.execute(
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
(
header_hash,
block.prev_header_hash,
block.height,
ses,
int(block.is_fully_compactified()),
0, # in_main_chain
self.compress(block),
bytes(block_record),
),
)
else:
await self.db.execute(
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?)",
(
header_hash.hex(),
block.height,
int(block.is_transaction_block()),
int(block.is_fully_compactified()),
bytes(block),
),
)
await self.db.execute(
"INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?,?, ?, ?)",
(
header_hash.hex(),
block.prev_header_hash.hex(),
block.height,
bytes(block_record),
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included),
False,
block.is_transaction_block(),
),
)
async def persist_sub_epoch_challenge_segments(
self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
) -> None:
async with self.db_wrapper.lock:
await self.db.execute(
"INSERT OR REPLACE INTO sub_epoch_segments_v3 VALUES(?, ?)",
(self.maybe_to_hex(ses_block_hash), bytes(SubEpochSegments(segments))),
)
await self.db.commit()
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
cached = self.ses_challenge_cache.get(ses_block_hash)
if cached is not None:
return cached
async with self.db.execute(
"SELECT challenge_segments from sub_epoch_segments_v3 WHERE ses_block_hash=?",
(self.maybe_to_hex(ses_block_hash),),
) as cursor:
row = await cursor.fetchone()
if row is not None:
challenge_segments = SubEpochSegments.from_bytes(row[0]).challenge_segments
self.ses_challenge_cache.put(ses_block_hash, challenge_segments)
return challenge_segments
return None
def rollback_cache_block(self, header_hash: bytes32):
try:
self.block_cache.remove(header_hash)
except KeyError:
# this is best effort. When rolling back, we may not have added the
# block to the cache yet
pass
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
cached = self.block_cache.get(header_hash)
if cached is not None:
log.debug(f"cache hit for block {header_hash.hex()}")
return cached
log.debug(f"cache miss for block {header_hash.hex()}")
async with self.db.execute(
"SELECT block from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),)
) as cursor:
row = await cursor.fetchone()
if row is not None:
block = self.maybe_decompress(row[0])
self.block_cache.put(header_hash, block)
return block
return None
async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]:
cached = self.block_cache.get(header_hash)
if cached is not None:
log.debug(f"cache hit for block {header_hash.hex()}")
return bytes(cached)
log.debug(f"cache miss for block {header_hash.hex()}")
async with self.db.execute(
"SELECT block from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),)
) as cursor:
row = await cursor.fetchone()
if row is not None:
if self.db_wrapper.db_version == 2:
return zstd.decompress(row[0])
else:
return row[0]
return None
async def get_full_blocks_at(self, heights: List[uint32]) -> List[FullBlock]:
if len(heights) == 0:
return []
heights_db = tuple(heights)
formatted_str = f'SELECT block from full_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
async with self.db.execute(formatted_str, heights_db) as cursor:
ret: List[FullBlock] = []
for row in await cursor.fetchall():
ret.append(self.maybe_decompress(row[0]))
return ret
async def get_block_records_by_hash(self, header_hashes: List[bytes32]):
"""
Returns a list of Block Records, ordered by the same order in which header_hashes are passed in.
Throws an exception if the blocks are not present
"""
if len(header_hashes) == 0:
return []
all_blocks: Dict[bytes32, BlockRecord] = {}
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT header_hash,block_record FROM full_blocks "
f'WHERE header_hash in ({"?," * (len(header_hashes) - 1)}?)',
tuple(header_hashes),
) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(row[0])
all_blocks[header_hash] = BlockRecord.from_bytes(row[1])
else:
formatted_str = f'SELECT block from block_records WHERE header_hash in ({"?," * (len(header_hashes) - 1)}?)'
async with self.db.execute(formatted_str, tuple([hh.hex() for hh in header_hashes])) as cursor:
for row in await cursor.fetchall():
block_rec: BlockRecord = BlockRecord.from_bytes(row[0])
all_blocks[block_rec.header_hash] = block_rec
ret: List[BlockRecord] = []
for hh in header_hashes:
if hh not in all_blocks:
raise ValueError(f"Header hash {hh} not in the blockchain")
ret.append(all_blocks[hh])
return ret
async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]:
"""
Returns a list of Full Blocks blocks, ordered by the same order in which header_hashes are passed in.
Throws an exception if the blocks are not present
"""
if len(header_hashes) == 0:
return []
header_hashes_db: Tuple[Any, ...]
if self.db_wrapper.db_version == 2:
header_hashes_db = tuple(header_hashes)
else:
header_hashes_db = tuple([hh.hex() for hh in header_hashes])
formatted_str = (
f'SELECT header_hash, block from full_blocks WHERE header_hash in ({"?," * (len(header_hashes_db) - 1)}?)'
)
all_blocks: Dict[bytes32, FullBlock] = {}
async with self.db.execute(formatted_str, header_hashes_db) as cursor:
for row in await cursor.fetchall():
header_hash = self.maybe_from_hex(row[0])
full_block: FullBlock = self.maybe_decompress(row[1])
# TODO: address hint error and remove ignore
# error: Invalid index type "bytes" for "Dict[bytes32, FullBlock]";
# expected type "bytes32" [index]
all_blocks[header_hash] = full_block # type: ignore[index]
self.block_cache.put(header_hash, full_block)
ret: List[FullBlock] = []
for hh in header_hashes:
if hh not in all_blocks:
raise ValueError(f"Header hash {hh} not in the blockchain")
ret.append(all_blocks[hh])
return ret
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT block_record FROM full_blocks WHERE header_hash=?",
(header_hash,),
) as cursor:
row = await cursor.fetchone()
if row is not None:
return BlockRecord.from_bytes(row[0])
else:
async with self.db.execute(
"SELECT block from block_records WHERE header_hash=?",
(header_hash.hex(),),
) as cursor:
row = await cursor.fetchone()
if row is not None:
return BlockRecord.from_bytes(row[0])
return None
async def get_block_records_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, BlockRecord]:
"""
Returns a dictionary with all blocks in range between start and stop
if present.
"""
ret: Dict[bytes32, BlockRecord] = {}
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT header_hash, block_record FROM full_blocks WHERE height >= ? AND height <= ?",
(start, stop),
) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(row[0])
ret[header_hash] = BlockRecord.from_bytes(row[1])
else:
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
async with await self.db.execute(formatted_str) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(self.maybe_from_hex(row[0]))
ret[header_hash] = BlockRecord.from_bytes(row[1])
return ret
async def get_peak(self) -> Optional[Tuple[bytes32, uint32]]:
if self.db_wrapper.db_version == 2:
async with self.db.execute("SELECT hash FROM current_peak WHERE key = 0") as cursor:
peak_row = await cursor.fetchone()
if peak_row is None:
return None
async with self.db.execute("SELECT height FROM full_blocks WHERE header_hash=?", (peak_row[0],)) as cursor:
peak_height = await cursor.fetchone()
if peak_height is None:
return None
return bytes32(peak_row[0]), uint32(peak_height[0])
else:
async with self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1") as cursor:
peak_row = await cursor.fetchone()
if peak_row is None:
return None
return bytes32(bytes.fromhex(peak_row[0])), uint32(peak_row[1])
async def get_block_records_close_to_peak(
self, blocks_n: int
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all blocks that have height >= peak height - blocks_n, as well as the
peak header hash.
"""
peak = await self.get_peak()
if peak is None:
return {}, None
ret: Dict[bytes32, BlockRecord] = {}
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT header_hash, block_record FROM full_blocks WHERE height >= ?",
(peak[1] - blocks_n,),
) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(row[0])
ret[header_hash] = BlockRecord.from_bytes(row[1])
else:
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak[1] - blocks_n}"
async with self.db.execute(formatted_str) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(self.maybe_from_hex(row[0]))
ret[header_hash] = BlockRecord.from_bytes(row[1])
return ret, peak[0]
async def set_peak(self, header_hash: bytes32) -> None:
# We need to be in a sqlite transaction here.
# Note: we do not commit this to the database yet, as we need to also change the coin store
if self.db_wrapper.db_version == 2:
# Note: we use the key field as 0 just to ensure all inserts replace the existing row
await self.db.execute("INSERT OR REPLACE INTO current_peak VALUES(?, ?)", (0, header_hash))
else:
await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
await self.db.execute(
"UPDATE block_records SET is_peak=1 WHERE header_hash=?",
(self.maybe_to_hex(header_hash),),
)
async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]:
async with self.db.execute(
"SELECT is_fully_compactified from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),)
) as cursor:
row = await cursor.fetchone()
if row is None:
return None
return bool(row[0])
async def get_random_not_compactified(self, number: int) -> List[int]:
if self.db_wrapper.db_version == 2:
async with self.db.execute(
f"SELECT height FROM full_blocks WHERE in_main_chain=1 AND is_fully_compactified=0 "
f"ORDER BY RANDOM() LIMIT {number}"
) as cursor:
rows = await cursor.fetchall()
else:
# Since orphan blocks do not get compactified, we need to check whether all blocks with a
# certain height are not compact. And if we do have compact orphan blocks, then all that
# happens is that the occasional chain block stays uncompact - not ideal, but harmless.
async with self.db.execute(
f"SELECT height FROM full_blocks GROUP BY height HAVING sum(is_fully_compactified)=0 "
f"ORDER BY RANDOM() LIMIT {number}"
) as cursor:
rows = await cursor.fetchall()
heights = [int(row[0]) for row in rows]
return heights
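# Typical usage sketch (DBWrapper construction omitted; see chia.util.db_wrapper):
#   block_store = await BlockStore.create(db_wrapper)
#   peak = await block_store.get_peak()                    # (header_hash, height) or None
#   block = await block_store.get_full_block(header_hash)  # FullBlock or None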
|
the-stack_0_17242 | import numpy as np
import sys, os
import yaml
import trimesh
parent_dir = os.path.dirname(os.getcwd())
pykin_path = parent_dir + "/../../"
sys.path.append(pykin_path)
from pykin.robots.single_arm import SingleArm
from pykin.planners.rrt_star_planner import RRTStarPlanner
from pykin.collision.collision_manager import CollisionManager
from pykin.kinematics.transform import Transform
from pykin.utils.object_utils import ObjectManager
from pykin.utils import plot_utils as plt
fig, ax = plt.init_3d_figure(figsize=(10,6), dpi= 100)
file_path = '../../../asset/urdf/sawyer/sawyer.urdf'
mesh_path = pykin_path+"/asset/urdf/sawyer/"
yaml_path = '../../../asset/config/sawyer_init_params.yaml'
with open(yaml_path) as f:
controller_config = yaml.safe_load(f)
robot = SingleArm(file_path, Transform(rot=[0.0, 0.0, 0], pos=[0, 0, 0]))
robot.setup_link_name("sawyer_base", "sawyer_right_hand")
##################################################################
init_qpos = controller_config["init_qpos"]
init_fk = robot.forward_kin(np.concatenate((np.zeros(1), init_qpos)))
init_eef_pose = robot.get_eef_pose(init_fk)
goal_eef_pose = controller_config["goal_pose"]
c_manager = CollisionManager(mesh_path)
c_manager.setup_robot_collision(robot, init_fk)
milk_path = pykin_path+"/asset/objects/meshes/milk.stl"
milk_mesh = trimesh.load_mesh(milk_path)
obs = ObjectManager()
o_manager = CollisionManager(milk_path)
for i in range(9):
name = "miik_" + str(i)
if i < 3:
obs_pos = [0.3, -0.5 + i * 0.5, 0.3]
elif 3 <= i < 6:
obs_pos = [0.3, -0.5 + (i-3) * 0.5, 0.9]
else:
obs_pos = [0.3, -0.5 + (i-6) * 0.5, -0.3]
o_manager.add_object(name, gtype="mesh", gparam=milk_mesh, transform=Transform(pos=obs_pos).h_mat)
obs(name=name, gtype="mesh", gparam=milk_mesh, transform=Transform(pos=obs_pos).h_mat)
##################################################################
planner = RRTStarPlanner(
robot=robot,
self_collision_manager=c_manager,
object_collision_manager=o_manager,
delta_distance=0.1,
epsilon=0.2,
max_iter=300,
gamma_RRT_star=0.2,
dimension=7,
n_step=1
)
joint_path,_ = planner.get_path_in_joinst_space(cur_q=init_qpos, goal_pose=goal_eef_pose)
if joint_path is None:
    print("Cannot visualize path")
    exit()
joint_trajectory = []
eef_poses = []
for step, joint in enumerate(joint_path):
transformations = robot.forward_kin(np.concatenate((np.zeros(1),joint)))
joint_trajectory.append(transformations)
eef_poses.append(transformations[robot.eef_name].pos)
plt.plot_animation(
robot,
joint_trajectory,
fig,
ax,
eef_poses=eef_poses,
objects=obs,
visible_objects=True,
visible_collision=True,
interval=1,
repeat=True) |
the-stack_0_17243 | #!/usr/bin/env python
from __future__ import unicode_literals
from mptt import VERSION
requires = ()
try:
from setuptools import setup
kwargs = {str('install_requires'): requires}
except ImportError:
from distutils.core import setup
kwargs = {str('requires'): requires}
# Dynamically calculate the version based on mptt.VERSION
version_tuple = VERSION
version = ".".join([str(v) for v in version_tuple])
# on py3, all these are text strings
# on py2, they're all byte strings.
# ... and that's how setuptools likes it.
setup(
name=str('django-mptt'),
description=str('''Utilities for implementing Modified Preorder Tree Traversal
with your Django Models and working with trees of Model instances.'''),
version=version,
author=str('Craig de Stigter'),
author_email=str('[email protected]'),
url=str('http://github.com/django-mptt/django-mptt'),
packages=[str('mptt'), str('mptt.templatetags')],
package_data={str('mptt'): [str('templates/admin/*'), str('locale/*/*/*.*')]},
classifiers=[
str('Development Status :: 4 - Beta'),
str('Environment :: Web Environment'),
str('Framework :: Django'),
str('Intended Audience :: Developers'),
str('License :: OSI Approved :: MIT License'),
str('Operating System :: OS Independent'),
str('Programming Language :: Python'),
str('Programming Language :: Python :: 2'),
str('Programming Language :: Python :: 3'),
str('Topic :: Utilities'),
],
**kwargs
)
|
the-stack_0_17246 | import matplotlib
matplotlib.use('Agg')
#matplotlib.use("gtk")
#matplotlib.use('Qt5Agg')
from table_functions import *
import pickle
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
#sys.path.insert()
# print(data)
import numpy as np
import os
from scipy import stats
from matplotlib.pyplot import figure
import glob
import numpy as np
from hist_functions import *
import scipy.stats
from pathlib import Path
# ipdb.set_trace()
import ipdb
from scatter_plot_functions import *
from rectify_vars_and_wald_functions import *
from checkpickle_EFFECT_new import parse_dir
SMALL_SIZE = 13
MEDIUM_SIZE = 10
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=8.5) # fontsize of the tick labels
plt.rc('ytick', labelsize=10) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
PROP_EPS_KEY = "prop_exploring_ppd_cuml"
def plot_hist_and_table(df_for_num_steps_eg0pt1, df_for_num_steps_eg0pt3, df_for_num_steps_ts, df_for_num_steps_unif, num_steps, epsilon, n):
fig_h, ax_h = plt.subplots()
proportions_unif = df_for_num_steps_unif['sample_size_1'] / num_steps
proportions_eg0pt1 = df_for_num_steps_eg0pt1['sample_size_1'] / num_steps
proportions_eg0pt3 = df_for_num_steps_eg0pt3['sample_size_1'] / num_steps
proportions_ts = df_for_num_steps_ts['sample_size_1'] / num_steps
ax_h.hist(proportions_eg0pt1, alpha = 0.5, label = "Epsilon Greedy 0.1")
ax_h.hist(proportions_eg0pt3, alpha = 0.5, label = "Epsilon Greedy 0.3")
ax_h.hist(proportions_unif, alpha = 0.5, label = "Uniform Random")
ax_h.hist(proportions_ts, alpha = 0.5, label = "Thompson Sampling")
ax_h.legend()
fig_h.suptitle("Histogram of Proportion of {} Participants Assigned to Condition 1 Across 500 Simulations".format(num_steps))
# rows = ["Areferg"]
# columns = ["Berger"]
# cell_text = ["ergerg"]
# the_table = ax_h.table(cellText=cell_text,
# rowLabels=rows,
# colLabels=columns,
# loc='right')
# fig_h.subplots_adjust(left=0.2, wspace=0.4)
mean_ts = np.mean(proportions_ts)
var_ts = np.var(proportions_ts)
mean_eg0pt1 = np.mean(proportions_eg0pt1)
mean_eg0pt3 = np.mean(proportions_eg0pt3)
var_eg0pt1 = np.var(proportions_eg0pt1)
var_eg0pt3 = np.var(proportions_eg0pt3)
prop_lt_25_eg0pt1 = np.sum(proportions_eg0pt1 < 0.25) / len(proportions_eg0pt1)
prop_lt_25_eg0pt3 = np.sum(proportions_eg0pt3 < 0.25) / len(proportions_eg0pt3)
prop_lt_25_ts = np.sum(proportions_ts < 0.25) / len(proportions_ts)
# prop_gt_25_lt_5_eg = np.sum(> proportions > 0.25) / len(proportions)
# prop_gt_25_lt_5_ts = np.sum(> proportions_ts > 0.25) / len(proportions_ts)
data = [[mean_ts, var_ts, prop_lt_25_ts],\
[mean_eg0pt1, var_eg0pt1, prop_lt_25_eg0pt1],\
[mean_eg0pt3, var_eg0pt3, prop_lt_25_eg0pt3]]
final_data = [['%.3f' % j for j in i] for i in data] #<0.25, 0.25< & <0.5, <0.5 & <0.75, <0.75 & <1.0
#table.auto_set_font_size(False)
# table.set_fontsize(7)
# table.auto_set_column_width((-1, 0, 1, 2, 3))
table = ax_h.table(cellText=final_data, colLabels=['Mean', 'Variance', 'prop < 0.25'], rowLabels = ["Thompson Sampling", "Epsilon Greedy 0.1", "Epsilon Greedy 0.3"], loc='bottom', cellLoc='center', bbox=[0.25, -0.5, 0.5, 0.3])
table.auto_set_font_size(False)
table.set_fontsize(7)
table.auto_set_column_width((-1, 0, 1, 2, 3))
# Adjust layout to make room for the table:
#ax_h.tick_params(axis='x', pad=20)
#fig_h.subplots_adjust(left=0.2, bottom=0.5)
#fig_h.tight_layout()
save_dir = "../simulation_analysis_saves/histograms/ExploreAndExploit/N={}".format(n)
Path(save_dir).mkdir(parents=True, exist_ok=True)
fig_h.savefig(save_dir + "/condition_prop_n={}.png".format(num_steps), bbox_inches = 'tight')
fig_h.clf()
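# For each horizon in df_unif['num_steps'], computes the mean per-step reward of each
# algorithm (uniform, epsilon-greedy 0.1/0.3, TS, TS PPD, epsilon-TS), draws grouped
# bars with 95% confidence intervals on the given axis, marks the optimal-arm mean
# (0.5 + es/2) with a dashed line, and returns the uniform / eps-greedy 0.1 / TS means.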
def stacked_bar_plot_with_cutoff(df_ts = None, df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, df_tsppd = None, n = None, num_sims = None, df_ets = None, \
title = None, bs_prop = 0.0,\
ax = None, ax_idx = None, epsilon = None, es=None):
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
t1_list_eg0pt1 = []
t1_list_eg0pt3 = []
t1_list_unif = []
t1_wald_list_unif = [] # NOTE: this stores mean reward, not type-1 error. TODO: rename these variables
var_list = []
t1_list_ts = []
t1_list_tsppd = []
t1_list_ets = []
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps].dropna()
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps].dropna()
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps].dropna()
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps].dropna()
df_for_num_steps_tsppd = df_tsppd[df_tsppd['num_steps'] == num_steps].dropna()
df_for_num_steps_ets = df_ets[df_ets['num_steps'] == num_steps].dropna()
#df_for_num_steps_unif = df_for_num_steps_unif.dropna()
# bins = np.arange(0, 1.01, .025)
unif_reward_mean = (df_for_num_steps_unif['total_reward']/num_steps).mean()
ts_reward_mean = (df_for_num_steps_ts['total_reward']/num_steps).mean()
eps_greedy_reward_mean_0pt1 = (df_for_num_steps_eg0pt1['total_reward']/num_steps).mean()
eps_greedy_reward_mean_0pt3 = (df_for_num_steps_eg0pt3['total_reward']/num_steps).mean()
tsppd_reward_mean = (df_for_num_steps_tsppd['total_reward']/num_steps).mean()
ets_reward_mean = (df_for_num_steps_ets['total_reward']/num_steps).mean()
t1_list_unif.append(unif_reward_mean)
t1_list_ts.append(ts_reward_mean)
t1_list_eg0pt1.append(eps_greedy_reward_mean_0pt1)
t1_list_eg0pt3.append(eps_greedy_reward_mean_0pt3)
t1_list_tsppd.append(tsppd_reward_mean)
t1_list_ets.append(ets_reward_mean)
t1_list_ts = np.array(t1_list_ts)
t1_list_tsppd = np.array(t1_list_tsppd)
t1_list_ets = np.array(t1_list_ets)
ind = np.arange(3*len(step_sizes), step=3)
# print(ind)
# print(step_sizes)
ax.set_xticks(ind)
ax.set_xticklabels(step_sizes)
print("var", var_list)
width = 0.44
capsize = width*4
width_total = 2*width
t1_list_eg0pt1 = np.array(t1_list_eg0pt1)
t1_list_eg0pt3 = np.array(t1_list_eg0pt3)
t1_list_unif = np.array(t1_list_unif)
num_sims_RL4RL = 5000
t1_eg0pt1_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_eg0pt1*(1-t1_list_eg0pt1)/num_sims_RL4RL) #95 CI for Proportion
t1_eg0pt3_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_eg0pt3*(1-t1_list_eg0pt3)/num_sims_RL4RL) #95 CI for Proportion
t1_se_unif = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_unif*(1-t1_list_unif)/num_sims_RL4RL)
t1_se_ts = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_ts*(1-t1_list_ts)/num_sims_RL4RL)
num_sims_ppd = num_sims
t1_se_tsppd = stats.t.ppf(1-0.025, num_sims_ppd)*np.sqrt(t1_list_tsppd*(1-t1_list_tsppd)/num_sims_ppd)
num_sims_ets = num_sims
t1_se_ets = stats.t.ppf(1-0.025, num_sims_ppd)*np.sqrt(t1_list_ets*(1-t1_list_ets)/num_sims_ets)
print(t1_se_unif) # note that power goes to 1.0 for unif, so the error bars shrink toward zero
#print(t1_se_unif)
p1 = ax.bar(ind, t1_list_eg0pt1, width = width, yerr = t1_eg0pt1_se, \
ecolor='black', capsize=capsize, color = 'yellow', edgecolor='black')
p3 = ax.bar(ind+width, t1_list_eg0pt3, width = width, yerr = t1_eg0pt3_se, \
ecolor='black', capsize=capsize, color = 'green', edgecolor='black')
p4 = ax.bar(ind+2*width, t1_list_ts, width = width, yerr = t1_se_ts,
ecolor='black', capsize=capsize, color = 'blue', edgecolor='black')
p5 = ax.bar(ind+3*width, t1_list_tsppd, width = width, yerr = t1_se_tsppd,
ecolor='black', capsize=capsize, color = 'purple', edgecolor='black')
p6 = ax.bar(ind+4*width, t1_list_ets, width = width, yerr = t1_se_ets,
ecolor='black', capsize=capsize, color = 'brown', edgecolor='black')
p2 = ax.bar(ind-width, t1_list_unif, width = width,\
yerr = t1_se_unif, ecolor='black', \
capsize=capsize, color = 'red', \
edgecolor='black')
if ax_idx == 2:
# leg1 = ax.legend((p1[0], p2[0], p3[0], p4[0]), ('Epsilon Greedy Chi Squared 0.1', "Uniform Chi Squared", "Epsilon Greedy Chi Squared 0.3", "Thompson Sampling Chi Squared"), bbox_to_anchor=(1.0, 1.76))
leg1 = ax.legend((p2[0], p1[0], p3[0], p4[0], p5[0], p6[0]), ("Uniform Wald", 'Epsilon Greedy 0.1 Wald', "Epsilon Greedy 0.3 Wald", "Thompson Sampling Wald", "PPD c 0.1 Thompson Sampling Wald", "Epsilon 0.1 Thompson Sampling Wald"), bbox_to_anchor=(1.0, 1.76))
#leg1 = ax.legend((p2[0], p1[0], p3[0], p4[0]), ("Uniform Chi Squared", 'Epsilon Greedy Chi Squared 0.1', "Epsilon Greedy Chi Squared 0.3", "Thompson Sampling Chi Squared"), bbox_to_anchor=(1.0, 1.76))
#leg2 = ax.legend(loc = 2)
ax.add_artist(leg1)
# plt.tight_layout()
# plt.title(title)
# if ax_idx == 6 or ax_idx == 7 or ax_idx == 8:
ax.set_xlabel("number of participants = \n n/2, n, 2*n, 4*n")
ax.set_ylim(0.40, 0.8)
x = es / 2
optimal_arm = 0.5 + x
ax.axhline(y=optimal_arm, linestyle='--')
return [t1_list_unif, t1_list_eg0pt1, t1_list_ts] #returns [UR Eps_Greedy, TS], in this case, need to return for each step size, but only plotting for one bs, so save step size by model (4x2)
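# Older driver: for each (effect size, n) pair, loads the pickled result DataFrames for
# every algorithm from the RL4RLSectionB / simulation_saves directories, rectifies their
# Wald statistics, draws one reward bar plot per effect size via
# stacked_bar_plot_with_cutoff, and saves a per-n CSV summary plus the combined figure.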
def parse_dir_old(root, root_cutoffs, num_sims):
arm_prob= 0.5
arm_prob_list = [0.2, 0.5, 0.8]
es_list = [0.5, 0.3, 0.1]
n_list = [32, 88, 785]
epsilon = 0.1
#EpsilonGreedyIsEffect/num_sims=5armProb=0.5/es=0.3epsilon=0.1/
root_dir = root + "/num_sims={}armProb={}".format(num_sims, arm_prob)
fig, ax = plt.subplots(1,3, figsize = (12,5))
#fig.set_size_inches(17.5, 13.5)
ax = ax.ravel()
i = 0
# ipdb.set_trace()
c = 0.1
num_sims_secb = 5000
root_ts = "../../../RL4RLSectionB/simulation_saves/IsEffect_fixedbs_RL4RLMay8/num_sims={}armProb=0.5".format(num_sims_secb)
root_eg = "../../../RL4RLSectionB/simulation_saves/EpsilonGreedyIsEffect/num_sims={}armProb=0.5".format(num_sims_secb)
root_unif = "../../../RL4RLSectionB/simulation_saves/UniformIsEffect/num_sims={}armProb=0.5".format(num_sims_secb)
root_ets = "../simulation_saves/EpsilonTSIsEffect/num_sims={}armProb=0.5".format(5000)
for n in n_list:
es = es_list[i]
bs = 1
es_dir_0pt1 = root_eg + "/es={}epsilon={}/".format(es, 0.1)
es_dir_0pt3 = root_eg + "/es={}epsilon={}/".format(es, 0.3)
ts_dir = root_ts + "/es={}/".format(es)
tsppd_dir = root_dir + "/es={}c={}/".format(es, c)
ets_dir = root_ets + "/es={}epsilon={}/".format(es, 0.1)
unif_dir = root_unif + "/es={}/".format(es)
to_check_eg0pt1 = glob.glob(es_dir_0pt1 + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(es_dir_0pt1 + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_eg0pt3 = glob.glob(es_dir_0pt3 + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(es_dir_0pt3 + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_unif = glob.glob(unif_dir + "/*Uniform*{}*{}Df.pkl".format(bs, es))[0]
assert(len(glob.glob(unif_dir + "/*Uniform*{}*{}Df.pkl".format(bs, es))) == 1)
to_check_ts = glob.glob(ts_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(ts_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_tsppd = glob.glob(tsppd_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(tsppd_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_ets = glob.glob(ets_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(ets_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
#------hists, tables etc
with open(to_check_eg0pt1, 'rb') as f:
df_eg0pt1 = pickle.load(f)
with open(to_check_eg0pt3, 'rb') as f:
df_eg0pt3 = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
if to_check_ts is not None:
with open(to_check_ts, 'rb') as t:
df_ts = pickle.load(t)
with open(to_check_tsppd, 'rb') as f:
df_tsppd = pickle.load(f)
with open(to_check_ets, 'rb') as f:
df_ets = pickle.load(f)
# ipdb.set_trace()
rect_key = "TS"
rect_key = "Drop NA"
rectify_vars_noNa(df_eg0pt1, alg_key = rect_key)
rectify_vars_noNa(df_eg0pt3, alg_key = rect_key)
rectify_vars_noNa(df_ts, alg_key = rect_key)
rectify_vars_noNa(df_unif, alg_key = rect_key)
assert np.sum(df_eg0pt1["wald_type_stat"].isna()) == 0
assert np.sum(df_eg0pt1["wald_pval"].isna()) == 0
next_df = stacked_bar_plot_with_cutoff(df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3,\
df_unif = df_unif, df_ts = df_ts, df_tsppd = df_tsppd, df_ets = df_ets,\
n = n, es=es,num_sims = num_sims,
ax = ax[i], ax_idx = i, epsilon = epsilon)
#
ax[i].set_title("es = {}, n = {}".format(es, n))
ax[i].set_ylabel("Reward")
i += 1
df = pd.DataFrame(next_df, columns = ["n/2","n","2n","4n"])
df.index = ["Uniform Random Chi Squared","Epsilon Greedy Chi Squared", "Thompson Sampling Chi Squared"]
save_dir = "../simulation_analysis_saves/histograms/ExploreAndExploit/N={}".format(n)
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_dir + "/Reward={}_numsims={}.csv".format(n, num_sims))
title = "Average Reward \n Across {} Simulations".format(num_sims)
#ax[i].set_title(title, fontsize = 55)
#i +=1
#fig.suptitle("Type One Error Rates Across {} Simulations".format(num_sims))
fig.suptitle(title)
#fig.tight_layout(rect=[0, 0.03, 1, 0.95])
#handles, labels = ax[i-1].get_legend_handles_labels()
#fig.legend(handles, labels, loc='upper right', prop={'size': 50})
#fig.tight_layout()
save_dir = "../simulation_analysis_saves/power_t1_plots"
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
# print("saving to ", "plots/{}.png".format(title))
#fig.set_tight_layout(True)
fig.tight_layout()
fig.subplots_adjust(top=.8)
fig.savefig(save_dir + "/{}.svg".format(title), bbox_inches = 'tight')
# plt.show()
fig.clf()
plt.close(fig)
if __name__ == "__main__":
root = "../simulation_saves/TSPPDIsEffect"
#parse_dir(root, root_cutoffs)
num_sims = 500
num_sims = 5000
num_sims = 10000
title = "Mean Reward \n Averaged Across {} Simulations".format(num_sims)
# parse_dir_old(root, root, num_sims)
parse_dir(root, root, num_sims, title = title, metric = "Reward", ylabel = "Reward", ymax = 0.80, num_es = 3)
title = "Proportion of Optimal Allocations \n Averaged Across {} Simulations".format(num_sims)
parse_dir(root, root, num_sims, title = title, metric = "PropOpt", ylabel = "Proportion Optimal Allocation", ymax = 1.0, num_es = 3)
parse_dir(root, root, num_sims, title = title, metric = "PropEps", ylabel = "Proportion Eps", ymax = 1.0, num_es = 3)
|
the-stack_0_17247 | from warnings import simplefilter
import numpy as np
from sklearn import model_selection
import wandb
from wandb.sklearn import utils
# ignore all future warnings
simplefilter(action="ignore", category=FutureWarning)
def learning_curve(
model,
X,
y,
cv=None,
shuffle=False,
random_state=None,
train_sizes=None,
n_jobs=1,
scoring=None,
):
"""Trains model on datasets of varying size and generates plot of score vs size.
Called by plot_learning_curve to visualize learning curve. Please use the function
plot_learning_curve() if you wish to visualize your learning curves.
"""
train_sizes, train_scores, test_scores = model_selection.learning_curve(
model,
X,
y,
cv=cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
scoring=scoring,
shuffle=shuffle,
random_state=random_state,
)
train_scores_mean = np.mean(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
table = make_table(train_scores_mean, test_scores_mean, train_sizes)
chart = wandb.visualize("wandb/learning_curve/v1", table)
return chart
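# Usage sketch (illustrative, not part of the original module): the returned object is a
# wandb custom chart, which would typically be logged inside an active run, e.g.
#   chart = learning_curve(model, X, y, cv=5)
#   wandb.log({"learning_curve": chart})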
def make_table(train, test, train_sizes):
data = []
for i in range(len(train)):
if utils.check_against_limit(
i,
"learning_curve",
utils.chart_limit / 2,
):
break
train_set = ["train", utils.round_2(train[i]), train_sizes[i]]
test_set = ["test", utils.round_2(test[i]), train_sizes[i]]
data.append(train_set)
data.append(test_set)
table = wandb.Table(columns=["dataset", "score", "train_size"], data=data)
return table
|
the-stack_0_17248 | import time
import math
import numpy as np
import tensorflow as tf
import ops
from config import config
from mac_cell import MACCell
'''
The MAC network model. It performs a reasoning process to answer a question over a
knowledge base (the image) by decomposing it into attention-based computational steps,
each performed by a recurrent MAC cell.
The network has three main components.
Input unit: processes the network inputs (raw question strings and the image) into
distributional representations.
The MAC network: calls the MAC cells (mac_cell.py) config.netLength times
to perform the reasoning process over the question and image.
The output unit: a classifier that receives the question and the final state of the MAC
network and uses them to compute a log-likelihood over the possible one-word answers.
'''
class MACnet(object):
'''Initialize the class.
Args:
embeddingsInit: initialization for word embeddings (random / glove).
answerDict: answers dictionary (mapping between integer id and symbol).
'''
def __init__(self, embeddingsInit, answerDict, questionDict, nextElement = None):
self.input = nextElement
self.embeddingsInit = embeddingsInit
self.answerDict = answerDict
self.questionDict = questionDict
self.build()
'''
Initializes placeholders.
questionIndicesAll: integer ids of question words.
[batchSize, questionLength]
questionLengthsAll: length of each question.
[batchSize]
imagesPlaceholder: image features.
[batchSize, channels, height, width]
(converted internally to [batchSize, height, width, channels])
answerIndicesAll: integer ids of answer words.
[batchSize]
lr: learning rate (tensor scalar)
train: train / evaluation (tensor boolean)
dropout values dictionary (tensor scalars)
'''
# change to H x W x C?
def addPlaceholders(self):
with tf.variable_scope("Placeholders"):
## data
# questions
self.questionIndicesAll = tf.placeholder(tf.int32, shape = (None, None))
self.questionLengthsAll = tf.placeholder(tf.int32, shape = (None, ))
# images
# put image known dimension as last dim?
if config.imageObjects:
self.imagesAll = tf.placeholder(tf.float32, shape = (None, None, None))
self.imagesObjectNumAll = tf.placeholder(tf.int32, shape = (None, ))
else:
self.imagesPlaceholder = tf.placeholder(tf.float32, shape = (None, None, None, None))
self.imagesAll = tf.transpose(self.imagesPlaceholder, (0, 2, 3, 1))
# answers
self.answerIndicesAll = tf.placeholder(tf.int32, shape = (None, ))
if config.dataset == "VQA":
self.answerFreqListsAll = tf.placeholder(tf.int32, shape = (None, None))
self.answerFreqNumsAll = tf.placeholder(tf.int32, shape = (None, ))
if config.ansFormat == "mc":
self.choicesIndicesAll = tf.placeholder(tf.int32, shape = (None, None))
self.choicesNumsAll = tf.placeholder(tf.int32, shape = (None, ))
# In general, this could be consolidated with the mc case (and made more general)
# by providing choicesIndices for all answers, including the open-ended case.
## optimization
self.lr = tf.placeholder(tf.float32, shape = ())
self.train = tf.placeholder(tf.bool, shape = ())
self.batchSizeAll = tf.shape(self.questionIndicesAll)[0]
## dropouts
# TODO: change dropouts to be 1 - current
self.dropouts = {
"encInput": tf.placeholder(tf.float32, shape = ()),
"encState": tf.placeholder(tf.float32, shape = ()),
"stem": tf.placeholder(tf.float32, shape = ()),
"question": tf.placeholder(tf.float32, shape = ()),
"read": tf.placeholder(tf.float32, shape = ()),
"write": tf.placeholder(tf.float32, shape = ()),
"memory": tf.placeholder(tf.float32, shape = ()),
"output": tf.placeholder(tf.float32, shape = ()),
"controlPre": tf.placeholder(tf.float32, shape = ()),
"controlPost": tf.placeholder(tf.float32, shape = ()),
"wordEmb": tf.placeholder(tf.float32, shape = ()),
"word": tf.placeholder(tf.float32, shape = ()),
"vocab": tf.placeholder(tf.float32, shape = ()),
"object": tf.placeholder(tf.float32, shape = ()),
"wordStandard": tf.placeholder(tf.float32, shape = ())
}
# batch norm params
self.batchNorm = {"decay": config.bnDecay, "train": self.train}
self.imageInDim = config.imageDims[-1]
if not config.imageObjects:
self.H, self.W, self.imageInDim = 7, 7, 2048  # config.imageDims
if config.dataset == "CLEVR":
self.H, self.W, self.imageInDim = 14, 14, 1024
# Feeds data into placeholders. See addPlaceholders method for further details.
def createFeedDict(self, data, images, train):
feedDict = {
self.questionIndicesAll: data["questions"],
self.questionLengthsAll: data["questionLengths"],
self.answerIndicesAll: data["answers"],
self.dropouts["encInput"]: config.encInputDropout if train else 1.0,
self.dropouts["encState"]: config.encStateDropout if train else 1.0,
self.dropouts["stem"]: config.stemDropout if train else 1.0,
self.dropouts["question"]: config.qDropout if train else 1.0, #_
self.dropouts["memory"]: config.memoryDropout if train else 1.0,
self.dropouts["read"]: config.readDropout if train else 1.0, #_
self.dropouts["write"]: config.writeDropout if train else 1.0,
self.dropouts["output"]: config.outputDropout if train else 1.0,
self.dropouts["controlPre"]: config.controlPreDropout if train else 1.0,
self.dropouts["controlPost"]: config.controlPostDropout if train else 1.0,
self.dropouts["wordEmb"]: config.wordEmbDropout if train else 1.0,
self.dropouts["word"]: config.wordDp if train else 1.0,
self.dropouts["vocab"]: config.vocabDp if train else 1.0,
self.dropouts["object"]: config.objectDp if train else 1.0,
self.dropouts["wordStandard"]: config.wordStandardDp if train else 1.0,
self.lr: config.lr,
self.train: train
}
if config.imageObjects:
feedDict.update({
self.imagesAll: images["images"],
self.imagesObjectNumAll: data["objectsNums"],
})
else:
feedDict.update({
self.imagesPlaceholder: images["images"]
})
if config.dataset == "VQA":
feedDict.update({
self.answerFreqListsAll: data["answerFreqs"],
self.answerFreqNumsAll: data["answerFreqNums"]
})
if config.ansFormat == "mc":
feedDict.update({
self.choicesIndicesAll: data["choices"],
self.choicesNumsAll: data["choicesNums"]
})
return feedDict
# Splits data to a specific GPU (tower) for parallelization
def initTowerBatch(self, towerI, towersNum, dataSize):
towerBatchSize = tf.floordiv(dataSize, towersNum)
start = towerI * towerBatchSize
end = (towerI + 1) * towerBatchSize if towerI < towersNum - 1 else dataSize
self.questionIndices = self.questionIndicesAll[start:end]
self.questionLengths = self.questionLengthsAll[start:end]
self.images = self.imagesAll[start:end]
self.imagesObjectNum = None
if config.imageObjects:
self.imagesObjectNum = self.imagesObjectNumAll[start:end]
self.answerIndices = self.answerIndicesAll[start:end]
self.answerFreqs = self.answerFreqNums = None
if config.dataset == "VQA":
self.answerFreqLists = self.answerFreqListsAll[start:end]
self.answerFreqNums = self.answerFreqNumsAll[start:end]
self.choicesIndices = self.choicesNums = None
if config.ansFormat == "mc":
self.choicesIndices = self.choicesIndicesAll[start:end]
self.choicesNums = self.choicesNumsAll[start:end]
self.batchSize = end - start
'''
The Image Input Unit (stem). Passes the image features through a CNN network.
Optionally adds positional encoding (not in the default behavior).
Flattens the image into a Height * Width "knowledge base" array.
Args:
images: image input. [batchSize, height, width, inDim]
inDim: input image dimension
outDim: image out dimension
addLoc: if not None, adds positional encoding to the image
Returns preprocessed images.
[batchSize, height * width, outDim]
'''
def stem(self, images, inDim, outDim, addLoc = None):
with tf.variable_scope("stem"):
if config.stemNormalize:
images = tf.nn.l2_normalize(images, dim = -1)
if config.imageObjects: # VQA ??? or config.useBaseline:
features, dim = images, inDim
if config.stemLinear:
features = ops.linear(images, inDim, outDim, dropout = self.dropouts["stem"])
dim = outDim
elif config.stemDeep:
dims = [inDim] + config.stemDims + [outDim]
features = ops.FCLayer(features, dims, dropout = self.dropouts["stem"])
if config.stemAct != "NON":
features = ops.actF(config.stemAct)(features)
return features, dim
if addLoc is None:
addLoc = config.locationAware
if config.stemLinear:
features = ops.linear(images, inDim, outDim)
else:
if config.stemNumLayers == 0:
outDim = inDim
else:
dims = [inDim] + ([config.stemDim] * (config.stemNumLayers - 1)) + [outDim]
if addLoc:
images, inDim = ops.addLocation(images, inDim, config.locationDim,
h = self.H, w = self.W, locType = config.locationType)
dims[0] = inDim
features = ops.CNNLayer(images, dims,
batchNorm = self.batchNorm if config.stemBN else None,
dropout = self.dropouts["stem"],
kernelSizes = config.stemKernelSizes,
strides = config.stemStrideSizes)
if config.stemGridRnn:
features = ops.multigridRNNLayer(features, self.H, self.W, outDim)
if config.baselineNew or (not config.useBaseline):
features = tf.reshape(features, (self.batchSize, -1, outDim))
return features, outDim
# Embed the question using parametrized word embeddings.
# The embeddings are initialized to the values supplied at class initialization.
def qEmbeddingsOp(self, qIndices, embInit):
with tf.variable_scope("qEmbeddings"):
embInit = tf.to_float(embInit)
embeddingsVar = tf.get_variable("emb", initializer = embInit,
dtype = tf.float32, trainable = (not config.wrdEmbQFixed))
embeddings = tf.concat([tf.zeros((1, config.wrdQEmbDim)), embeddingsVar], axis = 0)
questions = tf.nn.embedding_lookup(embeddings, qIndices)
return questions, embeddings
# Embed answer words
def aEmbeddingsOp(self, aIndices, embInit):
with tf.variable_scope("aEmbeddings"):
if embInit is None:
return None
embInit = tf.to_float(embInit)
embeddings = tf.get_variable("emb", initializer = embInit,
dtype = tf.float32, trainable = (not config.wrdEmbAFixed))
if config.ansFormat == "mc":
answers = tf.nn.embedding_lookup(embeddings, aIndices)
else:
answers = embeddings
return answers
def vocabEmbeddings(self, embInit, name):
with tf.variable_scope("vocabEmbeddings" + name):
embInit = tf.to_float(embInit)
embeddings = tf.get_variable("emb", initializer = embInit,
dtype = tf.float32, trainable = (not config.semanticFixEmbs))
return embeddings
# Embed question and answer words with tied embeddings
def qaEmbeddingsOp(self, qIndices, aIndices, embInit):
questions, embeddings = self.qEmbeddingsOp(qIndices, embInit)
answers = tf.nn.embedding_lookup(embeddings, aIndices)
return questions, answers, embeddings
'''
Embed question (and optionally answer) using parametrized word embeddings.
The embeddings are initialized to the values supplied at class initialization.
'''
def embeddingsOp(self, qIndices, aIndices, embInit):
# nullWord = tf.tile(tf.expand_dims(nullWord, axis = 0), [self.batchSize, 1, 1])
if config.ansEmbMod == "SHARED":
if config.ansFormat == "oe":
#if aIndices is None:
aIndices = embInit["oeAnswers"]
questions, answers, qaEmbeddings = self.qaEmbeddingsOp(qIndices, aIndices, embInit["qa"])
else:
questions, qEmbeddings = self.qEmbeddingsOp(qIndices, embInit["q"])
answers = self.aEmbeddingsOp(aIndices, embInit["a"])
if config.ansFormat == "oe" and config.ansEmbMod != "NON":
answers = tf.tile(tf.expand_dims(answers, axis = 0), [self.batchSize, 1, 1])
return questions, answers # , embeddings
'''
The Question Input Unit embeds the questions to randomly-initialized word vectors,
and runs a recurrent bidirectional encoder (RNN/LSTM etc.) that gives back
vector representations for each question (the RNN final hidden state), and
representations for each of the question words (the RNN outputs for each word).
The method uses bidirectional LSTM, by default.
Optionally projects the outputs of the LSTM (with linear projection /
optionally with some activation).
Args:
questions: question word embeddings
[batchSize, questionLength, wordEmbDim]
questionLengths: the question lengths.
[batchSize]
projWords: True to apply projection on RNN outputs.
projQuestion: True to apply projection on final RNN state.
projDim: projection dimension in case projection is applied.
Returns:
Contextual Words: RNN outputs for the words.
[batchSize, questionLength, ctrlDim]
Vectorized Question: Final hidden state representing the whole question.
[batchSize, ctrlDim]
'''
def encoder(self, questions, questionLengths, projWords = False,
projQuestion = False, projDim = None):
with tf.variable_scope("encoder"):
# variational dropout option
varDp = None
if config.encVariationalDropout:
varDp = {"stateDp": self.dropouts["stateInput"],
"inputDp": self.dropouts["encInput"],
"inputSize": config.wrdQEmbDim}
# rnns
for i in range(config.encNumLayers):
questionCntxWords, vecQuestions = ops.RNNLayer(questions, questionLengths,
config.encDim, bi = config.encBi, cellType = config.encType,
dropout = self.dropouts["encInput"], varDp = varDp, name = "rnn%d" % i)
# dropout for the question vector
vecQuestions = tf.nn.dropout(vecQuestions, self.dropouts["question"])
# projection of encoder outputs
if projWords:
questionCntxWords = ops.linear(questionCntxWords, config.encDim, projDim,
name = "projCW")
if projQuestion:
vecQuestions = ops.linear(vecQuestions, config.encDim, projDim,
act = config.encProjQAct, name = "projQ")
return questionCntxWords, vecQuestions
'''
Stacked Attention Layer for baseline. Computes interaction between images
and the previous memory, and casts it back to compute attention over the
image, which in turn is summed up with the previous memory to result in the
new one.
Args:
images: input image.
[batchSize, H * W, inDim]
memory: previous memory value
[batchSize, inDim]
inDim: inputs dimension
hDim: hidden dimension to compute interactions between image and memory
Returns the new memory value.
'''
def baselineAttLayer(self, images, memory, inDim, hDim, name = "", reuse = None):
with tf.variable_scope("attLayer" + name, reuse = reuse):
# projImages = ops.linear(images, inDim, hDim, name = "projImage")
# projMemory = tf.expand_dims(ops.linear(memory, inDim, hDim, name = "projMemory"), axis = -2)
# if config.saMultiplicative:
# interactions = projImages * projMemory
# else:
# interactions = tf.tanh(projImages + projMemory)
interactions, hDim = ops.mul(images, memory, inDim, proj = {"dim": hDim, "shared": False},
interMod = config.baselineAttType)
attention = ops.inter2att(interactions, hDim, mask = self.imagesObjectNum)
summary = ops.att2Smry(attention, images)
newMemory = memory + summary
return newMemory
'''
Baseline approach:
If baselineAtt is True, applies several layers (baselineAttNumLayers)
of stacked attention to the image and memory, where the memory is initialized
to the question vector. See baselineAttLayer for further details.
Otherwise, computes result output features based on image representation
(baselineCNN), or question (baselineLSTM) or both.
Args:
vecQuestions: question vector representation
[batchSize, questionDim]
questionDim: dimension of question vectors
images: (flattened) image representation
[batchSize, imageDim]
imageDim: dimension of image representations.
hDim: hidden dimension to compute interactions between image and memory
(for attention-based baseline).
Returns final features to use in later classifier.
[batchSize, outDim] (out dimension depends on baseline method)
'''
def baseline(self, vecQuestions, questionDim, images, imageDim, hDim):
with tf.variable_scope("baseline"):
if config.baselineAtt:
memory = ops.linear(vecQuestions, questionDim, hDim, name = "qProj")
images = ops.linear(images, imageDim, hDim, name = "iProj")
for i in range(config.baselineAttNumLayers):
memory = self.baselineAttLayer(images, memory, hDim, hDim,
name = "baseline%d" % i)
memDim = hDim
else:
if config.imageObjects:
cff = tf.get_variable("cff", shape = (imageDim, ), initializer = tf.random_normal_initializer())
interactions, hDim = ops.mul(images, cff, imageDim)
attention = ops.inter2att(interactions, hDim, mask = self.imagesObjectNum)
images = ops.att2Smry(attention, images)
else:
images, imageDim = ops.linearizeFeatures(images, self.H, self.W,
imageDim, projDim = config.baselineProjDim)
if config.baselineLSTM and config.baselineCNN:
memory = tf.concat([vecQuestions, images], axis = -1)
memDim = questionDim + imageDim
elif config.baselineLSTM:
memory = vecQuestions
memDim = questionDim
else: # config.baselineCNN
memory = images
memDim = imageDim
return memory, memDim
'''
Runs the MAC recurrent network to perform the reasoning process.
Initializes a MAC cell and runs netLength iterations.
Currently it passes the question and knowledge base to the cell during
its creation, such that it doesn't need to interact with it through
inputs / outputs while running. The recurrent computation happens
by working iteratively over the hidden (control, memory) states.
Args:
images: flattened image features. Used as the "Knowledge Base".
(Received by default model behavior from the Image Input Units).
[batchSize, H * W, memDim]
vecQuestions: vector questions representations.
(Received by default model behavior from the Question Input Units
as the final RNN state).
[batchSize, ctrlDim]
questionWords: question word embeddings.
[batchSize, questionLength, ctrlDim]
questionCntxWords: question contextual words.
(Received by default model behavior from the Question Input Units
as the series of RNN output states).
[batchSize, questionLength, ctrlDim]
questionLengths: question lengths.
[batchSize]
Returns the final control state and memory state resulted from the network.
([batchSize, ctrlDim], [bathSize, memDim])
'''
def MACnetwork(self, images, vecQuestions, questionWords, questionCntxWords,
questionLengths, name = "", reuse = None):
with tf.variable_scope("MACnetwork" + name, reuse = reuse):
self.macCell = MACCell(
vecQuestions = vecQuestions,
questionWords = questionWords,
questionCntxWords = questionCntxWords,
questionLengths = questionLengths,
knowledgeBase = images,
kbSize = self.imagesObjectNum,
memoryDropout = self.dropouts["memory"],
readDropout = self.dropouts["read"],
writeDropout = self.dropouts["write"],
controlDropoutPre = self.dropouts["controlPre"],
controlDropoutPost = self.dropouts["controlPost"],
wordDropout = self.dropouts["word"],
vocabDropout = self.dropouts["vocab"],
objectDropout = self.dropouts["object"],
# qDropoutMAC = self.qDropoutMAC,
batchSize = self.batchSize,
train = self.train,
reuse = reuse)
state = self.macCell.zero_state(self.batchSize, tf.float32)
none = tf.zeros((self.batchSize, 1), dtype = tf.float32)
for i in range(config.netLength):
self.macCell.iteration = i
_, state = self.macCell(none, state)
finalControl = state.control
finalMemory = state.memory
return finalControl, finalMemory
'''
Output Unit (step 1): chooses the inputs to the output classifier.
By default the classifier input will be the final memory state of the MAC network.
If outQuestion is True, concatenate the question representation to that.
If outImage is True, concatenate the image flattened representation.
Args:
memory: (final) memory state of the MAC network.
[batchSize, memDim]
vecQuestions: question vector representation.
[batchSize, ctrlDim]
images: image features.
[batchSize, H, W, imageInDim]
imageInDim: images dimension.
Returns the resulted features and their dimension.
'''
def outputOp(self, memory, control, vecQuestions, images, imageInDim):
with tf.variable_scope("outputUnit"):
features = memory
dim = config.memDim
if config.outQuestion:
q = vecQuestions
eQ = ops.linear(q, config.ctrlDim, config.memDim, name = "outQuestion")
features, dim = ops.concat(features, eQ, config.memDim, mul = config.outQuestionMul)
# assumes imageObjects False
if config.outImage:
images, imagesDim = ops.linearizeFeatures(images, self.H, self.W, self.imageInDim,
outputDim = config.outImageDim)
images = ops.linear(images, config.memDim, config.outImageDim, name = "outImage")
features = tf.concat([features, images], axis = -1)
dim += config.outImageDim
return features, dim
'''
Output Unit (step 2): Computes the logits for the answers. Passes the features
through a fully-connected network to get the logits over the possible answers.
Optionally uses answer word embeddings in computing the logits (by default, it doesn't).
Args:
features: features used to compute logits
[batchSize, inDim]
inDim: features dimension
aEmbedding: supported word embeddings for answer words in case answerMod is not NON.
Optionally computes logits by computing dot-product with answer embeddings.
Returns: the computed logits.
[batchSize, answerWordsNum]
'''
# in mc has to be ansMod not NON
def classifier(self, features, inDim, choices = None, choicesNums = None):
with tf.variable_scope("classifier"):
outDim = config.answerWordsNum
dims = [inDim] + config.outClassifierDims + [outDim]
if config.answerMod != "NON":
dims[-1] = config.wrdAEmbDim
logits = ops.FCLayer(features, dims,
batchNorm = self.batchNorm if config.outputBN else None,
dropout = self.dropouts["output"])
if config.answerMod != "NON":
logits = ops.gatedAct(config.outAct, gate = config.outGate)(logits)
logits = tf.nn.dropout(logits, self.dropouts["output"])
concat = {"x": config.answerBias}
interactions, interDim = ops.mul(choices, logits, dims[-1], interMod = config.answerMod, concat = concat)
logits = ops.inter2logits(interactions, interDim, sumMod = config.answerSumMod)
if config.ansFormat == "oe":
logits += ops.getBias((outDim, ), "ans")
else:
logits = ops.expMask(logits, choicesNums)
return logits
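# Aggregates the per-question answer-frequency lists into dense counts over the answer
# vocabulary: one-hot encodes each listed answer, masks out padded entries beyond
# answerFreqNums, and sums over the list dimension to get [batchSize, answerWordsNum].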
def aggregateFreqs(self, answerFreqs, answerFreqNums):
if answerFreqs is None:
return None
answerFreqs = tf.one_hot(answerFreqs, config.answerWordsNum) # , axis = -1
mask = tf.sequence_mask(answerFreqNums, maxlen = config.AnswerFreqMaxNum)
mask = tf.expand_dims(tf.to_float(mask), axis = -1)
answerFreqs *= mask
answerFreqs = tf.reduce_sum(answerFreqs, axis = 1)
return answerFreqs
# Computes mean cross entropy loss between logits and answers.
def addAnswerLossOp(self, logits, answers, answerFreqs, answerFreqNums):
if config.lossType == "softmax": # or config.ansFormat == "mc":
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = answers, logits = logits)
elif config.lossType == "svm":
answers = tf.one_hot(answers, config.answerWordsNum) # , axis = -1
losses = ops.hingeLoss(labels = answers, logits = logits)
elif config.lossType == "probSoftmax":
answerFreqs = tf.to_float(answerFreqs)
answerDist = answerFreqs / tf.expand_dims(tf.to_float(answerFreqNums), axis = -1)
losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels = answerDist, logits = logits)
if config.weightedSoftmax:
weights = tf.to_float(answerFreqNums) / float(config.AnswerFreqMaxNum)
losses *= weights
elif config.lossType == "sigmoid":
if config.dataset == "VQA":
answerFreqs = tf.to_float(answerFreqs)
answerDist = answerFreqs / float(config.AnswerFreqMaxNum)
else:
answerDist = tf.one_hot(answers, config.answerWordsNum)
if config.lossWeight == 1:
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels = answerDist, logits = logits)
else:
print("weighted sigmoid")
losses = tf.nn.weighted_cross_entropy_with_logits(targets = answerDist, logits = logits,
pos_weight = config.lossWeight)
if config.ansWeighting or config.ansWeightingRoot:
losses *= self.answerDict.weights
losses = tf.reduce_sum(losses, axis = -1)
else:
print("non-identified loss")
loss = tf.reduce_mean(losses)
self.answerLossList.append(loss)
return loss, losses
# Computes predictions (by finding maximal logit value, corresponding to highest probability)
# and mean accuracy between predictions and answers.
def addPredOp(self, logits, answers): # , answerFreqs
with tf.variable_scope("pred"):
if config.ansFormat == "oe":# and config.ansAddUnk:
mask = tf.to_float(tf.sequence_mask([2], config.answerWordsNum)) * (-1e30) # 1 or 2?
logits += mask
preds = tf.to_int32(tf.argmax(logits, axis = -1)) # tf.nn.softmax(
if config.dataset == "VQA" and config.ansFormat == "oe":
agreeing = tf.reduce_sum(tf.one_hot(preds, config.answerWordsNum) * self.answerFreqs, axis = -1)
corrects = tf.minimum(agreeing * 0.3, 1.0) # /3 ?
else:
corrects = tf.to_float(tf.equal(preds, answers))
correctNum = tf.reduce_sum(corrects)
acc = tf.reduce_mean(corrects)
self.correctNumList.append(correctNum)
self.answerAccList.append(acc)
return preds, corrects, correctNum
# Creates optimizer (adam)
def addOptimizerOp(self):
with tf.variable_scope("trainAddOptimizer"):
self.globalStep = tf.Variable(0, dtype = tf.int32, trainable = False, name = "globalStep") # init to 0 every run?
optimizer = tf.train.AdamOptimizer(learning_rate = self.lr)
if config.subsetOpt:
self.subsetOptimizer = tf.train.AdamOptimizer(learning_rate = self.lr * config.subsetOptMult)
return optimizer
'''
Computes gradients for all variables or subset of them, based on provided loss,
using optimizer.
'''
def computeGradients(self, optimizer, loss, trainableVars = None): # tf.trainable_variables()
with tf.variable_scope("computeGradients"):
if config.trainSubset:
trainableVars = []
allVars = tf.trainable_variables()
for var in allVars:
if any((s in var.name) for s in config.varSubset):
trainableVars.append(var)
if config.subsetOpt:
trainableVars = []
subsetVars = []
allVars = tf.trainable_variables()
for var in allVars:
if any((s in var.name) for s in config.varSubset):
subsetVars.append(var)
else:
trainableVars.append(var)
gradients_vars = optimizer.compute_gradients(loss, trainableVars)
if config.subsetOpt:
self.subset_gradients_vars = self.subsetOptimizer.compute_gradients(loss, subsetVars)
self.subset_gradientVarsList.append(self.subset_gradients_vars)
return gradients_vars
'''
Apply gradients. Optionally clip them, and update exponential moving averages
for parameters.
'''
def addTrainingOp(self, optimizer, gradients_vars):
with tf.variable_scope("train"):
gradients, variables = zip(*gradients_vars)
norm = tf.global_norm(gradients)
# gradient clipping
if config.clipGradients:
clippedGradients, _ = tf.clip_by_global_norm(gradients, config.gradMaxNorm, use_norm = norm)
gradients_vars = zip(clippedGradients, variables)
# updates ops (for batch norm) and train op
updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(updateOps):
train = optimizer.apply_gradients(gradients_vars, global_step = self.globalStep)
if config.subsetOpt:
subsetTrain = self.subsetOptimizer.apply_gradients(self.subset_gradientVarsAll)
train = tf.group(train, subsetTrain)
# exponential moving average
if config.useEMA:
ema = tf.train.ExponentialMovingAverage(decay = config.emaDecayRate)
maintainAveragesOp = ema.apply(tf.trainable_variables())
with tf.control_dependencies([train]):
trainAndUpdateOp = tf.group(maintainAveragesOp)
train = trainAndUpdateOp
self.emaDict = ema.variables_to_restore()
return train, norm
def averageAcrossTowers(self, gpusNum):
if gpusNum == 1:
self.lossAll = self.lossList[0]
self.answerLossAll = self.answerLossList[0]
self.answerAccAll = self.answerAccList[0]
self.correctNumAll = self.correctNumList[0]
self.predsAll = self.predsList[0]
self.gradientVarsAll = self.gradientVarsList[0]
if config.subsetOpt:
self.subset_gradientVarsAll = self.subset_gradientVarsList[0]
else:
self.lossAll = tf.reduce_mean(tf.stack(self.lossList, axis = 0), axis = 0)
self.answerLossAll = tf.reduce_mean(tf.stack(self.answerLossList, axis = 0), axis = 0)
self.answerAccAll = tf.reduce_mean(tf.stack(self.answerAccList, axis = 0), axis = 0)
self.correctNumAll = tf.reduce_sum(tf.stack(self.correctNumList, axis = 0), axis = 0)
self.predsAll = tf.concat(self.predsList, axis = 0)
self.gradientVarsAll = []
for grads_var in zip(*self.gradientVarsList):
gradients, variables = zip(*grads_var)
if gradients[0] is not None:
avgGradient = tf.reduce_mean(tf.stack(gradients, axis = 0), axis = 0)
else:
avgGradient = None
var = variables[0]
grad_var = (avgGradient, var)
self.gradientVarsAll.append(grad_var)
if config.subsetOpt:
self.subset_gradientVarsAll = []
for grads_var in zip(*self.subset_gradientVarsList):
gradients, variables = zip(*grads_var)
if gradients[0] is not None:
avgGradient = tf.reduce_mean(tf.stack(gradients, axis = 0), axis = 0)
else:
avgGradient = None
var = variables[0]
grad_var = (avgGradient, var)
self.subset_gradientVarsAll.append(grad_var)
def trim2DVectors(self, vectors, vectorsLengths):
maxLength = np.max(vectorsLengths)
return vectors[:,:maxLength]
def trimData(self, data):
data["questions"] = self.trim2DVectors(data["questions"], data["questionLengths"])
return data
'''
Builds predictions JSON, by adding the model's predictions and attention maps
back to the original data JSON.
'''
def buildPredsList(self, data, predictions, attentionMaps):
predsList = []
for i, instance in enumerate(data["instances"]):
if predictions is not None:
if config.ansFormat == "oe":
pred = self.answerDict.decodeId(predictions[i])
else:
pred = instance["choices"][predictions[i]]
instance["prediction"] = pred
# aggregate np attentions of instance i in the batch into 2d list
attMapToList = lambda attMap: [step[i].tolist() for step in attMap]
if attentionMaps is not None:
attentions = {k: attMapToList(attentionMaps[k]) for k in attentionMaps}
instance["attentions"] = attentions
predsList.append(instance)
return predsList
'''
Processes a batch of data with the model.
Args:
sess: TF session
data: Data batch. Dictionary that contains numpy array for:
questions, questionLengths, answers.
See preprocess.py for further information of the batch structure.
images: batch of image features, as numpy array. images["images"] contains
[batchSize, channels, h, w]
train: True to run batch for training.
getAtt: True to return attention maps for question and image (and optionally
self-attention and gate values).
Returns results: e.g. loss, accuracy, running time.
'''
def runBatch(self, sess, data, images, train, getPreds = False, getAtt = False, allData = None):
batchSizeOp = self.batchSizeAll
indicesOp = self.noOp
trainOp = self.trainOp if train else self.noOp
gradNormOp = self.gradNorm if train else self.noOp
predsOp = (self.predsAll, self.correctNumAll, self.answerAccAll)
attOp = self.macCell.attentions if not config.useBaseline else (self.attentions if config.baselineNew else self.noOp)
time0 = time.time()
feed = self.createFeedDict(data, images, train)
time1 = time.time()
batchSize, indices, _, loss, predsInfo, gradNorm, attentionMaps = sess.run(
[batchSizeOp, indicesOp, trainOp, self.lossAll, predsOp, gradNormOp, attOp],
feed_dict = feed)
time2 = time.time()
predsList = []
if getPreds:
if data is None:
data = [allData["instances"][i] for i in indices]
predsList = self.buildPredsList(data, predsInfo[0], attentionMaps if getAtt else None)
return {"loss": loss,
"correctNum": predsInfo[1],
"acc": predsInfo[2],
"preds": predsList,
"gradNorm": gradNorm if train else -1,
"readTime": time1 - time0,
"trainTime": time2 - time1,
"batchSize": batchSize}
def build(self):
self.addPlaceholders()
self.optimizer = self.addOptimizerOp()
self.gradientVarsList = []
if config.subsetOpt:
self.subset_gradientVarsList = []
self.lossList = []
self.answerLossList = []
self.correctNumList = []
self.answerAccList = []
self.predsList = []
with tf.variable_scope("macModel"):
for i in range(config.gpusNum):
with tf.device("/gpu:{}".format(i)):
with tf.name_scope("tower{}".format(i)) as scope:
self.initTowerBatch(i, config.gpusNum, self.batchSizeAll)
self.loss = tf.constant(0.0)
# embed questions words (and optionally answer words)
questionWords, choices = self.embeddingsOp(self.questionIndices,
self.choicesIndices, self.embeddingsInit)
projWords = projQuestion = ((config.encDim != config.ctrlDim) or config.encProj)
questionCntxWords, vecQuestions = self.encoder(questionWords,
self.questionLengths, projWords, projQuestion, config.ctrlDim)
# Image Input Unit (stem)
imageFeatures, imageDim = self.stem(self.images, self.imageInDim, config.memDim)
# baseline model
if config.useBaseline:
# inpImg = imageFeatures if config.baselineNew else self.images
# inpDim = imageDim if config.baselineNew else self.imageInDim
output, dim = self.baseline(vecQuestions, config.ctrlDim,
imageFeatures, imageDim, config.attDim) # self.images
# MAC model
else:
finalControl, finalMemory = self.MACnetwork(imageFeatures, vecQuestions,
questionWords, questionCntxWords, self.questionLengths)
# Output Unit - step 1 (preparing classifier inputs)
output, dim = self.outputOp(finalMemory, finalControl, vecQuestions,
self.images, self.imageInDim)
# Output Unit - step 2 (classifier)
logits = self.classifier(output, dim, choices, self.choicesNums)
# compute loss, predictions, accuracy
if config.dataset == "VQA":
self.answerFreqs = self.aggregateFreqs(self.answerFreqLists, self.answerFreqNums)
else:
self.answerFreqs = None
self.answerFreqNums = None
answerLoss, self.losses = self.addAnswerLossOp(logits, self.answerIndices,
self.answerFreqs, self.answerFreqNums)
self.preds, self.corrects, self.correctNum = self.addPredOp(logits, self.answerIndices) # ,self.answerFreqs
self.loss += answerLoss
self.predsList.append(self.preds)
self.lossList.append(self.loss)
# compute gradients
gradient_vars = self.computeGradients(self.optimizer, self.loss, trainableVars = None)
self.gradientVarsList.append(gradient_vars)
# reuse variables in next towers
tf.get_variable_scope().reuse_variables()
self.averageAcrossTowers(config.gpusNum)
self.trainOp, self.gradNorm = self.addTrainingOp(self.optimizer, self.gradientVarsAll)
self.noOp = tf.no_op()
|
the-stack_0_17249 | # Copyright 2017 Netflix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.github.team
:platform: Unix
:synopsis: Watcher for GitHub Organization Teams.
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <[email protected]>
"""
from security_monkey import app
from security_monkey.common.github.util import get_github_creds, iter_org, strip_url_fields
from security_monkey.datastore import Account
from security_monkey.decorators import record_exception
from security_monkey.exceptions import InvalidResponseCodeFromGitHubError
from security_monkey.watcher import Watcher, ChangeItem
import requests
GITHUB_URL = "https://api.github.com/"
class GitHubTeam(Watcher):
index = 'team'
i_am_singular = 'team'
i_am_plural = 'teams'
account_type = 'GitHub'
def __init__(self, accounts=None, debug=False):
super(GitHubTeam, self).__init__(accounts=accounts, debug=debug)
self.honor_ephemerals = True
self.ephemeral_paths = []
self.github_creds = get_github_creds(self.accounts)
def slurp(self):
@record_exception(source="{index}-watcher".format(index=self.index))
def fetch_org_teams(**kwargs):
account = Account.query.filter(Account.name == kwargs["account_name"]).first()
item_list = []
# Fetch teams:
app.logger.debug("Fetching organization teams for: {}".format(account.identifier))
teams = strip_url_fields(self.list_org_teams(account.identifier))
for team in teams:
item_list.append(GitHubTeamItem(
account=account.name,
name=team["name"],
arn="{}/team/{}".format(account.identifier, team["slug"]),
config=team,
source_watcher=self
))
return item_list, kwargs["exception_map"]
@iter_org(orgs=self.accounts)
def slurp_items(**kwargs):
# Are we skipping this org?
if self.check_ignore_list(kwargs["account_name"]):
app.logger.debug("Skipping ignored account: {}".format(kwargs["account_name"]))
return [], kwargs["exception_map"]
# Exception handling complexities...
results = fetch_org_teams(**kwargs)
if not results:
return [], kwargs["exception_map"]
return results
items, exc = slurp_items(index=self.index)
return items, exc
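# Lists all teams of a GitHub organization, paginating until the response no longer
# advertises a "last" link and raising InvalidResponseCodeFromGitHubError on non-200s.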
def list_org_teams(self, org):
headers = {
'Authorization': 'token {}'.format(self.github_creds[org])
}
params = {
"page": 1,
}
done = False
teams = []
while not done:
url = "{}orgs/{}/teams".format(GITHUB_URL, org)
result = requests.get(url, headers=headers, params=params)
if result.status_code != 200:
raise InvalidResponseCodeFromGitHubError(org, result.status_code)
if not result.links.get("last"):
done = True
else:
params["page"] += 1
result_json = result.json()
teams += result_json
return teams
class GitHubTeamItem(ChangeItem):
def __init__(self, account=None, name=None, arn=None, config=None, source_watcher=None):
super(GitHubTeamItem, self).__init__(index=GitHubTeam.index,
region="universal",
account=account,
name=name,
arn=arn,
new_config=config if config else {},
source_watcher=source_watcher)
|
the-stack_0_17250 | ####
# MQTT Publish
#
# Reads the temperature sensor, button and RFID reader and sends the data to an MQTT broker
import time
from machine import I2C, Pin, SoftI2C, SoftSPI
from lib.sensors.bmp180 import BMP180
from lib.sensors.mfrc522 import MFRC522
from lib.config import *
from umqtt.robust import MQTTClient
import ubinascii
import machine
import micropython
import network
client_id = ubinascii.hexlify(machine.unique_id())
# Topics
topicTEMP = b"iotkit/sensor"
topicALERT = b"iotkit/alert"
topicRFID = b"iotkit/rfid"
topicSERVO = b"iotkit/servo"
# MQTT broker
hostname = "cloud.tbz.ch"
port = 1883
# Classification
cls = ( "low", "middle", "high" )
type = 0
# Temperature sensor
bus = SoftI2C(sda=Pin(DEFAULT_IOTKIT_I2C_SDA), scl=Pin(DEFAULT_IOTKIT_I2C_SCL))
bmp180 = BMP180(bus)
bmp180.oversample_sett = 2
bmp180.baseline = 101325
# RFID Reader
sck = Pin(DEFAULT_IOTKIT_SPI_SCLK)
mosi = Pin(DEFAULT_IOTKIT_SPI_MOSI)
miso = Pin(DEFAULT_IOTKIT_SPI_MISO)
spi = SoftSPI(baudrate=100000, polarity=0, phase=0, sck=sck, mosi=mosi, miso=miso)
sda = Pin(DEFAULT_IOTKIT_SPI_SS, Pin.OUT)
rdr = MFRC522(spi, sda)
# Button
button = Pin(DEFAULT_IOTKIT_BUTTON1, Pin.IN)
# MQTT Subscribe
def sub_cb(topic, msg):
print((topic, msg))
if topic == b'notification' and msg == b'received':
print('ESP received hello message')
# MQTT Login
def connect_and_subscribe():
global client_id, hostname, port, topicSERVO
client = MQTTClient(client_id, hostname, port)
client.set_callback(sub_cb)
client.connect()
client.subscribe(topicSERVO)
print('Connected to %s MQTT broker, subscribed to %s topic' % (hostname, topicSERVO))
return client
# MQTT Restart
def restart_and_reconnect():
print('Failed to connect to MQTT broker. Reconnecting...')
time.sleep(10)
machine.reset()
### Main program
counter = 1
try:
client = connect_and_subscribe()
except OSError as e:
restart_and_reconnect()
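# Main loop: poll the broker for servo messages, publish a temperature/pressure reading
# each second (cycling through the low/middle/high classifications), publish an alert
# when the button input reads 0, and publish the UID of any RFID tag that is presented.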
while True:
try:
client.check_msg()
# Temperature
if counter % 3 == 1:
msg = "0xBC," + str(bmp180.temperature - 5) + "," + str(bmp180.pressure / 1000) + ",low"
if counter % 3 == 2:
msg = "0xBC," + str(bmp180.temperature) + "," + str(bmp180.pressure / 1000) + ",middle"
if counter % 3 == 0:
msg = "0xBC," + str(bmp180.temperature + 5) + "," + str(bmp180.pressure / 1000) + ",high"
client.publish(topicTEMP, msg)
print( topicTEMP, counter, msg )
counter = counter + 1
# Button starts the BPMN process
if button.value() == 0:
client.publish(topicALERT, "alert")
print( topicALERT, counter, "alert" )
uid = ""
(stat, tag_type) = rdr.request(rdr.REQIDL)
if stat == rdr.OK:
(stat, raw_uid) = rdr.anticoll()
if stat == rdr.OK:
uid = ("0x%02x%02x%02x%02x" % (raw_uid[0], raw_uid[1], raw_uid[2], raw_uid[3]))
client.publish(topicRFID, uid )
print(topicRFID, counter, uid)
time.sleep( 1.0 )
except OSError as e:
restart_and_reconnect() |
the-stack_0_17251 | #!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Monks and doors problem in Z3
#
# From http://user.it.uu.se/~rolandb/LP/gammal/960615_facit.ps
# """
# There is a room with four doors and eight monks. One or more of
# the doors may be exit. Each monk is either telling a lie or the truth.
#
# The monks make the following statements:
# Monk 1: Door A is the exit.
# Monk 2: At least one of the doors B and C is the exit.
# Monk 3: Monk 1 and Monk 2 are telling the truth.
# Monk 4: Doors A and B are both exits.
# Monk 5: Doors A and C are both exits.
# Monk 6: Either Monk 4 or Monk 5 is telling the truth.
# Monk 7: If Monk 3 is telling the truth, so is Monk 6.
# Monk 8: If Monk 7 and Monk 8 are telling the truth, so is Monk 1.
#
# Which door is an exit no matter who is a liar and who is telling the
# truth.
# """
#
# Answer: Door A is an exit.
# And monks 1, 7, and 8 are telling the truth.
#
# This Z3 model was written by Hakan Kjellerstrand ([email protected])
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
# variables
A,B,C,D = Bools("A B C D") # Doors
doors = [A,B,C,D]
M1,M2,M3,M4,M5,M6,M7,M8 = Bools("M1 M2 M3 M4 M5 M6 M7 M8") # monks
monks = [M1,M2,M3,M4,M5,M6,M7,M8]
# constraints
# Monk 1: Door A is the exit.
sol.add(M1 == A)
# Monk 2: At least one of the doors B and C is the exit.
sol.add(M2 == (If(B,1,0) + If(C,1,0) >= 1))
# Monk 3: Monk 1 and Monk 2 are telling the truth.
sol.add(M3 == And(M1, M2))
# Monk 4: Doors A and B are both exits.
sol.add(M4 == And(A,B))
# Monk 5: Doors A and C are both exits.
sol.add(M5 == And(A, C))
# Monk 6: Either Monk 4 or Monk 5 is telling the truth.
sol.add(M6 == Or(M4,M5))
# Monk 7: If Monk 3 is telling the truth, so is Monk 6.
sol.add(M7 == Implies(M3, M6))
# Monk 8: If Monk 7 and Monk 8 are telling the truth, so is Monk 1.
sol.add(M8 == (Implies(And(M7, M8),M1)))
# Exactly one door is an exit.
sol.add(If(A,1,0) + If(B,1,0) + If(C,1,0) + If(D,1,0) == 1)
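# Enumerate every satisfying assignment: getDifferentSolution (from z3_utils_hakank)
# presumably blocks the current model so that the next check() finds a different one.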
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print("doors:", [mod.eval(D) for D in doors])
print("monks:", [mod.eval(M) for M in monks])
getDifferentSolution(sol,mod,doors,monks)
print("num_solutions:", num_solutions)
|
the-stack_0_17252 | import random
from collections import namedtuple
from threading import RLock
import numpy as np
Transition = namedtuple("Transition", ("s", "a", "r", "s_", "done"))
class ServerBuffer:
def __init__(self, capacity, observation_shapes, action_size):
self.size = capacity
self.num_in_buffer = 0
self.stored_in_buffer = 0
self.num_parts = len(observation_shapes)
self.obs_shapes = observation_shapes
self.act_shape = (action_size,)
# initialize all np.arrays which store necessary data
self.observations = []
for part_id in range(self.num_parts):
obs = np.empty((self.size, ) + self.obs_shapes[part_id], dtype=np.float32)
self.observations.append(obs)
self.actions = np.empty((self.size, ) + self.act_shape, dtype=np.float32)
self.rewards = np.empty((self.size, ), dtype=np.float32)
self.dones = np.empty((self.size, ), dtype=np.bool)
self.td_errors = np.empty((self.size, ), dtype=np.float32)
self.pointer = 0
self._store_lock = RLock()
def push_episode(self, episode):
""" episode = [observations, actions, rewards, dones]
observations = [obs_part_1, ..., obs_part_n]
"""
with self._store_lock:
observations, actions, rewards, dones = episode
episode_len = len(actions)
self.stored_in_buffer += episode_len
self.num_in_buffer = min(self.size, self.num_in_buffer + episode_len)
indices = np.arange(self.pointer, self.pointer + episode_len) % self.size
for part_id in range(self.num_parts):
self.observations[part_id][indices] = np.array(observations[part_id])
self.actions[indices] = np.array(actions)
self.rewards[indices] = np.array(rewards)
self.dones[indices] = np.array(dones)
self.td_errors[indices] = np.ones(len(indices))
self.pointer = (self.pointer + episode_len) % self.size
def get_stored_in_buffer(self):
return self.stored_in_buffer
def get_state(self, idx, history_len=1):
""" compose the state from a number (history_len) of observations
"""
state = []
for part_id in range(self.num_parts):
start_idx = idx - history_len + 1
if (start_idx < 0 or np.any(self.dones[start_idx:idx+1])):
s = np.zeros((history_len, ) + self.obs_shapes[part_id], dtype=np.float32)
indices = [idx]
for i in range(history_len-1):
next_idx = (idx-i-1) % self.size
if next_idx >= self.num_in_buffer or self.dones[next_idx]:
break
indices.append(next_idx)
indices = indices[::-1]
s[-len(indices):] = self.observations[part_id][indices]
else:
s = self.observations[part_id][slice(start_idx, idx+1, 1)]
state.append(s)
return state
def get_transition_n_step(self, idx, history_len=1, n_step=1, gamma=0.99):
state = self.get_state(idx, history_len)
next_state = self.get_state((idx + n_step) % self.size, history_len)
cum_reward = 0
indices = np.arange(idx, idx + n_step) % self.size
for num, i in enumerate(indices):
cum_reward += self.rewards[i] * (gamma ** num)
done = self.dones[i]
if done:
break
return state, self.actions[idx], cum_reward, next_state, done, self.td_errors[idx]
def update_td_errors(self, indices, td_errors):
self.td_errors[indices] = td_errors
def get_batch(self, batch_size, history_len=1, n_step=1, gamma=0.99, indices=None):
with self._store_lock:
if indices is None:
indices = random.sample(range(self.num_in_buffer), k=batch_size)
transitions = []
for idx in indices:
transition = self.get_transition_n_step(idx, history_len, n_step, gamma)
transitions.append(transition)
states = []
for part_id in range(self.num_parts):
state = [transitions[i][0][part_id] for i in range(batch_size)]
states.append(state)
actions = [transitions[i][1] for i in range(batch_size)]
rewards = [transitions[i][2] for i in range(batch_size)]
next_states = []
for part_id in range(self.num_parts):
next_state = [transitions[i][3][part_id] for i in range(batch_size)]
next_states.append(next_state)
dones = [transitions[i][4] for i in range(batch_size)]
batch = Transition(
np.array(states, dtype=np.float32),
np.array(actions, dtype=np.float32),
np.array(rewards, dtype=np.float32),
np.array(next_states, dtype=np.float32),
                np.array(dones, dtype=bool)
)
return batch
def get_prioritized_batch(self, batch_size, history_len=1,
n_step=1, gamma=0.99,
priority="proportional", alpha=0.6, beta=1.0):
with self._store_lock:
if priority == "proportional":
p = np.power(np.abs(self.td_errors[:self.num_in_buffer])+1e-6, alpha)
p = p / p.sum()
indices = np.random.choice(range(self.num_in_buffer), size=batch_size, p=p)
probs = p[indices]
is_weights = np.power(self.num_in_buffer * probs, -beta)
is_weights = is_weights / is_weights.max()
batch = self.get_batch(batch_size, history_len, n_step, gamma, indices)
return batch, indices, is_weights
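
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not part of the original module). It shows
# the expected episode layout and a uniform batch draw. The capacity, shapes
# and hyperparameters below are made-up assumptions for demonstration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    buf = ServerBuffer(capacity=1000, observation_shapes=[(4,)], action_size=2)

    ep_len = 20
    observations = [np.random.randn(ep_len, 4).astype(np.float32)]  # one observation part
    actions = np.random.randn(ep_len, 2).astype(np.float32)
    rewards = np.random.randn(ep_len).astype(np.float32)
    dones = np.zeros(ep_len, dtype=bool)
    dones[-1] = True  # mark the end of the episode

    buf.push_episode([observations, actions, rewards, dones])

    batch = buf.get_batch(batch_size=8, history_len=1, n_step=1, gamma=0.99)
    print("states:", batch.s.shape, "actions:", batch.a.shape)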
|
the-stack_0_17254 | import logging
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import Dict, Type
import requests
from requests.exceptions import HTTPError
from datahub.configuration.common import ConfigModel
from datahub.ingestion.api.common import RecordEnvelope, WorkUnit
from datahub.ingestion.api.sink import Sink, SinkReport, WriteCallback
from datahub.metadata import ( # MLFeatureSnapshotClass,
ChartSnapshotClass,
CorpGroupSnapshotClass,
CorpUserSnapshotClass,
DashboardSnapshotClass,
DataProcessSnapshotClass,
DatasetSnapshotClass,
MLModelSnapshotClass,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
logger = logging.getLogger(__name__)
resource_locator: Dict[Type[object], str] = {
ChartSnapshotClass: "charts",
DashboardSnapshotClass: "dashboards",
CorpUserSnapshotClass: "corpUsers",
CorpGroupSnapshotClass: "corpGroups",
DatasetSnapshotClass: "datasets",
DataProcessSnapshotClass: "dataProcesses",
MLModelSnapshotClass: "mlModels",
}
def _rest_li_ify(obj):
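    """Recursively rewrite the avro-style dict produced by `.to_obj()`:
    unwrap single-key "string"/"array" unions, replace the
    `com.linkedin.pegasus2avro.` namespace with `com.linkedin.`, and drop
    None-valued fields.
    """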
if isinstance(obj, (dict, OrderedDict)):
if len(obj.keys()) == 1:
key = list(obj.keys())[0]
value = obj[key]
if key.find("com.linkedin.pegasus2avro.") >= 0:
new_key = key.replace("com.linkedin.pegasus2avro.", "com.linkedin.")
return {new_key: _rest_li_ify(value)}
elif key == "string" or key == "array":
return value
new_obj = {}
for key, value in obj.items():
if value is not None:
new_obj[key] = _rest_li_ify(value)
return new_obj
elif isinstance(obj, list):
new_obj = [_rest_li_ify(item) for item in obj]
return new_obj
return obj
class DatahubRestSinkConfig(ConfigModel):
"""Configuration class for holding connectivity to datahub gms"""
server: str = "http://localhost:8080"
@dataclass
class DatahubRestSink(Sink):
config: DatahubRestSinkConfig
report: SinkReport = field(default_factory=SinkReport)
@classmethod
def create(cls, config_dict, ctx):
config = DatahubRestSinkConfig.parse_obj(config_dict)
return cls(ctx, config)
def get_ingest_endpoint(self, mce: MetadataChangeEvent):
snapshot_type = type(mce.proposedSnapshot)
snapshot_resource = resource_locator.get(snapshot_type, None)
if not snapshot_resource:
raise ValueError(
f"Failed to locate a snapshot resource for type {snapshot_type}"
)
return f"{self.config.server}/{snapshot_resource}?action=ingest"
def handle_work_unit_start(self, workunit: WorkUnit) -> None:
pass
def handle_work_unit_end(self, workunit: WorkUnit) -> None:
pass
def write_record_async(
self,
record_envelope: RecordEnvelope[MetadataChangeEvent],
write_callback: WriteCallback,
):
headers = {"X-RestLi-Protocol-Version": "2.0.0"}
mce = record_envelope.record
url = self.get_ingest_endpoint(mce)
raw_mce_obj = mce.proposedSnapshot.to_obj()
mce_obj = _rest_li_ify(raw_mce_obj)
snapshot = {"snapshot": mce_obj}
try:
response = requests.post(url, headers=headers, json=snapshot)
# with open('data.json', 'w') as outfile:
# json.dump(serialized_snapshot, outfile)
response.raise_for_status()
self.report.report_record_written(record_envelope)
write_callback.on_success(record_envelope, {})
except HTTPError as e:
info = response.json()
self.report.report_failure({"e": e, "info": info})
write_callback.on_failure(record_envelope, e, info)
except Exception as e:
self.report.report_failure({"e": e})
write_callback.on_failure(record_envelope, e, {})
def get_report(self) -> SinkReport:
return self.report
def close(self):
pass
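
# Usage sketch (illustrative only; not part of the original module). It shows
# what _rest_li_ify does to the avro-style dicts produced by `.to_obj()`; the
# payload below is a hand-written toy, not a real snapshot.
if __name__ == "__main__":
    toy = {
        "com.linkedin.pegasus2avro.dataset.DatasetProperties": {
            "description": {"string": "toy example"},
            "tags": {"array": ["a", "b"]},
            "uri": None,
        }
    }
    print(_rest_li_ify(toy))
    # -> {'com.linkedin.dataset.DatasetProperties':
    #     {'description': 'toy example', 'tags': ['a', 'b']}}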
|
the-stack_0_17256 | #***************************************************************
#* Name: LMS7002_GFIR3.py
#* Purpose: Class implementing LMS7002 GFIR3 functions
#* Author: Lime Microsystems ()
#* Created: 2016-11-14
#* Copyright: Lime Microsystems (limemicro.com)
#* License:
#**************************************************************
from LMS7002_base import *
from LMS7002_GFIR import *
class LMS7002_GFIR3(LMS7002_base):
    __slots__ = ['CMB0a', 'CMB0b', 'CMB0c', 'CMB1a', 'CMB1b', 'CMB1c', 'CMB2a', 'CMB2b', 'CMB2c', 'CMB3a', 'CMB3b', 'CMB3c', 'CMB4a', 'CMB4b', 'CMB4c', 'rxtx'] # Used to generate error on typos
def __init__(self, chip, RxTx, Channel):
if RxTx not in ['RX', 'TX']:
raise ValueError("Parameter RxTx must be 'RX' or 'TX'")
if Channel not in ['A', 'B']:
raise ValueError("Parameter Channel must be 'A' or 'B'")
self.chip = chip
self.rxtx = RxTx
self.channel = Channel
self.CMB0a = LMS7002_GFIR(chip, RxTx, Channel, 3, '0a')
self.CMB1a = LMS7002_GFIR(chip, RxTx, Channel, 3, '1a')
self.CMB2a = LMS7002_GFIR(chip, RxTx, Channel, 3, '2a')
self.CMB3a = LMS7002_GFIR(chip, RxTx, Channel, 3, '3a')
self.CMB4a = LMS7002_GFIR(chip, RxTx, Channel, 3, '4a')
self.CMB0b = LMS7002_GFIR(chip, RxTx, Channel, 3, '0b')
self.CMB1b = LMS7002_GFIR(chip, RxTx, Channel, 3, '1b')
self.CMB2b = LMS7002_GFIR(chip, RxTx, Channel, 3, '2b')
self.CMB3b = LMS7002_GFIR(chip, RxTx, Channel, 3, '3b')
self.CMB4b = LMS7002_GFIR(chip, RxTx, Channel, 3, '4b')
self.CMB0c = LMS7002_GFIR(chip, RxTx, Channel, 3, '0c')
self.CMB1c = LMS7002_GFIR(chip, RxTx, Channel, 3, '1c')
self.CMB2c = LMS7002_GFIR(chip, RxTx, Channel, 3, '2c')
self.CMB3c = LMS7002_GFIR(chip, RxTx, Channel, 3, '3c')
self.CMB4c = LMS7002_GFIR(chip, RxTx, Channel, 3, '4c')
def zeroOut(self):
"""
Initialize all FIR coefficients to 0
"""
for i in range(0, 8):
self.CMB0a[i] = 0
self.CMB1a[i] = 0
self.CMB2a[i] = 0
self.CMB3a[i] = 0
self.CMB4a[i] = 0
self.CMB0b[i] = 0
self.CMB1b[i] = 0
self.CMB2b[i] = 0
self.CMB3b[i] = 0
self.CMB4b[i] = 0
self.CMB0c[i] = 0
self.CMB1c[i] = 0
self.CMB2c[i] = 0
self.CMB3c[i] = 0
self.CMB4c[i] = 0
#
# Operator overloading for easy access FIR[index]=val
#
def __getitem__(self, key):
"""
Get the FIR coefficient bank
"""
if key not in [(0,'a'), (0, 'b'), (0, 'c'),
(1,'a'), (1, 'b'), (1, 'c'),
(2,'a'), (2, 'b'), (2, 'c'),
(3,'a'), (3, 'b'), (3, 'c'),
(4,'a'), (4, 'b'), (4, 'c')]:
raise ValueError("Index must be in [(0,'a'), (0, 'b'), (0, 'c'), (1,'a'), (1, 'b'), (1, 'c'), (2,'a'), (2, 'b'), (2, 'c'), (3,'a'), (3, 'b'), (3, 'c'), (4,'a'), (4, 'b'), (4, 'c')")
if key==(0,'a'):
return self.CMB0a
elif key==(1,'a'):
return self.CMB1a
elif key==(2,'a'):
return self.CMB2a
elif key==(3,'a'):
return self.CMB3a
elif key==(4,'a'):
return self.CMB4a
elif key==(0,'b'):
return self.CMB0b
elif key==(1,'b'):
return self.CMB1b
elif key==(2,'b'):
return self.CMB2b
elif key==(3,'b'):
return self.CMB3b
elif key==(4,'b'):
return self.CMB4b
elif key==(0,'c'):
return self.CMB0c
elif key==(1,'c'):
return self.CMB1c
elif key==(2,'c'):
return self.CMB2c
elif key==(3,'c'):
return self.CMB3c
else:
return self.CMB4c
#
# Operator overloading for readable representation of FIR coefficients
#
def __str__(self):
return self.__repr__()
def __repr__(self):
ret = self.rxtx+"GFIR3 Channel "+self.channel+"\n"
for coef in [self.CMB0a, self.CMB1a, self.CMB2a, self.CMB3a, self.CMB4a,
self.CMB0b, self.CMB1b, self.CMB2b, self.CMB3b, self.CMB4b,
self.CMB0c, self.CMB1c, self.CMB2c, self.CMB3c, self.CMB4c]:
tmp = "CMB"+coef.suffix+" = ["
for i in range(0,8):
tmp += self.intToHex(coef[i])+', '
tmp = tmp[:-2] + "]\n"
ret += tmp
return ret
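
# Usage sketch (comments only, since a live LMS7002 `chip` object is required;
# the calls below follow the class above but the values are illustrative):
#
#     gfir3 = LMS7002_GFIR3(chip, 'RX', 'A')
#     gfir3.zeroOut()              # clear all 15 banks x 8 coefficients
#     gfir3[(0, 'a')][3] = 0x1A    # write coefficient 3 of bank CMB0a
#     print(gfir3)                 # hex dump of every bank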
|
the-stack_0_17257 | """
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or is_platform_32bit()
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
c2 = r.split("\n")[0].startswith(r"<class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
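
# The helpers above classify a frame's repr: "truncated" means pandas elided
# rows and/or columns with "...", while "expanded" means the frame was wrapped
# onto multiple line-blocks (lines ending in a backslash). They are reused
# throughout the tests below.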
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[("err", object)])
for i in range(len(arr)):
arr["err"][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df["err"])
repr(df)
df.to_string()
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({"tups": list(zip(range(10), range(10)))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt._get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# FIXME: remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = pd.DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = pd.MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = pd.DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = pd.DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = pd.DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = pd.DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
                    # Out of the max_columns boundary, but no expansion
                    # since the width is not exceeded
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
                    # exceeding the vertical bounds cannot result in an expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = pd.DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = pd.DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = pd.DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_to_string_truncate(self):
        # GH 9784 - don't truncate when calling DataFrame.to_string
df = pd.DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "let's make this a very VERY long line that is longer "
"than the default 50 character limit",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert df.to_string() == (
" a b "
" c d\n"
"0 foo bar let's make this a very VERY long line t"
"hat is longer than the default 50 character limit 1\n"
"1 foo bar "
" stuff 1"
)
with option_context("max_colwidth", 20):
# the display option has no effect on the to_string method
assert df.to_string() == (
" a b "
" c d\n"
"0 foo bar let's make this a very VERY long line t"
"hat is longer than the default 50 character limit 1\n"
"1 foo bar "
" stuff 1"
)
assert df.to_string(max_colwidth=20) == (
" a b c d\n"
"0 foo bar let's make this ... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("max_rows", None):
with option_context("max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("max_rows", 0):
with option_context("max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("max_rows", 0):
with option_context("max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("max_rows", None):
with option_context("max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self, float_frame):
df = DataFrame({"\u03c3": np.arange(10.0)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = float_frame.to_string()
assert isinstance(result, str)
def test_to_string_utf8_columns(self):
n = "\u05d0".encode("utf-8")
with option_context("display.max_rows", 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({"c/\u03c3": []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(["\xc2"])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame(
{
"int": [1, 2, 3],
"float": [1.0, 2.0, 3.0],
"object": [(1, 2), True, False],
},
columns=["int", "float", "object"],
)
formatters = [
("int", lambda x: f"0x{x:x}"),
("float", lambda x: f"[{x: 4.1f}]"),
("object", lambda x: f"-{x!s}-"),
]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=list(zip(*formatters))[1])
assert result == (
" int float object\n"
"0 0x1 [ 1.0] -(1, 2)-\n"
"1 0x2 [ 2.0] -True-\n"
"2 0x3 [ 3.0] -False-"
)
assert result == result2
def test_to_string_with_datetime64_monthformatter(self):
months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
x = DataFrame({"months": months})
def format_func(x):
return x.strftime("%Y-%m")
result = x.to_string(formatters={"months": format_func})
expected = "months\n0 2016-01\n1 2016-02"
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter(self):
x = DataFrame(
{
"hod": pd.to_datetime(
["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f"
)
}
)
def format_func(x):
return x.strftime("%H:%M")
result = x.to_string(formatters={"hod": format_func})
expected = "hod\n0 10:10\n1 12:12"
assert result.strip() == expected
def test_to_string_with_formatters_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
result = df.to_string(formatters={"c/\u03c3": str})
assert result == " c/\u03c3\n" + "0 1\n1 2\n2 3"
def test_east_asian_unicode_false(self):
# not aligned properly because of east asian width
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=pd.Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=pd.Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = pd.DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=pd.Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=pd.Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = pd.DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
# when truncated the dtypes of the splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with pd.option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with pd.option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame(
{"date": [pd.Timestamp("20130101").tz_localize("UTC")] + [pd.NaT] * 5}
)
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [pd.NaT] * 5
df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [pd.NaT] * 5 + [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [pd.Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
pd.Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = pd.read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join(re.sub(r"\s+", " ", x).strip() for x in lines[1:])
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n 11 33 AAA\n 22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n 33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
# sadness per above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
# but not all exactly zero
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with pd.option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(l) for l in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with pd.option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_period(self):
# GH 12615
df = pd.DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
def gen_series_formatting():
s1 = pd.Series(["a"] * 100)
s2 = pd.Series(["ab"] * 100)
s3 = pd.Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = " 1\n" + " 2\n" + " 3\n" + " 4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
# not aligned properly because of east asian width
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", pd.Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
        # Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", pd.Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import datetime, timedelta
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
def chck_ncols(self, s):
with option_context("display.max_rows", 10):
res = repr(s)
lines = res.split("\n")
lines = [
line for line in repr(s).split("\n") if not re.match(r"[^\.]*\.+", line)
][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
self.chck_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = pd.Series(range(20))
# default setting no truncation even if above min_rows
assert ".." not in repr(s)
s = pd.Series(range(61))
# default of max_rows 60 triggers truncation if above
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(s)
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype="int64")
s.name = "myser"
res = s.to_string(max_rows=2, name=True)
exp = "0 0\n ..\n99 99\nName: myser"
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, dtype=True)
exp = "0 0\n ..\n99 99\ndtype: int64"
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, length=True)
exp = "0 0\n ..\n99 99\nLength: 100"
assert res == exp
def test_to_string_na_rep(self):
s = pd.Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
s = pd.Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
s = pd.Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = "0 0\n ..\n9 9"
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = pd.DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(
["a", "b"]
)
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def _three_digit_exp():
return f"{1.7e8:.4g}" == "1.7e+008"
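# _three_digit_exp() appears to detect platforms/builds where "%.4g"-style
# formatting pads the exponent to three digits (e.g. "1.7e+008" rather than
# "1.7e+08"); the float-trim-zeros tests above branch on its result.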
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with pd.option_context("display.precision", 6):
# DataFrame example from issue #9764
d = pd.DataFrame(
{
"col1": [
9.999e-8,
1e-7,
1.0001e-7,
2e-7,
4.999e-7,
5e-7,
5.0001e-7,
6e-7,
9.999e-7,
1e-6,
1.0001e-6,
2e-6,
4.999e-6,
5e-6,
5.0001e-6,
6e-6,
]
}
)
expected_output = {
(0, 6): " col1\n"
"0 9.999000e-08\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 6): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 8): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07\n"
"6 5.000100e-07\n"
"7 6.000000e-07",
(8, 16): " col1\n"
"8 9.999000e-07\n"
"9 1.000000e-06\n"
"10 1.000100e-06\n"
"11 2.000000e-06\n"
"12 4.999000e-06\n"
"13 5.000000e-06\n"
"14 5.000100e-06\n"
"15 6.000000e-06",
(9, 16): " col1\n"
"9 0.000001\n"
"10 0.000001\n"
"11 0.000002\n"
"12 0.000005\n"
"13 0.000005\n"
"14 0.000005\n"
"15 0.000006",
}
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with pd.option_context("display.precision", 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = pd.DataFrame(dict(x=[12345.6789]))
assert str(df) == " x\n0 12345.6789"
df = pd.DataFrame(dict(x=[2e6]))
assert str(df) == " x\n0 2000000.0"
df = pd.DataFrame(dict(x=[12345.6789, 2e6]))
assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
class TestRepr_timedelta64:
def test_none(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base()
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="sub_day")
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="long")
assert drepr(delta_1d) == "1 days 00:00:00"
assert drepr(-delta_1d) == "-1 days +00:00:00"
assert drepr(delta_0d) == "0 days 00:00:00"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_all(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1ns = pd.to_timedelta(1, unit="ns")
drepr = lambda x: x._repr_base(format="all")
assert drepr(delta_1d) == "1 days 00:00:00.000000000"
assert drepr(-delta_1d) == "-1 days +00:00:00.000000000"
assert drepr(delta_0d) == "0 days 00:00:00.000000000"
assert drepr(delta_1ns) == "0 days 00:00:00.000000001"
assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001"
class TestTimedelta64Formatter:
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
assert result[0].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x, box=False).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="D")
result = fmt.Timedelta64Formatter(-x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'-1 days'"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s")
result = fmt.Timedelta64Formatter(y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'00:00:01'"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s")
result = fmt.Timedelta64Formatter(-y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'-1 days +23:59:59'"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
x = pd.to_timedelta(list(range(1)), unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
class TestDatetime64Formatter:
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 00:00:00"
assert result[1].strip() == "2013-01-01 12:00:00"
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01"
assert result[1].strip() == "2013-01-02"
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "1970-01-01 00:00:00.000000200"
def test_dates_display(self):
# 10170
        # make sure that we consistently display date formatting
x = Series(date_range("20130101 09:00:00", periods=5, freq="D"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-05 09:00:00"
x = Series(date_range("20130101 09:00:00", periods=5, freq="s"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:04"
x = Series(date_range("20130101 09:00:00", periods=5, freq="ms"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.004"
x = Series(date_range("20130101 09:00:00", periods=5, freq="us"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000004"
x = Series(date_range("20130101 09:00:00", periods=5, freq="N"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000000004"
def test_datetime64formatter_yearmonth(self):
x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
def format_func(x):
return x.strftime("%Y-%m")
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ["2016-01", "2016-02"]
def test_datetime64formatter_hoursecond(self):
x = Series(
pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")
)
def format_func(x):
return x.strftime("%H:%M")
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ["10:10", "12:12"]
class TestNaTFormatting:
def test_repr(self):
assert repr(pd.NaT) == "NaT"
def test_str(self):
assert str(pd.NaT) == "NaT"
class TestDatetimeIndexFormat:
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
assert formatted[0] == "2003-01-01 12:00:00"
assert formatted[1] == "NaT"
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
assert formatted[0] == "2003-01-01"
assert formatted[1] == "NaT"
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
formatted = pd.to_datetime([datetime(2013, 1, 1), pd.NaT], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
date_format="%m-%d-%Y", na_rep="UT"
)
assert formatted[0] == "02-01-2003"
assert formatted[1] == "UT"
class TestDatetimeIndexUnicode:
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)]))
assert "['2013-01-01'," in text
assert ", '2014-01-01']" in text
def test_mixed(self):
text = str(
pd.to_datetime(
[datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)]
)
)
assert "'2013-01-01 00:00:00'," in text
assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp:
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
ts_nanos_only = Timestamp(200)
assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
ts_nanos_micros = Timestamp(1200)
assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
def test_tz_pytz(self):
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_tz_dateutil(self):
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_nat_representations(self):
for f in (str, repr, methodcaller("isoformat")):
assert f(pd.NaT) == "NaT"
def test_format_percentiles():
result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
expected = ["1.999%", "2.001%", "50%", "66.667%", "99.99%"]
assert result == expected
result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
expected = ["0%", "50%", "2.0%", "50%", "66.67%", "99.99%"]
assert result == expected
msg = r"percentiles should all be in the interval \[0,1\]"
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, np.nan, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([-0.001, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([2, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, 0.5, "a"])
def test_format_percentiles_integer_idx():
# Issue #26660
result = fmt.format_percentiles(np.linspace(0, 1, 10 + 1))
expected = [
"0%",
"10%",
"20%",
"30%",
"40%",
"50%",
"60%",
"70%",
"80%",
"90%",
"100%",
]
assert result == expected
def test_repr_html_ipython_config(ip):
code = textwrap.dedent(
"""\
import pandas as pd
df = pd.DataFrame({"A": [1, 2]})
df._repr_html_()
cfg = get_ipython().config
cfg['IPKernelApp']['parent_appname']
df._repr_html_()
"""
)
result = ip.run_cell(code)
assert not result.error_in_exec
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
@pytest.mark.parametrize(
"encoding, data",
[(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")],
)
def test_filepath_or_buffer_arg(
method,
filepath_or_buffer,
assert_filepath_or_buffer_equals,
encoding,
data,
filepath_or_buffer_id,
):
df = DataFrame([data])
if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
with pytest.raises(
ValueError, match="buf is not a file name and encoding is specified."
):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
elif encoding == "foo":
with tm.assert_produces_warning(None):
with pytest.raises(LookupError, match="unknown encoding"):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
else:
expected = getattr(df, method)()
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
assert_filepath_or_buffer_equals(expected)
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
msg = "buf is not a file name and it has no write method"
with pytest.raises(TypeError, match=msg):
getattr(float_frame, method)(buf=object())
|
the-stack_0_17258 | # ! Warning !
# This code is the worst code
# I have ever written. Reading
# it can cause and has caused
# permanent eye damage.
import sys
file = open(sys.argv[1])
assembly = file.read()
file.close()
tokens = assembly.split()
output = ""
curInstr = ""
jump = False
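# Apparent instruction encoding, inferred from the loop below (the target ISA
# is not documented here, so treat this as a best guess): each output line is a
# 26-bit word made up of a 4-bit opcode, up to three 3-bit register fields
# ("rN" tokens), and an optional 16-bit immediate; register-only instructions
# stop at 13 bits and are padded with 13 zero bits. For example, "addi r1 r2 5"
# would encode as "0010" + "001" + "010" + "0000000000000101". Note that `jump`
# above is assigned but never used in the code shown here.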
for tok in tokens:
if tok == "add":
curInstr += "0001"
elif tok == "addi":
curInstr += "0010"
elif tok == "jmp":
curInstr += "0011000000"
elif tok == "jeq":
curInstr += "0100"
elif tok == "store":
curInstr += "0101"
elif tok == "load":
curInstr += "0110"
elif tok == "xor":
curInstr += "0111"
elif tok == "and":
curInstr += "1000"
elif tok[0] == "r":
curInstr += "{0:03b}".format(int(tok[1]))
else:
curInstr += "{0:016b}".format(int(tok))
if len(curInstr) == 26:
output += curInstr + "\n"
curInstr = ""
elif len(curInstr) == 13:
curInstr += "0000000000000\n"
output += curInstr
curInstr = ""
file = open(sys.argv[2], "w")
file.write(output)
|
the-stack_0_17260 | import datetime
import logging
import multiprocessing
import os
import secrets
import shutil
from typing import Any, Dict, Iterable, List, Optional, Tuple
import boto3
import orjson
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.utils.timezone import now as timezone_now
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier
from analytics.models import RealmCount, StreamCount, UserCount
from zerver.lib.actions import (
UserMessageLite,
bulk_insert_ums,
do_change_avatar_fields,
do_change_plan_type,
)
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.bulk_create import bulk_create_users, bulk_set_users_or_streams_recipient_fields
from zerver.lib.export import DATE_FIELDS, Field, Path, Record, TableData, TableName
from zerver.lib.markdown import markdown_convert
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import get_last_message_id
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.streams import render_stream_description
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.upload import BadImageError, guess_type, sanitize_name
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.models import (
AlertWord,
Attachment,
BotConfigData,
BotStorageData,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
Huddle,
Message,
MutedTopic,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
Service,
Stream,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
get_huddle_hash,
get_system_bot,
get_user_profile_by_id,
)
realm_tables = [("zerver_defaultstream", DefaultStream, "defaultstream"),
("zerver_realmemoji", RealmEmoji, "realmemoji"),
("zerver_realmdomain", RealmDomain, "realmdomain"),
("zerver_realmfilter", RealmFilter, "realmfilter")] # List[Tuple[TableName, Any, str]]
# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
ID_MAP: Dict[str, Dict[int, int]] = {
'alertword': {},
'client': {},
'user_profile': {},
'huddle': {},
'realm': {},
'stream': {},
'recipient': {},
'subscription': {},
'defaultstream': {},
'reaction': {},
'realmemoji': {},
'realmdomain': {},
'realmfilter': {},
'message': {},
'user_presence': {},
'useractivity': {},
'useractivityinterval': {},
'usermessage': {},
'customprofilefield': {},
'customprofilefieldvalue': {},
'attachment': {},
'realmauditlog': {},
'recipient_to_huddle_map': {},
'userhotspot': {},
'mutedtopic': {},
'service': {},
'usergroup': {},
'usergroupmembership': {},
'botstoragedata': {},
'botconfigdata': {},
'analytics_realmcount': {},
'analytics_streamcount': {},
'analytics_usercount': {},
}
id_map_to_list: Dict[str, Dict[int, List[int]]] = {
'huddle_to_user_list': {},
}
path_maps: Dict[str, Dict[str, str]] = {
'attachment_path': {},
}
def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
if table not in ID_MAP:
raise Exception(f'''
Table {table} is not initialized in ID_MAP, which could
mean that we have not thought through circular
dependencies.
''')
ID_MAP[table][old_id] = new_id
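# A minimal sketch of how this mapping is consumed (illustrative ids only):
#
#     update_id_map('stream', old_id=3, new_id=17)
#     ID_MAP['stream'][3]   # -> 17
#
# Later, re_map_foreign_keys() looks up ID_MAP['stream'] to rewrite any
# foreign-key column that still references the old stream id 3.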
def fix_datetime_fields(data: TableData, table: TableName) -> None:
for item in data[table]:
for field_name in DATE_FIELDS[table]:
if item[field_name] is not None:
item[field_name] = datetime.datetime.fromtimestamp(item[field_name], tz=datetime.timezone.utc)
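# Illustrative example (made-up value): a serialized timestamp of 1580000000
# becomes datetime.datetime(2020, 1, 26, 0, 53, 20, tzinfo=datetime.timezone.utc),
# provided the field is listed in DATE_FIELDS for that table.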
def fix_upload_links(data: TableData, message_table: TableName) -> None:
"""
Because the URLs for uploaded files encode the realm ID of the
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process.
"""
for message in data[message_table]:
if message['has_attachment'] is True:
for key, value in path_maps['attachment_path'].items():
if key in message['content']:
message['content'] = message['content'].replace(key, value)
if message['rendered_content']:
message['rendered_content'] = message['rendered_content'].replace(key, value)
def create_subscription_events(data: TableData, realm_id: int) -> None:
"""
When the export data doesn't contain the table `zerver_realmauditlog`,
    this function creates RealmAuditLog objects for `subscription_created`
    type events for all the existing Stream subscriptions.
This is needed for all the export tools which do not include the
table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
data about when a user was subscribed is not exported by the third-party
service.
"""
all_subscription_logs = []
event_last_message_id = get_last_message_id()
event_time = timezone_now()
recipient_id_to_stream_id = {
d['id']: d['type_id']
for d in data['zerver_recipient']
if d['type'] == Recipient.STREAM
}
for sub in data['zerver_subscription']:
recipient_id = sub['recipient_id']
stream_id = recipient_id_to_stream_id.get(recipient_id)
if stream_id is None:
continue
user_id = sub['user_profile_id']
all_subscription_logs.append(RealmAuditLog(realm_id=realm_id,
acting_user_id=user_id,
modified_user_id=user_id,
modified_stream_id=stream_id,
event_last_message_id=event_last_message_id,
event_time=event_time,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED))
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def fix_service_tokens(data: TableData, table: TableName) -> None:
"""
The tokens in the services are created by 'generate_api_key'.
As the tokens are unique, they should be re-created for the imports.
"""
for item in data[table]:
item['token'] = generate_api_key()
def process_huddle_hash(data: TableData, table: TableName) -> None:
"""
Build new huddle hashes with the updated ids of the users
"""
for huddle in data[table]:
user_id_list = id_map_to_list['huddle_to_user_list'][huddle['id']]
huddle['huddle_hash'] = get_huddle_hash(user_id_list)
def get_huddles_from_subscription(data: TableData, table: TableName) -> None:
"""
Extract the IDs of the user_profiles involved in a huddle from the subscription object
This helps to generate a unique huddle hash from the updated user_profile ids
"""
id_map_to_list['huddle_to_user_list'] = {
value: [] for value in ID_MAP['recipient_to_huddle_map'].values()}
for subscription in data[table]:
if subscription['recipient'] in ID_MAP['recipient_to_huddle_map']:
huddle_id = ID_MAP['recipient_to_huddle_map'][subscription['recipient']]
id_map_to_list['huddle_to_user_list'][huddle_id].append(subscription['user_profile_id'])
def fix_customprofilefield(data: TableData) -> None:
"""
    For CustomProfileFields with 'field_type' USER, the user IDs stored in the
    corresponding CustomProfileFieldValue rows need to be re-mapped.
"""
field_type_USER_id_list = []
for item in data['zerver_customprofilefield']:
if item['field_type'] == CustomProfileField.USER:
field_type_USER_id_list.append(item['id'])
for item in data['zerver_customprofilefieldvalue']:
if item['field_id'] in field_type_USER_id_list:
old_user_id_list = orjson.loads(item['value'])
new_id_list = re_map_foreign_keys_many_to_many_internal(
table='zerver_customprofilefieldvalue',
field_name='value',
related_table='user_profile',
old_id_list=old_user_id_list)
item['value'] = orjson.dumps(new_id_list).decode()
def fix_message_rendered_content(realm: Realm,
sender_map: Dict[int, Record],
messages: List[Record]) -> None:
"""
This function sets the rendered_content of all the messages
after the messages have been imported from a non-Zulip platform.
"""
for message in messages:
if message['rendered_content'] is not None:
# For Zulip->Zulip imports, we use the original rendered
# Markdown; this avoids issues where e.g. a mention can no
# longer render properly because a user has changed their
# name.
#
# However, we still need to update the data-user-id and
# similar values stored on mentions, stream mentions, and
# similar syntax in the rendered HTML.
soup = BeautifulSoup(message["rendered_content"], "html.parser")
user_mentions = soup.findAll("span", {"class": "user-mention"})
if len(user_mentions) != 0:
user_id_map = ID_MAP["user_profile"]
for mention in user_mentions:
if not mention.has_attr("data-user-id"):
# Legacy mentions don't have a data-user-id
# field; we should just import them
# unmodified.
continue
if mention['data-user-id'] == "*":
# No rewriting is required for wildcard mentions
continue
old_user_id = int(mention["data-user-id"])
if old_user_id in user_id_map:
mention["data-user-id"] = str(user_id_map[old_user_id])
message['rendered_content'] = str(soup)
stream_mentions = soup.findAll("a", {"class": "stream"})
if len(stream_mentions) != 0:
stream_id_map = ID_MAP["stream"]
for mention in stream_mentions:
old_stream_id = int(mention["data-stream-id"])
if old_stream_id in stream_id_map:
mention["data-stream-id"] = str(stream_id_map[old_stream_id])
message['rendered_content'] = str(soup)
user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
if len(user_group_mentions) != 0:
user_group_id_map = ID_MAP["usergroup"]
for mention in user_group_mentions:
old_user_group_id = int(mention["data-user-group-id"])
if old_user_group_id in user_group_id_map:
mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
message['rendered_content'] = str(soup)
continue
try:
content = message['content']
sender_id = message['sender_id']
sender = sender_map[sender_id]
sent_by_bot = sender['is_bot']
translate_emoticons = sender['translate_emoticons']
# We don't handle alert words on import from third-party
# platforms, since they generally don't have an "alert
# words" type feature, and notifications aren't important anyway.
realm_alert_words_automaton = None
rendered_content = markdown_convert(
content=content,
realm_alert_words_automaton=realm_alert_words_automaton,
message_realm=realm,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
)
message['rendered_content'] = rendered_content
message['rendered_content_version'] = markdown_version
except Exception:
# This generally happens with two possible causes:
# * rendering Markdown throwing an uncaught exception
# * rendering Markdown failing with the exception being
            # caught in Markdown (which then returns None, causing the
# rendered_content assert above to fire).
logging.warning("Error in Markdown rendering for message ID %s; continuing", message['id'])
def current_table_ids(data: TableData, table: TableName) -> List[int]:
"""
Returns the ids present in the current table
"""
id_list = []
for item in data[table]:
id_list.append(item["id"])
return id_list
def idseq(model_class: Any) -> str:
if model_class == RealmDomain:
return 'zerver_realmalias_id_seq'
elif model_class == BotStorageData:
return 'zerver_botuserstatedata_id_seq'
elif model_class == BotConfigData:
return 'zerver_botuserconfigdata_id_seq'
return f'{model_class._meta.db_table}_id_seq'
def allocate_ids(model_class: Any, count: int) -> List[int]:
"""
    Increases the sequence number for a given table by the number of objects being
imported into that table. Hence, this gives a reserved range of IDs to import the
converted Slack objects into the tables.
"""
conn = connection.cursor()
sequence = idseq(model_class)
conn.execute("select nextval(%s) from generate_series(1, %s)",
[sequence, count])
query = conn.fetchall() # Each element in the result is a tuple like (5,)
conn.close()
# convert List[Tuple[int]] to List[int]
return [item[0] for item in query]
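# Illustrative example (assuming the underlying sequence currently stands at 41):
#
#     allocate_ids(Stream, 3)   # -> [42, 43, 44]
#
# update_model_ids() below pairs such a reserved range with the old ids via
# update_id_map().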
def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
'''
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
'''
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name]
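# Illustrative example: with field_name='realm', an item
# {'name': 'general', 'realm': 7} becomes {'name': 'general', 'realm_id': 7}.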
def re_map_foreign_keys(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
"""
    This is a wrapper function for all the realm data tables; only avatar and
    attachment records need to go through the internal function directly,
    because their data format differs (TableData for the realm data tables vs.
    List[Record] for the avatar and attachment records).
"""
# See comments in bulk_import_user_message_data.
assert('usermessage' not in related_table)
re_map_foreign_keys_internal(data[table], table, field_name, related_table, verbose, id_field,
recipient_field, reaction_field)
def re_map_foreign_keys_internal(data_table: List[Record],
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
'''
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
'''
lookup_table = ID_MAP[related_table]
for item in data_table:
old_id = item[field_name]
if recipient_field:
if related_table == "stream" and item['type'] == 2:
pass
elif related_table == "user_profile" and item['type'] == 1:
pass
elif related_table == "huddle" and item['type'] == 3:
# save the recipient id with the huddle id, so that we can extract
# the user_profile ids involved in a huddle with the help of the
# subscription object
# check function 'get_huddles_from_subscription'
ID_MAP['recipient_to_huddle_map'][item['id']] = lookup_table[old_id]
else:
continue
old_id = item[field_name]
if reaction_field:
if item['reaction_type'] == Reaction.REALM_EMOJI:
old_id = int(old_id)
else:
continue
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s',
table, field_name + '_id', old_id, new_id)
else:
new_id = old_id
if not id_field:
item[field_name + "_id"] = new_id
del item[field_name]
else:
if reaction_field:
item[field_name] = str(new_id)
else:
item[field_name] = new_id
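# A minimal sketch of the common (non-recipient, non-reaction) path, with
# illustrative ids: given ID_MAP['user_profile'] == {8: 31}, an item
# {'user_profile': 8, ...} becomes {'user_profile_id': 31, ...}; ids missing
# from the lookup table are kept unchanged and only renamed to the `_id` form.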
def re_map_foreign_keys_many_to_many(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False) -> None:
"""
We need to assign new ids to rows during the import/export
process.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this wrapper function does
the re-mapping only for ManyToMany fields.
"""
for item in data[table]:
old_id_list = item[field_name]
new_id_list = re_map_foreign_keys_many_to_many_internal(
table, field_name, related_table, old_id_list, verbose)
item[field_name] = new_id_list
del item[field_name]
def re_map_foreign_keys_many_to_many_internal(table: TableName,
field_name: Field,
related_table: TableName,
old_id_list: List[int],
verbose: bool=False) -> List[int]:
"""
This is an internal function for tables with ManyToMany fields,
which takes the old ID list of the ManyToMany relation and returns the
new updated ID list.
"""
lookup_table = ID_MAP[related_table]
new_id_list = []
for old_id in old_id_list:
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s',
table, field_name + '_id', old_id, new_id)
else:
new_id = old_id
new_id_list.append(new_id)
return new_id_list
def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:
for item in data[table]:
item[field_name] = item[field_name + '_mask']
del item[field_name + '_mask']
def fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None:
"""Used to fixup the authentication_methods bitfield to be a string"""
for item in data[table]:
values_as_bitstring = ''.join('1' if field[1] else '0' for field in
item[field_name])
values_as_int = int(values_as_bitstring, 2)
item[field_name] = values_as_int
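# Illustrative example (backend names are only placeholders): a serialized
# value of [['Email', True], ['GitHub', False], ['LDAP', True]] yields the
# bitstring '101', which is stored as the integer 5.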
def remove_denormalized_recipient_column_from_data(data: TableData) -> None:
"""
    The recipient column shouldn't be imported; we'll set the correct values
    when the Recipient table gets imported.
"""
for stream_dict in data['zerver_stream']:
if "recipient" in stream_dict:
del stream_dict["recipient"]
for user_profile_dict in data['zerver_userprofile']:
if 'recipient' in user_profile_dict:
del user_profile_dict['recipient']
for huddle_dict in data['zerver_huddle']:
if 'recipient' in huddle_dict:
del huddle_dict['recipient']
def get_db_table(model_class: Any) -> str:
"""E.g. (RealmDomain -> 'zerver_realmdomain')"""
return model_class._meta.db_table
def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:
table = get_db_table(model)
# Important: remapping usermessage rows is
    # not only unnecessary, it's expensive and can cause
# memory errors. We don't even use ids from ID_MAP.
assert('usermessage' not in table)
old_id_list = current_table_ids(data, table)
allocated_id_list = allocate_ids(model, len(data[table]))
for item in range(len(data[table])):
update_id_map(related_table, old_id_list[item], allocated_id_list[item])
re_map_foreign_keys(data, table, 'id', related_table=related_table, id_field=True)
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
model = UserMessage
table = 'zerver_usermessage'
lst = data[table]
# IMPORTANT NOTE: We do not use any primary id
# data from either the import itself or ID_MAP.
# We let the DB itself generate ids. Note that
# no tables use user_message.id as a foreign key,
# so we can safely avoid all re-mapping complexity.
def process_batch(items: List[Dict[str, Any]]) -> None:
ums = [
UserMessageLite(
user_profile_id = item['user_profile_id'],
message_id = item['message_id'],
flags=item['flags'],
)
for item in items
]
bulk_insert_ums(ums)
chunk_size = 10000
process_list_in_batches(
lst=lst,
chunk_size=chunk_size,
process_batch=process_batch,
)
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
table = get_db_table(model)
# TODO, deprecate dump_file_id
model.objects.bulk_create(model(**item) for item in data[table])
if dump_file_id is None:
logging.info("Successfully imported %s from %s.", model, table)
else:
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check if a Client object already exists, and so we need to
# re-map all Client IDs to the values in the new DB.
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
for item in data[table]:
try:
client = Client.objects.get(name=item['name'])
except Client.DoesNotExist:
client = Client.objects.create(name=item['name'])
update_id_map(table='client', old_id=item['id'], new_id=client.id)
def process_avatars(record: Dict[str, Any]) -> None:
from zerver.lib.upload import upload_backend
if record['s3_path'].endswith('.original'):
user_profile = get_user_profile_by_id(record['user_profile_id'])
if settings.LOCAL_UPLOADS_DIR is not None:
avatar_path = user_avatar_path_from_ids(user_profile.id, record['realm_id'])
medium_file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
avatar_path) + '-medium.png'
if os.path.exists(medium_file_path):
# We remove the image here primarily to deal with
# issues when running the import script multiple
# times in development (where one might reuse the
# same realm ID from a previous iteration).
os.remove(medium_file_path)
try:
upload_backend.ensure_medium_avatar_image(user_profile=user_profile)
if record.get("importer_should_thumbnail"):
upload_backend.ensure_basic_avatar_image(user_profile=user_profile)
except BadImageError:
logging.warning(
"Could not thumbnail avatar image for user %s; ignoring",
user_profile.id,
)
# Delete the record of the avatar to avoid 404s.
do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=None)
def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_avatars: bool=False,
processing_emojis: bool=False, processing_realm_icons: bool=False) -> None:
if processing_avatars and processing_emojis:
raise AssertionError("Cannot import avatars and emojis at the same time!")
if processing_avatars:
logging.info("Importing avatars")
elif processing_emojis:
logging.info("Importing emojis")
elif processing_realm_icons:
logging.info("Importing realm icons and logos")
else:
logging.info("Importing uploaded files")
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename, "rb") as records_file:
records: List[Dict[str, Any]] = orjson.loads(records_file.read())
timestamp = datetime_to_timestamp(timezone_now())
re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
id_field=True)
if not processing_emojis and not processing_realm_icons:
re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
related_table="user_profile", id_field=True)
s3_uploads = settings.LOCAL_UPLOADS_DIR is None
if s3_uploads:
if processing_avatars or processing_emojis or processing_realm_icons:
bucket_name = settings.S3_AVATAR_BUCKET
else:
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
session = boto3.Session(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = session.resource('s3', endpoint_url=settings.S3_ENDPOINT_URL).Bucket(bucket_name)
count = 0
for record in records:
count += 1
if count % 1000 == 0:
logging.info("Processed %s/%s uploads", count, len(records))
if processing_avatars:
# For avatars, we need to rehash the user ID with the
# new server's avatar salt
relative_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
if record['s3_path'].endswith('.original'):
relative_path += '.original'
else:
# TODO: This really should be unconditional. However,
# until we fix the S3 upload backend to use the .png
# path suffix for its normal avatar URLs, we need to
# only do this for the LOCAL_UPLOADS_DIR backend.
if not s3_uploads:
relative_path += '.png'
elif processing_emojis:
# For emojis we follow the function 'upload_emoji_image'
relative_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=record['realm_id'],
emoji_file_name=record['file_name'])
record['last_modified'] = timestamp
elif processing_realm_icons:
icon_name = os.path.basename(record["path"])
relative_path = os.path.join(str(record['realm_id']), "realm", icon_name)
record['last_modified'] = timestamp
else:
# Should be kept in sync with its equivalent in zerver/lib/uploads in the
# function 'upload_message_file'
relative_path = "/".join([
str(record['realm_id']),
secrets.token_urlsafe(18),
sanitize_name(os.path.basename(record['path'])),
])
path_maps['attachment_path'][record['s3_path']] = relative_path
if s3_uploads:
key = bucket.Object(relative_path)
metadata = {}
if processing_emojis and "user_profile_id" not in record:
# Exported custom emoji from tools like Slack don't have
# the data for what user uploaded them in `user_profile_id`.
pass
elif processing_realm_icons and "user_profile_id" not in record:
# Exported realm icons and logos from local export don't have
# the value of user_profile_id in the associated record.
pass
else:
user_profile_id = int(record['user_profile_id'])
# Support email gateway bot and other cross-realm messages
if user_profile_id in ID_MAP["user_profile"]:
logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
user_profile_id = ID_MAP["user_profile"][user_profile_id]
user_profile = get_user_profile_by_id(user_profile_id)
metadata["user_profile_id"] = str(user_profile.id)
if 'last_modified' in record:
metadata["orig_last_modified"] = str(record['last_modified'])
metadata["realm_id"] = str(record['realm_id'])
# Zulip exports will always have a content-type, but third-party exports might not.
content_type = record.get("content_type")
if content_type is None:
content_type = guess_type(record['s3_path'])[0]
if content_type is None:
# This is the default for unknown data. Note that
# for `.original` files, this is the value we'll
# set; that is OK, because those are never served
# directly anyway.
content_type = 'application/octet-stream'
key.upload_file(os.path.join(import_dir, record['path']),
ExtraArgs={
'ContentType': content_type,
'Metadata': metadata})
else:
if processing_avatars or processing_emojis or processing_realm_icons:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", relative_path)
else:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", relative_path)
orig_file_path = os.path.join(import_dir, record['path'])
os.makedirs(os.path.dirname(file_path), exist_ok=True)
shutil.copy(orig_file_path, file_path)
if processing_avatars:
# Ensure that we have medium-size avatar images for every
# avatar. TODO: This implementation is hacky, both in that it
# does get_user_profile_by_id for each user, and in that it
# might be better to require the export to just have these.
if processes == 1:
for record in records:
process_avatars(record)
else:
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(process_avatars, records):
pass
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm:
logging.info("Importing realm dump %s", import_dir)
if not os.path.exists(import_dir):
raise Exception("Missing import directory!")
realm_data_filename = os.path.join(import_dir, "realm.json")
if not os.path.exists(realm_data_filename):
raise Exception("Missing realm.json file!")
if not server_initialized():
create_internal_realm()
logging.info("Importing realm data from %s", realm_data_filename)
with open(realm_data_filename, "rb") as f:
data = orjson.loads(f.read())
remove_denormalized_recipient_column_from_data(data)
sort_by_date = data.get('sort_by_date', False)
bulk_import_client(data, Client, 'zerver_client')
# We don't import the Stream model yet, since it depends on Realm,
# which isn't imported yet. But we need the Stream model IDs for
# notifications_stream.
update_model_ids(Stream, data, 'stream')
re_map_foreign_keys(data, 'zerver_realm', 'notifications_stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realm', 'signup_notifications_stream', related_table="stream")
fix_datetime_fields(data, 'zerver_realm')
# Fix realm subdomain information
data['zerver_realm'][0]['string_id'] = subdomain
data['zerver_realm'][0]['name'] = subdomain
fix_realm_authentication_bitfield(data, 'zerver_realm', 'authentication_methods')
update_model_ids(Realm, data, 'realm')
realm = Realm(**data['zerver_realm'][0])
if realm.notifications_stream_id is not None:
notifications_stream_id: Optional[int] = int(realm.notifications_stream_id)
else:
notifications_stream_id = None
realm.notifications_stream_id = None
if realm.signup_notifications_stream_id is not None:
signup_notifications_stream_id: Optional[int] = int(realm.signup_notifications_stream_id)
else:
signup_notifications_stream_id = None
realm.signup_notifications_stream_id = None
realm.save()
# Email tokens will automatically be randomly generated when the
# Stream objects are created by Django.
fix_datetime_fields(data, 'zerver_stream')
re_map_foreign_keys(data, 'zerver_stream', 'realm', related_table="realm")
# Handle rendering of stream descriptions for import from non-Zulip
for stream in data['zerver_stream']:
if 'rendered_description' in stream:
continue
stream["rendered_description"] = render_stream_description(stream["description"])
bulk_import_model(data, Stream)
realm.notifications_stream_id = notifications_stream_id
realm.signup_notifications_stream_id = signup_notifications_stream_id
realm.save()
# Remap the user IDs for notification_bot and friends to their
# appropriate IDs on this server
for item in data['zerver_userprofile_crossrealm']:
logging.info("Adding to ID map: %s %s", item['id'], get_system_bot(item['email']).id)
new_user_id = get_system_bot(item['email']).id
update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
update_id_map(table='recipient', old_id=item['recipient_id'], new_id=new_recipient_id)
# Merge in zerver_userprofile_mirrordummy
data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
del data['zerver_userprofile_mirrordummy']
data['zerver_userprofile'].sort(key=lambda r: r['id'])
# To remap foreign key for UserProfile.last_active_message_id
update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)
fix_datetime_fields(data, 'zerver_userprofile')
update_model_ids(UserProfile, data, 'user_profile')
re_map_foreign_keys(data, 'zerver_userprofile', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_sending_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_events_register_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'last_active_message_id',
related_table="message", id_field=True)
for user_profile_dict in data['zerver_userprofile']:
user_profile_dict['password'] = None
user_profile_dict['api_key'] = generate_api_key()
# Since Zulip doesn't use these permissions, drop them
del user_profile_dict['user_permissions']
del user_profile_dict['groups']
# The short_name field is obsolete in Zulip, but it's
# convenient for third party exports to populate it.
if 'short_name' in user_profile_dict:
del user_profile_dict['short_name']
user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
for user_profile in user_profiles:
user_profile.set_unusable_password()
UserProfile.objects.bulk_create(user_profiles)
re_map_foreign_keys(data, 'zerver_defaultstream', 'stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realmemoji', 'author', related_table="user_profile")
for (table, model, related_table) in realm_tables:
re_map_foreign_keys(data, table, 'realm', related_table="realm")
update_model_ids(model, data, related_table)
bulk_import_model(data, model)
if 'zerver_huddle' in data:
update_model_ids(Huddle, data, 'huddle')
# We don't import Huddle yet, since we don't have the data to
# compute huddle hashes until we've imported some of the
# tables below.
# TODO: double-check this.
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="stream",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="user_profile",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="huddle",
recipient_field=True, id_field=True)
update_model_ids(Recipient, data, 'recipient')
bulk_import_model(data, Recipient)
bulk_set_users_or_streams_recipient_fields(Stream, Stream.objects.filter(realm=realm))
bulk_set_users_or_streams_recipient_fields(UserProfile, UserProfile.objects.filter(realm=realm))
re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
get_huddles_from_subscription(data, 'zerver_subscription')
re_map_foreign_keys(data, 'zerver_subscription', 'recipient', related_table="recipient")
update_model_ids(Subscription, data, 'subscription')
bulk_import_model(data, Subscription)
if 'zerver_realmauditlog' in data:
fix_datetime_fields(data, 'zerver_realmauditlog')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'acting_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_stream',
related_table="stream")
update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
bulk_import_model(data, RealmAuditLog)
else:
logging.info('about to call create_subscription_events')
create_subscription_events(
data=data,
realm_id=realm.id,
)
logging.info('done with create_subscription_events')
if 'zerver_huddle' in data:
process_huddle_hash(data, 'zerver_huddle')
bulk_import_model(data, Huddle)
for huddle in Huddle.objects.filter(recipient_id=None):
recipient = Recipient.objects.get(type=Recipient.HUDDLE, type_id=huddle.id)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
if 'zerver_alertword' in data:
re_map_foreign_keys(data, 'zerver_alertword', 'user_profile', related_table='user_profile')
re_map_foreign_keys(data, 'zerver_alertword', 'realm', related_table='realm')
update_model_ids(AlertWord, data, 'alertword')
bulk_import_model(data, AlertWord)
if 'zerver_userhotspot' in data:
fix_datetime_fields(data, 'zerver_userhotspot')
re_map_foreign_keys(data, 'zerver_userhotspot', 'user', related_table='user_profile')
update_model_ids(UserHotspot, data, 'userhotspot')
bulk_import_model(data, UserHotspot)
if 'zerver_mutedtopic' in data:
fix_datetime_fields(data, 'zerver_mutedtopic')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'user_profile', related_table='user_profile')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'stream', related_table='stream')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'recipient', related_table='recipient')
update_model_ids(MutedTopic, data, 'mutedtopic')
bulk_import_model(data, MutedTopic)
if 'zerver_service' in data:
re_map_foreign_keys(data, 'zerver_service', 'user_profile', related_table='user_profile')
fix_service_tokens(data, 'zerver_service')
update_model_ids(Service, data, 'service')
bulk_import_model(data, Service)
if 'zerver_usergroup' in data:
re_map_foreign_keys(data, 'zerver_usergroup', 'realm', related_table='realm')
re_map_foreign_keys_many_to_many(data, 'zerver_usergroup',
'members', related_table='user_profile')
update_model_ids(UserGroup, data, 'usergroup')
bulk_import_model(data, UserGroup)
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_group', related_table='usergroup')
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_profile', related_table='user_profile')
update_model_ids(UserGroupMembership, data, 'usergroupmembership')
bulk_import_model(data, UserGroupMembership)
if 'zerver_botstoragedata' in data:
re_map_foreign_keys(data, 'zerver_botstoragedata', 'bot_profile', related_table='user_profile')
update_model_ids(BotStorageData, data, 'botstoragedata')
bulk_import_model(data, BotStorageData)
if 'zerver_botconfigdata' in data:
re_map_foreign_keys(data, 'zerver_botconfigdata', 'bot_profile', related_table='user_profile')
update_model_ids(BotConfigData, data, 'botconfigdata')
bulk_import_model(data, BotConfigData)
fix_datetime_fields(data, 'zerver_userpresence')
re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
re_map_foreign_keys(data, 'zerver_userpresence', 'realm', related_table="realm")
update_model_ids(UserPresence, data, 'user_presence')
bulk_import_model(data, UserPresence)
fix_datetime_fields(data, 'zerver_useractivity')
re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
update_model_ids(UserActivity, data, 'useractivity')
bulk_import_model(data, UserActivity)
fix_datetime_fields(data, 'zerver_useractivityinterval')
re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile', related_table="user_profile")
update_model_ids(UserActivityInterval, data, 'useractivityinterval')
bulk_import_model(data, UserActivityInterval)
re_map_foreign_keys(data, 'zerver_customprofilefield', 'realm', related_table="realm")
update_model_ids(CustomProfileField, data, related_table="customprofilefield")
bulk_import_model(data, CustomProfileField)
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'user_profile',
related_table="user_profile")
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'field',
related_table="customprofilefield")
fix_customprofilefield(data)
update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
bulk_import_model(data, CustomProfileFieldValue)
# Import uploaded files and avatars
import_uploads(realm, os.path.join(import_dir, "avatars"), processes, processing_avatars=True)
import_uploads(realm, os.path.join(import_dir, "uploads"), processes)
    # We need this check because the emoji files are only present in data
    # imported from Slack; for a Zulip export, this directory doesn't exist.
if os.path.exists(os.path.join(import_dir, "emoji")):
import_uploads(realm, os.path.join(import_dir, "emoji"), processes, processing_emojis=True)
if os.path.exists(os.path.join(import_dir, "realm_icons")):
import_uploads(realm, os.path.join(import_dir, "realm_icons"), processes,
processing_realm_icons=True)
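    # Map the (already remapped) user ids to their userprofile rows, so that
    # message rendering below can look up sender details by id.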
sender_map = {
user['id']: user
for user in data['zerver_userprofile']
}
# Import zerver_message and zerver_usermessage
import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)
re_map_foreign_keys(data, 'zerver_reaction', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_reaction', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_reaction', 'emoji_code', related_table="realmemoji", id_field=True,
reaction_field=True)
update_model_ids(Reaction, data, 'reaction')
bulk_import_model(data, Reaction)
# Similarly, we need to recalculate the first_message_id for stream objects.
for stream in Stream.objects.filter(realm=realm):
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
first_message = Message.objects.filter(recipient=recipient).first()
if first_message is None:
stream.first_message_id = None
else:
stream.first_message_id = first_message.id
stream.save(update_fields=["first_message_id"])
# Do attachments AFTER message data is loaded.
# TODO: de-dup how we read these json files.
fn = os.path.join(import_dir, "attachment.json")
if not os.path.exists(fn):
raise Exception("Missing attachment.json file!")
logging.info("Importing attachment data from %s", fn)
with open(fn, "rb") as f:
data = orjson.loads(f.read())
import_attachments(data)
# Import the analytics file.
import_analytics_data(realm=realm, import_dir=import_dir)
if settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED)
else:
do_change_plan_type(realm, Realm.SELF_HOSTED)
return realm
# create_users and do_import_system_bots differ from their equivalents in
# zerver/lib/server_initialization.py because here we only create users for
# bots that don't already exist.
def do_import_system_bots(realm: Any) -> None:
internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
for bot in settings.INTERNAL_BOTS]
create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
print("Finished importing system bots.")
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]],
bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
if not UserProfile.objects.filter(email=email):
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def update_message_foreign_keys(import_dir: Path,
sort_by_date: bool) -> None:
old_id_list = get_incoming_message_ids(
import_dir=import_dir,
sort_by_date=sort_by_date,
)
count = len(old_id_list)
new_id_list = allocate_ids(model_class=Message, count=count)
for old_id, new_id in zip(old_id_list, new_id_list):
update_id_map(
table='message',
old_id=old_id,
new_id=new_id,
)
# We don't touch user_message keys here; that happens later when
    # we actually read the files a second time to get the actual data.
def get_incoming_message_ids(import_dir: Path,
sort_by_date: bool) -> List[int]:
'''
This function reads in our entire collection of message
ids, which can be millions of integers for some installations.
And then we sort the list. This is necessary to ensure
that the sort order of incoming ids matches the sort order
of date_sent, which isn't always guaranteed by our
utilities that convert third party chat data. We also
need to move our ids to a new range if we're dealing
with a server that has data for other realms.
'''
if sort_by_date:
tups: List[Tuple[int, int]] = []
else:
message_ids: List[int] = []
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
if not os.path.exists(message_filename):
break
with open(message_filename, "rb") as f:
data = orjson.loads(f.read())
# Aggressively free up memory.
del data['zerver_usermessage']
for row in data['zerver_message']:
# We truncate date_sent to int to theoretically
# save memory and speed up the sort. For
# Zulip-to-Zulip imports, the
# message_id will generally be a good tiebreaker.
# If we occasionally mis-order the ids for two
# messages from the same second, it's not the
# end of the world, as it's likely those messages
# arrived to the original server in somewhat
# arbitrary order.
message_id = row['id']
if sort_by_date:
date_sent = int(row['date_sent'])
tup = (date_sent, message_id)
tups.append(tup)
else:
message_ids.append(message_id)
dump_file_id += 1
if sort_by_date:
tups.sort()
message_ids = [tup[1] for tup in tups]
return message_ids
def import_message_data(realm: Realm,
sender_map: Dict[int, Record],
import_dir: Path) -> None:
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
if not os.path.exists(message_filename):
break
with open(message_filename, "rb") as f:
data = orjson.loads(f.read())
logging.info("Importing message dump %s", message_filename)
re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
fix_datetime_fields(data, 'zerver_message')
# Parser to update message content with the updated attachment URLs
fix_upload_links(data, 'zerver_message')
# We already create mappings for zerver_message ids
# in update_message_foreign_keys(), so here we simply
# apply them.
message_id_map = ID_MAP['message']
for row in data['zerver_message']:
row['id'] = message_id_map[row['id']]
for row in data['zerver_usermessage']:
assert(row['message'] in message_id_map)
fix_message_rendered_content(
realm=realm,
sender_map=sender_map,
messages=data['zerver_message'],
)
logging.info("Successfully rendered Markdown for message batch")
# A LOT HAPPENS HERE.
# This is where we actually import the message data.
bulk_import_model(data, Message)
# Due to the structure of these message chunks, we're
# guaranteed to have already imported all the Message objects
# for this batch of UserMessage objects.
re_map_foreign_keys(data, 'zerver_usermessage', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
fix_bitfield_keys(data, 'zerver_usermessage', 'flags')
bulk_import_user_message_data(data, dump_file_id)
dump_file_id += 1
def import_attachments(data: TableData) -> None:
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, 'zerver_attachment')
re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_attachment', 'realm', related_table="realm")
# Configure ourselves. Django models many-to-many (m2m)
# relations asymmetrically. The parent here refers to the
# Model that has the ManyToManyField. It is assumed here
# the child models have been loaded, but we are in turn
# responsible for loading the parents and the m2m rows.
parent_model = Attachment
parent_db_table_name = 'zerver_attachment'
parent_singular = 'attachment'
child_singular = 'message'
child_plural = 'messages'
m2m_table_name = 'zerver_attachment_messages'
parent_id = 'attachment_id'
child_id = 'message_id'
update_model_ids(parent_model, data, 'attachment')
# We don't bulk_import_model yet, because we need to first compute
# the many-to-many for this table.
# First, build our list of many-to-many (m2m) rows.
# We do this in a slightly convoluted way to anticipate
# a future where we may need to call re_map_foreign_keys.
m2m_rows: List[Record] = []
for parent_row in data[parent_db_table_name]:
for fk_id in parent_row[child_plural]:
m2m_row: Record = {}
m2m_row[parent_singular] = parent_row['id']
m2m_row[child_singular] = ID_MAP['message'][fk_id]
m2m_rows.append(m2m_row)
# Create our table data for insert.
m2m_data: TableData = {m2m_table_name: m2m_rows}
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
m2m_rows = m2m_data[m2m_table_name]
# Next, delete out our child data from the parent rows.
for parent_row in data[parent_db_table_name]:
del parent_row[child_plural]
# Update 'path_id' for the attachments
for attachment in data[parent_db_table_name]:
attachment['path_id'] = path_maps['attachment_path'][attachment['path_id']]
# Next, load the parent rows.
bulk_import_model(data, parent_model)
# Now, go back to our m2m rows.
# TODO: Do this the kosher Django way. We may find a
# better way to do this in Django 1.9 particularly.
with connection.cursor() as cursor:
sql_template = SQL('''
INSERT INTO {m2m_table_name} ({parent_id}, {child_id}) VALUES %s
''').format(
m2m_table_name=Identifier(m2m_table_name),
parent_id=Identifier(parent_id),
child_id=Identifier(child_id),
)
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
execute_values(cursor.cursor, sql_template, tups)
logging.info('Successfully imported M2M table %s', m2m_table_name)
def import_analytics_data(realm: Realm, import_dir: Path) -> None:
analytics_filename = os.path.join(import_dir, "analytics.json")
if not os.path.exists(analytics_filename):
return
logging.info("Importing analytics data from %s", analytics_filename)
with open(analytics_filename, "rb") as f:
data = orjson.loads(f.read())
# Process the data through the fixer functions.
fix_datetime_fields(data, 'analytics_realmcount')
re_map_foreign_keys(data, 'analytics_realmcount', 'realm', related_table="realm")
update_model_ids(RealmCount, data, 'analytics_realmcount')
bulk_import_model(data, RealmCount)
fix_datetime_fields(data, 'analytics_usercount')
re_map_foreign_keys(data, 'analytics_usercount', 'realm', related_table="realm")
re_map_foreign_keys(data, 'analytics_usercount', 'user', related_table="user_profile")
update_model_ids(UserCount, data, 'analytics_usercount')
bulk_import_model(data, UserCount)
fix_datetime_fields(data, 'analytics_streamcount')
re_map_foreign_keys(data, 'analytics_streamcount', 'realm', related_table="realm")
re_map_foreign_keys(data, 'analytics_streamcount', 'stream', related_table="stream")
update_model_ids(StreamCount, data, 'analytics_streamcount')
bulk_import_model(data, StreamCount)
|
the-stack_0_17261 | import numpy,random,os
lr = 1
bias = 1
weights = list()
for k in range(3):
weights.append(random.random()) #Assigning random weights
def ptron(inp1,inp2,outp):
outp_pn = inp1*weights[0]+inp2*weights[1]+bias*weights[2]
outp_pn = 1.0/(1+numpy.exp(-outp_pn)) #Sigmoid Function
err = outp - outp_pn
weights[0] += err*inp1*lr #Modifying weights
weights[1] += err*inp2*lr
weights[2] += err*bias*lr
for i in range(100): #Training With Data
    ptron(0,0,0) # Passing the truth values of AND
ptron(1,1,1)
ptron(1,0,0)
ptron(0,1,0)
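# Evaluate the trained weights on every input pair; an output close to 1
# means the learned AND gate fired for that pair.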
for x,y in [(0,0),(1,0),(0,1),(1,1)]:
outp_pn = x*weights[0]+y*weights[1]+bias*weights[2]
    # Based on the trained weights
outp = 1.0/(1+numpy.exp(-outp_pn))
print(str(x) + " AND " + str(y) + " yields: " + str(outp)) |
the-stack_0_17263 | from queue import Queue
class Node:
def __init__(self, data):
self.data = data
self.parent = None
self.children = dict()
"""
Dictionary whose values are the node children and whose keys are the corresponding nodes
data.
"""
def add_child(self, child):
child.parent = self
self.children[child.data] = child
class Tree:
def __init__(self, root: Node):
self.root = root
def bfs_search(self, data, depth=None):
"""
Searches for a node, given its data. The search starts from the root.
:param data: Data of the node to find.
:param depth: Limits the search to nodes with the given depth.
:return: The node if it's found, None otherwise.
"""
visited, queue = set(), Queue()
# Each element of the queue is a couple (node, level):
queue.put((self.root, 0))
while not queue.empty():
node, level = queue.get()
if depth is not None and level > depth:
break
if depth is None:
if node.data == data:
return node
else:
if level == depth and node.data == data:
return node
for child in node.children.values():
if child in visited:
continue
queue.put((child, level + 1))
visited.add(node)
return None
def _bfs_insert(self, child: Node, parent: Node) -> bool:
node = self.bfs_search(parent.data)
if node is not None:
node.add_child(child)
return True
else:
return False
def insert(self, child: Node, parent: Node) -> bool:
"""
Inserts a node given its parent. Note: insertion is done on the first node with the same
data as the given parent node.
:param child: Node to insert.
:param parent: Parent node.
:return: True if the node has been inserted, False otherwise.
"""
return self._bfs_insert(child, parent)
def parent(self, data):
"""
Gets the parent of a node, given the node data.
:param data: Data of the node to find.
:return: Parent node if found, None otherwise.
"""
node = self.bfs_search(data)
if node is not None:
return node.parent
else:
return None
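# Minimal usage sketch (not part of the original module): build a small tree
# and exercise bfs_search/insert/parent. The node values here are arbitrary.
if __name__ == "__main__":
    root = Node(1)
    tree = Tree(root)
    tree.insert(Node(2), root)
    tree.insert(Node(3), root)
    tree.insert(Node(4), Node(2))        # parent is matched by data, not identity
    assert tree.bfs_search(4).parent.data == 2
    assert tree.parent(3) is root
    assert tree.bfs_search(4, depth=1) is None   # node 4 lives at depth 2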
|
the-stack_0_17265 | # -*- coding: utf-8 -*-
"""
mygeotab.py3.api_async
~~~~~~~~~~~~~~~~~~~~~~
Async/Await-able (Python 3.5+) public objects and methods wrapping the MyGeotab API.
"""
import asyncio
import sys
if sys.version_info < (3, 5):
raise Exception("Python 3.5+ is required to use the async API")
import ssl
from concurrent.futures import TimeoutError
import aiohttp
from mygeotab import api
from mygeotab.api import DEFAULT_TIMEOUT, get_headers
from mygeotab.exceptions import MyGeotabException, TimeoutException, AuthenticationException
from mygeotab.serializers import json_serialize, json_deserialize
class API(api.API):
"""A simple, asynchronous, and Pythonic wrapper for the MyGeotab API."""
def __init__(
self,
username,
password=None,
database=None,
session_id=None,
server="my.geotab.com",
timeout=DEFAULT_TIMEOUT,
proxies=None,
):
"""
Initialize the asynchronous MyGeotab API object with credentials.
:param username: The username used for MyGeotab servers. Usually an email address.
:param password: The password associated with the username. Optional if `session_id` is provided.
:param database: The database or company name. Optional as this usually gets resolved upon authentication.
:param session_id: A session ID, assigned by the server.
:param server: The server ie. my23.geotab.com. Optional as this usually gets resolved upon authentication.
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:param proxies: The proxies dictionary to apply to the request.
:raise Exception: Raises an Exception if a username, or one of the session_id or password is not provided.
"""
super().__init__(username, password, database, session_id, server, timeout, proxies=proxies)
async def call_async(self, method, **parameters):
"""Makes an async call to the API.
:param method: The method name.
        :param parameters: Additional parameters to send (for example, search=dict(id='b123')).
        :return: The JSON result (decoded into a dict) from the server.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
if method is None:
raise Exception("A method name must be specified")
params = api.process_parameters(parameters)
if self.credentials and not self.credentials.session_id:
self.authenticate()
if "credentials" not in params and self.credentials.session_id:
params["credentials"] = self.credentials.get_param()
try:
result = await _query(self._server, method, params, verify_ssl=self._is_verify_ssl)
if result is not None:
self.__reauthorize_count = 0
return result
except MyGeotabException as exception:
if exception.name == "InvalidUserException":
if self.__reauthorize_count == 0 and self.credentials.password:
self.__reauthorize_count += 1
self.authenticate()
return await self.call_async(method, **parameters)
else:
raise AuthenticationException(
self.credentials.username, self.credentials.database, self.credentials.server
)
raise
async def multi_call_async(self, calls):
"""Performs an async multi-call to the API
:param calls: A list of call 2-tuples with method name and params (for example, ('Get', dict(typeName='Trip')) )
:return: The JSON result (decoded into a dict) from the server
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server
:raise TimeoutException: Raises when the request does not respond after some time.
"""
formatted_calls = [dict(method=call[0], params=call[1] if len(call) > 1 else {}) for call in calls]
return await self.call_async("ExecuteMultiCall", calls=formatted_calls)
async def get_async(self, type_name, **parameters):
"""Gets entities asynchronously using the API. Shortcut for using async_call() with the 'Get' method.
:param type_name: The type of entity.
:param parameters: Additional parameters to send.
:return: The JSON result (decoded into a dict) from the server.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
if parameters:
results_limit = parameters.get("resultsLimit", None)
if results_limit is not None:
del parameters["resultsLimit"]
if "search" in parameters:
parameters.update(parameters["search"])
del parameters["search"]
parameters = dict(search=parameters, resultsLimit=results_limit)
return await self.call_async("Get", type_name=type_name, **parameters)
async def add_async(self, type_name, entity):
"""
Adds an entity asynchronously using the API. Shortcut for using async_call() with the 'Add' method.
:param type_name: The type of entity.
:param entity: The entity to add.
:return: The id of the object added.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
return await self.call_async("Add", type_name=type_name, entity=entity)
async def set_async(self, type_name, entity):
"""Sets an entity asynchronously using the API. Shortcut for using async_call() with the 'Set' method.
:param type_name: The type of entity
:param entity: The entity to set
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server
"""
return await self.call_async("Set", type_name=type_name, entity=entity)
async def remove_async(self, type_name, entity):
"""Removes an entity asynchronously using the API. Shortcut for using async_call() with the 'Remove' method.
:param type_name: The type of entity.
:param entity: The entity to remove.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
return await self.call_async("Remove", type_name=type_name, entity=entity)
@staticmethod
def from_credentials(credentials):
"""Returns a new async API object from an existing Credentials object.
:param credentials: The existing saved credentials.
:return: A new API object populated with MyGeotab credentials.
"""
return API(
username=credentials.username,
password=credentials.password,
database=credentials.database,
session_id=credentials.session_id,
server=credentials.server,
)
async def server_call_async(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters):
"""Makes an asynchronous call to an un-authenticated method on a server.
:param method: The method name.
:param server: The MyGeotab server.
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
:return: The JSON result (decoded into a dict) from the server.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
if method is None:
raise Exception("A method name must be specified")
if server is None:
raise Exception("A server (eg. my3.geotab.com) must be specified")
parameters = api.process_parameters(parameters)
return await _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl)
async def _query(server, method, parameters, timeout=DEFAULT_TIMEOUT, verify_ssl=True):
"""Formats and performs the asynchronous query against the API
:param server: The server to query.
:param method: The method name.
:param parameters: A dict of parameters to send
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:param verify_ssl: Whether or not to verify SSL connections
:return: The JSON-decoded result from the server
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server
:raise TimeoutException: Raises when the request does not respond after some time.
:raise aiohttp.ClientResponseError: Raises when there is an HTTP status code that indicates failure.
"""
api_endpoint = api.get_api_url(server)
params = dict(id=-1, method=method, params=parameters)
headers = get_headers()
conn = aiohttp.TCPConnector(ssl=ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) if verify_ssl else False)
try:
async with aiohttp.ClientSession(connector=conn) as session:
response = await session.post(
api_endpoint, data=json_serialize(params), headers=headers, timeout=timeout, allow_redirects=True
)
response.raise_for_status()
content_type = response.headers.get("Content-Type")
body = await response.text()
except (TimeoutError, asyncio.TimeoutError):
raise TimeoutException(server)
if content_type and "application/json" not in content_type.lower():
return body
return api._process(json_deserialize(body))
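if __name__ == "__main__":
    # Hedged usage sketch, not part of the library: drives the async wrapper
    # from a plain event loop (asyncio.run needs Python 3.7+). The credentials
    # and the 'Device' type name are placeholders; substitute real MyGeotab
    # values before running.
    async def _demo():
        client = API(username="user@example.com", password="password", database="demo")
        return await client.get_async("Device", resultsLimit=5)
    print(asyncio.run(_demo()))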
|
the-stack_0_17269 | import Tkinter
# snake0.py
from Tkinter import *
import random  # needed by placeFood()
def mousePressed(event):
canvas = event.widget.canvas
redrawAll(canvas)
def keyPressed(event):
canvas = event.widget.canvas
redrawAll(canvas)
def timerFired(canvas):
redrawAll(canvas)
delay = 250 # milliseconds
canvas.after(delay, timerFired, canvas) # pause, then call timerFired again
def redrawAll(canvas):
canvas.delete(ALL)
drawSnakeBoard(canvas)
def drawSnakeBoard(canvas):
space = 30
findSnakeHead(canvas)
for i in range(len(canvas.data["snakeBoard"])):
canvas.create_line(i*space, 0,i*space, 300)
canvas.create_line(0, i*space, 300, i*space)
for r, row in enumerate (canvas.data["snakeBoard"]):
for c, col in enumerate (row):
if col > 0 :
drawSnakeCell(canvas, r, c)
return
def drawSnakeCell(canvas, row, col):
x0 = 30*col
y0 = 30*row
x1 = 30*col + 30
y1 = 30*row + 30
id = canvas.create_oval(x0, y0, x1, y1, fill = "blue")
margin = 5
cellSize = 30
return
def findSnakeHead(canvas):
for r, row in enumerate(canvas.data['snakeBoard']):
for c, col in enumerate(row):
if col == 9:
break
if col == 9:
break
canvas.data["headRow"]= r
canvas.data["headCol"]= c
def removeTail(canvas):
for r, row in enumerate(canvas.data['snakeBoard']):
for c, col in enumerate(row):
if col > 0:
canvas.data['snakeBoard'][r][c] = col - 1
def placeFood(canvas):
snakeBoard = canvas.data["snakeBoard"]
rows = len(snakeBoard)
cols = len(snakeBoard[0])
while True:
row = random.randint(0,rows-1)
col = random.randint(0,cols-1)
if (snakeBoard[row][col] == 0):
break
snakeBoard[row][col] = -1
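# How the snake is stored: each cell of snakeBoard holds 0 for empty space,
# -1 for food, or a positive "age" for a body segment, largest at the head (9)
# down to 1 at the tail. moveSnake() writes head+1 into the new head cell and
# removeTail() then decrements every segment, so the old tail drops to 0 and
# the head value settles back at 9.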
def moveSnake(canvas, drow, dcol):
headCol = canvas.data["headCol"]
headRow = canvas.data["headRow"]
newHeadRow = headRow + drow;
if newHeadRow < 0:
newHeadRow = len(canvas.data["snakeBoard"]) - 1;
elif newHeadRow >= len(canvas.data["snakeBoard"]):
newHeadRow = 0
newHeadCol = headCol + dcol;
if newHeadCol < 0:
newHeadCol = len(canvas.data["snakeBoard"][0]) - 1;
elif newHeadCol >= len(canvas.data["snakeBoard"][0]):
newHeadCol = 0
canvas.data["snakeBoard"][newHeadRow][newHeadCol] = canvas.data["snakeBoard"][headRow][headCol] + 1
canvas.data["headRow"] = newHeadRow
canvas.data["headCol"] = newHeadCol
removeTail(canvas);
def gameOver(canvas):
canvas.data["isGameOver"] = True
def keyPressed(event):
canvas = event.widget.canvas
if (event.keysym == "Up"):
moveSnake(canvas, -1, 0)
elif (event.keysym == "Down"):
moveSnake(canvas, +1, 0)
elif (event.keysym == "Left"):
moveSnake(canvas, 0,-1)
elif (event.keysym == "Right"):
moveSnake(canvas, 0,+1)
elif (event.char == "d"):
canvas.data["inDebugMode"] = not canvas.data["inDebugMode"]
redrawAll(canvas)
def loadSnakeBoard(canvas):
canvas.data["snakeBoard"] = [ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 4, 5, 6, 0, 0, 0 ],
[ 0, 0, 0, 0, 3, 0, 7, 0, 0, 0 ],
[ 0, 0, 0, 1, 2, 0, 8, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 9, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
]
findSnakeHead(canvas)
return
def printInstructions():
    print("Snake! Use the arrow keys to move the snake. " +
          "Eat food to grow. " +
          "Stay on the board! " +
          "And don't crash into yourself.")
return
def init(canvas):
printInstructions()
loadSnakeBoard(canvas)
redrawAll(canvas)
########### copy-paste below here ###########
def run():
# create the root and the canvas
root = Tk()
canvas = Canvas(root, width=310, height=310)
canvas.pack()
# Store canvas in root and in canvas itself for callbacks
root.canvas = canvas.canvas = canvas
# Set up canvas data and call init
canvas.data = { }
init(canvas)
# set up events
root.bind("<Button-1>", mousePressed)
root.bind("<Key>", keyPressed)
timerFired(canvas)
# and launch the app
root.mainloop() # This call BLOCKS (so your program waits until you close the window!)
if __name__ == '__main__':
run() |
the-stack_0_17271 | from .views import UplinkViewSet
from .views import SegmentsListViewSet
UPLINK_LIST = UplinkViewSet.as_view({
'get': 'list',
'post': 'create'
})
UPLINK_DETAIL = UplinkViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
})
SEGMENTS_LIST = SegmentsListViewSet.as_view({
'get': 'list',
'post': 'create'
})
SEGMENTS_DETAIL = SegmentsListViewSet.as_view({
'get': 'list',
'patch': 'partial_update',
'put': 'update',
'delete': 'destroy',
})
|
the-stack_0_17272 | import pytest
from .factories.metrics import MetricFactory, RecordFactory
@pytest.mark.django_db
class TestMetricModel:
def test_repr(self):
obj = MetricFactory.create(name='my-metric')
assert str(obj) == 'my-metric'
@pytest.mark.django_db
class TestRecordModel:
def test_repr(self):
obj = RecordFactory.create(value=23)
assert str(obj).startswith('{0}/'.format(obj.metric_id))
assert '/{0:%s}/'.format(obj.timestamp) in str(obj)
assert str(obj).endswith('/23')
|
the-stack_0_17273 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Based on https://realpython.com/async-io-python/, with my own modifications.
import asyncio
import random
import time
async def part1(n: int) -> str:
i = random.randint(0, 10)
print(f"{n} part1 sleeping for [{i}] seconds")
await asyncio.sleep(i)
result = f"result1"
print(f"{n} part1 returning [{result}]")
return result
async def part2(n: int, arg: str) -> str:
i = random.randint(0, 10)
print(f"{n} part2 receive [{arg}] and sleeping for [{i}] seconds")
await asyncio.sleep(i)
result = f"result2+{arg}"
print(f"{n} part2 receive [{arg}] returning [{result}]")
return result
async def chain(n: int) -> None:
print(f'{n} Starting')
start = time.perf_counter()
p1 = await part1(n)
p2 = await part2(n, p1)
end = time.perf_counter() - start
print(f"{n} -->Chained result: [{p2}] (took {end:0.2f} seconds).")
async def main(*args):
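    # gather() schedules every chain() concurrently, so the total runtime
    # printed at the end tracks the slowest chain, not the sum of the sleeps.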
await asyncio.gather(*(chain(n) for n in args))
if __name__ == "__main__":
import sys
random.seed(444)
args = [1, 2, 3] if len(sys.argv) == 1 else map(int, sys.argv[1:])
start = time.perf_counter()
asyncio.run(main(*args))
end = time.perf_counter() - start
print(f"Program finished in {end:0.2f} seconds.")
|
the-stack_0_17274 | import torch
import pandas as pd
import numpy as np
def _isEven(num):
return num % 2 == 0
# return next multiple of 16 that is larger than num
def _nextMult16(num):
return (num // 16 + 1) * 16
# given the current dimension return the needed padding on each side
# for this dimension
def _determineDimPadding(dim):
is_even = _isEven(dim)
next_mult = _nextMult16(dim)
if is_even:
padding_lt = (next_mult - dim) // 2
padding_rb = padding_lt
else:
padding_lt = (next_mult - dim) // 2
padding_rb = padding_lt + 1
return padding_lt, padding_rb
class DataPadding:
# dataV is a tensor variable that is to be padded so that it's dimensions
# are a mutliple of 16. This is mainly used for the DCGAN
@staticmethod
def padData(dataV, row_dim, col_dim):
padding_left, padding_right = _determineDimPadding(row_dim)
padding_top, padding_bottom = _determineDimPadding(col_dim)
torch_padder = torch.nn.ZeroPad2d((padding_left, padding_right,
padding_top, padding_bottom))
temp = torch_padder(dataV)
return temp
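# Usage sketch (assumes NCHW tensors, as used by DCGAN-style models): a
# 59x59 feature map is padded out to 64x64, the next multiple of 16.
if __name__ == "__main__":
    sample = torch.zeros(1, 1, 59, 59)
    padded = DataPadding.padData(sample, 59, 59)
    print(padded.shape)  # torch.Size([1, 1, 64, 64])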
|
the-stack_0_17275 | # -*- coding: utf-8 -*-
'''
pip install weather-api
'''
from weather import Weather, Unit
import translator
weather = Weather(unit=Unit.CELSIUS)
'''
w = Weather(Unit.CELSIUS)
lookup = w.lookup_by_latlng(53.3494,-6.2601)
condition = lookup.condition
print(condition.text)
'''
location = weather.lookup_by_location('หนองคาย')
forecasts = location.forecast
def get_now_day():
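    # Builds a Thai-language summary string: "Today's weather forecast", the
    # (machine-translated) condition, and the high/low temperatures in Celsius.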
text="พยากรณ์อากาศวันนี้ \n"+"สภาพอากาศ : "+translator.translator_to_thai(forecasts[0].text)+"\nมีอุณหภูมิสูงสุด : "+forecasts[0].high+" C\nและมีอุณหภูมิต่ำสุด : "+forecasts[0].low+" C"
return text |
the-stack_0_17277 | # model settings
temperature = 0.2
with_norm = True
query_dim = 128
model = dict(
type='SimSiamBaseTSNTracker',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
# cls_head=None,
# patch_head=None,
att_plugin=dict(
type='SelfAttention', dropout=0., matmul_norm=True, use_residual=True),
img_head=dict(
type='SimSiamHead',
in_channels=512,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=512,
projection_out_channels=512,
num_predictor_fcs=2,
predictor_mid_channels=128,
predictor_out_channels=512,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg'))
# model training and testing settings
train_cfg = dict(
intra_video=False,
att_indices=(3, ),
att_to_target=False,
feat_rescale=True,
aux_as_value=False)
test_cfg = dict(
precede_frames=20,
topk=10,
temperature=0.2,
strides=(1, 2, 1, 1),
out_indices=(2, 3),
neighbor_range=24,
with_first=True,
with_first_neighbor=True,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=1, frame_interval=8, num_clips=4),
# dict(type='DuplicateFrames', times=4),
dict(type='DecordDecode'),
dict(
type='RandomResizedCrop',
area_range=(0.2, 1.),
same_across_clip=False,
same_on_clip=False,
same_clip_indices=(1, 3)),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(
type='Flip',
flip_ratio=0.5,
same_across_clip=False,
same_on_clip=False,
same_clip_indices=(1, 3)),
# dict(
# type='ColorJitter',
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.1,
# p=0.8,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGrayScale',
# p=0.2,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGaussianBlur',
# p=0.5,
# same_across_clip=False,
# same_on_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=128,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline)),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1, max_keep_ckpts=5)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['ssbt'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
|
the-stack_0_17278 | from keras.layers import Dense,Activation,Dropout,Flatten,Conv2D,MaxPooling2D
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import matplotlib.pyplot as plt
from random import shuffle
import numpy as np
from glob import glob
Image_width=128
Image_height=59
threshold=50
Epochs=40
input_shape=(Image_width,Image_height,1)
model=Sequential()
model.add(Conv2D(32,(3,3),input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(32,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(64,(3,3)))
model.add(Activation('relu'))
model.add(Conv2D(64,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(4))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=1e-4,beta_1=0.9,beta_2=0.999,epsilon=1e-8),metrics=['accuracy'])
call_back = TensorBoard(log_dir=r"D:\DL PYTHON\AI\TRAINING MODELS\LOGS")  # raw string keeps the backslashes literal
data=np.load("Training_data.npy",allow_pickle=True)
left=[];right=[];forward=[];reverse=[]
for each_sample in data:
if each_sample[1]==[1,0,0,0]:
left.append(each_sample)
elif each_sample[1]==[0,1,0,0]:
right.append(each_sample)
elif each_sample[1]==[0,0,1,0]:
forward.append(each_sample)
elif each_sample[1]==[0,0,0,1]:
reverse.append(each_sample)
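# Hold out the last `threshold` (50) samples of each class for validation and
# train on the remainder.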
training_data=left[:-threshold]+right[:-threshold]+forward[:-threshold]+reverse[:-threshold]
testing_data=left[-threshold:]+right[-threshold:]+forward[-threshold:]+reverse[-threshold:]
print("Total data-points considered for training : {}".format(len(training_data)))
print("Total data-points considered for testing : {}".format(len(testing_data)))
input("Press any key to start training")
x_train=np.array([data[0] for data in training_data]).reshape(len(training_data),Image_width,Image_height,1)
x_train=x_train/255
y_train=np.array([data[1] for data in training_data])
x_test=np.array([data[0] for data in testing_data]).reshape(len(testing_data),Image_width,Image_height,1)
x_test=x_test/255
y_test=np.array([data[1] for data in testing_data])
h=model.fit(x_train,y_train,epochs=Epochs,validation_data=(x_test,y_test),callbacks=[call_back])
model.save('model.h5') |
the-stack_0_17279 | import random
import timeit
from PIL import Image
from models import Yolov3Model
from config_reader import CocoConfigReader
from utils.dataset import CocoImagePathFileDataset
from utils.dataloader import get_data_loader
from utils.utils import (convert_corner_to_pyplot_repr,
non_max_suppression, load_classes)
import torch
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
YOLO_CONFIG_PATH = "../cfg/yolov3.cfg"
COCO_CONFIG_PATH = "../cfg/coco.data"
def plot_detections_on_image(detections, image):
#Ignore image index
detections = detections[:,1:]
#load class names
classes=load_classes("/home/ubuntu/workspace/pytorch_yolov3/data/coco.names")
img = image.permute(1, 2, 0).numpy()
img = Image.fromarray(np.uint8(img*255))
plt.figure()
fig, ax = plt.subplots(1)
ax.imshow(img)
cmap = plt.get_cmap('tab20')
colors = [cmap(i) for i in np.linspace(0, 1, 30)]
unique_labels = torch.unique(detections[:, -1]).cpu()
n_cls_preds = unique_labels.shape[0]
bbox_colors = random.sample(colors, n_cls_preds)
for prediction in detections:
x1, y1, h, w = convert_corner_to_pyplot_repr(
prediction[:4].unsqueeze(0)).squeeze()
class_prob = prediction[-2]
pred_class = prediction[-1]
color = bbox_colors[int(np.where(unique_labels == int(pred_class))[0])]
bbox = patches.Rectangle((x1, y1), h, w, linewidth=2,
edgecolor=color,
facecolor="none")
# Add the bbox to the image
ax.add_patch(bbox)
# Add class with probability
plt.text(x1, y1, s="P(" + classes[int(pred_class)] +f")={class_prob:.2f}",
color='white', verticalalignment='top',
bbox={'color': color, 'pad': 0})
plt.axis('off') #remove axes
plt.gca().xaxis.set_major_locator(NullLocator())#remove axis markings
plt.gca().yaxis.set_major_locator(NullLocator())
plt.savefig('../inference_test.png' , bbox_inches='tight', pad_inches=0.0)
plt.close()
if __name__ == "__main__":
yolo = Yolov3Model(YOLO_CONFIG_PATH)
yolo.load_weights()
yolo.eval()
data_loader = get_data_loader(COCO_CONFIG_PATH, CocoConfigReader,
CocoImagePathFileDataset, mode="valid")
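    # Run a single validation batch through YOLOv3, apply NMS, and save the
    # first image's detections to ../inference_test.png (note the break below).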
for i, (image, _) in enumerate(data_loader):
out = yolo(image)
out = out.to("cuda")
#Also changes (center_x, center_y, x, y) to (x1, y1, x2, y2)
detections = non_max_suppression(out, object_thresh=0.7)
plot_detections_on_image(detections[0], image[0])
print ("Image generated")
break |
the-stack_0_17281 | from __future__ import print_function, division, absolute_import, with_statement, unicode_literals, generators
import os
import argparse
import importlib
def flag(s):
return bool(int(s))
def get_data_name(config):
return '{}__'.format(config.training_data_hparams['source_dataset']['vocab_file'].split('/')[-2])
def get_model_name(config):
name = ''
name += 'model_{}__'.format(config.task)
name += 'dim_{}__'.format(config.dim)
name += 'embdim_{}__'.format(config.embdim)
name += 'nlayerssrc_{}__'.format(config.nlayerssrc)
name += 'nlayerstgt_{}__'.format(config.nlayerstgt)
name += 'bidir_{}__'.format(config.bidir)
return name
def get_train_name(config):
name = ''
name += 'bleu_w_{}__'.format(config.bleu_w)
name += 'max_order_{}__'.format(config.max_order)
name += 'dropout_{}__'.format(config.dropout)
name += 'soft_length_mask_{}__'.format(config.soft_length_mask)
name += 'recall_w_{}__'.format(config.recall_w)
name += 'max_decode_length_{}__'.format(config.max_decode_length)
name += 'gamma_{}__'.format(config.gamma)
name += 'lr_{}__'.format(config.lr)
name += 'pretrain_{}__'.format(config.pretrain)
if config.enable_pg:
if config.enable_xe:
name += 'xe_w_{}__'.format(config.xe_w)
if config.enable_pg:
name += 'pg_w_{}__'.format(config.pg_w)
if config.enable_bleu:
name += 'fix_rate_{}_{}_{}__'.format(config.fix_teach_gap, config.teach_gap, config.teach_cont)
name += 'teach_anneal_{}_{}_{}__'.format(config.initial_teach_rate, config.teach_rate_anneal, config.teach_rate_anneal_steps)
if hasattr(config, 'teach_X'):
name += 'teach_X_{}__'.format(config.teach_X)
if hasattr(config, 'seed'):
name += 'seed_{}__'.format(config.seed)
return name
argparser = argparse.ArgumentParser()
argparser.add_argument('--train', type=str, default='train_config')
argparser.add_argument('--model', type=str, default='model_config')
argparser.add_argument('--data', type=str, default='data_configs')
argparser.add_argument('--verbose', type=str, default='verbose_config')
argparser.add_argument('--running_mode', type=str, default='train')
argparser.add_argument('--caption', type=str, default='')
args = argparser.parse_args()
train_config = importlib.import_module(args.train)
model_config = importlib.import_module(args.model)
data_config = importlib.import_module(args.data)
verbose_config = importlib.import_module(args.verbose)
if args.caption:
captioning = True
caption_config = importlib.import_module(args.caption)
else:
captioning = False
mBLEU = train_config.mBLEU
if hasattr(train_config, "exp_name"):
exp_name = train_config.exp_name
else:
raise Exception("train config has no exp_name")
exp_name = get_data_name(data_config) + get_model_name(model_config) + get_train_name(train_config)
logdir = os.path.join('log', exp_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
logging_file = os.path.join(logdir, 'logging.txt')
|
the-stack_0_17282 | # APIs for Windows 64-bit MSVC 2013 runtime library (msvcr120).
# Built as a delta from the 32-bit version.
# Format: retval, rettype, callconv, exactname, arglist(type, name)
# arglist type is one of ['int', 'void *']
# arglist name is one of [None, 'funcptr', 'obj', 'ptr']
# List the normalized name of any 32-bit functions to omit.
api_32_omits = [
'msvcr120.??2@yapaxi@z',
'msvcr120.??_u@yapaxi@z',
'msvcr120.??3@yaxpax@z',
'msvcr120.??_v@yaxpax@z'
]
# Define any functions specific to 64-bit.
api_64_adds = {
'msvcr120.??2@yapeax_k@z':( 'int', None, 'cdecl', 'msvcr120.??2@YAPEAX_K@Z', (('int', None),) ),
'msvcr120.??_u@yapeax_k@z':( 'int', None, 'cdecl', 'msvcr120.??_U@YAPEAX_K@Z', (('int', None),) ),
'msvcr120.??3@yaxpeax@z':( 'void', None, 'cdecl', 'msvcr120.??3@YAXPEAX@Z', (('void *', 'ptr'),) ),
'msvcr120.??_v@yaxpeax@z':( 'void', None, 'cdecl', 'msvcr120.??_V@YAXPEAX@Z', (('void *', 'ptr'),) ),
}
# Build from the 32-bit API, skipping omits, changing the calling convention,
# and adding any specific 64-bit functions.
api_defs_64 = {}
import vivisect.impapi.windows.msvcr120_32 as m32
for name in m32.api_defs:
if name in api_32_omits:
continue
(rtype,rname,cconv,cname,cargs) = m32.api_defs[name]
api_defs_64[name] = (rtype, rname, 'msx64call', cname, cargs)
api_defs_64.update(api_64_adds)
|
the-stack_0_17283 | import requests
import sys
def request_api(username: str) -> list:
page = 1
followers = []
while True:
url = "https://api.github.com/users/"+username+"/followers?page="+str(page)+"&per_page=100"
req = requests.get(url)
if req.status_code == 200:
data = req.json()
if len(data) == 0:
break
else:
followers += data
page += 1
else:
print("URL: " + url)
print("request status_code:"+str(req.status_code))
sys.exit(-1)
return followers |
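

if __name__ == "__main__":
    # Minimal usage sketch: the username below is a placeholder, not something
    # taken from the original script.
    followers = request_api("octocat")
    print("fetched {} followers".format(len(followers)))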
the-stack_0_17284 | import sys
n, m, *sc = map(int, sys.stdin.read().split())
sc = zip(*[iter(sc)] * 2)
def main():
res = [0] * n
determined = set()
for s, c in sc:
if s in determined:
if res[s-1] != c:
return -1
else:
res[s-1] = c
determined.add(s)
if n > 1 and not res[0] == 0:
return -1
res = ''.join([str(d) for d in res])
return res
if __name__ == '__main__':
ans = main()
print(ans)
|
the-stack_0_17285 | _base_ = [
'../_base_/datasets/waymo_cars_and_peds.py',
'../_base_/models/pointnet2_seg.py',
'../_base_/schedules/cyclic_20e.py', '../_base_/default_runtime.py'
]
# data settings
data = dict(samples_per_gpu=16)
evaluation = dict(interval=50)
# runtime settings
checkpoint_config = dict(interval=50)
# PointNet2-MSG needs longer training time than PointNet2-SSG
runner = dict(type='EpochBasedRunner', max_epochs=1000)
#resume_from = 'work_dirs/pointnet2_cars_and_peds/latest.pth'
|
the-stack_0_17286 | import unittest
from panelexpr._utils.utils import *
from panelexpr.base.operator import TimeSeriesOperator
import panelexpr as pe
import pandas as pd
THRESHOLD = 1e-6
class MyMovingAverageOperator(TimeSeriesOperator):
def eval(self, series: pd.Series, window):
s = series.rolling(window).mean()
return s
pe.register("my_ma", MyMovingAverageOperator)
class BasicTest(unittest.TestCase):  # inherits from unittest.TestCase
@classmethod
def setUpClass(cls):
        # must be decorated with @classmethod; this runs once before all tests
cls.data = pd.read_csv("data/sample_zh_2.csv")
def test_rolling_mean(self):
s1 = pe.eval("mmean(Open, 2, group_by='windcode')", data=self.data)
s2 = pe.eval("my_ma(Open, 2)", data=self.data, group_tag="windcode")
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
if __name__ == '__main__':
    unittest.main()  # run all the test cases
|
the-stack_0_17287 |
from django.db import models
from mongoengine import DynamicDocument, ListField, StringField
class CollectionList(DynamicDocument):
meta = {'collection' : 'CollectionList'}
collectionlist = ListField(StringField())
class testcl(DynamicDocument): # raw tweet data
meta = {'collection' : 'testcl'}
text = StringField()
class rt_eliminated(DynamicDocument): # retweets are eliminated
meta = {'collection' : 'rt_eliminated'}
text = StringField()
class duplicates_eliminated(DynamicDocument): # duplicates are eliminated
meta = {'collection' : 'duplicates_eliminated'}
text = StringField()
class Clusters(DynamicDocument):
meta = {'abstract': True,}
ctweettuplelist = ListField(StringField())
cstr = StringField()
cno = StringField()
cnoprefix = StringField()
rif = ListField(StringField())
twids = ListField(StringField())
user_entropy = StringField()
label = StringField()
class all_data_clusters(Clusters):
meta = {'collection': 'all_data_clusters'}
class genocide_clusters_20151005(Clusters):
meta = {'collection': 'genocide_clusters_20151005'}
|
the-stack_0_17288 | from pytorch_toolbelt.modules import ABN
from pytorch_toolbelt.modules import decoders as D
from pytorch_toolbelt.modules import encoders as E
from torch import nn
from torch.nn import functional as F
from ..dataset import OUTPUT_MASK_32_KEY, OUTPUT_MASK_KEY
from catalyst.registry import Model
__all__ = ["DeeplabV3SegmentationModel", "resnet34_deeplab128", "seresnext101_deeplab256"]
class DeeplabV3SegmentationModel(nn.Module):
def __init__(
self,
encoder: E.EncoderModule,
num_classes: int,
dropout=0.25,
abn_block=ABN,
high_level_bottleneck=256,
low_level_bottleneck=32,
full_size_mask=True,
):
super().__init__()
self.encoder = encoder
self.decoder = D.DeeplabV3Decoder(
feature_maps=encoder.output_filters,
output_stride=encoder.output_strides[-1],
num_classes=num_classes,
high_level_bottleneck=high_level_bottleneck,
low_level_bottleneck=low_level_bottleneck,
abn_block=abn_block,
dropout=dropout,
)
self.full_size_mask = full_size_mask
def forward(self, x):
enc_features = self.encoder(x)
# Decode mask
mask, dsv = self.decoder(enc_features)
if self.full_size_mask:
mask = F.interpolate(mask, size=x.size()[2:], mode="bilinear", align_corners=False)
output = {OUTPUT_MASK_KEY: mask, OUTPUT_MASK_32_KEY: dsv}
return output
@Model
def resnet34_deeplab128(num_classes=1, dropout=0.0, pretrained=True):
encoder = E.Resnet34Encoder(pretrained=pretrained)
return DeeplabV3SegmentationModel(encoder, num_classes=num_classes, high_level_bottleneck=128, dropout=dropout)
@Model
def seresnext101_deeplab256(num_classes=1, dropout=0.0, pretrained=True):
encoder = E.SEResNeXt101Encoder(pretrained=pretrained)
return DeeplabV3SegmentationModel(encoder, num_classes=num_classes, high_level_bottleneck=256, dropout=dropout)
|
the-stack_0_17289 | """Script to transform and upload IRENA's capacity data to Resource Watch.
IRENA information is available through a Tableau applet.
This data must be downloaded manually, it is not possible to acquire
through an HTTP GET as we can tell.
Once downloaded, only minor transformation is needed to prepare it for upload.
The core issue is that the information does not fall into a data cube without
aggregating some rows to fit with expectations around data dimensionality.
It seems the data should be keyed on the dimensions:
- country
- year
- most granular technology (e.g. "offshore wind" not "wind")
- on-grid/off-grid
When keyed in this way there are still many compound keys
that have multiple rows and need to be summed to produce the
values expressed in Tableau visualization.
"""
import os
import sys
import pandas as pd
from zipfile import ZipFile
import shutil
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
import logging
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# this should be a table name that is not currently in use
dataset_name = 'ene_009_renewable_generation_annually'
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
'''
# data can be downloaded by following the steps in the 'Data Acquisition' section of the README file
# generate path to downloaded data file
download = os.path.join(os.path.expanduser("~"), 'Downloads', 'Export_Full_Data_data.csv')
# Move this file into your data directory
raw_data_file = os.path.join(data_dir, os.path.basename(download))
shutil.move(download,raw_data_file)
'''
Process data
'''
# read in csv file as Dataframe
df = pd.read_csv(raw_data_file, dtype=str)
# filter pumped storage plants just like IRENA default
df = df[df['Sub-technology'] != 'Pumped Storage']
# convert values from string to float because summing later
df['Values_asfloat'] = df['Values'].astype(float)
# subset into generation
generation_data = df[df['DataType'] == 'Electricity Generation']
# assuming GWh everywhere, check that; yes the field name has a space at the end
assert (generation_data['Unit '] == 'GWh').all()
# group by the key dimensions
grouped = generation_data.groupby(['ISO', 'Years', 'Sub-technology', 'Type'])
# ensure Technology is mapped 1:1 with Sub-technology
assert grouped.agg({'Technology': lambda x: len(set(x)) == 1}).Technology.all()
# create the data frame, renaming values and organizing the column order
data = grouped.agg({
'Values_asfloat': 'sum', # sum the numeric capacity value
'IRENA Label': 'first', # take a long name for the country
'Technology': 'first', # take the technology (superclass)
}).reset_index().rename(columns={
'ISO': 'iso_a3',
'Years': 'year',
'Sub-technology': 'subtechnology',
'Technology': 'technology',
'Type': 'grid_connection',
'IRENA Label': 'country_name',
'Values_asfloat': 'generation_GWh',
})[[ # set a new column order
'iso_a3', # key
'country_name', # 1:1 with iso_a3
'year', # key
'subtechnology', # key
'technology', # 1:n with subtechnology
'grid_connection', # key
'generation_GWh' # the numeric generation value in gigawatt-hours
]]
#save processed dataset to csv
processed_data_file = os.path.join(data_dir, dataset_name+'_edit.csv')
data.to_csv(processed_data_file, index=False)
'''
Upload processed data to Carto
'''
logger.info('Uploading processed data to Carto.')
util_carto.upload_to_carto(processed_data_file, 'LINK')
'''
Upload original data and processed data to Amazon S3 storage
'''
# initialize AWS variables
aws_bucket = 'wri-public-data'
s3_prefix = 'resourcewatch/'
logger.info('Uploading original data to S3.')
# Upload raw data file to S3
# Copy the raw data into a zipped file to upload to S3
raw_data_dir = os.path.join(data_dir, dataset_name+'.zip')
with ZipFile(raw_data_dir,'w') as zip:
zip.write(raw_data_file, os.path.basename(raw_data_file))
# Upload raw data file to S3
uploaded = util_cloud.aws_upload(raw_data_dir, aws_bucket, s3_prefix+os.path.basename(raw_data_dir))
logger.info('Uploading processed data to S3.')
# Copy the processed data into a zipped file to upload to S3
processed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip')
with ZipFile(processed_data_dir,'w') as zip:
zip.write(processed_data_file, os.path.basename(processed_data_file))
# Upload processed data file to S3
uploaded = util_cloud.aws_upload(processed_data_dir, aws_bucket, s3_prefix+os.path.basename(processed_data_dir)) |
the-stack_0_17290 | from fastapi import APIRouter, Request
from app.dependencies import templates
from app.internal.celebrity import get_today_month_and_day
router = APIRouter()
@router.get("/celebrity")
def celebrity(request: Request):
today = get_today_month_and_day()
return templates.TemplateResponse("celebrity.html", {
"request": request,
"date": today
})
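
# Usage sketch (hypothetical wiring, not taken from this project): the router
# defined above is normally registered on the main FastAPI application, e.g.
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router)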
|
the-stack_0_17291 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Cryptomiles Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import CryptomilesTestFramework
from test_framework.util import sync_blocks, connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(CryptomilesTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
|
the-stack_0_17292 | """This module implements the AppFutures.
We have two basic types of futures:
1. DataFutures which represent data objects
2. AppFutures which represent the futures on App/Leaf tasks.
"""
from concurrent.futures import Future
import logging
import threading
from parsl.app.errors import RemoteExceptionWrapper
logger = logging.getLogger(__name__)
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
class AppFuture(Future):
"""An AppFuture wraps a sequence of Futures which may fail and be retried.
An AppFuture starts with no parent future. A sequence of parent futures may
be assigned by code outside of this class, by passing that new parent future
into "update_future".
The AppFuture will set its result to the result of the parent future, if that
parent future completes without an exception. This result setting should
cause .result(), .exception() and done callbacks to fire as expected when a
Future has a result set.
The AppFuture will not set its result to the result of the parent future, if
that parent future completes with an exception, and if that parent future
has retries left. In that case, no result(), exception() or done callbacks should
report a result.
The AppFuture will set its result to the result of the parent future, if that
parent future completes with an exception and if that parent future has no
retries left, or if it has no retry field. .result(), .exception() and done callbacks
should give a result as expected when a Future has a result set
The parent future may return a RemoteExceptionWrapper as a result
and AppFuture will treat this an an exception for the above
retry and result handling behaviour.
"""
def __init__(self, tid=None, stdout=None, stderr=None):
"""Initialize the AppFuture.
Args:
KWargs:
- tid (Int) : Task id should be any unique identifier. Now Int.
- stdout (str) : Stdout file of the app.
Default: None
- stderr (str) : Stderr file of the app.
Default: None
"""
self._tid = tid
super().__init__()
self.parent = None
self._update_lock = threading.Lock()
self._outputs = []
self._stdout = stdout
self._stderr = stderr
def parent_callback(self, executor_fu):
"""Callback from a parent future to update the AppFuture.
Used internally by AppFuture, and should not be called by code using AppFuture.
Args:
- executor_fu (Future): Future returned by the executor along with callback.
This may not be the current parent future, as the parent future may have
already been updated to point to a retrying execution, and in that case,
this is logged.
In the case that a new parent has been attached, we must immediately discard
this result no matter what it contains (although it might be interesting
to log if it was successful...)
Returns:
- None
Updates the super() with the result() or exception()
"""
with self._update_lock:
if not executor_fu.done():
raise ValueError("done callback called, despite future not reporting itself as done")
# this is for consistency checking
if executor_fu != self.parent:
if executor_fu.exception() is None and not isinstance(executor_fu.result(), RemoteExceptionWrapper):
# ... then we completed with a value, not an exception or wrapped exception,
# but we've got an updated executor future.
# This is bad - for example, we've started a retry even though we have a result
raise ValueError("internal consistency error: AppFuture done callback called without an exception, but parent has been changed since then")
try:
res = executor_fu.result()
if isinstance(res, RemoteExceptionWrapper):
res.reraise()
super().set_result(executor_fu.result())
except Exception as e:
if executor_fu.retries_left > 0:
# ignore this exception, because assume some later
# parent executor, started external to this class,
# will provide the answer
pass
else:
super().set_exception(e)
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
@property
def tid(self):
return self._tid
def update_parent(self, fut):
"""Add a callback to the parent to update the state.
This handles the case where the user has called result on the AppFuture
before the parent exists.
"""
self.parent = fut
try:
fut.add_done_callback(self.parent_callback)
except Exception as e:
logger.error("add_done_callback got an exception {} which will be ignored".format(e))
def cancel(self):
raise NotImplementedError("Cancel not implemented")
def cancelled(self):
return False
def running(self):
if self.parent:
return self.parent.running()
else:
return False
@property
def outputs(self):
return self._outputs
def __repr__(self):
return '<%s super=%s parent=%s>' % (
self.__class__.__name__,
super().__repr__(),
self.parent.__repr__())
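

if __name__ == '__main__':
    # Minimal usage sketch of the parent/result relationship described in the
    # AppFuture docstring. `retries_left` is normally managed by the
    # DataFlowKernel; it is attached by hand here purely for illustration.
    executor_future = Future()
    executor_future.retries_left = 0
    app_future = AppFuture(tid=0)
    app_future.update_parent(executor_future)
    executor_future.set_result(42)
    assert app_future.result() == 42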
|
the-stack_0_17294 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cpovc_ovc', '0002_remove_ovcregistration_art_status'),
]
operations = [
migrations.AddField(
model_name='ovcregistration',
name='art_status',
field=models.CharField(max_length=4, null=True),
),
]
|
the-stack_0_17295 | """Posterior/Prior predictive plot."""
from numbers import Integral
import platform
import logging
import numpy as np
from .plot_utils import (
xarray_var_iter,
_scale_fig_size,
default_grid,
filter_plotters_list,
get_plotting_function,
)
from ..utils import _var_names
_log = logging.getLogger(__name__)
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind : str
Type of plot to display (kde, cumulative, or scatter). Defaults to kde.
alpha : float
Opacity of posterior/prior predictive density curves.
Defaults to 0.2 for kind = kde and cumulative, for scatter defaults to 0.7
mean : bool
Whether or not to plot the mean posterior/prior predictive distribution. Defaults to True
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names : list
List of variables to be plotted. Defaults to all observed variables in the
model if None.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples : int
The number of posterior/prior predictive samples to plot. For `kind` = 'scatter' and
`animation = False` if defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed : int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter : float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated : bool
Create an animation of one posterior/prior predictive sample per frame. Defaults to False.
animation_kwargs : dict
Keywords passed to `animation.FuncAnimation`.
legend : bool
Add legend to figure. By default True.
ax: axes, optional
Matplotlib axes or bokeh figures.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data,data_pairs={"obs":"obs"})
>>> #az.plot_ppc(data,data_pairs={"obs":"obs_hat"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError(
'`data` argument must have the group "{group}" for ppcplot'.format(group=groups)
)
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if data_pairs is None:
data_pairs = {}
if animation_kwargs is None:
animation_kwargs = {}
if platform.system() == "Linux":
animation_kwargs.setdefault("blit", True)
else:
animation_kwargs.setdefault("blit", False)
if animated and backend == "bokeh":
raise TypeError("Animation option is only supported with matplotlib backend.")
if animated and animation_kwargs["blit"] and platform.system() != "Linux":
_log.warning(
"If you experience problems rendering the animation try setting"
"`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)"
)
if alpha is None:
if animated:
alpha = 1
else:
if kind.lower() == "scatter":
alpha = 0.7
else:
alpha = 0.2
if jitter is None:
jitter = 0.0
assert jitter >= 0.0
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and "
+ "{limit}.".format(limit=total_pp_samples)
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
linewidth=linewidth,
mean=mean,
xt_labelsize=xt_labelsize,
ax_labelsize=ax_labelsize,
jitter=jitter,
total_pp_samples=total_pp_samples,
legend=legend,
markersize=markersize,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
ppcplot_kwargs.pop("animated")
ppcplot_kwargs.pop("animation_kwargs")
ppcplot_kwargs.pop("legend")
ppcplot_kwargs.pop("xt_labelsize")
ppcplot_kwargs.pop("ax_labelsize")
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
the-stack_0_17296 | import warnings
import json
import os
from vaex.utils import _ensure_strings_from_expressions
class DataFrameAccessorML(object):
def __init__(self, df):
self.df = df
def state_transfer(self):
from .transformations import StateTransfer
state = self.df.state_get()
state.pop('active_range') # we are not interested in this..
return StateTransfer(state=state)
def train_test_split(self, test_size=0.2, strings=True, virtual=True, verbose=True):
'''Will split the DataFrame in train and test part, assuming it is shuffled.
:param test_size: The fractional size of the test set.
:param strings: If True, the output DataFrames will also contain string columns, if any.
:param virtual: If True, the output DataFrames will also contain virtual contain, if any.
:param verbose: If True, print warnings to screen.
'''
if verbose:
warnings.warn('Make sure the DataFrame is shuffled')
initial = None
try:
assert self.df.filtered is False, 'Filtered DataFrames are not yet supported.'
# full_length = len(self)
df = self.df.trim()
initial = self.df.get_active_range()
df.set_active_fraction(test_size)
test = df.trim()
__, end = df.get_active_range()
df.set_active_range(end, df.length_original())
train = df.trim()
finally:
if initial is not None:
df.set_active_range(*initial)
return train, test
filename_spec = os.path.join(os.path.dirname(__file__), 'spec.json')
if os.path.exists(filename_spec):
# add DataFrameAccessorML.<snake_name> wrapper methods
with open(filename_spec) as f:
try:
spec = json.load(f)
except json.decoder.JSONDecodeError:
pass # we are generating the file probably
else:
for class_spec in spec:
def closure(class_spec=class_spec):
def wrapper(self, features=None, transform=True, **kwargs):
kwargs = kwargs.copy() # we do modifications, so make a copy
features = features or self.df.get_column_names()
features = _ensure_strings_from_expressions(features)
import importlib
module = importlib.import_module(class_spec['module'])
cls = getattr(module, class_spec['classname'])
if 'target' in kwargs:
kwargs['target'] = str(kwargs['target'])
object = cls(features=features, **kwargs)
object.fit(self.df)
if transform:
dft = object.transform(self.df)
return dft
else:
return object
# Append trait help strings to the docstring
doc = '\n'
for trait in class_spec['traits']:
doc += f':param {trait["name"]}: {trait["help"]} \n'
doc += ':param transform: If True, return a shallow copy of the transformed DataFrame, otherwise the return fitted transformer. \n'
try:
wrapper.__doc__= class_spec['doc'] + doc
except TypeError: # unsupported operand type(s) for +: 'NoneType' and 'str'
wrapper.__doc__= doc
return wrapper
accessor = DataFrameAccessorML
name = class_spec['snake_name']
setattr(accessor, name, closure())
from .transformations import PCA, PCAIncremental, RandomProjections
from .transformations import StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler
from .transformations import LabelEncoder, OneHotEncoder, MultiHotEncoder, FrequencyEncoder
from .transformations import CycleTransformer
from .transformations import BayesianTargetEncoder
from .transformations import WeightOfEvidenceEncoder
from .transformations import GroupByTransformer, KBinsDiscretizer
from .pipeline import Pipeline |
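
# Usage sketch (hedged): once vaex registers this accessor as `df.ml`, a
# shuffled vaex DataFrame can be split as below; `vaex.example()` is used
# here only as a stand-in dataset.
#
#   import vaex
#   df = vaex.example()
#   train, test = df.ml.train_test_split(test_size=0.2)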
the-stack_0_17298 | #!/usr/bin/python
# Compresses the files for one game into a single JavaScript file.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two files:
# compressed.js
# uncompressed.js
# The compressed file is a concatenation of all the relevant JavaScript which
# has then been run through Google's Closure Compiler.
# The uncompressed file is a script that loads in each JavaScript file
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
import json
import os.path
import re
import subprocess
import sys
import threading
# Define a warning message for all the generated files.
WARNING = '// Automatically generated file. Do not edit!\n'
messageNames = []
def main(name, lang):
if lang != None:
filterMessages(name, lang)
language(name, lang)
else:
# Extract the list of supported languages from boot.js.
# This is a bit fragile.
boot = open('appengine/common/boot.js', 'r')
js = ' '.join(boot.readlines())
boot.close()
m = re.search('\[\'BlocklyGamesLanguages\'\] = (\[[-,\'\\s\\w]+\])', js)
if not m:
raise Exception("Can't find BlocklyGamesLanguages in boot.js")
langs = m.group(1)
langs = langs.replace("'", '"')
langs = json.loads(langs)
filterMessages(name, langs[0])
for lang in langs:
language(name, lang)
def filterMessages(name, lang):
global messageNames
# Do a dummy compile and identify all the Blockly messages used.
print("Scanning for Blockly messages in %s..." % name)
f = open('appengine/%s/generated/%s/msg.js' % (name, lang), 'w')
f.write("""
goog.provide('BlocklyGames.Msg');
goog.require('Blockly.Msg');
Blockly.Msg["ybr8uu2q3b"] = '';
""")
f.close()
thread0 = Gen_compressed(name, lang)
thread0.start()
thread0.join()
f = open('appengine/%s/generated/%s/compressed.js' % (name, lang), 'r')
js = f.read()
f.close()
# Locate what Blockly.Msg has compiled into (e.g. h.Y)
m = re.search('([\w.$]+)\.ybr8uu2q3b=', js)
if m:
blocklyMsg = m.group(1)
blocklyMsg = blocklyMsg.replace('.', '\\.').replace('$', '\\$')
msgs1 = re.findall('\W' + blocklyMsg + '.([A-Z0-9_]+)', js);
msgs2 = re.findall('\WBKY_([A-Z0-9_]+)', js);
messageNames = list(set(msgs1 + msgs2))
# Resolve references.
# Blockly.Msg["TEXT_APPEND_VAR"] = Blockly.Msg["VAR_DEFAULT_NAME"];
# Does not handle long chains of references.
msgs = getMessages(lang)
for msg in msgs:
m = re.search('Blockly\.Msg\["([A-Z0-9_]+)"\] = Blockly\.Msg\["([A-Z0-9_]+)"\]', msg)
if m and m.group(1) in messageNames:
messageNames.append(m.group(2))
messageNames.sort()
print("Found %d Blockly messages." % len(messageNames))
def getMessages(lang):
# Read Blockly's message file for this language (default to English).
blocklyMsgFileName = 'appengine/third-party/blockly/msg/js/%s.js' % lang;
if not os.path.exists(blocklyMsgFileName):
blocklyMsgFileName = 'appengine/third-party/blockly/msg/js/en.js';
f = open(blocklyMsgFileName, 'r')
msgs = f.readlines()
f.close()
return msgs
def language(name, lang):
global messageNames
msgs = getMessages(lang)
# Write copy to Blockly Games.
f = open('appengine/%s/generated/%s/msg.js' % (name, lang), 'w')
for msg in msgs:
if msg == "'use strict';\n":
f.write("""'use strict';
goog.provide('BlocklyGames.Msg');
goog.require('Blockly.Msg');
""")
else:
# Only write out messages that are used (as detected in filterMessages).
m = re.search('Blockly\.Msg\["([A-Z0-9_]+)"\] = ', msg)
if not m or m.group(1) in messageNames:
f.write(msg)
f.close()
print('Compiling %s - %s' % (name.title(), lang))
# Run uncompressed and compressed code generation in separate threads.
# For multi-core computers, this offers a significant speed boost.
thread1 = Gen_uncompressed(name, lang)
thread2 = Gen_compressed(name, lang)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print("")
class Gen_uncompressed(threading.Thread):
def __init__(self, name, lang):
threading.Thread.__init__(self)
self.name = name
self.lang = lang
def run(self):
cmd = ['third-party-downloads/build/closurebuilder.py',
'--root=appengine/third-party/',
'--root=appengine/generated/%s/' % self.lang,
'--root=appengine/js/',
'--namespace=%s' % self.name.replace('/', '.').title(),
'--output_mode=list']
directory = self.name
while directory:
cmd.append('--root=appengine/%s/generated/%s/' % (directory, self.lang))
cmd.append('--root=appengine/%s/js/' % directory)
(directory, sep, fragment) = directory.rpartition(os.path.sep)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except:
raise Exception("Failed to Popen: %s" % ' '.join(cmd))
files = readStdout(proc)
if self.name == 'pond/docs':
path = '../'
else:
path = ''
prefix = 'appengine/'
srcs = []
for file in files:
file = file.strip()
if file[:len(prefix)] == prefix:
file = file[len(prefix):]
else:
raise Exception('"%s" is not in "%s".' % (file, prefix))
srcs.append('"%s%s"' % (path, file))
f = open('appengine/%s/generated/%s/uncompressed.js' %
(self.name, self.lang), 'w')
f.write("""%s
window.CLOSURE_NO_DEPS = true;
(function() {
var srcs = [
%s
];
function loadScript() {
var src = srcs.shift();
if (src) {
var script = document.createElement('script');
script.src = src;
script.type = 'text/javascript';
script.onload = loadScript;
document.head.appendChild(script);
}
}
loadScript();
})();
""" % (WARNING, ',\n '.join(srcs)))
f.close()
print('Found %d dependencies.' % len(srcs))
class Gen_compressed(threading.Thread):
def __init__(self, name, lang):
threading.Thread.__init__(self)
self.name = name
self.lang = lang
def run(self):
cmd = [
'java',
'-jar', 'third-party-downloads/closure-compiler.jar',
'--generate_exports',
'--compilation_level', 'ADVANCED_OPTIMIZATIONS',
'--dependency_mode=PRUNE',
'--externs', 'externs/gviz-externs.js',
'--externs', 'externs/interpreter-externs.js',
'--externs', 'externs/prettify-externs.js',
'--externs', 'externs/soundJS-externs.js',
'--externs', 'externs/storage-externs.js',
'--externs', 'appengine/third-party/blockly/externs/svg-externs.js',
'--language_in', 'ECMASCRIPT5_STRICT',
'--language_out', 'ECMASCRIPT5_STRICT',
'--entry_point=%s' % self.name.replace('/', '.').title(),
"--js='appengine/third-party/**.js'",
"--js='!appengine/third-party/base.js'",
"--js='!appengine/third-party/blockly/externs/**.js'",
"--js='appengine/generated/%s/*.js'" % self.lang,
"--js='appengine/js/*.js'",
'--warning_level', 'QUIET',
]
directory = self.name
while directory:
cmd.append("--js='appengine/%s/generated/%s/*.js'" %
(directory, self.lang))
cmd.append("--js='appengine/%s/js/*.js'" % directory)
(directory, sep, fragment) = directory.rpartition(os.path.sep)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except:
print("Failed to Popen: %s" % cmd)
raise
script = readStdout(proc)
script = ''.join(script)
script = self.trim_licence(script)
print('Compressed to %d KB.' % (len(script) / 1024))
f = open('appengine/%s/generated/%s/compressed.js' %
(self.name, self.lang), 'w')
f.write(WARNING)
f.write(script)
f.close()
def trim_licence(self, code):
"""Strip out Google's and MIT's Apache licences.
JS Compiler preserves dozens of Apache licences in the Blockly code.
Remove these if they belong to Google or MIT.
MIT's permission to do this is logged in Blockly issue 2412.
Args:
code: Large blob of compiled source code.
Returns:
Code with Google's and MIT's Apache licences trimmed.
"""
apache2 = re.compile("""/\\*
(Copyright \\d+ (Google LLC|Massachusetts Institute of Technology))
( All rights reserved.
)? SPDX-License-Identifier: Apache-2.0
\\*/""")
return re.sub(apache2, '', code)
def readStdout(proc):
data = proc.stdout.readlines()
# Python 2 reads stdout as text.
# Python 3 reads stdout as bytes.
return list(map(lambda line:
type(line) == str and line or str(line, 'utf-8'), data))
if __name__ == '__main__':
if len(sys.argv) == 2:
main(sys.argv[1], None)
elif len(sys.argv) == 3:
main(sys.argv[1], sys.argv[2])
else:
print('Format: %s <appname> [<language>]' % sys.argv[0])
sys.exit(2)
|
the-stack_0_17299 | from keras.datasets import mnist, fashion_mnist
from models import load_model
import numpy as np
import os
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import math
curdir = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('--optimizer', choices=['adam','sgd','adagrad'], default='adam')
parser.add_argument('--loss', choices=['mean_squared_error', 'binary_crossentropy'], default='mean_squared_error')
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--test_samples', type=int, default=50)
parser.add_argument('--result', default=os.path.join(curdir, 'result.png'))
def main(args):
# prepare normal dataset (Mnist)
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train / 255. # normalize into [0,1]
x_test = x_test / 255.
# prapare abnormal dataset (Fashion Mnist)
(_, _), (x_abnormal, _) = fashion_mnist.load_data()
x_abnormal = x_abnormal / 255.
# sample args.test_samples images from eaech of x_test and x_abnormal
perm = np.random.permutation(args.test_samples)
x_test = x_test[perm][:args.test_samples]
x_abnormal = x_abnormal[perm][:args.test_samples]
# train each model and test their capabilities of anomaly deteciton
model_names = ['autoencoder', 'deep_autoencoder', 'convolutional_autoencoder']
for model_name in model_names:
# instantiate model
model = load_model(model_name)
# reshape input data according to the model's input tensor
if model_name == 'convolutional_autoencoder':
x_train = x_train.reshape(-1,28,28,1)
x_test = x_test.reshape(-1,28,28,1)
x_abnormal = x_abnormal.reshape(-1,28,28,1)
elif model_name == 'autoencoder' or model_name == 'deep_autoencoder':
x_train = x_train.reshape(-1,28*28)
x_test = x_test.reshape(-1,28*28)
x_abnormal = x_abnormal.reshape(-1,28*28)
else:
raise ValueError('Unknown model_name %s was given' % model_name)
# compile model
model.compile(optimizer=args.optimizer, loss=args.loss)
# train on only normal training data
model.fit(
x=x_train,
y=x_train,
epochs=args.epochs,
batch_size=args.batch_size,
)
# test
x_concat = np.concatenate([x_test, x_abnormal], axis=0)
losses = []
for x in x_concat:
# compule loss for each test sample
x = np.expand_dims(x, axis=0)
loss = model.test_on_batch(x, x)
losses.append(loss)
# plot
plt.plot(range(len(losses)), losses, linestyle='-', linewidth=1, label=model_name)
# delete model for saving memory
del model
# create graph
plt.legend(loc='best')
plt.grid()
plt.xlabel('sample index')
plt.ylabel('loss')
plt.savefig(args.result)
plt.clf()
if __name__ == '__main__':
args = parser.parse_args()
main(args)
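    # Example invocation (the script filename is a placeholder; the flags are
    # those defined by the argparse parser above):
    #   python anomaly_detection.py --optimizer adam --epochs 10 --result result.png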
|
the-stack_0_17302 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import aws_cdk.core as cdk
import aws_cdk.aws_s3 as s3
import aws_cdk.aws_ec2 as ec2
import aws_cdk.aws_glue as glue
import aws_cdk.aws_iam as iam
import aws_cdk.aws_kms as kms
import aws_cdk.aws_s3_deployment as s3_deployment
from .configuration import (
AVAILABILITY_ZONE_1, SUBNET_ID_1,
S3_ACCESS_LOG_BUCKET, S3_KMS_KEY, S3_CONFORMED_BUCKET, S3_PURPOSE_BUILT_BUCKET, SHARED_SECURITY_GROUP_ID,
get_environment_configuration, get_logical_id_prefix, get_resource_name_prefix
)
class GlueStack(cdk.Stack):
def __init__(
self,
scope: cdk.Construct,
construct_id: str,
target_environment: str,
**kwargs
) -> None:
"""
CloudFormation stack to create Glue Jobs, Connections,
Script Bucket, Temporary Bucket, and an IAM Role for permissions.
@param scope cdk.Construct: Parent of this stack, usually an App or a Stage, but could be any construct.
@param construct_id str:
The construct ID of this stack. If stackName is not explicitly defined,
this id (and any parent IDs) will be used to determine the physical ID of the stack.
@param target_environment str: The target environment for stacks in the deploy stage
@param kwargs:
"""
super().__init__(scope, construct_id, **kwargs)
self.mappings = get_environment_configuration(target_environment)
logical_id_prefix = get_logical_id_prefix()
resource_name_prefix = get_resource_name_prefix()
existing_access_logs_bucket_name = cdk.Fn.import_value(self.mappings[S3_ACCESS_LOG_BUCKET])
access_logs_bucket = s3.Bucket.from_bucket_attributes(
self,
'ImportedBucket',
bucket_name=existing_access_logs_bucket_name
)
s3_kms_key_parameter = cdk.Fn.import_value(self.mappings[S3_KMS_KEY])
s3_kms_key = kms.Key.from_key_arn(self, 'ImportedKmsKey', s3_kms_key_parameter)
shared_security_group_parameter = cdk.Fn.import_value(self.mappings[SHARED_SECURITY_GROUP_ID])
glue_connection_subnet = cdk.Fn.import_value(self.mappings[SUBNET_ID_1])
glue_connection_availability_zone = cdk.Fn.import_value(self.mappings[AVAILABILITY_ZONE_1])
conformed_bucket_name = cdk.Fn.import_value(self.mappings[S3_CONFORMED_BUCKET])
conformed_bucket = s3.Bucket.from_bucket_name(
self,
id='ImportedConformedBucket',
bucket_name=conformed_bucket_name
)
purposebuilt_bucket_name = cdk.Fn.import_value(self.mappings[S3_PURPOSE_BUILT_BUCKET])
purposebuilt_bucket = s3.Bucket.from_bucket_name(
self,
id='ImportedPurposeBuiltBucket',
bucket_name=purposebuilt_bucket_name
)
shared_security_group = ec2.SecurityGroup.from_security_group_id(
self,
'ImportedSecurityGroup',
shared_security_group_parameter
)
subnet = ec2.Subnet.from_subnet_attributes(
self,
'ImportedSubnet',
subnet_id=glue_connection_subnet,
availability_zone=glue_connection_availability_zone
)
glue_scripts_bucket = self.glue_scripts_bucket(
target_environment,
logical_id_prefix,
resource_name_prefix,
s3_kms_key,
access_logs_bucket
)
glue_scripts_temp_bucket = self.glue_scripts_temporary_bucket(
target_environment,
logical_id_prefix,
resource_name_prefix,
s3_kms_key,
access_logs_bucket
)
glue_role = self.get_role(
target_environment,
logical_id_prefix,
resource_name_prefix,
s3_kms_key,
)
job_connection = glue.Connection(
self,
f'{target_environment}{logical_id_prefix}RawToConformedWorkflowConnection',
type=glue.ConnectionType.NETWORK,
connection_name=f'{target_environment.lower()}-{resource_name_prefix}-raw-to-conformed-connection',
security_groups=[shared_security_group],
subnet=subnet
)
self.raw_to_conformed_job = glue.CfnJob(
self,
f'{target_environment}{logical_id_prefix}RawToConformedJob',
name=f'{target_environment.lower()}-{resource_name_prefix}-raw-to-conformed-job',
command=glue.CfnJob.JobCommandProperty(
name='glueetl',
python_version='3',
script_location=f's3://{glue_scripts_bucket.bucket_name}/etl/etl_raw_to_conformed.py'
),
connections=glue.CfnJob.ConnectionsListProperty(
connections=[job_connection.connection_name],
),
default_arguments={
'--enable-glue-datacatalog': '""',
'--target_database_name': 'datablog_arg',
'--target_bucket': conformed_bucket.bucket_name,
'--target_table_name': 'datablog_nyc_raw',
'--TempDir': f's3://{glue_scripts_temp_bucket.bucket_name}/etl/raw-to-conformed',
},
execution_property=glue.CfnJob.ExecutionPropertyProperty(
max_concurrent_runs=1,
),
glue_version='2.0',
max_retries=0,
number_of_workers=5,
role=glue_role.role_arn,
worker_type='G.1X',
)
self.conformed_to_purpose_built_job = glue.CfnJob(
self,
f'{target_environment}{logical_id_prefix}ConformedToPurposeBuiltJob',
name=f'{target_environment.lower()}-{resource_name_prefix}-conformed-to-purpose-built-job',
command=glue.CfnJob.JobCommandProperty(
name='glueetl',
python_version='3',
script_location=f's3://{glue_scripts_bucket.bucket_name}/etl/etl_conformed_to_purposebuilt.py'
),
connections=glue.CfnJob.ConnectionsListProperty(
connections=[job_connection.connection_name],
),
default_arguments={
'--enable-glue-datacatalog': '""',
'--target_database_name': 'datablog_conformed_arg',
'--target_bucketname': purposebuilt_bucket.bucket_name,
'--target_table_name': 'datablog_nyc_purposebuilt',
'--txn_bucket_name': glue_scripts_bucket.bucket_name,
'--txn_sql_prefix_path': '/etl/transformation-sql/',
'--TempDir': f's3://{glue_scripts_temp_bucket.bucket_name}/etl/conformed-to-purpose-built'
},
execution_property=glue.CfnJob.ExecutionPropertyProperty(
max_concurrent_runs=1,
),
glue_version='2.0',
max_retries=0,
number_of_workers=5,
role=glue_role.role_arn,
worker_type='G.1X',
)
def glue_scripts_bucket(
self,
target_environment,
logical_id_prefix: str,
resource_name_prefix: str,
s3_kms_key: kms.Key,
access_logs_bucket: s3.Bucket
) -> s3.Bucket:
"""
Creates S3 Bucket that contains glue scripts used in Job execution
@param target_environment str: The target environment for stacks in the deploy stage
@param logical_id_prefix str: The logical id prefix to apply to all CloudFormation resources
@param resource_name_prefix str: The prefix applied to all resource names
@param s3_kms_key kms.Key: The KMS Key to use for encryption of data at rest
@param access_logs_bucket s3.Bucket: The access logs target for this bucket
"""
bucket_name = f'{target_environment.lower()}-{resource_name_prefix}-{self.account}-etl-scripts'
bucket = s3.Bucket(
self,
f'{target_environment}{logical_id_prefix}RawGlueScriptsBucket',
bucket_name=bucket_name,
access_control=s3.BucketAccessControl.PRIVATE,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
bucket_key_enabled=s3_kms_key is not None,
encryption=s3.BucketEncryption.KMS,
encryption_key=s3_kms_key,
public_read_access=False,
removal_policy=cdk.RemovalPolicy.DESTROY,
versioned=True,
object_ownership=s3.ObjectOwnership.OBJECT_WRITER,
server_access_logs_bucket=access_logs_bucket,
server_access_logs_prefix=bucket_name,
)
# Dynamically upload resources to the script target
s3_deployment.BucketDeployment(
self,
'DeployGlueJobScript',
# This path is relative to the root of the project
sources=[s3_deployment.Source.asset('./lib/glue_scripts')],
destination_bucket=bucket,
destination_key_prefix='etl',
)
return bucket
def glue_scripts_temporary_bucket(
self, target_environment, logical_id_prefix: str, resource_name_prefix: str,
s3_kms_key: kms.Key, access_logs_bucket: s3.Bucket
) -> s3.Bucket:
"""
Creates S3 Bucket used as a temporary file store in Job execution
@param target_environment str: The target environment for stacks in the deploy stage
@param logical_id_prefix str: The logical id prefix to apply to all CloudFormation resources
@param resource_name_prefix str: The prefix applied to all resource names
@param s3_kms_key kms.Key: The KMS Key to use for encryption of data at rest
@param access_logs_bucket s3.Bucket: The access logs target for this bucket
"""
bucket_name = f'{target_environment.lower()}-{resource_name_prefix}-{self.account}-glue-temporary-scripts'
bucket = s3.Bucket(
self,
f'{target_environment}{logical_id_prefix}RawGlueScriptsTemporaryBucket',
bucket_name=bucket_name,
access_control=s3.BucketAccessControl.PRIVATE,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
bucket_key_enabled=s3_kms_key is not None,
encryption=s3.BucketEncryption.KMS if s3_kms_key else s3.BucketEncryption.S3_MANAGED,
encryption_key=s3_kms_key if s3_kms_key else None,
public_read_access=False,
removal_policy=cdk.RemovalPolicy.DESTROY,
versioned=True,
object_ownership=s3.ObjectOwnership.OBJECT_WRITER,
server_access_logs_bucket=access_logs_bucket,
server_access_logs_prefix=bucket_name,
)
return bucket
def get_role(
self,
target_environment: str,
logical_id_prefix: str,
resource_name_prefix: str,
s3_kms_key: kms.Key,
) -> iam.Role:
"""
Creates the role used during Glue Job execution
@param target_environment str: The target environment for stacks in the deploy stage
@param logical_id_prefix str: The logical id prefix to apply to all CloudFormation resources
@param resource_name_prefix str: The prefix applied to all resource names
@param s3_kms_key kms.Key: The KMS Key to provide permissions to
@returns iam.Role: The role that was created
"""
return iam.Role(
self,
f'{target_environment}{logical_id_prefix}RawGlueRole',
role_name=f'{target_environment.lower()}-{resource_name_prefix}-raw-glue-role',
assumed_by=iam.ServicePrincipal('glue.amazonaws.com'),
inline_policies=[
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
's3:ListBucketVersions',
's3:ListBucket',
's3:GetBucketNotification',
's3:GetBucketLocation',
],
resources=[
'arn:aws:s3:::*'
]
)
]),
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
's3:ReplicationObject',
's3:PutObject',
's3:GetObject',
's3:DeleteObject',
],
resources=[
'arn:aws:s3:::*/*'
]
)
]),
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
's3:ListAllMyBuckets',
],
resources=[
'*'
]
)
]),
# NOTE: This is required due to bucket level encryption on S3 Buckets
iam.PolicyDocument(statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=[
'kms:*',
],
resources=[
s3_kms_key.key_arn,
]
)
]),
],
managed_policies=[
iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSGlueServiceRole'),
]
)
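

# Usage sketch (hypothetical app wiring; the construct id and environment name
# below are placeholders, and get_environment_configuration must be able to
# resolve whatever environment is chosen):
#
#   app = cdk.App()
#   GlueStack(app, 'DevGlueStack', target_environment='Dev')
#   app.synth()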
|
the-stack_0_17304 | """
udp_server
"""
from socket import *
# create the UDP socket
udp_socket = socket(AF_INET, SOCK_DGRAM)
# bind to the address and port
udp_socket.bind(('0.0.0.0', 8888))
"""
Function: receive a UDP message
Argument: the maximum number of bytes to receive at a time
Returns:  data  the received payload
          addr  the address of the sender
"""
data, addr = udp_socket.recvfrom(1024 * 1024)
print(addr)
print(data.decode())  # messages are sent and received as byte strings
udp_socket.sendto(b'thanks', addr)
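
# A matching client, sketched here as a comment because it runs as a separate
# program; the loopback address is an assumption for local testing:
#
#   from socket import socket, AF_INET, SOCK_DGRAM
#   client = socket(AF_INET, SOCK_DGRAM)
#   client.sendto(b'hello', ('127.0.0.1', 8888))
#   reply, _ = client.recvfrom(1024)
#   print(reply.decode())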
|
the-stack_0_17305 | # Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""led_controller controller."""
from controller import Robot
robot = Robot()
timestep = int(robot.getBasicTimeStep())
led = robot.getLED('led')
led.set(True)
positionSensor = robot.getPositionSensor('emergency button sensor')
positionSensor.enable(timestep)
released = True
while robot.step(timestep) != -1:
value = positionSensor.getValue()
if value > -0.002:
released = True
if released and value < -0.010:
released = False
led.set(not led.get())
|
the-stack_0_17306 | """This module contains the main functions used to load the required data from disk for training."""
import functools
import gzip
import pickle
import os
import numpy as np
import torch
from sketchgraphs_models import distributed_utils
from sketchgraphs_models.nn import data_util
from sketchgraphs_models.graph import dataset
from sketchgraphs.data import flat_array
def load_sequences_and_mappings(dataset_file, auxiliary_file, quantization, entity_features=True, edge_features=True):
data = flat_array.load_dictionary_flat(np.load(dataset_file, mmap_mode='r'))
if auxiliary_file is None:
root, _ = os.path.splitext(dataset_file)
auxiliary_file = root + '.stats.pkl.gz'
if entity_features or edge_features:
with gzip.open(auxiliary_file, 'rb') as f:
auxiliary_dict = pickle.load(f)
if entity_features:
entity_feature_mapping = dataset.EntityFeatureMapping(auxiliary_dict['node'])
else:
entity_feature_mapping = None
seqs = data['sequences']
weights = data['sequence_lengths']
if edge_features:
if isinstance(quantization['angle'], dataset.QuantizationMap):
angle_map = quantization['angle']
else:
angle_map = dataset.QuantizationMap.from_counter(auxiliary_dict['edge']['angle'], quantization['angle'])
if isinstance(quantization['length'], dataset.QuantizationMap):
length_map = quantization['length']
else:
length_map = dataset.QuantizationMap.from_counter(auxiliary_dict['edge']['length'], quantization['length'])
edge_feature_mapping = dataset.EdgeFeatureMapping(angle_map, length_map)
else:
edge_feature_mapping = None
return {
'sequences': seqs.share_memory_(),
'entity_feature_mapping': entity_feature_mapping,
'edge_feature_mapping': edge_feature_mapping,
'weights': weights
}
def load_dataset_and_weights_with_mapping(dataset_file, node_feature_mapping, edge_feature_mapping, seed=None):
data = flat_array.load_dictionary_flat(np.load(dataset_file, mmap_mode='r'))
seqs = data['sequences']
seqs.share_memory_()
ds = dataset.GraphDataset(seqs, node_feature_mapping, edge_feature_mapping, seed)
return ds, data['sequence_lengths']
def load_dataset_and_weights(dataset_file, auxiliary_file, quantization, seed=None,
entity_features=True, edge_features=True, force_entity_categorical_features=False):
data = load_sequences_and_mappings(dataset_file, auxiliary_file, quantization, entity_features, edge_features)
if data['entity_feature_mapping'] is None and force_entity_categorical_features:
# Create an entity mapping which only computes the categorical features (i.e. isConstruction and clockwise)
data['entity_feature_mapping'] = dataset.EntityFeatureMapping()
return dataset.GraphDataset(
data['sequences'], data['entity_feature_mapping'], data['edge_feature_mapping'], seed=seed), data['weights']
def make_dataloader_train(collate_fn, ds_train, weights, batch_size, num_epochs, num_workers, distributed_config=None):
sampler = torch.utils.data.WeightedRandomSampler(
weights, len(weights), replacement=True)
if distributed_config is not None:
sampler = distributed_utils.DistributedSampler(
sampler, distributed_config.world_size, distributed_config.rank)
batch_sampler = torch.utils.data.BatchSampler(
sampler, batch_size, drop_last=False)
dataloader_train = torch.utils.data.DataLoader(
ds_train,
collate_fn=collate_fn,
batch_sampler=data_util.MultiEpochSampler(batch_sampler, num_epochs),
num_workers=num_workers,
pin_memory=True)
batches_per_epoch = len(batch_sampler)
return dataloader_train, batches_per_epoch
def _make_dataloader_eval(ds_eval, weights, batch_size, num_workers, distributed_config=None):
sampler = torch.utils.data.WeightedRandomSampler(
weights, len(weights), replacement=True)
if distributed_config is not None:
sampler = distributed_utils.DistributedSampler(
sampler, distributed_config.world_size, distributed_config.rank)
dataloader_eval = torch.utils.data.DataLoader(
ds_eval,
collate_fn=functools.partial(
dataset.collate,
entity_feature_mapping=ds_eval.node_feature_mapping,
edge_feature_mapping=ds_eval.edge_feature_mapping),
sampler=sampler,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True)
return dataloader_eval
def initialize_datasets(args, distributed_config: distributed_utils.DistributedTrainingInfo = None):
"""Initialize datasets and dataloaders.
Parameters
----------
args : dict
Dictionary containing all the dataset configurations.
distributed_config : distributed_utils.DistributedTrainingInfo, optional
If not None, configuration options for distributed training.
Returns
-------
    torch.utils.data.DataLoader
        Training dataloader
    torch.utils.data.DataLoader
        Testing dataloader, or None if no test dataset is configured
int
Number of batches per training epoch
dataset.EntityFeatureMapping
Feature mapping in use for entities
dataset.EdgeFeatureMapping
Feature mapping in use for constraints
"""
quantization = {'angle': args['num_quantize_angle'], 'length': args['num_quantize_length']}
dataset_train_path = args['dataset_train']
auxiliary_path = args['dataset_auxiliary']
ds_train, weights_train = load_dataset_and_weights(
dataset_train_path, auxiliary_path, quantization, args['seed'],
not args.get('disable_entity_features', False), not args.get('disable_edge_features', False),
args.get('force_entity_categorical_features', False))
batch_size = args['batch_size']
num_workers = args['num_workers']
if distributed_config:
batch_size = batch_size // distributed_config.world_size
num_workers = num_workers // distributed_config.world_size
collate_fn = functools.partial(
dataset.collate,
entity_feature_mapping=ds_train.node_feature_mapping,
edge_feature_mapping=ds_train.edge_feature_mapping)
dl_train, batches_per_epoch = make_dataloader_train(
collate_fn, ds_train, weights_train, batch_size, args['num_epochs'], num_workers, distributed_config)
if args['dataset_test'] is not None:
ds_test, weights_test = load_dataset_and_weights_with_mapping(
args['dataset_test'], ds_train.node_feature_mapping, ds_train.edge_feature_mapping, args['seed'])
dl_test = _make_dataloader_eval(
ds_test, weights_test, batch_size, num_workers, distributed_config)
else:
dl_test = None
return dl_train, dl_test, batches_per_epoch, ds_train.node_feature_mapping, ds_train.edge_feature_mapping
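
# Minimal usage sketch. The paths and hyper-parameter values below are illustrative
# assumptions (not part of the original project); only the key names mirror what
# `initialize_datasets` reads above.
#
#   example_args = {
#       'dataset_train': 'data/sg_t16_train.npy',
#       'dataset_auxiliary': None,        # falls back to '<dataset_train root>.stats.pkl.gz'
#       'dataset_test': None,
#       'num_quantize_angle': 383,
#       'num_quantize_length': 383,
#       'seed': 42,
#       'batch_size': 8192,
#       'num_workers': 4,
#       'num_epochs': 50,
#   }
#   dl_train, dl_test, batches_per_epoch, node_map, edge_map = initialize_datasets(example_args)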
|
the-stack_0_17307 | import graphene
from ...core.permissions import ShippingPermissions
from ..channel.types import ChannelContext
from ..core.fields import ChannelContextFilterConnectionField
from ..decorators import permission_required
from ..translations.mutations import ShippingPriceTranslate
from .bulk_mutations import ShippingPriceBulkDelete, ShippingZoneBulkDelete
from .filters import ShippingZoneFilterInput
from .mutations.channels import ShippingMethodChannelListingUpdate
from .mutations.shippings import (
ShippingPriceCreate,
ShippingPriceDelete,
ShippingPriceExcludeProducts,
ShippingPriceRemoveProductFromExclude,
ShippingPriceUpdate,
ShippingZoneCreate,
ShippingZoneDelete,
ShippingZoneUpdate,
)
from .resolvers import resolve_shipping_zones
from .types import ShippingZone
class ShippingQueries(graphene.ObjectType):
shipping_zone = graphene.Field(
ShippingZone,
id=graphene.Argument(
graphene.ID, description="ID of the shipping zone.", required=True
),
channel=graphene.String(
description="Slug of a channel for which the data should be returned."
),
description="Look up a shipping zone by ID.",
)
shipping_zones = ChannelContextFilterConnectionField(
ShippingZone,
filter=ShippingZoneFilterInput(
description="Filtering options for shipping zones."
),
channel=graphene.String(
description="Slug of a channel for which the data should be returned."
),
description="List of the shop's shipping zones.",
)
@permission_required(ShippingPermissions.MANAGE_SHIPPING)
def resolve_shipping_zone(self, info, id, channel=None):
instance = graphene.Node.get_node_from_global_id(info, id, ShippingZone)
return ChannelContext(node=instance, channel_slug=channel) if instance else None
@permission_required(ShippingPermissions.MANAGE_SHIPPING)
def resolve_shipping_zones(self, info, channel=None, **_kwargs):
return resolve_shipping_zones(channel)
class ShippingMutations(graphene.ObjectType):
shipping_method_channel_listing_update = ShippingMethodChannelListingUpdate.Field()
shipping_price_create = ShippingPriceCreate.Field()
shipping_price_delete = ShippingPriceDelete.Field()
shipping_price_bulk_delete = ShippingPriceBulkDelete.Field()
shipping_price_update = ShippingPriceUpdate.Field()
shipping_price_translate = ShippingPriceTranslate.Field()
shipping_price_exclude_products = ShippingPriceExcludeProducts.Field()
shipping_price_remove_product_from_exclude = (
ShippingPriceRemoveProductFromExclude.Field()
)
shipping_zone_create = ShippingZoneCreate.Field()
shipping_zone_delete = ShippingZoneDelete.Field()
shipping_zone_bulk_delete = ShippingZoneBulkDelete.Field()
shipping_zone_update = ShippingZoneUpdate.Field()
|
the-stack_0_17308 | import json
import logging
import pytest
from model.msg_model import MsgModel
from util.auth_util import Identity
from util.auth_util import JWTAuthenticator
from util.auth_util import Role
from util.config_util import Config
logger = logging.getLogger(__name__)
class TestReadMsgs:
path = "/api/user/read_msgs"
@pytest.fixture(autouse=True)
def __setup__(self, client):
self.client = client
def trigger_run(self, role, payload):
headers = {}
if role:
auth_token = JWTAuthenticator.dump_access_token(
Config.auth_secret_key, Identity(user="xxx", role=role), exp=86400
)
headers = {"Authorization": f"bearer {auth_token}"}
return self.client.post(
url=self.path, data=json.dumps(payload), headers=headers
)
def test__ok(self):
# prepare fixture
MsgModel.leave_msg("hello")
MsgModel.leave_msg("world")
# user read two messages
resp = self.trigger_run(Role.USER, {})
assert resp.status_code == 200
assert resp.json() == {"msgs": ["hello", "world"]}
# admin also has permission to read
resp = self.trigger_run(Role.ADMIN, {"limit": 1})
assert resp.status_code == 200
assert resp.json() == {"msgs": ["hello"]}
def test__authentication_error(self):
resp = self.trigger_run(None, {"msg": "hello"})
assert resp.status_code == 401
assert resp.json() == {"code": "UNAUTHENTICATED", "msg": "JWT is missing"}
def test__limit_error(self):
resp = self.trigger_run(Role.USER, {"limit": 101})
assert resp.status_code == 400
assert resp.json() == {
"code": "INVALID_PARAM",
"msg": "Invalid body param: limit",
}
|
the-stack_0_17309 | from corehq.apps.app_manager.suite_xml.contributors import PostProcessor
from corehq.apps.app_manager.suite_xml.post_process.workflow import (
CommandId,
WorkflowDatumMeta,
WorkflowHelper,
prepend_parent_frame_children,
)
from corehq.apps.app_manager.suite_xml.xml_models import (
Argument,
PushFrame,
SessionEndpoint,
Stack,
StackDatum,
)
from corehq.util.timer import time_method
class EndpointsHelper(PostProcessor):
"""
Generates "Session Endpoints" - user-defined labels for forms or modules.
They end up as entries in the suite file that declare stack operations
    necessary to navigate to the form or module, as well as what arguments (e.g.
case IDs) must be provided to get there.
"""
@time_method()
def update_suite(self):
for module in self.modules:
if module.session_endpoint_id:
self.suite.endpoints.append(self._make_session_endpoint(module))
if module.module_type != "shadow":
for form in module.get_suite_forms():
if form.session_endpoint_id:
self.suite.endpoints.append(self._make_session_endpoint(module, form))
def _make_session_endpoint(self, module, form=None):
if form is not None:
endpoint_id = form.session_endpoint_id
else:
endpoint_id = module.session_endpoint_id
stack = Stack()
children = self.get_frame_children(module, form)
argument_ids = self._get_argument_ids(children)
# Add a claim request for each endpoint argument.
# This assumes that all arguments are case ids.
for arg_id in argument_ids:
self._add_claim_frame(stack, arg_id, endpoint_id)
# Add a frame to navigate to the endpoint
frame = PushFrame()
stack.add_frame(frame)
for child in children:
if isinstance(child, CommandId):
frame.add_command(child.to_command())
elif child.id in argument_ids:
self._add_datum_for_arg(frame, child.id)
return SessionEndpoint(
id=endpoint_id,
arguments=[Argument(id=i) for i in argument_ids],
stack=stack,
)
def _get_argument_ids(self, frame_children):
return [
child.id for child in frame_children
if isinstance(child, WorkflowDatumMeta) and child.requires_selection
]
def _add_claim_frame(self, stack, arg_id, endpoint_id):
frame = PushFrame()
stack.add_frame(frame)
self._add_datum_for_arg(frame, arg_id)
frame.add_command(f"'claim_command.{endpoint_id}.{arg_id}'")
def _add_datum_for_arg(self, frame, arg_id):
frame.add_datum(
StackDatum(id=arg_id, value=f"${arg_id}")
)
def get_frame_children(self, module, form):
helper = WorkflowHelper(self.suite, self.app, self.app.get_modules())
frame_children = helper.get_frame_children(module, form)
if module.root_module_id:
frame_children = prepend_parent_frame_children(helper, frame_children, module.root_module)
return frame_children
|
the-stack_0_17311 | from pathlib import Path
from urllib.request import urlopen
from .element import Element
import json
import shutil
import logging
logger = logging.getLogger(__name__)
def read_gltf(fin):
with open(fin, encoding='utf-8') as f:
gltf = json.load(f, object_hook=lambda d: Element(**d))
# buffers = []
# for buffer in gltf.buffers:
# buffers.append(read_buffer(buffer.uri))
# with open(Path(fin).parent / gltf.buffers[0].uri, "rb") as f:
# buffer = f.read()
buffer = read_buffer(gltf.buffers[0].uri, Path(fin).parent)
return gltf, buffer
def read_buffer(uri, parent):
if is_data_uri(uri):
with urlopen(uri) as response:
return response.read()
with open(parent / uri, "rb") as f:
return f.read()
def is_data_uri(uri):
return uri.startswith("data:")
def copy_textures(fin, fout, images):
if not images:
return
src_parent = Path(fin).parent
dest_parent = Path(fout).parent
if src_parent == dest_parent:
return
for image in images:
dest = dest_parent / image.uri
try:
dest.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(src_parent / image.uri, dest)
except Exception as e:
logger.error(e)
|
the-stack_0_17312 | import io
import json
import re
from operator import itemgetter
from typing import Any, Dict, List, Optional, Union, cast
import pytest # type: ignore
from PIL import Image # type: ignore
from looker_sdk.sdk.api40 import methods as mtds
from looker_sdk.sdk.api40 import models as ml
@pytest.fixture(scope="module")
def sdk(sdk40) -> mtds.Looker40SDK:
return sdk40
def test_crud_user(sdk: mtds.Looker40SDK):
"""Test creating, retrieving, updating and deleting a user."""
# Create user
user = sdk.create_user(
ml.WriteUser(first_name="John", last_name="Doe", is_disabled=False, locale="fr")
)
assert isinstance(user, ml.User)
assert isinstance(user.id, int)
assert user.first_name == "John"
assert user.last_name == "Doe"
assert not user.is_disabled
assert user.locale == "fr"
# sudo checks
user_id = user.id
sdk.login_user(user_id)
user = sdk.me()
assert user.first_name == "John"
assert user.last_name == "Doe"
sdk.logout()
user = sdk.me()
assert user.first_name != "John"
assert user.last_name != "Doe"
# Update user and check fields we didn't intend to change didn't change
update_user = ml.WriteUser(is_disabled=True, locale="uk")
sdk.update_user(user_id, update_user)
user = sdk.user(user_id)
assert user.first_name == "John"
assert user.last_name == "Doe"
assert user.locale == "uk"
assert user.is_disabled
# Update user and check fields we intended to wipe out are now None
# first way to specify nulling out a field
update_user = ml.WriteUser(first_name=ml.EXPLICIT_NULL)
# second way
update_user.last_name = ml.EXPLICIT_NULL
sdk.update_user(user_id, update_user)
user = sdk.user(user_id)
assert user.first_name == ""
assert user.last_name == ""
# Try adding email creds
sdk.create_user_credentials_email(
user_id, ml.WriteCredentialsEmail(email="[email protected]")
)
user = sdk.user(user_id)
assert isinstance(user.credentials_email, ml.CredentialsEmail)
assert user.credentials_email.email == "[email protected]"
# Delete user
resp = sdk.delete_user(user_id)
assert resp == ""
def test_me_returns_correct_result(sdk: mtds.Looker40SDK):
"""me() should return the current authenticated user"""
me = sdk.me()
assert isinstance(me, ml.User)
assert isinstance(me.credentials_api3, list)
assert len(me.credentials_api3) > 0
assert isinstance(me.credentials_api3[0], ml.CredentialsApi3)
def test_me_field_filters(sdk: mtds.Looker40SDK):
"""me() should return only the requested fields."""
me = sdk.me("id, first_name, last_name")
assert isinstance(me, ml.User)
assert isinstance(me.id, int)
assert isinstance(me.first_name, str)
assert me.first_name != ""
assert isinstance(me.last_name, str)
assert me.last_name != ""
assert not me.display_name
assert not me.email
assert not me.personal_space_id
@pytest.mark.usefixtures("test_users")
def test_bad_user_search_returns_no_results(sdk: mtds.Looker40SDK):
"""search_users() should return an empty list when no match is found."""
resp = sdk.search_users(first_name="Bad", last_name="News")
assert isinstance(resp, list)
assert len(resp) == 0
@pytest.mark.usefixtures("test_users")
def test_search_users_matches_pattern(
sdk: mtds.Looker40SDK, users: List[Dict[str, str]], email_domain: str
):
"""search_users should return a list of all matches."""
user = users[0]
# Search by full email
search_email = f'{user["first_name"]}.{user["last_name"]}{email_domain}'
search_results = sdk.search_users_names(pattern=search_email)
assert len(search_results) == 1
assert search_results[0].first_name == user["first_name"]
assert search_results[0].last_name == user["last_name"]
assert search_results[0].email == search_email
# Search by first name
search_results = sdk.search_users_names(pattern=user["first_name"])
assert len(search_results) > 0
assert search_results[0].first_name == user["first_name"]
# First name with spaces
u = sdk.create_user(ml.WriteUser(first_name="John Allen", last_name="Smith"))
if u.id:
search_results = sdk.search_users_names(pattern="John Allen")
assert len(search_results) == 1
assert search_results[0].first_name == "John Allen"
assert search_results[0].last_name == "Smith"
# Delete user
resp = sdk.delete_user(u.id)
assert resp == ""
@pytest.mark.usefixtures("test_users")
def test_it_matches_email_domain_and_returns_sorted(
sdk: mtds.Looker40SDK, email_domain: str, users: List[Dict[str, str]]
):
"""search_users_names() should search users matching a given pattern and return
sorted results if sort fields are specified.
"""
search_results = sdk.search_users_names(
pattern=f"%{email_domain}", sorts="last_name, first_name"
)
assert len(search_results) == len(users)
sorted_test_data: List[Dict[str, str]] = sorted(
users, key=itemgetter("last_name", "first_name")
)
for actual, expected in zip(search_results, sorted_test_data):
assert actual.first_name == expected["first_name"]
assert actual.last_name == expected["last_name"]
@pytest.mark.usefixtures("test_users")
def test_delim_sequence(
sdk: mtds.Looker40SDK, email_domain: str, users: List[Dict[str, str]]
):
search_results = sdk.search_users_names(pattern=f"%{email_domain}")
assert len(search_results) == len(users)
delim_ids = ml.DelimSequence([cast(int, u.id) for u in search_results])
all_users = sdk.all_users(ids=delim_ids)
assert len(all_users) == len(users)
def test_it_retrieves_session(sdk: mtds.Looker40SDK):
"""session() should return the current session."""
current_session = sdk.session()
assert current_session.workspace_id == "production"
def test_it_updates_session(sdk: mtds.Looker40SDK):
"""update_session() should allow us to change the current workspace."""
# Switch workspace to dev mode
sdk.update_session(ml.WriteApiSession(workspace_id="dev"))
current_session = sdk.session()
assert isinstance(current_session, ml.ApiSession)
assert current_session.workspace_id == "dev"
# Switch workspace back to production
current_session = sdk.update_session(ml.WriteApiSession(workspace_id="production"))
assert isinstance(current_session, ml.ApiSession)
assert current_session.workspace_id == "production"
TQueries = List[Dict[str, Union[str, List[str], Dict[str, str]]]]
def test_it_creates_and_runs_query(
sdk: mtds.Looker40SDK, queries_system_activity: TQueries
):
"""create_query() creates a query and run_query() returns its result."""
for q in queries_system_activity:
limit = cast(str, q["limit"]) or "10"
request = create_query_request(q, limit)
query = sdk.create_query(request)
assert isinstance(query, ml.Query)
assert query.id
assert isinstance(query.id, int)
assert query.id > 0
sql = sdk.run_query(query.id, "sql")
assert "SELECT" in sql
json_ = sdk.run_query(query.id, "json")
assert isinstance(json_, str)
json_ = json.loads(json_)
assert isinstance(json_, list)
assert len(json_) == int(limit)
row = json_[0]
if q.get("fields"):
for field in q["fields"]:
assert field in row.keys()
csv = sdk.run_query(query.id, "csv")
assert isinstance(csv, str)
assert len(re.findall(r"\n", csv)) == int(limit) + 1
def test_it_runs_inline_query(sdk: mtds.Looker40SDK, queries_system_activity: TQueries):
"""run_inline_query() should run a query and return its results."""
for q in queries_system_activity:
limit = cast(str, q["limit"]) or "10"
request = create_query_request(q, limit)
json_resp = sdk.run_inline_query("json", request)
assert isinstance(json_resp, str)
json_: List[Dict[str, Any]] = json.loads(json_resp)
assert len(json_) == int(limit)
row = json_[0]
if q.get("fields"):
for field in q["fields"]:
assert field in row.keys()
csv = sdk.run_inline_query("csv", request)
assert isinstance(csv, str)
assert len(re.findall(r"\n", csv)) == int(limit) + 1
# only do 1 image download since it takes a while
png = sdk.run_inline_query("png", request)
assert isinstance(png, bytes)
try:
Image.open(io.BytesIO(png))
except IOError:
raise AssertionError("png format failed to return an image")
@pytest.mark.usefixtures("remove_test_looks")
def test_crud_look(sdk: mtds.Looker40SDK, looks):
"""Test creating, retrieving, updating and deleting a look."""
for l in looks:
request = create_query_request(l["query"][0], "10")
query = sdk.create_query(request)
look = sdk.create_look(
ml.WriteLookWithQuery(
title=l.get("title"),
description=l.get("description"),
deleted=l.get("deleted"),
is_run_on_load=l.get("is_run_on_load"),
public=l.get("public"),
query_id=query.id,
space_id=l.get("space_id") or str(sdk.me().personal_space_id),
)
)
assert isinstance(look, ml.LookWithQuery)
assert look.title == l.get("title")
assert look.description == l.get("description")
assert look.deleted == l.get("deleted")
assert look.is_run_on_load == l.get("is_run_on_load")
# TODO this is broken for local dev but works for CI...
# assert look.public == l.get("public")
assert look.query_id == query.id
assert look.space_id == l.get("space_id") or sdk.me().home_space_id
assert look.user_id == l.get("user_id") or sdk.me().id
# Update
assert isinstance(look.id, int)
updated_look = sdk.update_look(look.id, ml.WriteLookWithQuery(deleted=True))
assert updated_look.deleted
assert updated_look.title == look.title
look = sdk.update_look(look.id, ml.WriteLookWithQuery(deleted=False))
assert not look.deleted
def test_search_looks_returns_looks(sdk: mtds.Looker40SDK):
"""search_looks() should return a list of looks."""
search_results = sdk.search_looks()
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look, ml.Look)
assert look.title != ""
assert look.created_at is not None
def test_search_looks_fields_filter(sdk: mtds.Looker40SDK):
"""search_looks() should only return the requested fields passed in the fields
argument of the request.
"""
search_results = sdk.search_looks(fields="id, title, description")
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look, ml.Look)
assert look.title is not None
assert look.created_at is None
def test_search_looks_title_fields_filter(sdk: mtds.Looker40SDK):
"""search_looks() should be able to filter on title."""
search_results = sdk.search_looks(title="An SDK%", fields="id, title")
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look.id, int)
assert look.id > 0
assert "SDK" in look.title
assert look.description is None
def test_search_look_and_run(sdk: mtds.Looker40SDK):
"""run_look() should return CSV and JSON
CSV will use column descriptions
JSON will use column names
JSON_LABEL will use column descriptions
"""
search_results = sdk.search_looks(title="An SDK Look", fields="id, title")
assert isinstance(search_results, list)
assert len(search_results) > 0
look = search_results[0]
assert isinstance(look.id, int)
assert look.id > 0
assert "SDK" in look.title
assert look.description is None
actual = sdk.run_look(look_id=look.id, result_format="csv")
assert "Dashboard Count" in actual
assert "Dashboard ID" in actual
actual = sdk.run_look(look_id=look.id, result_format="json")
assert "dashboard.count" in actual
assert "dashboard.id" in actual
actual = sdk.run_look(look_id=look.id, result_format="json_label")
assert "Dashboard Count" in actual
assert "Dashboard ID" in actual
def create_query_request(q, limit: Optional[str] = None) -> ml.WriteQuery:
return ml.WriteQuery(
model=q.get("model"),
view=q.get("view"),
fields=q.get("fields"),
pivots=q.get("pivots"),
fill_fields=q.get("fill_fields"),
filters=q.get("filters"),
filter_expression=q.get("filter_expressions"),
sorts=q.get("sorts"),
limit=q.get("limit") or limit,
column_limit=q.get("column_limit"),
total=q.get("total"),
row_total=q.get("row_total"),
subtotals=q.get("subtotal"),
runtime=q.get("runtime"),
vis_config=q.get("vis_config"),
filter_config=q.get("filter_config"),
visible_ui_sections=q.get("visible_ui_sections"),
dynamic_fields=q.get("dynamic_fields"),
client_id=q.get("client_id"),
query_timezone=q.get("query_timezone"),
)
@pytest.mark.usefixtures("remove_test_dashboards")
def test_crud_dashboard(sdk: mtds.Looker40SDK, queries_system_activity, dashboards):
"""Test creating, retrieving, updating and deleting a dashboard.
"""
qhash: Dict[Union[str, int], ml.Query] = {}
for idx, q in enumerate(queries_system_activity):
limit = "10"
request = create_query_request(q, limit)
key = q.get("id") or str(idx)
qhash[key] = sdk.create_query(request)
for d in dashboards:
dashboard = sdk.create_dashboard(
ml.WriteDashboard(
description=d.get("description"),
hidden=d.get("hidden"),
query_timezone=d.get("query_timezone"),
refresh_interval=d.get("refresh_interval"),
title=d.get("title"),
background_color=d.get("background_color"),
load_configuration=d.get("load_configuration"),
lookml_link_id=d.get("lookml_link_id"),
show_filters_bar=d.get("show_filters_bar"),
show_title=d.get("show_title"),
slug=d.get("slug"),
space_id=d.get("space_id") or sdk.me().home_space_id,
text_tile_text_color=d.get("text_tile_text_color"),
tile_background_color=d.get("tile_background_color"),
tile_text_color=d.get("tile_text_color"),
title_color=d.get("title_color"),
)
)
assert isinstance(dashboard, ml.Dashboard)
if d.get("background_color"):
assert d["background_color"] == dashboard.background_color
if d.get("text_tile_text_color"):
assert d["text_tile_text_color"] == dashboard.text_tile_text_color
if d.get("tile_background_color"):
assert d["tile_background_color"] == dashboard.tile_background_color
if d.get("tile_text_color"):
assert d["tile_text_color"] == dashboard.tile_text_color
if d.get("title_color"):
assert d["title_color"] == dashboard.title_color
# Update dashboard
assert isinstance(dashboard.id, str)
update_response = sdk.update_dashboard(
dashboard.id, ml.WriteDashboard(deleted=True)
)
assert update_response.deleted
assert update_response.title == dashboard.title
dashboard = sdk.update_dashboard(dashboard.id, ml.WriteDashboard(deleted=False))
assert isinstance(dashboard.id, str)
assert not dashboard.deleted
if d.get("filters"):
for f in d["filters"]:
filter = sdk.create_dashboard_filter(
ml.WriteCreateDashboardFilter(
dashboard_id=dashboard.id,
name=f.get("name"),
title=f.get("title"),
type=f.get("type"),
default_value=f.get("default_value"),
model=f.get("model"),
explore=f.get("explore"),
dimension=f.get("dimension"),
row=f.get("row"),
listens_to_filters=f.get("listens_to_filters"),
allow_multiple_values=f.get("allow_multiple_values"),
required=f.get("required"),
)
)
assert isinstance(filter, ml.DashboardFilter)
assert filter.name == f.get("name")
assert filter.title == f.get("title")
assert filter.type == f.get("type")
assert filter.default_value == f.get("default_value")
assert filter.model == f.get("model")
assert filter.explore == f.get("explore")
assert filter.dimension == f.get("dimension")
assert filter.row == f.get("row")
assert filter.allow_multiple_values == f.get(
"allow_multiple_values", False
)
assert filter.required == f.get("required", False)
if d.get("tiles"):
for t in d["tiles"]:
tile = sdk.create_dashboard_element(
ml.WriteDashboardElement(
body_text=t.get("body_text"),
dashboard_id=dashboard.id,
look=t.get("look"),
look_id=t.get("look_id"),
merge_result_id=t.get("merge_result_id"),
note_display=t.get("note_display"),
note_state=t.get("note_state"),
note_text=t.get("note_text"),
query=t.get("query"),
query_id=get_query_id(qhash, t.get("query_id")),
refresh_interval=t.get("refresh_interval"),
subtitle_text=t.get("subtitle_text"),
title=t.get("title"),
title_hidden=t.get("title_hidden"),
type=t.get("type"),
)
)
assert isinstance(tile, ml.DashboardElement)
assert tile.dashboard_id == dashboard.id
assert tile.title == t.get("title")
assert tile.type == t.get("type")
def get_query_id(
qhash: Dict[Union[str, int], ml.Query], id: Union[str, int]
) -> Optional[int]:
if isinstance(id, str) and id.startswith("#"):
id = id[1:]
# if id is invalid, default to first query. test data is bad
query = qhash.get(id) or list(qhash.values())[0]
query_id = query.id
elif (isinstance(id, str) and id.isdigit()) or isinstance(id, int):
query_id = int(id)
else:
query_id = None
return query_id
|
the-stack_0_17314 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.conf.urls import include, url
# User-defined urlconf
urlpatterns_custom = [
url(r"^", include("gcloud.core.urls")),
url(r"^", include("gcloud.resources.urls")),
url(r"^apigw/", include("gcloud.apigw.urls")),
url(r"^common_template/", include("gcloud.commons.template.urls")),
url(r"^template/", include("gcloud.tasktmpl3.urls")),
url(r"^taskflow/", include("gcloud.taskflow3.urls")),
url(r"^appmaker/", include("gcloud.contrib.appmaker.urls")),
url(r"^develop/", include("gcloud.contrib.develop.urls")),
url(r"^pipeline/", include("pipeline_plugins.base.urls")),
url(r"^pipeline/", include("pipeline_plugins.components.urls")),
url(r"^pipeline/", include("pipeline_plugins.variables.urls")),
url(r"^analysis/", include("gcloud.contrib.analysis.urls")),
url(r"^periodictask/", include("gcloud.periodictask.urls")),
url(r"^weixin/", include("weixin.urls")),
url(r"^weixin/login/", include("weixin.core.urls")),
url(r"^admin/", include("gcloud.contrib.admin.urls")),
]
|
the-stack_0_17315 | from google.cloud import bigquery, storage
import argparse
from abc import ABCMeta
import yaml
import os
from ga_bq_pipeline.logger import Logger
from pathlib import Path
GOOGLE_APP_CREDENTIALS_ENV_NAME = 'GOOGLE_APPLICATION_CREDENTIALS'
GOOGLE_CREDENTIALS_PATH = '/google-keys/'
ROOT = str(Path(os.path.dirname(os.path.abspath(__file__))).parent.parent)
class ETL(metaclass=ABCMeta):
"""
Skeleton of a generic ETL pipeline.
    This class takes care of parsing and loading the environment and arguments.
    A concrete ETL MUST inherit from this class and implement the 3 abstract
    methods
    def pre_execution_cleansing(self):
        pass
    def pipeline(self):
        pass
    def post_execution_cleanup(self):
        pass
    These methods are called by the execute() method and they run in sequence
    (see the subclass sketch at the end of this module).
"""
def __init__(self, app_name, conf_file_path, args_file_name, logger_name, env_name=None):
"""
Configure the ETL class
:param app_name: application name used for logging references
:param conf_file_path: environment configuration directory path
:param args_file_name: arguments definition file path
:param logger_name: logger name
"""
self.__get_arguments(args_file_name)
# configure the logging
self._logger = Logger(app_name, logger_name)
env_name = self.__args.get('environment', None) if env_name is None else env_name
# get the environment variables from env configuration file
self.__get_env_vars(conf_file_path, env_name)
prefix = ROOT if env_name in ['local', 'local-dev'] else ''
# Google key credentials file path
if not os.environ.get(GOOGLE_APP_CREDENTIALS_ENV_NAME):
os.environ[GOOGLE_APP_CREDENTIALS_ENV_NAME] = prefix + GOOGLE_CREDENTIALS_PATH + self.env['service_account']
@property
def logger(self):
"""
Get the logger
:return: logger
"""
return self._logger
@property
def args(self):
"""
Get the arguments
:return: arguments
"""
return self.__args
@property
def env(self):
"""
Get the environment
:return: environment variables
"""
return self.__env
@property
def service_account(self):
"""
Get the Service Account
:return: Service Account File Location
"""
return os.environ.get(GOOGLE_APP_CREDENTIALS_ENV_NAME)
@property
def bq_client(self):
"""
Creates a BigQuery Client
:return: BigQuery Client
"""
return bigquery.Client()
@property
def gs_client(self):
"""
Creates a Cloud Storage Client
:return: Cloud Storage Client
"""
return storage.Client()
@property
def bigquery(self):
"""
Get BigQuery properties
"""
return self.env['bigquery']
@property
def storage(self):
"""
        Get Cloud Storage properties
"""
return self.env['storage']
def __get_arguments(self, args_file_name):
"""
Get all arguments from the arg configuration file and parse them.
:param args_file_name: arguments definition file path
"""
if args_file_name is None:
self.__args = {}
return
try:
with open(args_file_name) as args_file:
args_data = yaml.load(args_file.read(), Loader=yaml.FullLoader)
except IOError as ex:
self.logger.critical('Fail to read configuration file: {0}'.format(ex))
return
try:
description = args_data['description']
except KeyError:
print("Argument description is required.")
return
parser = argparse.ArgumentParser(description=description)
try:
args = args_data['args']
except KeyError:
print("No arguments is found!")
return
for arg in args:
try:
short = args[arg]['short']
except KeyError:
print("Short name is required for an argument!")
return
arg_required = args[arg].get('required', False)
arg_choices = args[arg].get('choices', None)
arg_help = args[arg].get('help', None)
arg_type = int if args[arg].get('type', None) == 'int' else None
parser.add_argument(
'-{0}'.format(short),
'--{0}'.format(arg),
required=arg_required,
help=arg_help,
choices=arg_choices,
type=arg_type
)
self.__args = vars(parser.parse_args())
def __get_env_vars(self, env_path, env_name):
"""
Get the environment variables from env configuration file
:param env_path: environment configuration directory path
:param env_name: environment name
"""
conf_file_name = '{env_path}/{env_name}.yaml'.format(
env_path=env_path,
env_name=env_name
)
try:
with open(conf_file_name) as conf:
env = yaml.load(conf.read(), Loader=yaml.FullLoader)
except IOError as ex:
self.logger.critical('Fail to read environment variables: {0}'.format(ex))
return
self.__env = env
|
the-stack_0_17316 | from typing import NamedTuple
from src.message import Message
from src.lib import get_by_path
color = {
4293271831: "red",
4293467747: "pink",
4294278144: "orange",
4294953512: "yellow",
4280150454: "green",
4278248959: "water",
4280191205: "blue"
}
class MessageRendererTuple(NamedTuple):
message: str
authorName: str
authorExternalChannelId: str
timestampUsec: str
timestampText: str
message_type: str
purchaseAmountText: str
bodyBackgroundColor: str
id: str
class MessageRenderer:
def __init__(self, item: dict) -> None:
conv_c = {
"liveChatTextMessageRenderer": LiveChatText,
"liveChatPaidMessageRenderer": LiveChatPaid,
"liveChatMembershipItemRenderer": LiveChatMembership,
"liveChatViewerEngagementMessageRenderer": LiveChatViewerEngagementMessage,
"liveChatPaidStickerRenderer": LiveChatPaidSticker,
}
self.item = item
renderer_type = list(item.keys())[0]
if "showItemEndpoint" in list(item[renderer_type].keys()):
massage = f"[warn] showItemEndpoint [id] {item[list(item.keys())[0]]['id']}"
raise KeyError(massage)
else:
input = item[renderer_type]
self.renderer = conv_c[renderer_type](input)
class LiveChatText(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
params = [
str(Message(input["message"]["runs"])),
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatText",
"",
"",
input["id"]
]
return super().__new__(cls, *params)
class LiveChatPaid(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
message = str(Message(input["message"]["runs"])
) if "message" in input.keys() else ""
params = [
message,
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatPaid",
input["purchaseAmountText"]["simpleText"],
color[input["bodyBackgroundColor"]],
input["id"]
]
return super().__new__(cls, *params)
class LiveChatMembership(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
params = [
"".join(x["text"] for x in input["headerSubtext"]["runs"]),
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatMembership",
"",
"",
input["id"]
]
return super().__new__(cls, *params)
class LiveChatPaidSticker(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
message = get_by_path(
input,
[
"sticker",
"accessibility",
"accessibilityData",
"label"
]
)
params = [
message,
input["authorName"]["simpleText"],
input["authorExternalChannelId"],
input["timestampUsec"],
input["timestampText"]["simpleText"],
"LiveChatPaidSticker",
input["purchaseAmountText"]["simpleText"],
color[input["moneyChipBackgroundColor"]],
input["id"]
]
return super().__new__(cls, *params)
class LiveChatViewerEngagementMessage(MessageRendererTuple):
def __new__(cls, input: dict) -> MessageRendererTuple:
params = [
"".join(x["text"] for x in input["message"]["runs"]),
"YOUTUBE",
"",
input["timestampUsec"],
"0:00",
"LiveChatViewerEngagementMessage",
"",
"",
input["id"]
]
return super().__new__(cls, *params)
|
the-stack_0_17318 | from sim_common import *
ass_cards = MyCards([
Assist("劇場乳神", 1207, 213,
643, 312, 406, 438, 289,
skill=Skill(buffs=[Effect(Scope.my_team, Ability.dex, 0.2)], debuffs=[Effect(Scope.foes, Endurance.foe, 0.15)],
adj_buffs=[], )
),
Assist("奧娜", 1202, 238,
412, 232, 401, 663, 412,
skill=Skill(
buffs=[Effect(Scope.my_self, Ability.counter_rate, 0.3), Effect(Scope.my_self, Ability.guard_rate, 0.3)],
debuffs=[Effect(Scope.foes, Endurance.phy, 0.15), Effect(Scope.foes, Endurance.mag, 0.15)],
adj_buffs=[], )
),
Assist("情人埃伊娜", 946, 196,
299, 180, 304, 434, 315,
skill=Skill(buffs=[Effect(Scope.my_team, Damage.earth, 0.1), Effect(Scope.my_team, Damage.light, 0.1)],
debuffs=[Effect(Scope.foes, Endurance.earth, 0.05), Effect(Scope.foes, Endurance.light, 0.05)],
adj_buffs=[], )
),
Assist("新娘乳神", 1015, 158,
221, 168, 266, 249, 486,
skill=Skill(buffs=[Effect(Scope.my_team, Ability.mag, 0.15), Effect(Scope.my_team, Ability.crit_rate, 0.08)],
debuffs=[], adj_buffs=[], )
),
Assist("溫泉乳神", 942, 278,
293, 244, 406, 329, 591,
skill=Skill(buffs=[], debuffs=[Effect(Scope.foes, Ability.str, 0.15)], adj_buffs=[], )
),
Assist("洋裝埃伊娜", 1197, 215,
265, 227, 391, 393, 652,
skill=Skill(buffs=[Effect(Scope.my_team, Endurance.foes, 0.1), Effect(Scope.my_team, Endurance.phy, 0.15)],
debuffs=[], adj_buffs=[], )
),
Assist("伯爵希兒", 1188, 138,
323, 268, 297, 206, 389,
skill=Skill(buffs=[Effect(Scope.my_self, Ability.energy_bar, 0.66)], debuffs=[], adj_buffs=[], )
),
])
adv_cards = MyCards([
Adventurer("折紙", 4045, 414,
423, 548, 737, 929, 2045,
skills=[Skill(Scope.foe, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=34, buffs=[],
debuffs=[Effect(Scope.foe, Endurance.light, 0.35, 4)], adj_buffs=[], ),
Skill(Scope.foe, Power.mid, Damage.light, Attack.mag, mp=30,
buffs=[Effect(Scope.my_self, Ability.mag, 0.8, 4)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foes, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=29, buffs=[],
debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.ultra, Damage.light, Attack.mag, temp_boost=True, is_special=True,
buffs=[], debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.pene, 0.2),
Effect(Scope.my_self, Endurance.dark, 0.35)])],
killer=Killer.fairy,
),
Adventurer("情人艾斯", 4017, 421,
459, 560, 744, 902, 1802,
skills=[
Skill(Scope.foe, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=41, buffs=[], debuffs=[],
adj_buffs=[Effect(Scope.foes, AdjBuff.clear_buff, 0, 0, Ability.mag)], ),
Skill(Scope.foe, Power.high, Damage.light, Attack.mag, temp_boost=True, mp=44,
buffs=[Effect(Scope.my_team, Damage.light, 0.2, 4)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.low, Damage.light, Attack.mag, mp=27,
buffs=[Effect(Scope.my_self, Ability.mag, 0.75, 4)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.ultra, Damage.light, Attack.mag, temp_boost=True, is_special=True, buffs=[],
debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.pene, 0.2),
Effect(Scope.my_self, Endurance.dark, 0.35)])],
killer=Killer.rock,
),
Adventurer("春姬", 4140, 438,
1040, 514, 848, 838, 1647,
skills=[Skill(Scope.foes, Power.mid, Damage.fire, Attack.mag, mp=34, buffs=[],
debuffs=[Effect(Scope.foes, Ability.str, 0.4, 3), Effect(Scope.foes, Ability.mag, 0.4, 3)],
adj_buffs=[], ),
Skill(mp=12, buffs=[Effect(Scope.my_self, Recover.mp_imm, 0.15),
Effect(Scope.my_team, Ability.counter_rate, 0.3, 3),
Effect(Scope.my_team, Ability.pene_rate, 0.3, 3)], debuffs=[],
adj_buffs=[], ),
Skill(mp=141, buffs=[Effect(Scope.my_team, Recover.hp_imm, 0.3)], debuffs=[],
adj_buffs=[Effect(Scope.my_team, AdjBuff.extend_buff, 2, 0),
Effect(Scope.foes, AdjBuff.extend_debuff, 2, 0)], ),
Skill(is_special=True, buffs=[Effect(Scope.my_team, Recover.hp_imm, 0.8),
Effect(Scope.my_team, Recover.hp_turn, 0.4, 3),
Effect(Scope.my_team, Ability.str, 1.0, 3),
Effect(Scope.my_team, Ability.mag, 1.0, 3)], debuffs=[],
adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.guard, 0.3),
Effect(Scope.my_self, Endurance.wind, 0.35),
Effect(Scope.my_self, Ability.mag, 0.25),
Effect(Scope.my_self, Ability.agi, 0.25),
Effect(Scope.my_self, Ability.dex, 0.25),
Effect(Scope.my_self, Recover.hp_turn, 0.04),
Effect(Scope.my_self, Recover.mp_turn, 0.04)])],
counter_hp=True,
),
Adventurer("偶像莉涅", 2510 + 1084, 312 + 87,
721 + 201, 212 + 69, 413 + 81, 762 + 284, 727 + 304,
skills=[
Skill(Scope.foes, Power.super, Damage.light, Attack.phy, mp=59,
adj_buffs=[Effect(Scope.foes, AdjBuff.clear_buff, 0, 0, Ability.str),
Effect(Scope.foes, AdjBuff.clear_buff, 0, 0, Ability.mag)]),
Skill(is_fast=True, p=45, buffs=[Effect(Scope.my_team, Ability.energy_bar, 0.33, 4),
Effect(Scope.my_team, Ability.counter_rate, 0.20, 4),
Effect(Scope.my_team, Ability.crit_rate, 0.20, 4),
Effect(Scope.my_team, Ability.pene_rate, 0.20, 4)]),
Skill(Scope.foe, Power.high, Damage.light, Attack.phy, mp=25,
boost_by_buff=[Effect(Scope.my_self, Ability.crit_rate, 0.40)]),
]),
Adventurer("無人島春姬", 2103, 313,
209, 183, 397, 392, 849,
skills=[Skill(mp=52, buffs=[Effect(Scope.my_self, Ability.mag, 0.6, 4),
Effect(Scope.my_self, Ability.dex, 0.6, 4),
Effect(Scope.my_self, Damage.light, 0.6, 4),
Effect(Scope.my_team, Ability.mag, 0.3, 4),
Effect(Scope.my_team, Ability.dex, 0.3, 4),
Effect(Scope.my_team, Damage.light, 0.3, 4)], debuffs=[], adj_buffs=[], ),
Skill(mp=20, buffs=[], debuffs=[Effect(Scope.foe, Endurance.foe, 0.2, 4)],
adj_buffs=[Effect(Scope.foe, AdjBuff.clear_buff, 0, 0, Ability.str),
Effect(Scope.foe, AdjBuff.clear_buff, 0, 0, Ability.mag),
Effect(Scope.foe, AdjBuff.clear_buff, 0, 0, Ability.agi)], ),
Skill(Scope.foe, Power.super, Damage.light, Attack.mag,
boost_by_buff=[Effect(Scope.my_self, Ability.mag, 0.4)], mp=136,
buffs=[Effect(Scope.my_team, Recover.hp_turn, 0.2, 1)], debuffs=[], adj_buffs=[], ),
Skill(Scope.foe, Power.ultra, Damage.light, Attack.mag,
boost_by_buff=[Effect(Scope.my_self, Ability.mag, 0.8)], is_special=True,
buffs=[Effect(Scope.my_team, Recover.hp_turn, 0.4, 3),
Effect(Scope.my_team, Damage.light, 0.8, 3)], debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(
buffs=[Effect(Scope.my_self, Recover.hp_turn, 0.08), Effect(Scope.my_self, Recover.mp_turn, 0.08),
Effect(Scope.my_self, SuccessUp.counter, 0.5),
Effect(Scope.my_self, Endurance.dark, 0.35)])],
killer=Killer.undead,
),
Adventurer("18", 2506, 224,
1387, 599, 601, 416, 981,
skills=[Skill(is_fast=True, mp=47, buffs=[Effect(Scope.my_team, Endurance.foes, 0.35, 3),
Effect(Scope.my_team, Endurance.foe, 0.35, 3)], debuffs=[],
adj_buffs=[], ),
Skill(Scope.foe, Power.high, Damage.earth, Attack.phy, temp_boost=True, mp=30, buffs=[],
debuffs=[], adj_buffs=[], ),
Skill(Scope.foes, Power.super, Damage.earth, Attack.phy, temp_boost=True, mp=69, buffs=[],
debuffs=[], adj_buffs=[Effect(Scope.foes, AdjBuff.shorten_buff, 1, 0)], ),
Skill(Scope.foes, Power.ultra, Damage.earth, Attack.phy, temp_boost=True, is_special=True,
buffs=[], debuffs=[], adj_buffs=[], )],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, SuccessUp.guard, 0.3),
Effect(Scope.my_self, Endurance.thunder, 0.35),
Effect(Scope.my_self, Ability.str, 0.4),
Effect(Scope.my_self, Ability.end, 0.4)])],
killer=Killer.dragon,
),
])
boss_cards = MyCards([
Adventurer("九魔姬", 100000000, 0,
0, 100, 0, 0, 1000,
skills=[Skill(Scope.foes, Power.low, Damage.dark, Attack.mag)],
passive_skills=[Skill(buffs=[Effect(Scope.my_self, Endurance.fire, 0.1)])]
),
Adventurer("紅髮怪人", 100000000, 0,
100, 100, 0, 0, 0,
skills=[Skill(Scope.foes, Power.low, Damage.none, Attack.phy),
Skill(Scope.foes, Power.high, Damage.none, Attack.phy),
Skill(debuffs=[Effect(Scope.my_self, Endurance.mag, 0.7, 15)]),
Skill(adj_buffs=[Effect(Scope.my_self, AdjBuff.clear_debuff, 0, 0)]),
Skill(buffs=[Effect(Scope.my_self, Ability.str, 0.20, 3)]),
],
passive_skills=[Skill(debuffs=[Effect(Scope.my_self, Endurance.light, 0.7)])],
init_skill=Skill(debuffs=[Effect(Scope.my_self, Endurance.mag, 0.7, 15)], idx="init"),
),
])
ranker = Ranker()
boss1 = boss_cards.get("紅髮怪人")
enemy_team = Team(1, [boss1.set_steps([
[1, 1], # 1
[1, 1, 1], # 2
[1, 1, 1, 5], # 3
[1, 1, 2], # 4
[1, 1, 4], # 5
[1, 1, 1], # 6
[1, 1, 1, 5], # 7
[1, 1, 2, 3], # 8
[1, 1, 4], # 9
[1, 1, 1], # 10
[1, 1, 1, 5], # 11
[1, 1, 2, 3], # 12
[1, 1, 1], # 13
[1, 1, 1, 1], # 14
1, # 15
])
])
p1 = adv_cards.get("無人島春姬").set_assist(ass_cards.get("溫泉乳神"))
p2 = adv_cards.get("折紙").set_assist(ass_cards.get("奧娜"))
p3 = adv_cards.get("情人艾斯").set_assist(ass_cards.get("洋裝埃伊娜"))
p4 = adv_cards.get("偶像莉涅").set_one_shot().set_assist(ass_cards.get("新娘乳神"))
p5 = adv_cards.get("18").set_one_shot().set_assist(ass_cards.get("情人埃伊娜"))
p6 = adv_cards.get("春姬").set_assist(ass_cards.get("劇場乳神"))
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
my_team = Team(4, [p1.set_steps([1, 2, 3, 4, 3, 2, 3, 3, 3, 2, 3, 2, 4, 3, 3]),
p2.set_steps([2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1]),
p3.set_steps([3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 4, 1, 1]),
p4.set_steps([2]),
p5.set_steps([x, 1]),
p6.set_steps([x, x, 1, 3, 3, 1, 3, 2, 3, 1, 3, 2, 3, 3, 1]),
]
)
battle = BattleStage(15)
battle.set_player_team(my_team).set_enemy_team(enemy_team)
battle.run()
rank = ranker.add(battle)
ranker.report(rank=rank, detail=True)
# ranker.report(limit=1, detail=False)
# ranker.report(rank=rank, detail=False)
# ranker.report()
|
the-stack_0_17326 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='ScreenshotFormat',
version='1',
packages=setuptools.find_packages(),
author="Gomes Alexis",
author_email="[email protected]",
description="Python package to help create screenshot to upload on stores",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/AlexisGomes/ScreenshotFormat",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) |
the-stack_0_17327 | # -*- coding: utf-8 -*-
#
# MEOPAR MIDOSS project documentation Sphinx builder configuration file.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import datetime
# -- Project information -----------------------------------------------------
project = 'MEOPAR MIDOSS Project Docs'
author = (
'The MIDOSS Project Contributors, '
'the University of British Columbia, '
'and Dalhousie University')
copyright_years = (
"2018"
if datetime.date.today().year == 2018
else f"2018-{datetime.date.today():%Y}"
)
copyright = f"{copyright_years}, {author}"
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
]
intersphinx_mapping = {
"salishseanowcast": ("https://salishsea-nowcast.readthedocs.io/en/latest/", None),
}
# Private GitHub repositories that linkcheck will ignore
linkcheck_ignore = [
'https://github.com/MIDOSS/MIDOSS-MOHID-CODE',
'https://github.com/MIDOSS/MIDOSS-MOHID-grid',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/MEOPAR_favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
|
the-stack_0_17328 | import numpy as np
from sklearn.metrics import r2_score
from sklearn.datasets import load_svmlight_file
from sklearn.linear_model import Ridge
from train.build_model import *
np.random.seed(1337)
x, y = load_svmlight_file('data/reg_big.data')
x = np.asarray(x.todense())
tri, tei = split_testing_data_r(y)
xtr = x[tri]
ytr = y[tri]
xte = x[tei]
yte = y[tei]
alp = 1000
m = Ridge(alpha=alp)
m.fit(xtr, ytr)
r2_train = r2_score(ytr, m.predict(xtr))
r2_test = r2_score(yte, m.predict(xte))
print('Training R2 Score: {0}'.format(np.round(r2_train, 5)))
print('Testing R2 Score: {0}'.format(np.round(r2_test, 5)))
|
the-stack_0_17330 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import os
import random
from smarts.core.utils.logging import surpress_stdout
import subprocess
import time
from typing import List, Sequence
import numpy as np
from shapely.geometry import Polygon, box as shapely_box
from shapely.affinity import rotate as shapely_rotate
import traci.constants as tc
from traci.exceptions import FatalTraCIError, TraCIException
from smarts.core import gen_id
from .colors import SceneColors
from .coordinates import Heading, Pose
from .provider import ProviderState, ProviderTLS, ProviderTrafficLight
from .vehicle import VEHICLE_CONFIGS, VehicleState
# We need to import .utils.sumo before we can use traci
from .utils.sumo import SUMO_PATH, traci
from .utils import networking
class SumoTrafficSimulation:
"""
Args:
net_file:
path to sumo .net.xml file
headless:
False to run with `sumo-gui`. True to run with `sumo`
time_resolution:
            SUMO simulation is discretized into steps of `time_resolution` seconds
WARNING:
Since our interface(TRACI) to SUMO is delayed by one simulation step,
setting a higher time resolution may lead to unexpected artifacts
"""
def __init__(
self,
headless=True,
time_resolution=0.1,
num_clients=1,
num_external_sumo_clients=0,
sumo_port=None,
auto_start=True,
endless_traffic=True,
debug=True,
):
self._log = logging.getLogger(self.__class__.__name__)
self._debug = debug
self._scenario = None
self._log_file = None
self._time_resolution = time_resolution
self._headless = headless
self._cumulative_sim_seconds = 0
self._non_sumo_vehicle_ids = set()
self._sumo_vehicle_ids = set()
self._is_setup = False
self._last_trigger_time = -1000000
self._num_dynamic_ids_used = 0
self._traci_conn = None
self._sumo_proc = None
self._num_clients = 1 + num_external_sumo_clients
self._sumo_port = sumo_port
self._auto_start = auto_start
self._endless_traffic = endless_traffic
self._to_be_teleported = dict()
self._reserved_areas = dict()
def __repr__(self):
return f"""SumoTrafficSim(
_scenario={repr(self._scenario)},
_time_resolution={self._time_resolution},
_headless={self._headless},
_cumulative_sim_seconds={self._cumulative_sim_seconds},
_non_sumo_vehicle_ids={self._non_sumo_vehicle_ids},
_sumo_vehicle_ids={self._sumo_vehicle_ids},
_is_setup={self._is_setup},
_last_trigger_time={self._last_trigger_time},
_num_dynamic_ids_used={self._num_dynamic_ids_used},
_traci_conn={repr(self._traci_conn)}
)"""
def __str__(self):
return repr(self)
def _initialize_traci_conn(self, num_retries=5):
# TODO: inline sumo or process pool
# the retries are to deal with port collisions
# since the way we start sumo here has a race condition on
# each spawned process claiming a port
for _ in range(num_retries):
self._close_traci_and_pipes()
sumo_port = self._sumo_port
if sumo_port is None:
sumo_port = networking.find_free_port()
sumo_binary = "sumo" if self._headless else "sumo-gui"
sumo_cmd = [
os.path.join(SUMO_PATH, "bin", sumo_binary),
"--remote-port=%s" % sumo_port,
*self._base_sumo_load_params(),
]
self._log.debug("Starting sumo process:\n\t %s", sumo_cmd)
self._sumo_proc = subprocess.Popen(
sumo_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
time.sleep(0.05) # give SUMO time to start
try:
with surpress_stdout():
self._traci_conn = traci.connect(
sumo_port,
numRetries=100,
proc=self._sumo_proc,
waitBetweenRetries=0.05,
) # SUMO must be ready within 5 seconds
try:
assert (
self._traci_conn.getVersion()[0] >= 20
), "TraCI API version must be >= 20 (SUMO 1.5.0)"
# We will retry since this is our first sumo command
except FatalTraCIError:
logging.debug("Connection closed. Retrying...")
self._close_traci_and_pipes()
continue
except ConnectionRefusedError:
logging.debug(
"Connection refused. Tried to connect to unpaired TraCI client."
)
self._close_traci_and_pipes()
continue
# It is mandatory to set order when using multiple clients.
self._traci_conn.setOrder(0)
break
try:
self._traci_conn.getVersion()
except Exception as e:
logging.error(
f"""Failed to initialize SUMO
Your scenario might not be configured correctly or
you were trying to initialize many SUMO instances at
once and we were not able to assign unique port
numbers to all SUMO processes.
Check {self._log_file} for hints"""
)
raise e
self._log.debug("Finished starting sumo process")
def _base_sumo_load_params(self):
load_params = [
"--num-clients=%d" % self._num_clients,
"--net-file=%s" % self._scenario.net_filepath,
"--quit-on-end",
"--log=%s" % self._log_file,
"--error-log=%s" % self._log_file,
"--no-step-log",
"--no-warnings=1",
"--seed=%s" % random.randint(0, 2147483648),
"--time-to-teleport=%s" % -1,
"--collision.check-junctions=true",
"--collision.action=none",
"--lanechange.duration=3.0",
            # TODO: `--lanechange.duration` or `--lateral-resolution`, in combination with `route_id`,
# causes lane change crashes as of SUMO 1.6.0.
# Controlling vehicles that have been added to the simulation with a route causes
# lane change related crashes.
# "--lateral-resolution=100", # smooth lane changes
"--step-length=%f" % self._time_resolution,
"--default.action-step-length=%f" % self._time_resolution,
"--begin=0", # start simulation at time=0
"--end=31536000", # keep the simulation running for a year
]
if self._auto_start:
load_params.append("--start")
if self._scenario.route_files_enabled:
load_params.append("--route-files={}".format(self._scenario.route_filepath))
return load_params
def setup(self, scenario) -> ProviderState:
self._log.debug("Setting up SumoTrafficSim %s" % self)
assert not self._is_setup, (
"Can't setup twice, %s, see teardown()" % self._is_setup
)
# restart sumo process only when map file changes
if self._scenario and self._scenario.net_file_hash == scenario.net_file_hash:
restart_sumo = False
else:
restart_sumo = True
self._scenario = scenario
self._log_file = scenario.unique_sumo_log_file()
if restart_sumo:
self._initialize_traci_conn()
else:
self._traci_conn.load(self._base_sumo_load_params())
assert self._traci_conn is not None, "No active traci conn"
self._traci_conn.simulation.subscribe(
[tc.VAR_DEPARTED_VEHICLES_IDS, tc.VAR_ARRIVED_VEHICLES_IDS]
)
for tls_id in self._traci_conn.trafficlight.getIDList():
self._traci_conn.trafficlight.subscribe(
tls_id, [tc.TL_RED_YELLOW_GREEN_STATE, tc.TL_CONTROLLED_LINKS]
)
# XXX: SUMO caches the previous subscription results. Calling `simulationStep`
# effectively flushes the results. We need to use epsilon instead of zero
# as zero will step according to a default (non-zero) step-size.
self.step({}, 1e-6, 0)
self._is_setup = True
return self._compute_provider_state()
def _close_traci_and_pipes(self):
if self._sumo_proc:
self._sumo_proc.stdin.close()
self._sumo_proc.stdout.close()
self._sumo_proc.stderr.close()
self._sumo_proc = None
if self._traci_conn:
self._traci_conn.close()
self._traci_conn = None
def teardown(self):
self._log.debug("Tearing down SUMO traffic sim %s" % self)
if not self._is_setup:
self._log.debug("Nothing to teardown")
return
assert self._is_setup
self._cumulative_sim_seconds = 0
self._non_sumo_vehicle_ids = set()
self._sumo_vehicle_ids = set()
self._is_setup = False
self._num_dynamic_ids_used = 0
self._to_be_teleported = dict()
self._reserved_areas = dict()
@property
def action_spaces(self):
# Unify interfaces with other providers
return {}
def reset(self):
# Unify interfaces with other providers
pass
def step(self, provider_actions, dt, elapsed_sim_time) -> ProviderState:
"""
        Args:
            provider_actions: actions for vehicles managed by other providers
                (not used directly by this provider; external vehicles are
                reconciled in `sync`)
            dt: time (in seconds) to simulate during this simulation step
            elapsed_sim_time: total simulation time (in seconds) elapsed so far
Returns:
ProviderState representing the state of the SUMO simulation
"""
# we tell SUMO to step through dt more seconds of the simulation
self._cumulative_sim_seconds += dt
self._traci_conn.simulationStep(self._cumulative_sim_seconds)
return self._compute_provider_state()
def sync(self, provider_state: ProviderState):
provider_vehicles = {v.vehicle_id: v for v in provider_state.vehicles}
external_vehicles = [v for v in provider_state.vehicles if v.source != "SUMO"]
external_vehicle_ids = {v.vehicle_id for v in external_vehicles}
# Represents current state
traffic_vehicle_states = self._traci_conn.vehicle.getAllSubscriptionResults()
traffic_vehicle_ids = set(traffic_vehicle_states)
# State / ownership changes
external_vehicles_that_have_left = (
self._non_sumo_vehicle_ids - external_vehicle_ids - traffic_vehicle_ids
)
external_vehicles_that_have_joined = (
external_vehicle_ids - self._non_sumo_vehicle_ids - traffic_vehicle_ids
)
vehicles_that_have_become_external = (
traffic_vehicle_ids & external_vehicle_ids - self._non_sumo_vehicle_ids
)
# XXX: They may have become internal because they've been relinquished or
# because they've been destroyed from a collision. Presently we're not
# differentiating and will take over as social vehicles regardless.
vehicles_that_have_become_internal = (
self._non_sumo_vehicle_ids - external_vehicle_ids
) & traffic_vehicle_ids
log = ""
if external_vehicles_that_have_left:
log += (
f"external_vehicles_that_have_left={external_vehicles_that_have_left}\n"
)
if external_vehicles_that_have_joined:
log += f"external_vehicles_that_have_joined={external_vehicles_that_have_joined}\n"
if vehicles_that_have_become_external:
log += f"vehicles_that_have_become_external={vehicles_that_have_become_external}\n"
if vehicles_that_have_become_internal:
log += f"vehicles_that_have_become_internal={vehicles_that_have_become_internal}\n"
if log:
self._log.debug(log)
for vehicle_id in external_vehicles_that_have_left:
self._log.debug("Non SUMO vehicle %s left simulation", vehicle_id)
self._non_sumo_vehicle_ids.remove(vehicle_id)
self._traci_conn.vehicle.remove(vehicle_id)
for vehicle_id in external_vehicles_that_have_joined:
dimensions = provider_vehicles[vehicle_id].dimensions
self._create_vehicle(vehicle_id, dimensions)
# update the state of all current managed vehicles
for vehicle_id in self._non_sumo_vehicle_ids:
provider_vehicle = provider_vehicles[vehicle_id]
pos, sumo_heading = provider_vehicle.pose.as_sumo(
provider_vehicle.dimensions.length, Heading(0)
)
# See https://sumo.dlr.de/docs/TraCI/Change_Vehicle_State.html#move_to_xy_0xb4
# for flag values
try:
self._move_vehicle(
provider_vehicle.vehicle_id,
pos,
sumo_heading,
provider_vehicle.speed,
)
except TraCIException as e:
# Likely as a result of https://github.com/eclipse/sumo/issues/3993
# the vehicle got removed because we skipped a moveToXY call between
# internal stepSimulations, so we add the vehicle back here.
self._log.warning(
"Attempted to (TraCI) SUMO.moveToXY(...) on missing "
f"vehicle(id={vehicle_id})"
)
self._create_vehicle(vehicle_id, provider_vehicle.dimensions)
self._move_vehicle(
provider_vehicle.vehicle_id,
pos,
sumo_heading,
provider_vehicle.speed,
)
for vehicle_id in vehicles_that_have_become_external:
self._traci_conn.vehicle.setColor(
vehicle_id, SumoTrafficSimulation._social_agent_vehicle_color()
)
self._non_sumo_vehicle_ids.add(vehicle_id)
for vehicle_id in vehicles_that_have_become_internal:
self._traci_conn.vehicle.setColor(
vehicle_id, SumoTrafficSimulation._social_vehicle_color()
)
self._non_sumo_vehicle_ids.remove(vehicle_id)
# Let sumo take over speed again
self._traci_conn.vehicle.setSpeed(vehicle_id, -1)
if self._endless_traffic:
self._reroute_vehicles(traffic_vehicle_states)
self._teleport_exited_vehicles()
@staticmethod
def _ego_agent_vehicle_color():
return np.array(SceneColors.Agent.value[:3]) * 255
@staticmethod
def _social_agent_vehicle_color():
return np.array(SceneColors.SocialAgent.value[:3]) * 255
@staticmethod
def _social_vehicle_color():
return np.array(SceneColors.SocialVehicle.value[:3]) * 255
def _move_vehicle(self, vehicle_id, position, heading, speed):
x, y, _ = position
self._traci_conn.vehicle.moveToXY(
vehID=vehicle_id,
edgeID="", # let sumo choose the edge
lane=-1, # let sumo choose the lane
x=x,
y=y,
angle=heading, # only used for visualizing in sumo-gui
keepRoute=0,
)
self._traci_conn.vehicle.setSpeed(vehicle_id, speed)
def _create_vehicle(self, vehicle_id, dimensions):
assert (
type(vehicle_id) == str
), f"SUMO expects string ids: {vehicle_id} is a {type(vehicle_id)}"
self._log.debug("Non SUMO vehicle %s joined simulation", vehicle_id)
self._non_sumo_vehicle_ids.add(vehicle_id)
self._traci_conn.vehicle.add(
vehID=vehicle_id,
routeID="", # we don't care which route this vehicle is on
)
# TODO: Vehicle Id should not be using prefixes this way
if vehicle_id.startswith("social-agent"):
# This is based on ID convention
vehicle_color = SumoTrafficSimulation._social_agent_vehicle_color()
else:
vehicle_color = SumoTrafficSimulation._ego_agent_vehicle_color()
self._traci_conn.vehicle.setColor(vehicle_id, vehicle_color)
# Directly below are two of the main factors that affect vehicle secure gap for
# purposes of determining the safety gaps that SUMO vehicles will abide by. The
# remaining large factor is vehicle speed.
# See:
# http://sumo-user-mailing-list.90755.n8.nabble.com/sumo-user-Questions-on-SUMO-Built-In-Functions-getSecureGap-amp-brakeGap-td3254.html
# Set the controlled vehicle's time headway in seconds
self._traci_conn.vehicle.setTau(vehicle_id, 4)
        # Set the controlled vehicle's maximum natural deceleration in m/s^2
self._traci_conn.vehicle.setDecel(vehicle_id, 6)
# setup the vehicle size
self._traci_conn.vehicle.setLength(vehicle_id, dimensions.length)
self._traci_conn.vehicle.setWidth(vehicle_id, dimensions.width)
self._traci_conn.vehicle.setHeight(vehicle_id, dimensions.height)
def _compute_provider_state(self) -> ProviderState:
return ProviderState(
vehicles=self._compute_traffic_vehicles(),
traffic_light_systems=self._compute_traffic_lights(),
)
def _compute_traffic_vehicles(self) -> List[VehicleState]:
sub_results = self._traci_conn.simulation.getSubscriptionResults()
        if sub_results is None or sub_results == {}:
            return []
# New social vehicles that have entered the map
newly_departed_sumo_traffic = [
vehicle_id
for vehicle_id in sub_results[tc.VAR_DEPARTED_VEHICLES_IDS]
if vehicle_id not in self._non_sumo_vehicle_ids
]
reserved_areas = [position for position in self._reserved_areas.values()]
for vehicle_id in newly_departed_sumo_traffic:
other_vehicle_shape = self._shape_of_vehicle(vehicle_id)
violates_reserved_area = False
for reserved_area in reserved_areas:
if reserved_area.intersects(other_vehicle_shape):
violates_reserved_area = True
break
if violates_reserved_area:
self._traci_conn.vehicle.remove(vehicle_id)
continue
self._log.debug("SUMO vehicle %s entered simulation", vehicle_id)
self._traci_conn.vehicle.subscribe(
vehicle_id,
[
tc.VAR_POSITION,
tc.VAR_ANGLE,
tc.VAR_SPEED,
tc.VAR_VEHICLECLASS,
tc.VAR_ROUTE_INDEX,
tc.VAR_EDGES,
tc.VAR_TYPE,
],
)
# Non-sumo vehicles will show up the step after the sync where the non-sumo vehicle is
# added.
newly_departed_non_sumo_vehicles = [
vehicle_id
for vehicle_id in sub_results[tc.VAR_DEPARTED_VEHICLES_IDS]
if vehicle_id not in newly_departed_sumo_traffic
]
for vehicle_id in newly_departed_non_sumo_vehicles:
if vehicle_id in self._reserved_areas:
del self._reserved_areas[vehicle_id]
sumo_vehicle_state = self._traci_conn.vehicle.getAllSubscriptionResults()
self._sumo_vehicle_ids = (
set(sumo_vehicle_state.keys()) - self._non_sumo_vehicle_ids
)
provider_vehicles = []
# batched conversion of positions to numpy arrays
front_bumper_positions = np.array(
[
sumo_vehicle[tc.VAR_POSITION]
for sumo_vehicle in sumo_vehicle_state.values()
]
).reshape(-1, 2)
for i, (sumo_id, sumo_vehicle) in enumerate(sumo_vehicle_state.items()):
# XXX: We can safely rely on iteration order over dictionaries being
# stable on py3.7.
# See: https://www.python.org/downloads/release/python-370/
# "The insertion-order preservation nature of dict objects is now an
# official part of the Python language spec."
front_bumper_pos = front_bumper_positions[i]
heading = Heading.from_sumo(sumo_vehicle[tc.VAR_ANGLE])
speed = sumo_vehicle[tc.VAR_SPEED]
vehicle_type = sumo_vehicle[tc.VAR_VEHICLECLASS]
dimensions = VEHICLE_CONFIGS[vehicle_type].dimensions
provider_vehicles.append(
VehicleState(
                    # XXX: In the case of the SUMO traffic provider, the vehicle ID,
                    # the SUMO ID, and the actor ID are all the same.
vehicle_id=sumo_id,
vehicle_type=vehicle_type,
pose=Pose.from_front_bumper(
front_bumper_pos, heading, dimensions.length
),
dimensions=dimensions,
speed=speed,
source="SUMO",
)
)
return provider_vehicles
def _teleport_exited_vehicles(self):
sub_results = self._traci_conn.simulation.getSubscriptionResults()
if not sub_results:
return
exited_sumo_traffic = [
vehicle_id
for vehicle_id in sub_results[tc.VAR_ARRIVED_VEHICLES_IDS]
if vehicle_id not in self._non_sumo_vehicle_ids
]
for v_id in exited_sumo_traffic:
if v_id in self._to_be_teleported:
route = self._to_be_teleported[v_id]["route"]
type_id = self._to_be_teleported[v_id]["type_id"]
self._teleport_vehicle(v_id, route, 0, type_id)
def _teleport_vehicle(self, vehicle_id, route, lane_offset, type_id):
self._log.debug(
f"Teleporting {vehicle_id} to lane_offset={lane_offset} route={route}"
)
spawn_edge = self._scenario.road_network.graph.getEdge(route[0])
lane_index = random.randint(0, len(spawn_edge.getLanes()) - 1)
self._emit_vehicle_by_route(vehicle_id, route, lane_index, lane_offset, type_id)
def _reroute_vehicles(self, vehicle_states):
for vehicle_id, state in vehicle_states.items():
if vehicle_id not in self._sumo_vehicle_ids:
continue
route_index = state[tc.VAR_ROUTE_INDEX]
route_edges = state[tc.VAR_EDGES]
type_id = state[tc.VAR_TYPE]
if route_index != len(route_edges) - 1:
# The vehicle is not in the last route edge.
continue
            # Check if these edges form a loop.
from_edge = self._scenario.road_network.graph.getEdge(route_edges[-1])
to_edge = self._scenario.road_network.graph.getEdge(route_edges[0])
next_edges = from_edge.getOutgoing().keys()
if to_edge not in next_edges:
                # Reroute only if the route forms a loop; otherwise, teleport the vehicle.
self._to_be_teleported[vehicle_id] = {
"route": route_edges,
"type_id": type_id,
}
continue
# The first edge in the list has to be the one that the vehicle
# is in at the moment, which is the last edge in current route_edges.
new_route_edges = route_edges[-1:] + route_edges
self._traci_conn.vehicle.setRoute(vehicle_id, new_route_edges)
def _compute_traffic_lights(self) -> List[ProviderTLS]:
"""TraCI will automatically generate TLS programs if none was specified
according to the net/program. To support this we opt to use TraCI instead
of the sumolib interface for TLS support.
"""
sub_results = self._traci_conn.trafficlight.getSubscriptionResults(None)
tlss = []
if not sub_results:
return tlss
for tls_id in sub_results:
light_states = sub_results[tls_id][tc.TL_RED_YELLOW_GREEN_STATE]
links = sub_results[tls_id][tc.TL_CONTROLLED_LINKS]
traffic_lights = []
for link, state in zip(links, light_states):
lane_start, lane_end, lane_via = [
self._scenario.road_network.lane_by_id(lane) for lane in link[0]
]
traffic_lights.append(
ProviderTrafficLight(
lane_in=lane_start,
lane_via=lane_via,
lane_out=lane_end,
state=state,
)
)
tlss.append(ProviderTLS(tls_id, traffic_lights))
return tlss
def _unique_id(self):
route_id = "hiway_id_%s" % self._num_dynamic_ids_used
self._num_dynamic_ids_used += 1
return route_id
def vehicle_route(self, vehicle_id) -> Sequence[str]:
return self._traci_conn.vehicle.getRoute(vehicle_id)
def reserve_traffic_location_for_vehicle(
self, vehicle_id: str, reserved_location: Polygon,
):
"""Reserve an area around a location where vehicles cannot spawn until a given vehicle
is added.
Args:
vehicle_id: The vehicle to wait for.
reserved_location: The space the vehicle takes up.
"""
self._reserved_areas[vehicle_id] = reserved_location
def remove_traffic_vehicle(self, vehicle_id: str):
self._traci_conn.vehicle.remove(vehicle_id)
self._sumo_vehicle_ids.remove(vehicle_id)
def _shape_of_vehicle(self, vehicle_id):
p = self._traci_conn.vehicle.getPosition(vehicle_id)
length = self._traci_conn.vehicle.getLength(vehicle_id)
width = self._traci_conn.vehicle.getWidth(vehicle_id)
heading = Heading.from_sumo(self._traci_conn.vehicle.getAngle(vehicle_id))
poly = shapely_box(p[0] - width * 0.5, p[1] - length, p[0] + width * 0.5, p[1],)
return shapely_rotate(poly, heading, use_radians=True)
def _emit_vehicle_by_route(
self, vehicle_id, route, lane_index, lane_offset, type_id="DEFAULT_VEHTYPE"
):
route_id = f"route-{gen_id()}"
self._traci_conn.route.add(route_id, route)
self._traci_conn.vehicle.add(
vehicle_id,
route_id,
typeID=type_id,
departPos=lane_offset,
departLane=lane_index,
)
return vehicle_id
def _emit_vehicle_near_position(self, position, vehicle_id=None) -> str:
wp = self._scenario.waypoints.closest_waypoint(position)
lane = self._scenario.road_network.lane_by_id(wp.lane_id)
offset_in_lane = self._scenario.road_network.offset_into_lane(
lane, tuple(wp.pos)
)
if not vehicle_id:
vehicle_id = self._unique_id()
# XXX: Do not give this a route or it will crash on `moveTo` calls
self._traci_conn.vehicle.add(
vehicle_id, "", departPos=offset_in_lane, departLane=wp.lane_index,
)
self._traci_conn.vehicle.moveToXY(
vehID=vehicle_id,
edgeID="", # let sumo choose the edge
lane=-1, # let sumo choose the lane
x=position[0],
y=position[1],
# angle=sumo_heading, # only used for visualizing in sumo-gui
keepRoute=0b000, # On lane
)
return vehicle_id
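# Hedged usage sketch (addition, not part of the original module): roughly how a
# caller could drive this provider. `scenario` stands in for a hypothetical object
# exposing the net/route file attributes referenced above; timings are illustrative.
#
#   sim = SumoTrafficSimulation(headless=True, time_resolution=0.1)
#   provider_state = sim.setup(scenario)
#   for i in range(100):
#       provider_state = sim.step({}, 0.1, i * 0.1)
#   sim.teardown()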
|
the-stack_0_17331 | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pickle
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
ak_Array = ak._v2.highlevel.Array
ak_Record = ak._v2.highlevel.Record
ak_to_buffers = ak._v2.operations.to_buffers
ak_from_buffers = ak._v2.operations.from_buffers
def test_numpyarray():
assert ak_from_buffers(*ak_to_buffers(ak_Array([1, 2, 3, 4, 5]))).tolist() == [
1,
2,
3,
4,
5,
]
assert pickle.loads(pickle.dumps(ak_Array([1, 2, 3, 4, 5]), -1)).tolist() == [
1,
2,
3,
4,
5,
]
def test_listoffsetarray():
assert ak_from_buffers(*ak_to_buffers([[1, 2, 3], [], [4, 5]])).tolist() == [
[1, 2, 3],
[],
[4, 5],
]
assert ak_from_buffers(
*ak_to_buffers(["one", "two", "three", "four", "five"])
).tolist() == ["one", "two", "three", "four", "five"]
assert ak_from_buffers(
*ak_to_buffers([["one", "two", "three"], [], ["four", "five"]])
).tolist() == [["one", "two", "three"], [], ["four", "five"]]
assert pickle.loads(
pickle.dumps(ak_Array([[1, 2, 3], [], [4, 5]]), -1)
).tolist() == [[1, 2, 3], [], [4, 5]]
def test_listarray():
listoffsetarray = ak_Array([[1, 2, 3], [], [4, 5]]).layout
listarray = ak._v2.contents.ListArray(
listoffsetarray.starts, listoffsetarray.stops, listoffsetarray.content
)
assert ak_from_buffers(*ak_to_buffers(listarray)).tolist() == [
[1, 2, 3],
[],
[4, 5],
]
assert pickle.loads(pickle.dumps(ak_Array(listarray), -1)).tolist() == [
[1, 2, 3],
[],
[4, 5],
]
def test_indexedoptionarray():
assert ak_from_buffers(*ak_to_buffers([1, 2, 3, None, None, 5])).tolist() == [
1,
2,
3,
None,
None,
5,
]
assert pickle.loads(
pickle.dumps(ak_Array([1, 2, 3, None, None, 5]), -1)
).tolist() == [1, 2, 3, None, None, 5]
def test_indexedarray():
content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
index = ak._v2.index.Index64(np.array([3, 1, 1, 4, 2], dtype=np.int64))
indexedarray = ak._v2.contents.IndexedArray(index, content)
assert ak_from_buffers(*ak_to_buffers(indexedarray)).tolist() == [
3.3,
1.1,
1.1,
4.4,
2.2,
]
assert pickle.loads(pickle.dumps(ak_Array(indexedarray), -1)).tolist() == [
3.3,
1.1,
1.1,
4.4,
2.2,
]
def test_emptyarray():
assert ak_from_buffers(*ak_to_buffers([])).tolist() == []
assert ak_from_buffers(*ak_to_buffers([[], [], []])).tolist() == [[], [], []]
assert pickle.loads(pickle.dumps(ak_Array([]), -1)).tolist() == []
assert pickle.loads(pickle.dumps(ak_Array([[], [], []]), -1)).tolist() == [
[],
[],
[],
]
def test_bytemaskedarray():
content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
mask = ak._v2.index.Index8(
np.array([False, True, True, False, False], dtype=np.int8)
)
bytemaskedarray = ak._v2.contents.ByteMaskedArray(mask, content, True)
assert ak_from_buffers(*ak_to_buffers(bytemaskedarray)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
assert pickle.loads(pickle.dumps(ak_Array(bytemaskedarray), -1)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
def test_bitmaskedarray():
content = ak_Array([0.0, 1.1, 2.2, 3.3, 4.4]).layout
mask = ak._v2.index.IndexU8(
np.packbits(np.array([False, True, True, False, False], dtype=np.int8))
)
bitmaskedarray = ak._v2.contents.BitMaskedArray(mask, content, True, 5, False)
assert ak_from_buffers(*ak_to_buffers(bitmaskedarray)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
assert pickle.loads(pickle.dumps(ak_Array(bitmaskedarray), -1)).tolist() == [
None,
1.1,
2.2,
None,
None,
]
def test_recordarray():
assert ak_from_buffers(
*ak_to_buffers([(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])])
).tolist() == [(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]
assert ak_from_buffers(
*ak_to_buffers(
[{"x": 1.1, "y": [1]}, {"x": 2.2, "y": [1, 2]}, {"x": 3.3, "y": [1, 2, 3]}]
)
).tolist() == [
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
assert pickle.loads(
pickle.dumps(ak_Array([(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]), -1)
).tolist() == [(1.1, [1]), (2.2, [1, 2]), (3.3, [1, 2, 3])]
assert pickle.loads(
pickle.dumps(
ak_Array(
[
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
),
-1,
)
).tolist() == [
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
def test_record():
assert pickle.loads(
pickle.dumps(ak_Record({"x": 2.2, "y": [1, 2]}), -1)
).tolist() == {"x": 2.2, "y": [1, 2]}
assert pickle.loads(
pickle.dumps(
ak_Array(
[
{"x": 1.1, "y": [1]},
{"x": 2.2, "y": [1, 2]},
{"x": 3.3, "y": [1, 2, 3]},
]
)[1],
-1,
)
).tolist() == {"x": 2.2, "y": [1, 2]}
def test_regulararray():
content = ak_Array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]).layout
regulararray = ak._v2.contents.RegularArray(content, 3, zeros_length=0)
assert ak_from_buffers(*ak_to_buffers(regulararray)).tolist() == [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
]
assert pickle.loads(pickle.dumps(ak_Array(regulararray), -1)).tolist() == [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12],
]
def test_unionarray():
assert ak_from_buffers(*ak_to_buffers([[1, 2, 3], [], 4, 5])).tolist() == [
[1, 2, 3],
[],
4,
5,
]
assert pickle.loads(pickle.dumps(ak_Array([[1, 2, 3], [], 4, 5]), -1)).tolist() == [
[1, 2, 3],
[],
4,
5,
]
def test_unmaskedarray():
content = ak_Array([1, 2, 3, 4, 5]).layout
unmaskedarray = ak._v2.contents.UnmaskedArray(content)
assert ak_from_buffers(*ak_to_buffers(unmaskedarray)).tolist() == [1, 2, 3, 4, 5]
assert pickle.loads(pickle.dumps(ak_Array(unmaskedarray), -1)).tolist() == [
1,
2,
3,
4,
5,
]
|
the-stack_0_17332 | """Given an string S, find all its permutations.
Example:
S = "abc"
Permutations found = ["abc", "acb", "bac", "bca", "cab", "cba"]
"""
def find_permutations(s):
if len(s) <= 1:
return [s]
permutations = []
def _find_permutations(partial, rest, permutations):
if len(rest) == 0:
permutations.append(partial)
for i in range(len(rest)):
_find_permutations(partial + rest[i], rest[:i] + rest[i+1:], permutations)
_find_permutations("", s, permutations)
return permutations
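# Hedged cross-check (addition): the standard library yields the same set of
# permutations; this helper is illustrative only and not used by the tests below.
def itertools_permutations(s):
    import itertools
    return ["".join(p) for p in itertools.permutations(s)]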
if __name__ == "__main__":
test_cases = [
("", [""]),
("a", ["a"]),
("ab", ["ab", "ba"]),
("abc", ["abc", "acb", "cab", "bac", "bca", "cba"]),
("abcd", [
"abcd", "abdc", "adbc", "dabc",
"acbd", "acdb", "adcb", "dacb",
"cabd", "cadb", "cdab", "dcab",
"bacd", "badc", "bdac", "dbac",
"bcad", "bcda", "bdca", "dbca",
"cbad", "cbda", "cdba", "dcba"
]),
]
for s, expected in test_cases:
found_permutations = find_permutations(s)
print(found_permutations)
assert set(found_permutations) == set(expected)
|
the-stack_0_17335 |
from cloudify import ctx
from cloudify import utils
from cloudify.exceptions import NonRecoverableError
from StringIO import StringIO
import base64
import os
import platform
import re
import subprocess
import sys
import time
import threading
import json
def convert_env_value_to_string(envDict):
    # Iterate over a snapshot so the dict can be safely mutated while looping.
    for key, value in list(envDict.items()):
        envDict.pop(key)
        envDict[str(key)] = str(value)
def get_attribute_user(ctx):
if get_attribute_from_top_host(ctx, 'user'):
return get_attribute_from_top_host(ctx, 'user')
if get_attribute(ctx, 'cloudify_agent'):
return get_attribute(ctx, 'cloudify_agent').get('user', None)
if get_attribute(ctx, 'agent_config'):
return get_attribute(ctx, 'agent_config').get('user', None)
return None
def get_attribute_key(ctx):
if get_attribute_from_top_host(ctx, 'key'):
return get_attribute_from_top_host(ctx, 'key')
if get_attribute(ctx, 'cloudify_agent'):
return get_attribute(ctx, 'cloudify_agent').get('key', None)
if get_attribute(ctx, 'agent_config'):
return get_attribute(ctx, 'agent_config').get('key', None)
return None
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
# ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name,json.dumps(entity.node.properties)))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
# ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration)))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
# ctx.logger.debug('Mapping exists for attribute {0} with value {1}'.format(attribute_name, json.dumps(mapped_value)))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
# ctx.logger.debug('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
# ctx.logger.debug('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name):
attribute_value = entity.instance.runtime_properties.get(capability_attribute_name, None)
if attribute_value is not None:
# ctx.logger.debug('Found the capability attribute {0} with value {1} on the node {2}'.format(attribute_name, json.dumps(attribute_value), entity.node.id))
return attribute_value
return get_attribute(entity, attribute_name)
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
# ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id,
# node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
# Same as previous method but will first try to find the attribute on the capability.
def _all_instances_get_target_capa_or_node_attribute(entity, capability_attribute_name, attribute_name):
result_map = {}
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
attribute_value = node_instance.runtime_properties.get(capability_attribute_name, None)
if attribute_value is not None:
prop_value = attribute_value
else:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
# ctx.logger.debug('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, json.dumps(prop_value), entity.node.id,
# node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
# ctx.logger.debug('Found the property {0} with value {1} on the node {2}'.format(property_name, json.dumps(property_value), entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
# ctx.logger.debug('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
# ctx.logger.debug('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, json.dumps(node.properties)))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
# ctx.logger.debug('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, json.dumps(mapping_configuration)))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def get_public_or_private_ip(entity):
public_ip = get_attribute(entity, 'public_ip_address')
if not public_ip:
return get_attribute(entity, 'ip_address')
return public_ip
def get_attribute_from_top_host(entity, attribute_name):
host = get_host(entity)
while host is not None:
entity = host
host = get_host(entity)
return get_attribute(entity, attribute_name)
ctx.instance.runtime_properties['tosca_id'] = ctx.instance.id
ctx.instance.runtime_properties['tosca_name'] = ctx.node.id
ctx.instance.runtime_properties['component_version'] = r'5'
|
the-stack_0_17336 | """Service calls related dependencies for LCN component."""
import pypck
import voluptuous as vol
from homeassistant.const import (
CONF_ADDRESS,
CONF_BRIGHTNESS,
CONF_STATE,
CONF_UNIT_OF_MEASUREMENT,
TIME_SECONDS,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_CONNECTIONS,
CONF_KEYS,
CONF_LED,
CONF_OUTPUT,
CONF_PCK,
CONF_RELVARREF,
CONF_ROW,
CONF_SETPOINT,
CONF_TABLE,
CONF_TEXT,
CONF_TIME,
CONF_TIME_UNIT,
CONF_TRANSITION,
CONF_VALUE,
CONF_VARIABLE,
DATA_LCN,
LED_PORTS,
LED_STATUS,
OUTPUT_PORTS,
RELVARREF,
SENDKEYCOMMANDS,
SETPOINTS,
THRESHOLDS,
TIME_UNITS,
VAR_UNITS,
VARIABLES,
)
from .helpers import (
get_connection,
is_address,
is_key_lock_states_string,
is_relays_states_string,
)
class LcnServiceCall:
"""Parent class for all LCN service calls."""
schema = vol.Schema({vol.Required(CONF_ADDRESS): is_address})
def __init__(self, hass):
"""Initialize service call."""
self.connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
def get_address_connection(self, call):
"""Get address connection object."""
addr, connection_id = call.data[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*addr)
if connection_id is None:
connection = self.connections[0]
else:
connection = get_connection(self.connections, connection_id)
return connection.get_address_conn(addr)
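    # Hedged note (addition): after `is_address` validation, call.data[CONF_ADDRESS]
    # unpacks as ((segment_id, address_id, is_group), connection_id) to match the
    # pypck.lcn_addr.LcnAddr(*addr) call above; the field names are descriptive
    # assumptions, not taken from the schema helpers.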
class OutputAbs(LcnServiceCall):
"""Set absolute brightness of output port in percent."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0)
),
}
)
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
brightness = call.data[CONF_BRIGHTNESS]
transition = pypck.lcn_defs.time_to_ramp_value(
call.data[CONF_TRANSITION] * 1000
)
address_connection = self.get_address_connection(call)
address_connection.dim_output(output.value, brightness, transition)
class OutputRel(LcnServiceCall):
"""Set relative brightness of output port in percent."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Required(CONF_BRIGHTNESS): vol.All(
vol.Coerce(int), vol.Range(min=-100, max=100)
),
}
)
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
brightness = call.data[CONF_BRIGHTNESS]
address_connection = self.get_address_connection(call)
address_connection.rel_output(output.value, brightness)
class OutputToggle(LcnServiceCall):
"""Toggle output port."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_OUTPUT): vol.All(vol.Upper, vol.In(OUTPUT_PORTS)),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0)
),
}
)
def __call__(self, call):
"""Execute service call."""
output = pypck.lcn_defs.OutputPort[call.data[CONF_OUTPUT]]
transition = pypck.lcn_defs.time_to_ramp_value(
call.data[CONF_TRANSITION] * 1000
)
address_connection = self.get_address_connection(call)
address_connection.toggle_output(output.value, transition)
class Relays(LcnServiceCall):
"""Set the relays status."""
schema = LcnServiceCall.schema.extend(
{vol.Required(CONF_STATE): is_relays_states_string}
)
def __call__(self, call):
"""Execute service call."""
states = [
pypck.lcn_defs.RelayStateModifier[state] for state in call.data[CONF_STATE]
]
address_connection = self.get_address_connection(call)
address_connection.control_relays(states)
class Led(LcnServiceCall):
"""Set the led state."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_LED): vol.All(vol.Upper, vol.In(LED_PORTS)),
vol.Required(CONF_STATE): vol.All(vol.Upper, vol.In(LED_STATUS)),
}
)
def __call__(self, call):
"""Execute service call."""
led = pypck.lcn_defs.LedPort[call.data[CONF_LED]]
led_state = pypck.lcn_defs.LedStatus[call.data[CONF_STATE]]
address_connection = self.get_address_connection(call)
address_connection.control_led(led, led_state)
class VarAbs(LcnServiceCall):
"""Set absolute value of a variable or setpoint.
Variable has to be set as counter!
Regulator setpoints can also be set using R1VARSETPOINT, R2VARSETPOINT.
"""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_VARIABLE): vol.All(
vol.Upper, vol.In(VARIABLES + SETPOINTS)
),
vol.Optional(CONF_VALUE, default=0): vol.All(
vol.Coerce(int), vol.Range(min=0)
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
}
)
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
value = call.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(call.data[CONF_UNIT_OF_MEASUREMENT])
address_connection = self.get_address_connection(call)
address_connection.var_abs(var, value, unit)
class VarReset(LcnServiceCall):
"""Reset value of variable or setpoint."""
schema = LcnServiceCall.schema.extend(
{vol.Required(CONF_VARIABLE): vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS))}
)
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
address_connection = self.get_address_connection(call)
address_connection.var_reset(var)
class VarRel(LcnServiceCall):
"""Shift value of a variable, setpoint or threshold."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_VARIABLE): vol.All(
vol.Upper, vol.In(VARIABLES + SETPOINTS + THRESHOLDS)
),
vol.Optional(CONF_VALUE, default=0): int,
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
vol.Optional(CONF_RELVARREF, default="current"): vol.All(
vol.Upper, vol.In(RELVARREF)
),
}
)
def __call__(self, call):
"""Execute service call."""
var = pypck.lcn_defs.Var[call.data[CONF_VARIABLE]]
value = call.data[CONF_VALUE]
unit = pypck.lcn_defs.VarUnit.parse(call.data[CONF_UNIT_OF_MEASUREMENT])
value_ref = pypck.lcn_defs.RelVarRef[call.data[CONF_RELVARREF]]
address_connection = self.get_address_connection(call)
address_connection.var_rel(var, value, unit, value_ref)
class LockRegulator(LcnServiceCall):
"""Locks a regulator setpoint."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(SETPOINTS)),
vol.Optional(CONF_STATE, default=False): bool,
}
)
def __call__(self, call):
"""Execute service call."""
setpoint = pypck.lcn_defs.Var[call.data[CONF_SETPOINT]]
state = call.data[CONF_STATE]
reg_id = pypck.lcn_defs.Var.to_set_point_id(setpoint)
address_connection = self.get_address_connection(call)
address_connection.lock_regulator(reg_id, state)
class SendKeys(LcnServiceCall):
"""Sends keys (which executes bound commands)."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_KEYS): vol.All(
vol.Upper, cv.matches_regex(r"^([A-D][1-8])+$")
),
vol.Optional(CONF_STATE, default="hit"): vol.All(
vol.Upper, vol.In(SENDKEYCOMMANDS)
),
vol.Optional(CONF_TIME, default=0): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_TIME_UNIT, default=TIME_SECONDS): vol.All(
vol.Upper, vol.In(TIME_UNITS)
),
}
)
def __call__(self, call):
"""Execute service call."""
address_connection = self.get_address_connection(call)
keys = [[False] * 8 for i in range(4)]
key_strings = zip(call.data[CONF_KEYS][::2], call.data[CONF_KEYS][1::2])
for table, key in key_strings:
table_id = ord(table) - 65
key_id = int(key) - 1
keys[table_id][key_id] = True
delay_time = call.data[CONF_TIME]
if delay_time != 0:
hit = pypck.lcn_defs.SendKeyCommand.HIT
if pypck.lcn_defs.SendKeyCommand[call.data[CONF_STATE]] != hit:
raise ValueError(
"Only hit command is allowed when sending deferred keys."
)
delay_unit = pypck.lcn_defs.TimeUnit.parse(call.data[CONF_TIME_UNIT])
address_connection.send_keys_hit_deferred(keys, delay_time, delay_unit)
else:
state = pypck.lcn_defs.SendKeyCommand[call.data[CONF_STATE]]
address_connection.send_keys(keys, state)
class LockKeys(LcnServiceCall):
"""Lock keys."""
schema = LcnServiceCall.schema.extend(
{
vol.Optional(CONF_TABLE, default="a"): vol.All(
vol.Upper, cv.matches_regex(r"^[A-D]$")
),
vol.Required(CONF_STATE): is_key_lock_states_string,
vol.Optional(CONF_TIME, default=0): vol.All(int, vol.Range(min=0)),
vol.Optional(CONF_TIME_UNIT, default=TIME_SECONDS): vol.All(
vol.Upper, vol.In(TIME_UNITS)
),
}
)
def __call__(self, call):
"""Execute service call."""
address_connection = self.get_address_connection(call)
states = [
pypck.lcn_defs.KeyLockStateModifier[state]
for state in call.data[CONF_STATE]
]
table_id = ord(call.data[CONF_TABLE]) - 65
delay_time = call.data[CONF_TIME]
if delay_time != 0:
if table_id != 0:
raise ValueError(
"Only table A is allowed when locking keys for a specific time."
)
delay_unit = pypck.lcn_defs.TimeUnit.parse(call.data[CONF_TIME_UNIT])
address_connection.lock_keys_tab_a_temporary(delay_time, delay_unit, states)
else:
address_connection.lock_keys(table_id, states)
address_connection.request_status_locked_keys_timeout()
class DynText(LcnServiceCall):
"""Send dynamic text to LCN-GTxD displays."""
schema = LcnServiceCall.schema.extend(
{
vol.Required(CONF_ROW): vol.All(int, vol.Range(min=1, max=4)),
vol.Required(CONF_TEXT): vol.All(str, vol.Length(max=60)),
}
)
def __call__(self, call):
"""Execute service call."""
row_id = call.data[CONF_ROW] - 1
text = call.data[CONF_TEXT]
address_connection = self.get_address_connection(call)
address_connection.dyn_text(row_id, text)
class Pck(LcnServiceCall):
"""Send arbitrary PCK command."""
schema = LcnServiceCall.schema.extend({vol.Required(CONF_PCK): str})
def __call__(self, call):
"""Execute service call."""
pck = call.data[CONF_PCK]
address_connection = self.get_address_connection(call)
address_connection.pck(pck)
|
the-stack_0_17337 |
#from six.moves import xrange
import common
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# basics
tf.app.flags.DEFINE_integer('batch_size', 24,
"""Number of images to process in a batch.""")
# naming
tf.app.flags.DEFINE_string('UPDATE_OPS_COLLECTION', 'update_ops',
""" collection of ops to be updated""")
tf.app.flags.DEFINE_string('LOSSES_COLLECTION', 'losses',
""" collection of ops to be updated""")
# training
tf.app.flags.DEFINE_integer('num_epochs_per_decay', 2,
"""number of epochs per decay""")
tf.app.flags.DEFINE_float('initial_learning_rate', 0.01,
"""initial learning rate""")
tf.app.flags.DEFINE_float('learning_rate_decay', 0.1,
"""decay factor of learning rate""")
tf.app.flags.DEFINE_float('momentum', 0.9,
"""momentum of optimization""")
# inference of resnet
def inference_resnet(images, num_output=1):
with tf.variable_scope('1'):
conv1 = common.conv(images, 64, ksize=7, stride=2)
conv1 = common.bn(conv1)
pool1 = common.max_pool(conv1)
with tf.variable_scope('2'):
stack2 = common.res_stack(pool1, [256, 256, 256], pool=False)
with tf.variable_scope('3'):
stack3 = common.res_stack(stack2, [512, 512, 512, 512])
with tf.variable_scope('4'):
stack4 = common.res_stack(stack3, [1024, 1024, 1024,
1024, 1024, 1024])
with tf.variable_scope('5'):
stack5 = common.res_stack(stack4, [2048, 2048, 2048])
pool5 = common.global_ave_pool(stack5)
with tf.variable_scope('fc'):
fc = common.fc(pool5, num_output)
return tf.sigmoid(fc)
|
the-stack_0_17338 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
NOTE: This module shall not be used by external projects. It will be moved
to neutron-lib in due course, and then it can be used from there.
"""
import contextlib
from neutron_lib.db import utils as db_utils
from oslo_log import log as logging
from oslo_utils import excutils
LOG = logging.getLogger(__name__)
@contextlib.contextmanager
def _noop_context_manager():
yield
def safe_creation(context, create_fn, delete_fn, create_bindings,
transaction=True):
'''This function wraps logic of object creation in safe atomic way.
In case of exception, object is deleted.
More information when this method could be used can be found in
developer guide - Effective Neutron: Database interaction section.
https://docs.openstack.org/neutron/latest/contributor/effective_neutron.html
:param context: context
:param create_fn: function without arguments that is called to create
object and returns this object.
:param delete_fn: function that is called to delete an object. It is
called with object's id field as an argument.
:param create_bindings: function that is called to create bindings for
an object. It is called with object's id field as an argument.
:param transaction: if true the whole operation will be wrapped in a
transaction. if false, no transaction will be used.
'''
cm = (context.session.begin(subtransactions=True)
if transaction else _noop_context_manager())
with cm:
obj = create_fn()
try:
value = create_bindings(obj['id'])
except Exception:
with excutils.save_and_reraise_exception():
try:
delete_fn(obj['id'])
except Exception as e:
LOG.error("Cannot clean up created object %(obj)s. "
"Exception: %(exc)s", {'obj': obj['id'],
'exc': e})
return obj, value
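def _example_safe_creation_usage(context, plugin, port_data):
    """Hedged illustration (addition): one way a plugin could wire safe_creation.
    The plugin methods named here are hypothetical, not part of this module.
    """
    return safe_creation(
        context,
        create_fn=lambda: plugin.create_port_db(context, port_data),
        delete_fn=lambda port_id: plugin.delete_port_db(context, port_id),
        create_bindings=lambda port_id: plugin.create_port_bindings(context, port_id),
    )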
def model_query(context, model):
query = context.session.query(model)
# define basic filter condition for model query
query_filter = None
if db_utils.model_query_scope_is_project(context, model):
query_filter = (model.tenant_id == context.tenant_id)
if query_filter is not None:
query = query.filter(query_filter)
return query
|
the-stack_0_17340 | from PIL import Image,ImageTk
import random as rand
import turtle as trtl
# import required module
import os
from playsound import playsound
import tkinter.messagebox
wn = trtl.Screen()
#si = tk.Tk()
si = trtl.Turtle()
caller = trtl.Turtle()
st = trtl.Turtle()
rSt = trtl.Turtle()
user = trtl.Turtle()
point = trtl.Turtle()
#score = trtl.Turtle()
count = 0
caller_list = ['abrupt stop', 'speed bump','right','left','go']
caller_txt = []
#Message ="Abrupt stop = DOWN speed bump = SHIFT right = RIGHT left = LEFT go =UP"
tkinter.messagebox.showinfo('Directions','Abrupt stop = DOWN speed bump = SHIFT right = RIGHT left = LEFT go =UP')
#wn = tk.Tk()
#wn.screensize("400x400")
# --- Window Creator ---
wn.title("Vroom Vroom: BTS Edition")
#wn.window_height(150)
wn.setup(height=500,width=500)
#caller_img ="huh_resize.gif"
#user_label = Label(wn,image=caller_img)
# ---IMages ---
as_img = "vAb.gif"
wn.addshape(as_img)
sb_img = "vSb_resize.gif"
wn.addshape(sb_img)
r_img = "right_resize.gif"
wn.addshape(r_img)
l_img = "vL.gif"
wn.addshape(l_img)
go_img = "go_resize.gif"
wn.addshape(go_img)
caller_img = "huh_resize.gif"
wn.addshape(caller_img)
# --- Functions ---
x = -191
y = 180
caller.pu()
caller.goto(x,y)
si.pu()
si.ht()
si.goto(-120,150)
start_pic = "st_resize.gif"
wn.addshape(start_pic)
st.shape(start_pic)
st.pu()
st.goto(0,180)
restart_pic = "restart_resized.gif"
wn.addshape(restart_pic)
rSt.shape(restart_pic)
rSt.pu()
rSt.goto(0,180)
user_pic = "plyr_resize.gif"
wn.addshape(user_pic)
user.shape(user_pic)
user.pu()
user.goto(0,-50)
def startPress(x, y):
caller.shape(caller_img)
st.ht()
rSt.st()
#print('playing sound using native player')
playsound('vvvcopy.wav')
wn.delay(10)
si.clear()
callerChoose()
# callerSoundOs()
def rStPress(x, y):
rSt.ht()
st.st()
si.clear()
# gameMain()
def callerChoose():
#st.ht()
global caller_txt
si.ht()
caller_txt = rand.choice(caller_list)
si.write(caller_txt,font=("Arial",15))
print(caller_txt)
callerSoundOs()
#wn.delay(10)
#si.ht()
def callerSound():
#caller_pic = "huh_resize.gif"
if caller_txt == caller_list[0]:
print("Ab")
playsound('vDa_AS.wav')
cAs()
elif caller_txt == caller_list[1]:
print("sb")
playsound('vS_sb.wav')
cSb()
elif caller_txt == caller_list[2]:
print("right")
playsound('vR.wav')
cR()
elif caller_txt == caller_list[3]:
print("left")
playsound('vL.wav')
cL()
#vroomVroom_wn.addshape(caller_pic)
#caller.shape(caller_pic)
elif caller_txt == caller_list[4]:
print('go')
playsound('vUp_go.wav')
cGo()
def callerSoundOs():
global caller_txt
print("cSOs")
#caller_pic = "huh_resize.gif"
if caller_txt == caller_list[0]:
print("ab")
playsound('vDa_AS.wav')
#cAs()
elif caller_txt == caller_list[1]:
print("sb")
playsound('vS_sb.wav')
#cSb()
elif caller_txt == caller_list[2]:
print("r")
playsound('vR.wav')
#cR()
elif caller_txt == caller_list[3]:
print("l")
playsound('vL.wav')
#cL()
#vroomVroom_wn.addshape(caller_pic)
#caller.shape(caller_pic)
elif caller_txt == caller_list[4]:
print("g")
playsound('vUp_go.wav')
#cGo()
def playSound(caller_txt):
    # Unused helper retained from development; fixed so the comparisons check the
    # called-out string directly and the caller sprite actually changes shape.
    if caller_txt == 'abrupt stop':
        playsound('vDa_AS.wav')
        caller.shape(as_img)
    elif caller_txt == 'speed bump':
        playsound('vS_sb.wav')
        caller.shape(sb_img)
    elif caller_txt == 'right':
        playsound('vR.wav')
        caller.shape(r_img)
    elif caller_txt == 'left':
        playsound('vL.wav')
        caller.shape(l_img)
    elif caller_txt == 'go':
        playsound('vUp_go.wav')
        caller.shape(go_img)
def abruptStop():
user.shape(as_img)
caller.shape(as_img)
def speedBump():
user.shape(sb_img)
caller.shape(sb_img)
def rightTurn():
user.shape(r_img)
def leftTurn():
user.shape(l_img)
def goFD():
user.shape(go_img)
def cAs():
caller.shape(as_img)
def cSb():
caller.shape(sb_img)
def cR():
caller.shape(r_img)
def cL():
caller.shape(l_img)
def cGo():
caller.shape(go_img)
def gameMain():
#caller.shapesize(10)
callerChoose()
#callerSoundOs()
#callerSound()
gameMain()
st.onclick(startPress)
rSt.onclick(rStPress)
wn.onkeypress(abruptStop,'Down')
wn.onkeypress(speedBump,'Return')
wn.onkeypress(rightTurn,'Right')
wn.onkeypress(leftTurn,'Left')
wn.onkeypress(goFD,'Up')
# createCaller()
# createRestart_btn()
# createUser()
# createStart_btn()
wn.listen()
wn.mainloop() |
the-stack_0_17341 | '''
Created on 29 Oct 2013
@author: michael
'''
from django import template
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from tunobase.core import models, nodes
register = template.Library()
@register.tag
def smart_query_string(parser, token):
'''
Outputs current GET query string with additions appended.
Additions are provided in token pairs.
'''
args = token.split_contents()
additions = args[1:]
addition_pairs = []
while additions:
addition_pairs.append(additions[0:2])
additions = additions[2:]
return nodes.SmartQueryStringNode(addition_pairs)
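# Hedged usage illustration (addition): in a template this tag would be invoked
# roughly as below, with additions given as key/value token pairs; the parameter
# names shown are hypothetical.
#   {% smart_query_string 'page' 2 'sort' 'name' %}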
@register.assignment_tag
def gallery_surrounding_image_pks(gallery, gallery_image_pk):
gallery_images = list(gallery.images.all())
previous_image_pk = None
next_image_pk = None
for i, gallery_image in enumerate(gallery_images):
if gallery_image.pk == gallery_image_pk:
try:
previous_image_pk = gallery_images[i+1].pk
except IndexError:
pass
try:
next_image_pk = gallery_images[i-1].pk
except IndexError:
pass
break
return {
'next_image_pk': next_image_pk,
'previous_image_pk': previous_image_pk
}
@register.filter
def letterify(value):
return str(unichr(65 + value))
@register.filter
def class_name(obj):
return obj.__class__.__name__ |
the-stack_0_17343 | from os import path
from setuptools import setup
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name='apt-ios-repo',
version='0.1',
packages=['apt_ios_repo'],
url='https://github.com/arturgoms/python-apt-ios-repo',
license='MIT',
author='Artur Gomes',
author_email='[email protected]',
description='Python library to manage and query APT repositories from iOS Jailbreak community',
long_description_content_type='text/markdown',
long_description=long_description,
python_requires='>=3.9',
install_requires=['requests'],
)
|
the-stack_0_17348 | import os
import traceback
import sys
from tensorflow.core.framework import summary_pb2
from tensorflow.python.summary.writer import writer_cache
import tensorflow as tf
import numpy as np
class InMemoryFinetune(tf.estimator.SessionRunHook):
def __init__(self, config_to_eval, model, eval_dir, X, Y, X_test, Y_test, name=None, every_n_iter=100):
if every_n_iter is None or every_n_iter <= 0:
raise ValueError('invalid every_n_iter=%s.' % every_n_iter)
self._current_finetune = model
self._config_to_finetune = config_to_eval
self._name = name
self._every_n_iter = every_n_iter
self._timer = tf.estimator.SecondOrStepTimer(every_steps=every_n_iter)
self._eval_dir = eval_dir
self.train_data = (X, Y)
self.test_data = (X_test, Y_test)
self._iter_count = 0
def begin(self):
self._timer.reset()
self._iter_count = 0
def after_create_session(self, session, coord):
"""Does first run which shows the metrics before training."""
self._evaluate(session)
def _evaluate(self, session):
try:
with tf.Graph().as_default():
from finetune import Classifier
model = Classifier(**self._config_to_finetune)
if self._current_finetune.saver.variables:
model.saver.variables = {
k: v.copy() for k, v in self._current_finetune.saver.variables.items()
if "global_step" not in k and "Adam" not in k
}
model.saver.fallback_ = {
k: v for k, v in self._current_finetune.saver.fallback.items() if "global_step" not in k
}
train_x, train_y = self.train_data
model.fit(train_x, train_y)
test_x, test_y = self.test_data
test_accuracy = np.mean(model.predict(test_x) == test_y)
train_accuracy = np.mean(model.predict(train_x) == train_y)
except IOError as e:
traceback.print_exc(file=sys.stdout)
test_accuracy = -1.0
train_accuracy = -1.0
global_step = session.run(tf.compat.v1.train.get_or_create_global_step())
directory = os.path.join(self._eval_dir, "..", "finetuning")
if not os.path.exists(directory):
os.makedirs(directory)
summary_writer = writer_cache.FileWriterCache.get(directory)
summary_proto = summary_pb2.Summary()
summary_proto.value.add(tag="finetuning/{}_train_accurary".format(self._name), simple_value=float(train_accuracy))
summary_proto.value.add(tag="finetuning/{}_test_accurary".format(self._name), simple_value=float(test_accuracy))
summary_writer.add_summary(summary_proto, global_step)
summary_writer.flush()
self._timer.update_last_triggered_step(self._iter_count)
def after_run(self, run_context, run_values):
self._iter_count += 1
if self._timer.should_trigger_for_step(self._iter_count):
self._evaluate(run_context.session)
def end(self, session):
self._evaluate(session)
def make_in_memory_finetune_hooks(model, estimator):
hooks = []
for f in model.config.in_memory_finetune:
hooks.append(InMemoryFinetune(
config_to_eval=f["config"],
model=model,
eval_dir=estimator.eval_dir(),
X=f["X"],
Y=f["Y"],
X_test=f["X_test"],
Y_test=f["Y_test"],
name=f["name"],
every_n_iter=f["every_n_iter"]
))
return hooks
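# Illustrative sketch of the configuration this factory expects (field names are
# taken from the loop above; the model/estimator objects are assumed to come from
# the finetune library, and each "config" dict is forwarded verbatim to Classifier):
#   model.config.in_memory_finetune = [{
#       "config": {...},                 # kwargs for finetune.Classifier(**config)
#       "X": train_texts, "Y": train_labels,
#       "X_test": test_texts, "Y_test": test_labels,
#       "name": "held_out_probe",
#       "every_n_iter": 100,
#   }]
#   hooks = make_in_memory_finetune_hooks(model, estimator)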
the-stack_0_17349 | #!/usr/bin/env python
# coding: utf-8
# # Ans 1.
# In[16]:
def getrange(fibfun):
def fibseries():
print("Enter the range of Fibonacci Series")
end = int(input("Enter ending no: "))
fibfun(end)
return fibseries
# In[17]:
@getrange
def fibonacciSeries(ending):
starting = 0
num = 0
first = 1
for i in range(ending):
print("Fibonacci Series :", num)
starting = first
first = num
num = starting + first
# In[18]:
fibonacciSeries()
# # Ans 2)
# In[60]:
file = open("Day8-AA","w")
file.write("Hey there!, How are you")
file.close()
# In[61]:
file = open("Day8-AA","r")
fileData = file.read()
print(fileData)
file.close()
# In[62]:
file = open("Day8-AA","a")
file.write("?")
file.close()
# In[63]:
file = open("Day8-AA","r")
fileData = file.read()
print(fileData)
file.close()
# In[64]:
file = open("Day8-AA", "r")
try :
file.write("I am fine thankyou")
except :
print("File is opened in read only mode, not writable!!!")
fileData = file.read()
print(fileData)
file.close()
# In[ ]:
the-stack_0_17350 | # EQUILIBRIUM PROFILES
import numpy as np
import csv
from matplotlib import pyplot as plt
def eqProfs(state):
#Compiling CSV data into independent variables
dataArr = []
with open('output_runModel/equilibrium.csv', 'r') as equilibriumFile:
csvRead = csv.reader(equilibriumFile)
for row in csvRead:
dataArr.append(row)
dataArr = [ele for ele in dataArr if ele != []]
lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf = dataArr[0],dataArr[1],dataArr[2], dataArr[3], dataArr[4]
timeTaken = ''.join(dataArr[5])
airPressure_vCoord = np.array(state['air_pressure_on_interface_levels']).flatten()
airPressure_vCoord = [round((float(ele)/1000),0) for ele in airPressure_vCoord]
    # Plotting Schwarzschild deltas (net longwave flux).
fig = plt.figure()
lwFluxNet = [float(i) for i in lwFluxNet]
plt.plot(lwFluxNet,airPressure_vCoord)
plt.gca().invert_yaxis()
plt.xlabel("Net longwave radiative flux (Wm^-2)")
plt.ylabel("Pressure (kPa)")
plt.savefig("graphs/equilibrium_netFlux_vertical.png")
# Plotting upwelling longwave flux (p)
fig = plt.figure()
lwFluxUp = [float(i) for i in lwFluxUp]
plt.gca().invert_yaxis()
plt.plot(lwFluxUp, airPressure_vCoord)
plt.xlabel("Upwelling longwave radiative flux (Wm^-2)")
plt.ylabel("Pressure (kPa)")
plt.savefig("graphs/equilibrium_upFlux_vertical.png")
# Plotting downwelling longwave flux (p)
fig = plt.figure()
lwFluxDown = [float(i) for i in lwFluxDown]
plt.xlabel("Downwelling longwave radiative flux (Wm^-2)")
plt.ylabel("Pressure (kPa)")
plt.gca().invert_yaxis()
plt.plot(lwFluxDown,airPressure_vCoord)
plt.savefig("graphs/equilibrium_downFlux_vertical.png")
# Plotting heating rate (p)
fig = plt.figure()
heatRate = [float(i) for i in heatRate]
plt.xlabel("Longwave Heating Rate")
plt.ylabel("Pressure (kPa)")
plt.gca().invert_yaxis()
plt.plot(heatRate,airPressure_vCoord)
plt.savefig("graphs/equilibrium_heatRate_vertical.png")
fig = plt.figure()
airTemperatureProf = [float(i) for i in airTemperatureProf]
plt.xlabel("Air Temperature (K)")
plt.ylabel("Pressure (kPa)")
plt.gca().invert_yaxis()
plt.plot(airTemperatureProf,airPressure_vCoord[:28])
plt.savefig("graphs//equilibrium_airT_vertical.png")
    return 0.
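# Illustrative note on the expected inputs (derived from the parsing above, not from
# separate documentation): output_runModel/equilibrium.csv is assumed to hold, in order,
# the net longwave flux, upwelling flux, downwelling flux, heating rate, the air
# temperature profile and the elapsed time, while `state` must provide
# 'air_pressure_on_interface_levels' (a sympl/climt-style state dictionary):
#   eqProfs(state)   # writes the equilibrium profile PNGs into graphs/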
the-stack_0_17351 | """
COSE_Encrypt0: Encrypted Messages with Implicit Key
COSE_Encrypt0 = [
Headers,
ciphertext: bstr / nil,
]
"""
from typing import Optional, TYPE_CHECKING
from cose import utils
from cose.messages import enccommon, cosemessage
if TYPE_CHECKING:
from cose.keys.symmetric import SK
CBOR = bytes
@cosemessage.CoseMessage.record_cbor_tag(16)
class Enc0Message(enccommon.EncCommon):
context = "Encrypt0"
cbor_tag = 16
@classmethod
def from_cose_obj(cls, cose_obj: list, *args, **kwargs) -> 'Enc0Message':
return super().from_cose_obj(cose_obj)
def __init__(self,
phdr: Optional[dict] = None,
uhdr: Optional[dict] = None,
payload: bytes = b'',
external_aad: bytes = b'',
key: Optional['SK'] = None):
"""
Create a COSE_encrypt0 message.
:param phdr: Protected header bucket.
:param uhdr: Unprotected header bucket.
:param payload: The payload (will be encrypted and authenticated).
:param external_aad: External data (is authenticated but not transported in the message).
:param key: The Symmetric COSE key for encryption/decryption of the message
:returns: Returns a COSE Encrypt0 message object.
"""
if phdr is None:
phdr = {}
if uhdr is None:
uhdr = {}
super().__init__(phdr, uhdr, payload, external_aad, key)
def encode(self, tag: bool = True, encrypt: bool = True, *args, **kwargs) -> CBOR:
"""
Encode and protect the COSE_Encrypt0 message.
:param tag: Boolean value which indicates if the COSE message will have a CBOR tag.
:param encrypt: Boolean which activates or deactivates the payload protection.
:return: Returns a CBOR-encoded COSE Encrypt0 message.
"""
if encrypt:
message = [self.phdr_encoded, self.uhdr_encoded, self.encrypt()]
else:
message = [self.phdr_encoded, self.uhdr_encoded, self.payload]
res = super(Enc0Message, self).encode(message, tag)
return res
def __repr__(self) -> str:
phdr, uhdr = self._hdr_repr()
return f'<COSE_Encrypt0: [{phdr}, {uhdr}, {utils.truncate(self._payload)}]>'
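# Usage sketch (illustrative; only the constructor and encode() defined above are
# used, and `sym_key` stands for a Symmetric COSE key with an AEAD algorithm and IV
# configured in the headers -- the exact header/key classes depend on the installed
# pycose version):
#   msg = Enc0Message(phdr={...}, uhdr={...}, payload=b'secret', key=sym_key)
#   encoded = msg.encode()   # CBOR bytes tagged with CBOR tag 16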
the-stack_0_17354 | def gen_fn():
result = yield 1
print('result of yield: {}'.format(result))
result2 = yield 2
print('result of 2nd yield: {}'.format(result2))
return 'done'
def caller_fn():
gen = gen_fn()
rv = yield from gen
print('return value of yield-from: {}'
.format(rv))
# Make a generator from the
# generator function.
caller = caller_fn()
caller.send(None)
caller.send('hello')
caller.send('goodbye')
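# Expected console output (the third send() raises StopIteration once caller_fn
# finishes, so the script ends with a traceback):
#   result of yield: hello
#   result of 2nd yield: goodbye
#   return value of yield-from: done
#   Traceback (most recent call last):
#     ...
#   StopIteration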
# def gen_fn():
# result = yield 1
# print('result of yield: {}'.format(result))
# result2 = yield 2
# print('result of 2nd yield: {}'.format(result2))
#     return 'done'
the-stack_0_17355 | # Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
# Defines common utilities for responsibleai tests
from dice_ml.utils import helpers
import numpy as np
import pandas as pd
from sklearn import svm
from sklearn.compose import ColumnTransformer
from sklearn.datasets import load_iris, load_breast_cancer, \
make_classification, load_boston
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
def create_sklearn_random_forest_classifier(X, y):
rfc = RandomForestClassifier(n_estimators=10, max_depth=4,
random_state=777)
model = rfc.fit(X, y)
return model
def create_lightgbm_classifier(X, y):
lgbm = LGBMClassifier(boosting_type='gbdt', learning_rate=0.1,
max_depth=5, n_estimators=200, n_jobs=1,
random_state=777)
model = lgbm.fit(X, y)
return model
def create_xgboost_classifier(X, y):
xgb = XGBClassifier(learning_rate=0.1, max_depth=3, n_estimators=100,
n_jobs=1, random_state=777)
model = xgb.fit(X, y)
return model
def create_sklearn_svm_classifier(X, y, probability=True):
clf = svm.SVC(gamma=0.001, C=100., probability=probability,
random_state=777)
model = clf.fit(X, y)
return model
def create_sklearn_logistic_regressor(X, y, pipeline=False):
lin = LogisticRegression(solver='liblinear')
if pipeline:
lin = Pipeline([('lin', lin)])
model = lin.fit(X, y)
return model
def create_sklearn_random_forest_regressor(X, y):
rfc = RandomForestRegressor(n_estimators=10, max_depth=4,
random_state=777)
model = rfc.fit(X, y)
return model
def create_iris_data():
# Import Iris dataset
iris = load_iris()
# Split data into train and test
X_train, X_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=0)
feature_names = [name.replace(' (cm)', '') for name in iris.feature_names]
classes = iris.target_names
X_train = pd.DataFrame(X_train, columns=feature_names)
X_test = pd.DataFrame(X_test, columns=feature_names)
return X_train, X_test, y_train, y_test, feature_names, classes
def create_cancer_data():
breast_cancer_data = load_breast_cancer()
classes = breast_cancer_data.target_names.tolist()
# Split data into train and test
X_train, X_test, y_train, y_test = train_test_split(
breast_cancer_data.data, breast_cancer_data.target,
test_size=0.2, random_state=0)
feature_names = breast_cancer_data.feature_names
classes = breast_cancer_data.target_names.tolist()
X_train = pd.DataFrame(X_train, columns=feature_names)
X_test = pd.DataFrame(X_test, columns=feature_names)
return X_train, X_test, y_train, y_test, feature_names, classes
def create_binary_classification_dataset():
X, y = make_classification()
# Split data into train and test
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
random_state=0)
classes = np.unique(y_train).tolist()
feature_names = ["col" + str(i) for i in list(range(X_train.shape[1]))]
X_train = pd.DataFrame(X_train, columns=feature_names)
X_test = pd.DataFrame(X_test, columns=feature_names)
return X_train, y_train, X_test, y_test, classes
def create_boston_data():
# Import Boston housing dataset
boston = load_boston()
# Split data into train and test
X_train, X_test, y_train, y_validation = train_test_split(
boston.data, boston.target,
test_size=0.2, random_state=7)
return X_train, X_test, y_train, y_validation, boston.feature_names
def create_adult_income_dataset():
dataset = helpers.load_adult_income_dataset()
continuous_features = ['age', 'hours_per_week']
target_name = 'income'
target = dataset[target_name]
classes = list(np.unique(target))
categorical_features = list(set(dataset.columns) -
set(continuous_features) -
set([target_name]))
# Split data into train and test
data_train, data_test, y_train, y_test = train_test_split(
dataset, target,
test_size=0.2, random_state=7, stratify=target)
return data_train, data_test, y_train, y_test, categorical_features, \
continuous_features, target_name, classes
def create_complex_classification_pipeline(
X_train, y_train, continuous_features, categorical_features):
# We create the preprocessing pipelines for both
# numeric and categorical data.
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
categorical_transformer = Pipeline(steps=[
('onehot', OneHotEncoder(handle_unknown='ignore'))])
transformations = ColumnTransformer(
transformers=[
('num', numeric_transformer, continuous_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
pipeline = Pipeline(steps=[('preprocessor', transformations),
('classifier', RandomForestClassifier())])
return pipeline.fit(X_train, y_train)
def create_models_classification(X_train, y_train):
svm_model = create_sklearn_svm_classifier(X_train, y_train)
log_reg_model = create_sklearn_logistic_regressor(X_train, y_train)
xgboost_model = create_xgboost_classifier(X_train, y_train)
lgbm_model = create_lightgbm_classifier(X_train, y_train)
rf_model = create_sklearn_random_forest_classifier(X_train, y_train)
return [svm_model, log_reg_model, xgboost_model, lgbm_model, rf_model]
def create_models_regression(X_train, y_train):
rf_model = create_sklearn_random_forest_regressor(X_train, y_train)
return [rf_model]
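# Illustrative wiring of the helpers above (a sketch, not part of the original tests):
#   data_train, data_test, y_train, y_test, cat_feats, cont_feats, target, classes = \
#       create_adult_income_dataset()
#   pipeline = create_complex_classification_pipeline(
#       data_train.drop(columns=[target]), y_train, cont_feats, cat_feats)
#   X_train, X_test, y_tr, y_te, feature_names, iris_classes = create_iris_data()
#   models = create_models_classification(X_train, y_tr)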
the-stack_0_17356 | '''
Descripttion:
version:
Author: LiQiang
Date: 2021-01-21 11:45:22
LastEditTime: 2021-01-21 13:05:07
'''
import argparse
import cv2
import torch
import os
import os.path as osp
import glob
from mmdet.apis import inference_detector, init_detector
from mmcv.utils import mkdir_or_exist
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--video_in_dir', type=str, default='',help='test video path')
parser.add_argument(
'--video_out_dir', type=str, default='', help='output video path')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--show', type=bool, default=False, help='bbox score threshold')
args = parser.parse_args()
return args
def list_files(path, ends):
files = []
list_dir = os.walk(path)
for maindir, subdir, all_file in list_dir:
for filename in all_file:
apath = os.path.join(maindir, filename)
if apath.endswith(ends):
files.append(apath)
return files
def detectvideo(model, video_in, video_out, args):
cap = cv2.VideoCapture(video_in)
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Get the input video's frame rate
    # Set the codec used when writing the output video
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
fps_video = cap.get(cv2.CAP_PROP_FPS)
    #### Important: create the writer with the input's fps and frame size
videoWriter = cv2.VideoWriter(video_out, fourcc, fps_video, (frame_width, frame_height))
count=0
print('Press "Esc", "q" or "Q" to exit.')
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
pbar = tqdm(range(length))
for i in pbar:
torch.cuda.empty_cache()
ret_val, img = cap.read()
if ret_val:
if count<0:
count+=1
print('Write {} in result Successfully!'.format(count))
continue
#############################
result = inference_detector(model, img)
# ch = cv2.waitKey(1)
# if ch == 27 or ch == ord('q') or ch == ord('Q'):
# break
frame=model.show_result(
img, result, score_thr=args.score_thr, wait_time=1, show=False,thickness=1)
if args.show:
cv2.imshow('frame',frame)
cv2.waitKey(1)
            if frame is not None and len(frame) >= 1:
                # Write the annotated frame to the output video
                videoWriter.write(frame)
count+=1
if count == 1000:
break
#############################
"""
            # if count%24==0:  # process only every 24th frame to see the effect faster
# result = inference_detector(model, img)
# ch = cv2.waitKey(1)
# if ch == 27 or ch == ord('q') or ch == ord('Q'):
# break
# frame=model.show_result(
# img, result, score_thr=args.score_thr, wait_time=1, show=False,thickness=1,font_scale=1)
# cv2.imshow('frame',frame)
# if len(frame)>=1 or frame:
            # # Write the frame to the output video
# videoWriter.write(frame)
# count+=1
# print('Write {} in result Successfully!'.format(count))
# else:
# count+=1
"""
else:
print('fail!!')
break
pbar.set_description("Processing video %s, frame : %d" % (video_in.replace(args.video_in_dir, ''), i))
cap.release()
videoWriter.release()
def main():
args = parse_args()
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
input_videos = list_files(args.video_in_dir, '.mp4')
print(input_videos)
for video in input_videos:
video_out = video.replace(args.video_in_dir, args.video_out_dir)
dir_name = osp.abspath(osp.dirname(video_out))
mkdir_or_exist(dir_name)
if 'RGB' in video:
continue
detectvideo(model, video, video_out, args)
if __name__ == '__main__':
    main()
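# Example invocation (script, config and checkpoint names are placeholders):
#   python detect_videos.py configs/faster_rcnn.py checkpoints/latest.pth \
#       --video_in_dir ./videos_in --video_out_dir ./videos_out --score-thr 0.3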
the-stack_0_17357 | import numpy as np
from collections import OrderedDict
from concurrent import futures as futures
from os import path as osp
from pathlib import Path
from skimage import io
from pdb import set_trace
def get_image_index_str(img_idx, use_prefix_id=False):
if use_prefix_id:
return '{:07d}'.format(img_idx)
else:
return '{:06d}'.format(img_idx)
def get_kitti_info_path(idx,
prefix,
info_type='image_2',
file_tail='.png',
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
img_idx_str = get_image_index_str(idx, use_prefix_id)
img_idx_str += file_tail
prefix = Path(prefix)
if training:
file_path = Path('training') / info_type / img_idx_str
else:
file_path = Path('testing') / info_type / img_idx_str
if exist_check and not (prefix / file_path).exists():
raise ValueError('file not exist: {}'.format(file_path))
if relative_path:
return str(file_path)
else:
return str(prefix / file_path)
def get_image_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
info_type='image_2',
use_prefix_id=False):
exist_check=False
return get_kitti_info_path(idx, prefix, info_type, '.png', training,
relative_path, exist_check, use_prefix_id)
def get_label_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
info_type='label_2',
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, info_type, '.txt', training,
relative_path, exist_check, use_prefix_id)
def get_velodyne_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, 'velodyne', '.bin', training,
relative_path, exist_check, use_prefix_id)
def get_calib_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, 'calib', '.txt', training,
relative_path, exist_check, use_prefix_id)
def get_pose_path(idx,
prefix,
training=True,
relative_path=True,
exist_check=True,
use_prefix_id=False):
return get_kitti_info_path(idx, prefix, 'pose', '.txt', training,
relative_path, exist_check, use_prefix_id)
def get_label_anno(label_path):
annotations = {}
annotations.update({
'name': [],
'truncated': [],
'occluded': [],
'alpha': [],
'bbox': [],
'dimensions': [],
'location': [],
'rotation_y': []
})
with open(label_path, 'r') as f:
lines = f.readlines()
# if len(lines) == 0 or len(lines[0]) < 15:
# content = []
# else:
content = [line.strip().split(' ') for line in lines]
num_objects = len([x[0] for x in content if x[0] != 'DontCare'])
annotations['name'] = np.array([x[0] for x in content])
num_gt = len(annotations['name'])
annotations['truncated'] = np.array([float(x[1]) for x in content])
annotations['occluded'] = np.array([int(x[2]) for x in content])
annotations['alpha'] = np.array([float(x[3]) for x in content])
annotations['bbox'] = np.array([[float(info) for info in x[4:8]]
for x in content]).reshape(-1, 4)
# dimensions will convert hwl format to standard lhw(camera) format.
annotations['dimensions'] = np.array([[float(info) for info in x[8:11]]
for x in content
]).reshape(-1, 3)[:, [2, 0, 1]]
annotations['location'] = np.array([[float(info) for info in x[11:14]]
for x in content]).reshape(-1, 3)
annotations['rotation_y'] = np.array([float(x[14])
for x in content]).reshape(-1)
if len(content) != 0 and len(content[0]) == 16: # have score
annotations['score'] = np.array([float(x[15]) for x in content])
else:
annotations['score'] = np.zeros((annotations['bbox'].shape[0], ))
index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
annotations['index'] = np.array(index, dtype=np.int32)
annotations['group_ids'] = np.arange(num_gt, dtype=np.int32)
return annotations
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
def get_kitti_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
image_ids=7481,
extend_matrix=True,
num_worker=8,
relative_path=True,
with_imageshape=True):
"""
KITTI annotation format version 2:
{
[optional]points: [N, 3+] point cloud
[optional, for kitti]image: {
image_idx: ...
image_path: ...
image_shape: ...
}
point_cloud: {
num_features: 4
velodyne_path: ...
}
[optional, for kitti]calib: {
R0_rect: ...
Tr_velo_to_cam: ...
P2: ...
}
annos: {
location: [num_gt, 3] array
dimensions: [num_gt, 3] array
rotation_y: [num_gt] angle array
name: [num_gt] ground truth name array
[optional]difficulty: kitti difficulty
[optional]group_ids: used for multi-part object
}
}
"""
root_path = Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
info = {}
pc_info = {'num_features': 4}
calib_info = {}
image_info = {'image_idx': idx}
annotations = None
if velodyne:
pc_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path)
image_info['image_path'] = get_image_path(idx, path, training,
relative_path)
if with_imageshape:
img_path = image_info['image_path']
if relative_path:
img_path = str(root_path / img_path)
image_info['image_shape'] = np.array(
io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(idx, path, training, relative_path)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
info['image'] = image_info
info['point_cloud'] = pc_info
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]
]).reshape([3, 4])
P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]
]).reshape([3, 4])
P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]
]).reshape([3, 4])
P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
R0_rect = np.array([
float(info) for info in lines[4].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
Tr_velo_to_cam = np.array([
float(info) for info in lines[5].split(' ')[1:13]
]).reshape([3, 4])
Tr_imu_to_velo = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
Tr_imu_to_velo = _extend_matrix(Tr_imu_to_velo)
calib_info['P0'] = P0
calib_info['P1'] = P1
calib_info['P2'] = P2
calib_info['P3'] = P3
calib_info['R0_rect'] = rect_4x4
calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam
calib_info['Tr_imu_to_velo'] = Tr_imu_to_velo
info['calib'] = calib_info
if annotations is not None:
info['annos'] = annotations
add_difficulty_to_annos(info)
return info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
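# Example call (illustrative; assumes the standard KITTI layout with training/image_2,
# training/label_2, training/velodyne and training/calib under the given root):
#   infos = get_kitti_image_info('./data/kitti', training=True, velodyne=True,
#                                calib=True, image_ids=[0, 1, 2])
#   print(infos[0]['image']['image_shape'], infos[0]['annos']['name'])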
def get_waymo_image_info(path,
training=True,
label_info=True,
velodyne=False,
calib=False,
pose=False,
image_ids=7481,
extend_matrix=True,
num_worker=1,
relative_path=True,
with_imageshape=True,
max_sweeps=5):
"""
Waymo annotation format version like KITTI:
{
[optional]points: [N, 3+] point cloud
[optional, for kitti]image: {
image_idx: ...
image_path: ...
image_shape: ...
}
point_cloud: {
num_features: 6
velodyne_path: ...
}
[optional, for kitti]calib: {
R0_rect: ...
Tr_velo_to_cam0: ...
P0: ...
}
annos: {
location: [num_gt, 3] array
dimensions: [num_gt, 3] array
rotation_y: [num_gt] angle array
name: [num_gt] ground truth name array
[optional]difficulty: kitti difficulty
[optional]group_ids: used for multi-part object
}
}
"""
root_path = Path(path)
if not isinstance(image_ids, list):
image_ids = list(range(image_ids))
def map_func(idx):
print(f"Sample idx:{idx}")
info = {}
pc_info = {'num_features': 6}
calib_info = {}
image_info = {'image_idx': idx}
annotations = None
if velodyne:
pc_info['velodyne_path'] = get_velodyne_path(
idx, path, training, relative_path, use_prefix_id=True)
points = np.fromfile(
Path(path) / pc_info['velodyne_path'], dtype=np.float32)
points = np.copy(points).reshape(-1, pc_info['num_features'])
info['timestamp'] = np.int64(points[0, -1])
# values of the last dim are all the timestamp
image_info['image_path'] = get_image_path(
idx,
path,
training,
relative_path,
info_type='image_0',
use_prefix_id=True)
if with_imageshape:
image_info['image_shape'] = np.array([1,1], dtype=np.int32)
#img_path = image_info['image_path']
#if relative_path:
# img_path = str(root_path / img_path)
#image_info['image_shape'] = np.array(
# io.imread(img_path).shape[:2], dtype=np.int32)
if label_info:
label_path = get_label_path(
idx,
path,
training,
relative_path,
info_type='label_all',
use_prefix_id=True)
if relative_path:
label_path = str(root_path / label_path)
annotations = get_label_anno(label_path)
info['image'] = image_info
info['point_cloud'] = pc_info
if calib:
calib_path = get_calib_path(
idx, path, training, relative_path=False, use_prefix_id=True)
with open(calib_path, 'r') as f:
lines = f.readlines()
P0 = np.array([float(info) for info in lines[0].split(' ')[1:13]
]).reshape([3, 4])
P1 = np.array([float(info) for info in lines[1].split(' ')[1:13]
]).reshape([3, 4])
P2 = np.array([float(info) for info in lines[2].split(' ')[1:13]
]).reshape([3, 4])
P3 = np.array([float(info) for info in lines[3].split(' ')[1:13]
]).reshape([3, 4])
P4 = np.array([float(info) for info in lines[4].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
P0 = _extend_matrix(P0)
P1 = _extend_matrix(P1)
P2 = _extend_matrix(P2)
P3 = _extend_matrix(P3)
P4 = _extend_matrix(P4)
R0_rect = np.array([
float(info) for info in lines[5].split(' ')[1:10]
]).reshape([3, 3])
if extend_matrix:
rect_4x4 = np.zeros([4, 4], dtype=R0_rect.dtype)
rect_4x4[3, 3] = 1.
rect_4x4[:3, :3] = R0_rect
else:
rect_4x4 = R0_rect
Tr_velo_to_cam = np.array([
float(info) for info in lines[6].split(' ')[1:13]
]).reshape([3, 4])
if extend_matrix:
Tr_velo_to_cam = _extend_matrix(Tr_velo_to_cam)
calib_info['P0'] = P0
calib_info['P1'] = P1
calib_info['P2'] = P2
calib_info['P3'] = P3
calib_info['P4'] = P4
calib_info['R0_rect'] = rect_4x4
calib_info['Tr_velo_to_cam'] = Tr_velo_to_cam
info['calib'] = calib_info
if pose:
pose_path = get_pose_path(
idx, path, training, relative_path=False, use_prefix_id=True)
info['pose'] = np.loadtxt(pose_path)
if annotations is not None:
info['annos'] = annotations
info['annos']['camera_id'] = info['annos'].pop('score')
add_difficulty_to_annos(info)
sweeps = []
prev_idx = idx
while len(sweeps) < max_sweeps:
prev_info = {}
prev_idx -= 1
prev_info['velodyne_path'] = get_velodyne_path(
prev_idx,
path,
training,
relative_path,
exist_check=False,
use_prefix_id=True)
if_prev_exists = osp.exists(
Path(path) / prev_info['velodyne_path'])
if if_prev_exists:
prev_points = np.fromfile(
Path(path) / prev_info['velodyne_path'], dtype=np.float32)
prev_points = np.copy(prev_points).reshape(
-1, pc_info['num_features'])
prev_info['timestamp'] = np.int64(prev_points[0, -1])
prev_pose_path = get_pose_path(
prev_idx,
path,
training,
relative_path=False,
use_prefix_id=True)
prev_info['pose'] = np.loadtxt(prev_pose_path)
sweeps.append(prev_info)
else:
break
info['sweeps'] = sweeps
return info
with futures.ThreadPoolExecutor(num_worker) as executor:
image_infos = executor.map(map_func, image_ids)
return list(image_infos)
def kitti_anno_to_label_file(annos, folder):
folder = Path(folder)
for anno in annos:
image_idx = anno['metadata']['image_idx']
label_lines = []
for j in range(anno['bbox'].shape[0]):
label_dict = {
'name': anno['name'][j],
'alpha': anno['alpha'][j],
'bbox': anno['bbox'][j],
'location': anno['location'][j],
'dimensions': anno['dimensions'][j],
'rotation_y': anno['rotation_y'][j],
'score': anno['score'][j],
}
label_line = kitti_result_line(label_dict)
label_lines.append(label_line)
label_file = folder / f'{get_image_index_str(image_idx)}.txt'
label_str = '\n'.join(label_lines)
with open(label_file, 'w') as f:
f.write(label_str)
def add_difficulty_to_annos(info):
min_height = [40, 25,
25] # minimum height for evaluated groundtruth/detections
max_occlusion = [
0, 1, 2
] # maximum occlusion level of the groundtruth used for evaluation
max_trunc = [
0.15, 0.3, 0.5
] # maximum truncation level of the groundtruth used for evaluation
annos = info['annos']
dims = annos['dimensions'] # lhw format
bbox = annos['bbox']
height = bbox[:, 3] - bbox[:, 1]
occlusion = annos['occluded']
truncation = annos['truncated']
diff = []
    easy_mask = np.ones((len(dims), ), dtype=bool)
    moderate_mask = np.ones((len(dims), ), dtype=bool)
    hard_mask = np.ones((len(dims), ), dtype=bool)
i = 0
for h, o, t in zip(height, occlusion, truncation):
if o > max_occlusion[0] or h <= min_height[0] or t > max_trunc[0]:
easy_mask[i] = False
if o > max_occlusion[1] or h <= min_height[1] or t > max_trunc[1]:
moderate_mask[i] = False
if o > max_occlusion[2] or h <= min_height[2] or t > max_trunc[2]:
hard_mask[i] = False
i += 1
is_easy = easy_mask
is_moderate = np.logical_xor(easy_mask, moderate_mask)
is_hard = np.logical_xor(hard_mask, moderate_mask)
for i in range(len(dims)):
if is_easy[i]:
diff.append(0)
elif is_moderate[i]:
diff.append(1)
elif is_hard[i]:
diff.append(2)
else:
diff.append(-1)
annos['difficulty'] = np.array(diff, np.int32)
return diff
def kitti_result_line(result_dict, precision=4):
prec_float = '{' + ':.{}f'.format(precision) + '}'
res_line = []
all_field_default = OrderedDict([
('name', None),
('truncated', -1),
('occluded', -1),
('alpha', -10),
('bbox', None),
('dimensions', [-1, -1, -1]),
('location', [-1000, -1000, -1000]),
('rotation_y', -10),
('score', 0.0),
])
res_dict = [(key, None) for key, val in all_field_default.items()]
res_dict = OrderedDict(res_dict)
for key, val in result_dict.items():
if all_field_default[key] is None and val is None:
raise ValueError('you must specify a value for {}'.format(key))
res_dict[key] = val
for key, val in res_dict.items():
if key == 'name':
res_line.append(val)
elif key in ['truncated', 'alpha', 'rotation_y', 'score']:
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append(prec_float.format(val))
elif key == 'occluded':
if val is None:
res_line.append(str(all_field_default[key]))
else:
res_line.append('{}'.format(val))
elif key in ['bbox', 'dimensions', 'location']:
if val is None:
res_line += [str(v) for v in all_field_default[key]]
else:
res_line += [prec_float.format(v) for v in val]
else:
raise ValueError('unknown key. supported key:{}'.format(
res_dict.keys()))
return ' '.join(res_line)
the-stack_0_17358 | import os
import queue
import sys
from time import time
import sounddevice as sd
import soundfile as sf
from PySide2.QtCore import QObject, Signal
from ..models.ActualProjectModel import ActualProjectModel as actual_project
from . import file as fileutils
class MicWorker(QObject):
update_volume = Signal(object)
finished = Signal(bool)
@staticmethod
def log(msg):
print(f'[MicWorker] {msg}')
def config_mic(self, input_device, buffer=0):
"""[summary]
Args:
input_device ([type]): index of the actual input mic.
fs (int, optional): Sampling frequency 44.1 kHz.
buffer (int, optional): Defaults to 0 => automatic blocksize
"""
self.io = (input_device, sd.default.device[1])
self.fs = int(sd.query_devices()[input_device]['default_samplerate'])
self.buffer = buffer
self.q = queue.Queue()
self._running = False
self._rec = False
def rec(self):
self.log("Start recording!")
self.start_time = time()
self._rec = True
def stop_rec(self):
self.log("Stopping rec...")
self._rec = False
self._running = False
def stop(self):
self.error = True
self._running = False # to raise the Exception
def run(self):
self.error = False
self.log("Running!")
self._running = True
self.stream = sd.Stream(device=self.io,
channels=2,
samplerate=self.fs,
blocksize=self.buffer,
callback=self.callback)
path = os.path.join(actual_project.project_location, 'Audio Files')
fileutils.mkdir(path)
self.file_stream = sf.SoundFile(os.path.join(path, 'audio.wav'),
mode='w',
samplerate=self.fs,
channels=2)
try:
with self.file_stream as file:
with self.stream:
while self._running:
if self._rec:
file.write(self.q.get())
else:
self.q.get()
if not self._running:
raise KeyboardInterrupt("Recording stopped!")
except KeyboardInterrupt as e:
self.log(e)
except Exception as e:
self.log("Unexpected Exception:", e)
if not self.error:
elapsed = time()-self.start_time
print(f' -> Time spent recording: {round(elapsed,2)}s')
print(f' -> fs = {self.fs}')
print(
f' -> Theoretical num of samples => {round(elapsed*self.fs)}')
self.finished.emit(self.error)
def callback(self, indata, outdata, frames, time, status):
"""This is called (from a separate thread) for each audio block."""
if status:
print(status, file=sys.stderr)
# outdata[:] = indata
# self.update_volume.emit(indata.copy())
# print(indata)
self.q.put(indata.copy())
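# Illustrative wiring (a sketch assuming the usual Qt worker/QThread pattern; the
# QThread import and the signal connections below are not part of this module):
#   worker = MicWorker()
#   worker.config_mic(input_device=1)
#   thread = QThread()
#   worker.moveToThread(thread)
#   thread.started.connect(worker.run)
#   worker.finished.connect(thread.quit)
#   thread.start()
#   worker.rec()       # start writing frames to <project>/Audio Files/audio.wav
#   ...
#   worker.stop_rec()  # stop recording; run() emits finished when it returns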
the-stack_0_17359 | import cv2
# Chapter 1 - Reading in Images, Videos, and WebCam
if __name__ == '__main__':
"""
print("Package imported")
# reading in an image file
img = cv2.imread('static/mugshot.png')
# showing an image
cv2.imshow("Output", img)
# delaying the amount of time the image stays
cv2.waitKey(7000)
"""
"""
# reading in a video file
cap = cv2.VideoCapture('static/forest.mov')
# display the video
while True:
        # read in one image frame from the video at a time
        success, img = cap.read()
cv2.imshow("Video", img)
# event-driven loop early exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
"""
# use the webcam for video
cam = cv2.VideoCapture(0)
# set the dimensions of the video to take
cam.set(3, 640) # 3 is for the width
cam.set(4, 480) # 4 is for the height
cam.set(10, 100) # 10 is for brightness settings
# display the video from the camera - same as before
while True:
        # read in one image frame from the webcam at a time
        success, img = cam.read()
cv2.imshow("Video", img)
# event-driven loop early exit
if cv2.waitKey(1) & 0xFF == ord('q'):
break
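    # Release the camera and close the preview window once the loop exits
    cam.release()
    cv2.destroyAllWindows()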