the-stack_0_16567
#!/usr/bin/env python3.6
"""
TODO:
- docs/source links
- how to find the source of a builtin module?
- header
- note that this is run on a Posix machine
- sys.platform ?
- footer
- include proposed additions to pathlib
- P for proposed?
"""
import collections
import functools
import inspect
import itertools
import os as _os
import os.path as _ospath
import re
import shutil as _shutil
import pathlib as _pathlib
import textwrap
import path as _path
#import trio as _trio
meta = {}
meta['os'] = {
'source': 'https://github.com/python/cpython/tree/3.6/Lib/os.py',
'docs': 'https://docs.python.org/3/library/os.html',
'docsbaseurl': 'https://docs.python.org/3/library/os.html#os.',
}
meta['os.path'] = {
'source': [
'https://github.com/python/cpython/blob/3.6/Lib/posixpath.py',
'https://github.com/python/cpython/tree/3.6/Lib/ntpath.py',
'https://github.com/python/cpython/tree/3.6/Lib/macpath.py'],
'docs': 'https://docs.python.org/3/library/os.path.html',
'docsbaseurl': 'https://docs.python.org/3/library/os.path.html#os.path.',
}
meta['shutil'] = {
'source': 'https://github.com/python/cpython/tree/3.6/Lib/shutil.py',
'docs': 'https://docs.python.org/3/library/shutil.html',
'docsbaseurl': 'https://docs.python.org/3/library/shutil.html#shutil.',
}
meta['pathlib'] = {
'source': 'https://github.com/python/cpython/blob/3.6/Lib/pathlib.py',
'docs': 'https://docs.python.org/3/library/pathlib.html',
'docsbaseurl': 'https://docs.python.org/3/library/pathlib.html#pathlib.Path.', # pathlib.PurePath.
}
meta['pathpy'] = {
'source': [
'https://github.com/jaraco/path.py/blob/master/path.py',
],
'src': 'https://github.com/jaraco/path.py',
'docs': 'https://pathpy.readthedocs.io/en/latest/',
'docsbaseurl': 'https://pathpy.readthedocs.io/en/latest/api.html#path.Path.',
}
meta['trio'] = {
'source': 'https://github.com/python-trio/trio/blob/master/trio/_path.py',
'docs': 'https://trio.readthedocs.io/en/latest/reference-io.html#trio.Path',
'src': 'https://github.com/python-trio/trio',
'docsbaseurl': 'https://trio.readthedocs.io/en/latest/reference-io.html#trio.Path.'
}
def maybe_list(obj):
if isinstance(obj, (tuple, list)):
return obj
return (obj,)
def print_header__modules():
print('Modules')
print('+++++++++')
for key, data in meta.items():
print('- %s' % key)
print('')
for url in maybe_list(data.get('src', [])):
print(' - Src: %s' % url)
for url in maybe_list(data['source']):
print(' - Source: %s' % url)
for url in maybe_list(data['docs']):
print(' - Docs: %s' % url)
print('')
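# Sample of the reStructuredText emitted above (illustrative, abbreviated):
#
#   Modules
#   +++++++++
#   - os
#
#     - Source: https://github.com/python/cpython/tree/3.6/Lib/os.py
#     - Docs: https://docs.python.org/3/library/os.html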
mappings = {}
mappings['os'] = {
'unlink': {
'pathpy': 'remove',
'os': 'remove',
},
'lstat': {
'os': 'stat',
'pathlib': 'stat',
}
}
mappings['pathpy'] = {
# 'getcwd': {
# 'os': ['getcwdu', 'getcwdb'],
# },
'__div__': {
'os.path': 'join',
'pathlib': 'joinpath',
'pathpy': ['__rdiv__', 'joinpath'],
},
'__rdiv__': {
'os.path': 'join',
'pathlib': 'joinpath',
'pathpy': ['__div__', 'joinpath'],
},
'cd': {
'os': 'chdir',
},
'getsize': {
'pathpy': 'size'
},
'lines': {
'pathpy': 'text',
},
'name': {
'os.path': 'basename',
'pathpy': 'basename',
},
'parent': {
'os.path': 'dirname',
'pathpy': 'dirname',
},
'read_md5': {
'pathpy': 'read_hash',
},
'readlink': {
'pathpy': 'readlinkabs',
},
'readlinkabs': {
'os': 'readlink',
},
'splitpath': {
'pathpy': ['parent', 'name'],
'os.path': 'split',
},
'stat': {
'pathpy': 'lstat',
},
'size': {
'os.path': 'getsize',
},
'unlink': {
'pathpy': 'remove',
'os': 'remove',
},
'unlink_p': {
'pathpy': 'remove_p',
}
}
mappings['pathlib'] = {
'atime': {
'pathpy': 'getatime',
'os.path': 'getatime',
},
'ctime': {
'pathpy': 'getctime',
'os.path': 'getctime',
},
'mtime': {
'pathpy': 'getmtime',
'os.path': 'getmtime',
},
'cwd': {
'pathpy': 'getcwd',
'os': 'getcwd',
},
'name': {
'os.path': 'basename',
'pathpy': 'basename',
},
'owner': {
'pathpy': 'get_owner',
},
'parent': {
'os.path': 'dirname',
'pathpy': ['parent', 'dirname'],
},
'stat': {
'pathlib': 'lstat',
},
'is_absolute': {
'pathlib': 'absolute',
'pathpy': 'isabs',
'os.path': 'isabs',
},
'is_file': {
'pathpy': 'isfile',
'os.path': 'isfile',
},
'is_dir': {
'pathpy': 'isdir',
'os.path': 'isdir',
},
'is_symlink': {
'pathpy': 'islink',
'os.path': 'islink',
},
'joinpath': {
'os.path': 'join'
},
'iterdir': {
'pathpy': 'listdir',
},
# TODO
}
def build_seealso(mappings=mappings):
"""
Kwargs:
mappings (dict): ``{'pathlib': {'is_abs': {'pathpy': 'isabs'}}}``
Returns:
dict[attrname] = {destattr: [modnames]}
"""
seealso = {}
for mapsetname, mappingset in mappings.items():
for attrname, mappings in mappingset.items():
for modname, destattrs in mappings.items():
for destattr in maybe_list(destattrs):
seealso.setdefault(attrname, {}).setdefault(destattr, {}).setdefault(modname, True)
seealso.setdefault(destattr, {}).setdefault(attrname, {}).setdefault(mapsetname, True)
return seealso
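# Illustrative example (hypothetical input): given
#   mappings = {'pathlib': {'is_file': {'pathpy': 'isfile'}}}
# build_seealso() returns the symmetric lookup
#   {'is_file': {'isfile': {'pathpy': True}},
#    'isfile': {'is_file': {'pathlib': True}}}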
_Thing = collections.namedtuple('Thing',
('name', 'signature', 'docstring', 'source', 'iscallable', 'attr', 'obj'))
class Thing(_Thing):
pass
def get_signatures(obj, additional_attrs=None):
attrs = sorted(x for x in dir(obj) if not x.startswith('_'))
if additional_attrs:
attrs = sorted(attrs + additional_attrs)
for attrname in attrs:
try:
attr = getattr(obj, attrname)
except AttributeError:
continue
if inspect.isbuiltin(attr):
iscallable = True
try:
signature = inspect.signature(attr)
except ValueError:
signature = attr.__class__ #TODO
docstring = inspect.getdoc(attr)
# source = inspect.getsource(attr)
source = None
elif (inspect.isfunction(attr) or inspect.ismethod(attr)):
iscallable = True
signature = inspect.signature(attr)
docstring = inspect.getdoc(attr)
source = inspect.getsource(attr)
elif isinstance(attr, functools.partial):
iscallable = True
signature = inspect.signature(attr)
docstring = inspect.getdoc(attr)
source = None # inspect.getsource(attr)
else:
iscallable = False
signature = ''
docstring = inspect.getdoc(attr) # TODO
source = None
yield attrname, Thing(
name=attrname,
signature=signature,
docstring=docstring,
source=source,
iscallable=iscallable,
attr=attr,
obj=obj)
def build_methods():
methods = {}
methods['os'] = dict(get_signatures(_os))
methods['os.path'] = dict(get_signatures(_ospath))
methods['shutil'] = dict(get_signatures(_shutil))
methods['pathlib'] = dict(get_signatures(_pathlib.Path))
methods['pathpy'] = dict(get_signatures(_path.Path,
['__div__', '__rdiv__']))
# methods['trio'] = dict(get_signatures(_trio.Path))
return methods
def build_sets(methods):
sets = {}
sets['union'] = (
set(methods['pathlib'])
.union(methods['pathpy'])
#.union(methods['os'])
.union(methods['os.path'])
#.union(methods['shutil'])
)
sets['union'].difference_update((
'sys',
'supports_unicode_filenames',
'genericpath',
#'sameopenfile',
#'samestat',
#'extsep',
#'pathsep',
))
sets['union'] = sorted(sets['union'])
sets['pathlib_and_pathpy'] = sorted(
set(methods['pathlib']).intersection(methods['pathpy']))
sets['pathlib_not_pathpy'] = sorted(
set(methods['pathlib']).difference(methods['pathpy']))
sets['pathpy_not_pathlib'] = sorted(
set(methods['pathpy']).difference(methods['pathlib']))
# sets['pathlib_not_trio'] = sorted(
# set(methods['pathlib']).difference(methods['trio']))
return sets
methods = build_methods()
sets = build_sets(methods=methods)
def print_report_header():
print('')
print('==================================')
print('Python file methods and attributes')
print('==================================')
print('')
print('- Objective: Identify and compare Python file '
'functions/methods and attributes from '
'os, os.path, shutil, pathlib, path.py, and trio')
print('- Source: https://github.com/westurner/pyfilemods')
print('- Docs: https://westurner.github.io/pyfilemods/')
print('')
print('Contents')
print('++++++++')
print('.. contents::')
print('')
print_header__modules()
print_report_header()
def print_table(sets=sets, methods=methods):
hdr = '================== == ======= ====== ======= ======='
print(hdr)
print('attr os os.path shutil pathlib path.py')
print(hdr)
for attr in sets['union']:
print('%-18s %-2s %-7s %-6s %-8s %-7s' % (
'`%s`_' % attr,
'X' if attr in methods['os'] else ' ',
'X' if attr in methods['os.path'] else ' ',
'X' if attr in methods['shutil'] else ' ',
'X' if attr in methods['pathlib'] else ' ',
'X' if attr in methods['pathpy'] else ' ',
#'X' if attr in methods['trio'] else ' '
))
print(hdr)
print('')
print('Sets')
print('++++')
print('')
print('attr table')
print('==========')
print('')
print_table(sets=sets, methods=methods)
def print_thing(varname, sets=sets):
print(varname)
print('='*len(varname))
_var = sets[varname]
for x in _var:
print('- `%s`_' % x)
setnames = [
'pathlib_and_pathpy',
'pathlib_not_pathpy',
'pathpy_not_pathlib',
#'pathlib_not_trio',
]
for x in setnames:
print_thing(x, sets=sets)
print('')
def indent(text, n, char=' '):
if not text:
return text
return textwrap.indent(text, char*n)
print('')
print('attrs')
print('+++++')
print('')
def print_code(obj, attr):
_attr = getattr(obj, attr)
if obj and _attr:
print('')
print('.. code:: python')
#print(' :class: highlight')
print('')
print(indent(_attr, 4))
print('')
def fmtsignature(obj):
if obj is None:
return '``None``'
if obj.iscallable:
if not obj.signature:
return ' '
return '``%s``' % (re.sub(
r'<function (.*?) at 0x[\da-f]+>',
r'<function \1 at 0x...>',
str(obj.signature), 1))
else:
return '*attribute*'
modnames = ['os', 'os.path', 'shutil', 'pathlib', 'pathpy'] # , 'trio']
def print_attr_methods(sets=sets, methods=methods, modnames=modnames):
seealso = build_seealso(mappings=mappings)
for method in sets['union']:
methodstr = '``{}``'.format(method)
print(methodstr)
print('=' * (len(methodstr)+1))
attrs = {}
for modname in modnames:
attrs[modname] = methods[modname].get(method)
for name in modnames:
obj = attrs[name]
if obj and obj.signature:
print('| **%s.%s**\ %s' % (name, method, fmtsignature(obj)))
print('')
_seealso = seealso.get(method, {})
if _seealso:
seealsostrs = {m: list() for m in modnames}
for methodname, mods in _seealso.items():
for mod in mods:
seealsostrs[mod].append(
'`%s <#%s>`_' % (
'%s.%s' % (mod, methodname),
methodname.replace('_', '-').strip('-')))
print('| seealso: %s' % ', '.join(
itertools.chain.from_iterable(
(sorted(v) for v in seealsostrs.values()))))
print(' ')
for modname in modnames:
metadata = meta[modname]
obj = attrs[modname]
if obj:
print('| **%s.%s**%s:' % (
modname, method,
('\ %s' % fmtsignature(obj) if obj and obj.signature
else '')))
source_links = [
'`source (%s) <%s>`__' % (_ospath.basename(l), l)
for l in maybe_list(metadata['source'])]
print('| `docs <%s%s>`__ %s' % (
metadata['docsbaseurl'],
method,
' '.join(source_links)))
if obj.source:
print_code(obj, 'source')
else:
print_code(obj, 'docstring')
print('')
print_attr_methods(sets=sets, methods=methods)
if __name__ == '__main__':
import sys
if '-i' in sys.argv:
import ipdb
ipdb.set_trace()
the-stack_0_16568
# qubit number=5
# total number=44
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
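# Illustrative check (assumed behaviour, not executed here): for n = 2 and
# f = lambda rep: "1" if rep == "11" else "0", build_oracle(2, f) adds no X
# gates for the marked string, so the mcu1(pi, ...) phase flip lands only on
# the |11> basis state; for any other marked string the surrounding X gates
# map it onto |11...1> first and back afterwards.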
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.cx(input_qubit[3],input_qubit[0]) # number=32
prog.z(input_qubit[3]) # number=33
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.11938052083641225,input_qubit[1]) # number=36
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(1.4765485471872026,input_qubit[2]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.x(input_qubit[4]) # number=30
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.rx(0.45238934211692994,input_qubit[3]) # number=38
prog.y(input_qubit[1]) # number=39
prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=23
prog.cx(input_qubit[0],input_qubit[3]) # number=24
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[2]) # number=26
prog.x(input_qubit[2]) # number=27
prog.h(input_qubit[4]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
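# Worked example for the iteration count used above (with n = 5 as in __main__):
# repeat = floor(sqrt(2 ** 5) * pi / 4) = floor(5.657 * 0.785) = floor(4.44) = 4,
# so the Zf oracle gate is appended four times.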
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1353.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
the-stack_0_16570
import torch
import torch.nn as nn
import torch.nn.functional as F
# Based on
# https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641
class DepthSmoothnessLoss(nn.Module):
r"""Criterion that computes image-aware depth smoothness loss.
.. math::
\text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \|
\partial_x I_{ij} \right \|} + \left |
\partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|}
Shape:
- Depth: :math:`(N, 1, H, W)`
- Image: :math:`(N, 3, H, W)`
- Output: scalar
Examples::
>>> depth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> smooth = tgm.losses.DepthSmoothnessLoss()
>>> loss = smooth(depth, image)
"""
def __init__(self) -> None:
super(DepthSmoothnessLoss, self).__init__()
@staticmethod
def gradient_x(img: torch.Tensor) -> torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :, :-1] - img[:, :, :, 1:]
@staticmethod
def gradient_y(img: torch.Tensor) -> torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :-1, :] - img[:, :, 1:, :]
def forward(self, depth: torch.Tensor, image: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(depth):
raise TypeError("Input depth type is not a torch.Tensor. Got {}"
.format(type(depth)))
if not torch.is_tensor(image):
raise TypeError("Input image type is not a torch.Tensor. Got {}"
.format(type(image)))
if not len(depth.shape) == 4:
raise ValueError("Invalid depth shape, we expect BxCxHxW. Got: {}"
.format(depth.shape))
if not len(image.shape) == 4:
raise ValueError("Invalid image shape, we expect BxCxHxW. Got: {}"
.format(image.shape))
if not depth.shape[-2:] == image.shape[-2:]:
raise ValueError("depth and image shapes must be the same. Got: {}"
.format(depth.shape, image.shape))
if not depth.device == image.device:
            raise ValueError(
                "depth and image must be on the same device. Got: {} and {}".format(
                    depth.device, image.device))
if not depth.dtype == image.dtype:
            raise ValueError(
                "depth and image must have the same dtype. Got: {} and {}".format(
                    depth.dtype, image.dtype))
# compute the gradients
depth_dx: torch.Tensor = self.gradient_x(depth)
depth_dy: torch.Tensor = self.gradient_y(depth)
image_dx: torch.Tensor = self.gradient_x(image)
image_dy: torch.Tensor = self.gradient_y(image)
# compute image weights
weights_x: torch.Tensor = torch.exp(
-torch.mean(torch.abs(image_dx), dim=1, keepdim=True))
weights_y: torch.Tensor = torch.exp(
-torch.mean(torch.abs(image_dy), dim=1, keepdim=True))
# apply image weights to depth
smoothness_x: torch.Tensor = torch.abs(depth_dx * weights_x)
smoothness_y: torch.Tensor = torch.abs(depth_dy * weights_y)
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
######################
# functional interface
######################
def depth_smoothness_loss(
depth: torch.Tensor,
image: torch.Tensor) -> torch.Tensor:
r"""Computes image-aware depth smoothness loss.
See :class:`~torchgeometry.losses.DepthSmoothnessLoss` for details.
"""
return DepthSmoothnessLoss()(depth, image)
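# Minimal usage sketch (mirrors the class docstring example; tensor sizes are
# arbitrary as long as depth and image share the same spatial dimensions):
#   depth = torch.rand(1, 1, 4, 5)
#   image = torch.rand(1, 3, 4, 5)
#   loss = depth_smoothness_loss(depth, image)  # scalar tensor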
the-stack_0_16571
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 07 13:30:00 2020
@author: Alan J.X. Guo
"""
import argparse
import scipy.io as sio
import numpy as np
import random
import os
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="3"
import sys
sys.path.append('./VCA')
from VCA import vca
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Dense, Softmax, Conv1D, Flatten, Add, MaxPooling1D
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
class En_De(keras.layers.Layer):
def __init__(self, endmembers_init, **kwargs):
self.emb_init = np.copy(endmembers_init)
self.channels = self.emb_init.shape[-1]
super(En_De, self).__init__(**kwargs)
def build(self, input_shape):
self.emb_wt = self.add_weight(name='emb_wt',
shape=self.emb_init.shape,
initializer=tf.constant_initializer(self.emb_init),
trainable=True)
super(En_De, self).build(input_shape) # Be sure to call this at the end
def call(self, inputs):
return [K.dot(inputs,self.emb_wt),tf.einsum('ij,jk->ijk',inputs,self.emb_wt)]
def compute_output_shape(self, input_shape):
return [(input_shape[0], self.channels),(input_shape[0],input_shape[1],self.channels)]
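# Shape sketch for En_De (illustrative): with abundance inputs of shape
# (batch, R) and endmember weights emb_wt of shape (R, CHANNELS), call()
# returns [(batch, CHANNELS), (batch, R, CHANNELS)] -- the linear-mixing
# reconstruction and the per-endmember contributions, respectively.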
parser = argparse.ArgumentParser()
parser.add_argument('-p','--path',
help='Path of HSI datamat')
parser.add_argument('-k','--key',
default=None,
help='key of the HSI tensor in the matlab datamat, valid when using *.mat file')
args = parser.parse_args()
if os.path.splitext(args.path)[-1] == '.npy':
print('load {0}.'.format(args.path))
data_mat = np.load(args.path)
elif os.path.splitext(args.path)[-1] == '.mat':
print('load {0} from {1}.'.format(args.key,args.path))
data_mat = sio.loadmat(args.path)
assert args.key in data_mat
data_mat = data_mat[args.key]
def abs_softmax(x):
return tf.math.abs(x)/tf.math.reduce_sum(tf.math.abs(x),
axis=-1,
keepdims=True)
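# Example (illustrative): abs_softmax on [-1., 1., 2.] gives [0.25, 0.25, 0.5],
# i.e. non-negative abundance weights that sum to one along the last axis.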
R = 16
CHANNELS = data_mat.shape[-1]
LAMBDA = 0.5
EPOCHS = 200
BATCH_SIZE = 256
vca_x = (data_mat.reshape(-1,CHANNELS).T-np.min(data_mat))/np.max(data_mat)
endmembers, no, reconstruct = vca(vca_x,R)
inputs = Input(shape=(CHANNELS,1))
e1 = Conv1D(512,3,data_format='channels_last',use_bias=True,activation='relu')(inputs)
e2 = Conv1D(128,3,data_format='channels_last',use_bias=True,activation='relu')(e1)
e2 = Flatten()(e2)
e3 = Dense(R,activation=abs_softmax)(e2)
ende = En_De(endmembers.T)
de, de_spand = ende(e3)
d1 = Conv1D(256,1,data_format='channels_first',use_bias=True, activation='relu')(de_spand)
d2 = Conv1D(256,1,data_format='channels_first',use_bias=True, activation='relu')(d1)
d3 = Conv1D(16,1,data_format='channels_first',use_bias=True, activation='relu')(d2)
d4 = Conv1D(16,1,data_format='channels_first',use_bias=True, activation='relu')(d3)
d5 = Conv1D(1,1,data_format='channels_first',use_bias=True, activation='linear')(d4)
d5 = Flatten()(d5)
output = Add()([d5*(1-LAMBDA),de*LAMBDA])
autoencoder = keras.models.Model(inputs=inputs, outputs=output)
ae_x = np.copy(vca_x.T)
np.random.shuffle(ae_x)
ae_x = ae_x[:,:,np.newaxis]
optimizer = keras.optimizers.Adam(lr=0.001)
ende.trainable = True
autoencoder.compile(optimizer, loss='mean_squared_error')
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=30,
monitor='loss',
min_delta=1e-8,
min_lr=1e-6,verbose=True)
earlystopping = EarlyStopping(monitor='loss', min_delta=1e-8, patience=50,
verbose=1, mode='auto', baseline=None,
restore_best_weights=True)
callbacks = [lr_reducer, earlystopping]
history = autoencoder.fit(x=[ae_x],y=[ae_x],batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1, callbacks=callbacks,
shuffle=True)
re = autoencoder.predict(ae_x,batch_size=1024)
diff = re - ae_x.reshape(re.shape)
print("reconstruction error: {0}".format(np.mean(np.mean(np.square(diff),axis=1))))
encoder = keras.models.Model(inputs=inputs, outputs=e3)
abundance = encoder.predict(x=[ae_x],batch_size=1024)
shape = list(data_mat.shape)
shape[-1] = R
abundance = abundance.reshape(shape)
save_path = os.path.splitext(args.path)[0] + '_abundance.npy'
np.save(save_path,abundance)
print('abundance saved to {0}.'.format(save_path))
the-stack_0_16572
#!/usr/bin/env python
from django.conf.urls import patterns, url
urlpatterns = patterns(
'pyhn.apps.account.views',
url(r'^$', 'index', name='index'),
url(r'^login/$', 'login', name='login'),
)
urlpatterns += patterns(
'django.contrib.auth.views',
url(r'^logout/$', 'logout', {'next_page': '/'}, 'logout'),
)
the-stack_0_16575
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
import sys
import stylesheet
import yaml
from random import shuffle
import meal
from meal import Meal
from functools import partial
import logging
from imp import reload
reload(meal)
class Window(QDialog):
days = ['Sunday',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday']
path = 'meals2.yaml'
def __init__(self):
super(Window, self).__init__()
self.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowMinMaxButtonsHint)
self.setStyleSheet(stylesheet.main())
self.createUI()
self.createMeals()
self.resize(1200, 600)
def createUI(self):
self.mealUI = {}
self.meals = []
mainLayout = QVBoxLayout()
daysLayout = QHBoxLayout()
self.create = QPushButton('Create Meal Plan')
self.create.clicked.connect(self.createMenu)
for day in self.days:
label = QLabel(day)
self.mealUI[day] = QTextEdit()
refresh = QPushButton('Refresh')
healthier = QPushButton('Healthier')
healthier.clicked.connect(partial(self.healthier, day))
easier = QPushButton('Easier')
easier.clicked.connect(partial(self.easier, day))
layout = QVBoxLayout()
layout.addWidget(label)
layout.addWidget(self.mealUI[day])
layout.addWidget(refresh)
layout.addWidget(healthier)
layout.addWidget(easier)
daysLayout.addLayout(layout)
mainLayout.addWidget(self.create)
mainLayout.addLayout(daysLayout)
self.setLayout(mainLayout)
def createMeals(self):
with open(self.path, 'r') as f:
data = yaml.safe_load(f)
for item in data:
meal = Meal(item['name'])
meal.set_protein(item['protein'])
meal.set_health(item['health'])
meal.set_frequency(item['frequency'])
meal.set_difficulty(item['difficulty'])
self.meals.append(meal)
def createMenu(self):
# Generate list of meals
masterList = []
for meal in self.meals:
# Assign Multiples by Frequency
for i in range(meal.get_frequency()):
masterList.append(meal)
# Randomize List
shuffle(masterList)
        # Get First 7 Meals
weeklyMeals = []
i = 0
fish = False
for meal in masterList:
if i == 7:
break
if meal not in weeklyMeals:
# Only 1 Fish a Week
if meal.get_protein() == 'Fish':
if not fish:
fish = True
weeklyMeals.append(meal)
i += 1
else:
weeklyMeals.append(meal)
i += 1
# Add to UI
i=0
for day in self.days:
self.mealUI[day].setText(weeklyMeals[i].get_name())
i+=1
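    # Illustrative behaviour of the frequency weighting above: a meal with
    # frequency 3 is added to masterList three times, so after shuffling it is
    # roughly three times as likely to be picked for the week as a
    # frequency-1 meal; at most one 'Fish' protein is kept per week.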
def refresh(self, day):
pass
def healthier(self, day):
origMeals = {}
for d in self.days:
origMeals[d] = [m for m in self.meals if m.get_name() == self.mealUI[d].toPlainText()][0]
origMeal = origMeals[day]
# Generate list of meals
masterList = []
for meal in self.meals:
# Assign Multiples by Frequency
for i in range(meal.get_frequency()):
masterList.append(meal)
# Randomize List
shuffle(masterList)
        masterList = [m for m in masterList if m.get_health() > origMeal.get_health() and m not in origMeals.values()]
if not masterList:
logging.error('No Healthier Meal Found')
return
self.mealUI[day].clear()
self.mealUI[day].setText(masterList[0].get_name())
def easier(self, day):
origMeals = {}
for d in self.days:
origMeals[d] = [m for m in self.meals if m.get_name() == self.mealUI[d].toPlainText()][0]
origMeal = origMeals[day]
# Generate list of meals
masterList = []
for meal in self.meals:
# Assign Multiples by Frequency
for i in range(meal.get_frequency()):
masterList.append(meal)
# Randomize List
shuffle(masterList)
        masterList = [m for m in masterList if m.getDifficulty() < origMeal.getDifficulty() and m not in origMeals.values()]
if not masterList:
logging.error('No Easier Meal Found')
return
self.mealUI[day].clear()
self.mealUI[day].setText(masterList[0].get_name())
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
the-stack_0_16576
__author__ = 'bmiller'
'''
This is the start of something that behaves like
the unittest module from cpython.
'''
import re
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case):
self.test_case = test_case
self.expected = expected
self.exception = None
def _is_subtype(self, expected, basetype):
if isinstance(expected, tuple):
return all(self._is_subtype(e, basetype) for e in expected)
return isinstance(expected, type) and issubclass(expected, basetype)
def handle(self, args, kwargs):
"""
If args is empty, assertRaises is being used as a
context manager, so return self.
If args is not empty, call a callable passing positional and keyword
arguments.
"""
try:
if not self._is_subtype(self.expected, BaseException):
raise TypeError('assertRaises() arg 1 must be an exception type or tuple of exception types')
if not args:
return self
callable_obj = args[0]
args = args[1:]
with self:
callable_obj(*args, **kwargs)
finally:
# bpo-23890: manually break a reference cycle
self = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
res = True
feedback = ""
self.exception = exc_value
try:
act_exc = exc_type.__name__
except AttributeError:
act_exc = str(exc_type)
try:
exp_exc = self.expected.__name__
except AttributeError:
exp_exc = str(self.expected)
if exc_type is None:
res = False
feedback = "{} not raised".format(exp_exc)
elif not issubclass(exc_type, self.expected):
res = False
feedback = "Expected {} but got {}".format(exp_exc, act_exc)
self.test_case.appendResult(res, act_exc, exp_exc, feedback)
return True
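# Usage sketch (illustrative): within a TestCase subclass either form works:
#   self.assertRaises(ValueError, int, "not a number")
#   with self.assertRaises(ValueError):
#       int("not a number")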
class TestCase(object):
def __init__(self):
self.numPassed = 0
self.numFailed = 0
self.assertPassed = 0
self.assertFailed = 0
self.verbosity = 1
self.tlist = []
testNames = {}
for name in dir(self):
if name[:4] == 'test' and name not in testNames:
self.tlist.append(getattr(self,name))
testNames[name]=True
def setUp(self):
pass
def tearDown(self):
pass
def cleanName(self,funcName):
return funcName.__func__.__name__
def main(self):
for func in self.tlist:
if self.verbosity > 1:
print('Running %s' % self.cleanName(func))
try:
self.setUp()
self.assertPassed = 0
self.assertFailed = 0
func()
self.tearDown()
if self.assertFailed == 0:
self.numPassed += 1
else:
self.numFailed += 1
print('Tests failed in %s ' % self.cleanName(func))
except Exception as e:
self.assertFailed += 1
self.numFailed += 1
print('Test threw exception in %s (%s)' % (self.cleanName(func), e))
self.showSummary()
def assertEqual(self, actual, expected, feedback=""):
res = actual==expected
if not res and feedback == "":
feedback = "Expected %s to equal %s" % (str(actual),str(expected))
self.appendResult(res, actual ,expected, feedback)
def assertNotEqual(self, actual, expected, feedback=""):
res = actual != expected
if not res and feedback == "":
feedback = "Expected %s to not equal %s" % (str(actual),str(expected))
self.appendResult(res, actual, expected, feedback)
def assertTrue(self,x, feedback=""):
res = bool(x) is True
if not res and feedback == "":
feedback = "Expected %s to be True" % (str(x))
self.appendResult(res, x, True, feedback)
def assertFalse(self,x, feedback=""):
res = not bool(x)
if not res and feedback == "":
feedback = "Expected %s to be False" % (str(x))
self.appendResult(res, x, False, feedback)
def assertIs(self,a,b, feedback=""):
res = a is b
if not res and feedback == "":
feedback = "Expected %s to be the same object as %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertIsNot(self,a,b, feedback=""):
res = a is not b
if not res and feedback == "":
feedback = "Expected %s to not be the same object as %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertIsNone(self,x, feedback=""):
res = x is None
if not res and feedback == "":
feedback = "Expected %s to be None" % (str(x))
self.appendResult(res, x, None, feedback)
def assertIsNotNone(self,x, feedback=""):
res = x is not None
if not res and feedback == "":
feedback = "Expected %s to not be None" % (str(x))
self.appendResult(res, x, None, feedback)
def assertIn(self, a, b, feedback=""):
res = a in b
if not res and feedback == "":
feedback = "Expected %s to be in %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertNotIn(self, a, b, feedback=""):
res = a not in b
if not res and feedback == "":
feedback = "Expected %s to not be in %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertIsInstance(self,a,b, feedback=""):
res = isinstance(a,b)
if not res and feedback == "":
feedback = "Expected %s to be an instance of %s" % (str(a), str(b))
self.appendResult(res, a, b, feedback)
def assertNotIsInstance(self,a,b, feedback=""):
res = not isinstance(a,b)
if not res and feedback == "":
feedback = "Expected %s to not be an instance of %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertRegex(self, text, expected_regex, feedback=""):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, )): #bytes
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
res = False
feedback = "Regex didn't match: %r not found in %r" % (
repr(expected_regex), text)
else:
res = True
self.appendResult(res, text, expected_regex, feedback)
def assertNotRegex(self, text, unexpected_regex, feedback=""):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, )): # bytes
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
feedback = 'Regex matched: %r matches %r in %r' % (
text[match.start() : match.end()],
repr(unexpected_regex),
text)
# _formatMessage ensures the longMessage option is respected
self.appendResult(not bool(match), text, unexpected_regex, feedback)
def assertAlmostEqual(self, a, b, places=7, feedback="", delta=None):
if delta is not None:
res = abs(a-b) <= delta
else:
if places is None:
places = 7
res = round(a-b, places) == 0
if not res and feedback == "":
feedback = "Expected %s to equal %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertNotAlmostEqual(self, a, b, places=7, feedback="", delta=None):
if delta is not None:
res = not (a == b) and abs(a - b) > delta
else:
if places is None:
places = 7
res = round(a-b, places) != 0
if not res and feedback == "":
feedback = "Expected %s to not equal %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertGreater(self,a,b, feedback=""):
res = a > b
if not res and feedback == "":
feedback = "Expected %s to be greater than %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertGreaterEqual(self,a,b, feedback=""):
res = a >= b
if not res and feedback == "":
feedback = "Expected %s to be >= %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertLess(self, a, b, feedback=""):
res = a < b
if not res and feedback == "":
feedback = "Expected %s to be less than %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertLessEqual(self,a,b, feedback=""):
res = a <= b
if not res and feedback == "":
feedback = "Expected %s to be <= %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def appendResult(self,res,actual,expected,feedback):
if res:
msg = 'Pass'
self.assertPassed += 1
else:
msg = 'Fail: ' + feedback
print(msg)
self.assertFailed += 1
def assertRaises(self, expected_exception, *args, **kwargs):
context = _AssertRaisesContext(expected_exception, self)
try:
return context.handle(args, kwargs)
finally:
# bpo-23890: manually break a reference cycle
context = None
def fail(self, msg=None):
if msg is None:
msg = 'Fail'
else:
msg = 'Fail: ' + msg
print(msg)
self.assertFailed += 1
def showSummary(self):
pct = self.numPassed / (self.numPassed+self.numFailed) * 100
print("Ran %d tests, passed: %d failed: %d\n" % (self.numPassed+self.numFailed,
self.numPassed, self.numFailed))
def main(verbosity=1):
glob = globals() # globals() still needs work
for name in glob:
if type(glob[name]) == type and issubclass(glob[name], TestCase):
try:
tc = glob[name]()
tc.verbosity = verbosity
tc.main()
except:
print("Uncaught Error in: ", name)
the-stack_0_16578
#!/usr/bin/env python
"""give me some AFOS data please"""
from __future__ import print_function
import cgi
import unittest
from pyiem.util import get_dbconn, ssw
def pil_logic(s):
"""Convert the CGI pil value into something we can query
Args:
s (str): The CGI variable wanted
Returns:
      list of PILs to send to the database"""
if s == '':
return []
s = s.upper()
pils = []
if s.find(",") == -1:
pils.append(s)
else:
pils = s.split(",")
res = []
for pil in pils:
if pil[:3] == "WAR":
for q in ['FLS', 'FFS', 'AWW', 'TOR', 'SVR', 'FFW', 'SVS',
'LSR', 'SPS', 'WSW', 'FFA', 'WCN']:
res.append("%s%s" % (q, pil[3:6]))
else:
res.append("%6.6s" % (pil.strip() + ' ', ))
return res
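# Illustrative examples (see also TestRetrieve below):
#   pil_logic("AFDDMX") -> ['AFDDMX']
#   pil_logic("WAREWX") -> the 12 warning products for site EWX,
#                          e.g. 'FLSEWX', 'TOREWX', 'SVREWX', ...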
def main():
"""Process the request"""
# Attempt to keep the file from downloading and just displaying in chrome
form = cgi.FieldStorage()
pils = pil_logic(form.getfirst('pil', ''))
try:
limit = int(form.getfirst('limit', 1))
except ValueError:
limit = 1
center = form.getfirst('center', '')[:4]
sdate = form.getfirst('sdate', '')[:10]
edate = form.getfirst('edate', '')[:10]
ttaaii = form.getfirst('ttaaii', '')[:6]
fmt = form.getfirst('fmt', 'text')
ssw("X-Content-Type-Options: nosniff\n")
if form.getfirst('dl') == "1":
ssw("Content-type: application/octet-stream\n")
ssw("Content-Disposition: attachment; filename=afos.txt\n\n")
else:
if fmt == 'text':
ssw("Content-type: text/plain\n\n")
elif fmt == 'html':
ssw("Content-type: text/html\n\n")
if not pils:
ssw("ERROR: No pil specified...")
return
centerlimit = '' if center == '' else (" and source = '%s' " % (center, ))
timelimit = ''
if sdate != '':
timelimit += " and entered >= '%s' " % (sdate, )
if edate != '':
timelimit += " and entered < '%s' " % (edate, )
if pils[0][:3] == 'MTR':
access = get_dbconn('iem', user='nobody')
cursor = access.cursor()
sql = """
SELECT raw from current_log c JOIN stations t
on (t.iemid = c.iemid)
WHERE raw != '' and id = '%s' ORDER by valid DESC LIMIT %s
""" % (pils[0][3:].strip(), limit)
cursor.execute(sql)
for row in cursor:
if fmt == 'html':
ssw("<pre>\n")
else:
ssw("\001\n")
ssw(row[0].replace("\r\r\n", "\n"))
if fmt == 'html':
ssw("</pre>\n")
else:
ssw("\003\n")
if cursor.rowcount == 0:
ssw("ERROR: METAR lookup for %s failed" % (
pils[0][3:].strip(), ))
return
try:
mydb = get_dbconn('afos', user='nobody')
except Exception as _exp: # noqa
ssw('Error Connecting to Database, please try again!\n')
return
cursor = mydb.cursor()
if len(pils) == 1:
pillimit = " pil = '%s' " % (pils[0], )
if len(pils[0].strip()) == 3:
pillimit = " substr(pil, 1, 3) = '%s' " % (pils[0].strip(), )
else:
pillimit = " pil in %s" % (tuple(pils), )
ttlimit = ''
if len(ttaaii) == 6:
ttlimit = " and wmo = '%s' " % (ttaaii, )
# Do optimized query first, see if we can get our limit right away
sql = """
SELECT data, pil,
to_char(entered at time zone 'UTC', 'YYYYMMDDHH24MI') as ts
from products WHERE %s
and entered > now() - '31 days'::interval %s %s %s
ORDER by entered DESC LIMIT %s""" % (pillimit, centerlimit,
timelimit, ttlimit, limit)
cursor.execute(sql)
if cursor.rowcount != limit:
sql = """
SELECT data, pil,
to_char(entered at time zone 'UTC', 'YYYYMMDDHH24MI') as ts
from products WHERE %s %s %s %s
ORDER by entered DESC LIMIT %s """ % (pillimit, centerlimit,
timelimit, ttlimit, limit)
cursor.execute(sql)
for row in cursor:
if fmt == 'html':
ssw((
"<a href=\"/wx/afos/p.php?pil=%s&e=%s\">Permalink</a> "
"for following product: "
) % (row[1], row[2]))
ssw("<br /><pre>\n")
else:
ssw("\001\n")
# Remove control characters from the product as we are including
# them manually here...
ssw((row[0]).replace(
"\003", "").replace("\001\r\r\n", "").replace("\r\r\n", "\n"))
if fmt == 'html':
ssw("</pre><hr>\n")
else:
ssw("\n\003\n")
if cursor.rowcount == 0:
print("ERROR: Could not Find: %s" % (",".join(pils), ))
if __name__ == '__main__':
main()
class TestRetrieve(unittest.TestCase):
"""some tests"""
def test_pil_logic(self):
"""Make sure our pil logic works! """
res = pil_logic("AFDDMX")
assert len(res) == 1
assert res[0] == 'AFDDMX'
res = pil_logic("WAREWX")
assert len(res) == 12
res = pil_logic("STOIA,AFDDMX")
assert res[0] == 'STOIA '
assert res[1] == 'AFDDMX'
the-stack_0_16579
registros = []
def make_album(artista, album, num_musicas=None):
dicionario = {"artista": artista, "album": album}
if num_musicas:
dicionario["num_musicas"] = num_musicas
return dicionario
def print_album(album):
if "num_musicas" in album:
        print(f'Artist: {album["artista"]}, Album: {album["album"]}, Number of Songs: {album["num_musicas"]}')
    else:
        print(f'Artist: {album["artista"]}, Album: {album["album"]}')
while True:
    artista = input("What is the artist's name? ")
    album = input("What is the album name? ")
    num_musica = input("How many songs are on the album? If you don't want to add a number, press \"Enter\". ")
dic_album = make_album(artista, album, num_musica)
print_album(dic_album)
    continuar = input("Do you want to add another record (y/n)? ")
    if continuar.lower() == "n":
        print("See you next time!!!")
break
the-stack_0_16580
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""optimizer"""
from typing import Iterable
import numpy as np
import mindspore
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.ops.operations import _inner_ops as inner
from mindspore.nn.cell import Cell
from mindspore.nn.layer.container import CellList
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor, RowTensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore import log as logger
from mindspore.parallel._utils import _get_global_rank, _get_device_num, _get_parallel_mode
from mindspore.context import ParallelMode
from mindspore import context
from mindspore.nn.learning_rate_schedule import LearningRateSchedule
__all__ = ['Optimizer']
class Optimizer(Cell):
"""
Base class for all optimizers.
Note:
This class defines the API to add Ops to train a model. Never use
this class directly, but instead instantiate one of its subclasses.
Different parameter groups can set different `learning_rate`, `weight_decay` and `grad_centralization`.
When separating parameter groups, the weight decay in each group will be applied on the parameters if the
    weight_decay is positive. For most optimizers, when not separating parameters, the `weight_decay` in the API will
be applied on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.
When separating parameter groups, if you want to centralize the gradient, set grad_centralization to True,
but the gradient centralization can only be applied to the parameters of the convolution layer.
If the parameters of the non convolution layer are set to True, an error will be reported.
To improve parameter groups performance, the customized order of parameters can be supported.
Args:
learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning
rate. When the learning_rate is an Iterable or a Tensor in a 1D dimension, use dynamic learning rate, then
the i-th step will take the i-th value as the learning rate. When the learning_rate is LearningRateSchedule,
use dynamic learning rate, the i-th learning rate will be calculated during the process of training
according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero
dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be
equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float.
parameters (Union[list[Parameter], list[dict]]): When the `parameters` is a list of `Parameter` which will be
updated, the element in `parameters` must be class `Parameter`. When the `parameters` is a list of `dict`,
the "params", "lr", "weight_decay" and "order_params" are the keys can be parsed.
- params: Required. The value must be a list of `Parameter`.
- lr: Optional. If "lr" in the keys, the value of corresponding learning rate will be used.
If not, the `learning_rate` in the API will be used.
- weight_decay: Optional. If "weight_decay" in the keys, the value of corresponding weight decay
will be used. If not, the `weight_decay` in the API will be used.
- order_params: Optional. If "order_params" in the keys, the value must be the order of parameters and
the order will be followed in optimizer. There are no other keys in the `dict` and the parameters which
in the value of 'order_params' must be in one of group parameters.
- grad_centralization: Optional. The data type of "grad_centralization" is Bool. If "grad_centralization"
is in the keys, the set value will be used. If not, the `grad_centralization` is False by default.
This parameter only works on the convolution layer.
weight_decay (Union[float, int]): An int or a floating point value for the weight decay.
It must be equal to or greater than 0.
If the type of `weight_decay` input is int, it will be converted to float. Default: 0.0.
loss_scale (float): A floating point value for the loss scale. It must be greater than 0. If the
type of `loss_scale` input is int, it will be converted to float. Default: 1.0.
Raises:
TypeError: If `learning_rate` is not one of int, float, Tensor, Iterable, LearningRateSchedule.
TypeError: If element of `parameters` is neither Parameter nor dict.
TypeError: If `loss_scale` is not a float.
TypeError: If `weight_decay` is neither float nor int.
ValueError: If `loss_scale` is less than or equal to 0.
ValueError: If `weight_decay` is less than 0.
ValueError: If `learning_rate` is a Tensor, but the dimension of tensor is greater than 1.
Supported Platforms:
``Ascend`` ``GPU``
"""
def __init__(self, learning_rate, parameters, weight_decay=0.0, loss_scale=1.0):
super(Optimizer, self).__init__(auto_prefix=False)
if parameters is not None and not isinstance(parameters, list):
parameters = list(parameters)
if not parameters:
raise ValueError("Optimizer got an empty parameter list.")
if not isinstance(parameters[0], (dict, Parameter)):
raise TypeError("Only a list of Parameter or dict can be supported.")
if isinstance(loss_scale, int):
loss_scale = float(loss_scale)
validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
validator.check_positive_float(loss_scale, "loss_scale", self.cls_name)
self.loss_scale = loss_scale
weight_decay = self._preprocess_weight_decay(weight_decay)
self.grad_centralization = False
self._unique = True
self._target = context.get_context("device_target")
self.dynamic_lr = False
self.assignadd = None
self.global_step = None
self.is_group = False
self.is_group_lr = False
self.is_group_params_ordered = False
learning_rate = self._preprocess_single_lr(learning_rate)
if isinstance(parameters[0], dict):
self.is_group = True
self.group_params = []
self.group_lr = []
self.group_weight_decay = []
self.group_grad_centralization = []
self._init_group_params(parameters, learning_rate, weight_decay, self.grad_centralization)
# The final value of dynamic_lr can be determined after the process of parse_single_lr and init_group_params
if self.dynamic_lr:
self.assignadd = P.AssignAdd()
self.global_step = Parameter(initializer(0, [1], mindspore.int32), name='global_step')
if self.is_group_lr:
self.learning_rate = CellList(self.group_lr, auto_prefix=False) if self.dynamic_lr \
else ParameterTuple(self.group_lr)
else:
self.learning_rate = self._build_single_lr(learning_rate, 'learning_rate')
if self.is_group:
self.parameters = ParameterTuple(self.group_params)
self.weight_decay = tuple(self.group_weight_decay)
self.weight_decay_tensor_tuple = tuple(Tensor(x, mstype.float32) for x in self.group_weight_decay)
decay_filter = lambda x: x > 0
self.decay_flags = tuple(decay_filter(x) for x in self.weight_decay)
self.exec_weight_decay = any(self.decay_flags)
self.grad_centralization_flags = tuple(self.group_grad_centralization)
else:
self.parameters = ParameterTuple(parameters)
self.weight_decay = weight_decay * loss_scale
self.weight_decay_tensor = Tensor(self.weight_decay, mstype.float32)
decay_filter = lambda x: 'beta' not in x.name and 'gamma' not in x.name
self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
self.exec_weight_decay = self.weight_decay > 0
# when a parameter has been unique, there is no need do another unique in optimizer.
for param in self.parameters:
if param.unique:
self._unique = False
break
ps_filter = lambda x: x.is_param_ps
self.ps_parameters = tuple(ps_filter(x) for x in self.parameters)
cache_filter = lambda x: x.cache_enable
self.cache_enable = tuple(cache_filter(x) for x in self.parameters)
self.reciprocal_scale = Tensor(1.0 / loss_scale, mstype.float32)
self.need_scale = loss_scale != 1.0
self.global_step_increase_tensor = Tensor(1, mstype.int32)
self.param_length = len(self.parameters)
self.map_ = C.Map()
self._use_parallel_optimizer()
def _use_parallel_optimizer(self):
"""Indicates whether to use automatic parallelism."""
if context.get_auto_parallel_context("enable_parallel_optimizer"):
if _get_parallel_mode() == ParallelMode.DATA_PARALLEL and context.get_context("device_target") == "Ascend":
self.use_parallel = True
elif _get_parallel_mode() == ParallelMode.DATA_PARALLEL \
and context.get_context("device_target") != "Ascend":
raise RuntimeError("Parallel optimizer only supports Ascend in data parallel mode.")
elif _get_parallel_mode() in (ParallelMode.STAND_ALONE, ParallelMode.HYBRID_PARALLEL):
raise RuntimeError("Parallel optimizer is not supported in {}.".format(_get_parallel_mode()))
else:
self.use_parallel = False
else:
self.use_parallel = False
if self.use_parallel:
if self.cls_name not in ["Lamb", "AdamWeightDecay"]:
raise RuntimeError("Parallel optimizer does not support optimizer {}".format(self.cls_name))
self.dev_num = _get_device_num()
if self.dev_num > self.param_length:
raise RuntimeError("Parallel optimizer can not be applied when the number of parameters {} is"
" less than the number of devices {}".format(self.param_length, self.dev_num))
self.param_rank = self._get_parameter_group_id()
self.optim_filter = tuple(map(lambda x: x == _get_global_rank(), self.param_rank))
self.param_names = []
for param in self.parameters:
self.param_names.append(param.name)
else:
self.optim_filter = (True,) * self.param_length
@property
def unique(self):
"""The method is to see whether to make unique. The input type is bool. The method is read-only."""
return self._unique
@unique.setter
def unique(self, value):
"""Set whether the input value is unique."""
if not isinstance(value, bool):
raise TypeError("The value type must be bool, but got value type is {}".format(type(value)))
self._unique = value
@property
def target(self):
"""The method is used to determine whether the parameter is updated on host or device. The input type is str
and can only be 'CPU', 'Ascend' or 'GPU'."""
return self._target
@target.setter
def target(self, value):
"""If the input value is set to "CPU", the parameters will be updated on the host using the Fused
optimizer operation."""
raise NotImplementedError
def decay_weight(self, gradients):
"""
Weight decay.
An approach to reduce the overfitting of a deep learning neural network model.
Args:
gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape as
`self.parameters`.
Returns:
tuple[Tensor], The gradients after weight decay.
"""
if self.exec_weight_decay:
params = self.parameters
if self.is_group:
gradients = self.map_(F.partial(_apply_decay), self.weight_decay_tensor_tuple, self.decay_flags,
params, gradients)
else:
gradients = self.map_(F.partial(_apply_decay, self.weight_decay_tensor), self.decay_flags,
params, gradients)
return gradients
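    # Effect sketch (assuming the _apply_decay helper defined elsewhere in this
    # module): for parameters whose decay flag is True the gradient becomes
    #   gradient <- gradient + weight_decay * parameter
    # which folds plain L2-style weight decay into the gradient update.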
def gradients_centralization(self, gradients):
"""
Gradients centralization.
        A method for optimizing convolutional layer parameters to improve the training speed of a deep learning neural
network model.
Args:
gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape as
`self.parameters`.
Returns:
tuple[Tensor], The gradients after gradients centralization.
"""
if self.is_group:
gradients = self.map_(F.partial(_apply_grad_centralization), self.grad_centralization_flags, gradients)
return gradients
def scale_grad(self, gradients):
"""
Loss scale for mixed precision.
        An approach used in mixed precision training to improve the speed and energy efficiency of training deep
        neural networks.
Args:
gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape as
`self.parameters`.
Returns:
tuple[Tensor], The gradients after loss scale.
"""
if self.need_scale:
gradients = self.map_(F.partial(_grad_scale, self.reciprocal_scale), gradients)
return gradients
def _grad_sparse_indices_deduplicate(self, gradients):
""" In the case of using big operators, deduplicate the 'indexes' in gradients."""
if self._target != 'CPU' and self._unique:
gradients = self.map_(F.partial(_indices_deduplicate), gradients)
return gradients
def _preprocess_weight_decay(self, weight_decay):
"""Check weight decay, and convert int to float."""
if isinstance(weight_decay, (float, int)):
weight_decay = float(weight_decay)
validator.check_non_negative_float(weight_decay, "weight_decay", self.cls_name)
return weight_decay
raise TypeError("Weight decay should be int or float.")
def _preprocess_grad_centralization(self, grad_centralization):
if not isinstance(grad_centralization, bool):
raise TypeError("The gradients centralization should be bool")
return grad_centralization
def _preprocess_single_lr(self, learning_rate):
"""Check lr value, and convert lr to a float, a Tensor or a LearningRateSchedule."""
if isinstance(learning_rate, (float, int)):
learning_rate = float(learning_rate)
validator.check_non_negative_float(learning_rate, "learning rate", self.cls_name)
return learning_rate
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 0:
return learning_rate
self.dynamic_lr = True
if isinstance(learning_rate, Iterable):
return Tensor(np.array(list(learning_rate)).astype(np.float32))
if isinstance(learning_rate, Tensor):
if learning_rate.ndim > 1:
raise ValueError("The dim of `Tensor` type Learning rate should be a 0 or 1,"
f"but got {learning_rate.ndim}.")
if learning_rate.ndim == 1 and learning_rate.size < 2:
logger.warning("If use `Tensor` type dynamic learning rate, please make sure that the number"
"of elements in the tensor passed is greater than 1.")
return learning_rate
if isinstance(learning_rate, LearningRateSchedule):
return learning_rate
raise TypeError("Learning rate should be int, float, Tensor, Iterable or LearningRateSchedule.")
def _build_single_lr(self, learning_rate, name):
"""Build learning rate value, convert learning rate to a Parameter or a LearningRateSchedule."""
if isinstance(learning_rate, float):
learning_rate = Parameter(Tensor(learning_rate, mstype.float32), name)
if self.is_group_lr and self.dynamic_lr:
learning_rate = _ConvertToCell(learning_rate)
return learning_rate
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 0:
learning_rate = Parameter(learning_rate, name)
if self.is_group_lr and self.dynamic_lr:
learning_rate = _ConvertToCell(learning_rate)
return learning_rate
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 1:
return _IteratorLearningRate(learning_rate, name)
return learning_rate
def _check_group_params(self, parameters):
"""Check group params."""
parse_keys = ['params', 'lr', 'weight_decay', 'order_params', 'grad_centralization']
for group_param in parameters:
invalid_key = list(filter(lambda x: x not in parse_keys, group_param.keys()))
if invalid_key:
raise KeyError(f'The key "{invalid_key}" cannot be recognized in group params.')
if 'order_params' in group_param.keys():
if len(group_param.keys()) > 1:
raise ValueError("The order params dict in group parameters should "
"only include the 'order_params' key.")
if not isinstance(group_param['order_params'], Iterable):
raise TypeError("The value of 'order_params' should be an Iterable type.")
continue
if not group_param['params']:
raise ValueError("Optimizer got an empty group parameter list.")
for param in group_param['params']:
if not isinstance(param, Parameter):
raise TypeError("The group param should be an iterator of Parameter type.")
def _parse_group_params(self, parameters, learning_rate):
"""Parse group params."""
self._check_group_params(parameters)
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 1:
tensor_lr_length = learning_rate.size
else:
tensor_lr_length = 0
for group_param in parameters:
if 'order_params' in group_param.keys():
if len(group_param.keys()) > 1:
raise ValueError("The order params dict in group parameters should "
"only include the 'order_params' key.")
if not isinstance(group_param['order_params'], Iterable):
raise TypeError("The value of 'order_params' should be an Iterable type.")
self.is_group_params_ordered = True
continue
if 'lr' in group_param.keys():
self.is_group_lr = True
group_lr = self._preprocess_single_lr(group_param['lr'])
if isinstance(group_lr, Tensor) and group_lr.ndim == 1:
group_lr_length = group_lr.size
if tensor_lr_length == 0:
tensor_lr_length = group_lr_length
elif group_lr_length != tensor_lr_length:
raise ValueError("The Tensor type dynamic learning rate in group should be the same size.")
def _init_group_params(self, parameters, learning_rate, weight_decay, grad_centralization):
"""Initialize learning rate, weight decay or grad centralization in group params."""
self._parse_group_params(parameters, learning_rate)
default_lr = self._build_single_lr(learning_rate, 'learning_rate')
params_store = []
for group_num, group_param in enumerate(parameters):
if 'order_params' in group_param.keys():
ordered_parameters = group_param['order_params']
continue
self.group_params += group_param['params']
if 'lr' in group_param.keys():
lr_param_name = 'learning_rate_group_' + str(group_num)
lr = self._preprocess_single_lr(group_param['lr'])
lr = self._build_single_lr(lr, lr_param_name)
else:
lr = default_lr
if 'weight_decay' in group_param.keys():
cur_weight_decay = self._preprocess_weight_decay(group_param['weight_decay'])
weight_decay_ = cur_weight_decay * self.loss_scale
else:
weight_decay_ = weight_decay * self.loss_scale
if 'grad_centralization' in group_param.keys():
self.grad_centralization = self._preprocess_grad_centralization(group_param['grad_centralization'])
for param in group_param['params']:
validator.check_value_type("parameter", param, [Parameter], self.cls_name)
if "conv" not in param.name and self.grad_centralization is True:
raise ValueError("Grad centralization can be perform only on the conv layer. If the parameter"
"is not a convolution layer, this parameter cannot be set to True.")
grad_centralization_ = self.grad_centralization
else:
grad_centralization_ = grad_centralization
for key in group_param.keys():
if key not in ('params', 'lr', 'weight_decay', 'grad_centralization'):
logger.warning(f"The optimizer cannot parse '{key}' when setting parameter groups.")
for param in group_param['params']:
validator.check_value_type("parameter", param, [Parameter], self.cls_name)
if param.name in params_store:
raise RuntimeError(f"The {param.name} parameter has appeared in parameter groups.")
params_store.append(param.name)
self.group_lr.append(lr)
self.group_weight_decay.append(weight_decay_)
self.group_grad_centralization.append(grad_centralization_)
if self.is_group_params_ordered:
self._order_and_adjust_group_params(ordered_parameters)
def _order_and_adjust_group_params(self, ordered_parameters):
"""
Order group parameter, learning rate, weight decay and grad centralization in group params.
"""
params_length = len(self.group_params)
if len(ordered_parameters) != len(self.group_params):
raise ValueError(f"The value of 'order_params' should be same with all group parameters.")
ordered_params = [None] * params_length
ordered_learning_rate = [None] * params_length
ordered_weight_decay = [None] * params_length
ordered_grad_centralization = [None] * params_length
params_name = [param.name for param in ordered_parameters]
for param, lr, wd, gc in zip(self.group_params, self.group_lr, self.group_weight_decay,
self.group_grad_centralization):
index = params_name.index(param.name)
ordered_params[index] = param
ordered_learning_rate[index] = lr
ordered_weight_decay[index] = wd
ordered_grad_centralization[index] = gc
self.group_params = ordered_params
self.group_lr = ordered_learning_rate
self.group_weight_decay = ordered_weight_decay
self.group_grad_centralization = ordered_grad_centralization
def get_lr(self):
"""
Get the learning rate of current step.
Returns:
float, the learning rate of current step.
"""
lr = self.learning_rate
if self.dynamic_lr:
if self.is_group_lr:
lr = ()
for learning_rate in self.learning_rate:
current_dynamic_lr = learning_rate(self.global_step)
lr += (current_dynamic_lr,)
else:
lr = self.learning_rate(self.global_step)
self.assignadd(self.global_step, self.global_step_increase_tensor)
return lr
def get_lr_parameter(self, param):
"""
Get the learning rate of parameter.
Args:
param (Union[Parameter, list[Parameter]]): The `Parameter` or list of `Parameter`.
Returns:
Parameter, single `Parameter` or `list[Parameter]` according to the input type.
"""
def get_lr_value(learning_rate):
if isinstance(learning_rate, (_ConvertToCell, _IteratorLearningRate)):
return learning_rate.learning_rate
return learning_rate
if isinstance(param, Parameter):
param_list = [param]
elif isinstance(param, list):
param_list = param
else:
raise TypeError(f"The parameter only support 'Parameter' or 'list' type.")
lr = []
ids = [id(p) for p in self.parameters]
for p in param_list:
validator.check_value_type("parameter", p, [Parameter], self.cls_name)
if id(p) not in ids:
raise ValueError(f"The parameter {p.name} is not in optimizer.")
if self.is_group_lr:
index = ids.index(id(p))
lr.append(get_lr_value(self.learning_rate[index]))
else:
lr.append(get_lr_value(self.learning_rate))
return lr if isinstance(param, list) else lr[0]
def _get_parameter_group_id(self):
"""
Get the parameter partition group id, which is less than the number of devices.
Returns:
tuple, the group id tuple of parameters.
"""
rank_list = ()
count = 0
for _ in range(self.param_length):
rank_list = rank_list + (count,)
count = count + 1
if count == self.dev_num:
count = 0
return rank_list
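    # Illustrative behavior of the round-robin assignment above (the sizes are
    # hypothetical, not taken from this module): with self.param_length == 5 and
    # self.dev_num == 2, rank_list becomes (0, 1, 0, 1, 0), i.e. parameters are
    # spread over the devices in order.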
def broadcast_params(self, optim_result):
"""
Apply Broadcast operations in the sequential order of parameter groups.
Returns:
bool, the status flag.
"""
param_group = []
key_group = []
for _ in range(self.dev_num):
param_group.append(F.make_tuple())
key_group.append(F.make_tuple())
for i in range(self.param_length):
param_group[self.param_rank[i]] = param_group[self.param_rank[i]] + (self.parameters[i],)
key = P.MakeRefKey(self.param_names[i])()
key_group[self.param_rank[i]] = key_group[self.param_rank[i]] + (key,)
new_param_group = []
for root in range(self.dev_num):
ops = P.Broadcast(root)
if root > 0:
param_group[root] = F.depend(param_group[root], new_param_group[root-1])
next_params = ops(param_group[root])
new_param_group.append(next_params)
for i in range(F.tuple_len(next_params)):
F.assign(key_group[root][i], next_params[i])
return new_param_group
def construct(self, *hyper_params):
raise NotImplementedError
op_add = P.AddN()
op_gather = P.Gather()
op_mul = P.Mul()
op_gc = inner.Centralization()
_apply_decay = C.MultitypeFuncGraph("apply_decay")
_apply_grad_centralization = C.MultitypeFuncGraph("apply_grad_centralization")
@_apply_decay.register("Tensor", "Bool", "Tensor", "RowTensor")
def _tensor_apply_decay_with_sparse(weight_decay, if_apply, weight, gradient):
"""Get grad with weight_decay."""
if if_apply:
indices = gradient.indices
values = op_add((op_gather(weight, indices, 0) * F.cast(weight_decay, F.dtype(weight)), gradient.values))
shape = gradient.dense_shape
return RowTensor(indices, values, shape)
return gradient
@_apply_decay.register("Tensor", "Bool", "Tensor", "Tensor")
def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
"""Get grad with weight_decay."""
if if_apply:
return op_add((op_mul(weight, F.cast(weight_decay, F.dtype(weight))), gradient))
return gradient
@_apply_grad_centralization.register("Bool", "RowTensor")
def _tensor_apply_grad_centralization_with_sparse(if_apply, gradient):
"""Get grad with grad_centralization."""
if if_apply:
indices = gradient.indices
values = op_gc(gradient.values, -1)
shape = gradient.dense_shape
return RowTensor(indices, values, shape)
return gradient
@_apply_grad_centralization.register("Bool", "Tensor")
def _tensor_apply_grad_centralization(if_apply, gradient):
"""Get grad with grad_centralization."""
if if_apply:
return op_gc(gradient, -1)
return gradient
_grad_scale = C.MultitypeFuncGraph("grad_scale")
_indices_deduplicate = C.MultitypeFuncGraph("indices_deduplicate")
@_grad_scale.register("Number", "Tensor")
def tensor_grad_scale(scale, grad):
"""Get grad with scale."""
if scale == 1.0:
return grad
return op_mul(grad, F.cast(scale, F.dtype(grad)))
@_grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale_with_tensor(scale, grad):
"""Get grad with scale."""
return op_mul(grad, F.cast(scale, F.dtype(grad)))
@_grad_scale.register("Tensor", "RowTensor")
def tensor_grad_scale_with_sparse(scale, grad):
"""Get grad with scale."""
return RowTensor(grad.indices, grad.values * F.cast(scale, F.dtype(grad.values)), grad.dense_shape)
@_indices_deduplicate.register("RowTensor")
def rowtensor_deduplicate_indices_slices(grad):
"""Unique the indices and sums the 'values' corresponding to the duplicate indices."""
indices = grad.indices
values = grad.values
unique_indices, index_position = P.Unique()(indices)
summed_values = P.UnsortedSegmentSum()(values, index_position, P.DynamicShape()(unique_indices)[0])
return RowTensor(unique_indices, summed_values, grad.dense_shape)
@_indices_deduplicate.register("Tensor")
def tensor_deduplicate_indice_slices(grad):
"""Return the input gradient directly in the dense sences."""
return grad
class _ConvertToCell(LearningRateSchedule):
"""Inner api, convert learning rate of scalar to LearningRateSchedule."""
def __init__(self, learning_rate):
super(_ConvertToCell, self).__init__()
if not isinstance(learning_rate, Parameter):
raise TypeError('Learning rate must be Parameter.')
self.learning_rate = learning_rate
def construct(self, global_step):
return self.learning_rate + 1.0 - 1.0
class _IteratorLearningRate(LearningRateSchedule):
"""Inner api, convert learning rate of Tensor(list) to LearningRateSchedule."""
def __init__(self, learning_rate, name):
super(_IteratorLearningRate, self).__init__()
if isinstance(learning_rate, Tensor):
if learning_rate.ndim != 1:
raise ValueError("The dim of `Tensor` type dynamic learning rate should be a 1,"
f"but got {learning_rate.ndim}.")
else:
raise TypeError("Learning rate should be Tensor.")
self.learning_rate = Parameter(learning_rate, name)
self.gather = P.Gather()
def construct(self, global_step):
return self.gather(self.learning_rate, global_step, 0)
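# Illustrative layout of the group parameters consumed by _check_group_params /
# _parse_group_params / _init_group_params above. The variable names are
# placeholders, not part of this module; only the dict keys ('params', 'lr',
# 'weight_decay', 'grad_centralization', 'order_params') come from the code.
#
# group_params = [
#     {'params': conv_params, 'lr': 0.01, 'grad_centralization': True},
#     {'params': no_conv_params, 'weight_decay': 0.0},
#     {'order_params': net.trainable_params()},
# ]
# optim = SomeOptimizerSubclass(group_params, learning_rate=0.1)  # hypothetical subclass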
|
the-stack_0_16581 | import time
import boto3
import interfaces
def _return_default_port_on_redshift_engines():
return 5439
def _return_default_custom_master_username_on_redshift_engines():
return 'awsuser'
class Tester(interfaces.TesterInterface):
def __init__(self):
self.aws_redshift_client = boto3.client('redshift')
self.cache = {}
self.user_id = boto3.client('sts').get_caller_identity().get('UserId')
self.account_arn = boto3.client('sts').get_caller_identity().get('Arn')
self.account_id = boto3.client('sts').get_caller_identity().get('Account')
self.redshift_clusters = self._get_all_redshift_clusters()
def declare_tested_service(self) -> str:
return 'redshift'
def declare_tested_provider(self) -> str:
return 'aws'
def run_tests(self) -> list:
return self.detect_redshift_cluster_encrypted() + \
self.detect_redshift_cluster_not_publicly_accessible() + \
self.detect_redshift_cluster_not_using_default_port() + \
self.detect_redshift_cluster_not_using_custom_master_username() + \
self.detect_redshift_cluster_using_logging() + \
self.detect_redshift_cluster_allow_version_upgrade() + \
self.detect_redshift_cluster_requires_ssl() + \
self.detect_redshift_cluster_not_using_ec2_classic() + \
self.get_redshift_cluster_not_encrypted_with_kms()
def _append_redshift_test_result(self, redshift, test_name, issue_status):
return {
"user": self.user_id,
"account_arn": self.account_arn,
"account": self.account_id,
"timestamp": time.time(),
"item": redshift['ClusterIdentifier'],
"item_type": "redshift_cluster",
"test_name": test_name,
"test_result": issue_status
}
def _return_redshift_logging_status(self, cluster_identifier):
return self.aws_redshift_client.describe_logging_status(ClusterIdentifier=cluster_identifier)
def _return_parameter_group_names(self, parameter_groups):
result = []
for pg in parameter_groups:
result.append(pg['ParameterGroupName'])
return result
def _return_cluster_parameter_data(self, group_name):
return self.aws_redshift_client.describe_cluster_parameters(ParameterGroupName=group_name)
def _return_ssl_enabled_on_parameter_groups(self, params):
ssl_enabled = False
for pg in params:
if pg['ParameterName'].lower() == 'require_ssl' and pg['ParameterValue'].lower() == 'true':
ssl_enabled = True
break
return ssl_enabled
def detect_redshift_cluster_encrypted(self):
test_name = "aws_redshift_encrypted_redshift_cluster"
result = []
for redshift in self.redshift_clusters['Clusters']:
if not redshift['Encrypted']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_not_publicly_accessible(self):
test_name = "aws_redshift_not_publicly_accessible_redshift_cluster"
result = []
for redshift in self.redshift_clusters['Clusters']:
if redshift['PubliclyAccessible']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_not_using_default_port(self):
test_name = "aws_redshift_cluster_not_using_default_port"
result = []
for redshift in self.redshift_clusters['Clusters']:
if _return_default_port_on_redshift_engines() == redshift['Endpoint']['Port']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_not_using_custom_master_username(self):
test_name = "aws_redshift_cluster_not_using_custom_master_username"
result = []
for redshift in self.redshift_clusters['Clusters']:
if _return_default_custom_master_username_on_redshift_engines() == redshift['MasterUsername'].lower():
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_using_logging(self):
test_name = "aws_redshift_cluster_using_logging"
result = []
for redshift in self.redshift_clusters['Clusters']:
logging_metadata = self._return_redshift_logging_status(redshift['ClusterIdentifier'])
if not logging_metadata['LoggingEnabled']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_allow_version_upgrade(self):
test_name = "aws_redshift_cluster_allow_version_upgrade"
result = []
for redshift in self.redshift_clusters['Clusters']:
if not redshift['AllowVersionUpgrade']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_requires_ssl(self):
test_name = "aws_redshift_cluster_requires_ssl"
result = []
for redshift in self.redshift_clusters['Clusters']:
issue_found = True
for parameter_group_name in self._return_parameter_group_names(redshift['ClusterParameterGroups']):
param_key_value = self._return_cluster_parameter_data(parameter_group_name)
if 'Parameters' in param_key_value and len(param_key_value['Parameters']):
if self._return_ssl_enabled_on_parameter_groups(param_key_value['Parameters']):
issue_found = False
if not issue_found:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
return result
def detect_redshift_cluster_not_using_ec2_classic(self):
test_name = "aws_redshift_cluster_not_using_ec2_classic"
result = []
for redshift in self.redshift_clusters['Clusters']:
if not ('VpcId' in redshift and redshift['VpcId']):
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def get_redshift_cluster_not_encrypted_with_kms(self):
test_name = "aws_redshift_cluster_not_encrypted_with_KMS_customer_master_keys"
result = []
clusters = self.redshift_clusters["Clusters"]
for cluster in clusters:
encrypted = cluster["Encrypted"]
if encrypted:
result.append(self._append_redshift_test_result(cluster, test_name, "no_issue_found"))
else:
result.append(self._append_redshift_test_result(cluster, test_name, "issue_found"))
return result
def _get_all_redshift_clusters(self):
clusters = []
paginator = self.aws_redshift_client.get_paginator('describe_clusters')
response_iterator = paginator.paginate()
for page in response_iterator:
clusters.extend(page['Clusters'])
return { "Clusters" : clusters }
|
the-stack_0_16582 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-10-07 11:08
import functools
from typing import Union, List, Dict, Any, Set
from hanlp_trie import DictInterface, TrieDict
from hanlp.common.dataset import SamplerBuilder
from hanlp.components.taggers.transformers.transformer_tagger import TransformerTagger
from hanlp.metrics.chunking.sequence_labeling import get_entities
from hanlp.metrics.f1 import F1
from hanlp.datasets.ner.loaders.json_ner import prune_ner_tagset
from hanlp.utils.string_util import guess_delimiter
from hanlp_common.util import merge_locals_kwargs
class TransformerNamedEntityRecognizer(TransformerTagger):
def __init__(self, **kwargs) -> None:
r"""A simple tagger using transformers and a linear layer with an optional CRF
(:cite:`lafferty2001conditional`) layer for
        NER task. It can utilize whitelist gazetteers, which are dicts mapping from entity name to entity type.
        During decoding, it performs longest-prefix-matching of these words to override the prediction from the
        underlying statistical model. It also uses a blacklist to mask out mis-predicted entities.
.. Note:: For algorithm beginners, longest-prefix-matching is the prerequisite to understand what dictionary can
do and what it can't do. The tutorial in `this book <http://nlp.hankcs.com/book.php>`_ can be very helpful.
Args:
**kwargs: Not used.
"""
super().__init__(**kwargs)
def build_metric(self, **kwargs):
return F1()
# noinspection PyMethodOverriding
def update_metrics(self, metric, logits, y, mask, batch, prediction):
for p, g in zip(prediction, self.tag_to_span(batch['tag'], batch)):
pred = set(p)
gold = set(g)
metric(pred, gold)
# noinspection PyMethodOverriding
def decode_output(self, logits, mask, batch, model=None):
output = super().decode_output(logits, mask, batch, model)
prediction = super().prediction_to_human(output, self.vocabs['tag'].idx_to_token, batch)
return self.tag_to_span(prediction, batch)
def tag_to_span(self, batch_tags, batch):
spans = []
sents = batch[self.config.token_key]
dict_whitelist = self.dict_whitelist
dict_blacklist = self.dict_blacklist
merge_types = self.config.get('merge_types', None)
for tags, tokens in zip(batch_tags, sents):
entities = get_entities(tags)
if dict_whitelist:
matches = dict_whitelist.tokenize(tokens)
if matches:
# Fix O E-LOC O like predictions
entities = get_entities(tags)
for label, start, end in entities:
if end - start == 1:
tags[start] = 'S-' + label
else:
tags[start] = 'B-' + label
for i in range(start + 1, end - 1):
tags[i] = 'I-' + label
tags[end - 1] = 'E-' + label
for start, end, label in matches:
if (not tags[start][0] in 'ME') and (not tags[end - 1][0] in 'BM'):
if end - start == 1:
tags[start] = 'S-' + label
else:
tags[start] = 'B-' + label
for i in range(start + 1, end - 1):
tags[i] = 'I-' + label
tags[end - 1] = 'E-' + label
entities = get_entities(tags)
if merge_types and len(entities) > 1:
merged_entities = []
begin = 0
for i in range(1, len(entities)):
if entities[begin][0] != entities[i][0] or entities[i - 1][2] != entities[i][1] \
or entities[i][0] not in merge_types:
merged_entities.append((entities[begin][0], entities[begin][1], entities[i - 1][2]))
begin = i
merged_entities.append((entities[begin][0], entities[begin][1], entities[-1][2]))
entities = merged_entities
if dict_blacklist:
pruned = []
delimiter_in_entity = self.config.get('delimiter_in_entity', ' ')
for label, start, end in entities:
entity = delimiter_in_entity.join(tokens[start:end])
if entity not in dict_blacklist:
pruned.append((label, start, end))
entities = pruned
spans.append(entities)
return spans
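    # Illustrative effect of the whitelist handling above (tokens, tags and the
    # dictionary are made up): with dict_whitelist = {'New York': 'LOC'} and
    # tokens ['I', 'love', 'New', 'York'] all predicted as 'O', the matched span
    # is re-tagged B-LOC/E-LOC and the method returns [('LOC', 2, 4)]; a
    # dict_blacklist entry 'New York' would then prune that span again.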
def decorate_spans(self, spans, batch):
batch_ner = []
delimiter_in_entity = self.config.get('delimiter_in_entity', ' ')
for spans_per_sent, tokens in zip(spans, batch.get(f'{self.config.token_key}_', batch[self.config.token_key])):
ner_per_sent = []
for label, start, end in spans_per_sent:
ner_per_sent.append((delimiter_in_entity.join(tokens[start:end]), label, start, end))
batch_ner.append(ner_per_sent)
return batch_ner
def generate_prediction_filename(self, tst_data, save_dir):
return super().generate_prediction_filename(tst_data.replace('.tsv', '.txt'), save_dir)
def prediction_to_human(self, pred, vocab, batch):
return self.decorate_spans(pred, batch)
def input_is_flat(self, tokens):
return tokens and isinstance(tokens, list) and isinstance(tokens[0], str)
def fit(self, trn_data, dev_data, save_dir, transformer,
delimiter_in_entity=None,
merge_types: List[str] = None,
average_subwords=False,
word_dropout: float = 0.2,
hidden_dropout=None,
layer_dropout=0,
scalar_mix=None,
grad_norm=5.0,
lr=5e-5,
transformer_lr=None,
adam_epsilon=1e-8,
weight_decay=0,
warmup_steps=0.1,
crf=False,
secondary_encoder=None,
reduction='sum',
batch_size=32,
sampler_builder: SamplerBuilder = None,
epochs=3,
tagset=None,
token_key=None,
max_seq_len=None,
sent_delimiter=None,
char_level=False,
hard_constraint=False,
transform=None,
logger=None,
seed=None,
devices: Union[float, int, List[int]] = None,
**kwargs):
"""Fit component to training set.
Args:
trn_data: Training set.
dev_data: Development set.
save_dir: The directory to save trained component.
transformer: An identifier of a pre-trained transformer.
delimiter_in_entity: The delimiter between tokens in entity, which is used to rebuild entity by joining
tokens during decoding.
merge_types: The types of consecutive entities to be merged.
average_subwords: ``True`` to average subword representations.
word_dropout: Dropout rate to randomly replace a subword with MASK.
hidden_dropout: Dropout rate applied to hidden states.
layer_dropout: Randomly zero out hidden states of a transformer layer.
scalar_mix: Layer attention.
grad_norm: Gradient norm for clipping.
lr: Learning rate for decoder.
transformer_lr: Learning for encoder.
adam_epsilon: The epsilon to use in Adam.
weight_decay: The weight decay to use.
warmup_steps: The number of warmup steps.
crf: ``True`` to enable CRF (:cite:`lafferty2001conditional`).
secondary_encoder: An optional secondary encoder to provide enhanced representation by taking the hidden
states from the main encoder as input.
reduction: The loss reduction used in aggregating losses.
batch_size: The number of samples in a batch.
sampler_builder: The builder to build sampler, which will override batch_size.
epochs: The number of epochs to train.
tagset: Optional tagset to prune entities outside of this tagset from datasets.
token_key: The key to tokens in dataset.
max_seq_len: The maximum sequence length. Sequence longer than this will be handled by sliding
window.
sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can
be split here.
char_level: Whether the sequence length is measured at char level, which is never the case for
lemmatization.
hard_constraint: Whether to enforce hard length constraint on sentences. If there is no ``sent_delimiter``
in a sentence, it will be split at a token anyway.
transform: An optional transform to be applied to samples. Usually a character normalization transform is
passed in.
devices: Devices this component will live on.
logger: Any :class:`logging.Logger` instance.
seed: Random seed to reproduce this training.
**kwargs: Not used.
Returns:
The best metrics on training set.
"""
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def build_vocabs(self, trn, logger, **kwargs):
super().build_vocabs(trn, logger, **kwargs)
if self.config.get('delimiter_in_entity', None) is None:
# Check the first sample to guess the delimiter between tokens in a NE
tokens = trn[0][self.config.token_key]
delimiter_in_entity = guess_delimiter(tokens)
logger.info(f'Guess the delimiter between tokens in named entity could be [blue]"{delimiter_in_entity}'
f'"[/blue]. If not, specify `delimiter_in_entity` in `fit()`')
self.config.delimiter_in_entity = delimiter_in_entity
def build_dataset(self, data, transform=None, **kwargs):
dataset = super().build_dataset(data, transform, **kwargs)
if isinstance(data, str):
tagset = self.config.get('tagset', None)
if tagset:
dataset.append_transform(functools.partial(prune_ner_tagset, tagset=tagset))
return dataset
@property
def dict_whitelist(self) -> DictInterface:
return self.config.get('dict_whitelist', None)
@dict_whitelist.setter
def dict_whitelist(self, dictionary: Union[DictInterface, Union[Dict[str, Any], Set[str]]]):
if dictionary is not None and not isinstance(dictionary, DictInterface):
dictionary = TrieDict(dictionary)
self.config.dict_whitelist = dictionary
@property
def dict_blacklist(self) -> DictInterface:
return self.config.get('dict_blacklist', None)
@dict_blacklist.setter
def dict_blacklist(self, dictionary: Union[DictInterface, Union[Dict[str, Any], Set[str]]]):
if dictionary is not None and not isinstance(dictionary, DictInterface):
dictionary = TrieDict(dictionary)
self.config.dict_blacklist = dictionary
|
the-stack_0_16584 | import asyncio
import logging
import struct
from . import package
from .constants import MQTTv50, MQTTCommands
logger = logging.getLogger(__name__)
class BaseMQTTProtocol(asyncio.StreamReaderProtocol):
def __init__(self, buffer_size=2**16, loop=None):
if not loop:
loop = asyncio.get_event_loop()
self._connection = None
self._transport = None
self._connected = asyncio.Event(loop=loop)
reader = asyncio.StreamReader(limit=buffer_size, loop=loop)
super(BaseMQTTProtocol, self).__init__(reader, loop=loop)
def set_connection(self, conn):
self._connection = conn
def _parse_packet(self):
raise NotImplementedError
def connection_made(self, transport: asyncio.Transport):
super(BaseMQTTProtocol, self).connection_made(transport)
logger.info('[CONNECTION MADE]')
self._transport = transport
self._connected.set()
def data_received(self, data):
super(BaseMQTTProtocol, self).data_received(data)
def write_data(self, data: bytes):
if not self._transport.is_closing():
self._transport.write(data)
else:
logger.warning('[TRYING WRITE TO CLOSED SOCKET]')
def connection_lost(self, exc):
self._connected.clear()
super(BaseMQTTProtocol, self).connection_lost(exc)
if exc:
logger.warning('[EXC: CONN LOST]', exc_info=exc)
else:
logger.info('[CONN CLOSE NORMALLY]')
async def read(self, n=-1):
bs = await self._stream_reader.read(n=n)
# so we don't receive anything but connection is not closed -
# let's close it manually
if not bs and not self._transport.is_closing():
self._transport.close()
# self.connection_lost(ConnectionResetError())
raise ConnectionResetError("Reset connection manually.")
return bs
class MQTTProtocol(BaseMQTTProtocol):
proto_name = b'MQTT'
proto_ver = MQTTv50
def __init__(self, *args, **kwargs):
super(MQTTProtocol, self).__init__(*args, **kwargs)
self._queue = asyncio.Queue()
self._disconnect = asyncio.Event()
self._read_loop_future = None
def connection_made(self, transport: asyncio.Transport):
super().connection_made(transport)
self._read_loop_future = asyncio.ensure_future(self._read_loop())
async def send_auth_package(self, client_id, username, password, clean_session, keepalive,
will_message=None, **kwargs):
pkg = package.LoginPackageFactor.build_package(client_id, username, password, clean_session,
keepalive, self, will_message=will_message, **kwargs)
self.write_data(pkg)
def send_subscribe_packet(self, topic, qos, **kwargs):
pkg = package.SubscribePacket.build_package(topic, qos, self, **kwargs)
self.write_data(pkg)
def send_simple_command_packet(self, cmd):
pkg = package.SimpleCommandPacket.build_package(cmd)
self.write_data(pkg)
def send_ping_request(self):
self.send_simple_command_packet(MQTTCommands.PINGREQ)
def send_publish(self, message):
mid, pkg = package.PublishPacket.build_package(message, self)
self.write_data(pkg)
return mid, pkg
def send_disconnect(self, reason_code=0, **properties):
pkg = package.DisconnectPacket.build_package(self, reason_code=reason_code, **properties)
self.write_data(pkg)
return pkg
def send_command_with_mid(self, cmd, mid, dup, reason_code=0):
pkg = package.CommandWithMidPacket.build_package(cmd, mid, dup, reason_code=reason_code,
proto_ver=self.proto_ver)
self.write_data(pkg)
async def _read_packet(self):
remaining_count = []
remaining_length = 0
remaining_mult = 1
while True:
byte, = struct.unpack("!B", await self.read(1))
remaining_count.append(byte)
if len(remaining_count) > 4:
logger.warning('[MQTT ERR PROTO] RECV MORE THAN 4 bytes for remaining length.')
return None
remaining_length += (byte & 127) * remaining_mult
remaining_mult *= 128
if byte & 128 == 0:
break
packet = b''
while remaining_length > 0:
chunk = await self.read(remaining_length)
remaining_length -= len(chunk)
packet += chunk
return packet
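    # Worked example of the variable-length "remaining length" decoding above
    # (per the MQTT spec each byte contributes its low 7 bits and the high bit
    # marks continuation): the byte sequence 0xC1 0x02 decodes to
    # (0xC1 & 127) * 1 + (0x02 & 127) * 128 = 65 + 256 = 321 bytes left to read.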
async def _read_loop(self):
await self._connected.wait()
while self._connected.is_set():
try:
byte = await self.read(1)
command, = struct.unpack("!B", byte)
packet = await self._read_packet()
self._connection.put_package((command, packet))
except ConnectionResetError as exc:
# This connection will be closed, because we received the empty data.
# So we can safely break the while
logger.debug("[RECV EMPTY] Connection will be reset automatically.")
break
def connection_lost(self, exc):
super(MQTTProtocol, self).connection_lost(exc)
self._connection.put_package((MQTTCommands.DISCONNECT, b''))
if self._read_loop_future is not None:
self._read_loop_future.cancel()
self._read_loop_future = None
self._queue = asyncio.Queue()
|
the-stack_0_16585 | try:
from kaggle.api.kaggle_api_extended import KaggleApi
except ImportError:
    raise ImportError('Kaggle API not properly set up')
import datetime
import glob
import os
import sys
import pandas as pd
"""
Extracts data from three possible sources:
1) Lending club API (not in production)
2) Kaggle API (is very slow)
3) As a proxy for the other two, a local directory of the data from kaggle
"""
def get_raw_data(call_type='local', source_path="./Data/source/", save_bool=False, raw_path="./Data/raw/",
username=None, key=None, api_path=None, kaggle_dataset_name='accepted'):
if call_type == 'api':
""" Production implementation should connect to Lending Club API
# https://www.lendingclub.com/developers/versioning
"""
print('Starting LC API connection')
data = pd.DataFrame()
if data.empty:
print('DataFrame is empty from LC API!')
else:
print(data.head())
if save_bool:
save_raw(data, kaggle_dataset_name, raw_path)
return data
# Kaggle data
elif call_type == 'kaggle':
print('Starting Kaggle Scraping')
try:
if (username is not None) & (key is not None):
os.environ['KAGGLE_USERNAME'] = username # assign environment username
os.environ['KAGGLE_KEY'] = key # assign environment key from kaggle.com
api = KaggleApi() # connect to api
api.authenticate() # authenticate
# get list of files that are in dataset and return the newest "accepted" dataset
file = get_kaggle_file(api_path, api, kaggle_dataset_name)
# download accepted dataset VERY SLOW
api.dataset_download_file(dataset=api_path, file_name=file, path=source_path, force=True)
# unzip and convert data to pandas
data = pd.read_csv(source_path + "/" + file, compression='gzip', error_bad_lines=False)
if data.empty:
print("DataFrame is empty!")
else:
print(data.head())
if save_bool:
# save the untouched raw data in flat file warehouse (currently local directory but could be S3
save_raw(data, kaggle_dataset_name, raw_path)
return data
else:
print("No credentials provided, will try to retrieve from local source")
except Exception as exe:
print(sys.stderr, "Unable to access kaggle data")
print(sys.stderr, "Exception: %s" % str(exe))
sys.exit(1)
try:
print("Retrieving data from Local CSV")
# access source data from local directory
list_of_files = glob.glob('./Data/source/*%s*.csv' % kaggle_dataset_name)
# get newest accepted dataset
file = max(list_of_files, key=os.path.getctime)
data = pd.read_csv(file)
if data.empty:
print("DataFrame is empty, cannot find any data source")
sys.exit(1)
else:
print(data.head())
if save_bool:
save_raw(data, kaggle_dataset_name, raw_path)
return data
except Exception as exe:
print(sys.stderr, "Cannot access raw data. Please check dependencies are installed")
print(sys.stderr, "Exception: %s" % str(exe))
sys.exit(1)
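# Illustrative call of get_raw_data() (the credentials and the Kaggle dataset
# path are placeholders, not values shipped with this module):
#
# df = get_raw_data(call_type='kaggle', save_bool=True,
#                   username='<kaggle-username>', key='<kaggle-api-key>',
#                   api_path='<owner>/<lending-club-dataset>')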
def get_kaggle_file(path, a, name):
# kaggle api returns a list of data objects, each containing the metadata for every dataset on the page
dataset_info = a.dataset_list_files(path)
# get the file objects
dataset_obs = dataset_info.__getattribute__('files')
file_string = ''
max_date = datetime.datetime(1900, 1, 1)
for file in dataset_obs:
if name in file.__str__():
# find files with 'accepted' string in name and track the one that was created the most recently
if file.creationDate > max_date:
max_date = file.creationDate
assert isinstance(file.__str__(), object)
file_string = file.__str__()
return file_string
def save_raw(data, name, raw_path):
print("Raw Data Successfully Retrieved")
print("Saving Raw CSV file in Simple Storage Bucket Warehouse..........")
data.to_csv(raw_path + '{}_{}.csv'.format(name, datetime.datetime.today().strftime('%y_%m_%d')), index=False)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
the-stack_0_16586 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# py_tst documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
# import py_tst
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = u'py-tst'
copyright = u"2021, [A[BAditya Bhatraju"
author = u"[A[BAditya Bhatraju"
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
# version = py_tst.__version__
# The full version, including alpha/beta/rc tags.
# release = py_tst.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {
"**": ["about.html", "navigation.html", "searchbox.html"]
}
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'py_tstdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'py_tst.tex',
u'py-tst Documentation',
     u'Aditya Bhatraju', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'py_tst',
u'py-tst Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'py_tst',
u'py-tst Documentation',
author,
'py_tst',
'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_16587 | import time
from logging import LogRecord, getLogger, basicConfig
from logging.handlers import BufferingHandler
from multiprocessing.pool import ThreadPool
from ...backend_api.services import events
from ...config import config
buffer_capacity = config.get('log.task_log_buffer_capacity', 100)
class TaskHandler(BufferingHandler):
__flush_max_history_seconds = 30.
__once = False
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
def __init__(self, session, task_id, capacity=buffer_capacity):
super(TaskHandler, self).__init__(capacity)
self.task_id = task_id
self.session = session
self.last_timestamp = 0
self.counter = 1
self._last_event = None
self._thread_pool = ThreadPool(processes=1)
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
# Notice! protect against infinite loops, i.e. flush while sending previous records
# if self.lock._is_owned():
# return False
# if we need to add handlers to the base_logger,
# it will not automatically create stream one when first used, so we must manually configure it.
if not TaskHandler.__once:
base_logger = getLogger()
if len(base_logger.handlers) == 1 and isinstance(base_logger.handlers[0], TaskHandler):
if record.name != 'console' and not record.name.startswith('trains.'):
base_logger.removeHandler(self)
basicConfig()
base_logger.addHandler(self)
TaskHandler.__once = True
else:
TaskHandler.__once = True
# if we passed the max buffer
if len(self.buffer) >= self.capacity:
return True
# if the first entry in the log was too long ago.
if len(self.buffer) and (time.time() - self.buffer[0].created) > self.__flush_max_history_seconds:
return True
return False
def _record_to_event(self, record):
# type: (LogRecord) -> events.TaskLogEvent
timestamp = int(record.created * 1000)
if timestamp == self.last_timestamp:
timestamp += self.counter
self.counter += 1
else:
self.last_timestamp = timestamp
self.counter = 1
# unite all records in a single second
if self._last_event and timestamp - self._last_event.timestamp < 1000 and \
record.levelname.lower() == str(self._last_event.level):
# ignore backspaces (they are often used)
self._last_event.msg += '\n' + record.getMessage().replace('\x08', '')
return None
self._last_event = events.TaskLogEvent(
task=self.task_id,
timestamp=timestamp,
level=record.levelname.lower(),
worker=self.session.worker,
msg=record.getMessage().replace('\x08', '') # ignore backspaces (they are often used)
)
return self._last_event
def flush(self):
if not self.buffer:
return
self.acquire()
buffer = self.buffer
try:
if not buffer:
return
self.buffer = []
record_events = [self._record_to_event(record) for record in buffer]
self._last_event = None
batch_requests = events.AddBatchRequest(requests=[events.AddRequest(e) for e in record_events if e])
except Exception:
batch_requests = None
print("Failed logging task to backend ({:d} lines)".format(len(buffer)))
finally:
self.release()
if batch_requests:
self._thread_pool.apply_async(self._send_events, args=(batch_requests, ))
def wait_for_flush(self):
self.acquire()
try:
self._thread_pool.close()
self._thread_pool.join()
except Exception:
pass
self._thread_pool = ThreadPool(processes=1)
self.release()
def _send_events(self, a_request):
try:
res = self.session.send(a_request)
if not res.ok():
print("Failed logging task to backend ({:d} lines, {})".format(len(a_request.requests), str(res.meta)))
except Exception:
print("Failed logging task to backend ({:d} lines)".format(len(a_request.requests)))
|
the-stack_0_16589 | """
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class GroupNorm32(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-5):
super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps)
self.swish = swish
def forward(self, x):
y = super().forward(x.float()).to(x.dtype)
if self.swish == 1.0:
y = F.silu(y)
elif self.swish:
y = y * F.sigmoid(y * float(self.swish))
return y
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def normalization(channels, swish=0.0):
"""
Make a standard normalization layer, with an optional swish activation.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(num_channels=channels, num_groups=32, swish=swish)
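# For example, normalization(64, swish=1.0) is a 32-group GroupNorm over 64
# channels followed by SiLU, while normalization(64) applies no activation.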
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
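# Illustrative usage of timestep_embedding (shapes follow the docstring above;
# the concrete values are arbitrary):
#
# t = th.tensor([0, 10, 100, 999])
# emb = timestep_embedding(t, dim=128)   # -> shape [4, 128]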
|
the-stack_0_16590 | import sys
from threading import RLock
from typing import Dict, List, Optional, Tuple
from ..constants import MAXIMUM_TXDATA_CACHE_SIZE_MB, MINIMUM_TXDATA_CACHE_SIZE_MB
class Node:
previous: 'Node'
next: 'Node'
key: bytes
value: bytes
def __init__(self, previous: Optional['Node']=None, next: Optional['Node']=None,
key: bytes=b'', value: bytes=b'') -> None:
self.previous = previous if previous is not None else self
self.next = previous if previous is not None else self
self.key = key
self.value = value
# Derived from functools.lru_cache; LRUCache should be considered licensed under the Python license.
# This intentionally does not have a dictionary interface for now.
class LRUCache:
def __init__(self, max_count: Optional[int]=None, max_size: Optional[int]=None) -> None:
self._cache: Dict[bytes, Node] = {}
assert max_count is not None or max_size is not None, "need some limit"
if max_size is None:
max_size = MAXIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024)
assert MINIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024) <= max_size <= \
MAXIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024), \
f"maximum size {max_size} not within min/max constraints"
self._max_size = max_size
self._max_count: int = max_count if max_count is not None else sys.maxsize
self.current_size = 0
self.hits = self.misses = 0
self._lock = RLock()
# This will be a node in a bi-directional circular linked list with itself as sole entry.
self._root = Node()
def set_maximum_size(self, maximum_size: int, resize: bool=True) -> None:
self._max_size = maximum_size
if resize:
with self._lock:
self._resize()
def get_sizes(self) -> Tuple[int, int]:
return (self.current_size, self._max_size)
def _add(self, key: bytes, value: bytes) -> Node:
most_recent_node = self._root.previous
new_node = Node(most_recent_node, self._root, key, value)
most_recent_node.next = self._root.previous = self._cache[key] = new_node
self.current_size += len(value)
return new_node
def __len__(self) -> int:
return len(self._cache)
def __contains__(self, key: bytes) -> bool:
return key in self._cache
def set(self, key: bytes, value: Optional[bytes]) -> Tuple[bool, List[Tuple[bytes, bytes]]]:
added = False
removals: List[Tuple[bytes, bytes]] = []
with self._lock:
node = self._cache.get(key, None)
if node is not None:
previous_node, next_node, old_value = node.previous, node.next, node.value
assert value != old_value, "duplicate set not supported"
previous_node.next = next_node
next_node.previous = previous_node
self.current_size -= len(old_value)
del self._cache[key]
removals.append((key, old_value))
if value is not None and len(value) <= self._max_size:
added_node = self._add(key, value)
added = True
# Discount the root node when considering count.
resize_removals = self._resize()
assert all(t[0] != added_node.key for t in resize_removals), "removed added node"
removals.extend(resize_removals)
return added, removals
def get(self, key: bytes) -> Optional[bytes]:
with self._lock:
node = self._cache.get(key)
if node is not None:
previous_node, next_node, value = node.previous, node.next, node.value
previous_node.next = next_node
next_node.previous = previous_node
most_recent_node = self._root.previous
most_recent_node.next = self._root.previous = node
node.previous = most_recent_node
node.next = self._root
self.hits += 1
return value
self.misses += 1
return None
def _resize(self) -> List[Tuple[bytes, bytes]]:
removals = []
while len(self._cache)-1 >= self._max_count or self.current_size > self._max_size:
node = self._root.next
previous_node, next_node, discard_key, discard_value = \
node.previous, node.next, node.key, node.value
previous_node.next = next_node
next_node.previous = previous_node
self.current_size -= len(discard_value)
del self._cache[discard_key]
removals.append((discard_key, discard_value))
return removals
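# Illustrative usage sketch (the 32 MB limit is an arbitrary value assumed to
# satisfy the MINIMUM/MAXIMUM constraints asserted in __init__):
#
# cache = LRUCache(max_count=1000, max_size=32 * 1024 * 1024)
# cache.set(b"tx-hash", b"raw tx bytes")   # returns (added, removals)
# cache.get(b"tx-hash")                    # b"raw tx bytes", counts as a hit
# cache.get(b"missing")                    # None, counts as a miss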
|
the-stack_0_16593 | from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer
from great_expectations.core.usage_statistics.anonymizers.store_backend_anonymizer import (
StoreBackendAnonymizer,
)
from great_expectations.data_context.store import (
EvaluationParameterStore,
ExpectationsStore,
HtmlSiteStore,
MetricStore,
Store,
ValidationsStore,
)
class StoreAnonymizer(Anonymizer):
def __init__(self, salt=None):
super(StoreAnonymizer, self).__init__(salt=salt)
# ordered bottom up in terms of inheritance order
self._ge_classes = [
ValidationsStore,
ExpectationsStore,
EvaluationParameterStore,
MetricStore,
Store,
HtmlSiteStore,
]
self._store_backend_anonymizer = StoreBackendAnonymizer(salt=salt)
def anonymize_store_info(self, store_name, store_obj):
anonymized_info_dict = dict()
anonymized_info_dict["anonymized_name"] = self.anonymize(store_name)
store_backend_obj = store_obj.store_backend
self.anonymize_object_info(
object_=store_obj,
anonymized_info_dict=anonymized_info_dict,
ge_classes=self._ge_classes,
)
anonymized_info_dict[
"anonymized_store_backend"
] = self._store_backend_anonymizer.anonymize_store_backend_info(
store_backend_obj=store_backend_obj
)
return anonymized_info_dict
|
the-stack_0_16594 | import torch
import argparse
import cv2
import os
import numpy as np
import torch
from PIL import Image
from torch.autograd import Function
from torchvision import models, transforms
# from utils.dataloader import MyDataSet
from torch import nn
class FeatureExtractor():
""" Class for extracting activations and
registering gradients from targetted intermediate layers """
def __init__(self, model, target_layers):
self.model = model
self.target_layers = target_layers
# print(target_layers)
self.gradients = []
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
outputs = []
self.gradients = []
x = x.view(x.size(0), -1)
for name, module in self.model._modules.items():
x = module(x)
# print("passed!")
if name in self.target_layers:
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class ModelOutputs():
""" Class for making a forward pass, and getting:
1. The network output.
2. Activations from intermeddiate targetted layers.
3. Gradients from intermeddiate targetted layers. """
def __init__(self, model, feature_module, target_layers):
self.model = model
self.feature_module = feature_module
self.target_layers = target_layers
self.gradients = []
def get_gradients(self):
return self.gradients
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
target_activations = []
self.gradients = []
for name, module in self.model._modules.items():
# print(name)
# print(name)
# print(self.target_layers)
if name in self.target_layers:
x = x.view(x.size(0), -1)
x = module(x)
x.register_hook(self.save_gradient)
target_activations += [x]
# elif "aux_logits1" in name.lower():
# pass
# elif "aux_logits2" in name.lower():
# pass
else:
x = module(x)
x.register_hook(self.save_gradient)
target_activations += [x]
return target_activations, x
def preprocess_image(img):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
preprocessing = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
return preprocessing(img)
def show_cam_on_image(img, mask):
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
cam = heatmap + np.float32(img)
cam = cam / np.max(cam)
return np.uint8(255 * cam)
class GradCam:
def __init__(self, model, feature_module, target_layer_names, use_cuda):
self.model = model
self.feature_module = feature_module
self.model.eval()
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
self.extractor = ModelOutputs(self.model, self.feature_module, target_layer_names)
def forward(self, input_img):
return self.model(input_img)
def __call__(self, input_img, target_category=None):
if self.cuda:
input_img = input_img.cuda()
features, output = self.extractor(input_img)
if target_category == None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
if self.cuda:
one_hot = one_hot.cuda()
one_hot = torch.sum(one_hot * output)
self.feature_module.zero_grad()
self.model.zero_grad()
one_hot.backward(retain_graph=True)
# print(len(self.extractor.get_gradients()))
# for fea in features:
# print(fea.shape)
grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()
# print(grads_val.shape)
target = features[0]
target = target.cpu().data.numpy()[0, :]
weights = np.mean(grads_val, axis=(2, 3))[0, :]
cam = np.zeros(target.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * target[i, :, :]
cam = np.maximum(cam, 0)
cam = cv2.resize(cam, input_img.shape[2:])
cam = cam - np.min(cam)
cam = cam / np.max(cam)
return cam
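    # In effect, the weight for feature map i is the spatial mean of
    # d(score)/d(A_i), and the map is ReLU(sum_i w_i * A_i) rescaled to [0, 1]
    # and resized to the input resolution, i.e. the standard Grad-CAM formulation.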
class GuidedBackpropReLU(Function):
@staticmethod
def forward(self, input_img):
positive_mask = (input_img > 0).type_as(input_img)
output = torch.addcmul(torch.zeros(input_img.size()).type_as(input_img), input_img, positive_mask)
self.save_for_backward(input_img, output)
return output
@staticmethod
def backward(self, grad_output):
input_img, output = self.saved_tensors
grad_input = None
positive_mask_1 = (input_img > 0).type_as(grad_output)
positive_mask_2 = (grad_output > 0).type_as(grad_output)
grad_input = torch.addcmul(torch.zeros(input_img.size()).type_as(input_img),
torch.addcmul(torch.zeros(input_img.size()).type_as(input_img), grad_output,
positive_mask_1), positive_mask_2)
return grad_input
class GuidedBackpropReLUModel:
def __init__(self, model, use_cuda):
self.model = model
self.model.eval()
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
def recursive_relu_apply(module_top):
for idx, module in module_top._modules.items():
recursive_relu_apply(module)
if module.__class__.__name__ == 'ReLU':
module_top._modules[idx] = GuidedBackpropReLU.apply
# replace ReLU with GuidedBackpropReLU
recursive_relu_apply(self.model)
def forward(self, input_img):
return self.model(input_img)
def __call__(self, input_img, target_category=None):
if self.cuda:
input_img = input_img.cuda()
input_img = input_img.requires_grad_(True)
output = self.forward(input_img)
if target_category == None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
if self.cuda:
one_hot = one_hot.cuda()
one_hot = torch.sum(one_hot * output)
one_hot.backward(retain_graph=True)
output = input_img.grad.cpu().data.numpy()
output = output[0, :, :, :]
return output
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--use-cuda', action='store_true', default=True,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--weight-path', type=str, default='./model/InceptionV2_1209/net.pkl',
help='pretrained weight path')
parser.add_argument('--image-path', type=str, default='./data/para_test',
help='Input image path')
parser.add_argument('--output-path', type=str, default='./results/0117test/Ip2',
help='Input image path')
args = parser.parse_args()
args.use_cuda = args.use_cuda and torch.cuda.is_available()
if args.use_cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
return args
def deprocess_image(img):
""" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
img = img - np.mean(img)
img = img / (np.std(img) + 1e-5)
img = img * 0.1
img = img + 0.5
img = np.clip(img, 0, 1)
return np.uint8(img * 255)
def get_last_conv_name(net):
"""
    Get the name of the last convolutional layer in the network.
:param net:
:return:
"""
layer_name = None
for name, m in net.named_modules():
if isinstance(m, nn.Conv2d):
layer_name = name
return layer_name
if __name__ == '__main__':
""" python grad_cam.py <path_to_image>
1. Loads an image with opencv.
2. Preprocesses it for VGG19 and converts to a pytorch variable.
3. Makes a forward pass to find the category index with the highest score,
and computes intermediate activations.
Makes the visualization. """
args = get_args()
model = torch.load(args.weight_path)
layer4 = None
name4 = None
for name, module in model._modules.items():
layer4 = module
# break
for name, module in model._modules.items():
name4 = name
# print(name)
# input()
grad_cam = GradCam(model=model, feature_module=layer4,
target_layer_names=[name4], use_cuda=args.use_cuda)
patht = args.image_path
gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
if not os.path.exists(patht):
os.mkdir(patht)
for dir in os.listdir(patht):
folderpath = patht + '/' + dir
outfolderpath = args.output_path + '/' + dir
if not os.path.exists(outfolderpath):
os.mkdir(outfolderpath)
# print(outfolderpath)
# input()
count = 0
oplen = min(len(os.listdir(folderpath)), 20)
for img_name in os.listdir(folderpath):
count += 1
if count > oplen:
break
print("{}/{}".format(count, oplen))
image_path = folderpath + '/' + img_name
iop = outfolderpath + '/' + img_name.split('.')[0]
# print(image_path)
# print(iop)
img = Image.open(image_path)
img = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224)
])(img)
# print(type(img))
# input()
img = np.float32(img) / 255
# Opencv loads as BGR:
img = img[:, :, ::-1]
cv2.imwrite(iop + "_resize.jpg", np.uint8(255 * img))
# input()
input_img = preprocess_image(Image.open(image_path))
# print(input_img)
# print(type(input_img))
# print(input_img.shape)
input_img = input_img.unsqueeze(0)
# If None, returns the map for the highest scoring category.
# Otherwise, targets the requested category.
target_category = None
grayscale_cam = grad_cam(input_img, target_category)
grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))
cam = show_cam_on_image(img, grayscale_cam)
gb = gb_model(input_img, target_category=target_category)
gb = gb.transpose((1, 2, 0))
cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
cam_gb = deprocess_image(cam_mask * gb)
gb = deprocess_image(gb)
cv2.imwrite(iop + "_cam.jpg", cam)
cv2.imwrite(iop + '_gb.jpg', gb)
cv2.imwrite(iop + '_cam_gb.jpg', cam_gb)
|
the-stack_0_16595 | from django.conf.urls import include, url
from core.tests.api import Api, NoteResource, UserResource
from core.tests.resources import SubjectResource
api = Api()
api.register(NoteResource())
api.register(UserResource())
api.register(SubjectResource())
urlpatterns = [
url(r'^api/', include(api.urls)),
]
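# Note (an assumption, not stated in this module): if ``Api`` here wraps
# tastypie's ``Api`` with its default ``api_name`` of 'v1', each registered
# resource is exposed under ``/api/v1/<resource_name>/`` together with a
# ``/api/v1/<resource_name>/schema/`` endpoint.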
|
the-stack_0_16596 | from abc import (
ABC,
abstractmethod
)
from argparse import (
ArgumentParser,
Namespace,
_SubParsersAction,
)
from enum import (
auto,
Enum,
)
import logging
from multiprocessing import (
Process
)
from typing import (
Any,
Dict,
    NamedTuple,
    Optional,
)
from lahja.base import EndpointAPI
from trinity.config import (
TrinityConfig
)
from trinity.extensibility.events import (
ComponentStartedEvent,
)
from trinity.extensibility.exceptions import (
InvalidComponentStatus,
)
from trinity._utils.mp import (
ctx,
)
from trinity._utils.logging import (
setup_log_levels,
setup_queue_logging,
)
from trinity._utils.os import (
friendly_filename_or_url,
)
from trinity._utils.profiling import (
profiler,
)
class ComponentStatus(Enum):
NOT_READY = auto()
READY = auto()
STARTED = auto()
STOPPED = auto()
INVALID_START_STATUS = (ComponentStatus.NOT_READY, ComponentStatus.STARTED,)
class TrinityBootInfo(NamedTuple):
args: Namespace
trinity_config: TrinityConfig
    boot_kwargs: Optional[Dict[str, Any]] = None
class BaseComponent(ABC):
_status: ComponentStatus = ComponentStatus.NOT_READY
def __init__(self, boot_info: TrinityBootInfo) -> None:
self.boot_info = boot_info
@property
@abstractmethod
def event_bus(self) -> EndpointAPI:
...
@property
@abstractmethod
def name(self) -> str:
"""
Describe the name of the component.
"""
...
@property
def normalized_name(self) -> str:
"""
The normalized (computer readable) name of the component
"""
return friendly_filename_or_url(self.name)
@classmethod
def get_logger(cls) -> logging.Logger:
return logging.getLogger(f'trinity.extensibility.component(#{cls.__name__})')
@property
def logger(self) -> logging.Logger:
return self.get_logger()
@property
def running(self) -> bool:
"""
Return ``True`` if the ``status`` is ``ComponentStatus.STARTED``,
otherwise return ``False``.
"""
return self._status is ComponentStatus.STARTED
@property
def status(self) -> ComponentStatus:
"""
Return the current :class:`~trinity.extensibility.component.ComponentStatus`
of the component.
"""
return self._status
def ready(self, manager_eventbus: EndpointAPI) -> None:
"""
Set the ``status`` to ``ComponentStatus.READY`` and delegate to
:meth:`~trinity.extensibility.component.BaseComponent.on_ready`
"""
self._status = ComponentStatus.READY
self.on_ready(manager_eventbus)
def on_ready(self, manager_eventbus: EndpointAPI) -> None:
"""
Notify the component that it is ready to bootstrap itself.
The ``manager_eventbus`` refers to the instance of the
:class:`~lahja.endpoint.Endpoint` that the
:class:`~trinity.extensibility.component_manager.ComponentManager` uses which may or may not
be the same :class:`~lahja.endpoint.Endpoint` as the component uses depending on the type
of the component. The component should use this :class:`~lahja.endpoint.Endpoint` instance
to listen for events *before* the component has started.
"""
pass
@classmethod
def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
"""
Give the component a chance to amend the Trinity CLI argument parser. This hook is called
before :meth:`~trinity.extensibility.component.BaseComponent.on_ready`
"""
pass
def start(self) -> None:
"""
Delegate to :meth:`~trinity.extensibility.component.BaseComponent.do_start` and set
``running`` to ``True``. Broadcast a
:class:`~trinity.extensibility.events.ComponentStartedEvent` on the event bus and hence
allow other components to act accordingly.
"""
if self._status in INVALID_START_STATUS:
raise InvalidComponentStatus(
f"Can not start component when the component status is {self.status}"
)
self._status = ComponentStatus.STARTED
self.do_start()
self.event_bus.broadcast_nowait(
ComponentStartedEvent(type(self))
)
self.logger.info("Component started: %s", self.name)
def do_start(self) -> None:
"""
Perform the actual component start routine. In the case of a `BaseIsolatedComponent` this
method will be called in a separate process.
This method should usually be overwritten by subclasses with the exception of components
that set ``func`` on the ``ArgumentParser`` to redefine the entire host program.
"""
pass
class BaseMainProcessComponent(BaseComponent):
"""
    A :class:`~trinity.extensibility.component.BaseMainProcessComponent` takes over the whole main
    process early, before any of the subsystems have started. In that sense it redefines the whole meaning
of the ``trinity`` command.
"""
@property
def event_bus(self) -> EndpointAPI:
raise NotImplementedError('BaseMainProcessComponents do not have event busses')
class BaseIsolatedComponent(BaseComponent):
"""
A :class:`~trinity.extensibility.component.BaseIsolatedComponent` runs in an isolated process
and hence provides security and flexibility by not making assumptions about its internal
operations.
Such components are free to use non-blocking asyncio as well as synchronous calls. When an
    isolated component is stopped it first receives a SIGINT followed by a SIGTERM soon after.
It is up to the component to handle these signals accordingly.
"""
_process: Process = None
_event_bus: EndpointAPI = None
@property
def process(self) -> Process:
"""
Return the ``Process`` created by the isolated component.
"""
return self._process
def start(self) -> None:
"""
Prepare the component to get started and eventually call ``do_start`` in a separate process.
"""
self._status = ComponentStatus.STARTED
self._process = ctx.Process(
target=self._prepare_spawn,
)
self._process.start()
self.logger.info("Component started: %s (pid=%d)", self.name, self._process.pid)
def _prepare_spawn(self) -> None:
if self.boot_info.boot_kwargs.pop('profile', False):
with profiler(f'profile_{self.normalized_name}'):
self._spawn_start()
else:
self._spawn_start()
@abstractmethod
def _spawn_start(self) -> None:
...
def stop(self) -> None:
"""
        Set the ``status`` to ``STOPPED`` but rely on the
:class:`~trinity.extensibility.component_manager.ComponentManager` to tear down the process.
This allows isolated components to be taken down concurrently without depending on a running
event loop.
"""
self._status = ComponentStatus.STOPPED
def _setup_logging(self) -> None:
log_queue = self.boot_info.boot_kwargs['log_queue']
level = self.boot_info.boot_kwargs.get('log_level', logging.INFO)
setup_queue_logging(log_queue, level)
if self.boot_info.args.log_levels:
setup_log_levels(self.boot_info.args.log_levels)
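# Illustrative sketch only (hypothetical class, not part of this module): a
# minimal isolated component would provide the two abstract properties plus
# ``_spawn_start``, roughly along these lines:
#
#     class ExampleComponent(BaseIsolatedComponent):
#
#         @property
#         def event_bus(self) -> EndpointAPI:
#             return self._event_bus
#
#         @property
#         def name(self) -> str:
#             return "Example"
#
#         def _spawn_start(self) -> None:
#             self._setup_logging()
#             self.do_start()
#
#         def do_start(self) -> None:
#             self.logger.info("example component running")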
|
the-stack_0_16598 | import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
Callable,
Dict,
FrozenSet,
Hashable,
List,
Optional,
Sequence,
Set,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, properties
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, rewrite_axis_style_signature
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_period_arraylike,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas._typing import Dtype, FilePathOrBuffer
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.index import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict() # type: Dict[str, str]
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
# sentinel value to use as kwarg in place of None when None has special meaning
# and needs to be distinguished from a user explicitly passing None.
sentinel = object()
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError(
"cannot replace {0} with method {1} on a {2}".format(
to_replace, method, type(self).__name__
)
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin):
"""
N-dimensional analogue of DataFrame. Store multi-dimensional in a
size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
] # type: List[str]
_internal_names_set = set(_internal_names) # type: Set[str]
_accessors = set() # type: Set[str]
_deprecations = frozenset(
["as_blocks", "blocks", "is_copy"]
) # type: FrozenSet[str]
_metadata = [] # type: List[str]
_is_copy = None
_data = None # type: BlockManager
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
axes: Optional[List[Index]] = None,
copy: bool = False,
dtype: Optional[Dtype] = None,
fastpath: bool = False,
):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=self._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def is_copy(self):
"""
Return the copy.
"""
warnings.warn(
"Attribute 'is_copy' is deprecated and will be removed "
"in a future version.",
FutureWarning,
stacklevel=2,
)
return self._is_copy
@is_copy.setter
def is_copy(self, msg):
warnings.warn(
"Attribute 'is_copy' is deprecated and will be removed "
"in a future version.",
FutureWarning,
stacklevel=2,
)
self._is_copy = msg
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented"
" in the {0} constructor".format(self.__class__.__name__)
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self):
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
@classmethod
def _setup_axes(
cls,
axes,
info_axis=None,
stat_axis=None,
aliases=None,
axes_are_reversed=False,
build_axes=True,
ns=None,
docs=None,
):
"""
Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
axes_are_reversed : bool
Whether to treat passed axes as reversed (DataFrame).
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, "_typ", cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict)
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
def _construct_axes_from_arguments(
self, args, kwargs, require_all=False, sentinel=None
):
"""Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# if we have an alias for this axis
alias = self._AXIS_IALIASES.get(a)
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError(
"arguments are mutually exclusive "
"for [%s,%s]" % (a, alias)
)
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
continue
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments specified!")
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return axes, kwargs
@classmethod
def _from_axes(cls, data, axes, **kwargs):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data, **kwargs)
else:
if cls._AXIS_REVERSED:
axes = axes[::-1]
d = cls._construct_axes_dict_from(cls, axes, copy=False)
d.update(kwargs)
return cls(data, **d)
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError("No axis named {0} for object type {1}".format(axis, cls))
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError("No axis named {0} for object type {1}".format(axis, cls))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
def _get_axis_resolvers(self, axis):
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiiindex
key = "{prefix}level_{i}".format(prefix=prefix, i=i)
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self):
d = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return d
def _get_space_character_free_column_resolvers(self):
"""Return the space character free column resolvers of a dataframe.
Column names with spaces are 'cleaned up' so that they can be referred
to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.common import _remove_spaces_column_name
return {_remove_spaces_column_name(k): v for k, v in self.items()}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self):
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
# the block manager shows then reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self):
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self):
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self):
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self):
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for column or row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to update. The value 0 identifies the rows, and 1
identifies the columns.
inplace : bool, default False
Whether to return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of same type as caller if inplace=False, None otherwise.
See Also
--------
DataFrame.rename_axis : Alter the name of the index or columns.
Examples
--------
**Series**
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0)
a 1
b 2
c 3
dtype: int64
**DataFrame**
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and "labels" as second, is still supported '
"but will be deprecated in a future version of pandas.",
FutureWarning,
stacklevel=2,
)
labels, axis = axis, labels
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
def transpose(self, *args, **kwargs):
"""
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : bool, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
**kwargs
Additional keyword arguments will be passed to the function.
Returns
-------
y : same as input
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
"""
# construct the args
axes, kwargs = self._construct_axes_from_arguments(
args, kwargs, require_all=True
)
axes_names = tuple(self._get_axis_name(axes[a]) for a in self._AXIS_ORDERS)
axes_numbers = tuple(self._get_axis_number(axes[a]) for a in self._AXIS_ORDERS)
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError("Must specify %s unique axes" % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(
self, [self._get_axis(x) for x in axes_names]
)
new_values = self.values.transpose(axes_numbers)
if kwargs.pop("copy", None) or (len(args) and args[-1]):
new_values = new_values.copy()
nv.validate_transpose(tuple(), kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange axes and swap values axes appropriately.
Returns
-------
y : same as input
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
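    # For a 2-D frame, swapaxes(0, 1) interchanges index and columns, so
    # ``df.swapaxes('index', 'columns')`` produces the same labels and values
    # as ``df.T`` (with ``copy=True`` returning a copied transpose).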
def droplevel(self, level, axis=0):
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
        DataFrame
            DataFrame with requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
        >>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
try:
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
except Exception:
return self
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : same type as caller (new object)
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
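    # For example, on an object indexed by a MultiIndex with levels
    # ['year', 'city'], ``obj.swaplevel()`` returns the same data indexed by
    # ['city', 'year']; only the level order changes, not the values.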
# ----------------------------------------------------------------------
# Rename
def rename(self, *args, **kwargs):
"""
Alter axes input function or functions. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
level = kwargs.pop("level", None)
axis = kwargs.pop("axis", None)
errors = kwargs.pop("errors", "ignore")
if axis is not None:
# Validate the axis
self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
if com.count_not_none(*axes.values()) == 0:
raise TypeError("must pass an index to rename")
self._consolidate_inplace()
result = self if inplace else self.copy(deep=copy)
# start in the axis order to eliminate too many copies
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
f = com.get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
# GH 13473
if not callable(v):
indexer = self.axes[axis].get_indexer_for(v)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label for index, label in enumerate(v) if indexer[index] == -1
]
raise KeyError("{} not found in axis".format(missing_labels))
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=sentinel, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
        The second calling convention will modify the names of the
        corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=sentinel
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not sentinel:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is sentinel:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
                       num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other):
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Return True if left and right Series are equal,
False otherwise.
assert_frame_equal : Return True if left and right DataFrames are
equal, False otherwise.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = com.values_from_object(self)
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(
"Unary negative expects numeric dtype, not {}".format(values.dtype)
)
return self.__array_wrap__(arr)
def __pos__(self):
values = com.values_from_object(self)
if is_bool_dtype(values) or is_period_arraylike(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(
"Unary plus expects numeric dtype, not {}".format(values.dtype)
)
return self.__array_wrap__(arr)
def __invert__(self):
try:
arr = operator.inv(com.values_from_object(self))
return self.__array_wrap__(arr)
except Exception:
# inv fails with 0 len
if not np.prod(self.shape):
return self
raise
def __nonzero__(self):
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(
self.__class__.__name__
)
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
Returns
-------
bool
Same single boolean value converted to bool type.
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__)
)
self.__nonzero__()
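    # For example, pd.Series([True]).bool() returns True, while calling
    # .bool() on pd.Series([True, False]) raises a ValueError because the
    # object does not contain exactly one boolean element.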
def __abs__(self):
return self.abs()
def __round__(self, decimals=0):
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0):
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key, axis=0):
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
def _check_label_or_level_ambiguity(self, key, axis=0):
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
            # Build an informative and grammatical error message
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
"'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label, which is ambiguous."
).format(
key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type,
)
raise ValueError(msg)
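    # For example, a DataFrame whose index has a level named 'a' and which
    # also has a column 'a' is ambiguous for axis=0, so
    # _check_label_or_level_ambiguity raises the ValueError built above.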
def _get_label_or_level_values(self, key, axis=0):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
        ValueError
            if `key` is ambiguous, i.e. it matches both a level of `axis` and
            a label of the other axis
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to "
"each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
"The {label_axis_name} label '{key}' "
"is not unique.{multi_message}"
).format(
key=key,
label_axis_name=label_axis_name,
multi_message=multi_message,
)
)
return values
def _drop_labels_or_levels(self, keys, axis=0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
"levels for axis {axis}: {invalid_keys}"
).format(axis=axis, invalid_keys=invalid_keys)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
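    # For example, given a frame indexed by a level named 'year' that also has
    # a column 'price', ``frame._drop_labels_or_levels(['year', 'price'])``
    # resets (and drops) the 'year' index level and drops the 'price' column
    # in a single pass.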
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
"{0!r} objects are mutable, thus they cannot be"
" hashed".format(self.__class__.__name__)
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
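    # For example, ``list(df.keys())`` yields the column labels of a DataFrame,
    # while ``list(s.keys())`` yields the index labels of a Series.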
def items(self):
"""Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
"""
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key):
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True, if not return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None):
return com.values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
# ideally we would define this to avoid the getattr checks, but
    # it is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
def to_dense(self):
"""
Return dense representation of Series/DataFrame (as opposed to sparse).
.. deprecated:: 0.25.0
Returns
-------
%(klass)s
Dense %(klass)s.
"""
warnings.warn(
"DataFrame/Series.to_dense is deprecated "
"and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
# compat
return self
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self):
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata, **meta)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
self._unpickle_series_compat(state)
elif isinstance(state[0], dict):
if len(state) == 5:
self._unpickle_sparse_frame_compat(state)
else:
self._unpickle_frame_compat(state)
elif len(state) == 4:
self._unpickle_panel_compat(state)
elif len(state) == 2:
self._unpickle_series_compat(state)
else: # pragma: no cover
# old pickling format, for compatibility
self._unpickle_matrix_compat(state)
self._item_cache = {}
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self):
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = "[%s]" % ",".join(map(pprint_thing, self))
return "%s(%s)" % (self.__class__.__name__, prepr)
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of string is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
.. versionadded:: 0.20.0.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
    Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
):
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf=None,
orient=None,
date_format=None,
double_precision=10,
force_ascii=True,
date_unit="ms",
default_handler=None,
lines=False,
compression="infer",
index=True,
):
"""
Convert the object to a JSON string.
        Note that NaN values and None will be converted to null, and datetime
        objects will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
            If 'orient' is 'records', write out line-delimited JSON format.
            Will raise ValueError if 'orient' is anything else, since the
            other formats are not list-like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
)
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
        please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
            If True, rows that are entirely NaN will not be written to the store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(path_or_buf, key, self, **kwargs)
def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs):
"""
Serialize object to input file path using msgpack format.
.. deprecated:: 0.25.0
to_msgpack is deprecated and will be removed in a future version.
It is recommended to use pyarrow for on-the-wire transmission of
pandas objects.
Parameters
----------
        path_or_buf : str, buffer-like, or None
            If None, return the generated bytes.
        append : bool, default False
            Whether to append to an existing msgpack.
        compress : {'zlib', 'blosc'}, optional
            Type of compressor. Default is None (no compression).
Returns
-------
None or bytes
If path_or_buf is None, returns the resulting msgpack format as a
byte string. Otherwise returns None.
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding, **kwargs)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists="fail",
index=True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] http://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(self, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
.. versionadded:: 0.20.0
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
            - True, use the provided separator, writing in a csv format to
              allow easy pasting into Excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
        We can omit the index by passing the keyword `index` and setting
        it to False.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}).set_index(['date',
... 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
            module. When set to False, it prevents escaping LaTeX special
            characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
.. versionadded:: 0.20.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Hashable]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Hashable]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Dict[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 0.25.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
            this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
# create 'out.zip' containing 'out.csv'
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Fancy Indexing
@classmethod
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
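    # _create_indexer is the hook used to attach indexing accessors (e.g.
    # ``.loc``/``.iloc``) as class-level properties. A hedged sketch with a
    # hypothetical indexer class; the real registration lives elsewhere:
    #
    #     class MyIndexer:
    #         def __init__(self, name, obj):
    #             self.name = name
    #             self.obj = obj
    #
    #     NDFrame._create_indexer("my_indexer", MyIndexer)
    #     # instances of the class now expose ``.my_indexer``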
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self):
"""Reset the cacher."""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self):
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
            # we are trying to reference a dead referent, hence a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except Exception:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self):
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(self, indices, axis=0, is_copy=True, **kwargs):
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool, default True
Whether to return a copy of the original object or not.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
result = self._constructor(new_data).__finalize__(self)
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level=True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
            which levels are used. Levels can be referred to by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
        `xs` cannot be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
indexer = [slice(None)] * self.ndim
indexer[axis] = loc
indexer = tuple(indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
return self.take(inds, axis=axis)
else:
return self.take(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
            # that means there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return com.maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs = xs # type: Callable
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
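    # The item cache above means repeated label lookups return the same boxed
    # object until the cache is invalidated. A hedged illustration with a
    # hypothetical frame:
    #
    #     >>> df = pd.DataFrame({"a": [1, 2]})
    #     >>> df["a"] is df["a"]      # served from _item_cache
    #     True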
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self.take(item, axis=self._info_axis_number)
return lower
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self, slobj: slice, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref=None, copy=True):
if not copy:
self._is_copy = None
else:
if ref is not None:
self._is_copy = weakref.ref(ref)
else:
self._is_copy = None
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
        Should be called just prior to setting a value.
        Will return a boolean if we are a view and are cached, but of a
        single dtype, meaning that the cacher should be updated following
        the setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Parameters
----------
stacklevel : int, default 4
the level to show of the stack when the error is output
t : str, the type of setting error
force : bool, default False
If True, then force showing an error.
validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while other are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are view
# (which is not # generally guaranteed but is usually True. However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
def __delitem__(self, key):
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
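    # A hedged sketch of the MultiIndex shorthand handled above: deleting by a
    # partial key removes every column whose leading levels match it (the
    # frame is hypothetical):
    #
    #     >>> cols = pd.MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "x")])
    #     >>> df = pd.DataFrame([[1, 2, 3]], columns=cols)
    #     >>> del df["a"]             # drops ("a", "x") and ("a", "y")
    #     >>> list(df.columns)
    #     [('b', 'x')]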
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
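    # A hedged example of the ``get`` fallback documented above (the frame is
    # hypothetical):
    #
    #     >>> df = pd.DataFrame({"a": [1, 2]})
    #     >>> df.get("b", default="missing")
    #     'missing'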
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
def _drop_axis(self, labels, axis, level=None, errors="raise"):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError("{} not found in axis".format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError("{} not found in axis".format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
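    # _drop_axis falls back to a boolean indexer for non-unique axes, so all
    # rows/columns sharing a dropped label are removed. A hedged illustration
    # with a hypothetical frame carrying a duplicated index label:
    #
    #     >>> df = pd.DataFrame({"x": [1, 2, 3]}, index=["a", "a", "b"])
    #     >>> list(df.drop("a").index)    # both "a" rows are dropped
    #     ['b']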
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
def sort_values(
self,
by=None,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
            orders. If this is a list of bools, it must match the length of
            the `by` argument.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
            Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
):
"""
Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
            Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted index if inplace=False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
if level is not None:
raise NotImplementedError("level is not implemented")
if inplace:
raise NotImplementedError("inplace is not implemented")
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
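    # A hedged example of the behaviour sort_index provides (the frame is
    # hypothetical; subclasses may override this generic implementation):
    #
    #     >>> df = pd.DataFrame({"x": [1, 2, 3]}, index=["b", "c", "a"])
    #     >>> list(df.sort_index().index)
    #     ['a', 'b', 'c']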
def reindex(self, *args, **kwargs):
"""
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({
... 'http_status': [200,200,404,404,301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to back-propagate the next valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
try:
return self._reindex_multi(axes, copy, fill_value)
except Exception:
pass
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
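# Sketch of the identical-axes short circuit above: when every requested
# axis already matches the existing labels, reindex returns a copy (or
# self when copy=False) and skips _reindex_axes entirely. Hypothetical
# frame:
#
#   >>> df = pd.DataFrame({'a': [1, 2]})
#   >>> df.reindex(index=df.index) is df
#   False
#   >>> df.reindex(index=df.index, copy=False) is df
#   True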
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level):
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
return NotImplemented
def _reindex_with_indexers(
self, reindexers, fill_value=None, copy=False, allow_dups=False
):
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
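# Sketch of the `items` branch above: labels come back in the order given
# in `items`, and labels missing from the axis are silently dropped.
# Hypothetical frame:
#
#   >>> df = pd.DataFrame({'one': [1], 'two': [2], 'three': [3]})
#   >>> list(df.filter(items=['three', 'missing', 'one']).columns)
#   ['three', 'one']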
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return self.iloc[:n]
def tail(self, n=5):
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
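# Sketch of the slicing above: head/tail accept negative n like Python
# slices, and tail(0) returns an empty object of the same type.
# Hypothetical frame:
#
#   >>> df = pd.DataFrame({'a': range(5)})
#   >>> len(df.head(-2)), len(df.tail(0))
#   (3, 0)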
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError(
"String passed to weights not a valid column"
)
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector may not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False)
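# Sketch of the weights handling above: weights that do not sum to 1 are
# renormalized, NaN weights become 0, and negative or infinite weights
# raise ValueError. Hypothetical frame:
#
#   >>> df = pd.DataFrame({'x': [1, 2, 3, 4]})
#   >>> df.sample(n=2, weights=[2, 2, 4, 0], random_state=1)  # same as [.25, .25, .5, 0]
#   >>> df.sample(n=2, weights=[1, -1, 0, 0])                 # raises ValueError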
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply
DataFrame.applymap
Series.map
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
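# Sketch of the (callable, data_keyword) form described in the shared doc;
# `summarize` is a hypothetical helper that expects the frame as its
# `data` keyword:
#
#   >>> def summarize(threshold, data):
#   ...     return (data > threshold).sum()
#   >>> df.pipe((summarize, 'data'), 0)   # df is injected as the `data` kwarg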
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values
and that has the same axis length as self.
.. versionadded:: 0.20.0
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(self, other, method=None, **kwargs):
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name ; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
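# Sketch of the warning branch above: assigning a list-like to a brand-new
# attribute on a DataFrame sets a plain attribute (and warns); it does not
# create a column. Item assignment does. Hypothetical frame:
#
#   >>> df = pd.DataFrame({'a': [1, 2]})
#   >>> df.b = [3, 4]        # UserWarning: no column is created
#   >>> df['b'] = [3, 4]     # this creates the column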
def _dir_additions(self):
""" add the string-like attributes from the info_axis.
If info_axis is a MultiIndex, its first-level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self):
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
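# Sketch of what consolidation does: adding columns one at a time can
# leave several blocks of the same dtype, and _consolidate merges them
# into a single ndarray per dtype. Hypothetical frame; block counts are
# typical, not guaranteed:
#
#   >>> df = pd.DataFrame({'a': [1.0, 2.0]})
#   >>> df['b'] = [3.0, 4.0]
#   >>> len(df._data.blocks)                  # typically 2 here
#   >>> len(df._consolidate()._data.blocks)   # 1 after consolidation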
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
@property
def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except Exception:
pass
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array representation.
.. deprecated:: 0.23.0
Use :meth:`DataFrame.values` instead.
Parameters
----------
columns : list, optional, default:None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
See Also
--------
DataFrame.values
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
will result in a float64 dtype.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
"""
warnings.warn(
"Method .as_matrix will be removed in a future version. "
"Use .values instead.",
FutureWarning,
stacklevel=2,
)
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED, items=columns)
@property
def values(self):
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
A DataFrame with mixed type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self):
"""internal implementation"""
return self.values
@property
def _get_values(self):
# compat
return self.values
def get_values(self):
"""
Return an ndarray after converting sparse values to dense.
.. deprecated:: 0.25.0
Use ``np.asarray(..)`` or :meth:`DataFrame.values` instead.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False],
... 'c': [1.0, 2.0]})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
>>> df.get_values()
array([[1, True, 1.0], [2, False, 2.0]], dtype=object)
>>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]),
... "c": [1.0, 2.0, 3.0]})
>>> df
a c
0 1.0 1.0
1 NaN 2.0
2 NaN 3.0
>>> df.get_values()
array([[ 1., 1.],
[nan, 2.],
[nan, 3.]])
"""
warnings.warn(
"The 'get_values' method is deprecated and will be removed in a "
"future version. Use '.values' or 'np.asarray(..)' instead.",
FutureWarning,
stacklevel=2,
)
return self._internal_get_values()
def _internal_get_values(self):
return self.values
def get_dtype_counts(self):
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.25.0
Use `.dtypes.value_counts()` instead.
Returns
-------
dtype : Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_dtype_counts()
float64 1
int64 1
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()`",
FutureWarning,
stacklevel=2,
)
from pandas import Series
return Series(self._data.get_dtype_counts())
def get_ftype_counts(self):
"""
Return counts of unique ftypes in this object.
.. deprecated:: 0.23.0
This is useful for SparseDataFrame or for DataFrames containing
sparse arrays.
Returns
-------
dtype : Series
Series with the count of columns with each type and
sparsity (dense/sparse).
See Also
--------
ftypes : Return ftypes (indication of sparse/dense and dtype) in
this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_ftype_counts() # doctest: +SKIP
float64:dense 1
int64:dense 1
object:dense 1
dtype: int64
"""
warnings.warn(
"get_ftype_counts is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
from pandas import Series
return Series(self._data.get_ftype_counts())
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
DataFrame.ftypes : Dtype and sparsity information.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
@property
def ftypes(self):
"""
Return the ftypes (indication of sparse/dense and dtype) in DataFrame.
.. deprecated:: 0.25.0
Use :func:`dtypes` instead.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type and indication of sparse/dense of each column.
See Also
--------
DataFrame.dtypes: Series with just dtype information.
SparseDataFrame : Container for sparse tabular data.
Notes
-----
Sparse data should have the same dtypes as its dense representation.
Examples
--------
>>> arr = np.random.RandomState(0).randn(100, 4)
>>> arr[arr < .8] = np.nan
>>> pd.DataFrame(arr).ftypes
0 float64:dense
1 float64:dense
2 float64:dense
3 float64:dense
dtype: object
>>> pd.SparseDataFrame(arr).ftypes # doctest: +SKIP
0 float64:sparse
1 float64:sparse
2 float64:sparse
3 float64:sparse
dtype: object
"""
warnings.warn(
"DataFrame.ftypes is deprecated and will "
"be removed in a future version. "
"Use DataFrame.dtypes instead.",
FutureWarning,
stacklevel=2,
)
from pandas import Series
return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_)
def as_blocks(self, copy=True):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
.. deprecated:: 0.21.0
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Parameters
----------
copy : bool, default True
Returns
-------
dict
Mapping dtype -> Constructor Types.
"""
warnings.warn(
"as_blocks is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
return self._to_dict_of_blocks(copy=copy)
@property
def blocks(self):
"""
Internal property, property synonym for as_blocks().
.. deprecated:: 0.21.0
"""
return self.as_blocks()
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
for k, v, in self._data.to_dict(copy=copy).items()
}
def astype(self, dtype, copy=True, errors="raise", **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(
dtype=dtype[col_name], copy=copy, errors=errors, **kwargs
)
)
else:
results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = (
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
)
else:
# else, only a single dtype is given
new_data = self._data.astype(
dtype=dtype, copy=copy, errors=errors, **kwargs
)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self, deep=True):
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by the shallow copy and the original are reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self, deep=True):
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
if memo is None:
memo = {}
return self.copy(deep=True)
def _convert(
self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True
):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
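# Sketch of how _convert differs from infer_objects below: with
# numeric=True, _convert also coerces numeric-looking strings, while
# infer_objects only performs the soft conversion and leaves strings
# alone. Hypothetical frame:
#
#   >>> df = pd.DataFrame({'a': ['1', '2']})
#   >>> df._convert(numeric=True).dtypes    # 'a' becomes a numeric dtype
#   >>> df.infer_objects().dtypes           # 'a' stays object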
def infer_objects(self):
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
# ----------------------------------------------------------------------
# Filling NA's
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : %(axes_single_arg)s
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
%(klass)s
Object with missing values filled.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
# set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
from pandas import Series
value = Series(value)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
'"{0}"'.format(type(value).__name__)
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s
Object with missing values filled.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s
Object with missing values filled.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
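# Minimal usage sketch for ffill/bfill, whose docstrings have no Examples
# section; the series below is hypothetical:
#
#   >>> s = pd.Series([1.0, np.nan, np.nan, 3.0])
#   >>> s.ffill().tolist()
#   [1.0, 1.0, 1.0, 3.0]
#   >>> s.bfill().tolist()
#   [1.0, 3.0, 3.0, 3.0]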
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
If True, performs the replacement in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
The method to use for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` do not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. So this is why the 'a' values are being replaced by 10
in rows 1 and 2 and 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None'
' and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values"
" of the top level mapping must be "
"mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
"Replacement lists must match "
"in length. Expecting %d got %d "
% (len(to_replace), len(value))
)
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
"'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a"
" {0!r}".format(type(regex).__name__)
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
msg = ('Invalid "to_replace" type: ' "{0!r}").format(
type(to_replace).__name__
)
raise TypeError(msg) # pragma: no cover
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
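    # Usage sketch (illustrative comments, assuming a DataFrame ``df`` with a
    # column 'A'): the nested-mapping branch above splits a column -> {old: new}
    # mapping into parallel ``to_replace``/``value`` dicts before recursing, so
    #
    #   df.replace({'A': {0: 100, 4: 400}})
    #
    # is handled roughly like
    #
    #   df.replace(to_replace={'A': [0, 4]}, value={'A': [100, 400]})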
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if axis == 0:
ax = self._info_axis_name
_maybe_transposed_self = self
elif axis == 1:
_maybe_transposed_self = self.T
ax = 1
else:
_maybe_transposed_self = self
ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating.".format(method=method)
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
values=_maybe_transposed_self,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs
)
if inplace:
if axis == 1:
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
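    # Usage sketch (illustrative comments, assuming ``pd``/``np`` imports): with
    # ``axis=1`` the frame is transposed above, interpolated, and transposed
    # back, so values are filled along each row rather than down each column.
    #
    #   df = pd.DataFrame([[1.0, np.nan, 3.0]])
    #   df.interpolate(axis=1)   # -> a single row [1.0, 2.0, 3.0]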
# ----------------------------------------------------------------------
    # Timeseries methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
        considering only the subset of columns (if not `None`) is taken.
        If there is no good value, NaN is returned for a Series, or
        a Series of NaN values for a DataFrame.
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
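    # Usage sketch (illustrative comments): the return type follows ``where`` --
    # a scalar ``where`` yields ``data.iloc[-1]`` (a scalar for a Series, a row
    # for a DataFrame), while a list-like ``where`` yields an object indexed by
    # ``where``.
    #
    #   s = pd.Series([1.0, 2.0], index=[10, 20])
    #   s.asof(15)         # -> 1.0 (scalar)
    #   s.asof([15, 25])   # -> Series with index [15, 25] and values [1.0, 2.0]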
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
        NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self):
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self):
return isna(self).__finalize__(self)
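    # Usage sketch (illustrative comments): ``inf`` is not NA by default; the
    # ``use_inf_as_na`` option mentioned in the docstring changes that.
    #
    #   pd.Series([1.0, np.inf]).isna()          # -> [False, False]
    #   with pd.option_context('mode.use_inf_as_na', True):
    #       pd.Series([1.0, np.inf]).isna()      # -> [False, True]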
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self):
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self):
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace=False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
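    # Usage sketch (illustrative comments): the mask computed above keeps NaNs
    # as NaNs, so clipping never fills missing values.
    #
    #   pd.Series([1.0, np.nan, 10.0]).clip(upper=5)   # -> [1.0, NaN, 5.0]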
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
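    # Usage sketch (illustrative comments): per GH 2747 handled above, scalar
    # bounds passed in the wrong order are swapped instead of raising, so for a
    # numeric ``df``:
    #
    #   df.clip(6, -4)   # behaves like df.clip(-4, 6)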
def clip_upper(self, threshold, axis=None, inplace=False):
"""
Trim values above a given threshold.
.. deprecated:: 0.24.0
Use clip(upper=threshold) instead.
Elements above the `threshold` will be changed to match the
`threshold` value(s). Threshold can be a single value or an array,
in the latter case it performs the truncation element-wise.
Parameters
----------
threshold : numeric or array-like
Maximum value allowed. All values above threshold will be set to
this value.
* float : every value is compared to `threshold`.
* array-like : The shape of `threshold` should match the object
            it's compared to. When `self` is a Series, `threshold` should have
            the same length. When `self` is a DataFrame, `threshold` should be 2-D
and the same shape as `self` for ``axis=None``, or 1-D and the
same length as the axis being compared.
axis : {0 or 'index', 1 or 'columns'}, default 0
Align object with `threshold` along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
Returns
-------
Series or DataFrame
Original data with values trimmed.
See Also
--------
Series.clip : General purpose method to trim Series values to given
threshold(s).
DataFrame.clip : General purpose method to trim DataFrame values to
given threshold(s).
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.clip(upper=3)
0 1
1 2
2 3
3 3
4 3
dtype: int64
>>> elemwise_thresholds = [5, 4, 3, 2, 1]
>>> elemwise_thresholds
[5, 4, 3, 2, 1]
>>> s.clip(upper=elemwise_thresholds)
0 1
1 2
2 3
3 2
4 1
dtype: int64
"""
warnings.warn(
"clip_upper(threshold) is deprecated, use clip(upper=threshold) instead",
FutureWarning,
stacklevel=2,
)
return self._clip_with_one_bound(
threshold, method=self.le, axis=axis, inplace=inplace
)
def clip_lower(self, threshold, axis=None, inplace=False):
"""
Trim values below a given threshold.
.. deprecated:: 0.24.0
Use clip(lower=threshold) instead.
Elements below the `threshold` will be changed to match the
`threshold` value(s). Threshold can be a single value or an array,
in the latter case it performs the truncation element-wise.
Parameters
----------
threshold : numeric or array-like
Minimum value allowed. All values below threshold will be set to
this value.
* float : every value is compared to `threshold`.
* array-like : The shape of `threshold` should match the object
            it's compared to. When `self` is a Series, `threshold` should have
            the same length. When `self` is a DataFrame, `threshold` should be 2-D
and the same shape as `self` for ``axis=None``, or 1-D and the
same length as the axis being compared.
axis : {0 or 'index', 1 or 'columns'}, default 0
Align `self` with `threshold` along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
Returns
-------
Series or DataFrame
Original data with values trimmed.
See Also
--------
Series.clip : General purpose method to trim Series values to given
threshold(s).
DataFrame.clip : General purpose method to trim DataFrame values to
given threshold(s).
Examples
--------
Series single threshold clipping:
>>> s = pd.Series([5, 6, 7, 8, 9])
>>> s.clip(lower=8)
0 8
1 8
2 8
3 8
4 9
dtype: int64
Series clipping element-wise using an array of thresholds. `threshold`
should be the same length as the Series.
>>> elemwise_thresholds = [4, 8, 7, 2, 5]
>>> s.clip(lower=elemwise_thresholds)
0 5
1 8
2 7
3 8
4 9
dtype: int64
DataFrames can be compared to a scalar.
>>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> df.clip(lower=3)
A B
0 3 3
1 3 4
2 5 6
Or to an array of values. By default, `threshold` should be the same
shape as the DataFrame.
>>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]]))
A B
0 3 4
1 3 4
2 6 6
Control how `threshold` is broadcast with `axis`. In this case
`threshold` should be the same length as the axis specified by
`axis`.
>>> df.clip(lower=[3, 3, 5], axis='index')
A B
0 3 3
1 3 4
2 5 6
>>> df.clip(lower=[4, 5], axis='columns')
A B
0 4 5
1 4 5
2 5 6
"""
warnings.warn(
"clip_lower(threshold) is deprecated, use clip(lower=threshold) instead",
FutureWarning,
stacklevel=2,
)
return self._clip_with_one_bound(
threshold, method=self.ge, axis=axis, inplace=inplace
)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs
):
"""
Group DataFrame or Series using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
            values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
**kwargs
Optional, only accepts keyword argument 'mutated' and is passed
to groupby.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level=1).mean()
Max Speed
Type
Captive 210.0
Wild 185.0
"""
from pandas.core.groupby.groupby import groupby
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return groupby(
self,
by=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
**kwargs
)
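    # Usage sketch (illustrative comments): when ``by`` is a function it is
    # called on each index label to build the group keys, as the docstring
    # notes.  Assuming a string-indexed Series ``s``:
    #
    #   s = pd.Series([1, 2, 3, 4], index=['ab', 'ac', 'bd', 'be'])
    #   s.groupby(lambda label: label[0]).sum()   # 'a' -> 3, 'b' -> 7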
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(self, time, asof=False, axis=None):
"""
Select values at particular time of day (e.g. 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self.take(indexer, axis=axis)
def between_time(
self, start_time, end_time, include_start=True, include_end=True, axis=None
):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
end_time : datetime.time or str
include_start : bool, default True
include_end : bool, default True
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self.take(indexer, axis=axis)
def resample(
self,
rule,
how=None,
axis=0,
fill_method=None,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
limit=None,
base=0,
on=None,
level=None,
):
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
how : str
Method for down/re-sampling, default to 'mean' for downsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).mean()``, or
``.resample(...).apply(<func>)``
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. Must be
`DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
fill_method : str, default None
Filling method for upsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).<func>()``,
e.g. ``.resample(...).pad()``
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DateTimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
limit : int, default None
Maximum size gap when reindexing with `fill_method`.
.. deprecated:: 0.18.0
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
        value in the bucket used as the label is not included in the bucket
which it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
"""
from pandas.core.resample import resample, _maybe_process_deprecations
axis = self._get_axis_number(axis)
r = resample(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
return _maybe_process_deprecations(
r, how=how, fill_method=fill_method, limit=limit
)
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
        Notice that the data for the first 3 calendar days were returned, not the first
3 days observed in the dataset, and therefore data for 2018-04-13 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
        Notice that the data for the last 3 calendar days were returned, not the last
3 observed days in the dataset, and therefore data for 2018-04-11 was
not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value
(i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
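    # Usage sketch (illustrative comments): 'dense' differs from 'min' only in
    # that ranks increase by exactly 1 between groups of ties.
    #
    #   s = pd.Series([10, 10, 20, 30])
    #   s.rank(method='min')     # -> [1.0, 1.0, 3.0, 4.0]
    #   s.rank(method='dense')   # -> [1.0, 1.0, 2.0, 3.0]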
_shared_docs[
"align"
] = """
Align two objects on their axes with the
specified join method for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None)
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError("unsupported type: %s" % type(other))
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
try:
new_other = com.values_from_object(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
except Exception:
try_quick = False
                        # let's create a new array (if the quick path above
                        # failed, or if try_quick is False)
if not try_quick:
dtype, fill_value = maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : boolean %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
        The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self, periods=1, axis=0):
"""
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
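    # A minimal usage sketch (example values assumed for illustration):
    # >>> s = pd.Series([10, 20, 30, 40])
    # >>> s.slice_shift(2)
    # 2    10
    # 3    20
    # dtype: int64
    # # unlike ``shift``, the dropped periods are not padded with NaN,
    # # so the result is shorter than the original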
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
        axis : int or str, default 0
            Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If freq is not specified then tries to use the freq or inferred_freq
attributes of the index. If neither of those attributes exist, a
ValueError is thrown
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = "Given freq %s does not match PeriodIndex freq %s" % (
freq.rule_code,
orig_freq.rule_code,
)
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
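    # A minimal usage sketch (index and values assumed for illustration):
    # >>> idx = pd.date_range("2020-01-01", periods=3, freq="D")
    # >>> s = pd.Series([1, 2, 3], index=idx)
    # >>> s.tshift(1)
    # 2020-01-02    1
    # 2020-01-03    2
    # 2020-01-04    3
    # Freq: D, dtype: int64
    # # the index moves forward one period of its frequency; the data
    # # itself is not realigned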
def truncate(self, before=None, after=None, axis=None, copy=True):
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, string, int
Truncate all rows before this index value.
after : date, string, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
        copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
        axis : int or str, default 0
            The axis to convert.
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
%(klass)s
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
"%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
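    # A minimal usage sketch (timestamps assumed for illustration):
    # >>> idx = pd.date_range("2020-03-01 12:00", periods=2, freq="H", tz="UTC")
    # >>> s = pd.Series([1, 2], index=idx)
    # >>> s.tz_convert("US/Eastern")
    # 2020-03-01 07:00:00-05:00    1
    # 2020-03-01 08:00:00-05:00    2
    # Freq: H, dtype: int64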
def tz_localize(
self, tz, axis=0, level=None, copy=True, ambiguous="raise", nonexistent="raise"
):
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
        axis : int or str, default 0
            The axis to localize.
level : int, str, default None
            If axis is a MultiIndex, localize a specific level. Otherwise
            must be None.
        copy : bool, default True
            Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
            - 'raise' will raise a NonExistentTimeError if there are
nonexistent times
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7), index=pd.DatetimeIndex([
... '2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3), index=pd.DatetimeIndex([
... '2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
        or `'shift_backward'`.
>>> s = pd.Series(range(2), index=pd.DatetimeIndex([
... '2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise',"
" 'NaT', 'shift_forward', 'shift_backward' or"
" a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
"%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
        exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
self._check_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if is_datetime64_any_dtype(data):
tz = data.dt.tz
asint = data.dropna().values.view("i8")
top = Timestamp(top)
if top.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
top = top.tz_convert(tz)
else:
top = top.tz_localize(tz)
names += ["top", "freq", "first", "last"]
result += [
top,
freq,
Timestamp(asint.min(), tz=tz),
Timestamp(asint.max(), tz=tz),
]
else:
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
def _check_percentile(self, q):
"""
Validate percentiles (used by describe and quantile).
"""
msg = "percentiles should all be in the interval [0, 1]. Try {0} instead."
q = np.asarray(q)
if q.ndim == 0:
if not 0 <= q <= 1:
raise ValueError(msg.format(q / 100.0))
else:
if not all(0 <= qs <= 1 for qs in q):
raise ValueError(msg.format(q / 100.0))
return q
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit, axis=axis)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
rs = rs.reindex_like(data)
if freq is None:
mask = isna(com.values_from_object(data))
np.putmask(rs.values, mask, np.nan)
return rs
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name,
name2,
axis_descr,
_any_desc,
nanops.nanany,
_any_see_also,
_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name,
name2,
axis_descr,
_all_desc,
nanops.nanall,
_all_see_also,
_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
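        # A minimal usage sketch for ``mad`` (values assumed for illustration):
        # >>> pd.Series([1.0, 2.0, 3.0, 4.0]).mad()
        # 1.0
        # # the mean is 2.5, so the absolute deviations are
        # # [1.5, 0.5, 0.5, 1.5], whose mean is 1.0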
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name,
name2,
axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name,
name2,
axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name,
name2,
axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd,
)
@Substitution(
desc="Return the compound percentage of the values for "
"the requested axis.\n\n.. deprecated:: 0.25.0",
name1=name,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None):
msg = (
"The 'compound' method is deprecated and will be"
"removed in a future version."
)
warnings.warn(msg, FutureWarning, stacklevel=2)
if skipna is None:
skipna = True
return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1
cls.compound = compound
cls.cummin = _make_cum_function(
cls,
"cummin",
name,
name2,
axis_descr,
"minimum",
lambda y, axis: np.minimum.accumulate(y, axis),
"min",
np.inf,
np.nan,
_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name,
name2,
axis_descr,
"sum",
lambda y, axis: y.cumsum(axis),
"sum",
0.0,
np.nan,
_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name,
name2,
axis_descr,
"product",
lambda y, axis: y.cumprod(axis),
"prod",
1.0,
np.nan,
_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name,
name2,
axis_descr,
"maximum",
lambda y, axis: np.maximum.accumulate(y, axis),
"max",
-np.inf,
np.nan,
_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name,
name2,
axis_descr,
"""Return the sum of the values for the requested axis.\n
This is equivalent to the method ``numpy.sum``.""",
nanops.nansum,
_stat_func_see_also,
_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name,
name2,
axis_descr,
"Return the mean of the values for the requested axis.",
nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name,
name2,
axis_descr,
"Return unbiased skew over requested axis\nNormalized by N-1.",
nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name,
name2,
axis_descr,
"Return unbiased kurtosis over requested axis using Fisher's "
"definition of\nkurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name,
name2,
axis_descr,
"Return the product of the values for the requested axis.",
nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name,
name2,
axis_descr,
"Return the median of the values for the requested axis.",
nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name,
name2,
axis_descr,
"""Return the maximum of the values for the requested axis.\n
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax,
_stat_func_see_also,
_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name,
name2,
axis_descr,
"""Return the minimum of the values for the requested axis.\n
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin,
_stat_func_see_also,
_min_examples,
)
@classmethod
def _add_series_only_operations(cls):
"""
Add the series only operations to the cls; evaluate the doc
strings again.
"""
axis_descr, name, name2 = _doc_parms(cls)
def nanptp(values, axis=0, skipna=True):
nmax = nanops.nanmax(values, axis, skipna)
nmin = nanops.nanmin(values, axis, skipna)
warnings.warn(
"Method .ptp is deprecated and will be removed "
"in a future version. Use numpy.ptp instead.",
FutureWarning,
stacklevel=4,
)
return nmax - nmin
cls.ptp = _make_stat_function(
cls,
"ptp",
name,
name2,
axis_descr,
"""Return the difference between the maximum value and the
minimum value in the object. This is the equivalent of the
``numpy.ndarray`` method ``ptp``.\n\n.. deprecated:: 0.24.0
Use numpy.ptp instead""",
nanptp,
)
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
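        # A minimal usage sketch for the window accessors added above
        # (example values assumed for illustration):
        # >>> s = pd.Series([1.0, 2.0, 3.0, 4.0])
        # >>> s.rolling(window=2).mean()   # trailing two-element mean
        # >>> s.expanding().sum()          # ever-growing (expanding) window sum
        # >>> s.ewm(span=2).mean()         # exponentially weighted mean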
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
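    # A minimal usage sketch (values assumed for illustration):
    # >>> s = pd.Series([1, 2, 3])
    # >>> s.transform(lambda x: x + 1)   # same length as the input -> allowed
    # >>> s.transform("sum")             # aggregates to a scalar -> ValueError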
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how):
"""
        Retrieves the index of the first or last valid value, depending on ``how``.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
assert how in ["first", "last"]
if len(self) == 0: # early stop
return None
is_valid = ~self.isna()
if self.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == "first":
idxpos = is_valid.values[::].argmax()
if how == "last":
idxpos = len(self) - 1 - is_valid.values[::-1].argmax()
chk_notna = is_valid.iat[idxpos]
idx = self.index[idxpos]
if not chk_notna:
return None
return idx
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
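    # A minimal usage sketch (values assumed for illustration):
    # >>> s = pd.Series([np.nan, 2.0, np.nan, 4.0, np.nan])
    # >>> s.first_valid_index()
    # 1
    # >>> s.last_valid_index()
    # 3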
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ", ".join(
"{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS)
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there is at least one element within a series or
along a DataFrame axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there is at least one element within a series or
along a DataFrame axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also="", examples=""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also="", examples=""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f, name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name,
name1,
name2,
axis_descr,
desc,
accum_func,
accum_func_name,
mask_a,
mask_b,
examples,
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
y = com.values_from_object(self).copy()
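        # skipna handling: for datetime-like values, accumulate first and re-insert
        # iNaT at the NA positions; for other non-integer dtypes, fill NAs with
        # mask_a (the accumulation's neutral value) before accumulating and write
        # mask_b back into the result afterwards.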
if skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64)):
result = accum_func(y, axis)
mask = isna(self)
np.putmask(result, mask, iNaT)
elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
mask = isna(self)
np.putmask(y, mask, mask_a)
result = accum_func(y, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(y, axis)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
# install the indexes
for _name, _indexer in indexing.get_indexers_list():
NDFrame._create_indexer(_name, _indexer)
|
the-stack_0_16599 | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
#
#
# ==================
# VIZ MARKDOWN - multiple file, markdown format
# ==================
import os, os.path, sys
import json
from ..utils import *
from ..builder import * # loads and sets up Django
from ..viz_factory import VizFactory
class SPDXViz(VizFactory):
"""
    A simple markdown rendering split across multiple pages
"""
def __init__(self, ontospy_graph, title=""):
"""
Init
"""
super(SPDXViz, self).__init__(ontospy_graph, title)
def _buildTemplates(self):
"""
OVERRIDING THIS METHOD from Factory
"""
# Ontology - MAIN PAGE
contents = self._renderTemplate(
"spdx/markdown_ontoinfo.md", extraContext=None)
FILE_NAME = "index.md"
main_url = self._save2File(contents, FILE_NAME, self.output_path)
browser_output_path = self.output_path
if self.ontospy_graph.all_classes:
# BROWSER PAGES - CLASSES ======
for entity in self.ontospy_graph.all_classes:
desc = ""
if os.path.isfile(entity.slug + "_desc.md"):
file = open(entity.slug + "_desc.md")
desc = file.read()
extra_context = {
"main_entity": entity,
"main_entity_type": "class",
"ontograph": self.ontospy_graph,
"external_description": desc
}
contents = self._renderTemplate(
"spdx/markdown_classinfo.md",
extraContext=extra_context)
FILE_NAME = entity.slug + ".md"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_properties:
# BROWSER PAGES - PROPERTIES ======
for entity in self.ontospy_graph.all_properties:
extra_context = {
"main_entity": entity,
"main_entity_type": "property",
"ontograph": self.ontospy_graph
}
contents = self._renderTemplate(
"spdx/markdown_propinfo.md",
extraContext=extra_context)
FILE_NAME = entity.slug + ".md"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_skos_concepts:
# BROWSER PAGES - CONCEPTS ======
for entity in self.ontospy_graph.all_skos_concepts:
extra_context = {
"main_entity": entity,
"main_entity_type": "concept",
"ontograph": self.ontospy_graph
}
contents = self._renderTemplate(
"spdx/markdown_conceptinfo.md",
extraContext=extra_context)
FILE_NAME = entity.slug + ".ms"
self._save2File(contents, FILE_NAME, browser_output_path)
return main_url
# if called directly, for testing purposes pick a random ontology
if __name__ == '__main__':
TEST_ONLINE = False
try:
g = get_onto_for_testing(TEST_ONLINE)
v = SPDXViz(g, title="")
v.build()
v.preview()
sys.exit(0)
except KeyboardInterrupt as e: # Ctrl-C
raise e
|
the-stack_0_16600 | #!/usr/bin/env python
from setuptools import setup
def load_requirements(*requirements_paths):
"""
Load all requirements from the specified requirements files.
Returns a list of requirement strings.
"""
requirements = set()
for path in requirements_paths:
with open(path) as reqs:
requirements.update(
line.split('#')[0].strip() for line in reqs
if is_requirement(line.strip())
)
return list(requirements)
def is_requirement(line):
"""
Return True if the requirement line is a package requirement;
that is, it is not blank, a comment, a URL, or an included file.
"""
return line and not line.startswith(('-r', '#', '-e', 'git+', '-c'))
setup(
name='edx-i18n-tools',
version='0.5.3',
description='edX Internationalization Tools',
author='edX',
author_email='[email protected]',
url='https://github.com/edx/i18n-tools',
packages=[
'i18n',
],
install_requires=load_requirements('requirements/base.in'),
entry_points={
'console_scripts': [
'i18n_tool = i18n.main:main',
],
},
license='Apache License 2.0',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.8',
'Framework :: Django',
'Framework :: Django :: 2.2',
],
)
|
the-stack_0_16601 |
from detectors.mmdetection.mmdet.apis.inference import init_detector, inference_detector
from detectors.base_detector import Base_detector
from utilities.preprocessing import non_max_suppression
from utilities.helper import many_xyxy2xywh
import numpy as np
from utilities.helper import bboxes_round_int
import os
import mmcv
class Mmdetection_detector(Base_detector):
def __init__(self,cfg):
#Initialize the detector
Base_detector.__init__(self, cfg)
print("Mmdetection_detector init called")
mmdetection_checkpoint_file = os.path.join(self.cfg.general.repository_root,self.cfg.detector.mmdetection_checkpoint_file)
mmdetection_config = os.path.join(self.cfg.general.repository_root,self.cfg.detector.mmdetection_config)
self.detector_model = init_detector(mmdetection_config
, mmdetection_checkpoint_file
, device=self.cfg.detector.device)
    def detect(self, img):
# Run person detector
result = inference_detector(self.detector_model, img)
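        # inference_detector returns one array of detections per class; index 0 is
        # used here as the person class, with rows of [x1, y1, x2, y2, score].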
bboxes_with_score = result[0]
# Only take those detections with a higher confidence than the min confidence
bboxes_with_score = np.array(
[bbox_with_s for bbox_with_s in bboxes_with_score if bbox_with_s[4] >= self.cfg.detector.min_confidence])
if len(bboxes_with_score) == 0:
return (np.array([]),np.array([]))
#columns 0 to 3 are bbox
bboxes = bboxes_with_score[:, 0:4]
bboxes = many_xyxy2xywh(bboxes)
# Run non max suppression
scores = bboxes_with_score[:, 4]
indices = non_max_suppression(
bboxes, max_bbox_overlap=0.8, scores=scores)
bboxes = [bboxes[i, :] for i in indices]
bboxes = bboxes_round_int(bboxes)
return bboxes, scores
|
the-stack_0_16604 | # -*- coding: utf-8 -*-
"""
Created on Sun Nov 25 23:05:30 2018
@author: paulo
"""
# Standard imports
import cv2
import numpy as np;
# Read image
im = cv2.imread('C:/Users/PauloRenato/Desktop/img3.jpg', cv2.IMREAD_GRAYSCALE)
im = cv2.GaussianBlur(im, (3,3), 1)
im = cv2.Canny(im.copy(),10, 80)
#im = 255-im
# Setup SimpleBlobDetector parameters.
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = im.min()
params.maxThreshold = im.max()
params.thresholdStep = 100
# Filter by Area.
#params.filterByArea = True
#params.minArea = 1500
# Filter by Circularity
params.filterByCircularity = True
#params.minCircularity = 0.500
params.minCircularity = 0.7
# Filter by Convexity
#params.filterByConvexity = True
#params.minConvexity = 0.87
# Filter by Inertia
#params.filterByInertia = True
#params.minInertiaRatio = 0.01
# Create a detector with the parameters
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs.
keypoints = detector.detect(im)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures
# the size of the circle corresponds to the size of blob
im_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Show blobs
cv2.imshow("Keypoints", im_with_keypoints)
cv2.waitKey(0)
cv2.destroyAllWindows() |
the-stack_0_16605 | """Support for HDMI CEC devices as media players."""
from __future__ import annotations
import logging
from pycec.commands import CecCommand, KeyPressCommand, KeyReleaseCommand
from pycec.const import (
KEY_BACKWARD,
KEY_FORWARD,
KEY_MUTE_TOGGLE,
KEY_PAUSE,
KEY_PLAY,
KEY_STOP,
KEY_VOLUME_DOWN,
KEY_VOLUME_UP,
POWER_OFF,
POWER_ON,
STATUS_PLAY,
STATUS_STILL,
STATUS_STOP,
TYPE_AUDIO,
TYPE_PLAYBACK,
TYPE_RECORDER,
TYPE_TUNER,
)
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
DOMAIN,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from . import ATTR_NEW, CecEntity
_LOGGER = logging.getLogger(__name__)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return HDMI devices as +switches."""
if ATTR_NEW in discovery_info:
_LOGGER.debug("Setting up HDMI devices %s", discovery_info[ATTR_NEW])
entities = []
for device in discovery_info[ATTR_NEW]:
hdmi_device = hass.data.get(device)
entities.append(CecPlayerEntity(hdmi_device, hdmi_device.logical_address))
add_entities(entities, True)
class CecPlayerEntity(CecEntity, MediaPlayerEntity):
"""Representation of a HDMI device as a Media player."""
def __init__(self, device, logical) -> None:
"""Initialize the HDMI device."""
CecEntity.__init__(self, device, logical)
self.entity_id = f"{DOMAIN}.hdmi_{hex(self._logical_address)[2:]}"
def send_keypress(self, key):
"""Send keypress to CEC adapter."""
_LOGGER.debug(
"Sending keypress %s to device %s", hex(key), hex(self._logical_address)
)
self._device.send_command(KeyPressCommand(key, dst=self._logical_address))
self._device.send_command(KeyReleaseCommand(dst=self._logical_address))
def send_playback(self, key):
"""Send playback status to CEC adapter."""
self._device.async_send_command(CecCommand(key, dst=self._logical_address))
def mute_volume(self, mute):
"""Mute volume."""
self.send_keypress(KEY_MUTE_TOGGLE)
def media_previous_track(self):
"""Go to previous track."""
self.send_keypress(KEY_BACKWARD)
def turn_on(self):
"""Turn device on."""
self._device.turn_on()
self._state = STATE_ON
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
def turn_off(self):
"""Turn device off."""
self._device.turn_off()
self._state = STATE_OFF
def media_stop(self):
"""Stop playback."""
self.send_keypress(KEY_STOP)
self._state = STATE_IDLE
def play_media(self, media_type, media_id, **kwargs):
"""Not supported."""
raise NotImplementedError()
def media_next_track(self):
"""Skip to next track."""
self.send_keypress(KEY_FORWARD)
def media_seek(self, position):
"""Not supported."""
raise NotImplementedError()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
def media_pause(self):
"""Pause playback."""
self.send_keypress(KEY_PAUSE)
self._state = STATE_PAUSED
def select_source(self, source):
"""Not supported."""
raise NotImplementedError()
def media_play(self):
"""Start playback."""
self.send_keypress(KEY_PLAY)
self._state = STATE_PLAYING
def volume_up(self):
"""Increase volume."""
_LOGGER.debug("%s: volume up", self._logical_address)
self.send_keypress(KEY_VOLUME_UP)
def volume_down(self):
"""Decrease volume."""
_LOGGER.debug("%s: volume down", self._logical_address)
self.send_keypress(KEY_VOLUME_DOWN)
@property
def state(self) -> str | None:
"""Cache state of device."""
return self._state
def update(self):
"""Update device status."""
device = self._device
if device.power_status in [POWER_OFF, 3]:
self._state = STATE_OFF
elif not self.support_pause:
if device.power_status in [POWER_ON, 4]:
self._state = STATE_ON
elif device.status == STATUS_PLAY:
self._state = STATE_PLAYING
elif device.status == STATUS_STOP:
self._state = STATE_IDLE
elif device.status == STATUS_STILL:
self._state = STATE_PAUSED
else:
_LOGGER.warning("Unknown state: %s", device.status)
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self.type_id == TYPE_RECORDER or self.type == TYPE_PLAYBACK:
return (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
)
if self.type == TYPE_TUNER:
return (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PAUSE
| SUPPORT_STOP
)
if self.type_id == TYPE_AUDIO:
return (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
)
return SUPPORT_TURN_ON | SUPPORT_TURN_OFF
|
the-stack_0_16606 | # Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
"""
This module accesses the DB table object UserCourseDisplay
"""
import logging
import traceback
from django.db import IntegrityError
from myuw.models import UserCourseDisplay
from myuw.dao.user import get_user_model
TOTAL_COURSE_COLORS = 8
logger = logging.getLogger(__name__)
def set_course_display_pref(request, schedule):
"""
Add display elements on the sections in the given schedule
"""
user = get_user_model(request)
existing_color_dict, colors_taken, pin_on_teaching_2nds =\
UserCourseDisplay.get_course_display(user,
schedule.term.year,
schedule.term.quarter)
primary_color_dict = {}
# record primary colors used {section_labels: color_id}
for section in schedule.sections:
section_label = section.section_label()
if section_label in pin_on_teaching_2nds:
section.pin_on_teaching = True
else:
section.pin_on_teaching = False
if section_label in existing_color_dict:
# exists in DB table
existing_color_id = existing_color_dict[section_label]
color_id = _validated_color(user, primary_color_dict,
section, existing_color_id)
_record_primary_colors(primary_color_dict, section, color_id)
else:
# a section with no color yet
if not section.is_primary_section:
primary_label = section.primary_section_label()
else:
primary_label = section_label
if primary_label in primary_color_dict:
color_id = primary_color_dict[primary_label]
else:
color_id, colors_taken = _get_next_color(colors_taken)
_record_primary_colors(primary_color_dict, section, color_id)
_save_section_color(user, section, color_id)
section.color_id = _make_colorid(section, color_id)
def _get_next_color(colors_taken):
"""
Return the next available color in the eight color list
"""
times = int(len(colors_taken) / TOTAL_COURSE_COLORS)
if len(colors_taken) >= TOTAL_COURSE_COLORS:
colors_taken = colors_taken[TOTAL_COURSE_COLORS * times:]
for new_color in range(1, TOTAL_COURSE_COLORS + 1, 1):
if new_color not in colors_taken:
colors_taken.append(new_color)
return new_color, colors_taken
def _make_colorid(section, color_id):
if section.is_primary_section:
return color_id
return "{}a".format(color_id)
def _record_primary_colors(primary_color_dict, section, color_id):
"""
    Remember the primary colors used for the term so that they can be
    referenced by the follow-up secondary sections
"""
if not section.is_primary_section:
label = section.primary_section_label()
else:
label = section.section_label()
if label not in primary_color_dict:
primary_color_dict[label] = color_id
def _save_section_color(user, section, color_id):
"""
Store the color of the section in DB
"""
section_label = section.section_label()
if not UserCourseDisplay.exists_section_display(user, section_label):
try:
UserCourseDisplay.objects.create(user=user,
year=section.term.year,
quarter=section.term.quarter,
section_label=section_label,
color_id=color_id)
except Exception as ex:
logger.warning({'user': user.uwnetid,
'at': "create ({} color_id: {}) in DB".format(
section_label, color_id),
'err': ex})
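            # MySQL error 1062 (duplicate entry) means a concurrent request already
            # stored a color for this section, which is safe to ignore.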
if '1062, "Duplicate entry ' not in str(ex):
raise
def _update_color(user, section_label, color_id):
UserCourseDisplay.set_color(user, section_label, color_id)
def _validated_color(user, primary_color_dict,
sec_section, existing_color_id):
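    # Keep a secondary section's stored color consistent with the color already
    # assigned to its primary section.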
primary_section_label = sec_section.primary_section_label()
primary_color_id = primary_color_dict.get(primary_section_label, None)
if primary_color_id and primary_color_id != existing_color_id:
_update_color(user, sec_section.section_label(), primary_color_id)
return primary_color_id
return existing_color_id
|
the-stack_0_16608 | """Support for Meteo-France raining forecast sensor."""
from meteofrance_api.helpers import (
get_warning_text_status_from_indice_color,
readeable_phenomenoms_dict,
)
from homeassistant.components.sensor import SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from homeassistant.util import dt as dt_util
from .const import (
ATTR_NEXT_RAIN_1_HOUR_FORECAST,
ATTR_NEXT_RAIN_DT_REF,
ATTRIBUTION,
COORDINATOR_ALERT,
COORDINATOR_FORECAST,
COORDINATOR_RAIN,
DOMAIN,
MANUFACTURER,
MODEL,
SENSOR_TYPES,
SENSOR_TYPES_ALERT,
SENSOR_TYPES_PROBABILITY,
SENSOR_TYPES_RAIN,
MeteoFranceSensorEntityDescription,
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Meteo-France sensor platform."""
coordinator_forecast = hass.data[DOMAIN][entry.entry_id][COORDINATOR_FORECAST]
coordinator_rain = hass.data[DOMAIN][entry.entry_id][COORDINATOR_RAIN]
coordinator_alert = hass.data[DOMAIN][entry.entry_id][COORDINATOR_ALERT]
entities = [
MeteoFranceSensor(coordinator_forecast, description)
for description in SENSOR_TYPES
]
    # Add rain forecast entity only if location supports this feature
if coordinator_rain:
entities.extend(
[
MeteoFranceRainSensor(coordinator_rain, description)
for description in SENSOR_TYPES_RAIN
]
)
    # Add weather alert entity only if location supports this feature
if coordinator_alert:
entities.extend(
[
MeteoFranceAlertSensor(coordinator_alert, description)
for description in SENSOR_TYPES_ALERT
]
)
    # Add weather probability entities only if location supports this feature
if coordinator_forecast.data.probability_forecast:
entities.extend(
[
MeteoFranceSensor(coordinator_forecast, description)
for description in SENSOR_TYPES_PROBABILITY
]
)
async_add_entities(entities, False)
class MeteoFranceSensor(CoordinatorEntity, SensorEntity):
"""Representation of a Meteo-France sensor."""
entity_description: MeteoFranceSensorEntityDescription
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: MeteoFranceSensorEntityDescription,
) -> None:
"""Initialize the Meteo-France sensor."""
super().__init__(coordinator)
self.entity_description = description
if hasattr(coordinator.data, "position"):
city_name = coordinator.data.position["name"]
self._attr_name = f"{city_name} {description.name}"
self._attr_unique_id = f"{coordinator.data.position['lat']},{coordinator.data.position['lon']}_{description.key}"
self._attr_extra_state_attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self.platform.config_entry.unique_id)},
"name": self.coordinator.name,
"manufacturer": MANUFACTURER,
"model": MODEL,
"entry_type": "service",
}
@property
def native_value(self):
"""Return the state."""
path = self.entity_description.data_path.split(":")
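        # data_path is a colon-separated accessor: the first element names an
        # attribute of the coordinator data, the remaining one or two elements
        # index into that structure.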
data = getattr(self.coordinator.data, path[0])
# Specific case for probability forecast
if path[0] == "probability_forecast":
if len(path) == 3:
                # Unlike the other entities, the first index is always null in the
                # API result for an unknown reason, so take the first non-null entry.
value = _find_first_probability_forecast_not_null(data, path)
else:
value = data[0][path[1]]
# General case
else:
if len(path) == 3:
value = data[path[1]][path[2]]
else:
value = data[path[1]]
if self.entity_description.key in ("wind_speed", "wind_gust"):
# convert API wind speed from m/s to km/h
value = round(value * 3.6)
return value
class MeteoFranceRainSensor(MeteoFranceSensor):
"""Representation of a Meteo-France rain sensor."""
@property
def native_value(self):
"""Return the state."""
        # Search for the first forecast interval ("cadran") with rain
next_rain = next(
(cadran for cadran in self.coordinator.data.forecast if cadran["rain"] > 1),
None,
)
return (
dt_util.utc_from_timestamp(next_rain["dt"]).isoformat()
if next_rain
else None
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
reference_dt = self.coordinator.data.forecast[0]["dt"]
return {
ATTR_NEXT_RAIN_DT_REF: dt_util.utc_from_timestamp(reference_dt).isoformat(),
ATTR_NEXT_RAIN_1_HOUR_FORECAST: {
f"{int((item['dt'] - reference_dt) / 60)} min": item["desc"]
for item in self.coordinator.data.forecast
},
ATTR_ATTRIBUTION: ATTRIBUTION,
}
class MeteoFranceAlertSensor(MeteoFranceSensor):
"""Representation of a Meteo-France alert sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: MeteoFranceSensorEntityDescription,
) -> None:
"""Initialize the Meteo-France sensor."""
super().__init__(coordinator, description)
dept_code = self.coordinator.data.domain_id
self._attr_name = f"{dept_code} {description.name}"
self._attr_unique_id = self._attr_name
@property
def native_value(self):
"""Return the state."""
return get_warning_text_status_from_indice_color(
self.coordinator.data.get_domain_max_color()
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {
**readeable_phenomenoms_dict(self.coordinator.data.phenomenons_max_colors),
ATTR_ATTRIBUTION: ATTRIBUTION,
}
def _find_first_probability_forecast_not_null(
probability_forecast: list, path: list
) -> int:
"""Search the first not None value in the first forecast elements."""
for forecast in probability_forecast[0:3]:
if forecast[path[1]][path[2]] is not None:
return forecast[path[1]][path[2]]
    # Default return value if no value is found
return None
|
the-stack_0_16612 | import errno
import operator
import os
import shutil
import site
from optparse import SUPPRESS_HELP, Values
from typing import Iterable, List, Optional
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cache import WheelCache
from pip._internal.cli import cmdoptions
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.req_command import (
RequirementCommand,
warn_if_run_as_root,
with_cleanup,
)
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.exceptions import CommandError, InstallationError
from pip._internal.locations import get_scheme
from pip._internal.metadata import get_environment
from pip._internal.models.format_control import FormatControl
from pip._internal.operations.build.build_tracker import get_build_tracker
from pip._internal.operations.check import ConflictDetails, check_install_conflicts
from pip._internal.req import install_given_reqs
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.distutils_args import parse_distutils_args
from pip._internal.utils.filesystem import test_writable_dir
from pip._internal.utils.logging import getLogger
from pip._internal.utils.misc import (
ensure_dir,
get_pip_version,
protect_pip_from_modification_on_windows,
write_output,
)
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.virtualenv import (
running_under_virtualenv,
virtualenv_no_global,
)
from pip._internal.wheel_builder import (
BinaryAllowedPredicate,
build,
should_build_for_install_command,
)
logger = getLogger(__name__)
def get_check_binary_allowed(format_control: FormatControl) -> BinaryAllowedPredicate:
def check_binary_allowed(req: InstallRequirement) -> bool:
canonical_name = canonicalize_name(req.name or "")
allowed_formats = format_control.get_allowed_formats(canonical_name)
return "binary" in allowed_formats
return check_binary_allowed
class InstallCommand(RequirementCommand):
"""
Install packages from:
- PyPI (and other indexes) using requirement specifiers.
- VCS project urls.
- Local project directories.
- Local or remote source archives.
pip also supports installing from "requirements files", which provide
an easy way to specify a whole environment to be installed.
"""
usage = """
%prog [options] <requirement specifier> [package-index-options] ...
%prog [options] -r <requirements file> [package-index-options] ...
%prog [options] [-e] <vcs project url> ...
%prog [options] [-e] <local project path> ...
%prog [options] <archive url/path> ..."""
def add_options(self) -> None:
self.cmd_opts.add_option(cmdoptions.requirements())
self.cmd_opts.add_option(cmdoptions.constraints())
self.cmd_opts.add_option(cmdoptions.no_deps())
self.cmd_opts.add_option(cmdoptions.pre())
self.cmd_opts.add_option(cmdoptions.editable())
self.cmd_opts.add_option(
"-t",
"--target",
dest="target_dir",
metavar="dir",
default=None,
help=(
"Install packages into <dir>. "
"By default this will not replace existing files/folders in "
"<dir>. Use --upgrade to replace existing packages in <dir> "
"with new versions."
),
)
cmdoptions.add_target_python_options(self.cmd_opts)
self.cmd_opts.add_option(
"--user",
dest="use_user_site",
action="store_true",
help=(
"Install to the Python user install directory for your "
"platform. Typically ~/.local/, or %APPDATA%\\Python on "
"Windows. (See the Python documentation for site.USER_BASE "
"for full details.)"
),
)
self.cmd_opts.add_option(
"--no-user",
dest="use_user_site",
action="store_false",
help=SUPPRESS_HELP,
)
self.cmd_opts.add_option(
"--root",
dest="root_path",
metavar="dir",
default=None,
help="Install everything relative to this alternate root directory.",
)
self.cmd_opts.add_option(
"--prefix",
dest="prefix_path",
metavar="dir",
default=None,
help=(
"Installation prefix where lib, bin and other top-level "
"folders are placed"
),
)
self.cmd_opts.add_option(cmdoptions.src())
self.cmd_opts.add_option(
"-U",
"--upgrade",
dest="upgrade",
action="store_true",
help=(
"Upgrade all specified packages to the newest available "
"version. The handling of dependencies depends on the "
"upgrade-strategy used."
),
)
self.cmd_opts.add_option(
"--upgrade-strategy",
dest="upgrade_strategy",
default="only-if-needed",
choices=["only-if-needed", "eager"],
help=(
"Determines how dependency upgrading should be handled "
"[default: %default]. "
'"eager" - dependencies are upgraded regardless of '
"whether the currently installed version satisfies the "
"requirements of the upgraded package(s). "
'"only-if-needed" - are upgraded only when they do not '
"satisfy the requirements of the upgraded package(s)."
),
)
self.cmd_opts.add_option(
"--force-reinstall",
dest="force_reinstall",
action="store_true",
help="Reinstall all packages even if they are already up-to-date.",
)
self.cmd_opts.add_option(
"-I",
"--ignore-installed",
dest="ignore_installed",
action="store_true",
help=(
"Ignore the installed packages, overwriting them. "
"This can break your system if the existing package "
"is of a different version or was installed "
"with a different package manager!"
),
)
self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
self.cmd_opts.add_option(cmdoptions.no_build_isolation())
self.cmd_opts.add_option(cmdoptions.use_pep517())
self.cmd_opts.add_option(cmdoptions.no_use_pep517())
self.cmd_opts.add_option(cmdoptions.install_options())
self.cmd_opts.add_option(cmdoptions.global_options())
self.cmd_opts.add_option(
"--compile",
action="store_true",
dest="compile",
default=True,
help="Compile Python source files to bytecode",
)
self.cmd_opts.add_option(
"--no-compile",
action="store_false",
dest="compile",
help="Do not compile Python source files to bytecode",
)
self.cmd_opts.add_option(
"--no-warn-script-location",
action="store_false",
dest="warn_script_location",
default=True,
help="Do not warn when installing scripts outside PATH",
)
self.cmd_opts.add_option(
"--no-warn-conflicts",
action="store_false",
dest="warn_about_conflicts",
default=True,
help="Do not warn about broken dependencies",
)
self.cmd_opts.add_option(cmdoptions.no_binary())
self.cmd_opts.add_option(cmdoptions.only_binary())
self.cmd_opts.add_option(cmdoptions.prefer_binary())
self.cmd_opts.add_option(cmdoptions.require_hashes())
self.cmd_opts.add_option(cmdoptions.progress_bar())
self.cmd_opts.add_option(cmdoptions.warn_about_root_user())
index_opts = cmdoptions.make_option_group(
cmdoptions.index_group,
self.parser,
)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, self.cmd_opts)
@with_cleanup
def run(self, options: Values, args: List[str]) -> int:
if options.use_user_site and options.target_dir is not None:
raise CommandError("Can not combine '--user' and '--target'")
cmdoptions.check_install_build_global(options)
upgrade_strategy = "to-satisfy-only"
if options.upgrade:
upgrade_strategy = options.upgrade_strategy
cmdoptions.check_dist_restriction(options, check_target=True)
install_options = options.install_options or []
logger.verbose("Using %s", get_pip_version())
options.use_user_site = decide_user_install(
options.use_user_site,
prefix_path=options.prefix_path,
target_dir=options.target_dir,
root_path=options.root_path,
isolated_mode=options.isolated_mode,
)
target_temp_dir: Optional[TempDirectory] = None
target_temp_dir_path: Optional[str] = None
if options.target_dir:
options.ignore_installed = True
options.target_dir = os.path.abspath(options.target_dir)
if (
# fmt: off
os.path.exists(options.target_dir) and
not os.path.isdir(options.target_dir)
# fmt: on
):
raise CommandError(
"Target path exists but is not a directory, will not continue."
)
# Create a target directory for using with the target option
target_temp_dir = TempDirectory(kind="target")
target_temp_dir_path = target_temp_dir.path
self.enter_context(target_temp_dir)
global_options = options.global_options or []
session = self.get_default_session(options)
target_python = make_target_python(options)
finder = self._build_package_finder(
options=options,
session=session,
target_python=target_python,
ignore_requires_python=options.ignore_requires_python,
)
wheel_cache = WheelCache(options.cache_dir, options.format_control)
build_tracker = self.enter_context(get_build_tracker())
directory = TempDirectory(
delete=not options.no_clean,
kind="install",
globally_managed=True,
)
try:
reqs = self.get_requirements(args, options, finder, session)
# Only when installing is it permitted to use PEP 660.
# In other circumstances (pip wheel, pip download) we generate
# regular (i.e. non editable) metadata and wheels.
for req in reqs:
req.permit_editable_wheels = True
reject_location_related_install_options(reqs, options.install_options)
preparer = self.make_requirement_preparer(
temp_build_dir=directory,
options=options,
build_tracker=build_tracker,
session=session,
finder=finder,
use_user_site=options.use_user_site,
verbosity=self.verbosity,
)
resolver = self.make_resolver(
preparer=preparer,
finder=finder,
options=options,
wheel_cache=wheel_cache,
use_user_site=options.use_user_site,
ignore_installed=options.ignore_installed,
ignore_requires_python=options.ignore_requires_python,
force_reinstall=options.force_reinstall,
upgrade_strategy=upgrade_strategy,
use_pep517=options.use_pep517,
)
self.trace_basic_info(finder)
requirement_set = resolver.resolve(
reqs, check_supported_wheels=not options.target_dir
)
try:
pip_req = requirement_set.get_requirement("pip")
except KeyError:
modifying_pip = False
else:
# If we're not replacing an already installed pip,
# we're not modifying it.
modifying_pip = pip_req.satisfied_by is None
protect_pip_from_modification_on_windows(modifying_pip=modifying_pip)
check_binary_allowed = get_check_binary_allowed(finder.format_control)
reqs_to_build = [
r
for r in requirement_set.requirements.values()
if should_build_for_install_command(r, check_binary_allowed)
]
_, build_failures = build(
reqs_to_build,
wheel_cache=wheel_cache,
verify=True,
build_options=[],
global_options=[],
)
# If we're using PEP 517, we cannot do a legacy setup.py install
# so we fail here.
pep517_build_failure_names: List[str] = [
r.name for r in build_failures if r.use_pep517 # type: ignore
]
if pep517_build_failure_names:
raise InstallationError(
"Could not build wheels for {}, which is required to "
"install pyproject.toml-based projects".format(
", ".join(pep517_build_failure_names)
)
)
# For now, we just warn about failures building legacy
# requirements, as we'll fall through to a setup.py install for
# those.
for r in build_failures:
if not r.use_pep517:
r.legacy_install_reason = 8368
to_install = resolver.get_installation_order(requirement_set)
# Check for conflicts in the package set we're installing.
conflicts: Optional[ConflictDetails] = None
should_warn_about_conflicts = (
not options.ignore_dependencies and options.warn_about_conflicts
)
if should_warn_about_conflicts:
conflicts = self._determine_conflicts(to_install)
# Don't warn about script install locations if
# --target or --prefix has been specified
warn_script_location = options.warn_script_location
if options.target_dir or options.prefix_path:
warn_script_location = False
installed = install_given_reqs(
to_install,
install_options,
global_options,
root=options.root_path,
home=target_temp_dir_path,
prefix=options.prefix_path,
warn_script_location=warn_script_location,
use_user_site=options.use_user_site,
pycompile=options.compile,
)
lib_locations = get_lib_location_guesses(
user=options.use_user_site,
home=target_temp_dir_path,
root=options.root_path,
prefix=options.prefix_path,
isolated=options.isolated_mode,
)
env = get_environment(lib_locations)
installed.sort(key=operator.attrgetter("name"))
items = []
for result in installed:
item = result.name
try:
installed_dist = env.get_distribution(item)
if installed_dist is not None:
item = f"{item}-{installed_dist.version}"
except Exception:
pass
items.append(item)
if conflicts is not None:
self._warn_about_conflicts(
conflicts,
resolver_variant=self.determine_resolver_variant(options),
)
installed_desc = " ".join(items)
if installed_desc:
write_output(
"Successfully installed %s",
installed_desc,
)
except OSError as error:
show_traceback = self.verbosity >= 1
message = create_os_error_message(
error,
show_traceback,
options.use_user_site,
)
logger.error(message, exc_info=show_traceback) # noqa
return ERROR
if options.target_dir:
assert target_temp_dir
self._handle_target_dir(
options.target_dir, target_temp_dir, options.upgrade
)
if options.warn_about_root_user:
warn_if_run_as_root()
return SUCCESS
def _handle_target_dir(
self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool
) -> None:
ensure_dir(target_dir)
# Checking both purelib and platlib directories for installed
# packages to be moved to target directory
lib_dir_list = []
scheme = get_scheme("", home=target_temp_dir.path)
purelib_dir = scheme.purelib
platlib_dir = scheme.platlib
data_dir = scheme.data
if os.path.exists(purelib_dir):
lib_dir_list.append(purelib_dir)
if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
lib_dir_list.append(platlib_dir)
if os.path.exists(data_dir):
lib_dir_list.append(data_dir)
for lib_dir in lib_dir_list:
for item in os.listdir(lib_dir):
if lib_dir == data_dir:
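                    # Skip data-dir entries that contain the purelib/platlib
                    # directories already handled above.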
ddir = os.path.join(data_dir, item)
if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
continue
target_item_dir = os.path.join(target_dir, item)
if os.path.exists(target_item_dir):
if not upgrade:
logger.warning(
"Target directory %s already exists. Specify "
"--upgrade to force replacement.",
target_item_dir,
)
continue
if os.path.islink(target_item_dir):
logger.warning(
"Target directory %s already exists and is "
"a link. pip will not automatically replace "
"links, please remove if replacement is "
"desired.",
target_item_dir,
)
continue
if os.path.isdir(target_item_dir):
shutil.rmtree(target_item_dir)
else:
os.remove(target_item_dir)
shutil.move(os.path.join(lib_dir, item), target_item_dir)
def _determine_conflicts(
self, to_install: List[InstallRequirement]
) -> Optional[ConflictDetails]:
try:
return check_install_conflicts(to_install)
except Exception:
logger.exception(
"Error while checking for conflicts. Please file an issue on "
"pip's issue tracker: https://github.com/pypa/pip/issues/new"
)
return None
def _warn_about_conflicts(
self, conflict_details: ConflictDetails, resolver_variant: str
) -> None:
package_set, (missing, conflicting) = conflict_details
if not missing and not conflicting:
return
parts: List[str] = []
if resolver_variant == "legacy":
parts.append(
"pip's legacy dependency resolver does not consider dependency "
"conflicts when selecting packages. This behaviour is the "
"source of the following dependency conflicts."
)
else:
assert resolver_variant == "2020-resolver"
parts.append(
"pip's dependency resolver does not currently take into account "
"all the packages that are installed. This behaviour is the "
"source of the following dependency conflicts."
)
# NOTE: There is some duplication here, with commands/check.py
for project_name in missing:
version = package_set[project_name][0]
for dependency in missing[project_name]:
message = (
"{name} {version} requires {requirement}, "
"which is not installed."
).format(
name=project_name,
version=version,
requirement=dependency[1],
)
parts.append(message)
for project_name in conflicting:
version = package_set[project_name][0]
for dep_name, dep_version, req in conflicting[project_name]:
message = (
"{name} {version} requires {requirement}, but {you} have "
"{dep_name} {dep_version} which is incompatible."
).format(
name=project_name,
version=version,
requirement=req,
dep_name=dep_name,
dep_version=dep_version,
you=("you" if resolver_variant == "2020-resolver" else "you'll"),
)
parts.append(message)
logger.critical("\n".join(parts))
def get_lib_location_guesses(
user: bool = False,
home: Optional[str] = None,
root: Optional[str] = None,
isolated: bool = False,
prefix: Optional[str] = None,
) -> List[str]:
scheme = get_scheme(
"",
user=user,
home=home,
root=root,
isolated=isolated,
prefix=prefix,
)
return [scheme.purelib, scheme.platlib]
def site_packages_writable(root: Optional[str], isolated: bool) -> bool:
return all(
test_writable_dir(d)
for d in set(get_lib_location_guesses(root=root, isolated=isolated))
)
def decide_user_install(
use_user_site: Optional[bool],
prefix_path: Optional[str] = None,
target_dir: Optional[str] = None,
root_path: Optional[str] = None,
isolated_mode: bool = False,
) -> bool:
"""Determine whether to do a user install based on the input options.
If use_user_site is False, no additional checks are done.
If use_user_site is True, it is checked for compatibility with other
options.
If use_user_site is None, the default behaviour depends on the environment,
which is provided by the other arguments.
"""
# In some cases (config from tox), use_user_site can be set to an integer
# rather than a bool, which 'use_user_site is False' wouldn't catch.
if (use_user_site is not None) and (not use_user_site):
logger.debug("Non-user install by explicit request")
return False
if use_user_site:
if prefix_path:
raise CommandError(
"Can not combine '--user' and '--prefix' as they imply "
"different installation locations"
)
if virtualenv_no_global():
raise InstallationError(
"Can not perform a '--user' install. User site-packages "
"are not visible in this virtualenv."
)
logger.debug("User install by explicit request")
return True
# If we are here, user installs have not been explicitly requested/avoided
assert use_user_site is None
# user install incompatible with --prefix/--target
if prefix_path or target_dir:
logger.debug("Non-user install due to --prefix or --target option")
return False
# If user installs are not enabled, choose a non-user install
if not site.ENABLE_USER_SITE:
logger.debug("Non-user install because user site-packages disabled")
return False
# If we have permission for a non-user install, do that,
# otherwise do a user install.
if site_packages_writable(root=root_path, isolated=isolated_mode):
logger.debug("Non-user install because site-packages writeable")
return False
logger.info(
"Defaulting to user installation because normal site-packages "
"is not writeable"
)
return True
def reject_location_related_install_options(
requirements: List[InstallRequirement], options: Optional[List[str]]
) -> None:
"""If any location-changing --install-option arguments were passed for
requirements or on the command-line, then show a deprecation warning.
"""
def format_options(option_names: Iterable[str]) -> List[str]:
return ["--{}".format(name.replace("_", "-")) for name in option_names]
offenders = []
for requirement in requirements:
install_options = requirement.install_options
location_options = parse_distutils_args(install_options)
if location_options:
offenders.append(
"{!r} from {}".format(
format_options(location_options.keys()), requirement
)
)
if options:
location_options = parse_distutils_args(options)
if location_options:
offenders.append(
"{!r} from command line".format(format_options(location_options.keys()))
)
if not offenders:
return
raise CommandError(
"Location-changing options found in --install-option: {}."
" This is unsupported, use pip-level options like --user,"
" --prefix, --root, and --target instead.".format("; ".join(offenders))
)
def create_os_error_message(
error: OSError, show_traceback: bool, using_user_site: bool
) -> str:
"""Format an error message for an OSError
It may occur anytime during the execution of the install command.
"""
parts = []
# Mention the error if we are not going to show a traceback
parts.append("Could not install packages due to an OSError")
if not show_traceback:
parts.append(": ")
parts.append(str(error))
else:
parts.append(".")
    # Split the error indication from a helper message (if any)
parts[-1] += "\n"
# Suggest useful actions to the user:
# (1) using user site-packages or (2) verifying the permissions
if error.errno == errno.EACCES:
user_option_part = "Consider using the `--user` option"
permissions_part = "Check the permissions"
if not running_under_virtualenv() and not using_user_site:
parts.extend(
[
user_option_part,
" or ",
permissions_part.lower(),
]
)
else:
parts.append(permissions_part)
parts.append(".\n")
# Suggest the user to enable Long Paths if path length is
# more than 260
if (
WINDOWS
and error.errno == errno.ENOENT
and error.filename
and len(error.filename) > 260
):
parts.append(
"HINT: This error might have occurred since "
"this system does not have Windows Long Path "
"support enabled. You can find information on "
"how to enable this at "
"https://pip.pypa.io/warnings/enable-long-paths\n"
)
return "".join(parts).strip() + "\n"
|
the-stack_0_16613 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module contains the helper for CI step.
It is used to find and verify the correctness of Beam examples/katas/tests.
"""
import logging
from typing import List
from api.v1.api_pb2 import STATUS_COMPILE_ERROR, STATUS_ERROR, STATUS_RUN_ERROR, \
STATUS_RUN_TIMEOUT, \
STATUS_VALIDATION_ERROR, STATUS_PREPARATION_ERROR
from config import Config
from grpc_client import GRPCClient
from helper import Example, get_statuses
class VerifyException(Exception):
def __init__(self, error: str):
super().__init__()
self.msg = error
def __str__(self):
return self.msg
class CIHelper:
"""
Helper for CI step.
    It is used to find and verify the correctness of Beam examples/katas/tests.
"""
async def verify_examples(self, examples: List[Example]):
"""
Verify correctness of beam examples.
1. Find all beam examples starting from directory os.getenv("BEAM_ROOT_DIR")
2. Group code of examples by their SDK.
3. Run processing for single-file examples to verify examples' code.
"""
single_file_examples = list(filter(
lambda example: example.tag.multifile is False, examples))
await get_statuses(single_file_examples)
await self._verify_examples(single_file_examples)
async def _verify_examples(self, examples: List[Example]):
"""
Verify statuses of beam examples and the number of found default examples.
        Check example.status for each example. If the status of the example is:
- STATUS_VALIDATION_ERROR/STATUS_PREPARATION_ERROR
/STATUS_ERROR/STATUS_RUN_TIMEOUT: log error
- STATUS_COMPILE_ERROR: get logs using GetCompileOutput request and
log them with error.
- STATUS_RUN_ERROR: get logs using GetRunError request and
log them with error.
Args:
examples: beam examples that should be verified
"""
count_of_verified = 0
client = GRPCClient()
verify_status_failed = False
default_examples = []
for example in examples:
if example.tag.default_example:
default_examples.append(example)
if example.status not in Config.ERROR_STATUSES:
count_of_verified += 1
continue
if example.status == STATUS_VALIDATION_ERROR:
logging.error("Example: %s has validation error", example.filepath)
elif example.status == STATUS_PREPARATION_ERROR:
logging.error("Example: %s has preparation error", example.filepath)
elif example.status == STATUS_ERROR:
logging.error(
"Example: %s has error during setup run builder", example.filepath)
elif example.status == STATUS_RUN_TIMEOUT:
logging.error("Example: %s failed because of timeout", example.filepath)
elif example.status == STATUS_COMPILE_ERROR:
err = await client.get_compile_output(example.pipeline_id)
logging.error(
"Example: %s has compilation error: %s", example.filepath, err)
elif example.status == STATUS_RUN_ERROR:
err = await client.get_run_error(example.pipeline_id)
logging.error(
"Example: %s has execution error: %s", example.filepath, err)
verify_status_failed = True
logging.info(
"Number of verified Playground examples: %s / %s",
count_of_verified,
len(examples))
logging.info(
"Number of Playground examples with some error: %s / %s",
len(examples) - count_of_verified,
len(examples))
if len(default_examples) == 0:
logging.error("Default example not found")
raise VerifyException(
"CI step failed due to finding an incorrect number "
"of default examples. Default example not found")
if len(default_examples) > 1:
logging.error("Many default examples found")
logging.error("Examples where the default_example field is true:")
for example in default_examples:
logging.error(example.filepath)
raise VerifyException(
"CI step failed due to finding an incorrect number "
"of default examples. Many default examples found")
if verify_status_failed:
raise VerifyException("CI step failed due to errors in the examples")
|
the-stack_0_16614 | from __future__ import unicode_literals
from django.test import TestCase
try:
from unittest.mock import call, patch
except ImportError:
from mock import call, patch
from ..forms import AggregateMetricForm, MetricCategoryForm
class TestAggregateMetricForm(TestCase):
def test_form(self):
"""Test that form has choices populated from R.metric_slugs"""
# Set up a mock result for R.metric_slugs
config = {'return_value.metric_slugs.return_value': ['test-slug']}
with patch('redis_metrics.forms.R', **config) as mock_R:
form = AggregateMetricForm()
mock_R.assert_has_calls([
call(),
call().metric_slugs(),
])
self.assertEqual(
form.fields['metrics'].choices,
[('test-slug', 'test-slug')]
)
def test_cleaned_data(self):
"""Verify we get expected results from cleaned_data"""
# Set up a mock result for R.metric_slugs
config = {'return_value.metric_slugs.return_value': ['test-slug']}
with patch('redis_metrics.forms.R', **config):
form = AggregateMetricForm({"metrics": ["test-slug"]})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, {"metrics": ["test-slug"]})
class TestMetricCategoryForm(TestCase):
def test_form(self):
"""Test that the form has choices from R.metric_slugs, and that
providing a ``category`` argument sets initial values."""
# Set up a mock result for R.metric_slugs & R._category_slugs
config = {
'return_value.metric_slugs.return_value': ['test-slug'],
'return_value._category_slugs.return_value': ['test-slug']
}
with patch('redis_metrics.forms.R', **config) as mock_R:
# No Category
form = MetricCategoryForm()
self.assertFalse(form.fields['metrics'].required)
mock_R.assert_has_calls([
call(),
call().metric_slugs(),
])
self.assertEqual(
form.fields['metrics'].choices,
[('test-slug', 'test-slug')]
)
self.assertEqual(form.fields['metrics'].initial, None)
self.assertEqual(form.fields['category_name'].initial, None)
self.assertFalse(mock_R._category_slugs.called)
mock_R.reset_mock()
# With a Category
initial = {'category_name': "Sample Category"}
form = MetricCategoryForm(initial=initial)
self.assertFalse(form.fields['metrics'].required)
self.assertEqual(form.fields['metrics'].initial, ['test-slug'])
self.assertEqual(
form.fields['category_name'].initial,
"Sample Category"
)
r = mock_R.return_value
r._category_slugs.assert_called_once_with("Sample Category")
def test_cleaned_data(self):
"""Verify we get expected results from cleaned_data."""
# Set up a mock result for R.metric_slugs & R._category_slugs
config = {
'return_value.metric_slugs.return_value': ['test-slug'],
'return_value._category_slugs.return_value': ['test-slug']
}
with patch('redis_metrics.forms.R', **config):
data = {
'category_name': 'Sample Data',
'metrics': ['test-slug'],
}
form = MetricCategoryForm(data)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data, data)
def test_categorize_metrics(self):
"""Test the ``categorize_metrics`` method; This method should be called
after POSTing."""
k = {
'return_value.metric_slugs.return_value': ['foo', 'bar', 'baz'],
'return_value._category_slugs.return_value': ['foo', 'bar'],
}
with patch('redis_metrics.forms.R', **k) as mock_R:
data = {'category_name': 'Foo', 'metrics': ['foo', 'bar']}
form = MetricCategoryForm(data)
self.assertTrue(form.is_valid())
form.categorize_metrics()
# This is what should happen in the form when POSTing
mock_R.assert_has_calls([
# happens in __init__
call(),
call().metric_slugs(),
# happens in categorize_metrics
call().reset_category('Foo', ['foo', 'bar'])
])
|
the-stack_0_16615 | #
# KTH Royal Institute of Technology
# DD2424: Deep Learning in Data Science
# Assignment 4
#
# Carlo Rapisarda ([email protected])
#
import numpy as np
from sys import stderr
from model import RNNet
Theta = RNNet.Theta
def eprint(*args, **kwargs):
print(*args, file=stderr, **kwargs)
def unpickle(filename):
import pickle
with open(filename, 'rb') as f:
res = pickle.load(f, encoding='bytes')
return res
def pickle(obj, filename):
import pickle as pickle_
with open(filename, 'wb') as f:
pickle_.dump(obj, f)
def _compute_grads_numerical(X, Y, m, K, theta, loss_fn, h):
grads = Theta.zeros(m, K)
grads_v = vars(grads)
theta = vars(theta)
for k in theta:
for i in range(theta[k].size):
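            # Central difference: evaluate the loss at theta_i - h and at theta_i + h,
            # restoring theta_i to its original value after each evaluation.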
theta[k].itemset(i, theta[k].item(i) - h)
l1 = loss_fn(X, Y)
theta[k].itemset(i, theta[k].item(i) + h)
theta[k].itemset(i, theta[k].item(i) + h)
l2 = loss_fn(X, Y)
theta[k].itemset(i, theta[k].item(i) - h)
grads_v[k].itemset(i, (l2 - l1) / (2.0 * h))
return grads
def compute_grads_numerical(X, Y, h0, net: RNNet, step_size=1e-5):
old_theta = net.theta
tmp_theta = old_theta.copy()
m, K = net.m, net.K
net.theta = tmp_theta
def loss_fn(X_, Y_):
return net.cross_entropy_loss(X_, Y_, h_prev=h0)
grads = _compute_grads_numerical(X, Y, m, K, tmp_theta, loss_fn, step_size)
net.theta = old_theta
return grads
def relative_err(a,b,eps=1e-12):
assert a.shape == b.shape
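    # Element-wise relative error |a - b| / (|a| + |b|), with eps guarding against
    # division by zero; this is the usual metric for gradient checking.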
return np.abs(a-b) / np.maximum(eps, np.abs(a)+np.abs(b))
def compare_grads(lhs: Theta, rhs: Theta, m, K):
errors = Theta.zeros(m, K)
errors_v = vars(errors)
lhs = vars(lhs)
rhs = vars(rhs)
for k in lhs:
errors_v[k] = relative_err(lhs[k], rhs[k])
return errors
def simple_smooth_1d(x, alpha):
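    # Exponentially weighted moving average: each output mixes the previous
    # smoothed value (weight alpha) with the current sample (weight 1 - alpha).
    # Illustrative example: x = [1., 3., 2.], alpha = 0.9 -> [1.0, 1.2, 1.28].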
assert len(x.shape) == 1, 'Function only works with 1D arrays'
smooth_x = np.zeros(x.shape[0])
smooth_x[0] = x[0]
for i in range(1, smooth_x.size):
smooth_x[i] = alpha * smooth_x[i-1] + (1.0 - alpha) * x[i]
return smooth_x
|
the-stack_0_16616 | import pathlib
from setuptools import setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="my_package_chetan",
version="2.0.0",
description="Read the latest my_package.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/chetanghadawaje/my_package.git",
author="Chetan Ghadawaje",
author_email="[email protected]",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=["my_package"],
include_package_data=True,
install_requires=[],
entry_points={
"console_scripts": [
"my_package=__main__:main",
]
},
)
|
the-stack_0_16617 | from invoke.vendor import six
import fabric.connection
def create_connection(host, user, identity_file):
return fabric.connection.Connection(host=host,
user=user,
connect_kwargs={
'key_filename': identity_file,
})
def mount_volume(conn, device, mounting_point, user, group):
# Catch tail of greeting output
res = conn.sudo('whoami', hide=True)
# Inspect volume's file system
res = conn.sudo('file -s {}'.format(device), hide=True)
# Ensure volume contains a file system
has_file_system = res.stdout.strip() != '{}: data'.format(device)
if not has_file_system:
conn.sudo('mkfs -t ext4 {}'.format(device), hide=True)
# Create mounting point
res = conn.run('mkdir -p {}'.format(mounting_point), hide=True)
# Mount volume
res = conn.sudo('mount {} {}'.format(device, mounting_point), hide=True)
# If file system has just been created, fix group and user of the mounting point
if not has_file_system:
res = conn.sudo('chown -R {}:{} {}'.format(group, user, mounting_point), hide=True)
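# Illustrative usage (host, key path, device and mount point are assumptions):
#   conn = create_connection('198.51.100.10', 'ubuntu', '~/.ssh/id_rsa')
#   mount_volume(conn, '/dev/xvdf', '/data', user='ubuntu', group='ubuntu')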
def install_python_packages(conn, virtual_env, packages):
if not packages:
return
with conn.prefix('source activate {}'.format(virtual_env)):
conn.run('pip install {}'.format(' '.join(packages)), hide=True)
def install_packages(conn, packages):
if not packages:
return
# TODO: handle locked /var/lib/dpkg/lock
conn.sudo('apt install -y {}'.format(' '.join(packages)))
def sync_files(conn, local_path, remote_path, is_upload, is_recursive, allow_delete=False, strict_host_keys=True):
"""This code was ported from https://github.com/fabric/patchwork and extended for two-way transfer. """
exclude = ()
ssh_opts = ""
rsync_opts = '--out-format="[%t] {} %f %\'\'b"'.format('OUT' if is_upload else 'IN')
if is_recursive:
rsync_opts += ' -r'
# Turn single-string exclude into a one-item list for consistency
if isinstance(exclude, six.string_types):
exclude = [exclude]
# Create --exclude options from exclude list
exclude_opts = ' --exclude "{}"' * len(exclude)
# Double-backslash-escape
exclusions = tuple([str(s).replace('"', '\\\\"') for s in exclude])
# Honor SSH key(s)
key_string = ""
# TODO: seems plausible we need to look in multiple places if there's too
# much deferred evaluation going on in how we eg source SSH config files
# and so forth, re: connect_kwargs
# TODO: we could get VERY fancy here by eg generating a tempfile from any
# in-memory-only keys...but that's also arguably a security risk, so...
keys = conn.connect_kwargs.get("key_filename", [])
# TODO: would definitely be nice for Connection/FabricConfig to expose an
# always-a-list, always-up-to-date-from-all-sources attribute to save us
# from having to do this sort of thing. (may want to wait for Paramiko auth
# overhaul tho!)
if isinstance(keys, six.string_types):
keys = [keys]
if keys:
key_string = "-i " + " -i ".join(keys)
# Get base cxn params
user, host, port = conn.user, conn.host, conn.port
port_string = "-p {}".format(port)
# Remote shell (SSH) options
rsh_string = ""
# Strict host key checking
disable_keys = "-o StrictHostKeyChecking=no"
if not strict_host_keys and disable_keys not in ssh_opts:
ssh_opts += " {}".format(disable_keys)
rsh_parts = [key_string, port_string, ssh_opts]
if any(rsh_parts):
rsh_string = "--rsh='ssh {}'".format(" ".join(rsh_parts))
# Set up options part of string
options_map = {
"delete": "--delete" if allow_delete else "",
"exclude": exclude_opts.format(*exclusions),
"rsh": rsh_string,
"extra": rsync_opts,
}
options = "{delete}{exclude} -pthrvz {extra} {rsh}".format(**options_map)
# Create and run final command string
# TODO: richer host object exposing stuff like .address_is_ipv6 or whatever
if host.count(":") > 1:
# Square brackets are mandatory for IPv6 rsync address,
# even if port number is not specified
cmd = "rsync {opt:} {local:} [{user:}@{host:}]:{remote:}" if is_upload else "rsync {opt:} [{user:}@{host:}]:{remote:} {local:}"
else:
cmd = "rsync {opt:} {local:} {user:}@{host:}:{remote:}" if is_upload else "rsync {opt:} {user:}@{host:}:{remote:} {local:}"
cmd = cmd.format(opt=options, local=local_path, user=user, host=host, remote=remote_path)
res = conn.local(cmd, hide=True)
# Get transferred files
transferred_files = res.stdout.strip('\n').split('\n')[1:-3]
if len(transferred_files) > 0:
print('\n'.join(transferred_files))
__all__ = [
'create_connection',
'mount_volume',
'install_python_packages',
'install_packages',
'sync_files'
]
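# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). Host, key path,
# device and directory names below are placeholders:
#
#   conn = create_connection('203.0.113.10', 'ubuntu', '~/.ssh/id_rsa')
#   mount_volume(conn, device='/dev/xvdf', mounting_point='/data',
#                user='ubuntu', group='ubuntu')
#   install_packages(conn, ['rsync', 'htop'])
#   sync_files(conn, local_path='./src/', remote_path='/data/src',
#              is_upload=True, is_recursive=True)
# ---------------------------------------------------------------------------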
|
the-stack_0_16620 | import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Dense, BatchNormalization, ReLU, Input, LSTM, Concatenate, Masking, Reshape, Lambda, \
Bidirectional, GRU, LayerNormalization, Bidirectional, Conv2D, Conv1D, MaxPooling2D, Flatten, LayerNormalization, Layer, Embedding, MultiHeadAttention, Dropout
from tensorflow.keras.regularizers import l1, l2
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
tfpl = tfp.layers
from lfp.custom_layers import LearnedInitLSTM, LearnedInitGRU
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
def latent_normal(inputs):
mu, scale = inputs
dist = tfd.Normal(loc=mu, scale=scale)
return dist
def logistic_mixture(inputs, qbits=None):
"""
:param inputs:
:param qbits: number of quantisation bits, total quantisation intervals = 2 ** qbits
:return:
"""
weightings, mu, scale = inputs
if qbits is not None:
dist = tfd.Logistic(loc=mu, scale=scale)
dist = tfd.QuantizedDistribution(
distribution=tfd.TransformedDistribution(
distribution=dist,
bijector=tfb.Shift(shift=-0.5)),
low=-2 ** qbits / 2.,
high=2 ** qbits / 2.,
)
else:
dist = tfd.Logistic(loc=mu, scale=scale)
mixture_dist = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(logits=weightings),
components_distribution=dist,
validate_args=True
)
if qbits is not None:
action_limits = tf.constant([1.5, 1.5, 2.2, 3.2, 3.2, 3.2, 1.1])
mixture_dist = tfd.TransformedDistribution(
distribution=mixture_dist,
bijector=tfb.Scale(scale=action_limits / (2 ** qbits / 2.)) # scale to action limits
)
return mixture_dist
def create_actor(obs_dim, act_dim, goal_dim,
layer_size=1024, latent_dim=256, epsilon=1e-4, num_distribs=None, qbits=None, gcbc=False,
training=True, return_state=False, discrete=False, disc_embed_size=64, **kwargs):
# params #
batch_size = None if training else 1
stateful = not training
# Input #
o = Input(shape=(None, obs_dim), batch_size=batch_size, dtype=tf.float32, name='input_obs')
z = Input(shape=(None, latent_dim), batch_size=batch_size, dtype=tf.float32, name='input_latent')
g = Input(shape=(None, goal_dim), batch_size=batch_size, dtype=tf.float32, name='input_goals')
# RNN #
if discrete:
embed = Dense(disc_embed_size, activation = 'relu', name='disc_to_cts_embedding')(z)
x = Concatenate(axis=-1)([o, embed, g])
else:
x = Concatenate(axis=-1)([o, z, g])
x = Masking(mask_value=0.)(x)
if return_state:
x, _, state1 = LSTM(layer_size, return_sequences=True, stateful=stateful, name='LSTM_in_1',
return_state=return_state)(x)
x, _, state2 = LSTM(layer_size, return_sequences=True, stateful=stateful, name='LSTM_in_2',
return_state=return_state)(x)
else:
x = LSTM(layer_size, return_sequences=True, stateful=stateful, name='LSTM_in_1', return_state=return_state)(x)
x = LSTM(layer_size, return_sequences=True, stateful=stateful, name='LSTM_in_2', return_state=return_state)(x)
# Probabilistic Mixture Model #
if num_distribs is not None:
weightings = Dense(act_dim * num_distribs, activation=None, name='alpha')(x)
mu = Dense(act_dim * num_distribs, activation=None, name='mu')(x)
scale = Dense(act_dim * num_distribs, activation="softplus", name='sigma')(x + epsilon)
weightings = Reshape((-1, act_dim, num_distribs))(weightings)
mu = Reshape((-1, act_dim, num_distribs))(mu)
scale = Reshape((-1, act_dim, num_distribs))(scale)
actions = tfpl.DistributionLambda(logistic_mixture, name='logistic_mix')([weightings, mu, scale], qbits)
else:
actions = Dense(act_dim, activation=None, name='acts')(x)
if return_state:
if gcbc:
return Model([o, g], [actions, state1, state2])
else:
return Model([o, z, g], [actions, state1, state2])
else:
if gcbc:
return Model([o, g], actions)
else:
return Model([o, z, g], actions)
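# Editor's usage sketch (not part of the original module); the dimensions and
# tensor names are illustrative:
#
#   actor = create_actor(obs_dim=18, act_dim=7, goal_dim=11,
#                        layer_size=1024, latent_dim=256, num_distribs=5)
#   action_dist = actor([obs, latent_plan, goal])   # each input: (B, T, dim)
#   actions = action_dist.sample()                  # logistic-mixture sample
#
# With num_distribs=None the network returns deterministic actions instead of
# a distribution.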
def create_encoder(enc_in_dim,
layer_size=2048, latent_dim=256, epsilon=1e-4, training=True, **kwargs):
# Input #
inputs = Input(shape=(None, enc_in_dim), dtype=tf.float32, name='encoder_in')
# Layers #
x = Masking(mask_value=0.)(inputs)
x = Bidirectional(LSTM(layer_size, return_sequences=True), merge_mode='concat')(x)
x = Bidirectional(LSTM(layer_size, return_sequences=False), merge_mode='concat')(x)
# Latent Variable #
mu = Dense(latent_dim, activation=None, name='mu')(x)
scale = Dense(latent_dim, activation="softplus", name='sigma')(x + epsilon)
mixture = tfpl.DistributionLambda(latent_normal, name='latent_variable')((mu, scale))
return Model([inputs], mixture)
# def create_discrete_encoder(enc_in_dim, layer_size=2048, latent_dim=1024, **kwargs):
# # Input #
# inputs = Input(shape=(None, enc_in_dim), dtype=tf.float32, name='encoder_in')
# # Layers #
# x = Masking(mask_value=0.)(inputs)
# x = Bidirectional(LSTM(layer_size, return_sequences=True), merge_mode='concat')(x)
# x = Bidirectional(LSTM(layer_size, return_sequences=False), merge_mode='concat')(x)
# logits = Dense(latent_dim, name='to_vocab')(x)
# return Model([inputs], logits)
def create_discrete_encoder(enc_in_dim, layer_size=128, latent_dim=64, reductions=3, **kwargs):
# Input #
inputs = Input(shape=(None, enc_in_dim), dtype=tf.float32, name='encoder_in')
# Layers #
x = Masking(mask_value=0.)(inputs)
x = Bidirectional(LSTM(layer_size, return_sequences=True), merge_mode='concat')(x)
x = Bidirectional(LSTM(layer_size, return_sequences=True), merge_mode='concat')(x)
for l in range(reductions-1):
print(l)
x = Conv1D(layer_size, kernel_size=3, strides=2, padding="same")(x)
embed = Conv1D(latent_dim, kernel_size=3, strides=2, padding="same")(x)
return Model([inputs], embed)
def create_planner(obs_dim, goal_dim,
layer_size=2048, latent_dim=256, epsilon=1e-4, training=True, **kwargs):
# params #
batch_size = None
# Input #
o_i = Input(shape=(obs_dim,), batch_size=batch_size, dtype=tf.float32,
name='initial_obs') # has arm state
o_g = Input(shape=(goal_dim,), batch_size=batch_size, dtype=tf.float32,
name='goal_obs') # does not have arm state
# Layers #
x = Concatenate(axis=-1)([o_i, o_g])
x = Masking(mask_value=0.)(x)
x = Dense(layer_size, activation="relu", name='layer_1')(x)
x = Dense(layer_size, activation="relu", name='layer_2')(x)
x = Dense(layer_size, activation="relu", name='layer_3')(x)
x = Dense(layer_size, activation="relu", name='layer_4')(x)
# Latent Variable #
mu = Dense(latent_dim, activation=None, name='mu')(x)
scale = Dense(latent_dim, activation="softplus", name='sigma')(x + epsilon)
mixture = tfpl.DistributionLambda(latent_normal, name='latent_variable')((mu, scale))
return Model([o_i, o_g], mixture)
def create_discrete_planner(obs_dim, goal_dim,
layer_size=2048, latent_dim=256, epsilon=1e-4, training=True, **kwargs):
    '''
    Takes start_state and goal_state tensors of shape (B, N_TILES, D);
    an LSTM then predicts which discrete plan token to use for each tile.
    '''
# params #
batch_size = None if training else 1
stateful = not training
# Input #
o = Input(shape=(None, obs_dim), batch_size=batch_size, dtype=tf.float32, name='input_obs')
g = Input(shape=(None, goal_dim), batch_size=batch_size, dtype=tf.float32, name='input_goals')
# RNN #
x = Concatenate(axis=-1)([o, g])
x = LSTM(layer_size, return_sequences=True, stateful=stateful, name='LSTM_in_1', return_state=False)(x)
x = LSTM(layer_size, return_sequences=True, stateful=stateful, name='LSTM_in_2', return_state=False)(x)
tokens = Dense(latent_dim, name='acts')(x)
return Model([o, g], tokens)
# maps from sentence embedding space to goal dim space
def create_goal_space_mapper(input_embedding_dim, goal_embedding_dim,
layer_size=2048, **kwargs):
# params #
batch_size = None
# Input #
input_embeddings = Input(shape=(input_embedding_dim,), batch_size=batch_size, dtype=tf.float32,
name='lang_embeds') # embeddings created by MUSE or equiv
# Layers #
x = Masking(mask_value=0.)(input_embeddings)
x = Dense(layer_size, activation="relu", name='layer_1')(x)
x = Dense(layer_size, activation="relu", name='layer_2')(x)
goal_embeddings = Dense(goal_embedding_dim, activation=None, name='goal_space')(x)
return Model(input_embeddings, goal_embeddings)
# InfoVAE related
def compute_kernel(x, y):
x_size = tf.shape(x)[0]
y_size = tf.shape(y)[0]
dim = tf.shape(x)[1]
tiled_x = tf.tile(tf.reshape(x, tf.stack([x_size, 1, dim])), tf.stack([1, y_size, 1]))
tiled_y = tf.tile(tf.reshape(y, tf.stack([1, y_size, dim])), tf.stack([x_size, 1, 1]))
return tf.exp(-tf.reduce_mean(tf.square(tiled_x - tiled_y), axis=2) / tf.cast(dim, tf.float32))
def compute_mmd(x, y):
x_kernel = compute_kernel(x, x)
y_kernel = compute_kernel(y, y)
xy_kernel = compute_kernel(x, y)
return tf.reduce_mean(x_kernel) + tf.reduce_mean(y_kernel) - 2 * tf.reduce_mean(xy_kernel)
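# Editor's sketch (not part of the original file): compute_mmd is typically
# used as an InfoVAE-style regulariser between posterior samples and samples
# from a unit-Gaussian prior; the names below are illustrative:
#
#   z_post = encoder(inputs).sample()               # (batch, latent_dim)
#   z_prior = tf.random.normal(tf.shape(z_post))
#   loss = reconstruction_loss + beta * compute_mmd(z_prior, z_post)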
# # Standard CNN boi
# def create_vision_network(img_height, img_width, embedding_size = 256):
# return Sequential([
# Rescaling(1./255, input_shape=(img_height, img_width, 3)), # put it here for portability
# Conv2D(32, 3, padding='same', activation='relu'),
# MaxPooling2D(),
# Conv2D(32, 3, padding='same', activation='relu'),
# MaxPooling2D(),
# Conv2D(64, 3, padding='same', activation='relu'),
# MaxPooling2D(),
# Conv2D(64, 3, padding='same', activation='relu'),
# MaxPooling2D(),
# Conv2D(128, 3, padding='same', activation='relu'),
# MaxPooling2D(),
# Conv2D(128, 3, padding='same', activation='relu'),
# MaxPooling2D(),
# Conv2D(64, 3, padding='same', activation='relu', name='features'),
# Flatten(),
# Dense(512, activation='relu'),
# Dense(embedding_size),
# ], name = 'feature_encoder')
# # Has a cheeky 10M params but ok. This is the option which uses spatial softmax.
# class cnn(tf.keras.Model):
# # TODO: Make height width dependent
# def __init__(self, img_height=128, img_width = 128, img_channels=3, embedding_size=64):
# super(cnn, self).__init__()
# self.img_height = img_height
# self.img_width = img_width
# self.img_channels = img_channels
# self.rescaling = Rescaling(1./255, input_shape=(img_height, img_width, img_channels)) # put it here for portability
# self.conv1 = Conv2D(32, 8, strides=(4,4), padding='same', activation='relu', name='c1')
# self.conv2 = Conv2D(64, 4, strides=(2,2), padding='same', activation='relu', name='c2')
# self.conv3 = Conv2D(64, 4, strides=(2,2), padding='same', activation='relu', name='c3')
# self.conv4 = Conv2D(64, 3, strides=(1,1), padding='same', activation='relu', name='c4')
# # In between these, do a spatial softmax
# self.flatten = Flatten()
# self.dense1 = Dense(512, activation='relu')
# self.dense2 = Dense(embedding_size)
# def call(self, inputs):
# x = self.rescaling(inputs)
# x = self.conv1(x)
# x = self.conv2(x)
# x = self.conv3(x)
# pre_softmax = self.conv4(x)
# # Assume features is of size [N, H, W, C] (batch_size, height, width, channels).
# # Transpose it to [N, C, H, W], then reshape to [N * C, H * W] to compute softmax
# # jointly over the image dimensions.
# N, H, W, C = pre_softmax.shape
# pre_softmax = tf.reshape(tf.transpose(pre_softmax, [0, 3, 1, 2]), [N * C, H * W])
# softmax = tf.nn.softmax(pre_softmax)
# # Reshape and transpose back to original format.
# softmax = tf.transpose(tf.reshape(softmax, [N, C, H, W]), [0, 2, 3, 1])
# x = self.flatten(softmax)
# x = self.dense1(x)
# return self.dense2(x)
# Has a cheeky 10M params but ok. This is the option which uses spatial softmax.
class spatial_softmax_cnn(tf.keras.Model):
# TODO: Make height width dependent
def __init__(self, img_height=128, img_width = 128, img_channels=3, embedding_size=64, return_spatial_softmax = False):
super(spatial_softmax_cnn, self).__init__()
self.img_height = img_height
self.img_width = img_width
self.img_channels = img_channels
self.rescaling = Rescaling(1./255, input_shape=(img_height, img_width, img_channels)) # put it here for portability
self.conv1 = Conv2D(32, 8, strides=(4,4), padding='same', activation='relu', name='c1')
self.conv2 = Conv2D(64, 4, strides=(2,2), padding='same', activation='relu', name='c2')
self.conv3 = Conv2D(64, 3, strides=(1,1), padding='same', activation='relu', name='c3')
# In between these, do a spatial softmax
self.flatten = Flatten()
self.dense1 = Dense(512, activation='relu')
self.dense2 = Dense(embedding_size)
self.return_spatial_softmax = return_spatial_softmax
def call(self, inputs):
x = self.rescaling(inputs)
x = self.conv1(x)
x = self.conv2(x)
pre_softmax = self.conv3(x)
# pre_softmax = self.conv4(x)
# Assume features is of size [N, H, W, C] (batch_size, height, width, channels).
# Transpose it to [N, C, H, W], then reshape to [N * C, H * W] to compute softmax
# jointly over the image dimensions.
N, H, W, C = pre_softmax.shape
pre_softmax = tf.reshape(tf.transpose(pre_softmax, [0, 3, 1, 2]), [N * C, H * W])
softmax = tf.nn.softmax(pre_softmax)
# Reshape and transpose back to original format.
softmax = tf.transpose(tf.reshape(softmax, [N, C, H, W]), [0, 2, 3, 1]) # N, H, W, C
# Expand dims by 1
softmax = tf.expand_dims(softmax, -1)
x, y = tf.range(0, W)/W, tf.range(0, H)/H # so that feature locations are on a 0-1 scale not 0-128
X,Y = tf.meshgrid(x,y)
# Image coords is a tensor of size [H,W,2] representing the image coordinates of each pixel
image_coords = tf.cast(tf.stack([X,Y],-1), tf.float32)
image_coords= tf.expand_dims(image_coords, 2)
# multiply to get feature locations
spatial_soft_argmax = tf.reduce_sum(softmax * image_coords, axis=[1,2])
x = self.flatten(spatial_soft_argmax)
x = self.dense1(x)
return self.dense2(x), spatial_soft_argmax
class intensities_spatial_softmax_cnn(tf.keras.Model):
# TODO: Make height width dependent
def __init__(self, img_height=128, img_width = 128, img_channels=3, embedding_size=64, return_spatial_softmax = False):
super(intensities_spatial_softmax_cnn, self).__init__()
self.img_height = img_height
self.img_width = img_width
self.img_channels = img_channels
self.rescaling = Rescaling(1./255, input_shape=(img_height, img_width, img_channels)) # put it here for portability
self.conv1 = Conv2D(32, 8, strides=(4,4), padding='same', activation='relu', name='c1')
self.conv2 = Conv2D(64, 4, strides=(2,2), padding='same', activation='relu', name='c2')
self.conv3 = Conv2D(64, 3, strides=(1,1), padding='same', activation='relu', name='c3')
# In between these, do a spatial softmax
self.flatten = Flatten()
self.dense1 = Dense(512, activation='relu')
self.dense2 = Dense(embedding_size)
self.return_spatial_softmax = return_spatial_softmax
def call(self, inputs):
x = self.rescaling(inputs)
x = self.conv1(x)
x = self.conv2(x)
pre_softmax = self.conv3(x)
# pre_softmax = self.conv4(x)
# Assume features is of size [N, H, W, C] (batch_size, height, width, channels).
# Transpose it to [N, C, H, W], then reshape to [N * C, H * W] to compute softmax
# jointly over the image dimensions.
N, H, W, C = pre_softmax.shape
pre_softmax = tf.reshape(tf.transpose(pre_softmax, [0, 3, 1, 2]), [N * C, H * W])
softmax = tf.nn.softmax(pre_softmax)
# Reshape and transpose back to original format.
softmax = tf.transpose(tf.reshape(softmax, [N, C, H, W]), [0, 2, 3, 1]) # N, H, W, C
# Expand dims by 1
softmax = tf.expand_dims(softmax, -1)
x, y = tf.range(0, W)/W, tf.range(0, H)/H # so that feature locations are on a 0-1 scale not 0-128
X,Y = tf.meshgrid(x,y)
# Image coords is a tensor of size [H,W,2] representing the image coordinates of each pixel
image_coords = tf.cast(tf.stack([X,Y],-1), tf.float32)
image_coords= tf.expand_dims(image_coords, 2)
# multiply to get feature locations
spatial_soft_argmax = tf.reduce_sum(softmax * image_coords, axis=[1,2])
# Get indices corresponding to each
        batch_indices = tf.reshape(tf.repeat(tf.range(0, N, 1)[tf.newaxis, :], C), [N, C])[:, :, tf.newaxis]  # 0,0,0, 1,1,1, etc as batch indices
keypoint_indices = tf.tile(tf.range(0,C,1)[tf.newaxis, :], [N,1])[:, :, tf.newaxis] # numbers 1,2,3... 1,2,3... keypoints, batches appropriately
assert W == H # this next step is currently only coded for squares
keypoint_img_indices = tf.reverse(tf.cast(spatial_soft_argmax * W, tf.int32), [-1]) # gather nd has opposite axes to images, x is y, y is x
gather_indices = tf.concat([batch_indices, keypoint_img_indices, keypoint_indices], axis = -1)
feature_intensities = tf.gather_nd(softmax, gather_indices) # N, C, 1
keypoints_with_intensities = tf.concat([feature_intensities, spatial_soft_argmax], -1)
x = self.flatten(keypoints_with_intensities)
x = self.dense1(x)
return self.dense2(x), keypoints_with_intensities
class impala_cnn(tf.keras.Model):
def __init__(self, img_height=128, img_width = 128, img_channels=3, embedding_size=64, return_spatial_softmax = False, l1=16, l2=32, l3=32):
super(impala_cnn, self).__init__()
self.img_height = img_height
self.img_width = img_width
self.img_channels = img_channels
self.rescaling = Rescaling(1./255, input_shape=(img_height, img_width, img_channels)) # put it here for portability
self.conv_1 = Conv2D(l1, 3, strides=(2,2), padding='same', activation='relu', name='c1')
self.res_1_1 = Conv2D(l1, 3, strides=(1,1), padding='same', activation='relu', name='r1_1')
self.res_1_2 = Conv2D(l1, 3, strides=(1,1), padding='same', activation='relu', name='r1_2')
self.conv_2 = Conv2D(l2, 3, strides=(2,2), padding='same', activation='relu', name='c2')
self.res_2_1 = Conv2D(l2, 3, strides=(1,1), padding='same', activation='relu', name='r2_1')
self.res_2_2 = Conv2D(l2, 3, strides=(1,1), padding='same', activation='relu', name='r2_2')
self.conv_3 = Conv2D(l3, 3, strides=(2,2), padding='same', activation='relu', name='c3')
self.res_3_1 = Conv2D(l3, 3, strides=(1,1), padding='same', activation='relu', name='r3_1')
self.res_3_2 = Conv2D(l3, 3, strides=(1,1), padding='same', activation='relu', name='r3_2')
# In between these, do a spatial softmax
self.flatten = Flatten()
self.dense1 = Dense(256, activation='relu')
self.dense2 = Dense(embedding_size)
self.return_spatial_softmax = return_spatial_softmax
def call(self, inputs):
x = self.rescaling(inputs)
x = self.conv_1(x)
r1 = self.res_1_1(x)
x = self.res_1_2(r1) + x
x = self.conv_2(x)
r1 = self.res_2_1(x)
x = self.res_2_2(r1) + x
x = self.conv_3(x)
r1 = self.res_3_1(x)
x = self.res_3_2(r1) + x
x = self.flatten(x)
x = self.dense1(x)
x = self.dense2(x)
return x, 0 # for compat with spatial softmax returns
class deep_impala_cnn(impala_cnn):
def __init__(self, img_height=128, img_width = 128, img_channels=3, embedding_size=64, return_spatial_softmax = False):
super(deep_impala_cnn, self).__init__(img_height, img_width, img_channels, embedding_size, return_spatial_softmax, l1=64, l2=128, l3=128)
CNN_DICT = {'spatial_softmax': spatial_softmax_cnn, 'intensities_spatial_softmax': intensities_spatial_softmax_cnn, 'impala': impala_cnn, 'deep_impala': deep_impala_cnn}
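# Editor's usage sketch (not part of the original module): an encoder is
# normally selected by key and instantiated, e.g.
#
#   encoder = CNN_DICT['impala'](img_height=128, img_width=128,
#                                img_channels=3, embedding_size=64)
#   embedding, aux = encoder(images)   # `images` assumed; aux is keypoints or 0
#
# All four classes return an (embedding, auxiliary) tuple, so they can be
# swapped without changing calling code.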
###############################################################################
def causal_attention_mask(batch_size, n_dest, n_src, dtype):
"""
Mask the upper half of the dot product matrix in self attention.
This prevents flow of information from future tokens to current token.
1's in the lower triangle, counting from the lower right corner.
"""
i = tf.range(n_dest)[:, None]
j = tf.range(n_src)
m = i >= j - n_src + n_dest
mask = tf.cast(m, dtype)
mask = tf.reshape(mask, [1, n_dest, n_src])
mult = tf.concat(
[tf.expand_dims(batch_size, -1), tf.constant([1, 1], dtype=tf.int32)], 0
)
return tf.tile(mask, mult)
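# Editor's illustration (not part of the original module): for
# causal_attention_mask(1, 3, 3, tf.bool) each batch element is the lower
# triangular matrix
#
#   [[ True, False, False],
#    [ True,  True, False],
#    [ True,  True,  True]]
#
# so position t can only attend to positions <= t.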
class TransformerBlock(Layer):
def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):
super(TransformerBlock, self).__init__()
self.att = MultiHeadAttention(num_heads, embed_dim)
self.ffn = Sequential(
[Dense(ff_dim, activation="relu"), Dense(embed_dim),]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(rate)
self.dropout2 = Dropout(rate)
def call(self, inputs):
input_shape = tf.shape(inputs)
batch_size = input_shape[0]
seq_len = input_shape[1]
causal_mask = causal_attention_mask(batch_size, seq_len, seq_len, tf.bool)
attention_output = self.att(inputs, inputs, attention_mask=causal_mask)
attention_output = self.dropout1(attention_output)
out1 = self.layernorm1(inputs + attention_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output)
return self.layernorm2(out1 + ffn_output)
class PositionEmbedding(Layer):
def __init__(self, maxlen, embed_dim):
super(PositionEmbedding, self).__init__()
self.pos_emb = Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = tf.shape(x)[-2]
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = self.pos_emb(positions)
return x + positions
# def create_conditional_transformer(vocab_size, max_len, embed_dim, num_heads, feed_forward_dim=256, num_layers=1):
# goal = Input(shape=(1, goal_dim,), dtype=tf.float32) # so that we can concat easily
# seq = Input(shape=(max_len,), dtype=tf.int32)
# goal_embed = Dense(embed_dim, activation='relu', name='goal_embed')(goal) # convert the goal to the same embedding dim as the seq
# token_embeddings = Embedding(input_dim=vocab_size, output_dim=embed_dim)(seq) # embed the seq
# x = Concatenate(axis=-2)([goal_embed, token_embeddings])
# #
# embedding_layer = PositionEmbedding(max_len+1, embed_dim)
# x = embedding_layer(x)
# for i in range(num_layers):
# x = TransformerBlock(embed_dim, num_heads, feed_forward_dim)(x)
# outputs = Dense(vocab_size)(x)
# model = Model(inputs=[goal, seq], outputs={'logits': outputs, 'x':x})
# return model
class conditional_transformer(Model):
# TODO: Make height width dependent
def __init__(self, vocab_size, max_len,embed_dim, num_heads, feed_forward_dim=256, num_layers=1):
super(conditional_transformer, self).__init__()
self.goal_embed = Dense(embed_dim, activation='relu', name='goal_embed')
self.state_embed = Dense(embed_dim, activation='relu', name='state_embed')
self.token_embeddings = Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.embedding_layer = PositionEmbedding(max_len+1, embed_dim)
self.tformer_layers = [TransformerBlock(embed_dim, num_heads, feed_forward_dim) for i in range(num_layers)]
self.outputs = Dense(vocab_size)
def expand(self, input):
if len(input.shape) == 2:
return input[:, tf.newaxis, :] # insert a time dim
elif len(input.shape) == 1:
return input[tf.newaxis, tf.newaxis, :]
def call(self, inputs):
current_state, goal, seq = inputs # seq should be 1, T (indices)
current_state = self.expand(current_state)
goal = self.expand(goal)
state_embed = self.state_embed(current_state)
goal_embed = self.goal_embed(goal)
if seq is not None:
seq_embed = self.token_embeddings(seq)
x = Concatenate(axis=-2)([goal_embed, state_embed, seq_embed])
else:
x = Concatenate(axis=-2)([goal_embed, state_embed])
x = self.embedding_layer(x)
for l in self.tformer_layers:
x = l(x)
logits = self.outputs(x)
        return {'logits': logits, 'x': x}
|
the-stack_0_16623 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Keijack Wu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import socket
import os
import re
import ssl as _ssl
import threading
import time
import asyncio
from collections import OrderedDict
from socketserver import ThreadingMixIn, TCPServer
from types import coroutine
from urllib.parse import unquote
from urllib.parse import quote
from typing import Any, Awaitable, Callable, Coroutine, Dict, List, Tuple
from simple_http_server import ControllerFunction, StaticFile
from .base_request_handler import BaseHTTPRequestHandler
from .wsgi_request_handler import WSGIRequestHandler
from .__utils import remove_url_first_slash, get_function_args, get_function_kwargs
from .logger import get_logger
_logger = get_logger("simple_http_server.http_server")
class RoutingConf:
HTTP_METHODS = ["OPTIONS", "GET", "HEAD", "POST", "PUT", "DELETE", "TRACE", "CONNECT"]
def __init__(self, res_conf={}):
self.method_url_mapping: Dict[str, Dict[str, ControllerFunction]] = {"_": {}}
self.path_val_url_mapping: Dict[str, Dict[str, ControllerFunction]] = {"_": OrderedDict()}
self.method_regexp_mapping: Dict[str, Dict[str, ControllerFunction]] = {"_": OrderedDict()}
for mth in self.HTTP_METHODS:
self.method_url_mapping[mth] = {}
self.path_val_url_mapping[mth] = OrderedDict()
self.method_regexp_mapping[mth] = OrderedDict()
self.filter_mapping = OrderedDict()
self._res_conf = []
self.add_res_conf(res_conf)
self.ws_mapping = OrderedDict()
self.ws_path_val_mapping = OrderedDict()
self.error_page_mapping = {}
@property
def res_conf(self):
return self._res_conf
@res_conf.setter
def res_conf(self, val: Dict[str, str]):
self._res_conf.clear()
self.add_res_conf(val)
def add_res_conf(self, val: Dict[str, str]):
if not val or not isinstance(val, dict):
return
for res_k, v in val.items():
if res_k.startswith("/"):
k = res_k[1:]
else:
k = res_k
if k.endswith("/*"):
key = k[0:-1]
elif k.endswith("/**"):
key = k[0:-2]
elif k.endswith("/"):
key = k
else:
key = k + "/"
if v.endswith(os.path.sep):
val = v
else:
val = v + os.path.sep
self._res_conf.append((key, val))
self._res_conf.sort(key=lambda it: -len(it[0]))
def __get_path_reg_pattern(self, url):
_url: str = url
path_names = re.findall("(?u)\\{\\w+\\}", _url)
if len(path_names) == 0:
# normal url
return None, path_names
for name in path_names:
_url = _url.replace(name, "([\\w%.-@!\\(\\)\\[\\]\\|\\$]+)")
_url = f"^{_url}$"
quoted_names = []
for name in path_names:
name = name[1: -1]
quoted_names.append(quote(name))
return _url, quoted_names
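    # Editor's illustration (not part of the original source): for a mapping
    # such as "user/{id}/book/{bid}" this method returns the pattern
    #   "^user/([\w%.-@!\(\)\[\]\|\$]+)/book/([\w%.-@!\(\)\[\]\|\$]+)$"
    # together with the quoted placeholder names ["id", "bid"], which are later
    # used to extract path values from the matched groups.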
def map_controller(self, ctrl: ControllerFunction):
url = ctrl.url
regexp = ctrl.regexp
method = ctrl.method
_logger.debug(f"map url {url}|{regexp} with method[{method}] to function {ctrl.func}. ")
assert method is None or method == "" or method.upper() in self.HTTP_METHODS
_method = method.upper() if method is not None and method != "" else "_"
if regexp:
self.method_regexp_mapping[_method][regexp] = ctrl
else:
_url = remove_url_first_slash(url)
path_pattern, path_names = self.__get_path_reg_pattern(_url)
if path_pattern is None:
self.method_url_mapping[_method][_url] = ctrl
else:
self.path_val_url_mapping[_method][path_pattern] = (ctrl, path_names)
def _res_(self, path, res_pre, res_dir):
fpath = os.path.join(res_dir, path.replace(res_pre, ""))
_logger.debug(f"static file. {path} :: {fpath}")
fext = os.path.splitext(fpath)[1]
ext = fext.lower()
if ext in (".html", ".htm", ".xhtml"):
content_type = "text/html"
elif ext == ".xml":
content_type = "text/xml"
elif ext == ".css":
content_type = "text/css"
elif ext in (".jpg", ".jpeg"):
content_type = "image/jpeg"
elif ext == ".png":
content_type = "image/png"
elif ext == ".webp":
content_type = "image/webp"
elif ext == ".js":
content_type = "text/javascript"
elif ext == ".pdf":
content_type = "application/pdf"
elif ext == ".mp4":
content_type = "video/mp4"
elif ext == ".mp3":
content_type = "audio/mp3"
else:
content_type = "application/octet-stream"
return StaticFile(fpath, content_type)
def get_url_controller(self, path="", method="") -> Tuple[ControllerFunction, Dict, List]:
# explicitly url matching
if path in self.method_url_mapping[method]:
return self.method_url_mapping[method][path], {}, ()
elif path in self.method_url_mapping["_"]:
return self.method_url_mapping["_"][path], {}, ()
# url with path value matching
fun_and_val = self.__try_get_from_path_val(path, method)
if fun_and_val is None:
fun_and_val = self.__try_get_from_path_val(path, "_")
if fun_and_val is not None:
return fun_and_val[0], fun_and_val[1], ()
# regexp
func_and_groups = self.__try_get_from_regexp(path, method)
if func_and_groups is None:
func_and_groups = self.__try_get_from_regexp(path, "_")
if func_and_groups is not None:
return func_and_groups[0], {}, func_and_groups[1]
# static files
for k, v in self.res_conf:
if path.startswith(k):
def static_fun():
return self._res_(path, k, v)
return ControllerFunction(func=static_fun), {}, ()
return None, {}, ()
def __try_get_from_regexp(self, path, method):
for regex, ctrl in self.method_regexp_mapping[method].items():
m = re.match(regex, path)
_logger.debug(f"regexp::pattern::[{regex}] => path::[{path}] match? {m is not None}")
if m:
return ctrl, tuple([unquote(v) for v in m.groups()])
return None
def __try_get_from_path_val(self, path, method):
for patterns, val in self.path_val_url_mapping[method].items():
m = re.match(patterns, path)
_logger.debug(f"url with path value::pattern::[{patterns}] => path::[{path}] match? {m is not None}")
if m:
fun, path_names = val
path_values = {}
for idx in range(len(path_names)):
key = unquote(path_names[idx])
path_values[key] = unquote(m.groups()[idx])
return fun, path_values
return None
def map_filter(self, path_pattern, filter_fun):
self.filter_mapping[path_pattern] = filter_fun
def get_matched_filters(self, path):
available_filters = []
for key, val in self.filter_mapping.items():
if re.match(key, path):
available_filters.append(val)
return available_filters
def map_websocket_handler(self, endpoint, handler_class):
url = remove_url_first_slash(endpoint)
path_pattern, path_names = self.__get_path_reg_pattern(url)
if path_pattern is None:
self.ws_mapping[url] = handler_class
else:
self.ws_path_val_mapping[path_pattern] = (handler_class, path_names)
def get_websocket_handler(self, path):
if path in self.ws_mapping:
return self.ws_mapping[path], {}
return self.__try_get_ws_handler_from_path_val(path)
def __try_get_ws_handler_from_path_val(self, path):
for patterns, val in self.ws_path_val_mapping.items():
m = re.match(patterns, path)
_logger.debug(f"websocket endpoint with path value::pattern::[{patterns}] => path::[{path}] match? {m is not None}")
if m:
clz, path_names = val
path_values = {}
for idx in range(len(path_names)):
key = unquote(path_names[idx])
path_values[key] = unquote(m.groups()[idx])
return clz, path_values
return None, {}
def map_error_page(self, code: str, error_page_fun: Callable):
if not code:
c = "_"
else:
c = str(code).lower()
self.error_page_mapping[c] = error_page_fun
def _default_error_page(self, code: int, message: str = "", explain: str = ""):
return json.dumps({
"code": code,
"message": message,
"explain": explain
})
def error_page(self, code: int, message: str = "", explain: str = ""):
c = str(code)
func = None
if c in self.error_page_mapping:
func = self.error_page_mapping[c]
elif code > 200:
c0x = c[0:2] + "x"
if c0x in self.error_page_mapping:
func = self.error_page_mapping[c0x]
elif "_" in self.error_page_mapping:
func = self.error_page_mapping["_"]
if not func:
func = self._default_error_page
_logger.debug(f"error page function:: {func}")
co = code
msg = message
exp = explain
args_def = get_function_args(func, None)
kwargs_def = get_function_kwargs(func, None)
args = []
for n, t in args_def:
_logger.debug(f"set value to error_page function -> {n}")
if co is not None:
if t is None or t == int:
args.append(co)
co = None
continue
if msg is not None:
if t is None or t == str:
args.append(msg)
msg = None
continue
if exp is not None:
if t is None or t == str:
args.append(exp)
exp = None
continue
args.append(None)
kwargs = {}
for n, v, t in kwargs_def:
if co is not None:
if (t is None and isinstance(v, int)) or t == int:
kwargs[n] = co
co = None
continue
if msg is not None:
if (t is None and isinstance(v, str)) or t == str:
kwargs[n] = msg
msg = None
continue
if exp is not None:
if (t is None and isinstance(v, str)) or t == str:
kwargs[n] = exp
exp = None
continue
kwargs[n] = v
if args and kwargs:
return func(*args, **kwargs)
elif args:
return func(*args)
elif kwargs:
return func(**kwargs)
else:
return func()
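    # Editor's usage sketch (not part of the original source): an error page
    # handler registered via map_error_page may accept any subset of
    # (code: int, message: str, explain: str); error_page() fills the arguments
    # by position and annotated type as implemented above, e.g.
    #
    #   def not_found(code: int, message: str):
    #       return f"<h1>{code}</h1><p>{message}</p>"
    #   routing_conf.map_error_page("404", not_found)   # `routing_conf` assumed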
class HTTPServer(TCPServer, RoutingConf):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
TCPServer.server_bind(self)
host, port = self.server_address[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
def __init__(self, addr, res_conf={}):
TCPServer.__init__(self, addr, BaseHTTPRequestHandler)
RoutingConf.__init__(self, res_conf)
class ThreadingMixInHTTPServer(ThreadingMixIn, HTTPServer):
pass
class CoroutineMixIn:
daemon_threads = True
@property
def coroutine_tasks(self) -> Dict[Any, List[Awaitable]]:
if not hasattr(self, "_coroutine_tasks"):
self._coroutine_tasks = {}
return self._coroutine_tasks
@property
def coroutine_thread(self) -> threading.Thread:
if not hasattr(self, "_coroutine_thread"):
self._coroutine_thread = None
return self._coroutine_thread
@coroutine_thread.setter
def coroutine_thread(self, val: threading.Thread):
self._coroutine_thread = val
@property
def coroutine_loop(self) -> asyncio.AbstractEventLoop:
if not hasattr(self, "_coroutine_loop"):
self._coroutine_loop = None
return self._coroutine_loop
@coroutine_loop.setter
def coroutine_loop(self, val: asyncio.AbstractEventLoop):
self._coroutine_loop = val
def put_coroutine_task(self, request, task: Awaitable):
if request in self.coroutine_tasks:
self.coroutine_tasks[request].append(task)
else:
self.coroutine_tasks[request] = [task]
def coroutine_main(self):
self.coroutine_loop = loop = asyncio.new_event_loop()
try:
loop.run_forever()
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
async def process_request_async(self, request, client_address):
"""Same as in BaseServer but as async.
In addition, exception handling is done here.
"""
try:
self.finish_request(request, client_address)
if request in self.coroutine_tasks:
coroutine_tasks = self.coroutine_tasks[request]
while coroutine_tasks:
await coroutine_tasks.pop(0)
del self.coroutine_tasks[request]
except Exception:
self.handle_error(request, client_address)
finally:
self.shutdown_request(request)
def process_request(self, request, client_address):
if self.coroutine_thread is None:
self.coroutine_thread = threading.Thread(target=self.coroutine_main, name="CoroutineThread", daemon=self.daemon_threads)
self.coroutine_thread.start()
while not self.coroutine_loop:
            # wait until the loop is ready
time.sleep(0.1)
asyncio.run_coroutine_threadsafe(self.process_request_async(request, client_address), self.coroutine_loop)
def server_close(self):
super().server_close()
if self.coroutine_loop:
self.coroutine_loop.call_soon_threadsafe(self.coroutine_loop.stop)
self.coroutine_thread.join()
def shutdown(self):
super().shutdown()
if self.coroutine_loop:
self.coroutine_loop.call_soon_threadsafe(self.coroutine_loop.stop)
self.coroutine_thread.join()
class CoroutineMixInHTTPServer(CoroutineMixIn, HTTPServer):
pass
class SimpleDispatcherHttpServer:
"""Dispatcher Http server"""
def map_filter(self, path_pattern, filter_fun):
self.server.map_filter(path_pattern, filter_fun)
def map_controller(self, ctrl: ControllerFunction):
self.server.map_controller(ctrl)
def map_websocket_handler(self, endpoint, handler_class):
self.server.map_websocket_handler(endpoint, handler_class)
def map_error_page(self, code, func):
self.server.map_error_page(code, func)
def __init__(self,
host: Tuple[str, int] = ('', 9090),
ssl: bool = False,
ssl_protocol: int = _ssl.PROTOCOL_TLS_SERVER,
ssl_check_hostname: bool = False,
keyfile: str = "",
certfile: str = "",
keypass: str = "",
ssl_context: _ssl.SSLContext = None,
resources: Dict[str, str] = {},
prefer_corountine=False):
self.host = host
self.__ready = False
self.ssl = ssl
if prefer_corountine:
self.server = CoroutineMixInHTTPServer(self.host, res_conf=resources)
else:
self.server = ThreadingMixInHTTPServer(self.host, res_conf=resources)
if ssl:
if ssl_context:
ssl_ctx = ssl_context
else:
assert keyfile and certfile, "keyfile and certfile should be provided. "
ssl_ctx = _ssl.SSLContext(protocol=ssl_protocol)
ssl_ctx.check_hostname = ssl_check_hostname
ssl_ctx.load_cert_chain(certfile=certfile, keyfile=keyfile, password=keypass)
self.server.socket = ssl_ctx.wrap_socket(
self.server.socket,
server_side=True
)
@property
def ready(self):
return self.__ready
def resources(self, res={}):
self.server.res_conf = res
def start(self):
if self.ssl:
ssl_hint = " with SSL on"
else:
ssl_hint = ""
_logger.info(f"Dispatcher Http Server starts. Listen to port [{self.host[1]}]{ssl_hint}.")
try:
self.__ready = True
self.server.serve_forever()
except:
self.__ready = False
raise
def shutdown(self):
        # shut it down in a separate thread.
threading.Thread(target=self.server.shutdown, daemon=True).start()
class WSGIProxy(RoutingConf):
def __init__(self, res_conf):
super().__init__(res_conf=res_conf)
def app_proxy(self, environment, start_response):
requestHandler = WSGIRequestHandler(self, environment, start_response)
return requestHandler.handle()
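# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). Controller
# registration is normally done through the package's public decorators
# defined elsewhere; only the server class defined above is shown here, with
# illustrative arguments:
#
#   server = SimpleDispatcherHttpServer(
#       host=('0.0.0.0', 9090),
#       resources={'/static/*': '/var/www/static/'},
#       prefer_corountine=False)
#   server.map_filter('^api/.*', my_filter)   # `my_filter` assumed
#   server.start()      # blocking; call server.shutdown() from another thread
# ---------------------------------------------------------------------------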
|
the-stack_0_16624 | import os.path
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
from PIL import Image
class AlignedDataset(BaseDataset):
"""A dataset class for paired image dataset.
It assumes that the directory '/path/to/data/train' contains image pairs in the form of {A,B}.
During test time, you need to prepare a directory '/path/to/data/test'.
"""
def __init__(self, opt):
"""Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseDataset.__init__(self, opt)
self.dir_AB = os.path.join(opt.dataroot, opt.phase) # get the image directory
self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size)) # get image paths
assert(self.opt.load_size >= self.opt.crop_size) # crop_size should be smaller than the size of loaded image
self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
def __getitem__(self, index):
"""Return a data point and its metadata information.
Parameters:
index - - a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) - - an image in the input domain
B (tensor) - - its corresponding image in the target domain
A_paths (str) - - image paths
B_paths (str) - - image paths (same as A_paths)
"""
# read a image given a random integer index
AB_path = self.AB_paths[index]
#print('index'+ index)
print('index:{a}'.format(a=index))
AB = Image.open(AB_path).convert('RGB')
# split AB image into A and B
w, h = AB.size
w2 = int(w / 2)
A = AB.crop((0, 0, w2, h))
B = AB.crop((w2, 0, w, h))
# apply the same transform to both A and B
transform_params = get_params(self.opt, A.size)
A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
A = A_transform(A)
B = B_transform(B)
return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}
def __len__(self):
"""Return the total number of images in the dataset."""
return len(self.AB_paths)
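# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original file). `opt` is the parsed
# option object used throughout this codebase (dataroot, phase, load_size,
# crop_size, direction, ...):
#
#   dataset = AlignedDataset(opt)
#   sample = dataset[0]    # dict with 'A', 'B', 'A_paths', 'B_paths'
#   print(sample['A'].shape, sample['B'].shape)
# ---------------------------------------------------------------------------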
|
the-stack_0_16625 | import numpy as np
from holoviews.core.overlay import NdOverlay
from holoviews.element import Bars
from bokeh.models import CategoricalColorMapper, LinearColorMapper
from ..utils import ParamLogStream
from .testplot import TestBokehPlot, bokeh_renderer
class TestBarPlot(TestBokehPlot):
def test_bars_hover_ensure_kdims_sanitized(self):
obj = Bars(np.random.rand(10,2), kdims=['Dim with spaces'])
obj = obj.opts(tools=['hover'])
self._test_hover_info(obj, [('Dim with spaces', '@{Dim_with_spaces}'), ('y', '@{y}')])
def test_bars_hover_ensure_vdims_sanitized(self):
obj = Bars(np.random.rand(10,2), vdims=['Dim with spaces'])
obj = obj.opts(tools=['hover'])
self._test_hover_info(obj, [('x', '@{x}'), ('Dim with spaces', '@{Dim_with_spaces}')])
def test_bars_suppress_legend(self):
bars = Bars([('A', 1), ('B', 2)]).opts(plot=dict(show_legend=False))
plot = bokeh_renderer.get_plot(bars)
plot.initialize_plot()
fig = plot.state
self.assertEqual(len(fig.legend), 0)
def test_empty_bars(self):
bars = Bars([], kdims=['x', 'y'], vdims=['z']).opts(plot=dict(group_index=1))
plot = bokeh_renderer.get_plot(bars)
plot.initialize_plot()
source = plot.handles['source']
for v in source.data.values():
self.assertEqual(len(v), 0)
def test_bars_grouped_categories(self):
bars = Bars([('A', 0, 1), ('A', 1, -1), ('B', 0, 2)],
kdims=['Index', 'Category'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars)
source = plot.handles['source']
self.assertEqual([tuple(x) for x in source.data['xoffsets']],
[('A', '0'), ('B', '0'), ('A', '1')])
self.assertEqual(list(source.data['Category']), ['0', '0', '1'])
self.assertEqual(source.data['Value'], np.array([1, 2, -1]))
x_range = plot.handles['x_range']
self.assertEqual(x_range.factors, [('A', '0'), ('A', '1'), ('B', '0'), ('B', '1')])
def test_bars_multi_level_sorted(self):
        box = Bars((['A', 'B']*15, [3, 10, 1]*10, np.random.randn(30)),
['Group', 'Category'], 'Value').aggregate(function=np.mean)
plot = bokeh_renderer.get_plot(box)
x_range = plot.handles['x_range']
self.assertEqual(x_range.factors, [
('A', '1'), ('A', '3'), ('A', '10'), ('B', '1'), ('B', '3'), ('B', '10')])
def test_box_whisker_multi_level_sorted_alphanumerically(self):
        box = Bars(([3, 10, 1]*10, ['A', 'B']*15, np.random.randn(30)),
['Group', 'Category'], 'Value').aggregate(function=np.mean)
plot = bokeh_renderer.get_plot(box)
x_range = plot.handles['x_range']
self.assertEqual(x_range.factors, [
('1', 'A'), ('1', 'B'), ('3', 'A'), ('3', 'B'), ('10', 'A'), ('10', 'B')])
def test_bars_positive_negative_mixed(self):
bars = Bars([('A', 0, 1), ('A', 1, -1), ('B', 0, 2)],
kdims=['Index', 'Category'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars.opts(stacked=True))
source = plot.handles['source']
self.assertEqual(list(source.data['Category']), ['1', '0', '0'])
self.assertEqual(list(source.data['Index']), ['A', 'A', 'B'])
self.assertEqual(source.data['top'], np.array([0, 1, 2]))
self.assertEqual(source.data['bottom'], np.array([-1, 0, 0]))
def test_bars_logy(self):
bars = Bars([('A', 1), ('B', 2), ('C', 3)],
kdims=['Index'], vdims=['Value'])
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(logy=True)))
source = plot.handles['source']
glyph = plot.handles['glyph']
y_range = plot.handles['y_range']
self.assertEqual(list(source.data['Index']), ['A', 'B', 'C'])
self.assertEqual(source.data['Value'], np.array([1, 2, 3]))
self.assertEqual(glyph.bottom, 10**(np.log10(3)-2))
self.assertEqual(y_range.start, 10**(np.log10(3)-2))
self.assertEqual(y_range.end, 3.)
def test_bars_logy_explicit_range(self):
bars = Bars([('A', 1), ('B', 2), ('C', 3)],
kdims=['Index'], vdims=['Value']).redim.range(Value=(0.001, 3))
plot = bokeh_renderer.get_plot(bars.opts(plot=dict(logy=True)))
source = plot.handles['source']
glyph = plot.handles['glyph']
y_range = plot.handles['y_range']
self.assertEqual(list(source.data['Index']), ['A', 'B', 'C'])
self.assertEqual(source.data['Value'], np.array([1, 2, 3]))
self.assertEqual(glyph.bottom, 0.001)
self.assertEqual(y_range.start, 0.001)
self.assertEqual(y_range.end, 3.0000000000000013)
def test_bars_ylim(self):
bars = Bars([1, 2, 3]).opts(ylim=(0, 200))
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 200)
def test_bars_padding_square(self):
points = Bars([(1, 2), (2, -1), (3, 3)]).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, -1.4)
self.assertEqual(y_range.end, 3.4)
def test_bars_padding_square_positive(self):
points = Bars([(1, 2), (2, 1), (3, 3)]).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_bars_padding_square_negative(self):
points = Bars([(1, -2), (2, -1), (3, -3)]).options(padding=0.1)
plot = bokeh_renderer.get_plot(points)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, -3.2)
self.assertEqual(y_range.end, 0)
def test_bars_padding_nonsquare(self):
bars = Bars([(1, 2), (2, 1), (3, 3)]).options(padding=0.1, width=600)
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_bars_padding_logx(self):
bars = Bars([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0)
self.assertEqual(y_range.end, 3.2)
def test_bars_padding_logy(self):
bars = Bars([(1, 2), (2, 1), (3, 3)]).options(padding=0.1, logy=True)
plot = bokeh_renderer.get_plot(bars)
y_range = plot.handles['y_range']
self.assertEqual(y_range.start, 0.033483695221017122)
self.assertEqual(y_range.end, 3.3483695221017129)
###########################
# Styling mapping #
###########################
def test_bars_color_op(self):
bars = Bars([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims=['y', 'color']).options(color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['color'], np.array(['#000', '#F00', '#0F0']))
self.assertEqual(glyph.fill_color, {'field': 'color'})
self.assertEqual(glyph.line_color, 'black')
def test_bars_linear_color_op(self):
bars = Bars([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims=['y', 'color']).options(color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, LinearColorMapper)
self.assertEqual(cmapper.low, 0)
self.assertEqual(cmapper.high, 2)
self.assertEqual(cds.data['color'], np.array([0, 1, 2]))
self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
self.assertEqual(glyph.line_color, 'black')
def test_bars_categorical_color_op(self):
bars = Bars([(0, 0, 'A'), (0, 1, 'B'), (0, 2, 'C')],
vdims=['y', 'color']).options(color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
cmapper = plot.handles['color_color_mapper']
self.assertTrue(cmapper, CategoricalColorMapper)
self.assertEqual(cmapper.factors, ['A', 'B', 'C'])
self.assertEqual(cds.data['color'], np.array(['A', 'B', 'C']))
self.assertEqual(glyph.fill_color, {'field': 'color', 'transform': cmapper})
self.assertEqual(glyph.line_color, 'black')
def test_bars_line_color_op(self):
bars = Bars([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims=['y', 'color']).options(line_color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_color'], np.array(['#000', '#F00', '#0F0']))
self.assertNotEqual(glyph.fill_color, {'field': 'line_color'})
self.assertEqual(glyph.line_color, {'field': 'line_color'})
def test_bars_fill_color_op(self):
bars = Bars([(0, 0, '#000'), (0, 1, '#F00'), (0, 2, '#0F0')],
vdims=['y', 'color']).options(fill_color='color')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['fill_color'], np.array(['#000', '#F00', '#0F0']))
self.assertEqual(glyph.fill_color, {'field': 'fill_color'})
self.assertNotEqual(glyph.line_color, {'field': 'fill_color'})
def test_bars_alpha_op(self):
bars = Bars([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims=['y', 'alpha']).options(alpha='alpha')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['alpha'], np.array([0, 0.2, 0.7]))
self.assertEqual(glyph.fill_alpha, {'field': 'alpha'})
def test_bars_line_alpha_op(self):
bars = Bars([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims=['y', 'alpha']).options(line_alpha='alpha')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_alpha'], np.array([0, 0.2, 0.7]))
self.assertEqual(glyph.line_alpha, {'field': 'line_alpha'})
self.assertNotEqual(glyph.fill_alpha, {'field': 'line_alpha'})
def test_bars_fill_alpha_op(self):
bars = Bars([(0, 0, 0), (0, 1, 0.2), (0, 2, 0.7)],
vdims=['y', 'alpha']).options(fill_alpha='alpha')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['fill_alpha'], np.array([0, 0.2, 0.7]))
self.assertNotEqual(glyph.line_alpha, {'field': 'fill_alpha'})
self.assertEqual(glyph.fill_alpha, {'field': 'fill_alpha'})
def test_bars_line_width_op(self):
bars = Bars([(0, 0, 1), (0, 1, 4), (0, 2, 8)],
vdims=['y', 'line_width']).options(line_width='line_width')
plot = bokeh_renderer.get_plot(bars)
cds = plot.handles['cds']
glyph = plot.handles['glyph']
self.assertEqual(cds.data['line_width'], np.array([1, 4, 8]))
self.assertEqual(glyph.line_width, {'field': 'line_width'})
def test_op_ndoverlay_value(self):
colors = ['blue', 'red']
overlay = NdOverlay({color: Bars(np.arange(i+2)) for i, color in enumerate(colors)}, 'Color').options('Bars', fill_color='Color')
plot = bokeh_renderer.get_plot(overlay)
for subplot, color in zip(plot.subplots.values(), colors):
self.assertEqual(subplot.handles['glyph'].fill_color, color)
def test_bars_color_index_color_clash(self):
bars = Bars([(0, 0, 0), (0, 1, 1), (0, 2, 2)],
vdims=['y', 'color']).options(color='color', color_index='color')
with ParamLogStream() as log:
bokeh_renderer.get_plot(bars)
log_msg = log.stream.read()
warning = ("Cannot declare style mapping for 'color' option "
"and declare a color_index; ignoring the color_index.\n")
self.assertEqual(log_msg, warning)
|
the-stack_0_16628 | # Copyright (C) 2017 Adam Schubert <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import tests.TestCase as TestCase
from cron_descriptor import Options, ExpressionDescriptor
class TestLocale(TestCase.TestCase):
def test_locale_de(self):
options = Options()
options.locale_code = 'de_DE'
options.use_24hour_time_format = True
self.assertEqual(
"Jede Minute",
ExpressionDescriptor("* * * * *", options).get_description())
|
the-stack_0_16629 | _base_ = [
'./ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py'
]
# model settings
model = dict(
backbone=dict(
norm_eval=True, bn_frozen=True, bottleneck_mode='ip', pretrained=None))
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=32,
frame_interval=2,
num_clips=10,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='ThreeCrop', crop_size=256),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=test_pipeline))
optimizer = dict(
type='SGD', lr=0.08, momentum=0.9,
weight_decay=0.0001) # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
policy='CosineAnnealing',
min_lr=0,
warmup='linear',
warmup_by_epoch=True,
warmup_iters=40)
total_epochs = 180
work_dir = './work_dirs/ipcsn_bnfrozen_r152_32x2x1_180e_kinetics400_rgb' # noqa: E501
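# Editor's note (not part of the original config): in mmaction2 a config like
# this is normally launched through the repository's training entry point,
# e.g. `python tools/train.py <path_to_this_config>.py`, or the distributed
# launch script; the optimizer lr above assumes 8 GPUs as noted.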
|
the-stack_0_16630 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from flask.ext.script import Manager, Server
from flask.ext.collect import Collect
from quokka import create_app
from quokka.core.db import db
from quokka.ext.blueprints import load_blueprint_commands
app = create_app()
if app.config.get("LOGGER_ENABLED"):
logging.basicConfig(
level=getattr(logging, app.config.get("LOGGER_LEVEL", "DEBUG")),
format=app.config.get(
"LOGGER_FORMAT",
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s'),
datefmt=app.config.get("LOGGER_DATE_FORMAT", '%d.%m %H:%M:%S')
)
manager = Manager(app)
collect = Collect()
collect.init_script(manager)
@manager.shell
def make_shell_context():
" Update shell. "
return dict(app=app, db=db)
@manager.command
def check():
"""Prints app status"""
from pprint import pprint
print("Extensions.")
pprint(app.extensions)
print("Modules.")
pprint(app.blueprints)
print("App.")
return app
@manager.command
def populate():
"""Populate the database with sample data"""
from quokka.utils.populate import Populate
Populate(db)()
@manager.command
def show_config():
"print all config variables"
from pprint import pprint
print("Config.")
pprint(dict(app.config))
manager.add_command("run0", Server(
use_debugger=True,
use_reloader=True,
host='0.0.0.0',
port=8000
))
load_blueprint_commands(manager)
if __name__ == '__main__':
manager.run()
|
the-stack_0_16633 | from os import system, name
import json
class Eb2Utils:
# Simple function to clear the console...
    @staticmethod
    def clear():
# for windows
if name == 'nt':
_ = system('cls')
_ = system('TITLE Expertise Bot :: Rewrite v0.0.2')
# for mac and linux(here, os.name is 'posix')
else:
_ = system('clear')
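# Minimal usage sketch (illustrative addition, not part of the original module):
if __name__ == '__main__':
    Eb2Utils.clear()  # wipes the terminal before re-rendering a menu or prompt
    print('console cleared')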
|
the-stack_0_16634 | import tkinter as tk
import tkinter.ttk as ttk
import numpy as np
import math
import operator
import sys
import DataControls.ControlElements as DCCE
import os
import subprocess
from datetime import datetime
import matplotlib as mpl
mpl.use('TkAgg') #mpl backend
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib import backend_bases
# mpl.rcParams['toolbar'] = 'None'
from matplotlib.figure import Figure
import matplotlib.animation as anim
#Custom MPL Navigation Toolbar BEGIN
class CustomNavigationToolbar(NavigationToolbar2Tk):
def __init__(self, pFigureCanvasTKAgg, parent, root, *args, **kwargs):
self.m_root = root
self.toolitems = (
('Home', 'Reset original view', 'home', 'home_extended'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
(None, None, None, None),
('Save', 'Save the figure', 'filesave', 'save_figure'),
('Subplots', 'Configure subplots', 'subplots', 'advanced_settings')
)
super().__init__(pFigureCanvasTKAgg, parent, *args, **kwargs)
self.m_figureRef = pFigureCanvasTKAgg.figure
self.m_containerRef = parent
def inputCheckFloatEntry(self, entry):
if(entry.get() == ''):
return False
try:
float(entry.get())
except ValueError:
tk.messagebox.showerror("Advanced Settings Error", "Please make sure all inputs are parseable as floats (i.e. decimal numbers).")
return False
return True
def set_advanced_settings(self):
self.inputCheckFloatEntry(self.m_yMaxBoundEntry)
pYMax = float(self.m_yMaxBoundEntry.get())
self.inputCheckFloatEntry(self.m_yMinBoundEntry)
pYMin = float(self.m_yMinBoundEntry.get())
self.m_containerRef.getPrimaryAxes().set_ybound(pYMin, pYMax)
self.inputCheckFloatEntry(self.m_xMaxBoundEntry)
pXMax = float(self.m_xMaxBoundEntry.get())
self.inputCheckFloatEntry(self.m_xMinBoundEntry)
pXMin = float(self.m_xMinBoundEntry.get())
self.m_containerRef.getPrimaryAxes().set_xbound(pXMin, pXMax)
self.m_containerRef.m_subplot.grid(linestyle=':')
if(self.m_containerRef.m_secondaryYAxisRequired):
self.inputCheckFloatEntry(self.m_secondYMaxBoundEntry)
sYMax = float(self.m_secondYMaxBoundEntry.get())
self.inputCheckFloatEntry(self.m_secondYMinBoundEntry)
sYMin = float(self.m_secondYMinBoundEntry.get())
self.m_containerRef.getSecondaryAxes().set_ybound(sYMin, sYMax)
#TODO: get DPI settings working with tkinter canvas -> currently reverting after a few seconds
# self.inputCheckFloatEntry(self.m_DPIEntry)
# newDPI = float(self.m_DPIEntry.get())
# self.m_figureRef.set_dpi(newDPI)
# self.m_containerRef.m_subplot.relim()
self.m_containerRef.canvas.draw_idle()
def advanced_settings(self):
self.m_window = tk.Toplevel(self.m_root)
self.m_window.title("Advanced Figure Options")
if not sys.platform.startswith('win'):
self.m_window.configure(bg = "#ececec") #ececec only for mac
self.m_yMaxBoundLabel = ttk.Label(self.m_window, text="Primary Max Y Bound")
self.m_yMaxBoundLabel.grid(row = 0, column = 0, sticky = "nsw")
self.m_yMaxBoundEntry = DCCE.EnhancedEntry(self.m_window)
self.m_yMaxBoundEntry.grid(row = 0, column = 1, sticky= "nsew")
self.m_yMaxBoundEntry.set(self.m_containerRef.getPrimaryAxes().get_ybound()[1])
self.m_yMinBoundLabel = ttk.Label(self.m_window, text="Primary Min Y Bound")
self.m_yMinBoundLabel.grid(row = 1, column = 0, sticky = "nsw")
self.m_yMinBoundEntry = DCCE.EnhancedEntry(self.m_window)
self.m_yMinBoundEntry.grid(row = 1, column = 1, sticky= "nsew")
self.m_yMinBoundEntry.set(self.m_containerRef.getPrimaryAxes().get_ybound()[0])
self.m_xMaxBoundLabel = ttk.Label(self.m_window, text="Primary Max X Bound")
self.m_xMaxBoundLabel.grid(row = 2, column = 0, sticky = "nsw")
self.m_xMaxBoundEntry = DCCE.EnhancedEntry(self.m_window)
self.m_xMaxBoundEntry.grid(row = 2, column = 1, sticky= "nsew")
self.m_xMaxBoundEntry.set(self.m_containerRef.getPrimaryAxes().get_xbound()[1])
self.m_xMinBoundLabel = ttk.Label(self.m_window, text="Primary Min X Bound")
self.m_xMinBoundLabel.grid(row = 3, column = 0, sticky = "nsw")
self.m_xMinBoundEntry = DCCE.EnhancedEntry(self.m_window)
self.m_xMinBoundEntry.grid(row = 3, column = 1, sticky= "nsew")
self.m_xMinBoundEntry.set(self.m_containerRef.getPrimaryAxes().get_xbound()[0])
self.m_DPILabel = ttk.Label(self.m_window, text="Figure DPI")
self.m_DPILabel.grid(row = 4, column = 0, sticky = "nsw")
self.m_DPIEntry = DCCE.EnhancedEntry(self.m_window)
self.m_DPIEntry.grid(row = 4, column = 1, sticky= "nsew")
self.m_DPIEntry.set(self.m_figureRef.get_dpi())
if(self.m_containerRef.m_secondaryYAxisRequired):
self.m_secondYMaxBoundLabel = ttk.Label(self.m_window, text="Secondary Max Y Bound")
self.m_secondYMaxBoundLabel.grid(row = 5, column = 0, sticky = "nsw")
self.m_secondYMaxBoundEntry = DCCE.EnhancedEntry(self.m_window)
self.m_secondYMaxBoundEntry.grid(row = 5, column = 1, sticky= "nsew")
self.m_secondYMaxBoundEntry.set(self.m_containerRef.getSecondaryAxes().get_ybound()[1])
self.m_secondYMinBoundLabel = ttk.Label(self.m_window, text="Secondary Min Y Bound")
self.m_secondYMinBoundLabel.grid(row = 6, column = 0, sticky = "nsw")
self.m_secondYMinBoundEntry = DCCE.EnhancedEntry(self.m_window)
self.m_secondYMinBoundEntry.grid(row = 6, column = 1, sticky= "nsew")
self.m_secondYMinBoundEntry.set(self.m_containerRef.getSecondaryAxes().get_ybound()[0])
self.m_buttonRowIndex = 7
else:
self.m_buttonRowIndex = 5
self.m_setButton = ttk.Button(self.m_window, text = "Set Values", command = self.set_advanced_settings)
self.m_setButton.grid(row = self.m_buttonRowIndex, columnspan = 2, sticky = "ns")
# raise NotImplementedError
#MACOS ONLY!!! TRY USING THIS TO FIX SAVEAS INTERFACE
    @staticmethod
    def user_action(apath, cmd):
ascript = '''
-- apath - default path for dialogs to open too
-- cmd - "Select", "Save"
on run argv
set userCanceled to false
if (count of argv) = 0 then
tell application "System Events" to display dialog "argv is 0" ¬
giving up after 10
else
set apath to POSIX file (item 1 of argv) as alias
set action to (item 2 of argv) as text
end if
try
if action contains "Select" then
set fpath to POSIX path of (choose file default location apath ¬
without invisibles, multiple selections allowed and ¬
showing package contents)
# return fpath as text
else if action contains "Save" then
set fpath to POSIX path of (choose file name default location apath)
end if
return fpath as text
on error number -128
set userCanceled to true
end try
if userCanceled then
return "Cancel"
else
return fpath
end if
end run
'''
try:
proc = subprocess.check_output(['osascript', '-e', ascript,
apath, cmd])
if 'Cancel' in proc.decode('utf-8'): # User pressed Cancel button
sys.exit('User Canceled')
return proc.decode('utf-8')
except subprocess.CalledProcessError as e:
            print('Python error: [%d]\n%s\n' % (e.returncode, e.output))
def save_figure(self,*args): #copied backend save_fig because I needed to add custom solution for .txt file extension
# previousSize = self.m_figureRef.get_size_inches()
# previousDPI = self.m_figureRef.get_dpi()
# self.m_figureRef.set_dpi(300) #print quality temporarily
# self.m_figureRef.set_size_inches(w=13.0/2.54, h=8.0/2.54)#13cm by 8cm
# super().save_figure()
# self.m_figureRef.set_size_inches(previousSize)
# self.m_figureRef.set_dpi(previousDPI) #print quality temporarily
filetypes = self.canvas.get_supported_filetypes().copy()
# default_filetype = self.canvas.get_default_filetype()
# Tk doesn't provide a way to choose a default filetype,
# so we just have to put it first
#("All Files", "*.*"),
# default_filetype_name = filetypes.pop(default_filetype)
sorted_filetypes = sorted(filetypes.items())
tk_filetypes = [(name, '*.%s' % ext) for ext, name in sorted_filetypes]
# adding a default extension seems to break the
# asksaveasfilename dialog when you choose various save types
# from the dropdown. Passing in the empty string seems to
# work - JDH!
#defaultextension = self.canvas.get_default_filetype()
# defaultextension = 'txt'
initialdir = os.path.expanduser(mpl.rcParams['savefig.directory'])
# initialfile = "RawData.txt"#self.canvas.get_default_filename()
fname = tk.filedialog.asksaveasfilename(
master=self.canvas.get_tk_widget().master,
title='Save the figure',
filetypes=[("All Files", "*.*"),("Raw Plot Data", "*.txt")] + tk_filetypes
# filetypes=[('Raw Plot Data','*.txt'),('Image Data','*.jpeg')],
# defaultextension=defaultextension,
# initialdir=initialdir,
# initialfile=initialfile,
)
if fname in ["", ()]:
return
# Save dir for next time, unless empty str (i.e., use cwd).
if initialdir != "":
mpl.rcParams['savefig.directory'] = (
os.path.dirname(str(fname)))
try:
if(".txt" in fname):
rawData = self.m_containerRef.m_subplot.get_lines() #or use legend handles #_xy contains data and _label contains legend name
lineCount = len(rawData)
#init vars
labels = [self.m_containerRef.m_subplot.get_xlabel().replace(' ', '_')]
labels += [rawData[0].get_label().replace(' ', '_')]
output = np.vstack((rawData[0].get_xdata(),rawData[0].get_ydata()))
#append to them
for i in range(1,lineCount):
labels += [self.m_containerRef.m_subplot.get_xlabel().replace(' ', '_')]
labels += [rawData[i].get_label().replace(' ', '_')]
output = np.vstack((output, rawData[i].get_xdata()))
output = np.vstack((output, rawData[i].get_ydata()))
sep = ' '
headerString = sep.join(labels)
with open(fname, mode='a') as fileHandle:
np.savetxt(fileHandle, output.transpose(), delimiter=' ', header = headerString, comments='')
else:# This method will handle the delegation to the correct type
self.canvas.figure.savefig(fname)
except Exception as e:
tk.messagebox.showerror("Error saving file", str(e))
def home_extended(self):
super().home()
#Custom MPL Navigation Toolbar END
#MPLContainer BEGIN
# resizeFuncCounter = 0 #debug
# lastFuncCounterOutputTime = datetime.now() #debug
class MPLContainer(tk.Frame):
def __init__(self, parent, title, yAxisName, xAxisName, root, secondaryYAxis = False, secondaryYAxisName = "", invertXAxis = False, legendLoc = 0, *args, **kwargs):
super().__init__(parent, bg="white", *args, **kwargs)
self.m_title = title
self.m_xAxisName = xAxisName
self.m_yAxisName = yAxisName
self.m_usingMarkers = False
self.m_legendLoc = legendLoc
# self.m_primaryMaxX = 0
# self.m_primaryMaxY = 0
# self.m_secondaryMaxX = 0
# self.m_secondaryMaxY = 0
self.m_verticalLines = list()
root.registerResizeCallback(self.resizePlot)
self.m_secondaryYAxisRequired = secondaryYAxis
if(secondaryYAxis and secondaryYAxisName == ""):
raise ValueError #need a secondaryYAxisName!
self.m_secondaryYAxisName = secondaryYAxisName
self.m_invertXAxis = invertXAxis
self.initUI(parent, root)
def hidePlot(self):
self.canvas.get_tk_widget().place_forget()
self.m_figure.set_dpi(4)#setting figure to lowest dpi possible while hidden, because matplotlib tkagg backend keeps updating figure on resize, even while hidden :(
self.plotHidden = True
def showPlot(self):
self.m_figure.set_dpi(96)
self.canvas.get_tk_widget().place(anchor="nw",bordermode=tk.OUTSIDE,height=self.winfo_height(),width=self.winfo_width())
self.plotHidden = False
def resizePlot(self, timeDelta, *args, **kwargs):
if(not self.plotHidden and timeDelta.total_seconds()*1000 < 700): #hide the plot if we just started resizing
self.hidePlot()
elif(self.plotHidden and timeDelta.total_seconds()*1000 > 700): #if we stopped resizing, unhide plot
self.showPlot()
#else do nothing
def explicitRefresh(self):
self.canvas.draw()
if(not self.plotHidden):
self.hidePlot()
self.showPlot()
def initUI(self, parent, root):
self.pack(side=tk.TOP, fill = tk.BOTH, expand=True)
self.m_figure = Figure( dpi=96)
self.m_subplot = self.m_figure.add_subplot(111) #add_subplot returns axes
# a = f.add_subplot(111)#111 means only one chart as opposed to 121 meanign 2
self.m_subplot.set_title(self.m_title)
self.m_subplot.set_xlabel(self.m_xAxisName)
self.m_subplot.set_ylabel(self.m_yAxisName)
self.m_subplot.tick_params(direction="in")
self.m_subplot.grid(linestyle=':')
self.m_subplot.margins(x = 0.0)
#consider removing yellow, tan, salmon, coral
self.m_subplot.set_prop_cycle(color=['blue', 'green', 'red', 'cyan', 'magenta', 'black', 'purple', 'pink', 'brown', 'orange', 'teal', 'lightblue', 'lime', 'turquoise', 'darkgreen', 'gold'])
if(self.m_invertXAxis):
self.m_subplot.invert_xaxis()
if(self.m_secondaryYAxisRequired):
self.m_secondaryYAxis = self.m_subplot.twinx()
self.m_secondaryYAxis.set_ylabel(self.m_secondaryYAxisName)
self.m_secondaryYAxis.tick_params(direction="in")
self.m_secondaryYAxis.margins(x= 0.0)
#normally plt.show() now, but different for tk
self.canvas = FigureCanvasTkAgg(self.m_figure,self)
self.canvas.draw()
# self.canvas.draw_idle()
# canvas.get_tk_widget().grid(row=0,column=0,sticky="nsew")
# self.grid_rowconfigure(index=0,weight=1,minsize=self.winfo_height())
# self.grid_columnconfigure(index=0,weight=1,minsize=self.winfo_width())
# self.canvas.get_tk_widget().grid(sticky = "nsew", row = 0, column = 0)
# self.pack_propagate(0)#should stop grid resizing
self.resizeDateTime = datetime.now()
self.plotHidden = False
self.m_toolbar = CustomNavigationToolbar(self.canvas, self, root)
self.m_toolbar.update()
# self.canvas.get_tk_widget().pack(side=tk.TOP,fill=tk.BOTH,expand=True)
# self.canvas.get_tk_widget().place(anchor="nw",bordermode=tk.OUTSIDE,height=self.winfo_height(),width=self.winfo_width())
self.canvas.get_tk_widget().place(anchor="nw",bordermode=tk.INSIDE,relheight = 1.0, relwidth = 1.0)
def clearPlots(self):
if(len(self.m_subplot.lines) > 0):
for i in range(len(self.m_subplot.lines)-1,-1,-1):
line = self.m_subplot.lines.pop(i)
del line
if(len(self.m_subplot.patches) > 0):
for i in range(len(self.m_subplot.patches)-1,-1,-1):
line = self.m_subplot.patches.pop(i)
del line
if(self.m_secondaryYAxisRequired):
if(len(self.m_secondaryYAxis.lines) > 0):
for i in range(len(self.m_secondaryYAxis.lines)-1,-1,-1):
line = self.m_secondaryYAxis.lines.pop(i)
del line
self.canvas.draw_idle()
def _switchToMarkers(self, axes):
for child in axes.get_children():
if(type(child) is mpl.lines.Line2D):
child.set_linestyle('None')
child.set_marker('+')
def switchToMarkers(self):
self._switchToMarkers(self.m_subplot)
if(self.m_secondaryYAxisRequired):
self._switchToMarkers(self.m_secondaryYAxis)
self.canvas.draw_idle()
def _switchToLines(self, axes):
for child in axes.get_children():
if(type(child) is mpl.lines.Line2D):
child.set_marker('None')
child.set_linestyle('solid')
def switchToLines(self):
self._switchToLines(self.m_subplot)
if(self.m_secondaryYAxisRequired):
self._switchToLines(self.m_secondaryYAxis)
self.canvas.draw_idle()
def toggleMarkers(self):
if(self.m_usingMarkers):
self.switchToLines()
self.m_usingMarkers = False
else:
self.switchToMarkers()
self.m_usingMarkers = True
self.canvas.draw_idle()
def _addLinePlots(self, axes, ndarrayData, labels, logXAxis, logYAxis, color, shouldRelim, pLineWidth = 1):
maxX = None
maxY = None
minX = None
if ndarrayData.ndim >= 2:
for i in range(1,ndarrayData.shape[0]):
if (type(labels) is str):
axes.plot(ndarrayData[0,:],ndarrayData[i,:], label = labels, linewidth=pLineWidth, color = color)
elif(labels != None):
axes.plot(ndarrayData[0,:],ndarrayData[i,:], label = labels[i-1], linewidth=pLineWidth, color = color)
else:
axes.plot(ndarrayData[0,:],ndarrayData[i,:], linewidth=pLineWidth, color = color)
if(shouldRelim):
for l in axes.lines:
l_maxX = np.amax(l.get_xdata())
l_minX = np.amin(l.get_xdata())
l_maxY = 1.1*np.amax(l.get_ydata())
#max x
if maxX != None:
maxX = np.amax((maxX,l_maxX))
else:
maxX = l_maxX
#max y
if maxY != None:
maxY = np.amax((maxY,l_maxY))
else:
maxY = l_maxY
#min X
if minX != None:
minX = np.amin((minX,l_minX))
else:
minX = l_minX
axes.set_xbound(minX, maxX)#, top = None)
axes.set_ybound(0, maxY)#, top = None)
self.m_subplot.grid(linestyle=':')
if(self.m_usingMarkers):
self.switchToMarkers() #because we plot with lines by default when adding or subtracting lines
if (labels != None):
handles, labels = self.m_subplot.get_legend_handles_labels()
if(self.m_secondaryYAxisRequired):
handles2, labels2 = self.m_secondaryYAxis.get_legend_handles_labels()
handles = handles + handles2
labels = labels + labels2
# reverse the order
#self.m_subplot.legend(handles[::-1], labels[::-1])
# or sort them by labels
hl = sorted(zip(handles, labels),
key=operator.itemgetter(1))
handles, labels = zip(*hl)
legend = self.m_subplot.legend(handles, labels, loc=self.m_legendLoc)
if (logXAxis):
axes.set_xscale("log")
axes.set_xbound(minX, maxX)#math.log(maxX))#, top = None)
if (logYAxis):
axes.set_yscale("log")
axes.set_ybound(1, maxY)#math.log(maxY))#, top = None)
if(shouldRelim):
axes.relim()
def addPrimaryLinePlots(self, ndarrayData, labels = None, logXAxis = False, logYAxis = False, color = None, shouldRelim = True):
self._addLinePlots(self.m_subplot, ndarrayData, labels, logXAxis, logYAxis, color, shouldRelim)
self.canvas.draw_idle()
def addSecondaryLinePlots(self, ndarrayData, labels = None, logXAxis = False, logYAxis = False, color = None, shouldRelim = True):
if(not self.m_secondaryYAxisRequired):
raise NameError #should use primary line plots, since secondary axis is not defined for this plot
self._addLinePlots(self.m_secondaryYAxis, ndarrayData, labels, logXAxis, logYAxis, color, shouldRelim)
self.canvas.draw_idle()
def addVerticalLine(self, xValue):
self.m_verticalLines.append(self.m_subplot.axvline(xValue, linestyle="-.", color="r"))
self.canvas.draw_idle()
def removeVerticalLines(self):
if(len(self.m_subplot.lines) > 0):
for i in range(len(self.m_subplot.lines)-1,-1,-1):
if(self.m_subplot.lines[i].get_linestyle() == '-.'):
line = self.m_subplot.lines.pop(i)
del line
# if(len(self.m_verticalLines) == 0):
# return
# for l in self.m_verticalLines:
# l.remove() #this function removes the actor from the matplotlib plot, not the list
# self.m_verticalLines.clear()
# def __autoScaleTopY(self):
# self.m_subplot.set_ylim(auto = True)
# if(self.m_subplot.get_ylim()[0] < 0.0):
# self.m_subplot.set_ylim(bottom=0)#, top = None)
# self.m_subplot.relim()
def addSecondaryScaledXAxis(self, forwardFunc, reverseFunc):
self.m_secondaryScaledXAxis = self.m_subplot.secondary_xaxis("top", functions=(forwardFunc, reverseFunc))
self.canvas.draw_idle()
def addSecondaryScaledYAxis(self, forwardFunc, reverseFunc):
        self.m_secondaryScaledYAxis = self.m_subplot.secondary_yaxis("right", functions=(forwardFunc, reverseFunc))
self.canvas.draw_idle()
def getPrimaryAxes(self):
return self.m_subplot
def getSecondaryAxes(self):
return self.m_secondaryYAxis
# def setLegendCenterRight(self):
# self.m_subplot.get_legend().s
def shadeBelowCurve(self, x, y, color = "b"):
self.m_subplot.fill(x,y,color, hatch = '/', fill = False)
#MPLContainer END
#PlotsFrame BEGIN
class PlotsFrame(tk.Frame):
# notebooks = {}
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.initUI(parent)
self.m_notebooks = {}
def initUI(self, parent):
# self.pack(side = tk.RIGHT, fill = tk.BOTH, expand = True)
self.grid_rowconfigure(0,weight=1)
self.grid_columnconfigure(0,weight=1)
def requestNotebook(self, key):
self.m_notebooks[key] = ttk.Notebook(self)
self.m_notebooks[key].grid(row=0,column=0,sticky="nsew") #initalize
self.m_notebooks[key].grid_forget() #but hide right away
return self.m_notebooks[key]
def raiseNotebook(self, key):
self.m_notebooks[key].grid(row=0,column=0,sticky="nsew")
def hideNotebook(self, key):
self.m_notebooks[key].grid_forget()
#PlotsFrame END
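if __name__ == '__main__':
    # Minimal usage sketch (illustrative only). MPLContainer expects a root
    # object exposing registerResizeCallback(); DemoRoot below is a
    # hypothetical stand-in, and in the real application PlotsFrame/notebooks
    # would host the containers as tabs.
    class DemoRoot(tk.Tk):
        def registerResizeCallback(self, callback):
            pass  # the demo does not re-dispatch resize events
    demo_root = DemoRoot()
    demo_root.title("MPLContainer demo")
    container = MPLContainer(demo_root, "Demo", "y", "x", demo_root)
    x = np.linspace(0.0, 1.0, 50)
    # row 0 of the array is x, every further row is one y-series
    container.addPrimaryLinePlots(np.vstack((x, x ** 2)), labels="x squared")
    demo_root.mainloop()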
|
the-stack_0_16636 | import flask
import argparse
import json
import websockets
import uuid
import asyncio
import logging
import sys
import re
import threading
from flask import Flask, request
from parlai.chat_service.services.api.config import HOST_URL, PARLAI_URL, PARLAI_PORT, HOST_PORT, DEBUG, LOG_FORMAT
# Server configuration
parser = argparse.ArgumentParser(description="API for ParlAI chatbot")
parser.add_argument('--hostname', default=PARLAI_URL, help="ParlAI web server hostname.")
parser.add_argument('--port', type=int, default=PARLAI_PORT, help="ParlAI web server port.")
parser.add_argument('--serving_hostname', default=HOST_URL, help="API web server hostname.")
parser.add_argument('--serving_port', type=int, default=HOST_PORT, help="API web server port.")
args = parser.parse_args()
hostname = args.hostname
port = args.port
serving_hostname = args.serving_hostname
serving_port = args.serving_port
app = Flask(__name__)
blueprint = flask.Blueprint('parlai_api', __name__, template_folder='templates')
# Log configuration
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=LOG_FORMAT)
connections = {}
websocket_uri = f"ws://{hostname}:{port}/websocket"
running = False
requests = []
responses = {}
def get_random_id():
return str(uuid.uuid4())
def format_message(message):
    # Strip the whitespace around each of the following punctuation characters
    # (the '-' is placed last in the character class so it is matched literally
    # instead of forming a range).
    p = re.compile(r"\s(?P<special_char>[$&+,:;=?@#|'<>.^*()%!-])\s?")
text_response = p.sub(r"\g<special_char>", message)
print(text_response)
# Remove only one space from the left for each of the following.
p = re.compile(r"(?P<special_char>[.,:?!])")
return p.sub(r"\g<special_char> ", text_response)
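# Illustrative behaviour of the two substitutions above, e.g.:
#   format_message("hi , how are you ?")  ->  "hi, how are you? "
# spaces around punctuation are squeezed out first, then a single space is
# re-added after sentence punctuation (a trailing space may remain).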
class ParlaiAPI:
@staticmethod
def parse():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
while True:
if not requests:
continue
request = requests.pop(0)
result = loop.run_until_complete(request[1]())
responses[request[0]] = result
@staticmethod
async def send_message(user_message, message_history=[], persona=False):
if persona:
message = "your persona: "
else:
message = ""
message += user_message
request_dict = {"text": message, "message_history": message_history}
request_string = json.dumps(request_dict)
request_bytes = bytes(request_string, encoding="UTF-8")
print(request_bytes)
try:
async with websockets.connect(websocket_uri) as ws:
await ws.send(request_bytes)
response = await ws.recv()
response = json.loads(response)
print(response)
try:
response['text'] = format_message(response.get('text'))
except Exception as e:
print(e)
return response
except Exception as e:
return {'text': str(e), 'error': True}
@blueprint.route('/api/send_message', methods=["POST"])
def send_message():
request_id = get_random_id()
data = request.get_json()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
message_text, message_history = data.get('text', None), data.get('message_history', [])
requests.append([request_id,
lambda: ParlaiAPI.send_message(message_text, message_history)])
print(str(requests))
logging.warning(str(requests))
while request_id not in responses:
pass
result = responses[request_id]
del responses[request_id]
return result, 200
async def main():
thread = threading.Thread(target=ParlaiAPI.parse)
thread.start()
app.register_blueprint(blueprint)
app.debug = True
app.run(host=serving_hostname, threaded=True, port=serving_port, debug=DEBUG)
main_loop = asyncio.get_event_loop()
main_loop.run_until_complete(main())
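# Example request against the running service (illustrative; host and port
# come from --serving_hostname/--serving_port or the defaults in config.py):
#   curl -X POST http://<serving_hostname>:<serving_port>/api/send_message \
#        -H "Content-Type: application/json" \
#        -d '{"text": "hello", "message_history": []}'
# The handler blocks until the background ParlAI worker thread has placed a
# reply in `responses`, then returns that reply as JSON.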
|
the-stack_0_16638 | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.benchmark import datagen, algorithms
from cuml.benchmark.bench_helper_funcs import _training_data_to_numpy
from cuml.benchmark.runners import AccuracyComparisonRunner, \
SpeedupComparisonRunner, run_variations
from cuml.common.import_utils import has_umap
from cuml.common.import_utils import has_xgboost
import numpy as np
import cudf
import pytest
from numba import cuda
from sklearn import metrics
import pandas as pd
import time
@pytest.mark.parametrize('dataset', ['blobs', 'regression', 'classification'])
def test_data_generators(dataset):
data = datagen.gen_data(dataset, "numpy", n_samples=100, n_features=10)
assert isinstance(data[0], np.ndarray)
assert data[0].shape[0] == 100
@pytest.mark.parametrize('input_type',
['numpy', 'cudf', 'pandas', 'gpuarray', 'gpuarray-c'])
def test_data_generator_types(input_type):
X, *_ = datagen.gen_data('blobs', input_type, n_samples=100, n_features=10)
if input_type == 'numpy':
assert isinstance(X, np.ndarray)
elif input_type == 'cudf':
assert isinstance(X, cudf.DataFrame)
elif input_type == 'pandas':
assert isinstance(X, pd.DataFrame)
elif input_type == 'gpuarray':
assert cuda.is_cuda_array(X)
elif input_type == 'gpuarray-c':
assert cuda.is_cuda_array(X)
else:
assert False
def test_data_generator_split():
X_train, y_train, X_test, y_test = datagen.gen_data(
'blobs', 'numpy', n_samples=100, n_features=10, test_fraction=0.20
)
assert X_train.shape == (100, 10)
assert X_test.shape == (25, 10)
def test_run_variations():
algo = algorithms.algorithm_by_name("LogisticRegression")
res = run_variations(
[algo],
dataset_name="classification",
bench_rows=[100, 200],
bench_dims=[10, 20],
)
assert res.shape[0] == 4
assert (res.n_samples == 100).sum() == 2
assert (res.n_features == 20).sum() == 2
def test_speedup_runner():
class MockAlgo:
def __init__(self, t):
self.t = t
def fit(self, X, y):
time.sleep(self.t)
return
def predict(self, X):
nr = X.shape[0]
res = np.zeros(nr)
res[0:int(nr / 5.0)] = 1.0
return res
class FastMockAlgo(MockAlgo):
def __init__(self):
MockAlgo.__init__(self, 0.1)
class SlowMockAlgo(MockAlgo):
def __init__(self):
MockAlgo.__init__(self, 2)
pair = algorithms.AlgorithmPair(
SlowMockAlgo,
FastMockAlgo,
shared_args={},
name="Mock",
accuracy_function=metrics.accuracy_score,
)
runner = SpeedupComparisonRunner(
[20], [5], dataset_name='zeros'
)
results = runner.run(pair)[0]
expected_speedup = SlowMockAlgo().t / FastMockAlgo().t
assert results["speedup"] == pytest.approx(expected_speedup, 0.4)
def test_multi_reps():
class CountingAlgo:
tot_reps = 0
def fit(self, X, y):
CountingAlgo.tot_reps += 1
pair = algorithms.AlgorithmPair(
CountingAlgo,
CountingAlgo,
shared_args={},
name="Counting",
)
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='zeros', test_fraction=0.20, n_reps=4
)
runner.run(pair)
# Double the n_reps since it is used in cpu and cuml versions
assert CountingAlgo.tot_reps == 8
def test_accuracy_runner():
# Set up data that should deliver accuracy of 0.20 if all goes right
class MockAlgo:
def fit(self, X, y):
return
def predict(self, X):
nr = X.shape[0]
res = np.zeros(nr)
res[0:int(nr / 5.0)] = 1.0
return res
pair = algorithms.AlgorithmPair(
MockAlgo,
MockAlgo,
shared_args={},
name="Mock",
accuracy_function=metrics.accuracy_score,
)
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='zeros', test_fraction=0.20
)
results = runner.run(pair)[0]
assert results["cuml_acc"] == pytest.approx(0.80)
# Only test a few algorithms (which collectively span several types)
# to reduce runtime burden
@pytest.mark.parametrize('algo_name', ['UMAP-Supervised',
'DBSCAN',
'LogisticRegression',
'ElasticNet',
'FIL'])
def test_real_algos_runner(algo_name):
pair = algorithms.algorithm_by_name(algo_name)
if (algo_name == 'UMAP-Supervised' and not has_umap()) or \
(algo_name == 'FIL' and not has_xgboost()):
pytest.xfail()
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='classification', test_fraction=0.20
)
results = runner.run(pair)[0]
print(results)
assert results["cuml_acc"] is not None
# Test FIL with several input types
@pytest.mark.parametrize('input_type', ['numpy', 'cudf', 'gpuarray',
'gpuarray-c'])
def test_fil_input_types(input_type):
pair = algorithms.algorithm_by_name('FIL')
if not has_xgboost():
pytest.xfail()
runner = AccuracyComparisonRunner(
[20], [5], dataset_name='classification', test_fraction=0.5,
input_type=input_type)
results = runner.run(pair, run_cpu=False)[0]
assert results["cuml_acc"] is not None
@pytest.mark.parametrize('input_type', ['numpy', 'cudf', 'pandas', 'gpuarray'])
def test_training_data_to_numpy(input_type):
X, y, *_ = datagen.gen_data(
'blobs', input_type, n_samples=100, n_features=10
)
X_np, y_np = _training_data_to_numpy(X, y)
assert isinstance(X_np, np.ndarray)
assert isinstance(y_np, np.ndarray)
|
the-stack_0_16640 | # Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import sys
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Union, cast
from pyee import EventEmitter
from playwright._impl._api_structures import FilePayload, Position
from playwright._impl._api_types import Error
from playwright._impl._connection import (
ChannelOwner,
from_channel,
from_nullable_channel,
)
from playwright._impl._element_handle import ElementHandle, convert_select_option_values
from playwright._impl._event_context_manager import EventContextManagerImpl
from playwright._impl._file_chooser import normalize_file_payloads
from playwright._impl._helper import (
DocumentLoadState,
FrameNavigatedEvent,
KeyboardModifier,
MouseButton,
URLMatch,
URLMatcher,
async_readfile,
locals_to_params,
monotonic_time,
)
from playwright._impl._js_handle import (
JSHandle,
Serializable,
parse_result,
serialize_argument,
)
from playwright._impl._locator import FrameLocator, Locator
from playwright._impl._network import Response
from playwright._impl._wait_helper import WaitHelper
if sys.version_info >= (3, 8): # pragma: no cover
from typing import Literal
else: # pragma: no cover
from typing_extensions import Literal
if TYPE_CHECKING: # pragma: no cover
from playwright._impl._page import Page
class Frame(ChannelOwner):
def __init__(
self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
) -> None:
super().__init__(parent, type, guid, initializer)
self._parent_frame = from_nullable_channel(initializer.get("parentFrame"))
if self._parent_frame:
self._parent_frame._child_frames.append(self)
self._name = initializer["name"]
self._url = initializer["url"]
self._detached = False
self._child_frames: List[Frame] = []
self._page: "Page"
self._load_states: Set[str] = set(initializer["loadStates"])
self._event_emitter = EventEmitter()
self._channel.on(
"loadstate",
lambda params: self._on_load_state(params.get("add"), params.get("remove")),
)
self._channel.on(
"navigated",
lambda params: self._on_frame_navigated(params),
)
def __repr__(self) -> str:
return f"<Frame name={self.name} url={self.url!r}>"
def _on_load_state(
self, add: DocumentLoadState = None, remove: DocumentLoadState = None
) -> None:
if add:
self._load_states.add(add)
self._event_emitter.emit("loadstate", add)
elif remove and remove in self._load_states:
self._load_states.remove(remove)
def _on_frame_navigated(self, event: FrameNavigatedEvent) -> None:
self._url = event["url"]
self._name = event["name"]
self._event_emitter.emit("navigated", event)
if "error" not in event and hasattr(self, "_page") and self._page:
self._page.emit("framenavigated", self)
@property
def page(self) -> "Page":
return self._page
async def goto(
self,
url: str,
timeout: float = None,
waitUntil: DocumentLoadState = None,
referer: str = None,
) -> Optional[Response]:
return cast(
Optional[Response],
from_nullable_channel(
await self._channel.send("goto", locals_to_params(locals()))
),
)
def _setup_navigation_wait_helper(
self, wait_name: str, timeout: float = None
) -> WaitHelper:
wait_helper = WaitHelper(self._page, f"frame.{wait_name}")
wait_helper.reject_on_event(
self._page, "close", Error("Navigation failed because page was closed!")
)
wait_helper.reject_on_event(
self._page, "crash", Error("Navigation failed because page crashed!")
)
wait_helper.reject_on_event(
self._page,
"framedetached",
Error("Navigating frame was detached!"),
lambda frame: frame == self,
)
if timeout is None:
timeout = self._page._timeout_settings.navigation_timeout()
wait_helper.reject_on_timeout(timeout, f"Timeout {timeout}ms exceeded.")
return wait_helper
def expect_navigation(
self,
url: URLMatch = None,
wait_until: DocumentLoadState = None,
timeout: float = None,
) -> EventContextManagerImpl[Response]:
if not wait_until:
wait_until = "load"
if timeout is None:
timeout = self._page._timeout_settings.navigation_timeout()
deadline = monotonic_time() + timeout
wait_helper = self._setup_navigation_wait_helper("expect_navigation", timeout)
to_url = f' to "{url}"' if url else ""
wait_helper.log(f"waiting for navigation{to_url} until '{wait_until}'")
matcher = (
URLMatcher(self._page._browser_context._options.get("baseURL"), url)
if url
else None
)
def predicate(event: Any) -> bool:
# Any failed navigation results in a rejection.
if event.get("error"):
return True
wait_helper.log(f' navigated to "{event["url"]}"')
return not matcher or matcher.matches(event["url"])
wait_helper.wait_for_event(
self._event_emitter,
"navigated",
predicate=predicate,
)
async def continuation() -> Optional[Response]:
event = await wait_helper.result()
if "error" in event:
raise Error(event["error"])
if wait_until not in self._load_states:
t = deadline - monotonic_time()
if t > 0:
await self._wait_for_load_state_impl(state=wait_until, timeout=t)
if "newDocument" in event and "request" in event["newDocument"]:
request = from_channel(event["newDocument"]["request"])
return await request.response()
return None
return EventContextManagerImpl(asyncio.create_task(continuation()))
async def wait_for_url(
self,
url: URLMatch,
wait_until: DocumentLoadState = None,
timeout: float = None,
) -> None:
matcher = URLMatcher(self._page._browser_context._options.get("baseURL"), url)
if matcher.matches(self.url):
await self._wait_for_load_state_impl(state=wait_until, timeout=timeout)
return
async with self.expect_navigation(
url=url, wait_until=wait_until, timeout=timeout
):
pass
async def wait_for_load_state(
self,
state: Literal["domcontentloaded", "load", "networkidle"] = None,
timeout: float = None,
) -> None:
return await self._wait_for_load_state_impl(state, timeout)
async def _wait_for_load_state_impl(
self, state: DocumentLoadState = None, timeout: float = None
) -> None:
if not state:
state = "load"
if state not in ("load", "domcontentloaded", "networkidle", "commit"):
raise Error(
"state: expected one of (load|domcontentloaded|networkidle|commit)"
)
if state in self._load_states:
return
wait_helper = self._setup_navigation_wait_helper("wait_for_load_state", timeout)
def handle_load_state_event(actual_state: str) -> bool:
wait_helper.log(f'"{actual_state}" event fired')
return actual_state == state
wait_helper.wait_for_event(
self._event_emitter,
"loadstate",
handle_load_state_event,
)
await wait_helper.result()
async def frame_element(self) -> ElementHandle:
return from_channel(await self._channel.send("frameElement"))
async def evaluate(self, expression: str, arg: Serializable = None) -> Any:
return parse_result(
await self._channel.send(
"evaluateExpression",
dict(
expression=expression,
arg=serialize_argument(arg),
),
)
)
async def evaluate_handle(
self, expression: str, arg: Serializable = None
) -> JSHandle:
return from_channel(
await self._channel.send(
"evaluateExpressionHandle",
dict(
expression=expression,
arg=serialize_argument(arg),
),
)
)
async def query_selector(
self, selector: str, strict: bool = None
) -> Optional[ElementHandle]:
return from_nullable_channel(
await self._channel.send("querySelector", locals_to_params(locals()))
)
async def query_selector_all(self, selector: str) -> List[ElementHandle]:
return list(
map(
from_channel,
await self._channel.send("querySelectorAll", dict(selector=selector)),
)
)
async def wait_for_selector(
self,
selector: str,
strict: bool = None,
timeout: float = None,
state: Literal["attached", "detached", "hidden", "visible"] = None,
) -> Optional[ElementHandle]:
return from_nullable_channel(
await self._channel.send("waitForSelector", locals_to_params(locals()))
)
async def is_checked(
self, selector: str, strict: bool = None, timeout: float = None
) -> bool:
return await self._channel.send("isChecked", locals_to_params(locals()))
async def is_disabled(
self, selector: str, strict: bool = None, timeout: float = None
) -> bool:
return await self._channel.send("isDisabled", locals_to_params(locals()))
async def is_editable(
self, selector: str, strict: bool = None, timeout: float = None
) -> bool:
return await self._channel.send("isEditable", locals_to_params(locals()))
async def is_enabled(
self, selector: str, strict: bool = None, timeout: float = None
) -> bool:
return await self._channel.send("isEnabled", locals_to_params(locals()))
async def is_hidden(
self, selector: str, strict: bool = None, timeout: float = None
) -> bool:
return await self._channel.send("isHidden", locals_to_params(locals()))
async def is_visible(
self, selector: str, strict: bool = None, timeout: float = None
) -> bool:
return await self._channel.send("isVisible", locals_to_params(locals()))
async def dispatch_event(
self,
selector: str,
type: str,
eventInit: Dict = None,
strict: bool = None,
timeout: float = None,
) -> None:
await self._channel.send(
"dispatchEvent",
locals_to_params(
dict(
selector=selector,
type=type,
eventInit=serialize_argument(eventInit),
strict=strict,
timeout=timeout,
),
),
)
async def eval_on_selector(
self,
selector: str,
expression: str,
arg: Serializable = None,
strict: bool = None,
) -> Any:
return parse_result(
await self._channel.send(
"evalOnSelector",
locals_to_params(
dict(
selector=selector,
expression=expression,
arg=serialize_argument(arg),
strict=strict,
)
),
)
)
async def eval_on_selector_all(
self,
selector: str,
expression: str,
arg: Serializable = None,
) -> Any:
return parse_result(
await self._channel.send(
"evalOnSelectorAll",
dict(
selector=selector,
expression=expression,
arg=serialize_argument(arg),
),
)
)
async def content(self) -> str:
return await self._channel.send("content")
async def set_content(
self,
html: str,
timeout: float = None,
waitUntil: DocumentLoadState = None,
) -> None:
await self._channel.send("setContent", locals_to_params(locals()))
@property
def name(self) -> str:
return self._name or ""
@property
def url(self) -> str:
return self._url or ""
@property
def parent_frame(self) -> Optional["Frame"]:
return self._parent_frame
@property
def child_frames(self) -> List["Frame"]:
return self._child_frames.copy()
def is_detached(self) -> bool:
return self._detached
async def add_script_tag(
self,
url: str = None,
path: Union[str, Path] = None,
content: str = None,
type: str = None,
) -> ElementHandle:
params = locals_to_params(locals())
if path:
params["content"] = (
(await async_readfile(path)).decode()
+ "\n//# sourceURL="
+ str(Path(path))
)
del params["path"]
return from_channel(await self._channel.send("addScriptTag", params))
async def add_style_tag(
self, url: str = None, path: Union[str, Path] = None, content: str = None
) -> ElementHandle:
params = locals_to_params(locals())
if path:
params["content"] = (
(await async_readfile(path)).decode()
+ "\n/*# sourceURL="
+ str(Path(path))
+ "*/"
)
del params["path"]
return from_channel(await self._channel.send("addStyleTag", params))
async def click(
self,
selector: str,
modifiers: List[KeyboardModifier] = None,
position: Position = None,
delay: float = None,
button: MouseButton = None,
clickCount: int = None,
timeout: float = None,
force: bool = None,
noWaitAfter: bool = None,
strict: bool = None,
trial: bool = None,
) -> None:
await self._channel.send("click", locals_to_params(locals()))
async def dblclick(
self,
selector: str,
modifiers: List[KeyboardModifier] = None,
position: Position = None,
delay: float = None,
button: MouseButton = None,
timeout: float = None,
force: bool = None,
noWaitAfter: bool = None,
strict: bool = None,
trial: bool = None,
) -> None:
await self._channel.send("dblclick", locals_to_params(locals()))
async def tap(
self,
selector: str,
modifiers: List[KeyboardModifier] = None,
position: Position = None,
timeout: float = None,
force: bool = None,
noWaitAfter: bool = None,
strict: bool = None,
trial: bool = None,
) -> None:
await self._channel.send("tap", locals_to_params(locals()))
async def fill(
self,
selector: str,
value: str,
timeout: float = None,
noWaitAfter: bool = None,
strict: bool = None,
force: bool = None,
) -> None:
await self._channel.send("fill", locals_to_params(locals()))
def locator(
self,
selector: str,
) -> Locator:
return Locator(self, selector)
def frame_locator(self, selector: str) -> FrameLocator:
return FrameLocator(self, selector)
async def focus(
self, selector: str, strict: bool = None, timeout: float = None
) -> None:
await self._channel.send("focus", locals_to_params(locals()))
async def text_content(
self, selector: str, strict: bool = None, timeout: float = None
) -> Optional[str]:
return await self._channel.send("textContent", locals_to_params(locals()))
async def inner_text(
self, selector: str, strict: bool = None, timeout: float = None
) -> str:
return await self._channel.send("innerText", locals_to_params(locals()))
async def inner_html(
self, selector: str, strict: bool = None, timeout: float = None
) -> str:
return await self._channel.send("innerHTML", locals_to_params(locals()))
async def get_attribute(
self, selector: str, name: str, strict: bool = None, timeout: float = None
) -> Optional[str]:
return await self._channel.send("getAttribute", locals_to_params(locals()))
async def hover(
self,
selector: str,
modifiers: List[KeyboardModifier] = None,
position: Position = None,
timeout: float = None,
force: bool = None,
strict: bool = None,
trial: bool = None,
) -> None:
await self._channel.send("hover", locals_to_params(locals()))
async def drag_and_drop(
self,
source: str,
target: str,
source_position: Position = None,
target_position: Position = None,
force: bool = None,
noWaitAfter: bool = None,
strict: bool = None,
timeout: float = None,
trial: bool = None,
) -> None:
await self._channel.send("dragAndDrop", locals_to_params(locals()))
async def select_option(
self,
selector: str,
value: Union[str, List[str]] = None,
index: Union[int, List[int]] = None,
label: Union[str, List[str]] = None,
element: Union["ElementHandle", List["ElementHandle"]] = None,
timeout: float = None,
noWaitAfter: bool = None,
strict: bool = None,
force: bool = None,
) -> List[str]:
params = locals_to_params(
dict(
selector=selector,
timeout=timeout,
noWaitAfter=noWaitAfter,
strict=strict,
force=force,
**convert_select_option_values(value, index, label, element),
)
)
return await self._channel.send("selectOption", params)
async def input_value(
self,
selector: str,
strict: bool = None,
timeout: float = None,
) -> str:
return await self._channel.send("inputValue", locals_to_params(locals()))
async def set_input_files(
self,
selector: str,
files: Union[str, Path, FilePayload, List[Union[str, Path]], List[FilePayload]],
strict: bool = None,
timeout: float = None,
noWaitAfter: bool = None,
) -> None:
params = locals_to_params(locals())
params["files"] = await normalize_file_payloads(files)
await self._channel.send("setInputFiles", params)
async def type(
self,
selector: str,
text: str,
delay: float = None,
strict: bool = None,
timeout: float = None,
noWaitAfter: bool = None,
) -> None:
await self._channel.send("type", locals_to_params(locals()))
async def press(
self,
selector: str,
key: str,
delay: float = None,
strict: bool = None,
timeout: float = None,
noWaitAfter: bool = None,
) -> None:
await self._channel.send("press", locals_to_params(locals()))
async def check(
self,
selector: str,
position: Position = None,
timeout: float = None,
force: bool = None,
noWaitAfter: bool = None,
strict: bool = None,
trial: bool = None,
) -> None:
await self._channel.send("check", locals_to_params(locals()))
async def uncheck(
self,
selector: str,
position: Position = None,
timeout: float = None,
force: bool = None,
noWaitAfter: bool = None,
strict: bool = None,
trial: bool = None,
) -> None:
await self._channel.send("uncheck", locals_to_params(locals()))
async def wait_for_timeout(self, timeout: float) -> None:
await self._channel.send("waitForTimeout", locals_to_params(locals()))
async def wait_for_function(
self,
expression: str,
arg: Serializable = None,
timeout: float = None,
polling: Union[float, Literal["raf"]] = None,
) -> JSHandle:
params = locals_to_params(locals())
params["arg"] = serialize_argument(arg)
return from_channel(await self._channel.send("waitForFunction", params))
async def title(self) -> str:
return await self._channel.send("title")
async def set_checked(
self,
selector: str,
checked: bool,
position: Position = None,
timeout: float = None,
force: bool = None,
noWaitAfter: bool = None,
strict: bool = None,
trial: bool = None,
) -> None:
if checked:
await self.check(
selector=selector,
position=position,
timeout=timeout,
force=force,
noWaitAfter=noWaitAfter,
strict=strict,
trial=trial,
)
else:
await self.uncheck(
selector=selector,
position=position,
timeout=timeout,
force=force,
noWaitAfter=noWaitAfter,
strict=strict,
trial=trial,
)
|
the-stack_0_16642 | from django.conf.urls import url
# URLconf maps URL patterns (described as regular expressions) to views
from . import views
app_name = 'polls'
urlpatterns = [
# ex: /polls/
url(r'^$', views.IndexView.as_view(), name='index'),
# ex. /polls/5/
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'),
# ex. /polls/5/results/
url(r'^(?P<pk>[0-9]+)/results/$', views.ResultsView.as_view(), name='results'),
# ex. /polls/5/vote/
url(r'^(?P<question_id>[0-9]+)/vote/$', views.vote, name='vote'),
] |
the-stack_0_16643 | import collections
import datetime
try:
from github import Github
except ImportError:
raise ImportError('Install PyGithub from https://github.com/PyGithub/PyGithub or via pip')
API_TOKEN = None
if API_TOKEN is None:
raise ValueError('Need to specify an API token')
p = Github(API_TOKEN)
last_release = datetime.datetime(year=2017, month=11, day=8)
authors = []
comments = p.get_repo('brian-team/brian2').get_issues_comments(since=last_release)
comment_counter = 0
for comment in comments:
name = comment.user.name
if name is None:
        authors.append('`@{login} <https://github.com/{login}>`_'.format(
            login=comment.user.login))
    else:
        authors.append(
            '{name} (`@{login} <https://github.com/{login}>`_)'.format(
                login=comment.user.login,
                name=name))
comment_counter += 1
print('Counted {} comments'.format(comment_counter))
issues = p.get_repo('brian-team/brian2').get_issues(since=last_release)
issue_counter = 0
for issue in issues:
name = issue.user.name
if name is None:
        authors.append('`@{login} <https://github.com/{login}>`_'.format(
            login=issue.user.login))
    else:
        authors.append(
            '{name} (`@{login} <https://github.com/{login}>`_)'.format(
                login=issue.user.login,
                name=name))
issue_counter += 1
print('Counted {} issues'.format(issue_counter))
counted = collections.Counter(authors)
sorted_contributions = sorted(counted.items(), key=lambda item: item[1], reverse=True)
for name, contributions in sorted_contributions:
    print('{:>4} {}'.format(contributions, name))
|
the-stack_0_16648 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_qos_adaptive_policy_group
short_description: NetApp ONTAP Adaptive Quality of Service policy group.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.9'
author: NetApp Ansible Team (@joshedmonds) <[email protected]>
description:
- Create, destroy, modify, or rename an Adaptive QoS policy group on NetApp ONTAP. Module is based on the standard QoS policy group module.
options:
state:
choices: ['present', 'absent']
description:
- Whether the specified policy group should exist or not.
default: 'present'
type: str
name:
description:
- The name of the policy group to manage.
type: str
required: true
vserver:
description:
- Name of the vserver to use.
type: str
required: true
from_name:
description:
- Name of the existing policy group to be renamed to name.
type: str
absolute_min_iops:
description:
- Absolute minimum IOPS defined by this policy.
type: str
expected_iops:
description:
- Minimum expected IOPS defined by this policy.
type: str
peak_iops:
description:
- Maximum possible IOPS per allocated or used TB|GB.
type: str
peak_iops_allocation:
choices: ['allocated_space', 'used_space']
description:
- Whether peak_iops is specified by allocated or used space.
default: 'used_space'
type: str
force:
type: bool
default: False
description:
- Setting to 'true' forces the deletion of the workloads associated with the policy group along with the policy group.
'''
EXAMPLES = """
- name: create adaptive qos policy group
na_ontap_qos_adaptive_policy_group:
state: present
name: aq_policy_1
vserver: policy_vserver
absolute_min_iops: 70IOPS
expected_iops: 100IOPS/TB
peak_iops: 250IOPS/TB
peak_iops_allocation: allocated_space
hostname: 10.193.78.30
username: admin
password: netapp1!
- name: modify adaptive qos policy group expected iops
na_ontap_qos_adaptive_policy_group:
state: present
name: aq_policy_1
vserver: policy_vserver
absolute_min_iops: 70IOPS
expected_iops: 125IOPS/TB
peak_iops: 250IOPS/TB
peak_iops_allocation: allocated_space
hostname: 10.193.78.30
username: admin
password: netapp1!
- name: modify adaptive qos policy group peak iops allocation
na_ontap_qos_adaptive_policy_group:
state: present
name: aq_policy_1
vserver: policy_vserver
absolute_min_iops: 70IOPS
expected_iops: 125IOPS/TB
peak_iops: 250IOPS/TB
peak_iops_allocation: used_space
hostname: 10.193.78.30
username: admin
password: netapp1!
- name: delete qos policy group
na_ontap_qos_adaptive_policy_group:
state: absent
name: aq_policy_1
vserver: policy_vserver
hostname: 10.193.78.30
username: admin
password: netapp1!
"""
RETURN = """
"""
import traceback
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapAdaptiveQosPolicyGroup(object):
"""
Create, delete, modify and rename a policy group.
"""
def __init__(self):
"""
Initialize the Ontap qos policy group class.
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, type='str', choices=['present', 'absent'], default='present'),
name=dict(required=True, type='str'),
from_name=dict(required=False, type='str'),
vserver=dict(required=True, type='str'),
absolute_min_iops=dict(required=False, type='str'),
expected_iops=dict(required=False, type='str'),
peak_iops=dict(required=False, type='str'),
peak_iops_allocation=dict(choices=['allocated_space', 'used_space'], default='used_space'),
force=dict(required=False, type='bool', default=False)
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(
msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(
module=self.module)
def get_policy_group(self, policy_group_name=None):
"""
Return details of a policy group.
:param policy_group_name: policy group name
:return: policy group details.
:rtype: dict.
"""
if policy_group_name is None:
policy_group_name = self.parameters['name']
policy_group_get_iter = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-get-iter')
policy_group_info = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-info')
policy_group_info.add_new_child('policy-group', policy_group_name)
policy_group_info.add_new_child('vserver', self.parameters['vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(policy_group_info)
policy_group_get_iter.add_child_elem(query)
result = self.server.invoke_successfully(policy_group_get_iter, True)
policy_group_detail = None
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) == 1:
policy_info = result.get_child_by_name('attributes-list').get_child_by_name('qos-adaptive-policy-group-info')
policy_group_detail = {
'name': policy_info.get_child_content('policy-group'),
'vserver': policy_info.get_child_content('vserver'),
'absolute_min_iops': policy_info.get_child_content('absolute-min-iops'),
'expected_iops': policy_info.get_child_content('expected-iops'),
'peak_iops': policy_info.get_child_content('peak-iops'),
'peak_iops_allocation': policy_info.get_child_content('peak-iops-allocation')
}
return policy_group_detail
def create_policy_group(self):
"""
create a policy group name.
"""
policy_group = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-create')
policy_group.add_new_child('policy-group', self.parameters['name'])
policy_group.add_new_child('vserver', self.parameters['vserver'])
if self.parameters.get('absolute_min_iops'):
policy_group.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
if self.parameters.get('expected_iops'):
policy_group.add_new_child('expected-iops', self.parameters['expected_iops'])
if self.parameters.get('peak_iops'):
policy_group.add_new_child('peak-iops', self.parameters['peak_iops'])
if self.parameters.get('peak_iops_allocation'):
policy_group.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
try:
self.server.invoke_successfully(policy_group, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating adaptive qos policy group %s: %s' %
(self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def delete_policy_group(self, policy_group=None):
"""
delete an existing policy group.
:param policy_group: policy group name.
"""
if policy_group is None:
policy_group = self.parameters['name']
policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-delete')
policy_group_obj.add_new_child('policy-group', policy_group)
if self.parameters.get('force'):
policy_group_obj.add_new_child('force', str(self.parameters['force']))
try:
self.server.invoke_successfully(policy_group_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error deleting adaptive qos policy group %s: %s' %
(policy_group, to_native(error)),
exception=traceback.format_exc())
def modify_policy_group(self):
"""
Modify policy group.
"""
policy_group_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-modify')
policy_group_obj.add_new_child('policy-group', self.parameters['name'])
if self.parameters.get('absolute_min_iops'):
policy_group_obj.add_new_child('absolute-min-iops', self.parameters['absolute_min_iops'])
if self.parameters.get('expected_iops'):
policy_group_obj.add_new_child('expected-iops', self.parameters['expected_iops'])
if self.parameters.get('peak_iops'):
policy_group_obj.add_new_child('peak-iops', self.parameters['peak_iops'])
if self.parameters.get('peak_iops_allocation'):
policy_group_obj.add_new_child('peak-iops-allocation', self.parameters['peak_iops_allocation'])
try:
self.server.invoke_successfully(policy_group_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying adaptive qos policy group %s: %s' %
(self.parameters['name'], to_native(error)),
exception=traceback.format_exc())
def rename_policy_group(self):
"""
Rename policy group name.
"""
rename_obj = netapp_utils.zapi.NaElement('qos-adaptive-policy-group-rename')
rename_obj.add_new_child('new-name', self.parameters['name'])
rename_obj.add_new_child('policy-group-name', self.parameters['from_name'])
try:
self.server.invoke_successfully(rename_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error renaming adaptive qos policy group %s: %s' %
(self.parameters['from_name'], to_native(error)),
exception=traceback.format_exc())
def modify_helper(self, modify):
"""
helper method to modify policy group.
:param modify: modified attributes.
"""
for attribute in modify.keys():
if attribute in ['absolute_min_iops', 'expected_iops', 'peak_iops', 'peak_iops_allocation']:
self.modify_policy_group()
def apply(self):
"""
Run module based on playbook
"""
self.autosupport_log("na_ontap_qos_policy_group")
current = self.get_policy_group()
rename, cd_action = None, None
if self.parameters.get('from_name'):
rename = self.na_helper.is_rename_action(self.get_policy_group(self.parameters['from_name']), current)
else:
cd_action = self.na_helper.get_cd_action(current, self.parameters)
modify = self.na_helper.get_modified_attributes(current, self.parameters)
if self.na_helper.changed:
if self.module.check_mode:
pass
else:
if rename:
self.rename_policy_group()
if cd_action == 'create':
self.create_policy_group()
elif cd_action == 'delete':
self.delete_policy_group()
elif modify:
self.modify_helper(modify)
self.module.exit_json(changed=self.na_helper.changed)
def autosupport_log(self, event_name):
"""
Create a log event against the provided vserver
"""
server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
netapp_utils.ems_log_event(event_name, server)
def main():
'''Apply vserver operations from playbook'''
qos_policy_group = NetAppOntapAdaptiveQosPolicyGroup()
qos_policy_group.apply()
if __name__ == '__main__':
main()
|
the-stack_0_16650 | """
Normalization class for Matplotlib that can be used to produce
colorbars.
"""
import inspect
import warnings
import numpy as np
from numpy import ma
from .interval import (PercentileInterval, AsymmetricPercentileInterval,
ManualInterval, MinMaxInterval, BaseInterval)
from .stretch import (LinearStretch, SqrtStretch, PowerStretch, LogStretch,
AsinhStretch, BaseStretch)
from ..utils.exceptions import AstropyDeprecationWarning
try:
import matplotlib # pylint: disable=W0611
from matplotlib.colors import Normalize
from matplotlib import pyplot as plt
except ImportError:
class Normalize:
def __init__(self, *args, **kwargs):
raise ImportError('matplotlib is required in order to use this '
'class.')
__all__ = ['ImageNormalize', 'simple_norm', 'imshow_norm']
__doctest_requires__ = {'*': ['matplotlib']}
class ImageNormalize(Normalize):
"""
Normalization class to be used with Matplotlib.
Parameters
----------
data : `~numpy.ndarray`, optional
The image array. This input is used only if ``interval`` is
also input. ``data`` and ``interval`` are used to compute the
vmin and/or vmax values only if ``vmin`` or ``vmax`` are not
input.
interval : `~astropy.visualization.BaseInterval` subclass instance, optional
The interval object to apply to the input ``data`` to determine
the ``vmin`` and ``vmax`` values. This input is used only if
``data`` is also input. ``data`` and ``interval`` are used to
compute the vmin and/or vmax values only if ``vmin`` or ``vmax``
are not input.
vmin, vmax : float, optional
The minimum and maximum levels to show for the data. The
``vmin`` and ``vmax`` inputs override any calculated values from
the ``interval`` and ``data`` inputs.
stretch : `~astropy.visualization.BaseStretch` subclass instance
The stretch object to apply to the data. The default is
`~astropy.visualization.LinearStretch`.
clip : bool, optional
If `True`, data values outside the [0:1] range are clipped to
the [0:1] range.
invalid : `None` or float, optional
Value to assign NaN values generated by this class. NaNs in the
input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value < 0).
If `None`, then NaN values are not replaced. This keyword has
no effect if ``clip=True``.
"""
def __init__(self, data=None, interval=None, vmin=None, vmax=None,
stretch=LinearStretch(), clip=False, invalid=-1.0):
# this super call checks for matplotlib
super().__init__(vmin=vmin, vmax=vmax, clip=clip)
self.vmin = vmin
self.vmax = vmax
if stretch is None:
raise ValueError('stretch must be input')
if not isinstance(stretch, BaseStretch):
raise TypeError('stretch must be an instance of a BaseStretch '
'subclass')
self.stretch = stretch
if interval is not None and not isinstance(interval, BaseInterval):
raise TypeError('interval must be an instance of a BaseInterval '
'subclass')
self.interval = interval
self.inverse_stretch = stretch.inverse
self.clip = clip
self.invalid = invalid
# Define vmin and vmax if not None and data was input
if data is not None:
self._set_limits(data)
def _set_limits(self, data):
if self.vmin is not None and self.vmax is not None:
return
# Define vmin and vmax from the interval class if not None
if self.interval is None:
if self.vmin is None:
self.vmin = np.min(data[np.isfinite(data)])
if self.vmax is None:
self.vmax = np.max(data[np.isfinite(data)])
else:
_vmin, _vmax = self.interval.get_limits(data)
if self.vmin is None:
self.vmin = _vmin
if self.vmax is None:
self.vmax = _vmax
def __call__(self, values, clip=None, invalid=None):
"""
Transform values using this normalization.
Parameters
----------
values : array_like
The input values.
clip : bool, optional
If `True`, values outside the [0:1] range are clipped to the
[0:1] range. If `None` then the ``clip`` value from the
`ImageNormalize` instance is used (the default of which is
`False`).
invalid : `None` or float, optional
Value to assign NaN values generated by this class. NaNs in
the input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value <
0). If `None`, then the `ImageNormalize` instance value is
used. This keyword has no effect if ``clip=True``.
"""
if clip is None:
clip = self.clip
if invalid is None:
invalid = self.invalid
if isinstance(values, ma.MaskedArray):
if clip:
mask = False
else:
mask = values.mask
values = values.filled(self.vmax)
else:
mask = False
# Make sure scalars get broadcast to 1-d
if np.isscalar(values):
values = np.array([values], dtype=float)
else:
# copy because of in-place operations after
values = np.array(values, copy=True, dtype=float)
# Define vmin and vmax if not None
self._set_limits(values)
# Normalize based on vmin and vmax
np.subtract(values, self.vmin, out=values)
np.true_divide(values, self.vmax - self.vmin, out=values)
# Clip to the 0 to 1 range
if clip:
values = np.clip(values, 0., 1., out=values)
# Stretch values
if self.stretch._supports_invalid_kw:
values = self.stretch(values, out=values, clip=False,
invalid=invalid)
else:
values = self.stretch(values, out=values, clip=False)
# Convert to masked array for matplotlib
return ma.array(values, mask=mask)
def inverse(self, values, invalid=None):
# Find unstretched values in range 0 to 1
if self.inverse_stretch._supports_invalid_kw:
values_norm = self.inverse_stretch(values, clip=False,
invalid=invalid)
else:
values_norm = self.inverse_stretch(values, clip=False)
# Scale to original range
return values_norm * (self.vmax - self.vmin) + self.vmin
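# Illustrative usage sketch (not part of the original module): an ImageNormalize
# instance is normally handed to matplotlib through the ``norm`` keyword, e.g.
#
#   from astropy.visualization import MinMaxInterval, SqrtStretch
#   norm = ImageNormalize(image, interval=MinMaxInterval(), stretch=SqrtStretch())
#   plt.imshow(image, norm=norm, origin='lower')
#   plt.colorbar()
#
# where ``image`` is any 2D array of pixel values.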
def simple_norm(data, stretch='linear', power=1.0, asinh_a=0.1, min_cut=None,
max_cut=None, min_percent=None, max_percent=None,
percent=None, clip=False, log_a=1000, invalid=-1.0):
"""
Return a Normalization class that can be used for displaying images
with Matplotlib.
This function enables only a subset of image stretching functions
available in `~astropy.visualization.mpl_normalize.ImageNormalize`.
This function is used by the
``astropy.visualization.scripts.fits2bitmap`` script.
Parameters
----------
data : `~numpy.ndarray`
The image array.
    stretch : {'linear', 'sqrt', 'power', 'log', 'asinh'}, optional
The stretch function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
    min_cut : float, optional
        The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
        The default is the image minimum. ``min_cut`` overrides
        ``min_percent``.
    max_cut : float, optional
        The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
        image. The default is the image maximum. ``max_cut`` overrides
        ``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
        level will be set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
clip : bool, optional
If `True`, data values outside the [0:1] range are clipped to
the [0:1] range.
log_a : float, optional
The log index for ``stretch='log'``. The default is 1000.
invalid : `None` or float, optional
Value to assign NaN values generated by the normalization. NaNs
in the input ``data`` array are not changed. For matplotlib
normalization, the ``invalid`` value should map to the
matplotlib colormap "under" value (i.e., any finite value < 0).
If `None`, then NaN values are not replaced. This keyword has
no effect if ``clip=True``.
Returns
-------
result : `ImageNormalize` instance
An `ImageNormalize` instance that can be used for displaying
images with Matplotlib.
"""
if percent is not None:
interval = PercentileInterval(percent)
elif min_percent is not None or max_percent is not None:
interval = AsymmetricPercentileInterval(min_percent or 0.,
max_percent or 100.)
elif min_cut is not None or max_cut is not None:
interval = ManualInterval(min_cut, max_cut)
else:
interval = MinMaxInterval()
if stretch == 'linear':
stretch = LinearStretch()
elif stretch == 'sqrt':
stretch = SqrtStretch()
elif stretch == 'power':
stretch = PowerStretch(power)
elif stretch == 'log':
stretch = LogStretch(log_a)
elif stretch == 'asinh':
stretch = AsinhStretch(asinh_a)
else:
raise ValueError(f'Unknown stretch: {stretch}.')
vmin, vmax = interval.get_limits(data)
return ImageNormalize(vmin=vmin, vmax=vmax, stretch=stretch, clip=clip,
invalid=invalid)
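# Illustrative example (not part of the original module): the returned
# ImageNormalize instance plugs straight into imshow, e.g.
#
#   norm = simple_norm(image, stretch='sqrt', percent=99.0)
#   plt.imshow(image, norm=norm, origin='lower')
#
# which uses a 99% percentile interval and a square-root stretch.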
# used in imshow_norm
_norm_sig = inspect.signature(ImageNormalize)
def imshow_norm(data, ax=None, imshow_only_kwargs={}, **kwargs):
""" A convenience function to call matplotlib's `matplotlib.pyplot.imshow`
function, using an `ImageNormalize` object as the normalization.
Parameters
----------
data : 2D or 3D array_like - see `~matplotlib.pyplot.imshow`
The data to show. Can be whatever `~matplotlib.pyplot.imshow` and
`ImageNormalize` both accept.
ax : None or `~matplotlib.axes.Axes`, optional
If None, use pyplot's imshow. Otherwise, calls ``imshow`` method of the
supplied axes.
imshow_only_kwargs : dict, optional
        Deprecated since Astropy v4.1. Note that setting both ``norm``
and ``vmin/vmax`` is deprecated in ``matplotlib >= 3.3``.
Arguments to be passed directly to `~matplotlib.pyplot.imshow` without
first trying `ImageNormalize`. This is only for keywords that have the
same name in both `ImageNormalize` and `~matplotlib.pyplot.imshow` - if
you want to set the `~matplotlib.pyplot.imshow` keywords only, supply
them in this dictionary.
kwargs : dict, optional
All other keyword arguments are parsed first by the
`ImageNormalize` initializer, then to
`~matplotlib.pyplot.imshow`.
Returns
-------
result : tuple
A tuple containing the `~matplotlib.image.AxesImage` generated
by `~matplotlib.pyplot.imshow` as well as the `ImageNormalize`
instance.
Notes
-----
The ``norm`` matplotlib keyword is not supported.
Examples
--------
.. plot::
:include-source:
import numpy as np
import matplotlib.pyplot as plt
from astropy.visualization import (imshow_norm, MinMaxInterval,
SqrtStretch)
# Generate and display a test image
image = np.arange(65536).reshape((256, 256))
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
im, norm = imshow_norm(image, ax, origin='lower',
interval=MinMaxInterval(),
stretch=SqrtStretch())
fig.colorbar(im)
"""
if imshow_only_kwargs:
warnings.warn('imshow_only_kwargs is deprecated since v4.1 and will '
'be removed in a future version.',
AstropyDeprecationWarning)
if 'X' in kwargs:
raise ValueError('Cannot give both ``X`` and ``data``')
if 'norm' in kwargs:
raise ValueError('There is no point in using imshow_norm if you give '
'the ``norm`` keyword - use imshow directly if you '
'want that.')
imshow_kwargs = dict(kwargs)
norm_kwargs = {'data': data}
for pname in _norm_sig.parameters:
if pname in kwargs:
norm_kwargs[pname] = imshow_kwargs.pop(pname)
for k, v in imshow_only_kwargs.items():
if k not in _norm_sig.parameters:
# the below is not strictly "has to be true", but is here so that
# users don't start using both imshow_only_kwargs *and* keyword
# arguments to this function, as that makes for more confusing
# user code
raise ValueError('You provided a keyword to imshow_only_kwargs '
'({}) that is not a keyword for ImageNormalize. '
'This is not supported. Instead you should '
'pass the keyword directly into imshow_norm'
.format(k))
imshow_kwargs[k] = v
imshow_kwargs['norm'] = ImageNormalize(**norm_kwargs)
if ax is None:
imshow_result = plt.imshow(data, **imshow_kwargs)
else:
imshow_result = ax.imshow(data, **imshow_kwargs)
return imshow_result, imshow_kwargs['norm']
|
the-stack_0_16651 | import logging
from airflow import DAG
from operators.candles_aggregation import CandleAggregation
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
default_args = {
    'start_date': datetime(2020, 12, 23),
    'owner': 'airflow',
    'retries': 3,
    'retry_delay': timedelta(minutes=1),
}
# 'max_active_runs' and 'catchup' are DAG-level settings rather than task
# defaults, so they are passed to the DAG constructor instead of default_args.
with DAG(dag_id='trading_candles_aggregation', schedule_interval="@monthly", default_args=default_args,
         max_active_runs=1, catchup=True) as dag:
    aggregated_candles = CandleAggregation(task_id='candles_aggregation', provide_context=True, scope='month')
|
the-stack_0_16652 | """Tests for the intent helpers."""
import unittest
import voluptuous as vol
from homeassistant.core import State
from homeassistant.helpers import (intent, config_validation as cv)
import pytest
class MockIntentHandler(intent.IntentHandler):
"""Provide a mock intent handler."""
def __init__(self, slot_schema):
"""Initialize the mock handler."""
self.slot_schema = slot_schema
def test_async_match_state():
"""Test async_match_state helper."""
state1 = State('light.kitchen', 'on')
state2 = State('switch.kitchen', 'on')
state = intent.async_match_state(None, 'kitch', [state1, state2])
assert state is state1
class TestIntentHandler(unittest.TestCase):
"""Test the Home Assistant event helpers."""
def test_async_validate_slots(self):
"""Test async_validate_slots of IntentHandler."""
handler1 = MockIntentHandler({
vol.Required('name'): cv.string,
})
with pytest.raises(vol.error.MultipleInvalid):
handler1.async_validate_slots({})
with pytest.raises(vol.error.MultipleInvalid):
handler1.async_validate_slots({'name': 1})
with pytest.raises(vol.error.MultipleInvalid):
handler1.async_validate_slots({'name': 'kitchen'})
handler1.async_validate_slots({'name': {'value': 'kitchen'}})
handler1.async_validate_slots({
'name': {'value': 'kitchen'},
'probability': {'value': '0.5'}
})
|
the-stack_0_16654 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# evaluate_mcd.py
# Copyright (C) 2020 Wen-Chin HUANG
#
# Distributed under terms of the MIT license.
#
import sys
import argparse
import logging
import numpy as np
import scipy
from fastdtw import fastdtw
from joblib import Parallel, delayed
from pathlib import Path
import soundfile as sf
from sprocket.speech import FeatureExtractor
from crank.net.trainer.dataset import read_feature
from crank.utils import load_yaml, open_featsscp
from crank.utils import low_cut_filter
def get_world_features(wavpath, spk, conf, spkr_conf):
x, fs = sf.read(str(wavpath))
    x = np.array(x, dtype=np.float64)
x = low_cut_filter(x, fs, cutoff=70)
fe = FeatureExtractor(
analyzer="world",
fs=conf["feature"]["fs"],
fftl=conf["feature"]["fftl"],
shiftms=conf["feature"]["shiftms"],
minf0=spkr_conf[spk]["minf0"],
maxf0=spkr_conf[spk]["maxf0"],
)
cv_f0, _, _ = fe.analyze(x)
cv_mcep = fe.mcep(
dim=conf["feature"]["mcep_dim"], alpha=conf["feature"]["mcep_alpha"]
)
return cv_mcep, cv_f0
def calculate(cv_path, gt_file_list, conf, spkr_conf):
basename = cv_path.stem
number, orgspk, tarspk = basename.split("_")
tarspk = tarspk.split("-")[-1]
orgspk = orgspk.split("-")[-1]
# get converted features. If mcep, from h5; else waveform
if conf["output_feat_type"] == "mcep":
cv_mcep = read_feature(cv_path, "feat")
cv_f0 = read_feature(cv_path, "f0")
else:
cv_mcep, cv_f0 = get_world_features(cv_path, tarspk, conf, spkr_conf)
# get ground truth features
gt_mcep = read_feature(gt_file_list[f"{tarspk}_{number}"], "mcep")
gt_f0 = read_feature(gt_file_list[f"{tarspk}_{number}"], "f0")
# non-silence parts
gt_idx = np.where(gt_f0 > 0)[0]
gt_mcep = gt_mcep[gt_idx]
cv_idx = np.where(cv_f0 > 0)[0]
cv_mcep = cv_mcep[cv_idx]
# DTW
_, path = fastdtw(cv_mcep, gt_mcep, dist=scipy.spatial.distance.euclidean)
twf = np.array(path).T
cv_mcep_dtw = cv_mcep[twf[0]]
gt_mcep_dtw = gt_mcep[twf[1]]
# MCD
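    # Per-frame mel-cepstral distortion:
    #   MCD = (10 / ln 10) * sqrt(2 * sum_d (mc_d^cv - mc_d^gt)^2)
    # averaged over all DTW-aligned frames; the result is in dB.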
diff2sum = np.sum((cv_mcep_dtw - gt_mcep_dtw) ** 2, 1)
mcd = np.mean(10.0 / np.log(10.0) * np.sqrt(2 * diff2sum), 0)
return f"{orgspk}-{tarspk}-{number}", mcd
def main():
parser = argparse.ArgumentParser(description="calculate MCD.")
parser.add_argument("--conf", type=str, help="configuration file")
parser.add_argument("--spkr_conf", type=str, help="speaker configuration file")
parser.add_argument(
"--featdir",
type=str,
help="root directory of ground truth h5",
)
parser.add_argument("--outwavdir", type=str, help="converted waveform directory")
parser.add_argument(
"--out",
type=str,
help="if omitted, then output to sys.stdout",
)
parser.add_argument("--n_jobs", default=1, type=int, help="number of parallel jobs")
args = parser.parse_args()
# logging info
logging.basicConfig(
level=logging.INFO,
stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) " "%(levelname)s: %(message)s",
)
# load configure files
conf = load_yaml(args.conf)
spkr_conf = load_yaml(args.spkr_conf)
# load converted files. If mcep, use h5; else, waveform
if conf["output_feat_type"] == "mcep":
converted_files = sorted(list(Path(args.outwavdir).glob("*.h5")))
else:
converted_files = sorted(list(Path(args.outwavdir).rglob("*.wav")))
logging.info(f"number of utterances = {len(converted_files)}")
# load ground truth scp
featdir = Path(args.featdir) / conf["feature"]["label"]
gt_feats = open_featsscp(featdir / "eval" / "feats.scp")
if args.out is None:
out = sys.stdout
else:
out = open(args.out, "w", encoding="utf-8")
MCD_list = Parallel(args.n_jobs)(
[
delayed(calculate)(cv_path, gt_feats, conf, spkr_conf)
for cv_path in converted_files
]
)
# summarize by pair
pairwise_MCD = {}
for k, v in MCD_list:
orgspk, tarspk, _ = k.split("-")
pair = orgspk + " " + tarspk
if pair not in pairwise_MCD:
pairwise_MCD[pair] = []
pairwise_MCD[pair].append(v)
for k in sorted(pairwise_MCD.keys()):
mcd_list = pairwise_MCD[k]
mean_mcd = float(sum(mcd_list) / len(mcd_list))
out.write(f"{k} {mean_mcd:.3f}\n")
if __name__ == "__main__":
main()
|
the-stack_0_16655 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('README.md') as readme_file:
readme = readme_file.read()
requirements = [
"brainio @ git+https://github.com/brain-score/brainio",
"brain-score @ git+https://github.com/brain-score/brain-score",
"h5py",
"Pillow",
"numpy",
"tqdm",
"torch",
"torchvision",
"tensorflow==1.15",
"keras==2.3.1",
"scikit-learn",
"result_caching @ git+https://github.com/brain-score/result_caching",
]
setup(
name='model-tools',
version='0.1.0',
description="Tools for predictive models of brain processing.",
long_description=readme,
author="Martin Schrimpf",
author_email='[email protected]',
url='https://github.com/brain-score/model-tools',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=requirements,
license="MIT license",
zip_safe=False,
keywords='brain-score',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.7',
],
test_suite='tests',
)
|
the-stack_0_16657 | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 9 15:36:04 2021
@author: TT User
"""
import numpy as np
import matplotlib.pyplot as plt
from time import gmtime, strftime  # strftime formats a time as a string; gmtime returns the time since the start of the epoch as an object
from scipy.signal import butter, lfilter
#%%
LOG_DIR = "logs/"
PLOT_DIR = "plots/"
class QRSDetectorOffline(object):
"""
    Python Offline ECG QRS Detector based on the Pan-Tompkins algorithm.
    The QRS complex corresponds to the depolarization of the right and left ventricles of the
    human heart. It is the most visually obvious part of the ECG signal. QRS complex detection
    is essential for time-domain ECG signal analyses, namely heart rate variability. It makes
    it possible to compute inter-beat interval (RR interval) values that correspond to the time
    between two consecutive R peaks. Thus, a QRS complex detector is an ECG-based heart
    contraction detector.
    Offline version detects QRS complexes in a pre-recorded ECG signal dataset (e.g. stored in .csv format).
    This implementation of a QRS Complex Detector is by no means a certified medical tool and
    should not be used in health monitoring. It was created and used for experimental purposes
    in psychophysiology and psychology.
MIT License
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#%%
def __init__(self, ecg_data_path, verbose=True, log_data=False, plot_data=False, show_plot=False):
"""
QRSDetectorOffline class initialisation method.
:param string ecg_data_path: path to the ECG dataset
:param bool verbose: flag for printing the results
:param bool log_data: flag for logging the results
:param bool plot_data: flag for plotting the results to a file
:param bool show_plot: flag for showing generated results plot - will not show anything if plot is not generated
"""
# Configuration parameters.
self.ecg_data_path = ecg_data_path
self.signal_frequency = 250 # Set ECG device frequency in samples per second here.
self.filter_lowcut = 0.01
#self.filter_highcut = 15.0
self.filter_highcut = 0.99
self.filter_order = 1
self.integration_window = 15 # Change proportionally when adjusting frequency (in samples).
self.findpeaks_limit = 0.35
self.findpeaks_spacing = 50 # Change proportionally when adjusting frequency (in samples).
self.refractory_period = 120 # Change proportionally when adjusting frequency (in samples).
self.qrs_peak_filtering_factor = 0.125
self.noise_peak_filtering_factor = 0.125
self.qrs_noise_diff_weight = 0.25
# Loaded ECG data.
self.ecg_data_raw = None
# Measured and calculated values.
self.filtered_ecg_measurements = None
self.differentiated_ecg_measurements = None
self.squared_ecg_measurements = None
self.integrated_ecg_measurements = None
self.detected_peaks_indices = None
self.detected_peaks_values = None
self.qrs_peak_value = 0.0
self.noise_peak_value = 0.0
self.threshold_value = 0.0
# Detection results.
self.qrs_peaks_indices = np.array([], dtype=int)
self.noise_peaks_indices = np.array([], dtype=int)
# Final ECG data and QRS detection results array - samples with detected QRS are marked with 1 value.
self.ecg_data_detected = None
# Run whole detector flow.
self.load_ecg_data()
self.detect_peaks()
self.detect_qrs()
if verbose:
self.print_detection_data()
if log_data:
self.log_path = "{:s}QRS_offline_detector_log_{:s}.csv".format(LOG_DIR,
strftime("%Y_%m_%d_%H_%M_%S", gmtime()))
self.log_detection_data()
if plot_data:
self.plot_path = "{:s}QRS_offline_detector_plot_{:s}.png".format(PLOT_DIR,
strftime("%Y_%m_%d_%H_%M_%S", gmtime()))
self.plot_detection_data(show_plot=show_plot)
#%%
"""Loading ECG measurements data methods."""
def load_ecg_data(self):
"""
Method loading ECG data set from a file.
"""
self.ecg_data_raw = np.loadtxt(self.ecg_data_path, skiprows=1, delimiter=',')
#%%
"""ECG measurements data processing methods."""
def detect_peaks(self):
"""
Method responsible for extracting peaks from loaded ECG measurements data through measurements processing.
"""
# Extract measurements from loaded ECG data.
        ecg_measurements = self.ecg_data_raw[:, 1]  # take the measurement column; column zero is the TIMESTAMP
# Measurements filtering - 0-15 Hz band pass filter.
self.filtered_ecg_measurements = self.bandpass_filter(ecg_measurements, lowcut=self.filter_lowcut,
highcut=self.filter_highcut, signal_freq=self.signal_frequency,
filter_order=self.filter_order)
self.filtered_ecg_measurements[:5] = self.filtered_ecg_measurements[5]
# Derivative - provides QRS slope information.
self.differentiated_ecg_measurements = np.ediff1d(self.filtered_ecg_measurements)
# Squaring - intensifies values received in derivative.
self.squared_ecg_measurements = self.differentiated_ecg_measurements ** 2
# Moving-window integration.
self.integrated_ecg_measurements = np.convolve(self.squared_ecg_measurements, np.ones(self.integration_window))
# Fiducial mark - peak detection on integrated measurements.
self.detected_peaks_indices = self.findpeaks(data=self.integrated_ecg_measurements,
limit=self.findpeaks_limit,
spacing=self.findpeaks_spacing)
self.detected_peaks_values = self.integrated_ecg_measurements[self.detected_peaks_indices]
#%%
"""QRS detection methods."""
def detect_qrs(self):
"""
Method responsible for classifying detected ECG measurements peaks either as noise or as QRS complex (heart beat).
"""
for detected_peak_index, detected_peaks_value in zip(self.detected_peaks_indices, self.detected_peaks_values):
try:
last_qrs_index = self.qrs_peaks_indices[-1]
except IndexError:
last_qrs_index = 0
# After a valid QRS complex detection, there is a 200 ms refractory period before next one can be detected.
if detected_peak_index - last_qrs_index > self.refractory_period or not self.qrs_peaks_indices.size:
# Peak must be classified either as a noise peak or a QRS peak.
# To be classified as a QRS peak it must exceed dynamically set threshold value.
if detected_peaks_value > self.threshold_value:
self.qrs_peaks_indices = np.append(self.qrs_peaks_indices, detected_peak_index)
# Adjust QRS peak value used later for setting QRS-noise threshold.
self.qrs_peak_value = self.qrs_peak_filtering_factor * detected_peaks_value + \
(1 - self.qrs_peak_filtering_factor) * self.qrs_peak_value
else:
self.noise_peaks_indices = np.append(self.noise_peaks_indices, detected_peak_index)
# Adjust noise peak value used later for setting QRS-noise threshold.
self.noise_peak_value = self.noise_peak_filtering_factor * detected_peaks_value + \
(1 - self.noise_peak_filtering_factor) * self.noise_peak_value
# Adjust QRS-noise threshold value based on previously detected QRS or noise peaks value.
self.threshold_value = self.noise_peak_value + \
self.qrs_noise_diff_weight * (self.qrs_peak_value - self.noise_peak_value)
# Create array containing both input ECG measurements data and QRS detection indication column.
# We mark QRS detection with '1' flag in 'qrs_detected' log column ('0' otherwise).
measurement_qrs_detection_flag = np.zeros([len(self.ecg_data_raw[:, 1]), 1])
measurement_qrs_detection_flag[self.qrs_peaks_indices] = 1
self.ecg_data_detected = np.append(self.ecg_data_raw, measurement_qrs_detection_flag, 1)
#%%
"""Results reporting methods."""
def print_detection_data(self):
"""
Method responsible for printing the results.
"""
print("qrs peaks indices")
print(self.qrs_peaks_indices)
print("noise peaks indices")
print(self.noise_peaks_indices)
def log_detection_data(self):
"""
Method responsible for logging measured ECG and detection results to a file.
"""
with open(self.log_path, "wb") as fin:
fin.write(b"timestamp,ecg_measurement,qrs_detected\n")
np.savetxt(fin, self.ecg_data_detected, delimiter=",")
def plot_detection_data(self, show_plot=False):
"""
Method responsible for plotting detection results.
:param bool show_plot: flag for plotting the results and showing plot
"""
def plot_data(axis, data, title='', fontsize=10):
axis.set_title(title, fontsize=fontsize)
axis.grid(which='both', axis='both', linestyle='--')
axis.plot(data, color="salmon", zorder=1)
def plot_points(axis, values, indices):
axis.scatter(x=indices, y=values[indices], c="black", s=50, zorder=2)
plt.close('all')
fig, axarr = plt.subplots(6, sharex=True, figsize=(15, 18))
plot_data(axis=axarr[0], data=self.ecg_data_raw[:, 1], title='Raw ECG measurements')
plot_data(axis=axarr[1], data=self.filtered_ecg_measurements, title='Filtered ECG measurements')
plot_data(axis=axarr[2], data=self.differentiated_ecg_measurements, title='Differentiated ECG measurements')
plot_data(axis=axarr[3], data=self.squared_ecg_measurements, title='Squared ECG measurements')
plot_data(axis=axarr[4], data=self.integrated_ecg_measurements, title='Integrated ECG measurements with QRS peaks marked (black)')
plot_points(axis=axarr[4], values=self.integrated_ecg_measurements, indices=self.qrs_peaks_indices)
plot_data(axis=axarr[5], data=self.ecg_data_detected[:, 1], title='Raw ECG measurements with QRS peaks marked (black)')
plot_points(axis=axarr[5], values=self.ecg_data_detected[:, 1], indices=self.qrs_peaks_indices)
plt.tight_layout()
fig.savefig(self.plot_path)
if show_plot:
plt.show()
plt.close()
#%%
"""Tools methods."""
def bandpass_filter(self, data, lowcut, highcut, signal_freq, filter_order):
"""
Method responsible for creating and applying Butterworth filter.
:param deque data: raw data
:param float lowcut: filter lowcut frequency value
:param float highcut: filter highcut frequency value
:param int signal_freq: signal frequency in samples per second (Hz)
:param int filter_order: filter order
:return array: filtered data
"""
nyquist_freq = 0.5 * signal_freq #nyquist=fs/2
low = lowcut / nyquist_freq
high = highcut / nyquist_freq
#b, a = butter(filter_order, [low, high], btype="band",output='ba')
b,a = butter(filter_order,[low, high], btype="band")
#b, a = butter(filter_order, [0,1], btype="band")
y = lfilter(b, a, data)
return y
#%%
def findpeaks(self, data, spacing=1, limit=None):
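        """
        Finds peaks (local maxima) in the data.
        A sample is reported as a peak when it is strictly greater than every
        sample within ``spacing`` positions on both sides and, when ``limit``
        is given, its value also exceeds ``limit``.
        :param ndarray data: input signal values
        :param int spacing: minimum number of samples between neighbouring peaks
        :param float limit: minimum peak value; smaller candidates are discarded
        :return ndarray: indices of the detected peaks
        """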
len = data.size
x = np.zeros(len + 2 * spacing)
x[:spacing] = data[0] - 1.e-6
x[-spacing:] = data[-1] - 1.e-6
x[spacing:spacing + len] = data
peak_candidate = np.zeros(len)
peak_candidate[:] = True
for s in range(spacing):
start = spacing - s - 1
h_b = x[start: start + len] # before
start = spacing
h_c = x[start: start + len] # central
start = spacing + s + 1
h_a = x[start: start + len] # after
peak_candidate = np.logical_and(peak_candidate, np.logical_and(h_c > h_b, h_c > h_a))
ind = np.argwhere(peak_candidate)
ind = ind.reshape(ind.size)
if limit is not None:
ind = ind[data[ind] > limit]
return ind
#%%
if __name__ == "__main__":
qrs_detector = QRSDetectorOffline(ecg_data_path="ecg_data_1.csv", verbose=True,
log_data=True, plot_data=True, show_plot=False)
|
the-stack_0_16662 | # coding=utf8
from models import c3d_model
from keras.optimizers import SGD
import numpy as np
import cv2
import datetime
import os
import configparser
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
def main(video_stream):
# read config.txt
    root_dir = os.path.abspath(os.path.dirname(__file__))  # get the directory containing this file
configpath = os.path.join(root_dir, "config.txt")
config = configparser.ConfigParser()
config.read(configpath)
classInd_path = config.get("C3D", "classInd_path")
weights_path = config.get("C3D", "weights_path")
lr = config.get("C3D", "lr")
momentum = config.get("C3D", "momentum")
image_read = config.get("image", "image_read")
image_write = config.get("image", "image_write")
video_image = config.get("choose", "video_image")
with open(classInd_path, 'r') as f:
class_names = f.readlines()
f.close()
# init model
num = 1
camera_ids =video_stream.keys()
cap_write ={}
model = c3d_model()
sgd = SGD(lr=float(lr), momentum=float(momentum), nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
model.summary()
model.load_weights(weights_path, by_name=True)
def multi_detecion(clip, frame):
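        # Preprocess a 16-frame clip for the C3D network: subtract/divide what
        # appear to be per-channel mean/std statistics from training (the
        # constants below are taken as given), take a 112x112 centre crop of
        # the 128x171 frames, reorder the axes to the layout the model expects,
        # run the prediction and draw the predicted label and its probability
        # onto the current frame.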
inputs = np.array(clip).astype(np.float32)
inputs = np.expand_dims(inputs, axis=0)
inputs[..., 0] -= 99.9
inputs[..., 1] -= 92.1
inputs[..., 2] -= 82.6
inputs[..., 0] /= 65.8
inputs[..., 1] /= 62.3
inputs[..., 2] /= 60.3
inputs = inputs[:, :, 8:120, 30:142, :]
inputs = np.transpose(inputs, (0, 2, 3, 1, 4))
pred = model.predict(inputs)
label = np.argmax(pred[0])
cv2.putText(frame, class_names[label].split(' ')[-1].strip(), (20, 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.6,
(0, 0, 255), 1)
cv2.putText(frame, "prob: %.4f" % pred[0][label], (20, 40),
cv2.FONT_HERSHEY_SIMPLEX, 0.6,
(0, 0, 255), 1)
clip.pop(0)
return (frame)
for i in camera_ids:
cap_write['cap_'+i] =cv2.VideoCapture(video_stream[i][1])
size_1 = (int(cap_write['cap_'+i].get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap_write['cap_'+i].get(cv2.CAP_PROP_FRAME_HEIGHT)))
fps_1 = cap_write['cap_'+i].get(cv2.CAP_PROP_FPS)
cap_write["write_" + i]= cv2.VideoWriter(video_stream[i][2], cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), fps_1, size_1)
if video_image == 'video':
while True:
if num % 2 == 0:
camera = 'camera_1'
else:
camera = 'camera_2'
ret_1, frame_1 = cap_write['cap_'+str(camera)].read()
if ret_1:
tmp = cv2.cvtColor(frame_1, cv2.COLOR_BGR2RGB)
video_stream[camera][0].append(cv2.resize(tmp, (171, 128)))
if len(video_stream[camera][0]) == 16:
frame_1 = multi_detecion(video_stream[camera][0], frame_1)
print("16")
cap_write['write_'+str(camera)].write(frame_1)
print (camera+"success")
num =num + 1
elif video_image == 'image':
fileList = os.listdir(image_read)
fileList.reverse()
clip = []
for fileName in fileList:
frame = cv2.imread(image_read + '/' + fileName)
clip.append(cv2.resize(frame, (171, 128)))
if len(clip) == 16:
frame = multi_detecion(clip, frame)
cv2.imwrite(image_write + '/' + str(num) + ".jpg", frame)
print("write success")
num = num+1
else:
print("choose image or video")
#for i in camera_ids:
# cap_write['cap_' + i].release()
# print('release'+i)
if __name__ == '__main__':
    video_stream = {
        'camera_1': [[], '/home/shixi/C3D-keras/videos/shooting.mpg', 'results/normal_test.mp4'],
        'camera_2': [[], '/home/shixi/C3D-keras/datasets/ucf101/abnormal_event/abnormal-event_100.avi', 'results/abnormal_test.mp4']
    }
main(video_stream) |
the-stack_0_16664 | import os
import jwt
from functools import wraps
from flask import request, make_response, jsonify,abort
def verify_tokens():
"""
Method to verify that auth token is valid
"""
token = None
if 'Authorization' in request.headers:
token = request.headers['Authorization']
if not token:
abort(make_response(jsonify({"Message": "You need to login"}), 401))
try:
data = jwt.decode(token, os.getenv('JWT_SECRET_KEY', default='SdaHv342nx!jknr837bjwd?c,lsajjjhw673hdsbgeh'))
return data["email"], data["user_id"]
except:
abort(make_response(jsonify({
"Message":"The token is invalid"
}), 403)) |
the-stack_0_16665 | import zhdate
from nonebot import on_command, CommandSession, permission, log
from .get_divination_of_thing import get_divination_of_thing
from omega_miya.plugins.Group_manage.group_permissions import *
__plugin_name__ = '求签'
__plugin_usage__ = r'''【求签】
使用这个命令可以对任何事求运势, 包括且不限于吃饭、睡懒觉、DD
用法:
/求签 [所求之事]'''
# The on_command decorator declares the function as a command handler
@on_command('maybe', aliases='求签', only_to_me=False, permission=permission.EVERYBODY)
async def maybe(session: CommandSession):
group_id = session.event.group_id
user_id = session.event.user_id
session_type = session.event.detail_type
if session_type == 'group':
if not has_command_permissions(group_id):
await session.send('本群组没有执行命令的权限呢QAQ')
log.logger.info(f'{__name__}: 群组: {group_id} 没有命令权限, 已中止命令执行')
return
elif session_type == 'private':
await session.send('本命令不支持在私聊中使用QAQ')
log.logger.info(f'{__name__}: 用户: {session.event.user_id} 在{session_type}中使用了命令, 已中止命令执行')
return
else:
log.logger.info(f'{__name__}: 用户: {session.event.user_id} 在{session_type}环境中使用了命令, 已中止命令执行')
return
    # Get the subject from the session state (session.state); if it is missing, ask the user
divination = session.get('divination', prompt='你想问什么事呢?')
try:
        # Requester's nickname; prefer the group card name
divination_user = session.event['sender']['card']
if not divination_user:
divination_user = session.event['sender']['nickname']
        # Draw the fortune
divination_result = await get_divination_of_thing(divination=divination, divination_user=user_id)
        # Send the result to the user
date_luna = zhdate.ZhDate.today().chinese()
msg = f'今天是{date_luna}\n{divination_user}所求事项: 【{divination}】\n\n结果: 【{divination_result}】'
await session.send(msg)
except Exception as e:
log.logger.warning(f'{__name__}: 群组: {group_id}, 用户: {session.event.user_id} 试图使用命令maybe时发生了错误: {e}')
# The args_parser decorator declares the function as the command's argument parser
# The argument parser turns the user's raw input into the data the command actually needs
@maybe.args_parser
async def _(session: CommandSession):
group_id = session.event.group_id
session_type = session.event.detail_type
if session_type == 'group':
if not has_command_permissions(group_id):
return
elif session_type == 'private':
return
else:
return
    # Strip leading and trailing whitespace from the message
stripped_arg = session.current_arg_text.strip()
if session.is_first_run:
        # First run of this command (first time entering the command session)
if stripped_arg:
            # Argument was non-empty on the first run
session.state['divination'] = stripped_arg
return
if not stripped_arg:
        # The user did not send any valid characters (only whitespace), so ask again
        # session.pause() sends the prompt and pauses the current session (code after this line will not run)
session.pause('你还没告诉我你想问什么事呢~')
    # If the command is currently asking the user for more information and the input is valid, store it in the session state
session.state[session.current_key] = stripped_arg
|
the-stack_0_16666 | import os
import base64
import hashlib
import datetime
# parse an ISO formatted timestamp string, converting it to a python datetime object;
# note: this function is also defined in server code
def parse_json_datetime(json_timestamp):
assert json_timestamp.endswith('Z')
format = ''
if '.' in json_timestamp:
format = '%Y-%m-%dT%H:%M:%S.%f'
else:
format = '%Y-%m-%dT%H:%M:%S'
if json_timestamp.endswith(' Z'):
format += ' Z'
else:
format += 'Z'
return datetime.datetime.strptime(json_timestamp, format)
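# Example (illustrative): parse_json_datetime('2021-03-04T12:30:00.123Z')
# returns datetime.datetime(2021, 3, 4, 12, 30, 0, 123000), a naive datetime
# representing UTC.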
# build an auth_code string by hashing a secret key
def build_auth_code(secret_key):
nonce = base64.b64encode(os.urandom(32)).decode()
key_hash = base64.b64encode(hashlib.sha512((nonce + ';' + secret_key).encode()).digest()).decode()
key_part = secret_key[:3] + secret_key[-3:]
return key_part + ';' + nonce + ';' + key_hash
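# Example (illustrative): for secret_key 'abc123xyz' the result has the form
#   'abcxyz;<base64 nonce>;<base64 of sha512(nonce + ";" + secret_key)>'
# i.e. the first and last three characters of the key, the random nonce and
# the hash, separated by semicolons.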
|
the-stack_0_16667 | import os
class Config(object):
"""Parent configuration class."""
DEBUG = False
TESTING = False
SECRET_KEY = os.getenv('SECRET')
class DevelopmentConfig(Config):
"""Configurations for Development."""
DEBUG = True
TESTING = True
class TestingConfig(Config):
"""Configurations for Testing, with a separate test database."""
TESTING = True
DEBUG = True
class ProductionConfig(Config):
"""Configurations for Production."""
DEBUG = False
TESTING = False
app_config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig
} |
the-stack_0_16668 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ReduceJoin op from string_ops."""
import itertools
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def _input_array(num_dims):
"""Creates an ndarray where each element is the binary of its linear index.
Args:
num_dims: The number of dimensions to create.
Returns:
An ndarray of shape [2] * num_dims.
"""
formatter = "{:0%db}" % num_dims
strings = [formatter.format(i) for i in xrange(2**num_dims)]
return np.array(strings, dtype="S%d" % num_dims).reshape([2] * num_dims)
def _joined_array(num_dims, reduce_dim):
"""Creates an ndarray with the result from reduce_join on input_array.
Args:
num_dims: The number of dimensions of the original input array.
reduce_dim: The dimension to reduce.
Returns:
An ndarray of shape [2] * (num_dims - 1).
"""
formatter = "{:0%db}" % (num_dims - 1)
result = np.zeros(shape=[2] * (num_dims - 1), dtype="S%d" % (2 * num_dims))
flat = result.ravel()
for i in xrange(2**(num_dims - 1)):
dims = formatter.format(i)
flat[i] = "".join([(dims[:reduce_dim] + "%d" + dims[reduce_dim:]) % j
for j in xrange(2)])
return result
class UnicodeTestCase(test.TestCase):
"""Test case with Python3-compatible string comparator."""
def assertAllEqualUnicode(self, truth, actual):
self.assertAllEqual(
np.array(truth).astype("U"), np.array(actual).astype("U"))
class ReduceJoinTestHelperTest(UnicodeTestCase):
"""Tests for helper functions."""
def testInputArray(self):
num_dims = 3
truth = ["{:03b}".format(i) for i in xrange(2**num_dims)]
output_array = _input_array(num_dims).reshape([-1])
self.assertAllEqualUnicode(truth, output_array)
def testJoinedArray(self):
num_dims = 3
truth_dim_zero = [["000100", "001101"], ["010110", "011111"]]
truth_dim_one = [["000010", "001011"], ["100110", "101111"]]
truth_dim_two = [["000001", "010011"], ["100101", "110111"]]
output_array_dim_zero = _joined_array(num_dims, reduce_dim=0)
output_array_dim_one = _joined_array(num_dims, reduce_dim=1)
output_array_dim_two = _joined_array(num_dims, reduce_dim=2)
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqualUnicode(truth_dim_two, output_array_dim_two)
class ReduceJoinTest(UnicodeTestCase):
def _testReduceJoin(self,
input_array,
truth,
truth_shape,
axis,
keep_dims=False,
separator=""):
"""Compares the output of reduce_join to an expected result.
Args:
input_array: The string input to be joined.
truth: An array or np.array of the expected result.
truth_shape: An array or np.array of the expected shape.
axis: The indices to reduce over.
keep_dims: Whether or not to retain reduced dimensions.
separator: The separator to use for joining.
"""
with self.cached_session():
output = string_ops.reduce_join(
inputs=input_array,
axis=axis,
keep_dims=keep_dims,
separator=separator)
output_array = self.evaluate(output)
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, output.get_shape())
def _testMultipleReduceJoin(self, input_array, axis, separator=" "):
"""Tests reduce_join for one input and multiple axes.
Does so by comparing the output to that from nested reduce_string_joins.
The correctness of single-dimension reduce_join is verified by other
tests below using _testReduceJoin.
Args:
input_array: The input to test.
axis: The indices to reduce.
separator: The separator to use when joining.
"""
with self.cached_session():
output = string_ops.reduce_join(
inputs=input_array, axis=axis, keep_dims=False, separator=separator)
output_keep_dims = string_ops.reduce_join(
inputs=input_array, axis=axis, keep_dims=True, separator=separator)
truth = input_array
for index in axis:
truth = string_ops.reduce_join(
inputs=truth, axis=index, keep_dims=True, separator=separator)
if not axis:
truth = constant_op.constant(truth)
truth_squeezed = array_ops.squeeze(truth, axis=axis)
output_array = self.evaluate(output)
output_keep_dims_array = self.evaluate(output_keep_dims)
truth_array = self.evaluate(truth)
truth_squeezed_array = self.evaluate(truth_squeezed)
self.assertAllEqualUnicode(truth_array, output_keep_dims_array)
self.assertAllEqualUnicode(truth_squeezed_array, output_array)
self.assertAllEqual(truth.get_shape(), output_keep_dims.get_shape())
self.assertAllEqual(truth_squeezed.get_shape(), output.get_shape())
def testRankOne(self):
input_array = ["this", "is", "a", "test"]
truth = "thisisatest"
truth_shape = []
self._testReduceJoin(input_array, truth, truth_shape, axis=0)
def testRankTwo(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array, truth_dim_zero, truth_shape_dim_zero, axis=0)
self._testReduceJoin(
input_array, truth_dim_one, truth_shape_dim_one, axis=1)
expected_val = "thisisatestpleasedonotpanic"
expected_shape = []
self._testReduceJoin(input_array, expected_val, expected_shape, axis=None)
# Using axis=[] is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(input_array, expected_val, expected_shape, axis=[])
def testRankFive(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(input_array, truths[i], truth_shape, axis=i)
def testNegative(self):
input_array = _input_array(num_dims=5)
truths = [_joined_array(num_dims=5, reduce_dim=i) for i in xrange(5)]
truth_shape = [2] * 4
for i in xrange(5):
self._testReduceJoin(input_array, truths[i], truth_shape, axis=i - 5)
def testSingletonDimension(self):
input_arrays = [
_input_array(num_dims=5).reshape([2] * i + [1] + [2] * (5 - i))
for i in xrange(6)
]
truth = _input_array(num_dims=5)
truth_shape = [2] * 5
for i in xrange(6):
self._testReduceJoin(input_arrays[i], truth, truth_shape, axis=i)
def testSeparator(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["this please", "is do", "a not", "test panic"]
truth_shape_dim_zero = [4]
truth_dim_one = ["this is a test", "please do not panic"]
truth_shape_dim_one = [2]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
axis=0,
separator=" ")
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
axis=1,
separator=" ")
@test_util.run_deprecated_v1
def testUnknownShape(self):
input_array = [["a"], ["b"]]
truth = ["ab"]
truth_shape = None
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
reduced = string_ops.reduce_join(placeholder, axis=0)
output_array = reduced.eval(feed_dict={placeholder.name: input_array})
self.assertAllEqualUnicode(truth, output_array)
self.assertAllEqual(truth_shape, reduced.get_shape())
@test_util.run_deprecated_v1
def testUnknownIndices(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = ["thisplease", "isdo", "anot", "testpanic"]
truth_dim_one = ["thisisatest", "pleasedonotpanic"]
truth_shape = None
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(input_array, axis=placeholder)
output_array_dim_zero = reduced.eval(feed_dict={placeholder.name: [0]})
output_array_dim_one = reduced.eval(feed_dict={placeholder.name: [1]})
self.assertAllEqualUnicode(truth_dim_zero, output_array_dim_zero)
self.assertAllEqualUnicode(truth_dim_one, output_array_dim_one)
self.assertAllEqual(truth_shape, reduced.get_shape())
def testKeepDims(self):
input_array = [["this", "is", "a", "test"],
["please", "do", "not", "panic"]]
truth_dim_zero = [["thisplease", "isdo", "anot", "testpanic"]]
truth_shape_dim_zero = [1, 4]
truth_dim_one = [["thisisatest"], ["pleasedonotpanic"]]
truth_shape_dim_one = [2, 1]
self._testReduceJoin(
input_array,
truth_dim_zero,
truth_shape_dim_zero,
axis=0,
keep_dims=True)
self._testReduceJoin(
input_array,
truth_dim_one,
truth_shape_dim_one,
axis=1,
keep_dims=True)
expected_val = [["thisisatestpleasedonotpanic"]]
expected_shape = [1, 1]
self._testReduceJoin(
constant_op.constant(input_array), expected_val, expected_shape,
keep_dims=True, axis=None)
# Using axis=[] is a no-op.
expected_val = input_array
expected_shape = [2, 4]
self._testReduceJoin(
input_array, expected_val, expected_shape, keep_dims=True, axis=[])
def testMultiIndex(self):
num_dims = 3
input_array = _input_array(num_dims=num_dims)
# Also tests [].
for i in xrange(num_dims + 1):
for permutation in itertools.permutations(xrange(num_dims), i):
self._testMultipleReduceJoin(input_array, axis=permutation)
@test_util.run_deprecated_v1
def testInvalidReductionIndices(self):
with self.cached_session():
with self.assertRaisesRegex(ValueError, "Invalid reduction dim"):
string_ops.reduce_join(inputs="", axis=0)
with self.assertRaisesRegex(ValueError, "Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], axis=-3)
with self.assertRaisesRegex(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], axis=2)
with self.assertRaisesRegex(ValueError, "Invalid reduction dimension -3"):
string_ops.reduce_join(inputs=[[""]], axis=[0, -3])
with self.assertRaisesRegex(ValueError, "Invalid reduction dimension 2"):
string_ops.reduce_join(inputs=[[""]], axis=[0, 2])
def testZeroDims(self):
with self.cached_session():
inputs = np.zeros([0, 1], dtype=str)
# Reduction that drops the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, axis=0)
self.assertAllEqualUnicode([""], self.evaluate(output))
# Reduction that keeps the dim of size 0.
output = string_ops.reduce_join(inputs=inputs, axis=1)
output_shape = self.evaluate(output).shape
self.assertAllEqual([0], output_shape)
@test_util.run_deprecated_v1
def testInvalidArgsUnknownShape(self):
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.string, name="placeholder")
index_too_high = string_ops.reduce_join(placeholder, axis=1)
duplicate_index = string_ops.reduce_join(placeholder, axis=[-1, 1])
with self.assertRaisesOpError("Invalid reduction dimension 1"):
index_too_high.eval(feed_dict={placeholder.name: [""]})
with self.assertRaisesOpError("Duplicate reduction dimension 1"):
duplicate_index.eval(feed_dict={placeholder.name: [[""]]})
@test_util.run_deprecated_v1
def testInvalidArgsUnknownIndices(self):
with self.cached_session():
placeholder = array_ops.placeholder(dtypes.int32, name="placeholder")
reduced = string_ops.reduce_join(["test", "test2"], axis=placeholder)
with self.assertRaisesOpError("reduction dimension -2"):
reduced.eval(feed_dict={placeholder.name: -2})
with self.assertRaisesOpError("reduction dimension 2"):
reduced.eval(feed_dict={placeholder.name: 2})
def testDeprecatedArgs(self):
foobar = constant_op.constant(["foobar"])
# Old names: keep_dims and reduction_indices
output = string_ops.reduce_join(
["foo", "bar"], reduction_indices=0, keep_dims=True)
self.assertAllEqual(foobar, output)
# New names keepdims and axis.
output = string_ops.reduce_join(["foo", "bar"], axis=0, keepdims=True)
self.assertAllEqual(foobar, output)
if __name__ == "__main__":
test.main()
|
the-stack_0_16671 | """ ESC/POS Commands (Constants) """
# Feed control sequences
CTL_LF = '\x0a' # Print and line feed
CTL_FF = '\x0c' # Form feed
CTL_CR = '\x0d' # Carriage return
CTL_HT = '\x09' # Horizontal tab
CTL_VT = '\x0b' # Vertical tab
# Printer hardware
HW_INIT = '\x1b\x40' # Clear data in buffer and reset modes
HW_SELECT = '\x1b\x3d\x01' # Printer select
HW_RESET = '\x1b\x3f\x0a\x00' # Reset printer hardware
# Cash Drawer
CD_KICK_2 = '\x1b\x70\x00' # Sends a pulse to pin 2 []
CD_KICK_5 = '\x1b\x70\x01' # Sends a pulse to pin 5 []
# Paper
PAPER_FULL_CUT = '\x1d\x56\x00' # Full cut paper
PAPER_PART_CUT = '\x1d\x56\x01' # Partial cut paper
# Text format
BARCODE_TXT_OFF = '\x1d\x48\x00' # HRI barcode chars OFF
BARCODE_TXT_ABV = '\x1d\x48\x01' # HRI barcode chars above
BARCODE_TXT_BLW = '\x1d\x48\x02' # HRI barcode chars below
BARCODE_TXT_BTH = '\x1d\x48\x03' # HRI barcode chars both above and below
BARCODE_FONT_A = '\x1d\x66\x00' # Font type A for HRI barcode chars
BARCODE_FONT_B = '\x1d\x66\x01' # Font type B for HRI barcode chars
BARCODE_HEIGHT = '\x1d\x68\x64' # Barcode Height [1-255]
BARCODE_WIDTH = '\x1d\x77\x03' # Barcode Width [2-6]
BARCODE_UPC_A = '\x1d\x6b\x00' # Barcode type UPC-A
BARCODE_UPC_E = '\x1d\x6b\x01' # Barcode type UPC-E
BARCODE_EAN13 = '\x1d\x6b\x02' # Barcode type EAN13
BARCODE_EAN8 = '\x1d\x6b\x03' # Barcode type EAN8
BARCODE_CODE39 = '\x1d\x6b\x04' # Barcode type CODE39
BARCODE_ITF = '\x1d\x6b\x05' # Barcode type ITF
BARCODE_NW7 = '\x1d\x6b\x06' # Barcode type NW7
# Image format
S_RASTER_N = '\x1d\x76\x30\x00' # Set raster image normal size
S_RASTER_2W = '\x1d\x76\x30\x01' # Set raster image double width
S_RASTER_2H = '\x1d\x76\x30\x02' # Set raster image double height
S_RASTER_Q = '\x1d\x76\x30\x03' # Set raster image quadruple
RESET = '\x1b\x40'
TEXT_STYLE = {
'bold': {
0: '\x1b\x45\x00', # Bold font OFF
1: '\x1b\x45\x01', # Bold font ON
},
'underline': {
None: '\x1b\x2d\x00', # Underline font OFF
1: '\x1b\x2d\x01', # Underline font 1-dot ON
2: '\x1b\x2d\x02', # Underline font 2-dot ON
},
'size': {
'normal': '\x1b\x21\x00', # Normal text
'2h': '\x1b\x21\x10', # Double height text
'2w': '\x1b\x21\x20', # Double width text
'2x': '\x1b\x21\x30', # Quad area text
},
'font': {
'a': '\x1b\x4d\x00', # Font type A
'b': '\x1b\x4d\x01', # Font type B
'c': '\x1b\x4d\x02', # Font type C (may not support)
},
'align': {
'left': '\x1b\x61\x00', # Left justification
'right': '\x1b\x61\x02', # Right justification
'center': '\x1b\x61\x01', # Centering
},
'inverted': {
        False: '\x1d\x42\x00', # Inverted mode OFF
        True: '\x1d\x42\x01', # Inverted mode ON
},
'color': {
1: '\x1b\x72\x00', # Select 1st printing color
        2: '\x1b\x72\x01', # Select 2nd printing color
}
}
PAGE_CP_SET_COMMAND = '\x1b\x74'
PAGE_CP_CODE = {
'cp437' : 0,
# 'katakana' : 1,
'cp850' : 2,
'cp860' : 3,
'cp863' : 4,
'cp865' : 5,
'cp1251' : 6,
'cp866' : 7,
'mac_cyrillic': 8,
'cp775' : 9,
'cp1253' : 10,
'cp737' : 11,
'cp857' : 12,
'iso8859_9' : 13,
'cp864' : 14,
'cp862' : 15,
'iso8859_2' : 16,
'cp1253' : 17,
'cp1250' : 18,
'cp858' : 19,
'cp1254' : 20,
# 'TIS_14' : 21,
# 'TIS_17' : 22,
# 'TIS_11' : 23,
'cp737' : 24,
'cp1257' : 25,
'cp847' : 26,
# 'cp720' : 27,
'cp885' : 28,
'cp857' : 29,
'cp1250' : 30,
'cp775' : 31,
'cp1254' : 32,
# '' : 33,
'cp1256' : 34,
'cp1258' : 35,
'iso8859_2' : 36,
'iso8859_3' : 37,
'iso8859_4' : 38,
'iso8859_5' : 39,
'iso8859_6' : 40,
'iso8859_7' : 41,
'iso8859_8' : 42,
'iso8859_9' : 43,
'iso8859_15' : 44,
# '???' : 45,
'cp856' : 46,
'cp874' : 47,
}
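# Illustrative usage sketch (not part of the original constants module): a
# driver would typically concatenate these escape sequences with the text to
# print and write the raw bytes to the device, e.g.
#
#   data = HW_INIT + TEXT_STYLE['align']['center'] + TEXT_STYLE['bold'][1] \
#          + 'RECEIPT\n' + PAPER_PART_CUT
#   printer.write(data.encode('latin-1'))
#
# where ``printer`` is a hypothetical open serial/USB file-like connection.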
|
the-stack_0_16672 | class Node(object):
def __init__(self, name, which):
self.name = name
self.which = which
        self.next = None
self.timestamp = 0
class AnimalShelter(object):
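    """
    First-in, first-out animal shelter holding cats and dogs.
    Dogs and cats are kept in two separate singly linked queues; every node is
    stamped with a monotonically increasing counter so dequeueAny() can return
    whichever of the two front animals arrived earliest.
    """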
def __init__(self):
self.first_cat = None
self.first_dog = None
self.last_cat = None
self.last_dog = None
self.counter = 0
def enqueue(self, name, which):
self.counter += 1
node = Node(name, which)
node.timestamp = self.counter
if which == 'cat':
if not self.first_cat:
self.first_cat = node
if self.last_cat:
self.last_cat.next = node
self.last_cat = node
if which == 'dog':
if not self.first_dog:
self.first_dog = node
if self.last_dog:
self.last_dog.next = node
self.last_dog = node
def dequeueDog(self):
if self.first_dog:
node = self.first_dog
self.first_dog = node.next
return str(node.name)
raise Exception('No Dogs!')
def dequeueCat(self):
if self.first_cat:
node = self.first_cat
self.first_cat = node.next
return str(node.name)
raise Exception('No Cats!')
def dequeueAny(self):
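        # Compare the oldest dog and the oldest cat by enqueue timestamp and
        # dequeue whichever arrived first.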
nodecat = self.first_cat
nodedog = self.first_dog
if nodecat and not nodedog:
return self.dequeueCat()
elif nodedog and not nodecat:
return self.dequeueDog()
elif nodedog and nodecat:
if nodedog.timestamp < nodecat.timestamp:
return self.dequeueDog()
else:
return self.dequeueCat()
raise Exception('No Animals!')
def main():
qs = AnimalShelter()
qs.enqueue('bob', 'cat')
qs.enqueue('mia', 'cat')
qs.enqueue('yoda', 'dog')
qs.enqueue('wolf', 'dog')
assert(qs.dequeueDog() == 'yoda')
assert(qs.dequeueCat() == 'bob')
print(qs.dequeueAny() == 'mia')
if __name__ == '__main__':
main()
|
the-stack_0_16674 | import cv2 as cv
import numpy as np
import ctypes
def Mbox(title, text, style):
return ctypes.windll.user32.MessageBoxW(0, text, title, style)
# https://docs.opencv.org/3.4/dc/d9b/classcv_1_1ppf__match__3d_1_1ICP.html
def rotation(theta):
tx, ty, tz = theta
Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])
Ry = np.array([[np.cos(ty), 0, -np.sin(ty)], [0, 1, 0], [np.sin(ty), 0, np.cos(ty)]])
Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])
return np.dot(Rx, np.dot(Ry, Rz))
width = 20
height = 10
max_deg = np.pi / 12
cloud, rotated_cloud = [None]*3, [None]*3
retval, residual, pose = [None]*3, [None]*3, [None]*3
noise = np.random.normal(0.0, 0.1, height * width * 3).reshape((-1, 3))
noise2 = np.random.normal(0.0, 1.0, height * width)
x, y = np.meshgrid(
range(-width//2, width//2),
range(-height//2, height//2),
sparse=False, indexing='xy'
)
z = np.zeros((height, width))
cloud[0] = np.dstack((x, y, z)).reshape((-1, 3)).astype(np.float32)
cloud[1] = noise.astype(np.float32) + cloud[0]
cloud[2] = cloud[1]
cloud[2][:, 2] += noise2.astype(np.float32)
R = rotation([
0, #np.random.uniform(-max_deg, max_deg),
np.random.uniform(-max_deg, max_deg),
0, #np.random.uniform(-max_deg, max_deg)
])
t = np.zeros((3, 1))
Rt = np.vstack((
np.hstack((R, t)),
np.array([0, 0, 0, 1])
)).astype(np.float32)
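# Rt is a 4x4 homogeneous transform (rotation only, zero translation); ICP should
# recover approximately its inverse when registering the rotated clouds back.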
icp = cv.ppf_match_3d_ICP(100)
I = np.eye(4)
print("Unaligned error:\t%.6f" % np.linalg.norm(I - Rt))
sprintfStr = "Unaligned error:\t%.6f\n" % np.linalg.norm(I - Rt)
for i in range(3):
rotated_cloud[i] = np.matmul(Rt[0:3,0:3], cloud[i].T).T + Rt[:3,3].T
retval[i], residual[i], pose[i] = icp.registerModelToScene(rotated_cloud[i], cloud[i])
print("ICP error:\t\t%.6f" % np.linalg.norm(I - np.matmul(pose[0], Rt)))
sprintfStr += "ICP error:\t\t%.6f\n" % np.linalg.norm(I - np.matmul(pose[0], Rt))
Mbox('ICP complete', sprintfStr, 1) |
the-stack_0_16675 | #
# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
#
# OpenArkCompiler is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#
from api import *
SCO2_TEST = {
"compile": [
C2ast(
clang="${OUT_ROOT}/tools/bin/clang",
include_path=[
"${OUT_ROOT}/aarch64-clang-release/lib/include",
"${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/usr/include",
"${OUT_ROOT}/tools/gcc-linaro-7.5.0/lib/gcc/aarch64-linux-gnu/7.5.0/include"
],
option="--target=aarch64",
infile="${APP}.c",
outfile="${APP}.ast",
extra_opt="${SPEC_PARAM}"
),
Mplfe(
hir2mpl="${OUT_ROOT}/aarch64-clang-release/bin/hir2mpl",
infile="${APP}.ast",
outfile="${APP}.mpl"
),
Maple(
maple="${OUT_ROOT}/aarch64-clang-release/bin/maple",
run=["me", "mpl2mpl", "mplcg"],
option={
"me": "-O2 --quiet",
"mpl2mpl": "-O2 --quiet",
"mplcg": "--O2 --fpic --quiet --no-pie --verbose-asm"
},
global_option="",
infile="${APP}.mpl"
),
CLinker(
infile="${APP}.s",
front_option="-O2 -std=c99",
outfile="${APP}.o",
back_option="",
mid_opt="-c"
)
],
"link": [
CLinker(
infile="${APP}",
front_option="-std=gnu99 -no-pie",
outfile="${EXE}",
back_option="-lm -L${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc/lib/"
)
],
"cp_data":[
Shell(
"cp -r data/test/${APP} ${TARGET}"
)
],
"run": [
Shell(
"${OUT_ROOT}/tools/bin/qemu-aarch64 -L ${OUT_ROOT}/tools/gcc-linaro-7.5.0/aarch64-linux-gnu/libc ${EXE} ${APP} > output.log"
)
],
"compare": [
Shell(
"${MAPLE_ROOT}/testsuite/c_test/spec_test/specperl ${MAPLE_ROOT}/testsuite/c_test/spec_test/specdiff -m -l 10 ${EXTRA_COMPARE} output.log data/test/${APP}"
)
]
}
|
the-stack_0_16676 | import torch
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import cosine_similarity
def grad_cosine(grad_1, grad_2):
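    # Per-layer cosine similarity between two lists of gradient arrays.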
cos = np.zeros(len(grad_1))
for i in range(len(grad_1)):
cos_arr = grad_1[i] * grad_2[i]
cos_arr /= np.sqrt(np.sum(grad_1[i] ** 2))
cos_arr /= np.sqrt(np.sum(grad_2[i] ** 2))
cos[i] = np.sum(cos_arr)
return cos
def grad_vs_optimal(grad_list, param_list):
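    # Cosine between each step's gradient and (param - final_param); a high value
    # means the descent step -grad points toward the final parameters.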
final_param = param_list[-1]
cos = []
for i in range(len(param_list) - 1):
param = param_list[i]
grad = grad_list[i]
ideal_direction = [param[j] - final_param[j] for j in range(len(param))]
cos.append(grad_cosine(grad, ideal_direction))
return np.stack(cos)
def plot_grad_flow(named_parameters):
'''Plots the gradients flowing through different layers in the net during training.
Can be used for checking for possible gradient vanishing / exploding problems.
Usage: Plug this function in Trainer class after loss.backwards() as
"plot_grad_flow(self.model.named_parameters())" to visualize the gradient flow'''
ave_grads = []
max_grads= []
layers = []
for n, p in named_parameters:
if(p.requires_grad) and ("bias" not in n):
layers.append(n)
ave_grads.append(p.grad.abs().mean())
max_grads.append(p.grad.abs().max())
plt.bar(np.arange(len(max_grads)), max_grads, alpha=0.1, lw=1, color="c")
plt.bar(np.arange(len(max_grads)), ave_grads, alpha=0.1, lw=1, color="b")
plt.hlines(0, 0, len(ave_grads)+1, lw=2, color="k" )
plt.xticks(range(0,len(ave_grads), 1), layers, rotation="vertical")
plt.xlim(left=0, right=len(ave_grads))
plt.ylim(bottom = -0.001, top=0.02) # zoom in on the lower gradient regions
plt.xlabel("Layers")
plt.ylabel("average gradient")
plt.title("Gradient flow")
plt.grid(True)
plt.legend([plt.Line2D([0], [0], color="c", lw=4),
plt.Line2D([0], [0], color="b", lw=4),
plt.Line2D([0], [0], color="k", lw=4)], ['max-gradient', 'mean-gradient', 'zero-gradient'])
class GradAnalysis(object):
def __init__(self, model):
self.model = model
self.names = []
self.params = []
self.grad = []
self.get_param()
def get_param(self):
self.params = []
for n, p in self.model.named_parameters():
if (p.requires_grad) and ("bias" not in n):
self.names.append(n)
self.params.append(p.data.clone().cpu().numpy())
return self.params
def loss_grad(self, loss):
# Backward and optimize
loss.backward(retain_graph=True)
self.grad = [
p.grad.clone().cpu().numpy()
for n, p in self.model.named_parameters()
if (p.requires_grad) and ("bias" not in n)
]
return self.grad
def clear_grad(self):
for n, p in self.model.named_parameters():
if (p.requires_grad) and ("bias" not in n):
p.grad.data.zero_() |
the-stack_0_16679 | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The Garliccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>:<port>
[<ipv6>]:<port>
<onion>.onion:<port>
<i2p>.b32.i2p:<port>
The output will be two data structures with the peers in binary format:
static const uint8_t chainparams_seed_{main,test}[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from enum import Enum
import struct
import sys
import os
import re
class BIP155Network(Enum):
IPV4 = 1
IPV6 = 2
TORV2 = 3
TORV3 = 4
I2P = 5
CJDNS = 6
def name_to_bip155(addr):
'''Convert address string to BIP155 (networkID, addr) tuple.'''
if addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) == 10:
return (BIP155Network.TORV2, vchAddr)
elif len(vchAddr) == 35:
assert(vchAddr[34] == 3)
return (BIP155Network.TORV3, vchAddr[:32])
else:
raise ValueError('Invalid onion %s' % vchAddr)
elif addr.endswith('.b32.i2p'):
vchAddr = b32decode(addr[0:-8] + '====', True)
if len(vchAddr) == 32:
return (BIP155Network.I2P, vchAddr)
else:
raise ValueError(f'Invalid I2P {vchAddr}')
elif '.' in addr: # IPv4
return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return (BIP155Network.IPV6, bytes(sub[0] + ([0] * nullbytes) + sub[1]))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s):
'''Convert endpoint string to BIP155 (networkID, addr, port) tuple.'''
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = 0
else:
port = int(port)
host = name_to_bip155(host)
return host + (port, )
def ser_compact_size(l):
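    # Serialize an integer length using Bitcoin's variable-length CompactSize encoding.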
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def bip155_serialize(spec):
'''
Serialize (networkID, addr, port) tuple to BIP155 binary format.
'''
r = b""
r += struct.pack('B', spec[0].value)
r += ser_compact_size(len(spec[1]))
r += spec[1]
r += struct.pack('>H', spec[2])
return r
def process_nodes(g, f, structname):
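    # Write each seed entry as a comma-separated list of hex bytes inside a C array
    # named `structname`, skipping comments and blank lines.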
g.write('static const uint8_t %s[] = {\n' % structname)
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
spec = parse_spec(line)
blob = bip155_serialize(spec)
hoststr = ','.join(('0x%02x' % b) for b in blob)
g.write(f' {hoststr},\n')
g.write('};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef GARLICCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define GARLICCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the garliccoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_main')
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
process_nodes(g, f, 'chainparams_seed_test')
g.write('#endif // GARLICCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
the-stack_0_16680 | import argparse
import time
import torch
import torch.nn as nn
from torch.utils import data
import numpy as np
import pickle
import cv2
import torch.optim as optim
import scipy.misc
import torch.backends.cudnn as cudnn
import sys
import os
from tqdm import tqdm
import os.path as osp
#from networks.gcnet import Res_Deeplab
from dataset.datasets import CSDataSet
#import matplotlib.pyplot as plt
import random
import timeit
import logging
from tensorboardX import SummaryWriter
from utils.utils import decode_labels, inv_preprocess, decode_predictions
from utils.criterion import CriterionCrossEntropy, CriterionOhemCrossEntropy, CriterionDSN, CriterionOhemDSN
from utils.encoding import DataParallelModel, DataParallelCriterion
from utils.utils import fromfile
torch_ver = torch.__version__[:3]
if torch_ver == '0.3':
from torch.autograd import Variable
start = timeit.default_timer()
IMG_MEAN = np.array((104.00698793,116.66876762,122.67891434), dtype=np.float32)
'''
BATCH_SIZE = 8
DATA_DIRECTORY = 'cityscapes'
DATA_LIST_PATH = './dataset/list/cityscapes/train.lst'
IGNORE_LABEL = 255
INPUT_SIZE = '769,769'
LEARNING_RATE = 1e-2
MOMENTUM = 0.9
NUM_CLASSES = 19
NUM_STEPS = 60000
POWER = 0.9
RANDOM_SEED = 1234
RESTORE_FROM = './dataset/resnet101-imagenet.pth'
SAVE_NUM_IMAGES = 2
SAVE_PRED_EVERY = 10000
SNAPSHOT_DIR = 'snapshots/'
WEIGHT_DECAY = 0.0005
'''
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="DeepLab-ResNet Network")
parser.add_argument("--data-dir", type=str, default=None,
help="Path to the directory containing the PASCAL VOC dataset.")
parser.add_argument("--is-training", action="store_true",
help="Whether to updates the running means and variances during the training.")
parser.add_argument("--not-restore-last", action="store_true",
help="Whether to not restore last (FC) layers.")
parser.add_argument("--start-iters", type=int, default=0,
help="Number of classes to predict (including background).")
parser.add_argument("--random-mirror", action="store_true",
help="Whether to randomly mirror the inputs during the training.")
parser.add_argument("--random-scale", action="store_true",
help="Whether to randomly scale the inputs during the training.")
parser.add_argument("--restore-from", type=str, default=None,
help="Where restore model parameters from.")
parser.add_argument("--gpu", type=str, default='None',
help="choose gpu device.")
parser.add_argument("--recurrence", type=int, default=1,
help="choose the number of recurrence.")
parser.add_argument("--ft", type=bool, default=False,
help="fine-tune the model with large input size.")
parser.add_argument('--config', help='train config file path')
parser.add_argument("--ohem", type=str2bool, default='False',
help="use hard negative mining")
parser.add_argument("--ohem-thres", type=float, default=0.6,
help="choose the samples with correct probability underthe threshold.")
parser.add_argument("--ohem-keep", type=int, default=200000,
help="choose the samples with correct probability underthe threshold.")
parser.add_argument("--use-zip", type=str2bool, default='True',
help="use zipfile as dataset")
return parser.parse_args()
args = get_arguments()
cfg=fromfile(args.config)
if cfg.model.type == 'basenet':
from networks.basenet import Res_Deeplab
def lr_poly(base_lr, iter, max_iter, power):
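    # Polynomial decay: the learning rate falls from base_lr to 0 as iter approaches max_iter.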
return base_lr*((1-float(iter)/max_iter)**(power))
def adjust_learning_rate(optimizer, i_iter):
"""Sets the learning rate to the initial LR divided by 5 at 60th, 120th and 160th epochs"""
lr = lr_poly(cfg.train_cfg.learning_rate, i_iter, cfg.train_cfg.num_steps, cfg.train_cfg.power)
optimizer.param_groups[0]['lr'] = lr
return lr
def set_bn_eval(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1:
m.eval()
def set_bn_momentum(m):
classname = m.__class__.__name__
if classname.find('BatchNorm') != -1 or classname.find('InPlaceABN') != -1:
m.momentum = 0.0003
def main():
"""Create the model and start the training."""
writer = SummaryWriter(cfg.train_cfg.snapshot_dir)
if args.gpu is not None:
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
if args.data_dir is not None:
cfg.data_cfg.data_dir = args.data_dir
if args.restore_from is not None:
cfg.train_cfg.restore_from = args.restore_from
if args.start_iters is not None:
cfg.train_cfg.start_iters = args.start_iters
h, w = map(int, cfg.data_cfg.input_size.split(','))
input_size = (h, w)
cudnn.enabled = True
# Create network.
deeplab = Res_Deeplab(cfg.model,cfg.data_cfg.num_classes)
print(deeplab)
if cfg.train_cfg.start_iters == 0:
saved_state_dict = torch.load(cfg.train_cfg.restore_from)
new_params = deeplab.state_dict().copy()
for i in saved_state_dict:
i_parts = i.split('.')
if not i_parts[0] == 'fc':
new_params['.'.join(i_parts[0:])] = saved_state_dict[i]
deeplab.load_state_dict(new_params)
print('new params loaded')
else:
saved_state_dict = torch.load(cfg.train_cfg.restore_from)
deeplab.load_state_dict(saved_state_dict)
model = DataParallelModel(deeplab)
model.train()
model.float()
# model.apply(set_bn_momentum)
model.cuda()
if args.ohem:
criterion = CriterionOhemDSN(thresh=args.ohem_thres, min_kept=args.ohem_keep)
else:
criterion = CriterionDSN()
criterion = DataParallelCriterion(criterion)
criterion.cuda()
cudnn.benchmark = True
if not os.path.exists(cfg.train_cfg.snapshot_dir):
os.makedirs(cfg.train_cfg.snapshot_dir)
trainloader = data.DataLoader(CSDataSet(cfg.data_cfg.data_dir, cfg.data_cfg.data_list, max_iters=cfg.train_cfg.num_steps*cfg.train_cfg.batch_size,
crop_size=input_size,scale=args.random_scale, mirror=args.random_mirror, mean=IMG_MEAN, use_zip=args.use_zip),
batch_size=cfg.train_cfg.batch_size, shuffle=True, num_workers=4, pin_memory=True)
def get_params(tmp_model):
lr_wd_group = []
lr_nowd_group = []
for name, p in tmp_model.named_parameters():
if p.requires_grad:
if p.__dict__.get('wd', -1) == 0:
lr_nowd_group.append(p)
print(name)
else:
lr_wd_group.append(p)
return [dict(params=lr_wd_group), dict(params=lr_nowd_group, weight_decay=0.0)]
optimizer = optim.SGD(get_params(deeplab), lr=cfg.train_cfg.learning_rate, momentum=cfg.train_cfg.momentum,weight_decay=cfg.train_cfg.weight_decay)
optimizer.zero_grad()
for i_iter, batch in enumerate(trainloader):
i_iter += cfg.train_cfg.start_iters
images, labels, _, _ = batch
images = images.cuda()
labels = labels.long().cuda()
if torch_ver == "0.3":
images = Variable(images)
labels = Variable(labels)
optimizer.zero_grad()
lr = adjust_learning_rate(optimizer, i_iter)
preds = model(images)
loss = criterion(preds, labels)
loss.backward()
optimizer.step()
if i_iter % 100 == 0:
writer.add_scalar('learning_rate', lr, i_iter)
writer.add_scalar('loss', loss.data.cpu().numpy(), i_iter)
if 'nowd' in cfg.model.module.type and cfg.model.module.get('with_nl', True):
writer.add_scalar('convkey_mean', model.module.head.ctb.conv_key.weight.mean(), i_iter)
writer.add_scalar('convkey_std', model.module.head.ctb.conv_key.weight.var().sqrt(), i_iter)
writer.add_scalar('convkey_max', model.module.head.ctb.conv_key.weight.abs().max(), i_iter)
writer.add_scalar('convquery_std', model.module.head.ctb.conv_query.weight.var().sqrt(), i_iter)
writer.add_scalar('convquery_mean', model.module.head.ctb.conv_query.weight.mean(), i_iter)
writer.add_scalar('convquery_max', model.module.head.ctb.conv_query.weight.abs().max(), i_iter)
# if i_iter % 5000 == 0:
# images_inv = inv_preprocess(images, args.save_num_images, IMG_MEAN)
# labels_colors = decode_labels(labels, args.save_num_images, args.num_classes)
# if isinstance(preds, list):
# preds = preds[0]
# preds_colors = decode_predictions(preds, args.save_num_images, args.num_classes)
# for index, (img, lab) in enumerate(zip(images_inv, labels_colors)):
# writer.add_image('Images/'+str(index), img, i_iter)
# writer.add_image('Labels/'+str(index), lab, i_iter)
# writer.add_image('preds/'+str(index), preds_colors[index], i_iter)
print('Time {}, iter = {} of {} completed, loss = {}'.format(time.strftime("%Y-%m-%d %H:%M:%S"), i_iter, cfg.train_cfg.num_steps, loss.data.cpu().numpy()))
if 'nowd' in cfg.model.module.type and cfg.model.module.get('with_nl', True):
print('convkey: mean {}, std {}, absmax {}'.format(
model.module.head.ctb.conv_key.weight.mean(), model.module.head.ctb.conv_key.weight.var().sqrt(), model.module.head.ctb.conv_key.weight.abs().max()))
print('convquery: mean {}, std {}, absmax {}'.format(
model.module.head.ctb.conv_query.weight.mean(), model.module.head.ctb.conv_query.weight.var().sqrt(), model.module.head.ctb.conv_query.weight.abs().max()))
if i_iter >= cfg.train_cfg.num_steps-1:
print('save model ...')
torch.save(deeplab.state_dict(),osp.join(cfg.train_cfg.snapshot_dir, 'CS_scenes_'+str(cfg.train_cfg.num_steps)+'.pth'))
break
if i_iter % cfg.train_cfg.save_pred_every == 0 and i_iter >= cfg.train_cfg.save_from-1:
print('taking snapshot ...')
torch.save(deeplab.state_dict(),osp.join(cfg.train_cfg.snapshot_dir, 'CS_scenes_'+str(i_iter)+'.pth'))
end = timeit.default_timer()
print(end-start,'seconds')
if __name__ == '__main__':
main()
|
the-stack_0_16681 | """
Try all efforts to minimize the distribution size of Depsland, then extract
archived files on client side.
WIP: This module is not stable to use.
"""
import os
import shutil
import subprocess
import sys
sys.path.append(os.path.abspath(f'{__file__}/../..'))
# noinspection PyUnresolvedReferences
from minimal_setup.index import ResourcesIndex # noqa
python_exe = sys.executable
res_idx = ...
def main():
global res_idx
res_idx = _indexing_resources()
_extract()
_setup_venv_packages()
_clean()
def _indexing_resources():
res_idx = ResourcesIndex()
return res_idx
def _extract():
def _extract(file_i: str, dir_o):
if file_i.endswith(('.tar.gz', '.tar')):
import tarfile
file_handle = tarfile.open(file_i)
else:
from zipfile import ZipFile
file_handle = ZipFile(file_i)
file_handle.extractall(dir_o)
return dir_o
_extract(res_idx.assets_zip, res_idx.assets)
_extract(res_idx.venv_packages_zip, res_idx.venv_packages_unzip)
def _setup_venv_packages():
# note: assert pip and setuptools already exist
send_cmd(f'{python_exe} -m pip install -r {res_idx.requirements} '
f'--no-index -f {res_idx.venv_packages_unzip}')
def _clean():
for i in (
res_idx.assets_zip,
res_idx.temp,
res_idx.venv_packages_zip,
):
if os.path.exists(i):
if os.path.isfile(i):
os.remove(i)
else:
shutil.rmtree(i)
# -----------------------------------------------------------------------------
def copy_dirs(dir_i, dir_o):
for n in os.listdir(dir_i):
i = f'{dir_i}/{n}'
o = f'{dir_o}/{n}'
shutil.copytree(i, o)
def send_cmd(cmd: str) -> str:
try:
ret = subprocess.run(
cmd, shell=True, check=True, capture_output=True
)
out = ret.stdout.decode(encoding='utf-8').replace('\r\n', '\n')
except subprocess.CalledProcessError as e:
out = e.stderr.decode(encoding='utf-8')
raise Exception(out)
return out
|
the-stack_0_16683 | # -*- coding: utf-8 -*-
'''
© 2012-2013 eBay Software Foundation
Authored by: Tim Keefer
Licensed under CDDL 1.0
'''
import os
import sys
import gevent
from optparse import OptionParser
sys.path.insert(0, '%s/../' % os.path.dirname(__file__))
from common import dump
from ebaysdk.finding import Connection as finding
from ebaysdk.http import Connection as html
from ebaysdk.exception import ConnectionError
def init_options():
usage = "usage: %prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-d", "--debug",
action="store_true", dest="debug", default=False,
help="Enabled debugging [default: %default]")
parser.add_option("-y", "--yaml",
dest="yaml", default='ebay.yaml',
help="Specifies the name of the YAML defaults file. [default: %default]")
parser.add_option("-a", "--appid",
dest="appid", default=None,
help="Specifies the eBay application id to use.")
(opts, args) = parser.parse_args()
return opts, args
def run(opts):
timeout = gevent.Timeout(4)
timeout.start()
try:
calls = []
for page in range(1, 10):
api = finding(debug=opts.debug, appid=opts.appid,
config_file=opts.yaml)
call = gevent.spawn(api.execute,
'findItemsAdvanced',
{'keywords': 'python',
'paginationInput': {'pageNumber': page}})
calls.append(call)
gevent.joinall(calls)
try:
call_results = [c.get() for c in calls]
toprated = 0
for resp in call_results:
for item in resp.reply.searchResult.item:
if item.topRatedListing == 'true':
toprated += 1
print("Top Rated Listings: %s" % toprated)
except ConnectionError as e:
print("%s" % e)
except gevent.timeout.Timeout as e:
print("Calls reached timeout threshold: %s" % e)
finally:
timeout.cancel()
if __name__ == "__main__":
(opts, args) = init_options()
run(opts)
|
the-stack_0_16684 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
clean_html,
get_element_by_class,
js_to_json,
)
class TVNoeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tvnoe\.cz/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.tvnoe.cz/video/10362',
'md5': 'aee983f279aab96ec45ab6e2abb3c2ca',
'info_dict': {
'id': '10362',
'ext': 'mp4',
'series': 'Noční univerzita',
'title': 'prof. Tomáš Halík, Th.D. - Návrat náboženství a střet civilizací',
'description': 'md5:f337bae384e1a531a52c55ebc50fff41',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
iframe_url = self._search_regex(
r'<iframe[^>]+src="([^"]+)"', webpage, 'iframe URL')
ifs_page = self._download_webpage(iframe_url, video_id)
jwplayer_data = self._parse_json(
self._find_jwplayer_data(ifs_page),
video_id, transform_source=js_to_json)
info_dict = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False, base_url=iframe_url)
info_dict.update({
'id': video_id,
'title': clean_html(get_element_by_class(
'field-name-field-podnazev', webpage)),
'description': clean_html(get_element_by_class(
'field-name-body', webpage)),
'series': clean_html(get_element_by_class('title', webpage))
})
return info_dict
|
the-stack_0_16685 | from collections import OrderedDict
import logging
LOG = logging.getLogger(__name__)
class Dispatcher(object):
def __init__(self, mount=None):
self._endpoints = OrderedDict()
self.mount = mount
def add_endpoint(self, nickname, endpoint):
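        # Register the endpoint template (prefixed with the mount point, if any)
        # with no handler bound to it yet.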
if self.mount:
endpoint = self.mount + endpoint
self._endpoints[nickname] = (endpoint, None)
def get_endpoint_path(self, req, nickname, **kwargs):
path = ''
if nickname in self._endpoints:
path = self._endpoints[nickname][0]
if '{tenant_id}' in path:
tenant_id = req.env['tenant_id']
path = path.replace('{tenant_id}', tenant_id)
for var, value in kwargs.items():
if '{%s}' % var in path:
path = path.replace('{%s}' % var, str(value))
return path
def get_endpoint_url(self, req, nickname, **kwargs):
return (req.protocol + '://' +
req.get_header('host') +
req.app +
self.get_endpoint_path(req, nickname, **kwargs))
def get_unused_endpoints(self):
results = []
for nickname, endpoint in self._endpoints.items():
if not endpoint[1]:
results.append(nickname)
return results
def set_handler(self, nickname, handler):
if nickname not in self._endpoints:
raise ValueError("Unsupported endpoint '%s' specified." % nickname)
endpoint, _ = self._endpoints[nickname]
self._endpoints[nickname] = (endpoint, handler)
def get_routes(self):
endpoints = []
for endpoint, h in self._endpoints.values():
if h:
endpoints.append((endpoint, h))
return endpoints
|
the-stack_0_16687 | import json
from . import indexDbAPIs
from . import redisAPIs
class IndexDataRequestHandlers:
def __init__(self):
self.indexDbAPIs = indexDbAPIs.IndexDbAPIs()
async def handler_indexSymbolList(self, request):
'''
Returns the list of symbols in cash market segment
/api/{marketType}
'''
# FROM MongoDB SERVER
#return await self.indexDbAPIs.getIndexSymbolList()
# FROM REDIS SERVER
data = redisAPIs.readDataFromRedis('INDEX_SYMBOLS')
if data:
return json.loads(redisAPIs.readDataFromRedis('INDEX_SYMBOLS'))
else:
#return json.loads({'ERROR' : 'Redis data needs to be built'})
#return ('ERROR: FNOIDX_SYMBOLS')
return ([])
async def handler_indexMarketData(self, request):
'''
Returns the details of a stock symbol
/api/cash/data?symbol=Nifty 50&startdate=5-jul-2019&enddate=15-jul-2019
'''
symbol = request.rel_url.query.get('symbol') # Symbol is Case sensititve in this case
startDate = request.rel_url.query.get('startdate')
endDate = request.rel_url.query.get('enddate')
result = await self.indexDbAPIs.getIndexMarketData(symbol, startDate, endDate)
return result |
the-stack_0_16690 | #!/usr/bin/python
from __future__ import print_function
from bcc import BPF
import re, signal, sys
from time import sleep
# for influxdb
from influxdb import InfluxDBClient
import lmp_influxdb as db
from db_modules import write2db
from datetime import datetime
DBNAME = 'lmp'
client = db.connect(DBNAME,user='root',passwd=123456)
# load BPF program
b = BPF(text="""
#include <uapi/linux/ptrace.h>
#include <linux/blkdev.h>
struct val_t {
u32 pid;
char name[TASK_COMM_LEN];
u64 ts;
};
struct data_t {
u32 pid;
u64 rwflag;
u64 delta;
u64 sector;
u64 len;
u64 ts;
char disk_name[DISK_NAME_LEN];
char name[TASK_COMM_LEN];
};
BPF_HASH(infobyreq, struct request *, struct val_t);
BPF_PERF_OUTPUT(events);
// cache PID and comm by-req
int trace_pid_start(struct pt_regs *ctx, struct request *req)
{
struct val_t val = {};
if (bpf_get_current_comm(&val.name, sizeof(val.name)) == 0) {
val.pid = bpf_get_current_pid_tgid();
val.ts = bpf_ktime_get_ns();
infobyreq.update(&req, &val);
}
return 0;
}
// output
int trace_req_completion(struct pt_regs *ctx, struct request *req)
{
u64 delta;
u32 *pidp = 0;
struct val_t *valp;
struct data_t data = {};
u64 ts;
// fetch timestamp and calculate delta
ts = bpf_ktime_get_ns();
//if(data.delta < 1000000){
// return 0;
//}
valp = infobyreq.lookup(&req);
//data.delta = ts - valp->ts;
data.ts = ts/1000;
if (valp == 0) {
data.len = req->__data_len;
strcpy(data.name, "?");
} else {
data.delta = ts - valp->ts;
data.pid = valp->pid;
data.len = req->__data_len;
data.sector = req->__sector;
bpf_probe_read(&data.name, sizeof(data.name), valp->name);
struct gendisk *rq_disk = req->rq_disk;
bpf_probe_read(&data.disk_name, sizeof(data.disk_name),
rq_disk->disk_name);
}
#ifdef REQ_WRITE
data.rwflag = !!(req->cmd_flags & REQ_WRITE);
#elif defined(REQ_OP_SHIFT)
data.rwflag = !!((req->cmd_flags >> REQ_OP_SHIFT) == REQ_OP_WRITE);
#else
data.rwflag = !!((req->cmd_flags & REQ_OP_MASK) == REQ_OP_WRITE);
#endif
events.perf_submit(ctx, &data, sizeof(data));
infobyreq.delete(&req);
return 0;
}
""", debug=0)
# data structure from template
class lmp_data(object):
def __init__(self,a,b,c,d,e,f,g,h):
self.time = a
self.glob = b
self.comm = c
self.pid = d
self.disk = e
self.t = f
self.bytes = g
self.lat = h
data_struct = {"measurement":'HardDiskReadWriteTime',
"time":[],
"tags":['glob','comm','pid',],
"fields":['disk','t','bytes','lat']}
if BPF.get_kprobe_functions(b'blk_start_request'):
b.attach_kprobe(event="blk_start_request", fn_name="trace_pid_start")
b.attach_kprobe(event="blk_mq_start_request", fn_name="trace_pid_start")
b.attach_kprobe(event="blk_account_io_completion",
fn_name="trace_req_completion")
TASK_COMM_LEN = 16 # linux/sched.h
DISK_NAME_LEN = 32 # linux/genhd.h
# header
# print("%-14s %-14s %-6s %-7s %-2s %-22s %-10s %7s " % ("TIME(s)", "COMM", "PID",
# "DISK", "T", "SECTOR", "BYTES", "LAT(ms)"))
rwflg = ""
start_ts = 0
prev_ts = 0
delta = 0
# process event
def print_event(cpu, data, size):
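    # Perf-buffer callback: decode one block-I/O completion event, derive the
    # read/write flag, and write a measurement point to InfluxDB via write2db.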
event = b["events"].event(data)
val = -1
global start_ts
global prev_ts
global delta
if event.rwflag == 1:
rwflg = "W"
if event.rwflag == 0:
rwflg = "R"
if not re.match(b'\?', event.name):
val = event.sector
if start_ts == 0:
prev_ts = start_ts
if start_ts == 1:
delta = float(delta) + (event.ts - prev_ts)
# print("%-14.9f %-14.14s %-6s %-7s %-2s %-22s %-7s %7.2f " % (
# delta / 1000000, event.name.decode('utf-8', 'replace'), event.pid,
# event.disk_name.decode('utf-8', 'replace'), rwflg, val,
# event.len, float(event.delta) / 1000000))
test_data = lmp_data(datetime.now().isoformat(),'glob', event.name.decode('utf-8', 'replace'), event.pid,
event.disk_name.decode('utf-8', 'replace'), rwflg,
event.len, float(event.delta) / 1000000)
# print(event.pid, time)
write2db(data_struct, test_data, client)
prev_ts = event.ts
start_ts = 1
def quit(signum, frame):
sys.exit()
# loop with callback to print_event
b["events"].open_perf_buffer(print_event, page_cnt=64)
while 1:
try:
sleep(1)
signal.signal(signal.SIGINT, quit)
signal.signal(signal.SIGTERM, quit)
b.perf_buffer_poll()
print()
except Exception as exc:
print(exc)
# except KeyboardInterrupt:
# db.close()
# exit()
|
the-stack_0_16691 | # -*- coding: utf-8 -*-
'''
Functions for querying and modifying a user account and the groups to which it
belongs.
'''
from __future__ import absolute_import
# Import Python libs
import ctypes
import getpass
import logging
import os
import sys
# Import Salt libs
import salt.utils.path
import salt.utils.platform
from salt.exceptions import CommandExecutionError
from salt.utils.decorators.jinja import jinja_filter
# Import 3rd-party libs
from salt.ext import six
# Conditional imports
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
try:
import grp
HAS_GRP = True
except ImportError:
HAS_GRP = False
try:
import pysss
HAS_PYSSS = True
except ImportError:
HAS_PYSSS = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
log = logging.getLogger(__name__)
def get_user():
'''
Get the current user
'''
if HAS_PWD:
return pwd.getpwuid(os.geteuid()).pw_name
elif HAS_WIN_FUNCTIONS and salt.utils.win_functions.HAS_WIN32:
return salt.utils.win_functions.get_current_user()
else:
raise CommandExecutionError(
'Required external library (pwd or win32api) not installed')
@jinja_filter('get_uid')
def get_uid(user=None):
'''
Get the uid for a given user name. If no user given, the current euid will
be returned. If the user does not exist, None will be returned. On systems
which do not support pwd or os.geteuid, None will be returned.
'''
if not HAS_PWD:
return None
elif user is None:
try:
return os.geteuid()
except AttributeError:
return None
else:
try:
return pwd.getpwnam(user).pw_uid
except KeyError:
return None
def _win_user_token_is_admin(user_token):
'''
Using the win32 api, determine if the user with token 'user_token' has
administrator rights.
See MSDN entry here:
http://msdn.microsoft.com/en-us/library/aa376389(VS.85).aspx
'''
class SID_IDENTIFIER_AUTHORITY(ctypes.Structure):
_fields_ = [
("byte0", ctypes.c_byte),
("byte1", ctypes.c_byte),
("byte2", ctypes.c_byte),
("byte3", ctypes.c_byte),
("byte4", ctypes.c_byte),
("byte5", ctypes.c_byte),
]
nt_authority = SID_IDENTIFIER_AUTHORITY()
nt_authority.byte5 = 5
SECURITY_BUILTIN_DOMAIN_RID = 0x20
DOMAIN_ALIAS_RID_ADMINS = 0x220
administrators_group = ctypes.c_void_p()
if ctypes.windll.advapi32.AllocateAndInitializeSid(
ctypes.byref(nt_authority),
2,
SECURITY_BUILTIN_DOMAIN_RID,
DOMAIN_ALIAS_RID_ADMINS,
0, 0, 0, 0, 0, 0,
ctypes.byref(administrators_group)) == 0:
raise Exception("AllocateAndInitializeSid failed")
try:
is_admin = ctypes.wintypes.BOOL()
if ctypes.windll.advapi32.CheckTokenMembership(
user_token,
administrators_group,
ctypes.byref(is_admin)) == 0:
raise Exception("CheckTokenMembership failed")
return is_admin.value != 0
finally:
ctypes.windll.advapi32.FreeSid(administrators_group)
def _win_current_user_is_admin():
'''
ctypes.windll.shell32.IsUserAnAdmin() is intentionally avoided due to this
function being deprecated.
'''
return _win_user_token_is_admin(0)
def get_specific_user():
'''
Get a user name for publishing. If you find the user is "root" attempt to be
more specific
'''
user = get_user()
if salt.utils.platform.is_windows():
if _win_current_user_is_admin():
return 'sudo_{0}'.format(user)
else:
env_vars = ('SUDO_USER',)
if user == 'root':
for evar in env_vars:
if evar in os.environ:
return 'sudo_{0}'.format(os.environ[evar])
return user
def chugid(runas, group=None):
'''
Change the current process to belong to the specified user (and the groups
to which it belongs)
'''
uinfo = pwd.getpwnam(runas)
supgroups = []
supgroups_seen = set()
if group:
try:
target_pw_gid = grp.getgrnam(group).gr_gid
except KeyError as err:
raise CommandExecutionError(
'Failed to fetch the GID for {0}. Error: {1}'.format(
group, err
)
)
else:
target_pw_gid = uinfo.pw_gid
# The line below used to exclude the current user's primary gid.
# However, when root belongs to more than one group
# this causes root's primary group of '0' to be dropped from
# his grouplist. On FreeBSD, at least, this makes some
# command executions fail with 'access denied'.
#
# The Python documentation says that os.setgroups sets only
# the supplemental groups for a running process. On FreeBSD
# this does not appear to be strictly true.
group_list = get_group_dict(runas, include_default=True)
if sys.platform == 'darwin':
group_list = dict((k, v) for k, v in six.iteritems(group_list)
if not k.startswith('_'))
for group_name in group_list:
gid = group_list[group_name]
if (gid not in supgroups_seen
and not supgroups_seen.add(gid)):
supgroups.append(gid)
if os.getgid() != target_pw_gid:
try:
os.setgid(target_pw_gid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from gid {0} to {1}. Error: {2}'.format(
os.getgid(), target_pw_gid, err
)
)
# Set supplemental groups
if sorted(os.getgroups()) != sorted(supgroups):
try:
os.setgroups(supgroups)
except OSError as err:
raise CommandExecutionError(
'Failed to set supplemental groups to {0}. Error: {1}'.format(
supgroups, err
)
)
if os.getuid() != uinfo.pw_uid:
try:
os.setuid(uinfo.pw_uid)
except OSError as err:
raise CommandExecutionError(
'Failed to change from uid {0} to {1}. Error: {2}'.format(
os.getuid(), uinfo.pw_uid, err
)
)
def chugid_and_umask(runas, umask, group=None):
'''
Helper method for for subprocess.Popen to initialise uid/gid and umask
for the new process.
'''
set_runas = False
set_grp = False
current_user = getpass.getuser()
if runas and runas != current_user:
set_runas = True
runas_user = runas
else:
runas_user = current_user
current_grp = grp.getgrgid(pwd.getpwnam(getpass.getuser()).pw_gid).gr_name
if group and group != current_grp:
set_grp = True
runas_grp = group
else:
runas_grp = current_grp
if set_runas or set_grp:
chugid(runas_user, runas_grp)
if umask is not None:
os.umask(umask)
def get_default_group(user):
'''
Returns the specified user's default group. If the user doesn't exist, a
KeyError will be raised.
'''
return grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name \
if HAS_GRP and HAS_PWD \
else None
def get_group_list(user, include_default=True):
'''
Returns a list of all of the system group names of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
return []
group_names = None
ugroups = set()
if hasattr(os, 'getgrouplist'):
# Try os.getgrouplist, available in python >= 3.3
log.trace('Trying os.getgrouplist for \'%s\'', user)
try:
group_names = [
grp.getgrgid(grpid).gr_name for grpid in
os.getgrouplist(user, pwd.getpwnam(user).pw_gid)
]
except Exception:
pass
elif HAS_PYSSS:
# Try pysss.getgrouplist
log.trace('Trying pysss.getgrouplist for \'%s\'', user)
try:
group_names = list(pysss.getgrouplist(user))
except Exception:
pass
if group_names is None:
# Fall back to generic code
# Include the user's default group to match behavior of
# os.getgrouplist() and pysss.getgrouplist()
log.trace('Trying generic group list for \'%s\'', user)
group_names = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
try:
default_group = get_default_group(user)
if default_group not in group_names:
group_names.append(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
if group_names is not None:
ugroups.update(group_names)
if include_default is False:
# Historically, saltstack code for getting group lists did not
# include the default group. Some things may only want
# supplemental groups, so include_default=False omits the users
# default group.
try:
default_group = grp.getgrgid(pwd.getpwnam(user).pw_gid).gr_name
ugroups.remove(default_group)
except KeyError:
# If for some reason the user does not have a default group
pass
log.trace('Group list for user \'%s\': %s', user, sorted(ugroups))
return sorted(ugroups)
def get_group_dict(user=None, include_default=True):
'''
Returns a dict of all of the system groups as keys, and group ids
as values, of which the user is a member.
E.g.: {'staff': 501, 'sudo': 27}
'''
if HAS_GRP is False or HAS_PWD is False:
return {}
group_dict = {}
group_names = get_group_list(user, include_default=include_default)
for group in group_names:
group_dict.update({group: grp.getgrnam(group).gr_gid})
return group_dict
def get_gid_list(user, include_default=True):
'''
Returns a list of all of the system group IDs of which the user
is a member.
'''
if HAS_GRP is False or HAS_PWD is False:
return []
gid_list = list(
six.itervalues(
get_group_dict(user, include_default=include_default)
)
)
return sorted(set(gid_list))
def get_gid(group=None):
'''
Get the gid for a given group name. If no group given, the current egid
will be returned. If the group does not exist, None will be returned. On
systems which do not support grp or os.getegid it will return None.
'''
if not HAS_GRP:
return None
if group is None:
try:
return os.getegid()
except AttributeError:
return None
else:
try:
return grp.getgrnam(group).gr_gid
except KeyError:
return None
|
the-stack_0_16692 | """Sensor for Last.fm account status."""
import hashlib
import logging
import re
import pylast as lastfm
from pylast import WSError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_LAST_PLAYED = "last_played"
ATTR_PLAY_COUNT = "play_count"
ATTR_TOP_PLAYED = "top_played"
ATTRIBUTION = "Data provided by Last.fm"
CONF_USERS = "users"
ICON = "mdi:lastfm"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_USERS, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Last.fm sensor platform."""
api_key = config[CONF_API_KEY]
users = config.get(CONF_USERS)
lastfm_api = lastfm.LastFMNetwork(api_key=api_key)
entities = []
for username in users:
try:
lastfm_api.get_user(username).get_image()
entities.append(LastfmSensor(username, lastfm_api))
except WSError as error:
_LOGGER.error(error)
return
add_entities(entities, True)
class LastfmSensor(Entity):
"""A class for the Last.fm account."""
def __init__(self, user, lastfm_api):
"""Initialize the sensor."""
self._unique_id = hashlib.sha256(user.encode("utf-8")).hexdigest()
self._user = lastfm_api.get_user(user)
self._name = user
self._lastfm = lastfm_api
self._state = "Not Scrobbling"
self._playcount = None
self._lastplayed = None
self._topplayed = None
self._cover = None
@property
def unique_id(self):
"""Return the unique ID of the sensor."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Update device state."""
self._cover = self._user.get_image()
self._playcount = self._user.get_playcount()
last = self._user.get_recent_tracks(limit=2)[0]
self._lastplayed = f"{last.track.artist} - {last.track.title}"
top = self._user.get_top_tracks(limit=1)[0]
toptitle = re.search("', '(.+?)',", str(top))
topartist = re.search("'(.+?)',", str(top))
self._topplayed = "{} - {}".format(topartist.group(1), toptitle.group(1))
if self._user.get_now_playing() is None:
self._state = "Not Scrobbling"
return
now = self._user.get_now_playing()
self._state = f"{now.artist} - {now.title}"
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_PLAYED: self._lastplayed,
ATTR_PLAY_COUNT: self._playcount,
ATTR_TOP_PLAYED: self._topplayed,
}
@property
def entity_picture(self):
"""Avatar of the user."""
return self._cover
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
|
the-stack_0_16693 | import json
import io
def create_snippet(file_path, first_n=5):
with open(file_path, 'r') as f:
return [next(f) for _ in range(first_n)]
def create_jtr_snippet(file_path):
return convert_simplequestions(file_path, first_n=5)
def convert_simplequestions(file_path, first_n=None):
instances = []
f = io.open(file_path, "r")
i = 0
for l in f:
i += 1
if first_n and i > first_n:
break
subj, rel, obj, qu = l.strip().split("\t")
support = [" ".join([subj, rel])]
qdict = {
'question': qu,
'answers': [obj]
}
qset_dict = {
'support': [{'text': supp} for supp in support],
'questions': [qdict]
}
instances.append(qset_dict)
corpus_dict = {
'meta': "simpleQuestions.json",
'instances': instances
}
f.close()
return corpus_dict
def main():
# some tests:
# raw_data = load_cbt_file(path=None, part='valid', mode='NE')
# instances = split_cbt(raw_data)
# = parse_cbt_example(instances[0])
import sys
if len(sys.argv) == 3:
# corpus = create_jtr_snippet(sys.argv[1])
# out = create_snippet(sys.argv[1])
# with open(sys.argv[2], 'w') as outfile:
# outfile.writelines(out)
corpus = convert_simplequestions(sys.argv[1])
with open(sys.argv[2], 'w') as outfile:
json.dump(corpus, outfile, indent=2)
else:
print("Usage: python3 simpleQuestions2jtr.py path/to/simpleQuestions save/to/simpleQuestions.jtr.json")
if __name__ == "__main__":
main()
|
the-stack_0_16694 | # Copyright 2019 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add Secret Consumers table
Revision ID: 0f8c192a061f
Revises: 39cf2e645cba
Create Date: 2019-08-19 12:03:08.567230
"""
# revision identifiers, used by Alembic.
revision = "0f8c192a061f"
down_revision = "39cf2e645cba"
from alembic import op
import sqlalchemy as sa
def upgrade():
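    # Create the secret_consumer_metadata table only if it does not already exist,
    # so the upgrade is safe to re-run.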
ctx = op.get_context()
con = op.get_bind()
table_exists = ctx.dialect.has_table(con.engine,
"secret_consumer_metadata")
if not table_exists:
op.create_table(
"secret_consumer_metadata",
# ModelBase
sa.Column("id", sa.String(length=36), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=False),
sa.Column("updated_at", sa.DateTime(), nullable=False),
sa.Column("deleted_at", sa.DateTime(), nullable=True),
sa.Column("deleted", sa.Boolean(), nullable=False),
sa.Column("status", sa.String(length=20), nullable=False),
# SecretConsumerMetadatum
sa.Column("secret_id", sa.String(36), nullable=False),
sa.Column("project_id", sa.String(36), nullable=False),
sa.Column("service", sa.String(255), nullable=False),
sa.Column("resource_type", sa.String(255), nullable=False),
sa.Column("resource_id", sa.String(36), nullable=False),
# Constraints and Indexes
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["secret_id"], ["secrets.id"]),
sa.UniqueConstraint(
"secret_id", "resource_id", name="_secret_consumer_resource_uc"
),
sa.Index("ix_secret_consumer_metadata_secret_id", "secret_id"),
sa.Index("ix_secret_consumer_metadata_resource_id", "resource_id"),
)
|
the-stack_0_16695 | #add parent dir to find package. Only needed for source code build, pip install doesn't need it.
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0,parentdir)
import gym
from pybullet_envs.bullet.kukaGymEnv import KukaGymEnv
from baselines import deepq
def main():
env = KukaGymEnv(renders=True, isDiscrete=True)
act = deepq.load("kuka_model.pkl")
print(act)
while True:
obs, done = env.reset(), False
print("===================================")
print("obs")
print(obs)
episode_rew = 0
while not done:
env.render()
obs, rew, done, _ = env.step(act(obs[None])[0])
episode_rew += rew
print("Episode reward", episode_rew)
if __name__ == '__main__':
main()
|
the-stack_0_16696 | from dataclasses import dataclass
from typing import FrozenSet, Callable, List
import heapq
from contextlib import contextmanager
from time import time
@contextmanager
def timing(description: str) -> None:
start = time()
yield
elapsed_time = (time() - start) * 1000
print(f"{description}: {elapsed_time}ms")
@dataclass
class Action:
name: str
precondition: FrozenSet[str]
positive_effect: FrozenSet[str]
negative_effect: FrozenSet[str]
@dataclass
class Problem:
actions: List[Action]
init: FrozenSet[str]
goal: FrozenSet[str]
class Solver():
def __init__(self, heuristic: Callable[[FrozenSet[str]], float] = None):
self.heuristic = heuristic
if heuristic is None:
self.heuristic = lambda x: 0
def solve(self, problem: Problem) -> List[str]:
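        # Best-first (A*-style) search over frozensets of facts; the priority is
        # path length plus the heuristic estimate, with a closed set to avoid revisits.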
open_list = [(self.heuristic(set(problem.init)), problem.init, [])]
closed_list = set()
while open_list:
_, current, path = heapq.heappop(open_list)
if current not in closed_list:
closed_list.add(current)
if problem.goal.issubset(current):
return path
for act in problem.actions:
if act.precondition.issubset(current):
child = current.difference(act.negative_effect).union(act.positive_effect)
if child not in closed_list:
child_f = len(path) + 1 + self.heuristic(set(child))
heapq.heappush(open_list, (child_f, child, path+[act.name]))
def generate_problem(size: int) -> Problem:
actions = [Action('mk_y', frozenset(['x']), frozenset(['y']), frozenset(['x'])),
Action('reset_x', frozenset([]), frozenset(['x']), frozenset([]))]
goal = []
for i in range(size):
name = f"v{i}"
goal.append(name)
actions.append(Action(f'mk_{name}', frozenset(['y']), frozenset([name]), frozenset(['y'])),)
init = frozenset(['x'])
return Problem(actions, init, frozenset(goal))
def main():
size = 15
problem = generate_problem(size)
def heuristic(state: FrozenSet[str]) -> float:
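        # Number of goal atoms not yet satisfied by the state.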
return size - len(state & problem.goal)
with timing("Without Heuristic"):
solver = Solver(heuristic=None)
plan = solver.solve(problem)
print(plan)
with timing("With Heuristic"):
solver = Solver(heuristic=heuristic)
plan = solver.solve(problem)
print(plan)
if __name__ == '__main__':
main()
|
the-stack_0_16698 | # -*- coding: utf-8 -*-
import base64
import datetime
import hashlib
import io
import uuid
from lxml import etree, builder
DS = builder.ElementMaker(
namespace="http://www.w3.org/2000/09/xmldsig#",
nsmap={
"ds": "http://www.w3.org/2000/09/xmldsig#",
},
)
CanonicalizationMethod = DS.CanonicalizationMethod
DigestMethod = DS.DigestMethod
DigestValue = DS.DigestValue
KeyInfo = DS.KeyInfo
Object = DS.Object
Reference = DS.Reference
Signature = DS.Signature
SignatureMethod = DS.SignatureMethod
SignatureValue = DS.SignatureValue
SignedInfo = DS.SignedInfo
Transform = DS.Transform
Transforms = DS.Transforms
X509Certificate = DS.X509Certificate
X509Data = DS.X509Data
X509IssuerName = DS.X509IssuerName
X509SerialNumber = DS.X509SerialNumber
XADES = builder.ElementMaker(
namespace="http://uri.etsi.org/01903/v1.3.2#",
nsmap={
"xades": "http://uri.etsi.org/01903/v1.3.2#",
"ds": "http://www.w3.org/2000/09/xmldsig#",
},
)
Cert = XADES.Cert
CertDigest = XADES.CertDigest
DataObjectFormat = XADES.DataObjectFormat
Description = XADES.Description
DocumentationReference = XADES.DocumentationReference
DocumentationReferences = XADES.DocumentationReferences
Identifier = XADES.Identifier
IssuerSerial = XADES.IssuerSerial
MimeType = XADES.MimeType
ObjectIdentifier = XADES.ObjectIdentifier
QualifyingProperties = XADES.QualifyingProperties
SignedDataObjectProperties = XADES.SignedDataObjectProperties
SignedProperties = XADES.SignedProperties
SignedSignatureProperties = XADES.SignedSignatureProperties
SigningCertificate = XADES.SigningCertificate
SigningTime = XADES.SigningTime
UnsignedProperties = XADES.UnsignedProperties
def ensure_str(x, encoding="utf-8", none_ok=False):
if none_ok is True and x is None:
return x
if not isinstance(x, str):
x = x.decode(encoding)
return x
class BES:
def __init__(self):
self.guid = str(uuid.uuid1())
self.time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
def sha256(self, data):
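        # Base64-encoded SHA-256 digest, as used for the ds:DigestValue elements.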
h = hashlib.sha256(data).digest()
return ensure_str(base64.b64encode(h))
def _c14n(self, nodes, algorithm, inclusive_ns_prefixes=None):
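        # XML canonicalization (C14N); the exclusive and with-comments variants are
        # selected from the algorithm URI.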
exclusive, with_comments = False, False
if algorithm.startswith("http://www.w3.org/2001/10/xml-exc-c14n#"):
exclusive = True
if algorithm.endswith("#WithComments"):
with_comments = True
if not isinstance(nodes, list):
nodes = [nodes]
c14n = b""
for node in nodes:
c14n += etree.tostring(node, method="c14n", exclusive=exclusive, with_comments=with_comments,
inclusive_ns_prefixes=inclusive_ns_prefixes) # TODO: optimize if needed
if exclusive is False:
# TODO: there must be a nicer way to do this. See also:
# http://www.w3.org/TR/xml-c14n, "namespace axis"
# http://www.w3.org/TR/xml-c14n2/#sec-Namespace-Processing
c14n = c14n.replace(b' xmlns=""', b'')
return c14n
def build(self, fname, data, smime, cert, certcontent, signproc, base64encode=True, withcomments=False):
swithcomments = ""
if withcomments:
swithcomments = "#WithComments"
if base64encode:
data = ensure_str(base64.b64encode(data))
signedobj = Object(
data,
Encoding="http://www.w3.org/2000/09/xmldsig#base64",
MimeType=smime,
Id="Object1_" + self.guid,
)
elif 0:
signedobj = Object(
data,
MimeType='text/xml',
Id="Object1_" + self.guid,
)
else:
signedobj = Object(
MimeType='text/xml',
Id="Object1_" + self.guid,
)
tree = etree.parse(io.BytesIO(data))
signedobj.append(tree.getroot())
certdigest = self.sha256(certcontent)
b64 = b''.join(base64.encodebytes(certcontent).split())
certcontent = []
for i in range(0, len(b64), 64):
certcontent.append(b64[i:i + 64])
certcontent = b'\n'.join(certcontent)
certserialnumber = '%d' % cert.serial_number
certissuer = []
for k, v in (
('CN', 'common_name'),
('O', 'organization_name'),
('C', 'country_name'),
('serialNumber', 'serial_number'),
):
try:
v = cert.issuer.native[v]
certissuer.append('%s=%s' % (k, v))
except:
pass
certissuer = ','.join(certissuer)
signedprop = SignedProperties(
SignedSignatureProperties(
SigningTime(
self.time
),
SigningCertificate(
Cert(
CertDigest(
DigestMethod(
Algorithm="http://www.w3.org/2001/04/xmlenc#sha256",
),
DigestValue(
certdigest,
),
),
IssuerSerial(
X509IssuerName(
certissuer,
),
X509SerialNumber(
certserialnumber,
),
),
),
),
Id="SignedSignatureProperties_" + self.guid + "_04",
),
SignedDataObjectProperties(
DataObjectFormat(
Description("""\
MIME-Version: 1.0
Content-Type: %s
Content-Transfer-Encoding: binary
Content-Disposition: filename="%s"\
""" % (smime, fname),
),
ObjectIdentifier(
Identifier(
"http://www.certum.pl/OIDAsURI/signedFile/1.2.616.1.113527.3.1.1.3.1",
Qualifier="OIDAsURI",
),
Description(
u"Opis formatu dokumentu oraz jego pełna nazwa",
),
DocumentationReferences(
DocumentationReference(
"http://www.certum.pl/OIDAsURI/signedFile.pdf",
),
),
),
MimeType(
smime,
),
ObjectReference="#Reference1_" + self.guid + "_29",
),
Id="SignedDataObjectProperties_" + self.guid + "_45",
),
Id="SignedProperties_" + self.guid + "_40",
)
canonicalizedxml = self._c14n(signedobj, '')
digestvalue1 = self.sha256(canonicalizedxml)
canonicalizedxml = self._c14n(signedprop, '')
digestvalue2 = self.sha256(canonicalizedxml)
signedinfo = SignedInfo(
CanonicalizationMethod(
Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315",
),
SignatureMethod(
Algorithm="http://www.w3.org/2001/04/xmldsig-more#rsa-sha256",
),
Reference(
Transforms(
Transform(
Algorithm="http://www.w3.org/TR/2001/REC-xml-c14n-20010315" + swithcomments,
)
),
DigestMethod(
Algorithm="http://www.w3.org/2001/04/xmlenc#sha256",
),
DigestValue(
digestvalue1,
),
URI="#Object1_" + self.guid,
Id="Reference1_" + self.guid + "_29",
),
Reference(
DigestMethod(
Algorithm="http://www.w3.org/2001/04/xmlenc#sha256",
),
DigestValue(
digestvalue2,
),
Id="SignedProperties-Reference_" + self.guid + "_26",
Type="http://uri.etsi.org/01903#SignedProperties",
URI="#SignedProperties_" + self.guid + "_40",
),
Id="SignedInfo_" + self.guid + "_4f",
)
canonicalizedxml = self._c14n(signedinfo, '')
signature = signproc(canonicalizedxml, 'sha256')
actualdigestencoded = ensure_str(base64.b64encode(signature))
digestvalue3 = []
for i in range(0, len(actualdigestencoded), 64):
digestvalue3.append(actualdigestencoded[i:i + 64])
digestvalue3 = '\n'.join(digestvalue3)
DOC = Signature(
signedinfo,
SignatureValue(
digestvalue3,
Id="SignatureValue_" + self.guid + "_5c",
),
KeyInfo(
X509Data(
X509Certificate(
certcontent.decode()
),
),
Id="KeyInfo_" + self.guid + "_2a",
),
Object(
QualifyingProperties(
signedprop,
UnsignedProperties(
Id="UnsignedProperties_" + self.guid + "_5b",
),
Id="QualifyingProperties_" + self.guid + "_4d",
Target="#Signature_" + self.guid + "_17",
),
),
signedobj,
Id="Signature_" + self.guid + "_17",
)
return DOC
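# Usage sketch (illustrative only; the names below are assumptions, not part of
# this module). `cert` must expose .serial_number and .issuer.native (an
# asn1crypto-style certificate object), `certcontent` is the DER-encoded
# certificate bytes, and `signproc(data, 'sha256')` must return the raw
# signature bytes produced with the matching private key:
#
#   signer = XadesContext()  # hypothetical instance of this class
#   doc = signer.build('invoice.xml', xml_bytes, 'text/xml',
#                      cert, cert_der, signproc, base64encode=True)
#   signed_bytes = etree.tostring(doc)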
|
the-stack_0_16700 |
from .backend_template import BackendTemplate
import warnings
try:
import pandas as pd
# WHEN CHECKING FOR THE TYPE OF AN OBJECT IN A SERIES BEWARE THAT:
#
# series = pd.Series([1, 2, 3, 4])
#
# for s in series:
# print(str(type(s)))
# outputs:
# `<class 'int'>
# `<class 'int'>
# `<class 'int'>
# `<class 'int'>
#
# str(type(series[2]))
# outputs:
# "<class 'numpy.int64'>"
def is_consistent(series: pd.Series) -> bool:
"""Check that all the values in the series are of the same type."""
if series.dtype != "object":
return True
expected_type = str(type(series.values[0]))
return all(
expected_type == str(type(s))
for s in series
)
def get_vector_dtype(series: pd.Series) -> str:
"""Get which type to use to serialize the type of the series"""
t = str(series.dtype)
if t == "object":
return "str"
return t
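# Illustrative behaviour of the two helpers above (plain pandas semantics,
# shown here only as a sketch):
#   is_consistent(pd.Series([1, 2, 3])) -> True (int64 dtype)
#   is_consistent(pd.Series([1, "a", 3.0])) -> False (object dtype, mixed types)
#   get_vector_dtype(pd.Series(["a", "b", "c"])) -> "str"
#   get_vector_dtype(pd.Series([1.0, 2.0])) -> "float64"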
common_message = (
" contains values of multiple types therefore the data will be saved as required"
" but we don't guarantee that"
" they will be loaded as the same types as pandas does not support this.\n"
"Consider using pickle (.pkl) or compress pickle (.pkl.gz, ...) to cache this complex type"
" in a consistent manner."
)
class PandasCsvBackend(BackendTemplate):
SUPPORTED_EXTENSIONS = {
".csv":",",
".csv.gz":",",
".csv.bz2":",",
".csv.xz":",",
".csv.zip":",",
".tsv":"\t",
".tsv.gz":"\t",
".tsv.bz2":"\t",
".tsv.xz":"\t",
".tsv.zip":"\t",
}
def __init__(self, load_kwargs, dump_kwargs):
load_kwargs = load_kwargs.copy()
load_kwargs.setdefault("index_col", 0)
super(PandasCsvBackend, self).__init__(load_kwargs, dump_kwargs)
@staticmethod
def support_path(path:str) -> bool:
return any(
path.endswith(extension)
for extension in PandasCsvBackend.SUPPORTED_EXTENSIONS
)
@staticmethod
def can_deserialize(metadata: dict, path:str) -> bool:
return PandasCsvBackend.support_path(path) and metadata.get("type", None) == "pandas"
@staticmethod
def can_serialize(obj_to_serialize: object, path:str) -> bool:
return PandasCsvBackend.support_path(path) and isinstance(obj_to_serialize, pd.DataFrame)
def dump(self, obj_to_serialize: pd.DataFrame, path:str) -> dict:
for column in obj_to_serialize.columns:
if not is_consistent(obj_to_serialize[column]):
warnings.warn("The column '{}'".format(column) + common_message )
if not is_consistent(obj_to_serialize.index):
warnings.warn("The index" + common_message)
if not is_consistent(obj_to_serialize.columns):
warnings.warn("The column names" + common_message)
obj_to_serialize.to_csv(
path,
sep=self.SUPPORTED_EXTENSIONS[
next(
x
for x in self.SUPPORTED_EXTENSIONS
if path.endswith(x)
)
],
**self._dump_kwargs
)
# Return the types of the columns to be saved as metadata
return {
"type":"pandas",
"columns_types":{
column:get_vector_dtype(obj_to_serialize[column])
for column in obj_to_serialize.columns
},
"index_type":get_vector_dtype(obj_to_serialize.index),
"columns_names_type":get_vector_dtype(obj_to_serialize.columns),
}
def load(self, metadata:dict, path:str) -> object:
df = pd.read_csv(
path,
sep=self.SUPPORTED_EXTENSIONS[
next(
x
for x in self.SUPPORTED_EXTENSIONS
if path.endswith(x)
)
],
**self._load_kwargs
)
# Convert back the types of the columns to the original ones
df = df.astype(metadata["columns_types"])
df.index = df.index.astype(metadata["index_type"])
df.columns = df.columns.astype(metadata["columns_names_type"])
return df
except ModuleNotFoundError:
PandasCsvBackend = None |
the-stack_0_16701 | import psycopg2 as psy
import sqlalchemy
import datetime as dt
from sqlalchemy import (
Table,
Column,
Index,
Integer,
String,
Text,
Boolean,
ForeignKey,
UniqueConstraint,
)
from sqlalchemy import text
from sqlalchemy.dialects.postgresql import JSON,JSONB
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.exc import ProgrammingError
from sqlalchemy.orm import (
relationship,
)
Base = declarative_base()
###### Table defs ###############################
# association between curators and studies
curator_study_table = Table('curator_study_map', Base.metadata,
Column('study_id', String, ForeignKey('study.id'), primary_key=True),
Column('curator_id', Integer, ForeignKey('curator.id'), primary_key=True)
)
# association between trees and otus
tree_otu_table = Table('tree_otu_map', Base.metadata,
Column('ott_id', Integer, ForeignKey('taxonomy.id'), primary_key=True),
Column('tree_id', Integer, ForeignKey('tree.id'), primary_key=True)
)
class Study(Base):
__tablename__ = 'study'
# The studyID is of the form prefix_id, so String, not Int.
id = Column(String, primary_key=True, index=True)
year = Column(Integer)
data = Column(JSONB)
#trees = relationship('Tree',backref='study')
# many-to-many study<-->curator relationship
curators = relationship('Curator',
secondary=curator_study_table,
back_populates='studies')
class Tree(Base):
__tablename__ = 'tree'
__table_args__ = (
UniqueConstraint('id','study_id'),
)
id = Column(Integer,primary_key=True)
tree_id = Column(String, nullable=False)
data = Column(JSONB)
study_id = Column(String, ForeignKey("study.id"), nullable=False)
ntips = Column(Integer)
proposed = Column(Boolean)
treebase_id = Column(String)
# many-to-many tree<-->otu relationship
otus = relationship('Taxonomy',
secondary=tree_otu_table,
back_populates='trees')
class Curator(Base):
__tablename__ = 'curator'
id = Column(Integer,primary_key=True)
name = Column(String,nullable=False,unique=True)
# many-to-many study<-->curator relationship
studies = relationship('Study',
secondary=curator_study_table,
back_populates='curators')
class Taxonomy(Base):
__tablename__ = 'taxonomy'
id = Column(Integer, primary_key=True)
name = Column(String,nullable=False)
parent = Column(Integer)
trees = relationship('Tree',
secondary=tree_otu_table,
back_populates='otus')
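if __name__ == "__main__":
    # Minimal usage sketch. The DSN below is a placeholder (the JSONB columns
    # require PostgreSQL); replace it with your own connection string.
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine("postgresql://user:password@localhost/phylesystem")  # placeholder DSN
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    # Create a study with one curator and persist it
    study = Study(id="ot_123", year=2019, data={"title": "example study"})
    study.curators.append(Curator(name="Jane Doe"))
    session.add(study)
    session.commit()
    print(session.query(Study).count(), "studies stored")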
|
the-stack_0_16702 | # -*- coding: utf-8 -*-
"""The filter file CLI arguments helper."""
from __future__ import unicode_literals
import os
from plaso.cli import tools
from plaso.cli.helpers import interface
from plaso.cli.helpers import manager
from plaso.lib import errors
class FilterFileArgumentsHelper(interface.ArgumentsHelper):
"""Filter file CLI arguments helper."""
NAME = 'filter_file'
DESCRIPTION = 'Filter file command line arguments.'
@classmethod
def AddArguments(cls, argument_group):
"""Adds command line arguments to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group.
"""
argument_group.add_argument(
'-f', '--file_filter', '--file-filter', dest='file_filter',
action='store', type=str, default=None, help=(
'List of files to include for targeted collection of files to '
'parse, one line per file path, setup is /path|file - where each '
'element can contain either a variable set in the preprocessing '
'stage or a regular expression.'))
@classmethod
def ParseOptions(cls, options, configuration_object):
"""Parses and validates options.
Args:
options (argparse.Namespace): parser options.
configuration_object (CLITool): object to be configured by the argument
helper.
Raises:
BadConfigObject: when the configuration object is of the wrong type.
"""
if not isinstance(configuration_object, tools.CLITool):
raise errors.BadConfigObject(
'Configuration object is not an instance of CLITool')
filter_file = cls._ParseStringOption(options, 'file_filter')
# Search the data location for the filter file.
if filter_file and not os.path.isfile(filter_file):
data_location = getattr(configuration_object, '_data_location', None)
if data_location:
filter_file_basename = os.path.basename(filter_file)
filter_file_path = os.path.join(data_location, filter_file_basename)
if os.path.isfile(filter_file_path):
filter_file = filter_file_path
if filter_file and not os.path.isfile(filter_file):
raise errors.BadConfigOption(
'No such collection filter file: {0:s}.'.format(filter_file))
setattr(configuration_object, '_filter_file', filter_file)
manager.ArgumentHelperManager.RegisterHelper(FilterFileArgumentsHelper)
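# Usage sketch (illustrative; `tool` stands for a configured tools.CLITool
# subclass instance provided elsewhere by plaso):
#   parser = argparse.ArgumentParser()
#   FilterFileArgumentsHelper.AddArguments(parser)
#   options = parser.parse_args(['--file_filter', 'filter_windows.txt'])
#   FilterFileArgumentsHelper.ParseOptions(options, tool)
#   # tool._filter_file now holds the resolved collection filter path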
|
the-stack_0_16704 | # (c) 2014, James Tanner <[email protected]>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import textwrap
import traceback
import yaml
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.loader import module_loader, action_loader, lookup_loader, callback_loader, cache_loader, \
vars_loader, connection_loader, strategy_loader, PluginLoader
from ansible.utils import plugin_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
''' displays information on modules installed in Ansible libraries.
It displays a terse listing of plugins and their short descriptions,
provides a printout of their DOCUMENTATION strings,
and it can create a short "snippet" which can be pasted into a playbook. '''
def __init__(self, args):
super(DocCLI, self).__init__(args)
self.plugin_list = set()
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [-l|-s] [options] [-t <plugin type>] [plugin]',
module_opts=True,
desc="plugin documentation tool",
epilog="See man pages for Ansible CLI options or website for tutorials https://docs.ansible.com"
)
self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
help='List available plugins')
self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
help='Show playbook snippet for specified plugin(s)')
self.parser.add_option("-a", "--all", action="store_true", default=False, dest='all_plugins',
help='**For internal testing only** Show documentation for all plugins.')
self.parser.add_option("-t", "--type", action="store", default='module', dest='type', type='choice',
help='Choose which plugin type (defaults to "module")',
choices=['cache', 'callback', 'connection', 'inventory', 'lookup', 'module', 'strategy', 'vars'])
super(DocCLI, self).parse()
if [self.options.all_plugins, self.options.list_dir, self.options.show_snippet].count(True) > 1:
raise AnsibleOptionsError("Only one of -l, -s or -a can be used at the same time.")
display.verbosity = self.options.verbosity
def run(self):
super(DocCLI, self).run()
plugin_type = self.options.type
# choose plugin type
if plugin_type == 'cache':
loader = cache_loader
elif plugin_type == 'callback':
loader = callback_loader
elif plugin_type == 'connection':
loader = connection_loader
elif plugin_type == 'lookup':
loader = lookup_loader
elif plugin_type == 'strategy':
loader = strategy_loader
elif plugin_type == 'vars':
loader = vars_loader
elif plugin_type == 'inventory':
loader = PluginLoader('InventoryModule', 'ansible.plugins.inventory', C.DEFAULT_INVENTORY_PLUGIN_PATH, 'inventory_plugins')
else:
loader = module_loader
# add to plugin path from command line
if self.options.module_path:
for path in self.options.module_path:
if path:
loader.add_directory(path)
# save only top level paths for errors
search_paths = DocCLI.print_paths(loader)
loader._paths = None # reset so we can use subdirs below
# list plugins for type
if self.options.list_dir:
paths = loader._get_paths()
for path in paths:
self.find_plugins(path, plugin_type)
self.pager(self.get_plugin_list_text(loader))
return 0
# process all plugins of type
if self.options.all_plugins:
paths = loader._get_paths()
for path in paths:
self.find_plugins(path, plugin_type)
self.args = sorted(set(self.plugin_list))
if len(self.args) == 0:
raise AnsibleOptionsError("Incorrect options passed")
# process command line list
text = ''
for plugin in self.args:
try:
# if the plugin lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
display.warning("%s %s not found in:\n%s\n" % (plugin_type, plugin, search_paths))
continue
if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
continue
try:
doc, plainexamples, returndocs, metadata = plugin_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
except:
display.vvv(traceback.format_exc())
display.error("%s %s has a documentation error formatting or is missing documentation." % (plugin_type, plugin))
continue
if doc is not None:
# assign from other sections
doc['plainexamples'] = plainexamples
doc['returndocs'] = returndocs
doc['metadata'] = metadata
# generate extra data
if plugin_type == 'module':
# is there corresponding action plugin?
if plugin in action_loader:
doc['action'] = True
else:
doc['action'] = False
doc['filename'] = filename
doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
if 'docuri' in doc:
doc['docuri'] = doc[plugin_type].replace('_', '-')
if self.options.show_snippet and plugin_type == 'module':
text += self.get_snippet_text(doc)
else:
text += self.get_man_text(doc)
else:
# this typically means we couldn't even parse the docstring, not just that the YAML is busted,
# probably a quoting issue.
raise AnsibleError("Parsing produced an empty object.")
except Exception as e:
display.vvv(traceback.format_exc())
raise AnsibleError("%s %s missing documentation (or could not parse documentation): %s\n" % (plugin_type, plugin, str(e)))
if text:
self.pager(text)
return 0
def find_plugins(self, path, ptype):
display.vvvv("Searching %s for plugins" % path)
if not os.path.exists(path):
display.vvvv("%s does not exist" % path)
return
bkey = ptype.upper()
for plugin in os.listdir(path):
display.vvvv("Found %s" % plugin)
full_path = '/'.join([path, plugin])
if plugin.startswith('.'):
continue
elif os.path.isdir(full_path):
continue
elif any(plugin.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif plugin.startswith('__'):
continue
elif plugin in C.IGNORE_FILES:
continue
elif plugin.startswith('_'):
if os.path.islink(full_path): # avoids aliases
continue
plugin = os.path.splitext(plugin)[0] # removes the extension
plugin = plugin.lstrip('_') # remove underscore from deprecated plugins
if plugin not in plugin_docs.BLACKLIST.get(bkey, ()):
self.plugin_list.add(plugin)
display.vvvv("Added %s" % plugin)
def get_plugin_list_text(self, loader):
columns = display.columns
displace = max(len(x) for x in self.plugin_list)
linelimit = columns - displace - 5
text = []
deprecated = []
for plugin in sorted(self.plugin_list):
try:
# if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
filename = loader.find_plugin(plugin, mod_type='.py', ignore_deprecated=True, check_aliases=True)
if filename is None:
continue
if filename.endswith(".ps1"):
continue
if os.path.isdir(filename):
continue
doc = None
try:
doc, plainexamples, returndocs, metadata = plugin_docs.get_docstring(filename)
except:
display.warning("%s has a documentation formatting error" % plugin)
if not doc or not isinstance(doc, dict):
desc = 'UNDOCUMENTED'
display.warning("%s parsing did not produce documentation." % plugin)
else:
desc = self.tty_ify(doc.get('short_description', 'INVALID SHORT DESCRIPTION').strip())
if len(desc) > linelimit:
desc = desc[:linelimit] + '...'
if plugin.startswith('_'): # Handle deprecated
deprecated.append("%-*s %-*.*s" % (displace, plugin[1:], linelimit, len(desc), desc))
else:
text.append("%-*s %-*.*s" % (displace, plugin, linelimit, len(desc), desc))
except Exception as e:
raise AnsibleError("Failed reading docs at %s: %s" % (plugin, to_native(e)))
if len(deprecated) > 0:
text.append("\nDEPRECATED:")
text.extend(deprecated)
return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths(subdirs=False):
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" %s:" % (doc['module']))
pad = 31
subdent = " " * pad
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
if isinstance(opt['description'], string_types):
desc = CLI.tty_ify(opt['description'])
else:
desc = CLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
desc = "(required) %s" % desc
o = '%s:' % o
text.append(" %-20s # %s" % (o, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def _dump_yaml(self, struct, indent):
return CLI.tty_ify('\n'.join([indent + line for line in yaml.dump(struct, default_flow_style=False, Dumper=AnsibleDumper).split('\n')]))
def add_fields(self, text, fields, limit, opt_indent):
for o in sorted(fields):
opt = fields[o]
required = opt.pop('required', False)
if not isinstance(required, bool):
raise AnsibleError("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
for entry in opt['description']:
text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
else:
text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
del opt['description']
aliases = ''
if 'aliases' in opt:
if len(opt['aliases']) > 0:
aliases = "(Aliases: " + ", ".join(str(i) for i in opt['aliases']) + ")"
del opt['aliases']
choices = ''
if 'choices' in opt:
if len(opt['choices']) > 0:
choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
del opt['choices']
default = ''
if 'default' in opt or not required:
default = "[Default: %s" % str(opt.pop('default', '(null)')) + "]"
text.append(textwrap.fill(CLI.tty_ify(aliases + choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'options' in opt:
text.append("%soptions:\n" % opt_indent)
self.add_fields(text, opt.pop('options'), limit, opt_indent + opt_indent)
if 'spec' in opt:
text.append("%sspec:\n" % opt_indent)
self.add_fields(text, opt.pop('spec'), limit, opt_indent + opt_indent)
conf = {}
for config in ('env', 'ini', 'yaml', 'vars'):
if config in opt and opt[config]:
conf[config] = opt.pop(config)
if conf:
text.append(self._dump_yaml({'set_via': conf}, opt_indent))
for k in sorted(opt):
if k.startswith('_'):
continue
if isinstance(opt[k], string_types):
text.append('%s%s: %s' % (opt_indent, k, textwrap.fill(CLI.tty_ify(opt[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(opt[k], (list, tuple)):
text.append(CLI.tty_ify('%s%s: %s' % (opt_indent, k, ', '.join(opt[k]))))
else:
text.append(self._dump_yaml({k: opt[k]}, opt_indent))
text.append('')
@staticmethod
def get_support_block(doc):
# Note: 'curated' is deprecated and not used in any of the modules we ship
support_level_msg = {'core': 'The Ansible Core Team',
'network': 'The Ansible Network Team',
'certified': 'an Ansible Partner',
'community': 'The Ansible Community',
'curated': 'A Third Party',
}
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
return [" * This module is maintained by %s" % support_level_msg[doc['metadata']['supported_by']]]
return []
@staticmethod
def get_metadata_block(doc):
text = []
if doc['metadata'].get('metadata_version') in ('1.0', '1.1'):
text.append("METADATA:")
text.append('\tSUPPORT LEVEL: %s' % doc['metadata']['supported_by'])
for k in (m for m in doc['metadata'] if m not in ('version', 'metadata_version', 'supported_by')):
if isinstance(k, list):
text.append("\t%s: %s" % (k.capitalize(), ", ".join(doc['metadata'][k])))
else:
text.append("\t%s: %s" % (k.capitalize(), doc['metadata'][k]))
return text
return []
def get_man_text(self, doc):
IGNORE = frozenset(['module', 'docuri', 'version_added', 'short_description', 'now_date', 'plainexamples', 'returndocs', self.options.type])
opt_indent = " "
text = []
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
text.append("> %s (%s)\n" % (doc.get(self.options.type, doc.get('plugin_type')).upper(), doc.pop('filename')))
if isinstance(doc['description'], list):
desc = " ".join(doc.pop('description'))
else:
desc = doc.pop('description')
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n")
if isinstance(doc['deprecated'], dict):
text.append("\tReason: %(why)s\n\tScheduled removal: Ansible %(version)s\n\tAlternatives: %(alternative)s" % doc.pop('deprecated'))
else:
text.append("%s" % doc.pop('deprecated'))
text.append("\n")
try:
support_block = self.get_support_block(doc)
if support_block:
text.extend(support_block)
except:
pass # FIXME: not suported by plugins
if doc.pop('action', False):
text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
if 'options' in doc and doc['options']:
text.append("OPTIONS (= is mandatory):\n")
self.add_fields(text, doc.pop('options'), limit, opt_indent)
text.append('')
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
text.append("NOTES:")
for note in doc['notes']:
text.append(textwrap.fill(CLI.tty_ify(note), limit - 6, initial_indent=opt_indent[:-2] + "* ", subsequent_indent=opt_indent))
text.append('')
del doc['notes']
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc.pop('requirements'))
text.append("REQUIREMENTS:%s\n" % textwrap.fill(CLI.tty_ify(req), limit - 16, initial_indent=" ", subsequent_indent=opt_indent))
# Generic handler
for k in sorted(doc):
if k in IGNORE or not doc[k]:
continue
if isinstance(doc[k], string_types):
text.append('%s: %s' % (k.upper(), textwrap.fill(CLI.tty_ify(doc[k]), limit - (len(k) + 2), subsequent_indent=opt_indent)))
elif isinstance(doc[k], (list, tuple)):
text.append('%s: %s' % (k.upper(), ', '.join(doc[k])))
else:
text.append(self._dump_yaml({k.upper(): doc[k]}, opt_indent))
del doc[k]
text.append('')
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
if isinstance(doc['plainexamples'], string_types):
text.append(doc.pop('plainexamples').strip())
else:
text.append(yaml.dump(doc.pop('plainexamples'), indent=2, default_flow_style=False))
text.append('')
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:\n")
if isinstance(doc['returndocs'], string_types):
text.append(doc.pop('returndocs'))
else:
text.append(yaml.dump(doc.pop('returndocs'), indent=2, default_flow_style=False))
text.append('')
try:
metadata_block = self.get_metadata_block(doc)
if metadata_block:
text.extend(metadata_block)
text.append('')
except:
pass # metadata is optional
return "\n".join(text)
|
the-stack_0_16705 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a Google Cloud Storage operator.
"""
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.exceptions import AirflowException
WILDCARD = '*'
class GoogleCloudStorageToGoogleCloudStorageOperator(BaseOperator):
"""
Copies objects from a bucket to another, with renaming if requested.
:param source_bucket: The source Google cloud storage bucket where the
object is. (templated)
:type source_bucket: str
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:type source_object: str
:param destination_bucket: The destination Google cloud storage bucket
where the object should be. If the destination_bucket is None, it defaults
to source_bucket. (templated)
:type destination_bucket: str
:param destination_object: The destination name of the object in the
destination Google cloud storage bucket. (templated)
If a wildcard is supplied in the source_object argument, this is the
prefix that will be prepended to the final destination objects' paths.
Note that the source path's part before the wildcard will be removed;
if it needs to be retained it should be appended to destination_object.
For example, with prefix ``foo/*`` and destination_object ``blah/``, the
file ``foo/baz`` will be copied to ``blah/baz``; to retain the prefix write
the destination_object as e.g. ``blah/foo``, in which case the copied file
will be named ``blah/foo/baz``.
:type destination_object: str
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:type move_object: bool
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param last_modified_time: When specified, the objects will be copied or moved,
only if they were modified after last_modified_time.
If tzinfo has not been set, UTC will be assumed.
:type last_modified_time: datetime.datetime
:Example:
The following Operator would copy a single file named
``sales/sales-2017/january.avro`` in the ``data`` bucket to the file named
``copied_sales/2017/january-backup.avro`` in the ``data_backup`` bucket ::
copy_single_file = GoogleCloudStorageToGoogleCloudStorageOperator(
task_id='copy_single_file',
source_bucket='data',
source_object='sales/sales-2017/january.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/january-backup.avro',
google_cloud_storage_conn_id=google_cloud_conn_id
)
The following Operator would copy all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
``copied_sales/2017`` folder in the ``data_backup`` bucket. ::
copy_files = GoogleCloudStorageToGoogleCloudStorageOperator(
task_id='copy_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
destination_object='copied_sales/2017/',
google_cloud_storage_conn_id=google_cloud_conn_id
)
The following Operator would move all the Avro files from ``sales/sales-2017``
folder (i.e. with names starting with that prefix) in ``data`` bucket to the
same folder in the ``data_backup`` bucket, deleting the original files in the
process. ::
move_files = GoogleCloudStorageToGoogleCloudStorageOperator(
task_id='move_files',
source_bucket='data',
source_object='sales/sales-2017/*.avro',
destination_bucket='data_backup',
move_object=True,
google_cloud_storage_conn_id=google_cloud_conn_id
)
"""
template_fields = ('source_bucket', 'source_object', 'destination_bucket',
'destination_object',)
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
source_bucket,
source_object,
destination_bucket=None,
destination_object=None,
move_object=False,
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
last_modified_time=None,
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_bucket = destination_bucket
self.destination_object = destination_object
self.move_object = move_object
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
self.last_modified_time = last_modified_time
def execute(self, context):
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to
)
if self.destination_bucket is None:
self.log.warning(
'destination_bucket is None. Defaulting it to source_bucket (%s)',
self.source_bucket)
self.destination_bucket = self.source_bucket
if WILDCARD in self.source_object:
total_wildcards = self.source_object.count(WILDCARD)
if total_wildcards > 1:
error_msg = "Only one wildcard '*' is allowed in source_object parameter. " \
"Found {} in {}.".format(total_wildcards, self.source_object)
raise AirflowException(error_msg)
prefix, delimiter = self.source_object.split(WILDCARD, 1)
objects = hook.list(self.source_bucket, prefix=prefix, delimiter=delimiter)
for source_object in objects:
if self.destination_object is None:
destination_object = source_object
else:
destination_object = source_object.replace(prefix,
self.destination_object, 1)
self._copy_single_object(hook=hook, source_object=source_object,
destination_object=destination_object)
else:
self._copy_single_object(hook=hook, source_object=self.source_object,
destination_object=self.destination_object)
def _copy_single_object(self, hook, source_object, destination_object):
if self.last_modified_time is not None:
# Check to see if object was modified after last_modified_time
if hook.is_updated_after(self.source_bucket,
source_object,
self.last_modified_time):
self.log.debug("Object has been modified after %s ", self.last_modified_time)
else:
return
self.log.info('Executing copy of gs://%s/%s to gs://%s/%s',
self.source_bucket, source_object,
self.destination_bucket, destination_object)
hook.rewrite(self.source_bucket, source_object,
self.destination_bucket, destination_object)
if self.move_object:
hook.delete(self.source_bucket, source_object)
|
the-stack_0_16706 | '''
This file will write and compile resume latex file.
'''
import json
import os
import sys
from GetLatex import *
'''
Read init tex file and add content from pre-defined json
args:
filename: filename defined your resume file
js: resume json object
'''
def build(filename,js):
with open(filename,'w') as f:
with open("init.tex") as fin:
for l in fin:
if "Education" in l:
f.write(l)
f.write(getEductionLatex(js["education"]))
elif "Skill" in l:
f.write(l)
f.write(getSkillLatex(js["skillset"]))
elif "Internship" in l:
f.write(l)
f.write(getExperience(js["experience"]))
elif "Projects" in l:
f.write(l)
f.write(getProLatex(js["project"],False))
else:
f.write(l)
'''
main program
Read resume object, copy to template folder, use latex to build and compile it. Then delete some temp files and move your tex and pdf to root folder.
args:
file: json file defines your resume.
'''
def main(file):
jobj = json.load(open(file))
build(jobj["filename"],jobj)
os.system("mv "+jobj["filename"]+" template/")
os.chdir("template")
os.system("xelatex -synctex=1 -interaction=nonstopmode {0}".format(jobj['filename']))
os.system("mv {0} ..".format(jobj["filename"][:-4]+'.pdf'))
os.system("mv {0} ..".format(jobj["filename"][:-4]+'.tex'))
os.system("rm {0}".format(jobj["filename"][:-4]+'.*'))
if __name__ == '__main__':
main(sys.argv[1])
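# Illustrative resume JSON (only the top-level keys are read by this script;
# the nested values are placeholders for whatever GetLatex expects):
# {
#   "filename": "resume.tex",
#   "education": [...],
#   "skillset": [...],
#   "experience": [...],
#   "project": [...]
# }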
|
the-stack_0_16707 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bot that scrapes RSS feeds.
Usage:
Run python3 bot.py --help for help.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
import logging
import feedparser
import pytz
import argparse
import datetime as dt
import yaml
from sqlitedict import SqliteDict
from telegram.error import TimedOut
from telegram.ext import Updater
from time import mktime
from utils import get_substring_or_empty
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
SGT = pytz.timezone('Asia/Singapore')
RSS_FROM_THIS_TIMESTAMP = dt.datetime.strptime('20 Mar 2019 2:30PM', '%d %b %Y %I:%M%p').astimezone(SGT)
def update_feeds(context):
job = context.job
bot = context.bot
chat_name, chat_id, seen_urls_dict, feeds = job.context
logger.debug("update_feeds called! chat_name = '%s' | chat_id = '%s'" % (chat_name, chat_id))
for feed in feeds: # feeds is a list of dicts with keys {name, url, disabled}
feed_name = feed['name']
if "disabled" in feed and feed['disabled']:
logger.debug("skipping '%s' since 'disabled' detected" % feed_name)
continue
# HTTP access on feed['url'] and grab the entries
try:
NewsFeed = feedparser.parse(feed['url'])
except:
logger.error("Exception when attempting to read and/or parse '%s' at URL %s", feed_name, feed['url'])
continue
for entry in NewsFeed.entries:
# Ignore if the link has already been seen before
if entry.link in seen_urls_dict:
break
# Ignore if any of the ignored_categories are found in the entry.category
if 'ignored_categories' in feed:
ignored = False
for c in feed['ignored_categories']:
if c in entry.category:
logger.info("Ignored because category = '%s': %s | %s | %s", entry.category, feed_name,
entry.published, entry.link)
ignored = True
# Mark as seen
seen_urls_dict[entry.link] = True
break
if ignored: continue
# Ignore if the published datetime is before RSS_FROM_THIS_TIMESTAMP
published_datetime = dt.datetime.fromtimestamp(mktime(entry.published_parsed)).replace(
tzinfo=pytz.utc).astimezone(SGT)
if published_datetime < RSS_FROM_THIS_TIMESTAMP:
break
budget = get_substring_or_empty(entry['summary'], '<b>Budget</b>: ', '<br /><b>')
hourly_rate = get_substring_or_empty(entry['summary'], '<b>Hourly Range</b>: ', '<br /><b>')
country = get_substring_or_empty(entry['summary'], '<b>Country</b>: ', '<br />')
text = """%s (%s)
%s
[%s]
Fixed: %s
Hourly: %s
From: %s
""" % (
entry.title,
published_datetime.strftime("%Y-%m-%d %H:%M"),
entry.link,
feed_name,
budget,
hourly_rate,
country
)
try:
bot.send_message(chat_id, text=text)
logger.info("Sent to chat '%s': %s | %s | %s", chat_name, feed_name, entry.published, entry.link)
# If this line is reached, then the message has been successfully sent
seen_urls_dict[entry.link] = True
except TimedOut as e:
logger.error("Timeout when attempting to send to chat '%s': %s | %s | %s | %s", chat_name, feed_name,
entry.published, entry.link, str(e))
except Exception as e:
logger.error("Exception when attempting to send to chat '%s': %s | %s | %s | %s", chat_name, feed_name,
entry.published, entry.link, str(e))
def error(bot, update, telegram_error):
logger.warning('Update "%s" caused error "%s"', update, telegram_error)
def main():
# Command line parameters
parser = argparse.ArgumentParser(description='RSS Scraping Telegram Bot')
parser.add_argument('bot_token', action='store', default=None, help="Your bot's token")
# parser.add_argument('chat_id', action='store', default=None, help="The destination channel or chat in the format @channelname")
parser.add_argument('--interval', dest='interval', action='store', type=int, default=60,
help="Interval in seconds to refresh the RSS feeds")
parser.add_argument('--feeds', dest='feeds', action='store', type=str, default='feeds.yaml',
help="YAML file containing chats and feeds")
parser.add_argument('--seendb', dest='seendb', action='store', type=str, default='seen_urls.sqlite',
help="SQLite db for storing seen URLs")
parser.add_argument('--runonce', action='store_true', default=False, help="Scrape once and quit")
args = parser.parse_args()
# Open the "feeds.yaml" config file and read the feeds
with open(args.feeds, 'r') as stream:
try:
feeds_config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print("Error while loading %s" % args.feeds)
raise exc
logger.info("RSS Scraping Telegram Bot starting up...")
updater = Updater(args.bot_token, use_context=True)
for chat in feeds_config['chats']:
seen_urls_dict = SqliteDict(args.seendb, autocommit=True, tablename=chat['chat_name'])
if args.runonce:
updater.job_queue.run_once(update_feeds, 0,
context=(chat['chat_name'], chat['chat_id'], seen_urls_dict, chat['feeds']))
else:
updater.job_queue.run_repeating(update_feeds, args.interval,
context=(chat['chat_name'], chat['chat_id'], seen_urls_dict, chat['feeds']))
# Get the dispatcher to register handlers
dp = updater.dispatcher
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Block until you press Ctrl-C or the process receives SIGINT, SIGTERM or
# SIGABRT. This should be used most of the time, since start_polling() is
# non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
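# Illustrative feeds.yaml (the keys match what main() and update_feeds() read;
# all concrete values are placeholders):
#
# chats:
#   - chat_name: freelance
#     chat_id: "@mychannel"
#     feeds:
#       - name: Upwork Python
#         url: https://www.upwork.com/ab/feed/jobs/rss?q=python
#         ignored_categories:
#           - Legal
#       - name: Old feed
#         url: https://example.com/rss
#         disabled: true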
|
the-stack_0_16710 | from __future__ import print_function, division
import numpy as np
import pytest
import itertools
import os.path
from skimage.transform import radon, iradon, iradon_sart, rescale
from skimage.io import imread
from skimage import data_dir
from skimage._shared.testing import test_parallel
from skimage._shared._warnings import expected_warnings
PHANTOM = imread(os.path.join(data_dir, "phantom.png"),
as_grey=True)[::2, ::2]
PHANTOM = rescale(PHANTOM, 0.5, order=1, multichannel=False)
def _debug_plot(original, result, sinogram=None):
from matplotlib import pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
if sinogram is None:
plt.figure(figsize=(15, 6))
sp = 130
else:
plt.figure(figsize=(11, 11))
sp = 221
plt.subplot(sp + 0)
plt.imshow(sinogram, aspect='auto', **imkwargs)
plt.subplot(sp + 1)
plt.imshow(original, **imkwargs)
plt.subplot(sp + 2)
plt.imshow(result, vmin=original.min(), vmax=original.max(), **imkwargs)
plt.subplot(sp + 3)
plt.imshow(result - original, **imkwargs)
plt.colorbar()
plt.show()
def _rescale_intensity(x):
x = x.astype(float)
x -= x.min()
x /= x.max()
return x
def check_radon_center(shape, circle):
# Create a test image with only a single non-zero pixel at the origin
image = np.zeros(shape, dtype=np.float)
image[(shape[0] // 2, shape[1] // 2)] = 1.
# Calculate the sinogram
theta = np.linspace(0., 180., max(shape), endpoint=False)
sinogram = radon(image, theta=theta, circle=circle)
# The sinogram should be a straight, horizontal line
sinogram_max = np.argmax(sinogram, axis=0)
print(sinogram_max)
assert np.std(sinogram_max) < 1e-6
def test_radon_center():
shapes = [(16, 16), (17, 17)]
circles = [False, True]
for shape, circle in itertools.product(shapes, circles):
yield check_radon_center, shape, circle
rectangular_shapes = [(32, 16), (33, 17)]
for shape in rectangular_shapes:
yield check_radon_center, shape, False
def check_iradon_center(size, theta, circle):
debug = False
# Create a test sinogram corresponding to a single projection
# with a single non-zero pixel at the rotation center
if circle:
sinogram = np.zeros((size, 1), dtype=np.float)
sinogram[size // 2, 0] = 1.
else:
diagonal = int(np.ceil(np.sqrt(2) * size))
sinogram = np.zeros((diagonal, 1), dtype=np.float)
sinogram[sinogram.shape[0] // 2, 0] = 1.
maxpoint = np.unravel_index(np.argmax(sinogram), sinogram.shape)
print('shape of generated sinogram', sinogram.shape)
print('maximum in generated sinogram', maxpoint)
# Compare reconstructions for theta=angle and theta=angle + 180;
# these should be exactly equal
reconstruction = iradon(sinogram, theta=[theta], circle=circle)
reconstruction_opposite = iradon(sinogram, theta=[theta + 180],
circle=circle)
print('rms deviance:',
np.sqrt(np.mean((reconstruction_opposite - reconstruction)**2)))
if debug:
import matplotlib.pyplot as plt
imkwargs = dict(cmap='gray', interpolation='nearest')
plt.figure()
plt.subplot(221)
plt.imshow(sinogram, **imkwargs)
plt.subplot(222)
plt.imshow(reconstruction_opposite - reconstruction, **imkwargs)
plt.subplot(223)
plt.imshow(reconstruction, **imkwargs)
plt.subplot(224)
plt.imshow(reconstruction_opposite, **imkwargs)
plt.show()
assert np.allclose(reconstruction, reconstruction_opposite)
def test_iradon_center():
sizes = [16, 17]
thetas = [0, 90]
circles = [False, True]
for size, theta, circle in itertools.product(sizes, thetas, circles):
yield check_iradon_center, size, theta, circle
def check_radon_iradon(interpolation_type, filter_type):
debug = False
image = PHANTOM
reconstructed = iradon(radon(image, circle=False), filter=filter_type,
interpolation=interpolation_type)
delta = np.mean(np.abs(image - reconstructed))
print('\n\tmean error:', delta)
if debug:
_debug_plot(image, reconstructed)
if filter_type in ('ramp', 'shepp-logan'):
if interpolation_type == 'nearest':
allowed_delta = 0.03
else:
allowed_delta = 0.025
else:
allowed_delta = 0.05
assert delta < allowed_delta
def test_radon_iradon():
filter_types = ["ramp", "shepp-logan", "cosine", "hamming", "hann"]
interpolation_types = ['linear', 'nearest']
for interpolation_type, filter_type in \
itertools.product(interpolation_types, filter_types):
yield check_radon_iradon, interpolation_type, filter_type
# cubic interpolation is slow; only run one test for it
yield check_radon_iradon, 'cubic', 'shepp-logan'
def test_iradon_angles():
"""
Test with different number of projections
"""
size = 100
# Synthetic data
image = np.tri(size) + np.tri(size)[::-1]
# Large number of projections: a good quality is expected
nb_angles = 200
theta = np.linspace(0, 180, nb_angles, endpoint=False)
radon_image_200 = radon(image, theta=theta, circle=False)
reconstructed = iradon(radon_image_200, circle=False)
delta_200 = np.mean(abs(_rescale_intensity(image) -
_rescale_intensity(reconstructed)))
assert delta_200 < 0.03
# Lower number of projections
nb_angles = 80
radon_image_80 = radon(image, theta=theta, circle=False)
# Test whether the sum of all projections is approximately the same
s = radon_image_80.sum(axis=0)
assert np.allclose(s, s[0], rtol=0.01)
reconstructed = iradon(radon_image_80, circle=False)
delta_80 = np.mean(abs(image / np.max(image) -
reconstructed / np.max(reconstructed)))
# Loss of quality when the number of projections is reduced
assert delta_80 > delta_200
def check_radon_iradon_minimal(shape, slices):
debug = False
theta = np.arange(180)
image = np.zeros(shape, dtype=np.float)
image[slices] = 1.
sinogram = radon(image, theta, circle=False)
reconstructed = iradon(sinogram, theta, circle=False)
print('\n\tMaximum deviation:', np.max(np.abs(image - reconstructed)))
if debug:
_debug_plot(image, reconstructed, sinogram)
if image.sum() == 1:
assert (np.unravel_index(np.argmax(reconstructed), image.shape)
== np.unravel_index(np.argmax(image), image.shape))
def test_radon_iradon_minimal():
shapes = [(3, 3), (4, 4), (5, 5)]
for shape in shapes:
c0, c1 = shape[0] // 2, shape[1] // 2
coordinates = itertools.product((c0 - 1, c0, c0 + 1),
(c1 - 1, c1, c1 + 1))
for coordinate in coordinates:
yield check_radon_iradon_minimal, shape, coordinate
def test_reconstruct_with_wrong_angles():
a = np.zeros((3, 3))
p = radon(a, theta=[0, 1, 2], circle=False)
iradon(p, theta=[0, 1, 2], circle=False)
with pytest.raises(ValueError):
iradon(p, theta=[0, 1, 2, 3])
def _random_circle(shape):
# Synthetic random data, zero outside reconstruction circle
np.random.seed(98312871)
image = np.random.rand(*shape)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image[r > radius] = 0.
return image
def test_radon_circle():
a = np.ones((10, 10))
with expected_warnings(['reconstruction circle']):
radon(a, circle=True)
# Synthetic data, circular symmetry
shape = (61, 79)
c0, c1 = np.ogrid[0:shape[0], 0:shape[1]]
r = np.sqrt((c0 - shape[0] // 2)**2 + (c1 - shape[1] // 2)**2)
radius = min(shape) // 2
image = np.clip(radius - r, 0, np.inf)
image = _rescale_intensity(image)
angles = np.linspace(0, 180, min(shape), endpoint=False)
sinogram = radon(image, theta=angles, circle=True)
assert np.all(sinogram.std(axis=1) < 1e-2)
# Synthetic data, random
image = _random_circle(shape)
sinogram = radon(image, theta=angles, circle=True)
mass = sinogram.sum(axis=0)
average_mass = mass.mean()
relative_error = np.abs(mass - average_mass) / average_mass
print(relative_error.max(), relative_error.mean())
assert np.all(relative_error < 3.2e-3)
def check_sinogram_circle_to_square(size):
from skimage.transform.radon_transform import _sinogram_circle_to_square
image = _random_circle((size, size))
theta = np.linspace(0., 180., size, False)
sinogram_circle = radon(image, theta, circle=True)
def argmax_shape(a):
return np.unravel_index(np.argmax(a), a.shape)
print('\n\targmax of circle:', argmax_shape(sinogram_circle))
sinogram_square = radon(image, theta, circle=False)
print('\targmax of square:', argmax_shape(sinogram_square))
sinogram_circle_to_square = _sinogram_circle_to_square(sinogram_circle)
print('\targmax of circle to square:',
argmax_shape(sinogram_circle_to_square))
error = abs(sinogram_square - sinogram_circle_to_square)
print(np.mean(error), np.max(error))
assert (argmax_shape(sinogram_square) ==
argmax_shape(sinogram_circle_to_square))
def test_sinogram_circle_to_square():
for size in (50, 51):
yield check_sinogram_circle_to_square, size
def check_radon_iradon_circle(interpolation, shape, output_size):
# Forward and inverse radon on synthetic data
image = _random_circle(shape)
radius = min(shape) // 2
sinogram_rectangle = radon(image, circle=False)
reconstruction_rectangle = iradon(sinogram_rectangle,
output_size=output_size,
interpolation=interpolation,
circle=False)
sinogram_circle = radon(image, circle=True)
reconstruction_circle = iradon(sinogram_circle,
output_size=output_size,
interpolation=interpolation,
circle=True)
# Crop rectangular reconstruction to match circle=True reconstruction
width = reconstruction_circle.shape[0]
excess = int(np.ceil((reconstruction_rectangle.shape[0] - width) / 2))
s = np.s_[excess:width + excess, excess:width + excess]
reconstruction_rectangle = reconstruction_rectangle[s]
# Find the reconstruction circle, set reconstruction to zero outside
c0, c1 = np.ogrid[0:width, 0:width]
r = np.sqrt((c0 - width // 2)**2 + (c1 - width // 2)**2)
reconstruction_rectangle[r > radius] = 0.
print(reconstruction_circle.shape)
print(reconstruction_rectangle.shape)
np.allclose(reconstruction_rectangle, reconstruction_circle)
def test_radon_iradon_circle():
shape = (61, 79)
interpolations = ('nearest', 'linear')
output_sizes = (None, min(shape), max(shape), 97)
for interpolation, output_size in itertools.product(interpolations,
output_sizes):
yield check_radon_iradon_circle, interpolation, shape, output_size
def test_order_angles_golden_ratio():
from skimage.transform.radon_transform import order_angles_golden_ratio
np.random.seed(1231)
lengths = [1, 4, 10, 180]
for l in lengths:
theta_ordered = np.linspace(0, 180, l, endpoint=False)
theta_random = np.random.uniform(0, 180, l)
for theta in (theta_random, theta_ordered):
indices = [x for x in order_angles_golden_ratio(theta)]
# no duplicate indices allowed
assert len(indices) == len(set(indices))
@test_parallel()
def test_iradon_sart():
debug = False
image = rescale(PHANTOM, 0.8, mode='reflect')
theta_ordered = np.linspace(0., 180., image.shape[0], endpoint=False)
theta_missing_wedge = np.linspace(0., 150., image.shape[0], endpoint=True)
for theta, error_factor in ((theta_ordered, 1.),
(theta_missing_wedge, 2.)):
sinogram = radon(image, theta, circle=True)
reconstructed = iradon_sart(sinogram, theta)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration) =', delta)
assert delta < 0.02 * error_factor
reconstructed = iradon_sart(sinogram, theta, reconstructed)
delta = np.mean(np.abs(reconstructed - image))
print('delta (2 iterations) =', delta)
assert delta < 0.014 * error_factor
reconstructed = iradon_sart(sinogram, theta, clip=(0, 1))
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, clip) =', delta)
assert delta < 0.018 * error_factor
np.random.seed(1239867)
shifts = np.random.uniform(-3, 3, sinogram.shape[1])
x = np.arange(sinogram.shape[0])
sinogram_shifted = np.vstack(np.interp(x + shifts[i], x,
sinogram[:, i])
for i in range(sinogram.shape[1])).T
reconstructed = iradon_sart(sinogram_shifted, theta,
projection_shifts=shifts)
if debug:
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(221)
plt.imshow(image, interpolation='nearest')
plt.subplot(222)
plt.imshow(sinogram_shifted, interpolation='nearest')
plt.subplot(223)
plt.imshow(reconstructed, interpolation='nearest')
plt.subplot(224)
plt.imshow(reconstructed - image, interpolation='nearest')
plt.show()
delta = np.mean(np.abs(reconstructed - image))
print('delta (1 iteration, shifted sinogram) =', delta)
assert delta < 0.022 * error_factor
if __name__ == "__main__":
from numpy.testing import run_module_suite
run_module_suite()
|
the-stack_0_16712 | #!/usr/bin/env python3
# plot_shapely.py
# %%
from dataclasses import dataclass
from typing import List, Tuple, Set
import matplotlib.pyplot as plt
from aray.problem import Problem
from shapely.geometry import Polygon, Point, LineString
'''
Datastructures we want to have
point: integer pair, not the shapely kind
delta: integer pair, difference between two points
edge: integer pair, indexes into a point or vertex list
segment: point pair
problem data:
hole: list of points (form a polygon)
vertices: list of points
edges: list of edges, indexes into vertices
computed data:
points: sorted list of valid points
edge_dists: list of edge distances, corresponds to edges
dist_deltas: map from dist to a list of deltas
delta_forbidden: map from delta to a list of forbidden segments
'''
# %%
def get_points(hole):
poly = Polygon(hole)
min_x, min_y, max_x, max_y = poly.bounds
points = []
for x in range(int(min_x), int(max_x) + 1):
for y in range(int(min_y), int(max_y) + 1):
if poly.intersects(Point(x, y)):
points.append((x, y))
return sorted(points)
def get_forbidden(hole, delta):
poly = Polygon(hole)
forbidden = []
for a in points:
b = (a[0] + delta[0], a[1] + delta[1])
if b not in points:
continue
ab = LineString((a, b))
if poly.contains(ab) or ab.within(poly):
continue
elif poly.exterior.crosses(ab):
forbidden.append((a, b))
elif poly.touches(ab) and not poly.exterior.contains(ab):
forbidden.append((a, b))
return forbidden
def get_deltas(d_old: int, epsilon: int) -> List[Tuple[int, int]]:
deltas = []
n = int(d_old ** 0.5 + 1) * 2
for x in range(-n, n + 1):
for y in range(-n, n + 1):
d_new = dsq((0, 0), (x, y))
if abs(d_new / d_old - 1) <= epsilon / 1e6:
deltas.append((x, y))
return deltas
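# Illustrative: get_deltas(25, 0) yields exactly the lattice vectors of squared
# length 25, i.e. (3, 4), (4, 3), (5, 0) and their sign variants, while a
# non-zero epsilon admits any squared length d_new with abs(d_new/25 - 1) <= epsilon/1e6.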
fig, ax = plt.subplots()
problem = Problem.get(14)
problem.plot(fig, ax)
points = get_points(problem.hole)
xs = [p[0] for p in points]
ys = [p[1] for p in points]
ax.scatter(xs, ys)
forbidden = get_forbidden(problem.hole, (-1, -1))
for a, b in forbidden:
ax.plot((a[0], b[0]), (a[1], b[1]))
forbidden
# %%
problem = Problem.get(14)
vert = problem.vertices
def dsq(a, b): return (a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2
edge_dsq = [dsq(vert[i], vert[j]) for i, j in problem.edges]
epsilon = problem.epsilon
for d_old in sorted(set(edge_dsq)):
print(d_old, edge_dsq.count(d_old))
deltas = get_deltas(d_old, epsilon)
print('d', deltas)
fig, ax = plt.subplots()
ax.grid(True)
# set x and y ticks; n mirrors the search bound used inside get_deltas
n = int(d_old ** 0.5 + 1) * 2
ax.set_xticks(range(-n, n + 1))
ax.set_yticks(range(-n, n + 1))
ax.scatter([p[0] for p in deltas], [p[1] for p in deltas])
|
the-stack_0_16714 | import ctypes
from vivsys.common import *
class ServiceControlManager:
def __init__(self):
self.hScm = None
def __enter__(self):
self.hScm = advapi32.OpenSCManagerW(None, None, SC_MANAGER_CREATE_SERVICE)
if not self.hScm:
raise ctypes.WinError()
return self
def __exit__(self, exc, ex, tb):
advapi32.CloseServiceHandle(self.hScm)
self.hScm = None
def _req_with(self):
if self.hScm == None:
raise Exception('ServiceControlManager not in with block!')
def openService(self, name, access=SERVICE_ALL_ACCESS):
'''
Retrieve a Service object for the given service name.
Example:
with ServiceControlManager() as scm:
with scm.openService('woot') as svc:
dostuff(svc)
'''
self._req_with()
hSvc = advapi32.OpenServiceW(self.hScm, name, access)
if not hSvc:
raise ctypes.WinError()
return Service(self.hScm, hSvc)
def createDriverService(self, path, name):
'''
Helper method for creation of driver services.
'''
self._req_with()
hSvc = advapi32.CreateServiceW(self.hScm,
name,
None,
SERVICE_START | DELETE | SERVICE_STOP,
SERVICE_KERNEL_DRIVER,
SERVICE_DEMAND_START,
SERVICE_ERROR_IGNORE,
path,
None, NULL, None, None, None)
if not hSvc:
raise ctypes.WinError()
return Service(self.hScm,hSvc)
def isServiceName(self, name):
'''
Return True/False if a service (by name) exists.
'''
self._req_with()
retval = False
hSvc = advapi32.OpenServiceW(self.hScm, name, SERVICE_ALL_ACCESS)
if hSvc:
retval = True
advapi32.CloseServiceHandle(hSvc)
return retval
class Service:
'''
An object to minimally wrap the Windows Service APIs
which are needed for loading/unloading drivers.
'''
def __init__(self, hScm, hSvc):
self.hScm = hScm
self.hSvc = hSvc
self.inwith = False
def __enter__(self):
self.inwith = True
return self
def __exit__(self, exc, ex, tb):
self.close()
def close(self):
advapi32.CloseServiceHandle(self.hSvc)
self.hSvc = None
def _req_with(self):
if not self.inwith:
raise Exception('Service not in with block!')
def getServiceStatus(self):
'''
Returns a SERVICE_STATUS structure for the service.
'''
self._req_with()
status = SERVICE_STATUS()
if not advapi32.QueryServiceStatus(self.hSvc, ctypes.byref(status)):
raise ctypes.WinError()
return status
def delService(self):
'''
Delete the service.
Example:
scm = ServiceControlManager()
with scm:
with scm.openService('woot') as svc:
svc.delService()
'''
self._req_with()
if not advapi32.DeleteService(self.hSvc):
err = ctypes.WinError()
if ERROR_SERVICE_MARKED_FOR_DELETE != err.winerror:
raise err
def stopService(self):
'''
Stop the service ( if running ).
'''
self._req_with()
status = self.getServiceStatus()
if status.dwCurrentState == SERVICE_RUNNING:
if not advapi32.ControlService(self.hSvc, SERVICE_CONTROL_STOP, ctypes.byref(status)):
raise ctypes.WinError()
return status
def startService(self):
'''
Start the service.
'''
self._req_with()
if not advapi32.StartServiceW(self.hSvc, 0, NULL):
err = ctypes.WinError()
if ERROR_SERVICE_ALREADY_RUNNING != err.winerror:
raise err
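if __name__ == '__main__':
    # Usage sketch (assumes administrator rights and an existing driver binary
    # at the path below, which is a placeholder). It follows the patterns shown
    # in the docstrings above: create, start, stop and delete a driver service.
    import os
    drv_path = os.path.abspath('example.sys')  # hypothetical driver path
    with ServiceControlManager() as scm:
        with scm.createDriverService(drv_path, 'exampledrv') as svc:
            svc.startService()
            print('state:', svc.getServiceStatus().dwCurrentState)
            svc.stopService()
            svc.delService()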
|
the-stack_0_16715 | #CODE2---For calculating pathway details----
#Python 3.6.5 |Anaconda, Inc.
import sys
import glob
import errno
import csv
#path = '/home/16AT72P01/Excelra/SMPDB/output/metabolic_proteins.csv'
path = '/home/16AT72P01/Excelra/SMPDB/output/metabolics.csv'
files = glob.glob(path)
unique_pathway = set()
with open(path) as f1:
reader = csv.DictReader(f1, quotechar='"', delimiter='\t')
print(reader)
for row in reader:
#unique_pathway.add(row['PATHWAY_NAME'])
unique_pathway.add(row['PATHWAY_NAME'])
f1.close()
print(len(unique_pathway))
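# Illustrative input layout (tab-delimited with a PATHWAY_NAME column; the
# other columns are placeholders and are not read by this script):
#   PATHWAY_NAME <tab> METABOLITE_NAME <tab> ...
#   Glycolysis   <tab> Glucose         <tab> ...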
|
the-stack_0_16719 | #!/usr/bin/env python
# coding: utf-8
# This file is a part of `qal`.
#
# Copyright (c) 2021, University of Nebraska Board of Regents.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from abc import ABCMeta, abstractmethod
class Publication(metaclass=ABCMeta):
def __init__(self, identifier, title, authors, year):
self.search_terms = {}
self.identifier = identifier
self.title = title
self.authors = authors
self.year = year
def add_search_terms(self, source, search_terms):
if source in self.search_terms.keys():
self.search_terms[source].append(search_terms)
else:
self.search_terms[source] = [search_terms]
def asdict(self):
return {'identifier': self.identifier,
'title': self.title,
'authors': self.authors,
'year': self.year,
'search_terms': self.search_terms}
@abstractmethod
def venue(self):
raise NotImplementedError("Must define venue getter.")
class Article(Publication):
def __init__(self, identifier, title, authors, year, journal, volume, issue, abstract=None, pages=None):
super().__init__(identifier, title, authors, year)
self.journal = journal
self.volume = volume
self.issue = issue
self.abstract = abstract
self.pages = pages
def venue(self):
return self.journal
def asdict(self):
d = super().asdict()
d['journal'] = self.journal
d['volume'] = self.volume
d['issue'] = self.issue
d['abstract'] = self.abstract
d['pages'] = self.pages
return d
class Conference(Publication):
def __init__(self, identifier, title, authors, year, book_title, conference, abstract=None, pages=None):
super().__init__(identifier, title, authors, year)
self.book_title = book_title
self.conference = conference
self.abstract = abstract
self.pages = pages
def venue(self):
if self.conference:
return self.conference
else:
return self.book_title
def asdict(self):
d = super().asdict()
d['book_title'] = self.book_title
d['conference'] = self.conference
d['abstract'] = self.abstract
d['pages'] = self.pages
return d
class Book(Publication):
def __init__(self, identifier, title, authors, year, abstract=None):
super().__init__(identifier, title, authors, year)
self.abstract = abstract
def venue(self):
return self.title
def asdict(self):
d = super().asdict()
        d['abstract'] = self.abstract
        return d
class BookChapter(Publication):
def __init__(self, identifier, title, authors, year, book_title, abstract=None, pages=None):
super().__init__(identifier, title, authors, year)
self.book_title = book_title
self.abstract = abstract
self.pages = pages
def venue(self):
return self.book_title
def asdict(self):
d = super().asdict()
d['book_title'] = self.book_title
d['abstract'] = self.abstract
d['pages'] = self.pages
return d
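# Minimal usage sketch (illustrative values only, not part of qal):
if __name__ == "__main__":
    article = Article("10.1000/example", "An Example Study", ["A. Author", "B. Author"],
                      2021, journal="Journal of Examples", volume="12", issue="3")
    article.add_search_terms("manual", ["example", "study"])
    print(article.venue())
    print(article.asdict())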
|
the-stack_0_16720 | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
def is_valid_python_version():
version_valid = False
ver = sys.version_info
if (2 == ver.major) and (7 <= ver.minor):
version_valid = True
if (3 == ver.major) and (4 <= ver.minor):
version_valid = True
return version_valid
def python_short_ver_str():
ver = sys.version_info
return "%s.%s" % (ver.major, ver.minor)
def are_deps_installed():
installed = False
try:
import peewee
import bitcoinrpc.authproxy
import simplejson
installed = True
except ImportError as e:
print("[error]: Missing dependencies")
return installed
def is_database_correctly_configured():
import peewee
import config
configured = False
cannot_connect_message = "Cannot connect to database. Please ensure database service is running and user access is properly configured in 'sentinel.conf'."
try:
db = config.db
db.connect()
configured = True
except (peewee.ImproperlyConfigured, peewee.OperationalError, ImportError) as e:
print("[error]: %s" % e)
print(cannot_connect_message)
sys.exit(1)
return configured
def has_brixcoin_conf():
import config
import io
valid_brixcoin_conf = False
# ensure brixcoin_conf exists & readable
#
# if not, print a message stating that Brixcoin Core must be installed and
# configured, including JSONRPC access in brixcoin.conf
try:
f = io.open(config.brixcoin_conf)
valid_brixcoin_conf = True
except IOError as e:
print(e)
return valid_brixcoin_conf
# === begin main
def main():
install_instructions = "\tpip install -r requirements.txt"
if not is_valid_python_version():
print("Python %s is not supported" % python_short_ver_str())
sys.exit(1)
if not are_deps_installed():
print("Please ensure all dependencies are installed:")
print(install_instructions)
sys.exit(1)
if not is_database_correctly_configured():
print("Please ensure correct database configuration.")
sys.exit(1)
if not has_brixcoin_conf():
print("BrixcoinCore must be installed and configured, including JSONRPC access in brixcoin.conf")
sys.exit(1)
main()
|
the-stack_0_16721 | from PEPit import PEP
from PEPit.functions import SmoothStronglyConvexFunction
def wc_polyak_steps_in_function_value(L, mu, gamma, verbose=1):
"""
Consider the minimization problem
.. math:: f_\\star \\triangleq \\min_x f(x),
where :math:`f` is :math:`L`-smooth and :math:`\\mu`-strongly convex, and :math:`x_\\star=\\arg\\min_x f(x)`.
This code computes a worst-case guarantee for a variant of a **gradient method** relying on **Polyak step-sizes**.
That is, it computes the smallest possible :math:`\\tau(L, \\mu, \\gamma)` such that the guarantee
.. math:: f(x_{t+1}) - f_\\star \\leqslant \\tau(L, \\mu, \\gamma) (f(x_t) - f_\\star)
is valid, where :math:`x_t` is the output of the gradient method with PS and :math:`\\gamma` is the effective value
of the step-size of the gradient method.
In short, for given values of :math:`L`, :math:`\\mu`, and :math:`\\gamma`, :math:`\\tau(L, \\mu, \\gamma)` is computed as the worst-case
value of :math:`f(x_{t+1})-f_\\star` when :math:`f(x_t)-f_\\star \\leqslant 1`.
**Algorithm**:
Gradient descent is described by
.. math:: x_{t+1} = x_t - \\gamma \\nabla f(x_t),
where :math:`\\gamma` is a step-size. The Polyak step-size rule under consideration here corresponds to choosing
of :math:`\\gamma` satisfying:
.. math:: \\|\\nabla f(x_t)\\|^2 = 2 L (2 - L \\gamma) (f(x_t) - f_\\star).
**Theoretical guarantee**:
The gradient method with the variant of Polyak step-sizes under consideration enjoys the
**tight** theoretical guarantee [1, Proposition 2]:
.. math:: f(x_{t+1})-f_\\star \\leqslant \\tau(L,\\mu,\\gamma) (f(x_{t})-f_\\star),
where :math:`\\gamma` is the effective step-size used at iteration :math:`t` and
.. math::
:nowrap:
\\begin{eqnarray}
            \\tau(L,\\mu,\\gamma) & = & \\left\\{\\begin{array}{ll} (\\gamma L - 1) (L \\gamma (3 - \\gamma (L + \\mu)) - 1) & \\text{if } \\gamma \\in [\\tfrac{1}{L},\\tfrac{2L-\\mu}{L^2}],\\\\
0 & \\text{otherwise.} \\end{array}\\right.
\\end{eqnarray}
**References**:
`[1] M. Barré, A. Taylor, A. d’Aspremont (2020). Complexity guarantees for Polyak steps with momentum.
In Conference on Learning Theory (COLT).
<https://arxiv.org/pdf/2002.00915.pdf>`_
Args:
L (float): the smoothness parameter.
mu (float): the strong convexity parameter.
gamma (float): the step-size.
verbose (int): Level of information details to print.
-1: No verbose at all.
0: This example's output.
1: This example's output + PEPit information.
2: This example's output + PEPit information + CVXPY details.
Returns:
pepit_tau (float): worst-case value
theoretical_tau (float): theoretical value
Example:
>>> L = 1
>>> mu = 0.1
>>> gamma = 2 / (L + mu)
>>> pepit_tau, theoretical_tau = wc_polyak_steps_in_function_value(L=L, mu=mu, gamma=gamma, verbose=1)
(PEPit) Setting up the problem: size of the main PSD matrix: 4x4
(PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
(PEPit) Setting up the problem: initial conditions (2 constraint(s) added)
(PEPit) Setting up the problem: interpolation conditions for 1 function(s)
function 1 : 6 constraint(s) added
(PEPit) Compiling SDP
(PEPit) Calling SDP solver
(PEPit) Solver status: optimal (solver: SCS); optimal value: 0.6694215432773613
*** Example file: worst-case performance of Polyak steps ***
PEPit guarantee: f(x_1) - f_* <= 0.669422 (f(x_0) - f_*)
Theoretical guarantee: f(x_1) - f_* <= 0.669421 (f(x_0) - f_*)
"""
# Instantiate PEP
problem = PEP()
# Declare a smooth convex function
func = problem.declare_function(SmoothStronglyConvexFunction, L=L, mu=mu)
# Start by defining its unique optimal point xs = x_* and corresponding function value fs = f_*
xs = func.stationary_point()
fs = func.value(xs)
# Then define the starting point x0 of the algorithm as well as corresponding gradient and function value gn and fn
x0 = problem.set_initial_point()
g0, f0 = func.oracle(x0)
    # Set the initial condition on the function value gap between x0 and xs
problem.set_initial_condition(f0 - fs <= 1)
# Set the initial condition to the Polyak step-size
problem.add_constraint(g0 ** 2 == 2 * L * (2 - L * gamma) * (f0 - fs))
    # Run the Polyak steps at iteration 1
x1 = x0 - gamma * g0
g1, f1 = func.oracle(x1)
# Set the performance metric to the distance in function values between x_1 and x_* = xs
problem.set_performance_metric(f1 - fs)
# Solve the PEP
pepit_verbose = max(verbose, 0)
pepit_tau = problem.solve(verbose=pepit_verbose)
# Compute theoretical guarantee (for comparison)
if 1/L <= gamma <= (2 * L - mu)/L**2:
theoretical_tau = (gamma * L - 1) * (L * gamma * (3 - gamma * (L + mu)) - 1)
else:
theoretical_tau = 0.
# Print conclusion if required
if verbose != -1:
print('*** Example file: worst-case performance of Polyak steps ***')
print('\tPEPit guarantee:\t f(x_1) - f_* <= {:.6} (f(x_0) - f_*) '.format(pepit_tau))
print('\tTheoretical guarantee:\t f(x_1) - f_* <= {:.6} (f(x_0) - f_*)'.format(theoretical_tau))
# Return the worst-case guarantee of the evaluated method (and the reference theoretical value)
return pepit_tau, theoretical_tau
if __name__ == "__main__":
L = 1
mu = 0.1
gamma = 2 / (L + mu)
pepit_tau, theoretical_tau = wc_polyak_steps_in_function_value(L=L, mu=mu, gamma=gamma, verbose=1)
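    # Illustrative check (not part of the original example): since the guarantee is
    # tight, the PEP value and the closed-form rate from [1] should agree up to
    # solver tolerance.
    print("gap between PEPit and theoretical rate: {:.2e}".format(abs(pepit_tau - theoretical_tau)))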
|
the-stack_0_16722 | import csv
import itertools
import json
import os
import threading
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import wait
from pathlib import Path
from subprocess import CalledProcessError
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import List
from typing import Union
from cleo.io.null_io import NullIO
from poetry.core.packages.file_dependency import FileDependency
from poetry.core.packages.package import Package
from poetry.core.packages.utils.link import Link
from poetry.core.packages.utils.utils import url_to_path
from poetry.core.pyproject.toml import PyProjectTOML
from poetry.utils._compat import decode
from poetry.utils.env import EnvCommandError
from poetry.utils.helpers import safe_rmtree
from poetry.utils.pip import pip_editable_install
from ..utils.authenticator import Authenticator
from ..utils.pip import pip_install
from .chef import Chef
from .chooser import Chooser
from .operations.install import Install
from .operations.operation import Operation
from .operations.uninstall import Uninstall
from .operations.update import Update
if TYPE_CHECKING:
from cleo.io.io import IO # noqa
from poetry.config.config import Config
from poetry.repositories import Pool
from poetry.utils.env import Env
from .operations import OperationTypes
class Executor:
def __init__(
self,
env: "Env",
pool: "Pool",
config: "Config",
io: "IO",
parallel: bool = None,
) -> None:
self._env = env
self._io = io
self._dry_run = False
self._enabled = True
self._verbose = False
self._authenticator = Authenticator(config, self._io)
self._chef = Chef(config, self._env)
self._chooser = Chooser(pool, self._env)
if parallel is None:
parallel = config.get("installer.parallel", True)
if parallel:
# This should be directly handled by ThreadPoolExecutor
# however, on some systems the number of CPUs cannot be determined
# (it raises a NotImplementedError), so, in this case, we assume
# that the system only has one CPU.
try:
self._max_workers = os.cpu_count() + 4
except NotImplementedError:
self._max_workers = 5
else:
self._max_workers = 1
self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
self._total_operations = 0
self._executed_operations = 0
self._executed = {"install": 0, "update": 0, "uninstall": 0}
self._skipped = {"install": 0, "update": 0, "uninstall": 0}
self._sections = dict()
self._lock = threading.Lock()
self._shutdown = False
self._hashes: Dict[str, str] = {}
@property
def installations_count(self) -> int:
return self._executed["install"]
@property
def updates_count(self) -> int:
return self._executed["update"]
@property
def removals_count(self) -> int:
return self._executed["uninstall"]
def supports_fancy_output(self) -> bool:
return self._io.output.is_decorated() and not self._dry_run
def disable(self) -> "Executor":
self._enabled = False
return self
def dry_run(self, dry_run: bool = True) -> "Executor":
self._dry_run = dry_run
return self
def verbose(self, verbose: bool = True) -> "Executor":
self._verbose = verbose
return self
def pip_install(
self, req: Union[Path, Link], upgrade: bool = False, editable: bool = False
) -> int:
func = pip_install
if editable:
func = pip_editable_install
try:
func(req, self._env, upgrade=upgrade)
except EnvCommandError as e:
output = decode(e.e.output)
if (
"KeyboardInterrupt" in output
or "ERROR: Operation cancelled by user" in output
):
return -2
raise
return 0
def execute(self, operations: List["OperationTypes"]) -> int:
self._total_operations = len(operations)
for job_type in self._executed:
self._executed[job_type] = 0
self._skipped[job_type] = 0
if operations and (self._enabled or self._dry_run):
self._display_summary(operations)
# We group operations by priority
groups = itertools.groupby(operations, key=lambda o: -o.priority)
self._sections = dict()
for _, group in groups:
tasks = []
serial_operations = []
for operation in group:
if self._shutdown:
break
# Some operations are unsafe, we must execute them serially in a group
# https://github.com/python-poetry/poetry/issues/3086
# https://github.com/python-poetry/poetry/issues/2658
#
# We need to explicitly check source type here, see:
# https://github.com/python-poetry/poetry-core/pull/98
is_parallel_unsafe = operation.job_type == "uninstall" or (
operation.package.develop
and operation.package.source_type in {"directory", "git"}
)
if not operation.skipped and is_parallel_unsafe:
serial_operations.append(operation)
continue
tasks.append(self._executor.submit(self._execute_operation, operation))
try:
wait(tasks)
for operation in serial_operations:
wait([self._executor.submit(self._execute_operation, operation)])
except KeyboardInterrupt:
self._shutdown = True
if self._shutdown:
# Cancelling further tasks from being executed
[task.cancel() for task in tasks]
self._executor.shutdown(wait=True)
break
return 1 if self._shutdown else 0
def _write(self, operation: "OperationTypes", line: str) -> None:
if not self.supports_fancy_output() or not self._should_write_operation(
operation
):
return
if self._io.is_debug():
with self._lock:
section = self._sections[id(operation)]
section.write_line(line)
return
with self._lock:
section = self._sections[id(operation)]
section.clear()
section.write(line)
def _execute_operation(self, operation: "OperationTypes") -> None:
try:
if self.supports_fancy_output():
if id(operation) not in self._sections:
if self._should_write_operation(operation):
with self._lock:
self._sections[id(operation)] = self._io.section()
self._sections[id(operation)].write_line(
" <fg=blue;options=bold>•</> {message}: <fg=blue>Pending...</>".format(
message=self.get_operation_message(operation),
),
)
else:
if self._should_write_operation(operation):
if not operation.skipped:
self._io.write_line(
" <fg=blue;options=bold>•</> {message}".format(
message=self.get_operation_message(operation),
),
)
else:
self._io.write_line(
" <fg=default;options=bold,dark>•</> {message}: "
"<fg=default;options=bold,dark>Skipped</> "
"<fg=default;options=dark>for the following reason:</> "
"<fg=default;options=bold,dark>{reason}</>".format(
message=self.get_operation_message(operation),
reason=operation.skip_reason,
)
)
try:
result = self._do_execute_operation(operation)
except EnvCommandError as e:
if e.e.returncode == -2:
result = -2
else:
raise
# If we have a result of -2 it means a KeyboardInterrupt
            # in any python subprocess, so we raise a KeyboardInterrupt
# error to be picked up by the error handler.
if result == -2:
raise KeyboardInterrupt
except Exception as e:
try:
from cleo.ui.exception_trace import ExceptionTrace
if not self.supports_fancy_output():
io = self._io
else:
message = (
" <error>•</error> {message}: <error>Failed</error>".format(
message=self.get_operation_message(operation, error=True),
)
)
self._write(operation, message)
io = self._sections.get(id(operation), self._io)
with self._lock:
trace = ExceptionTrace(e)
trace.render(io)
io.write_line("")
finally:
with self._lock:
self._shutdown = True
except KeyboardInterrupt:
try:
message = " <warning>•</warning> {message}: <warning>Cancelled</warning>".format(
message=self.get_operation_message(operation, warning=True),
)
if not self.supports_fancy_output():
self._io.write_line(message)
else:
self._write(operation, message)
finally:
with self._lock:
self._shutdown = True
def _do_execute_operation(self, operation: "OperationTypes") -> int:
method = operation.job_type
operation_message = self.get_operation_message(operation)
if operation.skipped:
if self.supports_fancy_output():
self._write(
operation,
" <fg=default;options=bold,dark>•</> {message}: "
"<fg=default;options=bold,dark>Skipped</> "
"<fg=default;options=dark>for the following reason:</> "
"<fg=default;options=bold,dark>{reason}</>".format(
message=operation_message,
reason=operation.skip_reason,
),
)
self._skipped[operation.job_type] += 1
return 0
if not self._enabled or self._dry_run:
self._io.write_line(
" <fg=blue;options=bold>•</> {message}".format(
message=operation_message,
)
)
return 0
result = getattr(self, f"_execute_{method}")(operation)
if result != 0:
return result
message = " <fg=green;options=bold>•</> {message}".format(
message=self.get_operation_message(operation, done=True),
)
self._write(operation, message)
self._increment_operations_count(operation, True)
return result
def _increment_operations_count(
self, operation: "OperationTypes", executed: bool
) -> None:
with self._lock:
if executed:
self._executed_operations += 1
self._executed[operation.job_type] += 1
else:
self._skipped[operation.job_type] += 1
def run_pip(self, *args: Any, **kwargs: Any) -> int:
try:
self._env.run_pip(*args, **kwargs)
except EnvCommandError as e:
output = decode(e.e.output)
if (
"KeyboardInterrupt" in output
or "ERROR: Operation cancelled by user" in output
):
return -2
raise
return 0
def get_operation_message(
self,
operation: "OperationTypes",
done: bool = False,
error: bool = False,
warning: bool = False,
) -> str:
base_tag = "fg=default"
operation_color = "c2"
source_operation_color = "c2"
package_color = "c1"
if error:
operation_color = "error"
elif warning:
operation_color = "warning"
elif done:
operation_color = "success"
if operation.skipped:
base_tag = "fg=default;options=dark"
operation_color += "_dark"
source_operation_color += "_dark"
package_color += "_dark"
if operation.job_type == "install":
return "<{}>Installing <{}>{}</{}> (<{}>{}</>)</>".format(
base_tag,
package_color,
operation.package.name,
package_color,
operation_color,
operation.package.full_pretty_version,
)
if operation.job_type == "uninstall":
return "<{}>Removing <{}>{}</{}> (<{}>{}</>)</>".format(
base_tag,
package_color,
operation.package.name,
package_color,
operation_color,
operation.package.full_pretty_version,
)
if operation.job_type == "update":
return "<{}>Updating <{}>{}</{}> (<{}>{}</{}> -> <{}>{}</>)</>".format(
base_tag,
package_color,
operation.initial_package.name,
package_color,
source_operation_color,
operation.initial_package.full_pretty_version,
source_operation_color,
operation_color,
operation.target_package.full_pretty_version,
)
return ""
def _display_summary(self, operations: List["OperationTypes"]) -> None:
installs = 0
updates = 0
uninstalls = 0
skipped = 0
for op in operations:
if op.skipped:
skipped += 1
continue
if op.job_type == "install":
installs += 1
elif op.job_type == "update":
updates += 1
elif op.job_type == "uninstall":
uninstalls += 1
if not installs and not updates and not uninstalls and not self._verbose:
self._io.write_line("")
self._io.write_line("No dependencies to install or update")
return
self._io.write_line("")
self._io.write_line(
"<b>Package operations</b>: "
"<info>{}</> install{}, "
"<info>{}</> update{}, "
"<info>{}</> removal{}"
"{}".format(
installs,
"" if installs == 1 else "s",
updates,
"" if updates == 1 else "s",
uninstalls,
"" if uninstalls == 1 else "s",
f", <info>{skipped}</> skipped" if skipped and self._verbose else "",
)
)
self._io.write_line("")
def _execute_install(self, operation: Union[Install, Update]) -> int:
status_code = self._install(operation)
self._save_url_reference(operation)
return status_code
def _execute_update(self, operation: Union[Install, Update]) -> int:
status_code = self._update(operation)
self._save_url_reference(operation)
return status_code
def _execute_uninstall(self, operation: Uninstall) -> int:
message = (
" <fg=blue;options=bold>•</> {message}: <info>Removing...</info>".format(
message=self.get_operation_message(operation),
)
)
self._write(operation, message)
return self._remove(operation)
def _install(self, operation: Union[Install, Update]) -> int:
package = operation.package
if package.source_type == "directory":
return self._install_directory(operation)
if package.source_type == "git":
return self._install_git(operation)
if package.source_type == "file":
archive = self._prepare_file(operation)
elif package.source_type == "url":
archive = self._download_link(operation, Link(package.source_url))
else:
archive = self._download(operation)
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Installing...</info>".format(
message=operation_message,
)
)
self._write(operation, message)
return self.pip_install(archive, upgrade=operation.job_type == "update")
def _update(self, operation: Union[Install, Update]) -> int:
return self._install(operation)
def _remove(self, operation: Uninstall) -> int:
package = operation.package
# If we have a VCS package, remove its source directory
if package.source_type == "git":
src_dir = self._env.path / "src" / package.name
if src_dir.exists():
safe_rmtree(str(src_dir))
try:
return self.run_pip("uninstall", package.name, "-y")
except CalledProcessError as e:
if "not installed" in str(e):
return 0
raise
def _prepare_file(self, operation: Union[Install, Update]) -> Path:
package = operation.package
message = (
" <fg=blue;options=bold>•</> {message}: <info>Preparing...</info>".format(
message=self.get_operation_message(operation),
)
)
self._write(operation, message)
archive = Path(package.source_url)
if not Path(package.source_url).is_absolute() and package.root_dir:
archive = package.root_dir / archive
archive = self._chef.prepare(archive)
return archive
def _install_directory(self, operation: Union[Install, Update]) -> int:
from poetry.factory import Factory
package = operation.package
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Building...</info>".format(
message=operation_message,
)
)
self._write(operation, message)
if package.root_dir:
req = package.root_dir / package.source_url
else:
req = Path(package.source_url).resolve(strict=False)
pyproject = PyProjectTOML(os.path.join(req, "pyproject.toml"))
if pyproject.is_poetry_project():
# Even if there is a build system specified
# some versions of pip (< 19.0.0) don't understand it
# so we need to check the version of pip to know
# if we can rely on the build system
legacy_pip = (
self._env.pip_version
< self._env.pip_version.__class__.from_parts(19, 0, 0)
)
package_poetry = Factory().create_poetry(pyproject.file.path.parent)
if package.develop and not package_poetry.package.build_script:
from poetry.masonry.builders.editable import EditableBuilder
# This is a Poetry package in editable mode
# we can use the EditableBuilder without going through pip
# to install it, unless it has a build script.
builder = EditableBuilder(package_poetry, self._env, NullIO())
builder.build()
return 0
elif legacy_pip or package_poetry.package.build_script:
from poetry.core.masonry.builders.sdist import SdistBuilder
# We need to rely on creating a temporary setup.py
# file since the version of pip does not support
# build-systems
# We also need it for non-PEP-517 packages
builder = SdistBuilder(package_poetry)
with builder.setup_py():
if package.develop:
return self.pip_install(req, editable=True)
return self.pip_install(req, upgrade=True)
if package.develop:
return self.pip_install(req, editable=True)
return self.pip_install(req, upgrade=True)
def _install_git(self, operation: Union[Install, Update]) -> int:
from poetry.core.vcs import Git
package = operation.package
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Cloning...</info>".format(
message=operation_message,
)
)
self._write(operation, message)
src_dir = self._env.path / "src" / package.name
if src_dir.exists():
safe_rmtree(str(src_dir))
src_dir.parent.mkdir(exist_ok=True)
git = Git()
git.clone(package.source_url, src_dir)
reference = package.source_resolved_reference
if not reference:
reference = package.source_reference
git.checkout(reference, src_dir)
# Now we just need to install from the source directory
original_url = package.source_url
package._source_url = str(src_dir)
status_code = self._install_directory(operation)
package._source_url = original_url
return status_code
def _download(self, operation: Union[Install, Update]) -> Link:
link = self._chooser.choose_for(operation.package)
return self._download_link(operation, link)
def _download_link(self, operation: Union[Install, Update], link: Link) -> Link:
package = operation.package
archive = self._chef.get_cached_archive_for_link(link)
if archive is link:
            # No cached distribution was found, so we download and prepare it
try:
archive = self._download_archive(operation, link)
except BaseException:
cache_directory = self._chef.get_cache_directory_for_link(link)
cached_file = cache_directory.joinpath(link.filename)
# We can't use unlink(missing_ok=True) because it's not available
# in pathlib2 for Python 2.7
if cached_file.exists():
cached_file.unlink()
raise
# TODO: Check readability of the created archive
if not link.is_wheel:
archive = self._chef.prepare(archive)
if package.files:
archive_hash = self._validate_archive_hash(archive, package)
self._hashes[package.name] = archive_hash
return archive
@staticmethod
def _validate_archive_hash(archive: Union[Path, Link], package: Package) -> str:
archive_path = (
url_to_path(archive.url) if isinstance(archive, Link) else archive
)
file_dep = FileDependency(
package.name,
archive_path,
)
archive_hash = "sha256:" + file_dep.hash()
known_hashes = {f["hash"] for f in package.files}
if archive_hash not in known_hashes:
raise RuntimeError(
f"Hash for {package} from archive {archive_path.name} not found in known hashes (was: {archive_hash})"
)
return archive_hash
def _download_archive(self, operation: Union[Install, Update], link: Link) -> Path:
response = self._authenticator.request(
"get", link.url, stream=True, io=self._sections.get(id(operation), self._io)
)
wheel_size = response.headers.get("content-length")
operation_message = self.get_operation_message(operation)
message = (
" <fg=blue;options=bold>•</> {message}: <info>Downloading...</>".format(
message=operation_message,
)
)
progress = None
if self.supports_fancy_output():
if wheel_size is None:
self._write(operation, message)
else:
from cleo.ui.progress_bar import ProgressBar
progress = ProgressBar(
self._sections[id(operation)], max=int(wheel_size)
)
progress.set_format(message + " <b>%percent%%</b>")
if progress:
with self._lock:
progress.start()
done = 0
archive = self._chef.get_cache_directory_for_link(link) / link.filename
archive.parent.mkdir(parents=True, exist_ok=True)
with archive.open("wb") as f:
for chunk in response.iter_content(chunk_size=4096):
if not chunk:
break
done += len(chunk)
if progress:
with self._lock:
progress.set_progress(done)
f.write(chunk)
if progress:
with self._lock:
progress.finish()
return archive
def _should_write_operation(self, operation: Operation) -> bool:
return not operation.skipped or self._dry_run or self._verbose
def _save_url_reference(self, operation: "OperationTypes") -> None:
"""
Create and store a PEP-610 `direct_url.json` file, if needed.
"""
if operation.job_type not in {"install", "update"}:
return
package = operation.package
if not package.source_url:
# Since we are installing from our own distribution cache
# pip will write a `direct_url.json` file pointing to the cache
# distribution.
# That's not what we want so we remove the direct_url.json file,
# if it exists.
for (
direct_url_json
) in self._env.site_packages.find_distribution_direct_url_json_files(
distribution_name=package.name, writable_only=True
):
# We can't use unlink(missing_ok=True) because it's not always available
if direct_url_json.exists():
direct_url_json.unlink()
return
url_reference = None
if package.source_type == "git":
url_reference = self._create_git_url_reference(package)
elif package.source_type == "url":
url_reference = self._create_url_url_reference(package)
elif package.source_type == "directory":
url_reference = self._create_directory_url_reference(package)
elif package.source_type == "file":
url_reference = self._create_file_url_reference(package)
if url_reference:
for dist in self._env.site_packages.distributions(
name=package.name, writable_only=True
):
dist._path.joinpath("direct_url.json").write_text(
json.dumps(url_reference),
encoding="utf-8",
)
record = dist._path.joinpath("RECORD")
if record.exists():
with record.open(mode="a", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerow(
[
str(
dist._path.joinpath("direct_url.json").relative_to(
record.parent.parent
)
),
"",
"",
]
)
def _create_git_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
reference = {
"url": package.source_url,
"vcs_info": {
"vcs": "git",
"requested_revision": package.source_reference,
"commit_id": package.source_resolved_reference,
},
}
return reference
def _create_url_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
archive_info = {}
if package.name in self._hashes:
archive_info["hash"] = self._hashes[package.name]
reference = {"url": package.source_url, "archive_info": archive_info}
return reference
def _create_file_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
archive_info = {}
if package.name in self._hashes:
archive_info["hash"] = self._hashes[package.name]
reference = {
"url": Path(package.source_url).as_uri(),
"archive_info": archive_info,
}
return reference
def _create_directory_url_reference(
self, package: "Package"
) -> Dict[str, Union[str, Dict[str, str]]]:
dir_info = {}
if package.develop:
dir_info["editable"] = True
reference = {
"url": Path(package.source_url).as_uri(),
"dir_info": dir_info,
}
return reference
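# Minimal illustrative sketch (an assumption for exposition, not part of poetry):
# the PEP-610 metadata written for a git-sourced package has this shape; the URL
# and revisions below are made up.
if __name__ == "__main__":
    _example_direct_url = {
        "url": "https://github.com/org/project.git",
        "vcs_info": {
            "vcs": "git",
            "requested_revision": "main",
            "commit_id": "0123456789abcdef0123456789abcdef01234567",
        },
    }
    print(json.dumps(_example_direct_url, indent=2))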
|
the-stack_0_16726 | # Copyright (c) OpenMMLab. All rights reserved.
from collections import namedtuple
import torch
from torch.nn import (AdaptiveAvgPool2d, BatchNorm2d, Conv2d, MaxPool2d,
Module, PReLU, ReLU, Sequential, Sigmoid)
# yapf: disable
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) # isort:skip # noqa
"""
# yapf: enable
class Flatten(Module):
"""Flatten Module."""
def forward(self, input):
return input.view(input.size(0), -1)
def l2_norm(input, axis=1):
"""l2 normalization.
Args:
input (torch.Tensor): The input tensor.
axis (int, optional): Specifies which axis of input to calculate the
norm across. Defaults to 1.
Returns:
Tensor: Tensor after L2 normalization per-instance.
"""
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
"""A named tuple describing a ResNet block."""
def get_block(in_channel, depth, num_units, stride=2):
"""Get a single block config.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
num_units (int): Number of unit modules.
stride (int, optional): Conv2d stride. Defaults to 2.
Returns:
list: A list of unit modules' config.
"""
return [Bottleneck(in_channel, depth, stride)
] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
"""Get block configs of backbone.
Args:
num_layers (int): Number of ConvBlock layers in backbone.
Raises:
ValueError: `num_layers` must be one of [50, 100, 152].
Returns:
list: A list of block configs.
"""
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError(
'Invalid number of layers: {}. Must be one of [50, 100, 152]'.
format(num_layers))
return blocks
class SEModule(Module):
"""Squeeze-and-Excitation Modules.
Args:
channels (int): Input channels.
reduction (int): Intermediate channels reduction ratio.
"""
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = AdaptiveAvgPool2d(1)
self.fc1 = Conv2d(
channels,
channels // reduction,
kernel_size=1,
padding=0,
bias=False)
self.relu = ReLU(inplace=True)
self.fc2 = Conv2d(
channels // reduction,
channels,
kernel_size=1,
padding=0,
bias=False)
self.sigmoid = Sigmoid()
def forward(self, x):
"""Forward Function."""
module_input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
class bottleneck_IR(Module):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
"""Intermediate Resblock of bottleneck.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
super(bottleneck_IR, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
class bottleneck_IR_SE(Module):
"""Intermediate Resblock of bottleneck with SEModule.
Args:
in_channel (int): Input channels.
depth (int): Output channels.
stride (int): Conv2d stride.
"""
def __init__(self, in_channel, depth, stride):
super(bottleneck_IR_SE, self).__init__()
if in_channel == depth:
self.shortcut_layer = MaxPool2d(1, stride)
else:
self.shortcut_layer = Sequential(
Conv2d(in_channel, depth, (1, 1), stride, bias=False),
BatchNorm2d(depth))
self.res_layer = Sequential(
BatchNorm2d(in_channel),
Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
BatchNorm2d(depth), SEModule(depth, 16))
def forward(self, x):
"""Forward function."""
shortcut = self.shortcut_layer(x)
res = self.res_layer(x)
return res + shortcut
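# Minimal usage sketch (illustrative only, not part of the original module): build
# the stage configs for an IR-50 backbone and run a dummy tensor through the first
# IR-SE unit; the input shape below assumes a 56x56 stage-1 feature map.
if __name__ == '__main__':
    blocks = get_blocks(50)
    first = blocks[0][0]  # Bottleneck(in_channel=64, depth=64, stride=2)
    unit = bottleneck_IR_SE(first.in_channel, first.depth, first.stride)
    out = unit(torch.randn(1, 64, 56, 56))
    print(out.shape)  # expected: torch.Size([1, 64, 28, 28])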
|
the-stack_0_16727 | #!/usr/bin/env python3
import json
import sys
import os
import subprocess
import time
description = """
process all data_set within a bundle
"""
def main():
try:
args = parse_args()
bundle = read_bundle_json(args["bundle_json"])
for data_set in bundle["DATA_SETS"]:
process_one_set(data_set)
except:
print_help()
raise
def print_help():
print(description)
print("Usage:")
print("./process_bundle.py <bundle_json>")
print()
def parse_args():
if len(sys.argv) != 2:
raise Exception("Wrong number of args, need 1")
args = {}
args["bundle_json"] = sys.argv[1]
return args
# Read the json file contains info for 1 bundle
def read_bundle_json(json_filename):
with open(json_filename, "r") as outfile:
bundle_json = json.load(outfile)
return bundle_json
# Run process_one_set.sh
# data_set argument is added into the environment
def process_one_set(data_set):
assert(isinstance(data_set, dict))
my_env = os.environ.copy()
my_env.update(data_set) # add data_set into the environment
try:
proc = subprocess.check_output(["/bin/bash", "process_one_set.sh"], env=my_env)
    except subprocess.CalledProcessError as e:
        print("Error when running for data_set: ", data_set)
        sys.exit(e.returncode)
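# Illustrative helper (an assumption about the expected bundle layout, not part of
# the original pipeline): each DATA_SETS entry is a flat dict of string environment
# variables consumed by process_one_set.sh. The variable names below are made up.
def write_example_bundle(path="example_bundle.json"):
    example = {
        "DATA_SETS": [
            {"SET_NAME": "run_a", "INPUT_DIR": "/data/run_a"},
            {"SET_NAME": "run_b", "INPUT_DIR": "/data/run_b"},
        ]
    }
    with open(path, "w") as f:
        json.dump(example, f, indent=4)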
main()
|