ext | sha | content
---|---|---|
py | 1a2fa9bc071c85c8c01ed2af30bf00fb42c6360b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Main
Created on Tue Aug 17 14:16:44 2021
Version: 1.0
Universidad Santo Tomás Tunja
Simulation
@author: Juana Valentina Mendoza Santamaría
@author: Alix Ivonne Chaparro Vasquez
presented to: Martha Susana Contreras Ortiz
"""
from controllers.settings import config
from controllers.mainController import mainController
if __name__ == "__main__":
simulator = config.PARAMETERS['simulator']
mainController(simulator) # True (simulation) - False (Demo) |
py | 1a2fad3f3ec070c73e65b6b03c43e2f696851309 | # TIPS: only used to find the best epoch of MLP
# MLP
import csv
from itertools import islice
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold, train_test_split
import pandas as pd
from sklearn.utils import shuffle
import tensorflow as tf
def bit2attr(bitstr) -> list:
attr_vec = list()
for i in range(len(bitstr)):
attr_vec.append(int(bitstr[i]))
return attr_vec
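# For example, bit2attr('101') returns [1, 0, 1].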
def mean_relative_error(y_pred, y_test):
assert len(y_pred) == len(y_test)
mre = 0.0
for i in range(len(y_pred)):
mre = mre + abs((y_pred[i] - y_test[i]) / y_test[i])
mre = mre * 100/ len(y_pred)
return mre
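# Worked example (illustrative values only): with y_pred = [1.1, 2.0] and
# y_test = [1.0, 2.5], mean_relative_error returns
# 100 * (|0.1 / 1.0| + |-0.5 / 2.5|) / 2 = 15.0 (percent).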
Large_MRE_points = pd.DataFrame()
Large_MRE_X = []
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
'''
1) Data preprocessing
'''
# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'
filepath = 'data/database/22-01-29-descriptor-train.csv'
data = pd.read_csv(filepath, encoding='gb18030')
print(data.shape)
data = data.dropna()
print(data.shape)
data = shuffle(data)
data_x_df = data.drop(['label'], axis=1)
data_y_df = data[['label']]
# Normalization (min-max scaling)
min_max_scaler_X = MinMaxScaler()
min_max_scaler_X.fit(data_x_df)
x_trans1 = min_max_scaler_X.transform(data_x_df)
min_max_scaler_y = MinMaxScaler()
min_max_scaler_y.fit(data_y_df)
y_trans1 = min_max_scaler_y.transform(data_y_df)
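# Note: the scalers fitted on the training data above are reused further down
# (x_trans1_test / y_trans1_test), so the external test set is scaled with the
# training set's min/max rather than its own.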
test_filepath = "data/database/22-01-29-descriptor-test-level-1.csv"
test_data = pd.read_csv(test_filepath, encoding='gb18030')
print('test data: ', test_data.shape)
test_data_x_df = test_data.drop(['label'], axis=1)
test_data_y_df = test_data[['label']]
x_trans1_test = min_max_scaler_X.transform(test_data_x_df)
y_trans1_test = min_max_scaler_y.transform(test_data_y_df)
'''
3) Build the model
'''
from keras.layers import MaxPooling1D, Conv1D, Dense, Flatten, Dropout
from keras import models
from keras.optimizers import Adam, RMSprop, SGD
def buildModel():
model = models.Sequential()
l4 = Dense(512, activation='relu')
l5 = Dropout(rate=0.2)
l6 = Dense(128, activation='relu')
l7 = Dense(30, activation='relu')
l8 = Dense(1)
layers = [l4, l5, l6, l7, l8]
for i in range(len(layers)):
model.add(layers[i])
adam = Adam(lr=1e-3)
model.compile(optimizer=adam, loss='logcosh', metrics=['mae', 'mape'])
model_mlp = MLPRegressor(
hidden_layer_sizes=(512, 128, 32), activation='relu', solver='lbfgs', alpha=0.0001,
max_iter=5000,
random_state=1, tol=0.0001, verbose=False, warm_start=False)
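# Note: this scikit-learn MLPRegressor is constructed but never used or
# returned; only the Keras Sequential model above is compiled and returned.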
return model
def scheduler(epoch, lr):
if epoch > 0 and epoch % 500 == 0:
return lr * 0.1
else:
return lr
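# Illustration: combined with the Adam learning rate of 1e-3 above, this
# scheduler keeps 1e-3 for epochs 0-499, drops to 1e-4 at epoch 500,
# 1e-5 at epoch 1000, and so on (a 10x decay every 500 epochs).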
'''
4) Train the model
'''
from sklearn import metrics
# n_split = 10
mlp_scores = []
MAEs = []
out_MAEs = []
in_y_test = []
in_y_pred = []
out_y_test = []
out_y_pred = []
X_train = x_trans1
y_train = y_trans1
# External validation set
X_test = x_trans1_test
y_trans1_test = np.reshape(y_trans1_test, (-1, 1))
y_test = y_trans1_test
callback = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
model_mlp = buildModel()
history = model_mlp.fit(X_train, y_train, epochs=1, verbose=1, validation_data=(X_test, y_test), callbacks=[callback])
print(model_mlp.summary())
exit(0)  # NOTE: the script intentionally stops here after printing the model summary; the plotting and file-writing code below never runs as written
losses = history.history['loss']
eval_mres = history.history['val_mape']
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot([x for x in range(len(losses))], losses, 'b', label='loss')
ax1.set_ylabel('loss', color='b')
ax2.plot([x for x in range(len(eval_mres))], eval_mres, 'r', label='eval_mre')
ax2.set_ylabel('eval_mre', color='r')
ax1.set_xlabel('epochs')
plt.title('Training of MLP')
plt.savefig('pics/Training_of_MLP.png')
import os
outdir = 'Out/losses_and_mres'
os.makedirs(outdir, exist_ok=True)
with open(os.path.join(outdir, 'mlp_descriptor.txt'), 'w') as f:
f.write('loss\n')
f.write(' '.join([str(x) for x in losses]))
f.write('\n')
f.write('mres\n')
f.write(' '.join([str(x) for x in eval_mres])) |
py | 1a2fada21c09f8bf65d23747a1211aeb7930ea13 | import logging
from google.appengine.ext import db
from google.appengine.api import memcache
from app.utility.utils import memcached
import app.utility.utils as utils
import app.db.counter as counter
import web
QUESTIONS_PER_SITEMAP = 500
class Sitemap(db.Model):
question_count = db.IntegerProperty(default = 0)
question_keys = db.StringListProperty(default = [])
content = db.TextProperty(default ='')
archived = db.BooleanProperty(default = False)
created = db.DateTimeProperty(auto_now_add = True)
last_modified = db.DateTimeProperty(auto_now = True)
@staticmethod
def get_last_sitemap():
entity = Sitemap.all().order('-created').get()
if entity:
if entity.question_count >= QUESTIONS_PER_SITEMAP:
entity.content = unicode(web.render.sitemap_questions(entity.question_keys))
entity.archived = True
entity.put()
entity = Sitemap()
entity.put()
else:
entity = Sitemap()
entity.put()
return entity
@staticmethod
def update_last_sitemap(key):
last_sitemap = Sitemap.get_last_sitemap()
last_sitemap.question_count += 1
last_sitemap.question_keys.insert(0, str(key))
last_sitemap.put()
@staticmethod
def get_sitemaps():
sitemaps = Sitemap.all().order('-created').fetch(500)
return sitemaps
@staticmethod
@memcached('get_sitemap_by_id', 3600*24, lambda id : int(id) )
def get_sitemap_by_id(id):
entity = Sitemap.get_by_id(id)
if entity:
if entity.content:
return entity.content
else:
return unicode(web.render.sitemap_questions(entity.question_keys))
else:
raise web.notfound() |
py | 1a2faefaf711c5ce913d8e0bc348eba0ebcc72e8 | from django.shortcuts import render
from django.contrib.auth.models import User
from django.http import HttpResponse
from .models import Form
from .forms import ReqForm, mess  # 'mess' is used in showmess() below; assumed to be defined in .forms
from .filters import FormFilter
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import redirect
def form(request):
return render(request,'form.html')
def status(request):
return render(request,'status.html')
def about(request):
return render(request,'about.html')
def index(request):
return render(request,'index.html')
def showformdata(request):
if request.method=='POST':
fm=ReqForm(request.POST)
if fm.is_valid():
em=fm.cleaned_data['email']
cn=fm.cleaned_data['ClubName']
rn=fm.cleaned_data['RepresentativeName']
ct=fm.cleaned_data['Contact']  # renamed from 'cn', which silently overwrote the ClubName value above
df=fm.cleaned_data['req_date_from']
dt=fm.cleaned_data['req_date_to']
rt=fm.cleaned_data['req_type']
rp=fm.cleaned_data['req_purpose']
profile = fm.save(commit=False)
profile.user = request.user
profile.save()
fm.save()
fm=ReqForm()
print(em)
print(rn)
else:
fm=ReqForm()
return render(request,'form.html',{'frm':fm})
def reqInfo(request):
u=request.user
if u.groups.filter(name='Managers').exists():
req = Form.objects.all()
print(req)
print("this is a manager")
context={
'form':form,
'req': req
}
else:
req = Form.objects.filter(user=request.user)
print(req)
print("normal user")
context={
'form':form,
'req': req
}
return render(request,'status.html',context)
def student_approve(request,user_id):
val=Form.objects.get(id=user_id)
val.alloted=1
val.save()
return HttpResponse("approved successfully")
def student_disapprove(request,user_id):
val=Form.objects.get(id=user_id)
val.alloted=2
val.save()
return HttpResponse("disapproved successfully")
def student_reset(request,user_id):
val=Form.objects.get(id=user_id)
val.alloted=0
val.save()
return HttpResponse("reset successfully")
# def write_view(request, *args, **kwargs):
# val=Form.objects.get(id=user_id)
# if request.is_ajax() and request.method == "POST":
# texteditor = request.POST['TextEntered']
# val.Management_Comments='texteditor'
# print(texteditor)
# ## Don't forget to do validation and cleanup on texteditor to avoid security hassles
# ## Do your logic here
# SuccessAcknowledgment = {"Acknowledged":"Acknowledged"}
# return HttpResponse(json.dumps(SuccessAcknowledgment))
# else:
# return render(request, "write.html")
def reqInfoMess(request):
u=request.user
if u.groups.filter(name='Managers').exists():
req = Form.objects.all()
print(req)
print("this is a manager")
context={
'form':form,
'req': req
}
else:
req = Form.objects.filter(user=request.user)
print(req)
print("normal user")
context={
'form':form,
'req': req
}
return render(request,'status.html',context)
def showmess(request, user_id):
u=request.user
if request.method=='POST':
fm=mess(request.POST)
ms=""
if fm.is_valid():
ms=fm.cleaned_data['Management_Comments']
u = fm.save(commit=False)
#profile.user = request.user
u.save()
fm.save()
fm=mess()
print(ms)
return render(request,'status.html',{'mess':ms})
|
py | 1a2faf341daa8b57f34a09cec3003b1557907096 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, Cabral, Juan; Luczywo, Nadia
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Tool to check if each python module has a corresponding API docs."""
# =============================================================================
# IMPORTS
# =============================================================================
import inspect
import pathlib
import attr
import typer
# =============================================================================
# CONSTANTS
# =============================================================================
VERSION = "0.1"
# =============================================================================
# FUNCTIONS
# =============================================================================
def check_apidoc_structure(apidoc_dir, reference_dir):
apidoc_dir = pathlib.Path(apidoc_dir)
reference_dir = pathlib.Path(reference_dir)
if not apidoc_dir.exists():
raise OSError(f"'{apidoc_dir}' do no exist")
if not reference_dir.exists():
raise OSError(f"'{reference_dir}' do no exist")
reference = list(reference_dir.glob("**/*.py"))
result = {}
for ref in reference:
# essentially we remove the parent dir
*dirs, ref_name = ref.relative_to(reference_dir).parts
if ref_name == "__init__.py":
ref_name = "index.py"
search_dir = apidoc_dir
for subdir in dirs:
search_dir /= subdir
search = search_dir / f"{ref_name[:-3]}.rst"
result[str(ref)] = (str(search), search.exists())
return result
# =============================================================================
# CLI
# =============================================================================
@attr.s(frozen=True)
class CLI:
"""Check if the structure of API doc directory is equivalent to those of
the project.
"""
footnotes = "\n".join(
[
"This software is under the BSD 3-Clause License.",
"Copyright (c) 2021, Juan Cabral.",
"For bug reporting or other instructions please check:"
" https://github.com/quatrope/scikit-criteria",
]
)
run = attr.ib(init=False)
@run.default
def _set_run_default(self):
app = typer.Typer()
for k in dir(self):
if k.startswith("_"):
continue
v = getattr(self, k)
if inspect.ismethod(v):
decorator = app.command()
decorator(v)
return app
def version(self):
"""Print checktestdir.py version."""
typer.echo(f"{__file__ } v.{VERSION}")
def check(
self,
test_dir: str = typer.Argument(
..., help="Path to the api-doc structure."
),
reference_dir: str = typer.Option(
..., help="Path to the reference structure."
),
verbose: bool = typer.Option(
default=False, help="Show all the result"
),
):
"""Check if the structure of test directory is equivalent to those
of the project.
"""
try:
check_result = check_apidoc_structure(test_dir, reference_dir)
except Exception as err:
typer.echo(typer.style(str(err), fg=typer.colors.RED))
raise typer.Exit(code=1)
all_tests_exists = True
for ref, test_result in check_result.items():
test, test_exists = test_result
if test_exists:
fg = typer.colors.GREEN
status = ""
else:
all_tests_exists = False
fg = typer.colors.RED
status = typer.style("[NOT FOUND]", fg=typer.colors.YELLOW)
if verbose or not test_exists:
msg = f"{ref} -> {test} {status}"
typer.echo(typer.style(msg, fg=fg))
if all_tests_exists:
final_fg = typer.colors.GREEN
final_status = "Test structure ok!"
exit_code = 0
else:
final_fg = typer.colors.RED
final_status = "Structure not equivalent!"
exit_code = 1
typer.echo("-------------------------------------")
typer.echo(typer.style(final_status, fg=final_fg))
raise typer.Exit(code=exit_code)
def main():
"""Run the checkapidocdir.py cli interface."""
cli = CLI()
cli.run()
if __name__ == "__main__":
main()
|
py | 1a2fb1887626cf76c536e2f273cf98f4340ebff5 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class ListRequiredServiceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ListRequiredService','emr')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_EmrVersion(self):
return self.get_query_params().get('EmrVersion')
def set_EmrVersion(self,EmrVersion):
self.add_query_param('EmrVersion',EmrVersion)
def get_ServiceNameList(self):
return self.get_query_params().get('ServiceNameList')
def set_ServiceNameList(self,ServiceNameList):
self.add_query_param('ServiceNameList',ServiceNameList) |
py | 1a2fb2f57b569902970daef785ec480eca5e29b4 | """The tests for the Restore component."""
from datetime import datetime
from unittest.mock import patch
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.core import CoreState, State
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.restore_state import (
DATA_RESTORE_STATE_TASK,
STORAGE_KEY,
RestoreEntity,
RestoreStateData,
StoredState,
)
from homeassistant.util import dt as dt_util
async def test_caching_data(hass):
"""Test that we cache data."""
now = dt_util.utcnow()
stored_states = [
StoredState(State("input_boolean.b0", "on"), now),
StoredState(State("input_boolean.b1", "on"), now),
StoredState(State("input_boolean.b2", "on"), now),
]
data = await RestoreStateData.async_get_instance(hass)
await hass.async_block_till_done()
await data.store.async_save([state.as_dict() for state in stored_states])
# Emulate a fresh load
hass.data[DATA_RESTORE_STATE_TASK] = None
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
# Mock that only b1 is present this run
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data:
state = await entity.async_get_last_state()
await hass.async_block_till_done()
assert state is not None
assert state.entity_id == "input_boolean.b1"
assert state.state == "on"
assert mock_write_data.called
async def test_hass_starting(hass):
"""Test that we cache data."""
hass.state = CoreState.starting
now = dt_util.utcnow()
stored_states = [
StoredState(State("input_boolean.b0", "on"), now),
StoredState(State("input_boolean.b1", "on"), now),
StoredState(State("input_boolean.b2", "on"), now),
]
data = await RestoreStateData.async_get_instance(hass)
await hass.async_block_till_done()
await data.store.async_save([state.as_dict() for state in stored_states])
# Emulate a fresh load
hass.data[DATA_RESTORE_STATE_TASK] = None
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
# Mock that only b1 is present this run
states = [State("input_boolean.b1", "on")]
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
state = await entity.async_get_last_state()
await hass.async_block_till_done()
assert state is not None
assert state.entity_id == "input_boolean.b1"
assert state.state == "on"
# Assert that no data was written yet, since hass is still starting.
assert not mock_write_data.called
# Finish hass startup
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data:
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
# Assert that this session states were written
assert mock_write_data.called
async def test_dump_data(hass):
"""Test that we cache data."""
states = [
State("input_boolean.b0", "on"),
State("input_boolean.b1", "on"),
State("input_boolean.b2", "on"),
State("input_boolean.b5", "unavailable", {"restored": True}),
]
entity = Entity()
entity.hass = hass
entity.entity_id = "input_boolean.b0"
await entity.async_internal_added_to_hass()
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
await entity.async_internal_added_to_hass()
data = await RestoreStateData.async_get_instance(hass)
now = dt_util.utcnow()
data.last_states = {
"input_boolean.b0": StoredState(State("input_boolean.b0", "off"), now),
"input_boolean.b1": StoredState(State("input_boolean.b1", "off"), now),
"input_boolean.b2": StoredState(State("input_boolean.b2", "off"), now),
"input_boolean.b3": StoredState(State("input_boolean.b3", "off"), now),
"input_boolean.b4": StoredState(
State("input_boolean.b4", "off"),
datetime(1985, 10, 26, 1, 22, tzinfo=dt_util.UTC),
),
"input_boolean.b5": StoredState(State("input_boolean.b5", "off"), now),
}
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
await data.async_dump_states()
assert mock_write_data.called
args = mock_write_data.mock_calls[0][1]
written_states = args[0]
# b0 should not be written, since it didn't extend RestoreEntity
# b1 should be written, since it is present in the current run
# b2 should not be written, since it is not registered with the helper
# b3 should be written, since it is still not expired
# b4 should not be written, since it is now expired
# b5 should be written, since current state is restored by entity registry
assert len(written_states) == 3
assert written_states[0]["state"]["entity_id"] == "input_boolean.b1"
assert written_states[0]["state"]["state"] == "on"
assert written_states[1]["state"]["entity_id"] == "input_boolean.b3"
assert written_states[1]["state"]["state"] == "off"
assert written_states[2]["state"]["entity_id"] == "input_boolean.b5"
assert written_states[2]["state"]["state"] == "off"
# Test that removed entities are not persisted
await entity.async_remove()
with patch(
"homeassistant.helpers.restore_state.Store.async_save"
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
await data.async_dump_states()
assert mock_write_data.called
args = mock_write_data.mock_calls[0][1]
written_states = args[0]
assert len(written_states) == 2
assert written_states[0]["state"]["entity_id"] == "input_boolean.b3"
assert written_states[0]["state"]["state"] == "off"
assert written_states[1]["state"]["entity_id"] == "input_boolean.b5"
assert written_states[1]["state"]["state"] == "off"
async def test_dump_error(hass):
"""Test that we cache data."""
states = [
State("input_boolean.b0", "on"),
State("input_boolean.b1", "on"),
State("input_boolean.b2", "on"),
]
entity = Entity()
entity.hass = hass
entity.entity_id = "input_boolean.b0"
await entity.async_internal_added_to_hass()
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
await entity.async_internal_added_to_hass()
data = await RestoreStateData.async_get_instance(hass)
with patch(
"homeassistant.helpers.restore_state.Store.async_save",
side_effect=HomeAssistantError,
) as mock_write_data, patch.object(hass.states, "async_all", return_value=states):
await data.async_dump_states()
assert mock_write_data.called
async def test_load_error(hass):
"""Test that we cache data."""
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b1"
with patch(
"homeassistant.helpers.storage.Store.async_load",
side_effect=HomeAssistantError,
):
state = await entity.async_get_last_state()
assert state is None
async def test_state_saved_on_remove(hass):
"""Test that we save entity state on removal."""
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "input_boolean.b0"
await entity.async_internal_added_to_hass()
now = dt_util.utcnow()
hass.states.async_set(
"input_boolean.b0", "on", {"complicated": {"value": {1, 2, now}}}
)
data = await RestoreStateData.async_get_instance(hass)
# No last states should currently be saved
assert not data.last_states
await entity.async_remove()
# We should store the input boolean state when it is removed
state = data.last_states["input_boolean.b0"].state
assert state.state == "on"
assert isinstance(state.attributes["complicated"]["value"], list)
assert set(state.attributes["complicated"]["value"]) == {1, 2, now.isoformat()}
async def test_restoring_invalid_entity_id(hass, hass_storage):
"""Test restoring invalid entity IDs."""
entity = RestoreEntity()
entity.hass = hass
entity.entity_id = "test.invalid__entity_id"
now = dt_util.utcnow().isoformat()
hass_storage[STORAGE_KEY] = {
"version": 1,
"key": STORAGE_KEY,
"data": [
{
"state": {
"entity_id": "test.invalid__entity_id",
"state": "off",
"attributes": {},
"last_changed": now,
"last_updated": now,
"context": {
"id": "3c2243ff5f30447eb12e7348cfd5b8ff",
"user_id": None,
},
},
"last_seen": dt_util.utcnow().isoformat(),
}
],
}
state = await entity.async_get_last_state()
assert state is None
|
py | 1a2fb3864acac51c2808eb3aa0e7ffbcde0d3200 | """Implement a query class."""
from dataclasses import dataclass
from typing import List, Container
from .keyword import Keyword
from .first_class_collection import FirstClassSequence
@dataclass
class Query(FirstClassSequence):
"""Represent a query."""
keywords: List[Keyword]
@property
def sequence(self):
"""Return :py:attr:`keywords`."""
return self.keywords
def get_query_filtered_by_container(self, container: Container[str]):
"""Filter by a container.
Returns
-------
Query
"""
return Query([keyword for keyword in self.keywords
if keyword.keyword in container])
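# A minimal usage sketch (assuming Keyword is constructed from the keyword
# string, which is not shown here):
#   q = Query([Keyword('spam'), Keyword('eggs')])
#   q.get_query_filtered_by_container({'spam'})  # -> Query keeping only 'spam'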
|
py | 1a2fb470ba622f19ebdc5d1aa4ba2b377e0a971f | import hmac
from hashlib import sha1
from time import time
import urllib2
import simplejson
from django.conf import settings
from django_hpcloud.models import AuthToken
def generate_form_post_key(path, redirect,
expires=2147483647,
max_file_size=1073741824,
method='POST'):
'''
Generates the key for the FormPOST signatures. This is used for the file
upload forms.
:param path: :class:`str` The path of the directory to upload to, this should
not include the name of the file you're uploading.
:param expires: :class:`int` The Unix timestamp of the expiry date of the form.
:param max_file_size: :class:`int` The maximum file size of the files allowed
to be uploaded with this form.
:param method: :class:`str` The method which the form will be using, defaults to
POST because that's all that's supported but allows
others just in case.
'''
path = "/v1/%s/%s/" % (settings.TENANT_ID, path)
hmac_body = "%s\n%s\n%s\n%s\n%s" % (
path, redirect, max_file_size, "10", expires,  # the literal "10" is the form's max file count
)
return "%s:%s:%s" % (
settings.TENANT_ID, settings.HP_ACCESS_KEY,
hmac.new(settings.HP_SECRET_KEY, hmac_body, sha1).hexdigest()
)
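# A minimal usage sketch (the container path and redirect URL below are made up):
#   key = generate_form_post_key('my_container', 'https://example.com/uploaded/')
# The result is a "<tenant id>:<access key>:<hex HMAC-SHA1>" string suitable for
# the signature field of an HP Cloud / Swift FormPOST upload form.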
def generate_share_url(path, expires=2147483647):
'''
Generates the URL for which you can create a time-sensitive link to any item
in your object store.
:param expires: :class:`int` The Unix timestamp of the expiry date of the form.
'''
hmac_path = "/v1/%s/%s" % (settings.TENANT_ID, path)
hmac_body = "%s\n%s\n%s" % ("GET",expires, hmac_path)
hmac_code = "%s:%s:%s" % (
settings.TENANT_ID, settings.HP_ACCESS_KEY,
hmac.new(settings.HP_SECRET_KEY, hmac_body, sha1).hexdigest()
)
path = "%s%s/%s?temp_url_sig=%s&temp_url_expires=%s" % (
settings.OBJECT_STORE_URL, settings.TENANT_ID, path,
hmac_code, expires)
return path
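# A minimal usage sketch (the object path and timestamp are made up):
#   url = generate_share_url('my_container/report.pdf', expires=1735689600)
# The returned URL embeds temp_url_sig and temp_url_expires query parameters,
# so the link stops working after the given Unix timestamp.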
def get_object_list(container):
'''Returns a list of objects inside a container.
:param container: :class:`str` The name of the container to list.
'''
container = "%s%s/%s?format=json" % (settings.OBJECT_STORE_URL, settings.TENANT_ID, container)
req = urllib2.Request(container)
req.add_header("Content-type", "application/json")
req.add_header("X-Auth-Token", get_auth_token())
response = urllib2.urlopen(req)
return simplejson.loads(response.read())
def get_auth_token():
'''Returns the auth_token currently being used.
If the auth_token has expired, it will generate a new one and return that.
'''
if AuthToken.objects.all().count() > 0:
token = AuthToken.objects.all()[0]
if token.is_valid():
return token.token
AuthToken.objects.all().delete()
json_data = {
"auth": {
"passwordCredentials": {
"username": settings.HPCLOUD_USERNAME,
"password": settings.HPCLOUD_PASSWORD
},
"tenantId": settings.TENANT_ID
}
}
payload = simplejson.dumps(json_data)
req = urllib2.Request(
settings.REGION_URL + "tokens",
)
req.add_header("Content-type", "application/json")
json = simplejson.loads(urllib2.urlopen(req, payload).read())
AuthToken(token=json['access']['token']['id'],
expires=json['access']['token']['expires']).save()
return json['access']['token']['id']
|
py | 1a2fb594f98416859fabd33085c931d2e5be1adb | import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Conv2DTranspose, Lambda, Reshape, Layer
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
# 'ds' is used below (MultivariateNormalDiag, kl_divergence) but was never
# imported; it is assumed here to be the TensorFlow Probability distributions module.
import tensorflow_probability as tfp
ds = tfp.distributions
INPUT_DIM = (64,64,3)
CONV_FILTERS = [32,64,64, 128]
CONV_KERNEL_SIZES = [4,4,4,4]
CONV_STRIDES = [2,2,2,2]
CONV_ACTIVATIONS = ['relu','relu','relu','relu']
DENSE_SIZE = 1024
CONV_T_FILTERS = [64,64,32,3]
CONV_T_KERNEL_SIZES = [5,5,6,6]
CONV_T_STRIDES = [2,2,2,2]
CONV_T_ACTIVATIONS = ['relu','relu','relu','sigmoid']
Z_DIM = 32
BATCH_SIZE = 100
LEARNING_RATE = 0.0001
KL_TOLERANCE = 0.5
class Sampling(Layer):
def call(self, inputs):
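# Reparameterization trick: draw z = mu + sigma * epsilon with
# epsilon ~ N(0, I), where sigma = exp(log_var / 2).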
mu, log_var = inputs
epsilon = K.random_normal(shape=K.shape(mu), mean=0., stddev=1.)
return mu + K.exp(log_var / 2) * epsilon
class VAEModel(Model):
def __init__(self, encoder, decoder, r_loss_factor, **kwargs):
super(VAEModel, self).__init__(**kwargs)
self.encoder = encoder
self.decoder = decoder
self.r_loss_factor = r_loss_factor
def train_step(self, data):
if isinstance(data, tuple):
data = data[0]
with tf.GradientTape() as tape:
z_mean, z_log_var, z = self.encoder(data)
reconstruction = self.decoder(z)
reconstruction_loss = tf.reduce_mean(
tf.square(data - reconstruction), axis = [1,2,3]
)
reconstruction_loss *= self.r_loss_factor
kl_loss = 1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var)
kl_loss = tf.reduce_sum(kl_loss, axis = 1)
kl_loss *= -0.5
total_loss = reconstruction_loss + kl_loss
grads = tape.gradient(total_loss, self.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
return {
"loss": total_loss,
"reconstruction_loss": reconstruction_loss,
"kl_loss": kl_loss,
}
def call(self,inputs):
latent = self.encoder(inputs)
return self.decoder(latent)
class VAEGAN(tf.keras.Model):
"""a VAEGAN class for tensorflow
Extends:
tf.keras.Model
"""
def __init__(self, **kwargs):
super(VAEGAN, self).__init__()
self.__dict__.update(kwargs)
self.enc = tf.keras.Sequential(self.enc)
self.dec = tf.keras.Sequential(self.dec)
inputs, disc_l, outputs = self.vae_disc_function()
self.disc = tf.keras.Model(inputs=[inputs], outputs=[outputs, disc_l])
self.enc_optimizer = tf.keras.optimizers.Adam(self.lr_base_gen, beta_1=0.5)
self.dec_optimizer = tf.keras.optimizers.Adam(self.lr_base_gen, beta_1=0.5)
self.disc_optimizer = tf.keras.optimizers.Adam(self.get_lr_d, beta_1=0.5)
def encode(self, x):
mu, sigma = tf.split(self.enc(x), num_or_size_splits=2, axis=1)
return mu, sigma
def dist_encode(self, x):
mu, sigma = self.encode(x)
return ds.MultivariateNormalDiag(loc=mu, scale_diag=sigma)
def get_lr_d(self):
return self.lr_base_disc * self.D_prop
def decode(self, z):
return self.dec(z)
def discriminate(self, x):
return self.disc(x)
def reconstruct(self, x):
mean, _ = self.encode(x)
return self.decode(mean)
def reparameterize(self, mean, logvar):
eps = tf.random.normal(shape=mean.shape)
return eps * tf.exp(logvar * 0.5) + mean
# @tf.function
def compute_loss(self, x):
# pass through network
q_z = self.dist_encode(x)
z = q_z.sample()
p_z = ds.MultivariateNormalDiag(
loc=[0.0] * z.shape[-1], scale_diag=[1.0] * z.shape[-1]
)
xg = self.decode(z)
z_samp = tf.random.normal([x.shape[0], 1, 1, z.shape[-1]])
xg_samp = self.decode(z_samp)
d_xg, ld_xg = self.discriminate(xg)
d_x, ld_x = self.discriminate(x)
d_xg_samp, ld_xg_samp = self.discriminate(xg_samp)
# GAN losses
disc_real_loss = gan_loss(logits=d_x, is_real=True)
disc_fake_loss = gan_loss(logits=d_xg_samp, is_real=False)
gen_fake_loss = gan_loss(logits=d_xg_samp, is_real=True)
discrim_layer_recon_loss = (
tf.reduce_mean(tf.reduce_mean(tf.math.square(ld_x - ld_xg), axis=0))
/ self.recon_loss_div
)
self.D_prop = sigmoid(
disc_fake_loss - gen_fake_loss, shift=0.0, mult=self.sig_mult
)
kl_div = ds.kl_divergence(q_z, p_z)
latent_loss = tf.reduce_mean(tf.maximum(kl_div, 0)) / self.latent_loss_div
return (
self.D_prop,
latent_loss,
discrim_layer_recon_loss,
gen_fake_loss,
disc_fake_loss,
disc_real_loss,
)
# @tf.function
def compute_gradients(self, x):
with tf.GradientTape() as enc_tape, tf.GradientTape() as dec_tape, tf.GradientTape() as disc_tape:
(
_,
latent_loss,
discrim_layer_recon_loss,
gen_fake_loss,
disc_fake_loss,
disc_real_loss,
) = self.compute_loss(x)
enc_loss = latent_loss + discrim_layer_recon_loss
dec_loss = gen_fake_loss + discrim_layer_recon_loss
disc_loss = disc_fake_loss + disc_real_loss
enc_gradients = enc_tape.gradient(enc_loss, self.enc.trainable_variables)
dec_gradients = dec_tape.gradient(dec_loss, self.dec.trainable_variables)
disc_gradients = disc_tape.gradient(disc_loss, self.disc.trainable_variables)
return enc_gradients, dec_gradients, disc_gradients
@tf.function
def apply_gradients(self, enc_gradients, dec_gradients, disc_gradients):
self.enc_optimizer.apply_gradients(
zip(enc_gradients, self.enc.trainable_variables)
)
self.dec_optimizer.apply_gradients(
zip(dec_gradients, self.dec.trainable_variables)
)
self.disc_optimizer.apply_gradients(
zip(disc_gradients, self.disc.trainable_variables)
)
def train(self, x):
enc_gradients, dec_gradients, disc_gradients = self.compute_gradients(x)
self.apply_gradients(enc_gradients, dec_gradients, disc_gradients)
def gan_loss(logits, is_real=True):
"""Computes standard gan loss between logits and labels
Arguments:
logits {[type]} -- output of discriminator
Keyword Arguments:
is_real {bool} -- whether labels should be 0 (fake) or 1 (real) (default: {True})
"""
if is_real:
labels = tf.ones_like(logits)
else:
labels = tf.zeros_like(logits)
return tf.compat.v1.losses.sigmoid_cross_entropy(
multi_class_labels=labels, logits=logits
)
def sigmoid(x, shift=0.0, mult=20):
""" squashes a value with a sigmoid
"""
return tf.constant(1.0) / (
tf.constant(1.0) + tf.exp(-tf.constant(1.0) * (x * mult))
)
class VAE():
def __init__(self):
self.models = self._build()
self.full_model = self.models[0]
self.encoder = self.models[1]
self.decoder = self.models[2]
self.input_dim = INPUT_DIM
self.z_dim = Z_DIM
self.learning_rate = LEARNING_RATE
self.kl_tolerance = KL_TOLERANCE
def _build(self):
vae_x = Input(shape=INPUT_DIM, name='observation_input')
vae_c1 = Conv2D(filters = CONV_FILTERS[0], kernel_size = CONV_KERNEL_SIZES[0], strides = CONV_STRIDES[0], activation=CONV_ACTIVATIONS[0], name='conv_layer_1')(vae_x)
vae_c2 = Conv2D(filters = CONV_FILTERS[1], kernel_size = CONV_KERNEL_SIZES[1], strides = CONV_STRIDES[1], activation=CONV_ACTIVATIONS[0], name='conv_layer_2')(vae_c1)
vae_c3= Conv2D(filters = CONV_FILTERS[2], kernel_size = CONV_KERNEL_SIZES[2], strides = CONV_STRIDES[2], activation=CONV_ACTIVATIONS[0], name='conv_layer_3')(vae_c2)
vae_c4= Conv2D(filters = CONV_FILTERS[3], kernel_size = CONV_KERNEL_SIZES[3], strides = CONV_STRIDES[3], activation=CONV_ACTIVATIONS[0], name='conv_layer_4')(vae_c3)
vae_z_in = Flatten()(vae_c4)
vae_z_mean = Dense(Z_DIM, name='mu')(vae_z_in)
vae_z_log_var = Dense(Z_DIM, name='log_var')(vae_z_in)
vae_z = Sampling(name='z')([vae_z_mean, vae_z_log_var])
#### DECODER:
vae_z_input = Input(shape=(Z_DIM,), name='z_input')
vae_dense = Dense(1024, name='dense_layer')(vae_z_input)
vae_unflatten = Reshape((1,1,DENSE_SIZE), name='unflatten')(vae_dense)
vae_d1 = Conv2DTranspose(filters = CONV_T_FILTERS[0], kernel_size = CONV_T_KERNEL_SIZES[0] , strides = CONV_T_STRIDES[0], activation=CONV_T_ACTIVATIONS[0], name='deconv_layer_1')(vae_unflatten)
vae_d2 = Conv2DTranspose(filters = CONV_T_FILTERS[1], kernel_size = CONV_T_KERNEL_SIZES[1] , strides = CONV_T_STRIDES[1], activation=CONV_T_ACTIVATIONS[1], name='deconv_layer_2')(vae_d1)
vae_d3 = Conv2DTranspose(filters = CONV_T_FILTERS[2], kernel_size = CONV_T_KERNEL_SIZES[2] , strides = CONV_T_STRIDES[2], activation=CONV_T_ACTIVATIONS[2], name='deconv_layer_3')(vae_d2)
vae_d4 = Conv2DTranspose(filters = CONV_T_FILTERS[3], kernel_size = CONV_T_KERNEL_SIZES[3] , strides = CONV_T_STRIDES[3], activation=CONV_T_ACTIVATIONS[3], name='deconv_layer_4')(vae_d3)
#### MODELS
vae_encoder = Model(vae_x, [vae_z_mean, vae_z_log_var, vae_z], name = 'encoder')
vae_decoder = Model(vae_z_input, vae_d4, name = 'decoder')
vae_full = VAEModel(vae_encoder, vae_decoder, 10000)
opti = Adam(lr=LEARNING_RATE)
vae_full.compile(optimizer=opti)
return (vae_full,vae_encoder, vae_decoder)
def set_weights(self, filepath):
self.full_model.load_weights(filepath)
def train(self, data):
self.full_model.fit(data, data,
shuffle=True,
epochs=1,
batch_size=BATCH_SIZE)
def save_weights(self, filepath):
self.full_model.save_weights(filepath)
|
py | 1a2fb744396bccd3b48b313776264cdfe8dd0a7c | from utils import prefer_envar
from logs.logger import log
from logs.log_utils import log_json
from config.reddit.reddit_sub_lists import REDDIT_APPROVED_SUBS
from config.reddit.config_gen import config_gen
import sys
import json
import os
if os.path.isfile('config.json'):
file = open("config.json", "r")
AUTH = prefer_envar(json.loads(file.read()))
else:
AUTH = prefer_envar({
# app creds
"reddit_client_id":"",
"reddit_client_secret":"",
# reddit account creds
"reddit_username":"",
"reddit_password":"",
})
for envar in AUTH:
if AUTH[envar] == "":
# reddit auth not configured correctly.
# instruct user to generate a .env file
config_gen()
log.info(f"REDDIT AUTH CONFIG:\n {log_json(AUTH)}")
CONFIG = prefer_envar({
"reddit_crosspost_enabled": False,
# the chance the bot will repost a post
"reddit_post_chance": 0.005,
# the chance the bot will make a comment
"reddit_comment_chance": 0.005,
# the chance the bot will reply to a comment
# otherwise it will reply to a post
"reddit_reply_to_comment": 0.002,
# chance the bot will remove poor performing
# posts and comments
"reddit_remove_low_scores": 0.002,
# posts/comments that get downvoted to this score will be deleted
"reddit_low_score_threshold": 0,
# chance to check if the bot is shadowbanned,
# and shut down the script automatically
"reddit_shadowban_check": 0.002,
# list of subreddits for the bot to use
"reddit_sub_list": REDDIT_APPROVED_SUBS,
# bot schedules. all times are UTC
# add the schedule number to the array
# and the bot will run within that time range
# leave the array empty for no schedule: []
# 1 - 7am-10am ((7,00),(10,00))
# 2 - 10am-2pm ((10,00),(14,00))
# 3 - 2pm-6pm ((14,00),(18,00))
# 4 - 6pm-10pm ((18,00),(22,00))
# 5 - 10pm-2am ((22,00),(2,00))
"reddit_sleep_schedule": [2, 4]
})
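# With the default schedule above ([2, 4]), the bot only runs between
# 10:00-14:00 UTC and 18:00-22:00 UTC.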
log.info(f"REDDIT CONNFIG:\n {log_json(CONFIG)}")
|
py | 1a2fba1a886ca8bda4185af80c3157d5a5d9f4a5 | from django.http import JsonResponse
from django.utils import timezone
from django.contrib.sessions.models import Session
from rest_framework import views, viewsets, authentication
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from rest_framework.exceptions import APIException
from liliapi.serializers import *
from liliapi.models import *
from liliapi.permissions import *
from liliapi.paginations import *
from liliapi.authentication import *
from liliapi.tasks import *
########################################################################################################################
#
# copyright: 2017 WiM - USGS
# authors: Aaron Stephenson USGS WiM (Web Informatics and Mapping)
#
# In Django, a view is what takes a Web request and returns a Web response. The response can be many things, but most
# of the time it will be a Web page, a redirect, or a document. In this case, the response will almost always be data
# in JSON format.
#
# All these views are written as Class-Based Views (https://docs.djangoproject.com/en/1.11/topics/class-based-views/)
# because that is the paradigm used by Django Rest Framework (http://www.django-rest-framework.org/api-guide/views/)
# which is the toolkit we used to create web services in Django.
#
#
########################################################################################################################
LIST_DELIMETER = settings.LIST_DELIMETER
######
#
# Abstract Base Classes
#
######
class HistoryViewSet(viewsets.ModelViewSet):
"""
This class will automatically assign the User ID to the created_by and modified_by history fields when appropriate
"""
permission_classes = (permissions.IsAuthenticated,)
pagination_class = StandardResultsSetPagination
def perform_create(self, serializer):
serializer.save(created_by=self.request.user, modified_by=self.request.user)
def perform_update(self, serializer):
serializer.save(modified_by=self.request.user)
# override the default pagination to allow disabling of pagination
def paginate_queryset(self, *args, **kwargs):
if self.request and 'paginate' in self.request.query_params:
return super().paginate_queryset(*args, **kwargs)
return None
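# For example, any list request that includes a 'paginate' query parameter
# (e.g. ?paginate=true) is paginated with StandardResultsSetPagination, while
# requests without it receive the full, unpaginated result set.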
######
#
# Samples
#
######
class SampleViewSet(HistoryViewSet):
serializer_class = SampleSerializer
def get_serializer_class(self):
if self.request and 'slim' in self.request.query_params:
return SampleSlimSerializer
else:
return SampleSerializer
@action(detail=False)
def finalsamplemeanconcentrations(self, request):
queryset = Sample.objects.prefetch_related('finalsamplemeanconcentrations').distinct()
query_params = self.request.query_params
# filter by sample IDs, exact list
sample = query_params.get('sample', None)
if sample is not None:
if LIST_DELIMETER in sample:
sample_list = sample.split(LIST_DELIMETER)
queryset = queryset.filter(id__in=sample_list)
else:
queryset = queryset.filter(id__exact=sample)
# filter by target IDs, exact list
target = query_params.get('target', None)
target_list = []
if target is not None:
if LIST_DELIMETER in target:
target_list = target.split(LIST_DELIMETER)
queryset = queryset.filter(finalsamplemeanconcentrations__target__in=target_list)
else:
target_list = [target]
queryset = queryset.filter(finalsamplemeanconcentrations__target__exact=target)
# recalc reps validity
for sample in queryset:
fsmcs = FinalSampleMeanConcentration.objects.filter(sample=sample.id, target__in=target_list)
for fsmc in fsmcs:
recalc_reps('FinalSampleMeanConcentration', sample.id, target=fsmc.target.id, recalc_rep_conc=False)
# start building up the response object
resp = []
for sample in queryset:
sample_target_list = [int(target) for target in target_list]
item = {
"id": sample.id,
"collaborator_sample_id": sample.collaborator_sample_id,
"collection_start_date": sample.collection_start_date,
"final_sample_mean_concentrations": []
}
fsmcs = list(FinalSampleMeanConcentration.objects.filter(sample=sample.id))
for fsmc in fsmcs:
# attempt to find the matching target in the fsmc list
try:
sample_target_index = sample_target_list.index(fsmc.target.id)
# pop the matching fsmc target from its list so that we eventually end up with an empty list,
# or a list of extraneous targets
sample_target_list.pop(sample_target_index)
# start building up the nested response object
item["final_sample_mean_concentrations"].append({
"target": fsmc.target.id,
"target_string": fsmc.target.name,
"final_sample_mean_concentration": fsmc.final_sample_mean_concentration
})
# no matching target was found in the fsmc list
except ValueError:
# do not include this fsmc in the response because its target was not requested
continue
# now list out the other targets that were requested but do not exist for this sample
for extraneous_target in sample_target_list:
# start building up the nested response object
target_name = list(Target.objects.filter(id=extraneous_target).values_list('name', flat=True))
item["final_sample_mean_concentrations"].append({
"target": extraneous_target,
"target_string": target_name[0],
"final_sample_mean_concentration": "N/A"
})
resp.append(item)
return Response(resp)
@action(detail=False)
def get_count(self, request):
# Sample.objects.filter(matrix__in=matrix_list).count()
query_params = self.request.query_params
return Response({"count": self.build_queryset(query_params).count()})
@action(detail=False)
def get_sampler_names(self, request):
sampler_names = set(list(Sample.objects.values_list('sampler_name', flat=True)))
return Response({"sampler_names": sampler_names})
@action(detail=False)
def get_recent_pegnegs(self, request):
pegneg_record_type = RecordType.objects.filter(id=2).first()
recent_pegnegs = Sample.objects.filter(record_type=pegneg_record_type).order_by('-id')[:20]
return Response(self.serializer_class(recent_pegnegs, many=True).data)
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
query_params = self.request.query_params
return self.build_queryset(query_params)
# build a queryset using query_params
# NOTE: this is being done in its own method to adhere to the DRY Principle
def build_queryset(self, query_params):
queryset = Sample.objects.all()
# filter by sample IDs, exact list
sample = query_params.get('id', None)
if sample is not None:
if LIST_DELIMETER in sample:
sample_list = sample.split(LIST_DELIMETER)
queryset = queryset.filter(id__in=sample_list)
else:
queryset = queryset.filter(id__exact=sample)
# filter by sample ID, range
from_sample = query_params.get('from_id', None)
to_sample = query_params.get('to_id', None)
if from_sample is not None and to_sample is not None:
# the filter below using __range is value-inclusive
queryset = queryset.filter(id__range=(from_sample, to_sample))
elif to_sample is not None:
queryset = queryset.filter(id__lte=to_sample)
elif from_sample is not None:
queryset = queryset.filter(id__gte=from_sample)
# filter by study ID, exact list
study = query_params.get('study', None)
if study is not None:
if LIST_DELIMETER in study:
study_list = study.split(LIST_DELIMETER)
queryset = queryset.filter(study__in=study_list)
else:
queryset = queryset.filter(study__exact=study)
# filter by collection_start_date, range
from_collection_start_date = query_params.get('from_collection_start_date', None)
to_collection_start_date = query_params.get('to_collection_start_date', None)
if from_collection_start_date is not None and to_collection_start_date is not None:
# the filter below using __range is value-inclusive
queryset = queryset.filter(collection_start_date__range=(
from_collection_start_date, to_collection_start_date))
elif to_collection_start_date is not None:
queryset = queryset.filter(collection_start_date__lte=to_collection_start_date)
elif from_collection_start_date is not None:
queryset = queryset.filter(collection_start_date__gte=from_collection_start_date)
# filter by collaborator_sample_id, exact list
collaborator_sample_id = query_params.get('collaborator_sample_id', None)
if collaborator_sample_id is not None:
if LIST_DELIMETER in collaborator_sample_id:
collaborator_sample_id_list = collaborator_sample_id.split(LIST_DELIMETER)
queryset = queryset.filter(collaborator_sample_id__in=collaborator_sample_id_list)
else:
queryset = queryset.filter(collaborator_sample_id__exact=collaborator_sample_id)
# filter by sample type, exact list
sample_type = query_params.get('sample_type', None)
if sample_type is not None:
if LIST_DELIMETER in sample_type:
sample_type_list = sample_type.split(LIST_DELIMETER)
queryset = queryset.filter(sample_type__in=sample_type_list)
else:
queryset = queryset.filter(sample_type__exact=sample_type)
# filter by matrix, exact list
matrix = query_params.get('matrix', None)
if matrix is not None:
if LIST_DELIMETER in matrix:
matrix_list = matrix.split(LIST_DELIMETER)
queryset = queryset.filter(matrix__in=matrix_list)
else:
queryset = queryset.filter(matrix__exact=matrix)
# filter by record_type, exact list
record_type = query_params.get('record_type', None)
if record_type is not None:
if LIST_DELIMETER in record_type:
record_type_list = record_type.split(LIST_DELIMETER)
queryset = queryset.filter(record_type__in=record_type_list)
else:
queryset = queryset.filter(record_type__exact=record_type)
# filter by peg_neg, exact list
peg_neg = query_params.get('peg_neg', None)
if peg_neg is not None:
if LIST_DELIMETER in peg_neg:
peg_neg_list = peg_neg.split(LIST_DELIMETER)
queryset = queryset.filter(peg_neg__in=peg_neg_list)
else:
queryset = queryset.filter(peg_neg__exact=peg_neg)
return queryset
class AliquotViewSet(HistoryViewSet):
queryset = Aliquot.objects.all()
serializer_class = AliquotCustomSerializer
@action(detail=False)
def get_location(self, request):
# get the freezer from the request query
freezer = request.query_params.get('freezer', None)
# get the rack from the request query
rack = request.query_params.get('rack', None)
# get the box from the request query
box = request.query_params.get('box', None)
# if a freezer was included in the query, use it, otherwise default to the first freezer
freezer = freezer if freezer else 1
# find all aliquots in the requested rack and/or box (and freezer)
if rack and box:
queryset = Aliquot.objects.filter(freezer_location__freezer=freezer,
freezer_location__rack=rack, freezer_location__box=box)
elif rack:
queryset = Aliquot.objects.filter(freezer_location__freezer=freezer, freezer_location__rack=rack)
elif box:
queryset = Aliquot.objects.filter(freezer_location__freezer=freezer, freezer_location__box=box)
else:
queryset = Aliquot.objects.none()
return Response(AliquotSlimSerializer(queryset, many=True).data)
@action(methods=['post'], detail=False)
def bulk_delete(self, request):
# ensure submitted data is a list of only IDs or a list of only aliquot_strings (SampleID-AliquotNumber)
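# e.g. request.data == [12, 13] (aliquot IDs) or ['1001-3', '1001-4']
# (sample_id-aliquot_number strings); the two formats cannot be mixed.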
if all([str(item).isdigit() for item in request.data]):
aliquots = Aliquot.objects.filter(id__in=request.data)
if len(aliquots) != len(request.data):
aliquot_ids = [aliquot.id for aliquot in aliquots]
invalid_ids = list(set(request.data).difference(aliquot_ids))
message = "Invalid request. No aliquots deleted. The following submitted values could not be found"
message += " in the database: " + str(invalid_ids)
return JsonResponse({"message": message}, status=400)
else:
freezer_location_ids = [aliquot.freezer_location_id for aliquot in aliquots]
Aliquot.objects.filter(id__in=request.data).delete()
FreezerLocation.objects.filter(id__in=freezer_location_ids).delete()
return JsonResponse({"message": "Aliquots deleted."}, status=200)
elif all([isinstance(item, str) and '-' in item for item in request.data]):
aliquot_ids = []
freezer_location_ids = []
invalid_ids = []
for item in request.data:
item_split = item.split('-')
aliquot = Aliquot.objects.filter(sample=item_split[0], aliquot_number=item_split[1]).first()
if aliquot:
aliquot_ids.append(aliquot.id)
freezer_location_ids.append(aliquot.freezer_location_id)
else:
invalid_ids.append(item)
if len(invalid_ids) > 0:
message = "Invalid request. No aliquots deleted. The following submitted values could not be found"
message += " in the database: " + str(invalid_ids)
return JsonResponse({"message": message}, status=400)
else:
Aliquot.objects.filter(id__in=aliquot_ids).delete()
FreezerLocation.objects.filter(id__in=freezer_location_ids).delete()
return JsonResponse({"message": "Aliquots deleted."}, status=200)
else:
message = "Invalid request. Submitted data must be a list/array of aliquot IDs"
message += "or sample_id-aliquot_number combinations (e.g., '1001-3')"
return JsonResponse({"message": message}, status=400)
def get_serializer_class(self):
if not isinstance(self.request.data, list):
return AliquotSerializer
else:
return self.serializer_class
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list) and len(data) > 0 and 'aliquot_count' in data[0]:
kwargs['many'] = True
return super(AliquotViewSet, self).get_serializer(*args, **kwargs)
class SampleTypeViewSet(HistoryViewSet):
queryset = SampleType.objects.all()
serializer_class = SampleTypeSerializer
class MatrixViewSet(HistoryViewSet):
queryset = Matrix.objects.all()
serializer_class = MatrixSerializer
class FilterTypeViewSet(HistoryViewSet):
queryset = FilterType.objects.all()
serializer_class = FilterTypeSerializer
class StudyViewSet(HistoryViewSet):
queryset = Study.objects.all()
serializer_class = StudySerializer
class UnitViewSet(HistoryViewSet):
queryset = Unit.objects.all()
serializer_class = UnitSerializer
######
#
# Freezer Locations
#
######
class FreezerLocationViewSet(HistoryViewSet):
queryset = FreezerLocation.objects.all()
serializer_class = FreezerLocationSerializer
@action(methods=['get'], detail=False)
def get_next_available(self, request):
# get the first empty box in the any freezer
first_empty_box = FreezerLocation.objects.get_first_empty_box()
if first_empty_box is None:
first_empty_box = "There are no more empty boxes in this freezer!"
# get the study_id from the request query
study_id = request.query_params.get('study', None)
last_spot = FreezerLocation.objects.get_last_occupied_spot(study_id)
# if a last spot is found look up the next available spot
if last_spot is not None:
next_spot = FreezerLocation.objects.get_next_available_spot(last_spot)
# if there is a next spot
if next_spot is not None:
# start building the full response object
resp = next_spot
# determine maximum available spots in a box in this freezer (for an empty box)
rows_in_box = last_spot.freezer.rows
spots_in_row = last_spot.freezer.spots
spots_in_box = rows_in_box * spots_in_row
# ensure next spot and next empty box are not the same
get_second_empty_box = True if next_spot['available_spots_in_box'] == spots_in_box else False
next_empty_box = FreezerLocation.objects.get_next_empty_box(last_spot, get_second_empty_box)
# then add the next empty box to the response object
resp.update({"next_empty_box": next_empty_box})
# no next spot was found
else:
resp = {"not_found": "There are no more empty boxes in this freezer!"}
# otherwise no last spot has been found
else:
# if a study_id was included in the query, mention it in the response
if study_id is not None:
study = Study.objects.filter(id=study_id).first()
message = "No aliquots for "
if study is not None:
message += study.name + " "
message += "(Study ID #" + str(study_id) + ") are stored in any freezer."
# otherwise inform the user that no freezer locations have been used
else:
message = "No aliquots are stored in any freezer."
resp = {"not_found": message}
resp.update({"next_empty_box": first_empty_box})
return Response(resp)
class FreezerViewSet(HistoryViewSet):
queryset = Freezer.objects.all()
serializer_class = FreezerSerializer
######
#
# Final Sample Values
#
######
class FinalConcentratedSampleVolumeViewSet(HistoryViewSet):
serializer_class = FinalConcentratedSampleVolumeSerializer
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
queryset = FinalConcentratedSampleVolume.objects.all()
# filter by sample ID, exact list
sample = self.request.query_params.get('sample', None)
if sample is not None:
sample_list = sample.split(',')
queryset = queryset.filter(sample__in=sample_list)
return queryset
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(FinalConcentratedSampleVolumeViewSet, self).get_serializer(*args, **kwargs)
class ConcentrationTypeViewSet(HistoryViewSet):
queryset = ConcentrationType.objects.all()
serializer_class = ConcentrationTypeSerializer
class FinalSampleMeanConcentrationViewSet(HistoryViewSet):
serializer_class = FinalSampleMeanConcentrationSerializer
@action(detail=False)
def summary_statistics(self, request):
sample = request.query_params.get('sample', None)
target = request.query_params.get('target', None)
statistic = request.query_params.get('statistic', None)
report_type = ReportType.objects.filter(id=2).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_results_summary_report.delay(sample, target, statistic, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Results Summary Report received."}, status=200)
@action(detail=False)
def results(self, request):
sample = request.query_params.get('sample', None)
target = request.query_params.get('target', None)
report_type = ReportType.objects.filter(id=3).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_individual_sample_report.delay(sample, target, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Individual Sample Report received."}, status=200)
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
query_params = self.request.query_params
return self.build_queryset(query_params)
# build a queryset using query_params
# NOTE: this is being done in its own method to adhere to the DRY Principle
def build_queryset(self, query_params):
queryset = FinalSampleMeanConcentration.objects.all()
# filter by sample ID, exact list
sample = query_params.get('sample', None)
if sample is not None:
sample_list = sample.split(',')
queryset = queryset.filter(sample__in=sample_list)
# filter by target ID, exact list
target = query_params.get('target', None)
if target is not None:
target_list = target.split(',')
queryset = queryset.filter(target__in=target_list)
# filter by study ID, exact list
study = query_params.get('study', None)
if study is not None:
study_list = study.split(',')
queryset = queryset.filter(sample__study__in=study_list)
# filter by collection_start_date, exact list
collection_start_date = query_params.get('collection_start_date', None)
if collection_start_date is not None:
collection_start_date_list = collection_start_date.split(',')
queryset = queryset.filter(sample__collection_start_date__in=collection_start_date_list)
# filter by collaborator_sample_id, exact list
collaborator_sample_id = query_params.get('collaborator_sample_id', None)
if collaborator_sample_id is not None:
collaborator_sample_id_list = collaborator_sample_id.split(',')
queryset = queryset.filter(sample__collaborator_sample_id__in=collaborator_sample_id_list)
# recalc reps validity
for fsmc in queryset:
recalc_reps('FinalSampleMeanConcentration', fsmc.sample.id, target=fsmc.target.id, recalc_rep_conc=False)
return queryset
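    # Example request (illustrative URL and values):
    #     GET /finalsamplemeanconcentrations/?sample=12,13&target=2
    # becomes queryset.filter(sample__in=['12', '13']).filter(target__in=['2']);
    # the remaining query params above narrow the queryset the same way.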
# override the default GET method to recalc all child PCR Replicates first before the FSMC Select query
def retrieve(self, request, *args, **kwargs):
recalc_reps('FinalSampleMeanConcentration',
self.get_object().sample.id, target=self.get_object().target.id, recalc_rep_conc=False)
return super(FinalSampleMeanConcentrationViewSet, self).retrieve(request, *args, **kwargs)
######
#
# Sample Groups
#
######
class SampleSampleGroupViewSet(HistoryViewSet):
queryset = SampleSampleGroup.objects.all()
serializer_class = SampleSampleGroupSerializer
class SampleGroupViewSet(HistoryViewSet):
queryset = SampleGroup.objects.all()
serializer_class = SampleGroupSerializer
######
#
# Analyses
#
######
class SampleAnalysisBatchViewSet(HistoryViewSet):
queryset = SampleAnalysisBatch.objects.all()
serializer_class = SampleAnalysisBatchSerializer
class AnalysisBatchViewSet(HistoryViewSet):
queryset = AnalysisBatch.objects.all()
serializer_class = AnalysisBatchSerializer
# override the default DELETE method to prevent deletion of an AnalysisBatch with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch__extraction_batch__analysis_batch=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "An Analysis Batch may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(AnalysisBatchViewSet, self).destroy(request, *args, **kwargs)
class AnalysisBatchDetailViewSet(HistoryViewSet):
serializer_class = AnalysisBatchDetailSerializer
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
queryset = AnalysisBatch.objects.all()
batch = self.request.query_params.get('id', None)
if batch is not None:
if LIST_DELIMETER in batch:
batch_list = batch.split(',')
queryset = queryset.filter(id__in=batch_list)
else:
queryset = queryset.filter(id__exact=batch)
return queryset
class AnalysisBatchSummaryViewSet(HistoryViewSet):
serializer_class = AnalysisBatchSummarySerializer
@action(detail=False)
def get_count(self, request):
query_params = self.request.query_params
return Response({"count": self.build_queryset(query_params).count()})
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
query_params = self.request.query_params
return self.build_queryset(query_params)
# build a queryset using query_params
# NOTE: this is being done in its own method to adhere to the DRY Principle
def build_queryset(self, query_params):
study = self.request.query_params.get('study', None)
if study is not None:
queryset = AnalysisBatch.objects.prefetch_related('samples').all()
else:
queryset = AnalysisBatch.objects.all()
# filter by batch ID, exact list
batch = self.request.query_params.get('id', None)
if batch is not None:
if LIST_DELIMETER in batch:
batch_list = batch.split(',')
queryset = queryset.filter(id__in=batch_list)
else:
queryset = queryset.filter(id__exact=batch)
# filter by batch ID, range
from_batch = query_params.get('from_id', None)
to_batch = query_params.get('to_id', None)
if from_batch is not None and to_batch is not None:
# the filter below using __range is value-inclusive
queryset = queryset.filter(id__range=(from_batch, to_batch))
elif to_batch is not None:
queryset = queryset.filter(id__lte=to_batch)
elif from_batch is not None:
queryset = queryset.filter(id__gte=from_batch)
# filter by study ID, exact list
if study is not None:
if LIST_DELIMETER in study:
study_list = study.split(',')
queryset = queryset.filter(samples__study__in=study_list).distinct()
else:
queryset = queryset.filter(samples__study__exact=study).distinct()
return queryset
class AnalysisBatchTemplateViewSet(HistoryViewSet):
queryset = AnalysisBatchTemplate.objects.all()
serializer_class = AnalysisBatchTemplateSerializer
######
#
# Extractions
#
######
class ExtractionMethodViewSet(HistoryViewSet):
queryset = ExtractionMethod.objects.all()
serializer_class = ExtractionMethodSerializer
class ExtractionBatchViewSet(HistoryViewSet):
queryset = ExtractionBatch.objects.all()
# override the default serializer_class if summary fields are requested
def get_serializer_class(self):
include_summary_fields = self.request.query_params.get('includeSummaryFields', None)
if include_summary_fields is not None and include_summary_fields.lower() == 'true':
return ExtractionBatchSummarySerializer
else:
return ExtractionBatchSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(ExtractionBatchViewSet, self).get_serializer(*args, **kwargs)
# override the default DELETE method to prevent deletion of an ExtractionBatch with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch__extraction_batch=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "An Extraction Batch may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(ExtractionBatchViewSet, self).destroy(request, *args, **kwargs)
# override the default PATCH method to allow bulk processing
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
eb_id = item.pop('id')
eb = ExtractionBatch.objects.filter(id=eb_id).first()
item['modified_by'] = request.user
# remove nulls coming from client (user not actually sending nulls, so no need to trigger recalcs)
if 'ext_pos_rna_rt_cq_value' in item and item['ext_pos_rna_rt_cq_value'] is None:
item.pop('ext_pos_rna_rt_cq_value')
if 'ext_pos_dna_cq_value' in item and item['ext_pos_dna_cq_value'] is None:
item.pop('ext_pos_dna_cq_value')
if eb:
serializer = self.get_serializer(eb, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
message = "No ExtractionBatch exists with this ID: " + str(eb_id)
response_errors.append({"extractionbatch": message})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
rep = ExtractionBatch.objects.filter(id=pk).first()
if rep:
serializer = self.serializer_class(rep, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
message = "No ExtractionBatch exists with this ID: " + str(pk)
return JsonResponse({"extractionbatch": message}, status=400)
class ReverseTranscriptionViewSet(HistoryViewSet):
queryset = ReverseTranscription.objects.all()
serializer_class = ReverseTranscriptionSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(ReverseTranscriptionViewSet, self).get_serializer(*args, **kwargs)
# override the default DELETE method to prevent deletion of a ReverseTranscription with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch__extraction_batch__reversetranscriptions=self.get_object().id).exclude(
cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "A Reverse Transcription may not be deleted"
message += " if any related PCR Replicates have results data entered."
raise APIException(message)
return super(ReverseTranscriptionViewSet, self).destroy(request, *args, **kwargs)
# override the default PATCH method to allow bulk processing
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
rt_id = item.pop('id')
rt = ReverseTranscription.objects.filter(id=rt_id).first()
if rt:
serializer = self.serializer_class(rt, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
response_errors.append(
{"reversetranscription": "No ReverseTranscription exists with this ID: " + str(rt_id)})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
rep = ReverseTranscription.objects.filter(id=pk).first()
if rep:
serializer = self.serializer_class(rep, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
return JsonResponse(
{"reversetranscription": "No ReverseTranscription exists with this ID: " + str(pk)}, status=400)
class SampleExtractionViewSet(HistoryViewSet):
queryset = SampleExtraction.objects.all()
serializer_class = SampleExtractionSerializer
@action(detail=False)
def inhibition_report(self, request):
sample = request.query_params.get('sample', None)
report_type = ReportType.objects.filter(id=1).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_inhibition_report.delay(sample, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Inhibition Report received."}, status=200)
# override the default DELETE method to prevent deletion of a SampleExtraction with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
sample_extraction=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "A Sample Extraction may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(SampleExtractionViewSet, self).destroy(request, *args, **kwargs)
class PCRReplicateViewSet(HistoryViewSet):
serializer_class = PCRReplicateSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(PCRReplicateViewSet, self).get_serializer(*args, **kwargs)
def get_queryset(self):
queryset = PCRReplicate.objects.all()
id = self.request.query_params.get('id', None)
if id is not None:
if LIST_DELIMETER in id:
id_list = id.split(',')
queryset = queryset.filter(id__in=id_list)
else:
queryset = queryset.filter(id__exact=id)
return queryset
# override the default PATCH method to allow bulk processing
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
rep_id = item.pop('id')
rep = PCRReplicate.objects.filter(id=rep_id).first()
if rep:
new_invalid = item.get('invalid', None)
if new_invalid is not None and new_invalid != rep.invalid:
item['invalid_override'] = request.user.id
rep.replicate_concentration = rep.calc_rep_conc()
serializer = self.serializer_class(rep, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
response_errors.append({"pcrreplicate": "No PCRReplicate exists with this ID: " + str(rep_id)})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
rep = PCRReplicate.objects.filter(id=pk).first()
if rep:
new_invalid = request_data.get('invalid', None)
if new_invalid is not None and new_invalid != rep.invalid:
if request_data.get('invalid_override', None) is None:
request_data['invalid_override'] = request.user.id
serializer = self.serializer_class(rep, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
return JsonResponse({"pcrreplicate": "No PCRReplicate exists with this ID: " + str(pk)}, status=400)
class PCRReplicateBatchViewSet(HistoryViewSet):
serializer_class = PCRReplicateBatchSerializer
    def isnumber(self, val):
        # truthy for any numeric value: True when the value is zero, otherwise the float itself
        try:
            return True if float(val) == 0 else float(val)
        except (ValueError, TypeError):
            return False
def err_obj(self, field, message, severity):
return {"field": field, "message": message, "severity": severity}
def validate_controls(self, field):
synonym = " ('cp')" if 'cq_value' in field else " ('concentration')" if 'gc_reaction' in field else ''
invalid_reason = None
if field not in self.request.data:
invalid_reason = self.err_obj(field, field + synonym + " is missing", 2)
elif self.request.data[field] is not None:
if not self.isnumber(self.request.data[field]):
invalid_reason = self.err_obj(field, field + synonym + " is not a number", 1)
elif self.request.data[field] > Decimal('0') and field not in ['pcr_pos_cq_value', 'pcr_pos_gc_reaction']:
# eventually we will also validate pcr_pos_cq_value by testing if it is >0.5 cylces from expected
invalid_reason = self.err_obj(field, field + synonym + " is positive", 1)
return invalid_reason
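    # For example (illustrative request values): omitting 'ext_neg_cq_value' yields
    #     {"field": "ext_neg_cq_value", "message": "ext_neg_cq_value ('cp') is missing", "severity": 2}
    # while a positive 'pcr_neg_cq_value' yields the "... is positive" error with severity 1.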
@action(methods=['post'], detail=False)
def bulk_load_negatives(self, request):
is_valid = True
valid_data = []
response_errors = []
for item in request.data:
item_validation_errors = []
if 'extraction_batch' not in item:
item_validation_errors.append("extraction_batch is required")
if 'target' not in item:
item_validation_errors.append("target is required")
if 'replicate_number' not in item:
item_validation_errors.append("replicate_number is required")
if 'pcr_pos_cq_value' not in item:
item_validation_errors.append("pcr_pos_cq_value is required")
if len(item_validation_errors) > 0:
is_valid = False
response_errors.append(item_validation_errors)
continue
pcrreplicate_batch = PCRReplicateBatch.objects.filter(
extraction_batch=item['extraction_batch'], target=item['target'],
replicate_number=item['replicate_number']).first()
if pcrreplicate_batch:
if not is_valid:
continue
else:
item.pop('extraction_batch')
item.pop('target')
item.pop('replicate_number')
item['ext_neg_cq_value'] = 0
item['ext_neg_gc_reaction'] = 0
item['rt_neg_cq_value'] = 0
item['rt_neg_gc_reaction'] = 0
item['pcr_neg_cq_value'] = 0
item['pcr_neg_gc_reaction'] = 0
item['pcr_pos_gc_reaction'] = 0
item['updated_pcrreplicates'] = []
pcrreplicates = PCRReplicate.objects.filter(pcrreplicate_batch=pcrreplicate_batch.id)
for rep in pcrreplicates:
item['updated_pcrreplicates'].append(
{"sample": rep.sample_extraction.sample.id, "cq_value": 0, "gc_reaction": 0})
serializer = self.serializer_class(pcrreplicate_batch, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
message = "No PCR replicate batch was found with extraction batch of " + str(item['extraction_batch'])
message += " and target of " + str(item['target'])
message += " and replicate number of " + str(item['replicate_number'])
is_valid = False
response_errors.append({"pcrreplicatebatch": message})
if is_valid:
# now that all items are proven valid, save and return them to the user
response_data = []
for item in valid_data:
item.save()
# recalc the child rep validity
reps = PCRReplicate.objects.filter(pcrreplicate_batch=item.data['id'])
for rep in reps:
if rep.invalid_override is None:
rep.invalid = rep.calc_invalid()
rep.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
@action(methods=['post'], detail=False)
def validate(self, request):
validation_errors = []
if 'analysis_batch' not in request.data:
validation_errors.append("analysis_batch is required")
if 'extraction_number' not in request.data:
validation_errors.append("extraction_number is required")
if 'target' not in request.data:
validation_errors.append("target is required")
if 'replicate_number' not in request.data:
validation_errors.append("replicate_number is required")
if len(validation_errors) > 0:
return Response(validation_errors)
extraction_batch = ExtractionBatch.objects.filter(
analysis_batch=request.data['analysis_batch'],
extraction_number=request.data['extraction_number']
).first()
if not extraction_batch:
message = "No extraction batch was found with analysis batch of " + str(request.data['analysis_batch'])
message += " and extraction number of " + str(request.data['extraction_number'])
return Response({"extraction_batch": message})
target = Target.objects.filter(id=request.data['target']).first()
if not target:
message = "No target was found with ID of " + str(request.data['target'])
return Response({"target": message})
pcrreplicate_batch = PCRReplicateBatch.objects.filter(
extraction_batch=extraction_batch.id,
target=target.id,
replicate_number=request.data['replicate_number']
).first()
if not pcrreplicate_batch:
message = "No PCR replicate batch was found with extraction batch of " + str(extraction_batch.id)
message += " and target of " + str(request.data['target'])
message += " and replicate number of " + str(request.data['replicate_number'])
return Response({"pcrreplicate_batch": message}, status=400)
rna = True if target.nucleic_acid_type.name == 'RNA' else False
# start building up the response object
field_validations = {
"id": pcrreplicate_batch.id,
"ext_neg_invalid": False,
"rt_neg_invalid": False,
"pcr_neg_invalid": False,
"pcr_pos_invalid": False
}
# populate the response object with the submitted control values and the control validations
control_fields = ['ext_neg_cq_value', 'ext_neg_gc_reaction', 'rt_neg_cq_value', 'rt_neg_gc_reaction',
'pcr_neg_cq_value', 'pcr_neg_gc_reaction', 'pcr_pos_cq_value', 'pcr_pos_gc_reaction']
control_validations = []
for field in control_fields:
field_validations[field] = request.data[field] if field in request.data else None
# exclude RT fields if this is a DNA target
if 'rt' not in field or rna:
validation_error = self.validate_controls(field)
if validation_error:
control_validations.append(validation_error)
if "ext_neg" in field:
field_validations["ext_neg_invalid"] = True
elif "rt_neg" in field:
field_validations["rt_neg_invalid"] = True
elif "pcr_neg" in field:
field_validations["pcr_neg_invalid"] = True
elif "pcr_pos" in field:
field_validations["pcr_pos_invalid"] = True
field_validations["validation_errors"] = control_validations
# check that pcrreplicates have been submitted
if 'updated_pcrreplicates' not in request.data or not request.data['updated_pcrreplicates']:
field_validations["updated_pcrreplicates"] = [("updated_pcrreplicates is missing", 2)]
else:
# validate pcrreplicates
existing_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch=pcrreplicate_batch.id).order_by('sample_extraction__sample__id')
all_pcrreplicates_validations = []
updated_pcrreplicates = request.data.get('updated_pcrreplicates')
updated_pcrreplicates_sample_ids = [rep['sample'] for rep in updated_pcrreplicates]
for existing_rep in existing_pcrreplicates:
sample_id = existing_rep.sample_extraction.sample.id
rep_validations = []
# attempt to find the matching updated rep
try:
rep_index = updated_pcrreplicates_sample_ids.index(sample_id)
# pop the matching updated rep from its list so that we eventually end up with an empty list,
# or a list of extraneous reps
updated_rep = updated_pcrreplicates.pop(rep_index)
# also remove the parallel sample ID so that the two lists continue to have matching indexes
del updated_pcrreplicates_sample_ids[rep_index]
# start building up the response object
response_rep = {"sample": sample_id}
rep_validations = []
# check if this rep has already been uploaded
if existing_rep.cq_value is not None:
message = "sample " + str(sample_id) + " has already been uploaded for this PCR replicate batch"
rep_validations.append(self.err_obj("cq_value", message, 1))
# validate cq_value
# remember that null is an acceptable value
if 'cq_value' not in updated_rep:
rep_validations.append(self.err_obj("cq_value", "cq_value ('cp') is missing", 2))
else:
rep_cq_value = updated_rep['cq_value']
response_rep['cq_value'] = rep_cq_value
if rep_cq_value is not None:
if not self.isnumber(rep_cq_value):
rep_validations.append(self.err_obj("cq_value", "cq_value ('cp') is not a number", 1))
elif rep_cq_value < Decimal('0'):
rep_validations.append(self.err_obj("cq_value", "cq_value ('cp') is less than zero", 2))
# validate gc_reaction
# remember that null is an acceptable value
if 'gc_reaction' not in updated_rep:
message = "gc_reaction ('concentration') is missing"
rep_validations.append(self.err_obj("gc_reaction", message, 2))
else:
rep_gc_reaction = updated_rep['gc_reaction']
response_rep['gc_reaction'] = rep_gc_reaction
if rep_gc_reaction is not None:
if not self.isnumber(rep_gc_reaction):
message = "gc_reaction ('concentration') is not a number"
rep_validations.append(self.err_obj("gc_reaction", message, 1))
response_rep['gc_reaction_sci'] = ''
elif rep_gc_reaction < Decimal('0'):
message = "gc_reaction ('concentration') is less than zero"
rep_validations.append(self.err_obj("gc_reaction", message, 2))
response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)
else:
response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)
else:
response_rep['gc_reaction'] = None
response_rep['gc_reaction_sci'] = ''
response_rep['validation_errors'] = rep_validations
all_pcrreplicates_validations.append(response_rep)
# no matching updated_rep was found
except ValueError:
# start building up the response object
response_rep = {"sample": sample_id}
message = "sample " + str(sample_id) + " expected but not found in submission"
rep_validations.append(self.err_obj("sample", message, 2))
response_rep['validation_errors'] = rep_validations
all_pcrreplicates_validations.append(response_rep)
# now list out the other updated reps that were submitted but do not belong to this batch
for extraneous_rep in updated_pcrreplicates:
rep_validations = []
sample_id = "(No Sample ID)"
if 'sample' not in extraneous_rep or extraneous_rep['sample'] is None:
validation_error = self.err_obj("sample", "sample is a required field", 1)
else:
sample_id = str(extraneous_rep.get('sample'))
message = "sample " + sample_id + " is not in this PCR replicate batch"
validation_error = self.err_obj("sample", message, 1)
# start building up the response object
response_rep = {"sample": sample_id}
if 'cq_value' not in extraneous_rep:
continue
else:
rep_cq_value = extraneous_rep['cq_value']
response_rep['cq_value'] = rep_cq_value
if 'gc_reaction' not in extraneous_rep:
continue
else:
rep_gc_reaction = extraneous_rep['gc_reaction']
response_rep['gc_reaction'] = rep_gc_reaction
if not self.isnumber(rep_gc_reaction):
response_rep['gc_reaction_sci'] = ''
else:
response_rep['gc_reaction_sci'] = get_sci_val(rep_gc_reaction)
rep_validations.append(validation_error)
response_rep['validation_errors'] = rep_validations
all_pcrreplicates_validations.append(response_rep)
field_validations["updated_pcrreplicates"] = all_pcrreplicates_validations
return JsonResponse(field_validations, safe=False, status=200)
# override the default queryset to allow filtering by URL arguments
def get_queryset(self):
queryset = PCRReplicateBatch.objects.all()
# if ID is in query, only search by ID and ignore other params
batch = self.request.query_params.get('id', None)
if batch is not None:
queryset = queryset.filter(id__exact=batch)
# else, search by other params (that don't include ID)
else:
analysis_batch = self.request.query_params.get('analysis_batch', None)
extraction_number = self.request.query_params.get('extraction_number', None)
if analysis_batch is not None and extraction_number is not None:
queryset = queryset.filter(extraction_batch__analysis_batch__exact=analysis_batch,
extraction_batch__extraction_number__exact=extraction_number)
target = self.request.query_params.get('target', None)
if target is not None:
queryset = queryset.filter(target__exact=target)
replicate_number = self.request.query_params.get('replicate_number', None)
if replicate_number is not None:
queryset = queryset.filter(replicate_number__exact=replicate_number)
return queryset
# override the default DELETE method to prevent deletion of a PCRReplicateBatch with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates = PCRReplicate.objects.filter(
pcrreplicate_batch=self.get_object().id).exclude(cq_value__isnull=True)
if any(nonnull_pcrreplicates):
message = "A PCR Replicate Batch may not be deleted"
message += " if any related PCR Replicates have results data entered."
raise APIException(message)
return super(PCRReplicateBatchViewSet, self).destroy(request, *args, **kwargs)
class StandardCurveViewSet(HistoryViewSet):
queryset = StandardCurve.objects.all()
serializer_class = StandardCurveSerializer
class InhibitionViewSet(HistoryViewSet):
queryset = Inhibition.objects.all()
serializer_class = InhibitionSerializer
def get_serializer(self, *args, **kwargs):
if 'data' in kwargs:
data = kwargs['data']
# check if many is required
if isinstance(data, list):
kwargs['many'] = True
return super(InhibitionViewSet, self).get_serializer(*args, **kwargs)
# override the default DELETE method to prevent deletion of an Inhibition with any results data entered
def destroy(self, request, *args, **kwargs):
nonnull_pcrreplicates_dna = PCRReplicate.objects.filter(
sample_extraction__inhibition_dna=self.get_object().id).exclude(cq_value__isnull=True)
nonnull_pcrreplicates_rna = PCRReplicate.objects.filter(
sample_extraction__inhibition_rna=self.get_object().id).exclude(cq_value__isnull=True)
nonnull_pcrreplicates = nonnull_pcrreplicates_dna.union(nonnull_pcrreplicates_rna).distinct()
if any(nonnull_pcrreplicates):
message = "An Inhibition may not be deleted if any related PCR Replicates have results data entered."
raise APIException(message)
return super(InhibitionViewSet, self).destroy(request, *args, **kwargs)
# override the default PATCH method to allow bulk processing
def patch(self, request, pk=None):
request_data = JSONParser().parse(request)
# if there is no pk, assume this is a bulk request
if not pk:
is_valid = True
response_data = []
valid_data = []
response_errors = []
for item in request_data:
# ensure the id field is present, otherwise nothing can be updated
if not item.get('id'):
is_valid = False
response_errors.append({"id": "This field is required."})
else:
inhib = item.pop('id')
inhibition = Inhibition.objects.filter(id=inhib).first()
if inhibition:
serializer = self.serializer_class(inhibition, data=item, partial=True)
# if this item is valid, temporarily hold it until all items are proven valid, then save all
# if even one item is invalid, none will be saved, and the user will be returned the error(s)
if serializer.is_valid():
valid_data.append(serializer)
else:
is_valid = False
response_errors.append(serializer.errors)
else:
is_valid = False
response_errors.append({"inhibition": "No Inhibition exists with this ID: " + str(inhib)})
if is_valid:
# now that all items are proven valid, save and return them to the user
for item in valid_data:
item.save()
response_data.append(item.data)
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
# otherwise, if there is a pk, update the instance indicated by the pk
else:
inhibition = Inhibition.objects.filter(id=pk).first()
if inhibition:
serializer = self.serializer_class(inhibition, data=request_data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=200)
else:
return Response(serializer.errors, status=400)
else:
return JsonResponse({"inhibition": "No Inhibition exists with this ID: " + str(pk)}, status=400)
class SampleInhibitionViewSet(HistoryViewSet):
serializer_class = SampleInhibitionSerializer
# override the default queryset to allow filtering by URL arguments
# if sample ID is in query, only search by sample ID and ignore other params
def get_queryset(self):
queryset = Sample.objects.all()
# filter by sample IDs, exact list
sample = self.request.query_params.get('id', None)
if sample is not None:
sample_list = sample.split(',')
queryset = queryset.filter(id__in=sample_list)
# else, search by other params (that don't include sample ID)
else:
# filter by analysis batch ID, exact
analysis_batch = self.request.query_params.get('analysis_batch', None)
if analysis_batch is not None:
                queryset = queryset.filter(analysis_batches=analysis_batch)
return queryset
class InhibitionCalculateDilutionFactorView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
request_data = JSONParser().parse(request)
ab = request_data.get('analysis_batch', None)
en = request_data.get('extraction_number', None)
na = request_data.get('nucleic_acid_type', None)
eb = ExtractionBatch.objects.filter(analysis_batch=ab, extraction_number=en).first()
if eb:
serializer = InhibitionCalculateDilutionFactorSerializer(data=request_data)
if serializer.is_valid():
is_valid = True
response_data = []
response_errors = []
pos = request_data.get('inh_pos_cq_value', None)
inhibitions = request_data.get('inhibitions', None)
for inhibition in inhibitions:
cq = inhibition.get('cq_value', None)
sample = inhibition.get('sample', None)
inhib = Inhibition.objects.filter(sample=sample, extraction_batch=eb, nucleic_acid_type=na).first()
if inhib:
suggested_dilution_factor = None
                        diff = abs(pos - cq) if cq is not None else None
# If INH CONT Cq minus Sample Cq<2 cycles, then dilution factor = 1 (no dilution)
# If INH CONT Cq minus Sample Cq>=2 cycles AND Sample Cq<36, then dilution factor = 5
# If INH CONT Cq minus Sample Cq>2 cycles AND Sample Cq>36 or no Cq, then dilution factor = 10
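                        # Worked examples of the rules above (illustrative numbers):
                        #     pos=30.0, cq=29.0 -> diff=1.0        -> factor 1
                        #     pos=30.0, cq=27.5 -> diff=2.5, cq<36 -> factor 5
                        #     pos=30.0, cq=None -> no Cq           -> factor 10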
if not cq:
suggested_dilution_factor = 10
elif 0.0 <= diff < 2.0:
suggested_dilution_factor = 1
elif diff >= 2.0 and cq < 36.0:
suggested_dilution_factor = 5
elif diff > 2.0 and cq > 36.0:
suggested_dilution_factor = 10
new_data = {"id": inhib.id, "sample": sample, "cq_value": cq,
"suggested_dilution_factor": suggested_dilution_factor,
"extraction_batch": eb.id}
response_data.append(new_data)
else:
is_valid = False
message = "No Inhibition exists with Sample ID: " + str(sample)
message += ", Extraction Batch ID: " + str(eb) + ", Nucleic Acid Type ID: " + str(na)
response_errors.append({"inhibition": message})
if is_valid:
return JsonResponse(response_data, safe=False, status=200)
else:
return JsonResponse(response_errors, safe=False, status=400)
return Response(serializer.errors, status=400)
else:
message = "No Extraction Batch exists with Analysis Batch ID: " + str(ab)
message += " and Extraction Number: " + str(en)
return JsonResponse({"extraction_batch": message}, status=400)
class TargetViewSet(HistoryViewSet):
queryset = Target.objects.all()
serializer_class = TargetSerializer
######
#
# Misc
#
######
class FieldUnitViewSet(HistoryViewSet):
queryset = FieldUnit.objects.all()
serializer_class = FieldUnitSerializer
class NucleicAcidTypeViewSet(HistoryViewSet):
queryset = NucleicAcidType.objects.all()
serializer_class = NucleicAcidTypeSerializer
class RecordTypeViewSet(HistoryViewSet):
queryset = RecordType.objects.all()
serializer_class = RecordTypeSerializer
class OtherAnalysisViewSet(HistoryViewSet):
queryset = OtherAnalysis.objects.all()
serializer_class = OtherAnalysisSerializer
######
#
# Users
#
######
class UserViewSet(HistoryViewSet):
serializer_class = UserSerializer
def get_queryset(self):
# do not return the admin and public users
queryset = User.objects.all().exclude(id__in=[1])
# filter by username, exact
username = self.request.query_params.get('username', None)
if username is not None:
queryset = queryset.filter(username__exact=username)
return queryset
class AuthView(views.APIView):
authentication_classes = (CustomBasicAuthentication,)
serializer_class = UserSerializer
def post(self, request):
# remove all sessions to prevent CSRF missing error on subsequent basic auth requests
if request.user:
user_sessions = []
all_sessions = Session.objects.filter(expire_date__gte=timezone.now())
for session in all_sessions:
if str(request.user.id) == session.get_decoded().get('_auth_user_id'):
user_sessions.append(session.pk)
Session.objects.filter(pk__in=user_sessions).delete()
resp = Response(self.serializer_class(request.user).data)
# attempt to remove CSRF and session cookies
resp.delete_cookie('csrftoken')
resp.delete_cookie('sessionid')
return resp
######
#
# Reports
#
######
class QualityControlReportView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
request_data = JSONParser().parse(request)
samples = request_data.get('samples', None)
report_type = ReportType.objects.filter(id=4).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_quality_control_report.delay(samples, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Inhibition Report received."}, status=200)
class ControlsResultsReportView(views.APIView):
permission_classes = (permissions.IsAuthenticated,)
def post(self, request):
request_data = JSONParser().parse(request)
sample_ids = request_data.get('samples', None)
target_ids = request_data.get('targets', None)
report_type = ReportType.objects.filter(id=5).first()
status = Status.objects.filter(id=1).first()
report_file = ReportFile.objects.create(
report_type=report_type, status=status, created_by=request.user, modified_by=request.user)
task = generate_control_results_report.delay(sample_ids, target_ids, report_file.id, request.user.username)
monitor_task.delay(task.id, datetime.now().strftime('%Y-%m-%d_%H:%M:%S'), report_file.id)
return JsonResponse({"message": "Request for Control Results Report received."}, status=200)
class ReportFileViewSet(viewsets.ReadOnlyModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
serializer_class = ReportFileSerializer
def get_queryset(self):
queryset = ReportFile.objects.all()
query_params = self.request.query_params
# filter by report_type, exact list
report_type = query_params.get('report_type', None)
if report_type is not None:
if LIST_DELIMETER in report_type:
report_type_list = report_type.split(LIST_DELIMETER)
queryset = queryset.filter(report_type__in=report_type_list)
else:
queryset = queryset.filter(report_type__exact=report_type)
return queryset
class ReportTypeViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
queryset = ReportType.objects.all()
serializer_class = ReportTypeSerializer
class StatusViewSet(viewsets.ModelViewSet):
permission_classes = (permissions.IsAuthenticated,)
queryset = Status.objects.all()
serializer_class = StatusSerializer
|
py | 1a2fbb3f9d4cbb78cfc718b436a869fb005a7f82 | import typing
def format_exc(): pass |
py | 1a2fbdc42aa0037a08ad5b5a9976d8cf322a2c7d |
class AlreadyBoundException(Exception):
"""Raised when either x or y has been bound with the other."""
def __init__(self, *args):
Exception.__init__(self, 'Already Bound', *args)
# Bijection Mapper
class BiMapper():
def __init__(self):
self.x2y = {}
self.y2x = {}
def bind(self, x, y):
"""
Raises
------
AlreadyBoundException:
            Raises AlreadyBoundException if x or y is already bound.
"""
if x in self.x2y or y in self.y2x:
raise AlreadyBoundException()
self.x2y[x] = y
self.y2x[y] = x
def unbind_x(self, x):
        if x not in self.x2y: return
y = self.x2y.pop(x)
self.y2x.pop(y)
def unbind_y(self, y):
        if y not in self.y2x: return
x = self.y2x.pop(y)
self.x2y.pop(x)
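# Illustrative usage, added for documentation only (not part of the original module):
if __name__ == "__main__":
    mapper = BiMapper()
    mapper.bind("a", 1)
    assert mapper.x2y["a"] == 1 and mapper.y2x[1] == "a"
    try:
        mapper.bind("a", 2)  # "a" is already bound, so this raises
    except AlreadyBoundException:
        pass
    mapper.unbind_x("a")  # removes the pair from both directions
    assert "a" not in mapper.x2y and 1 not in mapper.y2x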
|
py | 1a2fbe136ea46d2b8933a72cc64d2ad3d78dd0e4 | from decimal import Decimal
from . import exchanges
from .user import User
from .utils import log
# TODO this logic isn't scientific in any way, mostly a playground
def determine_limit_price(user: User, symbol: str, purchasing_currency: str) -> Decimal:
# TODO this is binance-specific right now, refactor this out
trading_pair = symbol + purchasing_currency
client = user.binance_client()
# order depth returns the lowest asks and the highest bids
# increasing limits returns lower bids and higher asks
# grab a long-ish order book to get some analytics on the order book
order_book = client.get_order_book(symbol=trading_pair, limit=100)
# price that binance reports is at the bottom of the order book
# looks like they use the bottom of the ask stack to clear market orders (makes sense)
# cannot determine if the orders in the book are market, limit, or other order types.
# I wonder if other exchanges expose that sort of information?
lowest_ask = order_book["asks"][0][0]
highest_bid = order_book["bids"][0][0]
ask_difference = Decimal(highest_bid) - Decimal(lowest_ask)
# TODO can we inspect the low price and determine the volume that was traded at that price point?
last_day_low = low_over_last_day(user, trading_pair)
log.warn(
"price analytics",
symbol=trading_pair,
ask_bid_difference=ask_difference,
ask_bid_percentage_difference=ask_difference / Decimal(lowest_ask) * -100,
last_day_low_difference=100 - (last_day_low / Decimal(lowest_ask) * 100),
bid=highest_bid,
ask=lowest_ask,
last_day_low=last_day_low,
reported_price=exchanges.binance_price_for_symbol(trading_pair),
)
# TODO calculate momentum, or low price over last 24hrs, to determine the ideal drop price
# TODO pull percentage drop attempt from user model
    limit_price = min(Decimal(highest_bid), Decimal(lowest_ask) * Decimal("0.97"))
limit_price = min(last_day_low, limit_price)
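    # Illustrative numbers: with highest_bid=1800, lowest_ask=1810 and last_day_low=1770,
    # the candidate is min(1800, 1810 * 0.97 = 1755.7) and then min(1770, 1755.7) -> 1755.7.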
# TODO can we inspect the order book depth here? Or general liquidity for the market?
# what else can we do to improve our purchase strategy?
# TODO add option to use the midpoint, or some other position, of the order book instead of the lowest ask
return limit_price
def low_over_last_day(user: User, trading_pair: str) -> Decimal:
# import datetime
# TODO coinbase option is below, but ran into some issues with it that I can't remember
# candles = coinbase_public_client.get_product_historic_rates(
# product_id="PAXG-USD",
# granularity=60*60,
# start=(datetime.datetime.now() - datetime.timedelta(hours=24)).isoformat(),
# stop=datetime.datetime.now().isoformat()
# )
# min([candle['low'] for candle in candles])
# https://binance-docs.github.io/apidocs/spot/en/#kline-candlestick-data
# the API just returns an ordered array, which is insane
"""
[
1499040000000, // Open time
"0.01634790", // Open
"0.80000000", // High
"0.01575800", // Low
"0.01577100", // Close
"148976.11427815", // Volume
1499644799999, // Close time
"2434.19055334", // Quote asset volume
308, // Number of trades
"1756.87402397", // Taker buy base asset volume
"28.46694368", // Taker buy quote asset volume
"17928899.62484339" // Ignore.
]
"""
candles = user.binance_client().get_klines(symbol=trading_pair, interval="1h")
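    # index 3 of each kline entry below is the "Low" value, per the layout documented above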
return Decimal(min([candle[3] for candle in candles]))
|
py | 1a2fc15b74332b0e1ff63e13e6914abfc9334f42 | # Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Add license notice to every source file if not present
"""
import sys
from argparse import ArgumentParser
from pathlib import Path
from pathspec import PathSpec
PROJECT = "verde"
YEAR = "2017"
NOTICE = f"""
# Copyright (c) {YEAR} The {PROJECT.title()} Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
""".strip()
CHECK_HELP = """
Don't write the files, just return the status. Return code 0 means
nothing would change. Return code 1 means some files lack the license notice.
"""
def get_gitignore(root):
"""
Return a PathSpec matching gitignore content if present.
This function is a modified version of the one present in Black
(https://github.com/psf/black) available under MIT License.
"""
gitignore = root / ".gitignore"
lines = []
if gitignore.is_file():
with gitignore.open() as gi_file:
lines = gi_file.readlines()
return PathSpec.from_lines("gitwildmatch", lines)
def main():
"""
Add license notice to every source file if not present or just check
"""
# Create option parser
parser = ArgumentParser(
description=" Add license notice to every source file if not present."
)
parser.add_argument(
"--check", action="store_true", dest="check", default=False, help=CHECK_HELP
)
args = parser.parse_args()
gitignore = get_gitignore(Path("."))
python_files = [
path
for path in Path(".").glob("**/*.py")
if not str(path).startswith(".")
if not gitignore.match_file(path)
]
missing_notice_files = []
for pyfile in python_files:
code = pyfile.read_text()
if not code.startswith(NOTICE):
missing_notice_files.append(pyfile)
if args.check:
if missing_notice_files:
print("License notice is missing in some source files! 💔")
for pyfile in missing_notice_files:
print(f" {pyfile}")
sys.exit(1)
else:
print("All source files have the license notice! 🎉")
sys.exit(0)
else:
print("Successfully added license notice to:")
for pyfile in missing_notice_files:
code = pyfile.read_text()
pyfile.write_text("\n".join([NOTICE, code]))
print(f" {pyfile}")
sys.exit(0)
if __name__ == "__main__":
main()
|
py | 1a2fc40ddc164c90b62b6dc082080f9f5f1fd070 | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.research.utils import aggregate_fns
def create_weights_delta(input_size=2, hidden_size=5, constant=0):
"""Returns deterministic weights delta for a linear model."""
kernel = constant + tf.reshape(
tf.range(input_size * hidden_size, dtype=tf.float32),
[input_size, hidden_size])
bias = constant + tf.range(hidden_size, dtype=tf.float32)
return collections.OrderedDict([('dense/kernel', kernel),
('dense/bias', bias)])
class ClipNormAggregateFnTest(tf.test.TestCase):
def global_norm(self, value):
return tf.linalg.global_norm(tf.nest.flatten(value))
def test_clip_by_global_norm(self):
clip_norm = 20.0
aggregate_fn = aggregate_fns.build_clip_norm_aggregate_fn(clip_norm)
# Global l2 norms [17.74824, 53.99074].
deltas = [create_weights_delta(), create_weights_delta(constant=10)]
deltas_type = tff.framework.type_from_tensors(deltas[0])
weights = [1., 1.]
@tff.federated_computation(
tff.FederatedType(deltas_type, tff.CLIENTS),
tff.FederatedType(tf.float32, tff.CLIENTS))
def federated_aggregate_test(deltas, weights):
state = tff.federated_value(aggregate_fn.initialize(), tff.SERVER)
return aggregate_fn(state, deltas, weights)
federated_aggregate_test.type_signature.result.check_equivalent_to(
tff.StructType((
tff.FederatedType(
aggregate_fns.ClipNormAggregateState(
clip_norm=tf.float32, max_norm=tf.float32), tff.SERVER),
tff.FederatedType(deltas_type, tff.SERVER),
)))
state, mean = federated_aggregate_test(deltas, weights)
expected_clipped = []
for delta in deltas:
flat = tf.nest.flatten(delta)
clipped, _ = tf.clip_by_global_norm(flat, clip_norm)
expected_clipped.append(tf.nest.pack_sequence_as(delta, clipped))
expected_mean = tf.nest.map_structure(lambda a, b: (a + b) / 2,
*expected_clipped)
self.assertEqual(state.clip_norm, tf.constant(20.0, tf.float32))
self.assertEqual(state.max_norm, tf.constant(53.99074, tf.float32))
tf.nest.map_structure(self.assertAllEqual, expected_mean, mean)
class FixedClipNormProcessTest(tf.test.TestCase):
def test_clip_by_global_norm(self):
clip_norm = 20.0
test_deltas = [create_weights_delta(), create_weights_delta(constant=10)]
update_type = tff.framework.type_from_tensors(test_deltas[0])
aggregate_fn = aggregate_fns.build_fixed_clip_norm_mean_process(
clip_norm=clip_norm, model_update_type=update_type)
self.assertEqual(
aggregate_fn.next.type_signature,
tff.FunctionType(
parameter=(
tff.FederatedType((), tff.SERVER),
tff.FederatedType(update_type, tff.CLIENTS),
tff.FederatedType(tf.float32, tff.CLIENTS),
),
result=collections.OrderedDict(
state=tff.FederatedType((), tff.SERVER),
result=tff.FederatedType(update_type, tff.SERVER),
measurements=tff.FederatedType(
aggregate_fns.NormClippedAggregationMetrics(
max_global_norm=tf.float32, num_clipped=tf.int32),
tff.SERVER)),
))
state = aggregate_fn.initialize()
weights = [1., 1.]
output = aggregate_fn.next(state, test_deltas, weights)
expected_clipped = []
for delta in test_deltas:
clipped, _ = tf.clip_by_global_norm(tf.nest.flatten(delta), clip_norm)
expected_clipped.append(tf.nest.pack_sequence_as(delta, clipped))
expected_mean = tf.nest.map_structure(lambda a, b: (a + b) / 2,
*expected_clipped)
self.assertAllClose(expected_mean, output['result'])
# Global l2 norms [17.74824, 53.99074].
metrics = output['measurements']
self.assertAlmostEqual(metrics.max_global_norm, 53.99074, places=5)
self.assertEqual(metrics.num_clipped, 1)
if __name__ == '__main__':
tf.test.main()
|
py | 1a2fc42564e0d25ea3508aa3521acc4641688c8b | #!/usr/bin/env python3
# Copyright (c) 2018-2021 The Xaya developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spendability of premine and that P2SH is enforced correctly for it."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import *
from test_framework.util import *
import codecs
PREMINE_VALUE = Decimal ('222222222')
PREMINE_ADDRESS = 'dHNvNaqcD7XPDnoRjAoyfcMpHRi5upJD7p'
PREMINE_PRIVKEYS = ['b69iyynFSWcU54LqXisbbqZ8uTJ7Dawk3V3yhht6ykxgttqMQFjb',
'b3fgAKVQpMj24gbuh6DiXVwCCjCbo1cWiZC2fXgWEU9nXy6sdxD5']
PREMINE_PUBKEYS = [
'03c278d06b977e67b8ea45ef24e3c96a9258c47bc4cce3d0b497b690d672497b6e',
'0221ac9dc97fe12a98374344d08b458a9c2c1df9afb29dd6089b94a3b4dc9ad570',
]
class PremineTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module (self):
self.skip_if_no_wallet ()
def run_test(self):
node = self.nodes[0]
node.importaddress (PREMINE_ADDRESS)
# Find basic data about the genesis coinbase tx.
genesis = node.getblock (node.getblockhash (0), 2)
assert_equal (len (genesis['tx']), 1)
tx = genesis['tx'][0]
txid = tx['hash']
assert_equal (len (tx['vout']), 1)
out = tx['vout'][0]
assert_equal (out['value'], PREMINE_VALUE)
assert_equal (out['scriptPubKey']['address'], PREMINE_ADDRESS)
# Accessing it should work normally (upstream Bitcoin/Namecoin have a
# special check that disallows the genesis coinbase with getrawtransaction,
# as it is not spendable).
node.gettransaction (txid)
assert_equal (node.getrawtransaction (txid, False, genesis['hash']),
tx['hex'])
# The coinbase txout should be in the UTXO set.
utxo = node.gettxout (txid, 0)
assert utxo is not None
# Check balance of node and then import the keys for the premine
# and check again. It should be available as spendable.
assert_equal (node.getbalance (), 0)
for key in PREMINE_PRIVKEYS:
node.importprivkey (key, 'premine')
pubkeys = []
for addr in node.getaddressesbylabel ('premine'):
data = node.getaddressinfo (addr)
if (not data['isscript']) and (not data['iswitness']):
pubkeys.append (data['pubkey'])
assert_equal (set (pubkeys), set (PREMINE_PUBKEYS))
p2sh = node.addmultisigaddress (1, PREMINE_PUBKEYS)
assert_equal (p2sh['address'], PREMINE_ADDRESS)
node.rescanblockchain ()
assert_equal (node.getbalance (), PREMINE_VALUE)
# Construct a raw tx spending the premine.
addr = node.getnewaddress ()
inputs = [{"txid": txid, "vout": 0}]
outputs = {addr: Decimal ('123456')}
rawTx = node.createrawtransaction (inputs, outputs)
# Try to "sign" it by just adding the redeem script, which would have been
# valid before the P2SH softfork. Doing so should fail, which verifies that
# P2SH is enforced right from the start and thus that the premine is safe.
data = node.getaddressinfo (PREMINE_ADDRESS)
redeemScript = data['hex']
# Prepend script size, so that it will correctly push the script hash
# to the stack.
redeemScript = ("%02x" % (len (redeemScript) // 2)) + redeemScript
forgedTx = tx_from_hex (rawTx)
forgedTx.vin[0].scriptSig = codecs.decode (redeemScript, 'hex_codec')
forgedTx = forgedTx.serialize ().hex ()
assert_raises_rpc_error (-26, "not valid",
node.sendrawtransaction, forgedTx, 0)
# Sign and send the raw tx, should succeed.
signed = node.signrawtransactionwithwallet (rawTx)
assert signed['complete']
signedTx = signed['hex']
sendId = node.sendrawtransaction (signedTx, 0)
node.generate (1)
assert_equal (node.gettransaction (sendId)['confirmations'], 1)
if __name__ == '__main__':
PremineTest().main()
|
py | 1a2fc4c1e27cfa944455dd9e15fa7f96ace3d7d8 | from re import search
from setuptools import setup, find_packages
with open("graphql/__init__.py") as init_file:
version = search('__version__ = "(.*)"', init_file.read()).group(1)
with open("README.md") as readme_file:
readme = readme_file.read()
setup(
name="GraphQL-core-next",
version=version,
description="GraphQL-core-next is a Python port of GraphQL.js,"
" the JavaScript reference implementation for GraphQL.",
long_description=readme,
long_description_content_type="text/markdown",
keywords="graphql",
url="https://github.com/graphql-python/graphql-core-next",
author="Christoph Zwerschke",
author_email="[email protected]",
license="MIT license",
# PEP-561: https://www.python.org/dev/peps/pep-0561/
package_data={"graphql": ["py.typed"]},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
install_requires=[],
python_requires=">=3.6",
test_suite="tests",
tests_require=[
"pytest",
"pytest-asyncio",
"pytest-cov",
"pytest-describe",
"black",
"flake8",
"mypy",
"tox",
"python-coveralls",
],
packages=find_packages(include=["graphql"]),
include_package_data=True,
zip_safe=False,
)
|
py | 1a2fc4e313fbc3ebe872bc603357771bf264967c | '''
Flask-Admin has issues when used with Application Factory pattern.
Since I had issues when using it with pytest, I had to move the admin
instance to "register_admin" method.
https://github.com/flask-admin/flask-admin/issues/910
'''
from flask import redirect, url_for, request
from flask_admin import Admin
from flask_admin.menu import MenuLink
from flask_admin.contrib.sqla import ModelView
from flask_login import current_user
from app.extensions import db
from app.auth.models import User, Role, PreAllowedUser
from app.main.models import (
Product, Specification, StockProduct, Stock, Order, OrderItem, Transaction)
class ProtectedModelView(ModelView):
column_display_pk = True
def is_accessible(self):
return (current_user.is_authenticated and
current_user.is_administrator())
def inaccessible_callback(self, name, **kwargs):
return redirect(url_for('auth.login', next=request.url))
def register_admin(app):
admin = Admin(app=app, template_mode='bootstrap3')
admin.add_link(MenuLink(name='Voltar', url=('/')))
admin.add_views(
ProtectedModelView(User, db.session),
ProtectedModelView(Role, db.session),
ProtectedModelView(PreAllowedUser, db.session),
ProtectedModelView(Product, db.session),
ProtectedModelView(Specification, db.session),
ProtectedModelView(StockProduct, db.session),
ProtectedModelView(Stock, db.session),
ProtectedModelView(Order, db.session),
ProtectedModelView(OrderItem, db.session),
ProtectedModelView(Transaction, db.session),
)
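# Typical wiring in an application factory (illustrative sketch; names are assumptions,
# not part of this module):
#     def create_app():
#         app = Flask(__name__)
#         ...  # init extensions and blueprints
#         register_admin(app)
#         return app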
|
py | 1a2fc5a8fc13c30248bcafa6f23e14668fd26734 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import sys
from datetime import datetime, timedelta
import operator
import string
from inspect import getargspec
from itertools import product, starmap
from distutils.version import LooseVersion
import nose
from numpy import nan, inf
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,
date_range, period_range, timedelta_range)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
from pandas.tseries.tdi import Timedelta, TimedeltaIndex
import pandas.core.common as com
import pandas.core.config as cf
import pandas.lib as lib
import pandas.core.datetools as datetools
import pandas.core.nanops as nanops
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_almost_equal,
assert_frame_equal,
ensure_clean)
import pandas.util.testing as tm
#------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
_multiprocess_can_split_ = True
def test_scalarop_preserve_name(self):
result = self.ts * 2
self.assertEqual(result.name, self.ts.name)
def test_copy_name(self):
result = self.ts.copy()
self.assertEqual(result.name, self.ts.name)
def test_copy_index_name_checking(self):
# don't want to be able to modify the index stored elsewhere after
# making a copy
self.ts.index.name = None
self.assertIsNone(self.ts.index.name)
self.assertIs(self.ts, self.ts)
cp = self.ts.copy()
cp.index.name = 'foo'
com.pprint_thing(self.ts.index.name)
self.assertIsNone(self.ts.index.name)
def test_append_preserve_name(self):
result = self.ts[:5].append(self.ts[5:])
self.assertEqual(result.name, self.ts.name)
def test_dt_namespace_accessor(self):
# GH 7207
# test .dt namespace accessor
ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq']
ok_for_period = ok_for_base + ['qyear']
ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']
ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert']
ok_for_td = ['days','seconds','microseconds','nanoseconds']
ok_for_td_methods = ['components','to_pytimedelta']
def get_expected(s, name):
result = getattr(Index(s.values),prop)
if isinstance(result, np.ndarray):
if com.is_integer_dtype(result):
result = result.astype('int64')
elif not com.is_list_like(result):
return result
return Series(result,index=s.index)
def compare(s, name):
a = getattr(s.dt,prop)
b = get_expected(s,prop)
if not (com.is_list_like(a) and com.is_list_like(b)):
self.assertEqual(a,b)
else:
tm.assert_series_equal(a,b)
# invalids
for s in [Series(np.arange(5)),
Series(list('abcde')),
Series(np.random.randn(5))]:
self.assertRaises(TypeError, lambda : s.dt)
# datetimeindex
for s in [Series(date_range('20130101',periods=5)),
Series(date_range('20130101',periods=5,freq='s')),
Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:
for prop in ok_for_dt:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_dt_methods:
getattr(s.dt,prop)
result = s.dt.to_pydatetime()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
result = s.dt.tz_localize('US/Eastern')
expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)
tm.assert_series_equal(result, expected)
tz_result = result.dt.tz
self.assertEqual(str(tz_result), 'US/Eastern')
freq_result = s.dt.freq
self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)
# let's localize, then convert
result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)
tm.assert_series_equal(result, expected)
# timedeltaindex
for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),
Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),
Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:
for prop in ok_for_td:
# we test freq below
if prop != 'freq':
compare(s, prop)
for prop in ok_for_td_methods:
getattr(s.dt,prop)
result = s.dt.components
self.assertIsInstance(result,DataFrame)
tm.assert_index_equal(result.index,s.index)
result = s.dt.to_pytimedelta()
self.assertIsInstance(result,np.ndarray)
self.assertTrue(result.dtype == object)
freq_result = s.dt.freq
self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)
# both
index = date_range('20130101',periods=3,freq='D')
s = Series(date_range('20140204',periods=3,freq='s'),index=index)
tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))
tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))
tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))
# periodindex
for s in [Series(period_range('20130101',periods=5,freq='D'))]:
for prop in ok_for_period:
# we test freq below
if prop != 'freq':
compare(s, prop)
freq_result = s.dt.freq
self.assertEqual(freq_result, PeriodIndex(s.values).freq)
# test limited display api
def get_dir(s):
results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]
return list(sorted(set(results)))
s = Series(date_range('20130101',periods=5,freq='D'))
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + ok_for_dt_methods))))
s = Series(period_range('20130101',periods=5,freq='D').asobject)
results = get_dir(s)
tm.assert_almost_equal(results,list(sorted(set(ok_for_period))))
# no setting allowed
s = Series(date_range('20130101',periods=5,freq='D'))
with tm.assertRaisesRegexp(ValueError, "modifications"):
s.dt.hour = 5
# trying to set a copy
with pd.option_context('chained_assignment','raise'):
def f():
s.dt.hour[0] = 5
self.assertRaises(com.SettingWithCopyError, f)
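# Quick illustration of the accessor exercised above (not part of the original
# test suite): for a datetime64 Series the .dt namespace exposes the datetime
# fields element-wise, e.g.
#   Series(date_range('2013-01-01', periods=3)).dt.day  ->  [1, 2, 3]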
def test_valid_dt_with_missing_values(self):
from datetime import date, time
# GH 8689
s = Series(date_range('20130101',periods=5,freq='D'))
s_orig = s.copy()
s.iloc[2] = pd.NaT
for attr in ['microsecond','nanosecond','second','minute','hour','day']:
expected = getattr(s.dt,attr).copy()
expected.iloc[2] = np.nan
result = getattr(s.dt,attr)
tm.assert_series_equal(result, expected)
result = s.dt.date
expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object')
tm.assert_series_equal(result, expected)
result = s.dt.time
expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object')
tm.assert_series_equal(result, expected)
def test_dt_accessor_api(self):
# GH 9322
from pandas.tseries.common import (CombinedDatetimelikeProperties,
DatetimeProperties)
self.assertIs(Series.dt, CombinedDatetimelikeProperties)
s = Series(date_range('2000-01-01', periods=3))
self.assertIsInstance(s.dt, DatetimeProperties)
with tm.assertRaisesRegexp(TypeError, "only use .dt accessor"):
Series([1]).dt
def test_binop_maybe_preserve_name(self):
# names match, preserve
result = self.ts * self.ts
self.assertEqual(result.name, self.ts.name)
result = self.ts * self.ts[:-2]
self.assertEqual(result.name, self.ts.name)
# names don't match, don't preserve
cp = self.ts.copy()
cp.name = 'something else'
result = self.ts + cp
self.assertIsNone(result.name)
def test_combine_first_name(self):
result = self.ts.combine_first(self.ts[:5])
self.assertEqual(result.name, self.ts.name)
def test_combine_first_dt64(self):
from pandas.tseries.tools import to_datetime
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = to_datetime(Series([np.NaN, "2011"]))
rs = s0.combine_first(s1)
xp = to_datetime(Series(['2010', '2011']))
assert_series_equal(rs, xp)
s0 = to_datetime(Series(["2010", np.NaN]))
s1 = Series([np.NaN, "2011"])
rs = s0.combine_first(s1)
xp = Series([datetime(2010, 1, 1), '2011'])
assert_series_equal(rs, xp)
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
self.assertEqual(result,expected)
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
self.assertEqual(result,expected)
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i':[0]*3, 'b':[False]*3})
vc = df.i.value_counts()
result = vc.get(99,default='Missing')
self.assertEqual(result,'Missing')
vc = df.b.value_counts()
result = vc.get(False,default='Missing')
self.assertEqual(result,3)
result = vc.get(True,default='Missing')
self.assertEqual(result,'Missing')
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1,5),index=lrange(1,5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2,5),index=lrange(2,5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
self.assertRaises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64'))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64'))
def test_getitem_preserve_name(self):
result = self.ts[self.ts > 0]
self.assertEqual(result.name, self.ts.name)
result = self.ts[[0, 2, 4]]
self.assertEqual(result.name, self.ts.name)
result = self.ts[5:10]
self.assertEqual(result.name, self.ts.name)
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
self.assertRaises(IndexError, s.__getitem__, -11)
self.assertRaises(IndexError, s.__setitem__, -11, 'foo')
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second",
"foo one 0",
" two 1",
" three 2",
"bar one 3",
" two 4",
"baz two 5",
" three 6",
"qux one 7",
" two 8",
" three 9",
"Name: sth, dtype: int64"]
expected = "\n".join(expected)
self.assertEqual(repr(s), expected)
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.ix['foo']
self.assertEqual(result.name, s.name)
self.assertEqual(result2.name, s.name)
def test_name_printing(self):
# test small series
s = Series([0, 1, 2])
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
# test big series (diff code path)
s = Series(lrange(0, 1000))
s.name = "test"
self.assertIn("Name: test", repr(s))
s.name = None
self.assertNotIn("Name:", repr(s))
s = Series(index=date_range('20010101', '20020101'), name='test')
self.assertIn("Name: test", repr(s))
def test_pickle_preserve_name(self):
unpickled = self._pickle_roundtrip_name(self.ts)
self.assertEqual(unpickled.name, self.ts.name)
def _pickle_roundtrip_name(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_argsort_preserve_name(self):
result = self.ts.argsort()
self.assertEqual(result.name, self.ts.name)
def test_sort_index_name(self):
result = self.ts.sort_index(ascending=False)
self.assertEqual(result.name, self.ts.name)
def test_to_sparse_pass_name(self):
result = self.ts.to_sparse()
self.assertEqual(result.name, self.ts.name)
class TestNanops(tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
s == s2
s2 == s
def test_none_comparison(self):
# bug brought up by #1079
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
self.assertRaises(TypeError, s.__eq__, None)
def test_sum_zero(self):
arr = np.array([])
self.assertEqual(nanops.nansum(arr), 0)
arr = np.empty((10, 0))
self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())
# GH #844
s = Series([], index=[])
self.assertEqual(s.sum(), 0)
df = DataFrame(np.empty((10, 0)))
self.assertTrue((df.sum(1) == 0).all())
def test_nansum_buglet(self):
s = Series([1.0, np.nan], index=[0, 1])
result = np.nansum(s)
assert_almost_equal(result, 1)
def test_overflow(self):
# GH 6915
# overflowing on the smaller int dtypes
for dtype in ['int32','int64']:
v = np.arange(5000000,dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertEqual(int(result),v.sum(dtype='int64'))
result = s.min(skipna=False)
self.assertEqual(int(result),0)
result = s.max(skipna=False)
self.assertEqual(int(result),v[-1])
# use bottleneck if available
result = s.sum()
self.assertEqual(int(result),v.sum(dtype='int64'))
result = s.min()
self.assertEqual(int(result),0)
result = s.max()
self.assertEqual(int(result),v[-1])
for dtype in ['float32','float64']:
v = np.arange(5000000,dtype=dtype)
s = Series(v)
# no bottleneck
result = s.sum(skipna=False)
self.assertTrue(np.allclose(float(result),v.sum(dtype='float64')))
result = s.min(skipna=False)
self.assertTrue(np.allclose(float(result),0.0))
result = s.max(skipna=False)
self.assertTrue(np.allclose(float(result),v[-1]))
# use bottleneck if available
result = s.sum()
self.assertTrue(np.allclose(float(result),v.sum(dtype='float64')))
result = s.min()
self.assertTrue(np.allclose(float(result),0.0))
result = s.max()
self.assertTrue(np.allclose(float(result),v[-1]))
class SafeForSparse(object):
pass
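# Module-level time-series fixture; TestSeries.setUp() copies it into self.ts.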
_ts = tm.makeTimeSeries()
class TestSeries(tm.TestCase, CheckNameIntegration):
_multiprocess_can_split_ = True
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.ts = _ts.copy()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.objSeries = tm.makeObjectSeries()
self.objSeries.name = 'objects'
self.empty = Series([], index=[])
def test_scalar_conversion(self):
# Passing in a scalar no longer returns a scalar
scalar = Series(0.5)
self.assertNotIsInstance(scalar, float)
# coercion
self.assertEqual(float(Series([1.])), 1.0)
self.assertEqual(int(Series([1.])), 1)
self.assertEqual(long(Series([1.])), 1)
def test_astype(self):
s = Series(np.random.randn(5),name='foo')
for dtype in ['float32','float64','int64','int32']:
astyped = s.astype(dtype)
self.assertEqual(astyped.dtype, dtype)
self.assertEqual(astyped.name, s.name)
def test_constructor(self):
# Recognize TimeSeries
self.assertTrue(self.ts.is_time_series)
# Pass in Series
derived = Series(self.ts)
self.assertTrue(derived.is_time_series)
self.assertTrue(tm.equalContents(derived.index, self.ts.index))
# Ensure new index is not created
self.assertEqual(id(self.ts.index), id(derived.index))
# Mixed type Series
mixed = Series(['hello', np.NaN], index=[0, 1])
self.assertEqual(mixed.dtype, np.object_)
self.assertIs(mixed[1], np.NaN)
self.assertFalse(self.empty.is_time_series)
self.assertFalse(Series({}).is_time_series)
self.assertRaises(Exception, Series, np.random.randn(3, 3),
index=np.arange(3))
mixed.name = 'Series'
rs = Series(mixed).name
xp = 'Series'
self.assertEqual(rs, xp)
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
self.assertRaises(NotImplementedError, Series, m)
def test_constructor_empty(self):
empty = Series()
empty2 = Series([])
assert_series_equal(empty, empty2)
empty = Series(index=lrange(10))
empty2 = Series(np.nan, index=lrange(10))
assert_series_equal(empty, empty2)
def test_constructor_series(self):
index1 = ['d', 'b', 'a', 'c']
index2 = sorted(index1)
s1 = Series([4, 7, -5, 3], index=index1)
s2 = Series(s1, index=index2)
assert_series_equal(s2, s1.sort_index())
def test_constructor_iterator(self):
expected = Series(list(range(10)),dtype='int64')
result = Series(range(10),dtype='int64')
assert_series_equal(result, expected)
def test_constructor_generator(self):
gen = (i for i in range(10))
result = Series(gen)
exp = Series(lrange(10))
assert_series_equal(result, exp)
gen = (i for i in range(10))
result = Series(gen, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_map(self):
# GH8909
m = map(lambda x: x, range(10))
result = Series(m)
exp = Series(lrange(10))
assert_series_equal(result, exp)
m = map(lambda x: x, range(10))
result = Series(m, index=lrange(10, 20))
exp.index = lrange(10, 20)
assert_series_equal(result, exp)
def test_constructor_categorical(self):
cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)
cat.name = 'foo'
res = Series(cat)
self.assertEqual(res.name, cat.name)
self.assertTrue(res.values.equals(cat))
def test_constructor_maskedarray(self):
data = ma.masked_all((3,), dtype=float)
result = Series(data)
expected = Series([nan, nan, nan])
assert_series_equal(result, expected)
data[0] = 0.0
data[2] = 2.0
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0.0, nan, 2.0], index=index)
assert_series_equal(result, expected)
data[1] = 1.0
result = Series(data, index=index)
expected = Series([0.0, 1.0, 2.0], index=index)
assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=int)
result = Series(data)
expected = Series([nan, nan, nan], dtype=float)
assert_series_equal(result, expected)
data[0] = 0
data[2] = 2
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([0, nan, 2], index=index, dtype=float)
assert_series_equal(result, expected)
data[1] = 1
result = Series(data, index=index)
expected = Series([0, 1, 2], index=index, dtype=int)
assert_series_equal(result, expected)
data = ma.masked_all((3,), dtype=bool)
result = Series(data)
expected = Series([nan, nan, nan], dtype=object)
assert_series_equal(result, expected)
data[0] = True
data[2] = False
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([True, nan, False], index=index, dtype=object)
assert_series_equal(result, expected)
data[1] = True
result = Series(data, index=index)
expected = Series([True, True, False], index=index, dtype=bool)
assert_series_equal(result, expected)
from pandas import tslib
data = ma.masked_all((3,), dtype='M8[ns]')
result = Series(data)
expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')
assert_series_equal(result, expected)
data[0] = datetime(2001, 1, 1)
data[2] = datetime(2001, 1, 3)
index = ['a', 'b', 'c']
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), tslib.iNaT,
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
data[1] = datetime(2001, 1, 2)
result = Series(data, index=index)
expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
assert_series_equal(result, expected)
def test_constructor_default_index(self):
s = Series([0, 1, 2])
assert_almost_equal(s.index, np.arange(3))
def test_constructor_corner(self):
df = tm.makeTimeDataFrame()
objs = [df, df]
s = Series(objs, index=[0, 1])
tm.assert_isinstance(s, Series)
def test_constructor_sanitize(self):
s = Series(np.array([1., 1., 8.]), dtype='i8')
self.assertEqual(s.dtype, np.dtype('i8'))
s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
self.assertEqual(s.dtype, np.dtype('f8'))
def test_constructor_pass_none(self):
s = Series(None, index=lrange(5))
self.assertEqual(s.dtype, np.float64)
s = Series(None, index=lrange(5), dtype=object)
self.assertEqual(s.dtype, np.object_)
# GH 7431
# inference on the index
s = Series(index=np.array([None]))
expected = Series(index=Index([None]))
assert_series_equal(s,expected)
def test_constructor_cast(self):
self.assertRaises(ValueError, Series, ['a', 'b', 'c'], dtype=float)
def test_constructor_dtype_nocast(self):
# 1572
s = Series([1, 2, 3])
s2 = Series(s, dtype=np.int64)
s2[1] = 5
self.assertEqual(s[1], 5)
def test_constructor_dtype_datetime64(self):
import pandas.tslib as tslib
s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
self.assertTrue(isnull(s).all())
# in theory this should be all nulls, but since
# we are not specifying a dtype, it is ambiguous
s = Series(tslib.iNaT, index=lrange(5))
self.assertFalse(isnull(s).all())
s = Series(nan, dtype='M8[ns]', index=lrange(5))
self.assertTrue(isnull(s).all())
s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')
self.assertTrue(isnull(s[1]))
self.assertEqual(s.dtype, 'M8[ns]')
s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
self.assertTrue(isnull(s[1]))
self.assertEqual(s.dtype, 'M8[ns]')
# GH3416
dates = [
np.datetime64(datetime(2013, 1, 1)),
np.datetime64(datetime(2013, 1, 2)),
np.datetime64(datetime(2013, 1, 3)),
]
s = Series(dates)
self.assertEqual(s.dtype, 'M8[ns]')
s.ix[0] = np.nan
self.assertEqual(s.dtype, 'M8[ns]')
# invalid astypes
for t in ['s', 'D', 'us', 'ms']:
self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)
# GH3414 related
self.assertRaises(TypeError, lambda x: Series(
Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
self.assertRaises(
TypeError, lambda x: Series(dates, dtype='datetime64'))
# invalid dates can be held as object
result = Series([datetime(2,1,1)])
self.assertEqual(result[0], datetime(2,1,1,0,0))
result = Series([datetime(3000,1,1)])
self.assertEqual(result[0], datetime(3000,1,1,0,0))
# don't mix types
result = Series([ Timestamp('20130101'), 1],index=['a','b'])
self.assertEqual(result['a'], Timestamp('20130101'))
self.assertEqual(result['b'], 1)
# GH6529
# coerce datetime64 non-ns properly
dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
values2 = dates.view(np.ndarray).astype('datetime64[ns]')
expected = Series(values2, dates)
for dtype in ['s', 'D', 'ms', 'us', 'ns']:
values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
result = Series(values1, dates)
assert_series_equal(result,expected)
# leave datetime.date alone
dates2 = np.array([d.date() for d in dates.to_pydatetime()],
dtype=object)
series1 = Series(dates2, dates)
self.assert_numpy_array_equal(series1.values,dates2)
self.assertEqual(series1.dtype,object)
# these will correctly infer a datetime
s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
self.assertEqual(s.dtype,'datetime64[ns]')
# tz-aware (UTC and other tz's)
# GH 8411
dr = date_range('20130101',periods=3)
self.assertTrue(Series(dr).iloc[0].tz is None)
dr = date_range('20130101',periods=3,tz='UTC')
self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')
dr = date_range('20130101',periods=3,tz='US/Eastern')
self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')
def test_constructor_periodindex(self):
# GH7932
# converting a PeriodIndex when put in a Series
pi = period_range('20130101',periods=5,freq='D')
s = Series(pi)
expected = Series(pi.asobject)
assert_series_equal(s, expected)
def test_constructor_dict(self):
d = {'a': 0., 'b': 1., 'c': 2.}
result = Series(d, index=['b', 'c', 'd', 'a'])
expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
assert_series_equal(result, expected)
pidx = tm.makePeriodIndex(100)
d = {pidx[0]: 0, pidx[1]: 1}
result = Series(d, index=pidx)
expected = Series(np.nan, pidx)
expected.ix[0] = 0
expected.ix[1] = 1
assert_series_equal(result, expected)
def test_constructor_dict_multiindex(self):
check = lambda result, expected: tm.assert_series_equal(
result, expected, check_dtype=True, check_index_type=True,
check_series_type=True)
d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
_d = sorted(d.items())
ser = Series(d)
expected = Series([x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d]))
check(ser, expected)
d['z'] = 111.
_d.insert(0, ('z', d['z']))
ser = Series(d)
expected = Series(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False))
ser = ser.reindex(index=expected.index)
check(ser, expected)
def test_constructor_subclass_dict(self):
data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
series = Series(data)
refseries = Series(dict(compat.iteritems(data)))
assert_series_equal(refseries, series)
def test_orderedDict_ctor(self):
# GH3283
import pandas
import random
data = OrderedDict([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
self.assertTrue(all(s.values == list(data.values())))
def test_orderedDict_subclass_ctor(self):
# GH3283
import pandas
import random
class A(OrderedDict):
pass
data = A([('col%s' % i, random.random()) for i in range(12)])
s = pandas.Series(data)
self.assertTrue(all(s.values == list(data.values())))
def test_constructor_list_of_tuples(self):
data = [(1, 1), (2, 2), (2, 3)]
s = Series(data)
self.assertEqual(list(s), data)
def test_constructor_tuple_of_tuples(self):
data = ((1, 1), (2, 2), (2, 3))
s = Series(data)
self.assertEqual(tuple(s), data)
def test_constructor_set(self):
values = set([1, 2, 3, 4, 5])
self.assertRaises(TypeError, Series, values)
values = frozenset(values)
self.assertRaises(TypeError, Series, values)
def test_fromDict(self):
data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
series = Series(data)
self.assertTrue(tm.is_sorted(series.index))
data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
series = Series(data)
self.assertEqual(series.dtype, np.object_)
data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
series = Series(data)
self.assertEqual(series.dtype, np.object_)
data = {'a': '0', 'b': '1'}
series = Series(data, dtype=float)
self.assertEqual(series.dtype, np.float64)
def test_setindex(self):
# wrong type
series = self.series.copy()
self.assertRaises(TypeError, setattr, series, 'index', None)
# wrong length
series = self.series.copy()
self.assertRaises(Exception, setattr, series, 'index',
np.arange(len(series) - 1))
# works
series = self.series.copy()
series.index = np.arange(len(series))
tm.assert_isinstance(series.index, Index)
def test_array_finalize(self):
pass
def test_pop(self):
# GH 6600
df = DataFrame({
'A': 0,
'B': np.arange(5,dtype='int64'),
'C': 0,
})
k = df.iloc[4]
result = k.pop('B')
self.assertEqual(result, 4)
expected = Series([0,0],index=['A','C'])
assert_series_equal(k, expected)
def test_not_hashable(self):
s_empty = Series()
s = Series([1])
self.assertRaises(TypeError, hash, s_empty)
self.assertRaises(TypeError, hash, s)
def test_fromValue(self):
nans = Series(np.NaN, index=self.ts.index)
self.assertEqual(nans.dtype, np.float_)
self.assertEqual(len(nans), len(self.ts))
strings = Series('foo', index=self.ts.index)
self.assertEqual(strings.dtype, np.object_)
self.assertEqual(len(strings), len(self.ts))
d = datetime.now()
dates = Series(d, index=self.ts.index)
self.assertEqual(dates.dtype, 'M8[ns]')
self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
for i in range(len(s)):
result = s.iget(i)
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iget(slice(1, 3))
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iget([0, 2, 3, 4, 5])
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
self.assertEqual(s.iget(2), 2)
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
idx = np.int64(5)
self.assertEqual(self.ts[idx], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_numpy_array_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan,index=['C'],dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object)
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
#ts[mask_shifted]
#ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
#ts.ix[mask_shifted]
#ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# previously caused a bug that had no test coverage
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assert_isinstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(np.isscalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5),index=['A','A','B','C','C'],dtype=np.int64)
expected = Series([3,4],index=['C','C'],dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df>5)
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
result = s[::-1] # it works!
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
expected = self.series.append(Series([1],index=['foobar']))
assert_series_equal(s,expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan,2,3])
s = Series([1,2,3])
s.iloc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s.loc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s[0] = np.nan
assert_series_equal(s,expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan]))
s = Series([False,True])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan,1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
sl = self.ts[5:20]
self.assertEqual(len(sl), len(sl.index))
self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_reshape_non_2d(self):
# GH 4554
x = Series(np.random.random(201), name='x')
self.assertTrue(x.reshape(x.shape,) is x)
# GH 2719
a = Series([1, 2, 3, 4])
result = a.reshape(2, 2)
expected = a.values.reshape(2, 2)
np.testing.assert_array_equal(result, expected)
self.assertTrue(type(result) is type(expected))
def test_reshape_2d_return_array(self):
x = Series(np.random.random(201), name='x')
result = x.reshape((-1, 1))
self.assertNotIsInstance(result, Series)
result2 = np.reshape(x, (-1, 1))
self.assertNotIsInstance(result2, Series)
result = x[:, None]
expected = x.reshape((-1, 1))
assert_almost_equal(result, expected)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.ix[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.ix[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.ix[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.ix[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert(s.shape == rs.shape)
assert(rs is not s)
# test alignment
cond = Series([True,False,False,True,False],index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3])
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# can't do these: they would force the itemsize of the input to change,
# which is not supported
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5,4,3,2,1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1,2,3,4])
result = s.where(s>2,np.nan)
expected = Series([np.nan,np.nan,3,4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan,index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0,1,2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0,'b',1,'d','e','f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a','b','c',0,1,'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)),'b','c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [np.resize([True, False, False, False, False], size), # First element should be set
# Set alternating elements
np.resize([True, False], size),
np.resize([False], size)]: # No element should be set
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item,)]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[i]
for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1,s2])
result = comb.where(comb < 2)
expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb<1] = 5
expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
comb[comb<2] += 10
expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
def test_mask(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond, np.nan)
assert_series_equal(rs, s.mask(~cond))
def test_drop(self):
# unique
s = Series([1,2],index=['one','two'])
expected = Series([1],index=['one'])
result = s.drop(['two'])
assert_series_equal(result,expected)
result = s.drop('two', axis='rows')
assert_series_equal(result,expected)
# non-unique
# GH 5248
s = Series([1,1,2],index=['one','two','one'])
expected = Series([1,2],index=['one','one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result,expected)
result = s.drop('two')
assert_series_equal(result,expected)
expected = Series([1],index=['two'])
result = s.drop(['one'])
assert_series_equal(result,expected)
result = s.drop('one')
assert_series_equal(result,expected)
# single string/tuple-like
s = Series(range(3),index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a',))
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2,3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3],index=[False])
assert_series_equal(result,expected)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s>1, 'X')
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s>1, ['X', 'Y', 'Z'])
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
w = s.where(s>1, np.array(['X', 'Y', 'Z']))
self.assertFalse(com.is_integer(w[0]))
self.assertTrue(com.is_integer(w[1]))
self.assertTrue(com.is_integer(w[2]))
self.assertTrue(isinstance(w[0], str))
self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.order()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'),
(1, 2),
('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
ser = Series(["a\n\r\tb"], name=["a\n\r\td"], index=["a\n\r\tf"])
self.assertFalse("\t" in repr(ser))
self.assertFalse("\r" in repr(ser))
self.assertFalse("a\n" in repr(ser))
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
self.assertEqual(repr(s), 'Series([], name: foo, dtype: int64)')
s = Series([], dtype=np.int64, name=None)
self.assertEqual(repr(s), 'Series([], dtype: int64)')
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
# it works (with no Cython exception barf)!
repr(s)
finally:
sys.stderr = tmp
self.assertEqual(buf.getvalue(), '')
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"),) * 2
repr(s)
def test_repr_should_return_str(self):
# http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
# http://docs.python.org/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
self.assertTrue(type(df.__repr__()) == str) # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
def test_timeseries_periodindex(self):
# GH2891
from pandas import period_range
prng = period_range('1/1/2011', '1/1/2012', freq='M')
ts = Series(np.random.randn(len(prng)), prng)
new_ts = self.round_trip_pickle(ts)
self.assertEqual(new_ts.index.freq, 'M')
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
# HACK: By doing this in two stages, we avoid 2to3 wrapping the call
# to .keys() in a list()
getkeys = self.ts.keys
self.assertIs(getkeys(), self.ts.index)
def test_values(self):
self.assert_numpy_array_equal(self.ts, self.ts.values)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
self.assertEqual(val, self.series[idx])
for idx, val in compat.iteritems(self.ts):
self.assertEqual(val, self.ts[idx])
# assert is lazy (generators don't define reverse, lists do)
self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_sum_inf(self):
import pandas.core.nanops as nanops
s = Series(np.random.randn(10))
s2 = s.copy()
s[5:8] = np.inf
s2[5:8] = np.nan
self.assertTrue(np.isinf(s.sum()))
arr = np.random.randn(100, 100).astype('f4')
arr[:, 2] = np.inf
with cf.option_context("mode.use_inf_as_null", True):
assert_almost_equal(s.sum(), s2.sum())
res = nanops.nansum(arr, axis=1)
self.assertTrue(np.isinf(res).all())
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_median(self):
self._check_stat_op('median', np.median)
# test with integers, test failure
int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_mode(self):
s = Series([12, 12, 11, 10, 19, 11])
exp = Series([11, 12])
assert_series_equal(s.mode(), exp)
assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64'))
lst = [5] * 20 + [1] * 10 + [6] * 25
np.random.shuffle(lst)
s = Series(lst)
assert_series_equal(s.mode(), Series([6]))
s = Series([5] * 10)
assert_series_equal(s.mode(), Series([5]))
s = Series(lst)
s[0] = np.nan
assert_series_equal(s.mode(), Series([6.]))
s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))
assert_series_equal(s.mode(), Series(['e']))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series([], dtype="M8[ns]"))
s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',
'2013-01-02'], dtype='M8[ns]')
assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],
dtype='M8[ns]'))
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_min(self):
self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
self._check_stat_op('max', np.max, check_objects=True)
def test_var_std(self):
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
result = self.ts.std(ddof=4)
expected = np.std(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
result = self.ts.var(ddof=4)
expected = np.var(self.ts.values, ddof=4)
assert_almost_equal(result, expected)
# 1-element series with ddof=1
s = self.ts.iloc[[0]]
result = s.var(ddof=1)
self.assertTrue(isnull(result))
result = s.std(ddof=1)
self.assertTrue(isnull(result))
def test_sem(self):
alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.ts.sem(ddof=4)
expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values))
assert_almost_equal(result, expected)
# 1-element series with ddof=1
s = self.ts.iloc[[0]]
result = s.sem(ddof=1)
self.assertTrue(isnull(result))
def test_skew(self):
tm._skip_if_no_scipy()
from scipy.stats import skew
alt = lambda x: skew(x, bias=False)
self._check_stat_op('skew', alt)
# test corner cases: skew() returns NaN unless there are at least 3 values
min_N = 3
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.skew()))
self.assertTrue(np.isnan(df.skew()).all())
else:
self.assertEqual(0, s.skew())
self.assertTrue((df.skew() == 0).all())
def test_kurt(self):
tm._skip_if_no_scipy()
from scipy.stats import kurtosis
alt = lambda x: kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])
        # test corner cases, kurt() returns NaN unless there are at least 4 values
min_N = 4
for i in range(1, min_N + 1):
s = Series(np.ones(i))
df = DataFrame(np.ones((i, i)))
if i < min_N:
self.assertTrue(np.isnan(s.kurt()))
self.assertTrue(np.isnan(df.kurt()).all())
else:
self.assertEqual(0, s.kurt())
self.assertTrue((df.kurt() == 0).all())
def test_argsort(self):
self._check_accum_op('argsort')
argsorted = self.ts.argsort()
self.assertTrue(issubclass(argsorted.dtype.type, np.integer))
# GH 2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
self.assertEqual(s.dtype, 'datetime64[ns]')
shifted = s.shift(-1)
self.assertEqual(shifted.dtype, 'datetime64[ns]')
self.assertTrue(isnull(shifted[4]))
result = s.argsort()
expected = Series(lrange(5), dtype='int64')
assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(lrange(4) + [-1], dtype='int64')
assert_series_equal(result, expected)
def test_argsort_stable(self):
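        # mergesort is stable while the default quicksort is not, so both
        # indexers should match numpy's argsort of the values but differ
        # from each other on a Series with many repeated entries.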
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind='mergesort')
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind='mergesort')
qexpected = np.argsort(s.values, kind='quicksort')
self.assert_numpy_array_equal(mindexer, mexpected)
self.assert_numpy_array_equal(qindexer, qexpected)
self.assertFalse(np.array_equal(qindexer, mindexer))
def test_reorder_levels(self):
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]],
names=['L0', 'L1', 'L2'])
s = Series(np.arange(6), index=index)
# no change, position
result = s.reorder_levels([0, 1, 2])
assert_series_equal(s, result)
# no change, labels
result = s.reorder_levels(['L0', 'L1', 'L2'])
assert_series_equal(s, result)
# rotate, position
result = s.reorder_levels([1, 2, 0])
e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0]],
names=['L1', 'L2', 'L0'])
expected = Series(np.arange(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels([0, 0, 0])
e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
labels=[[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]],
names=['L0', 'L0', 'L0'])
expected = Series(range(6), index=e_idx)
assert_series_equal(result, expected)
result = s.reorder_levels(['L0', 'L0', 'L0'])
assert_series_equal(result, expected)
def test_cumsum(self):
self._check_accum_op('cumsum')
def test_cumprod(self):
self._check_accum_op('cumprod')
def test_cummin(self):
self.assert_numpy_array_equal(self.ts.cummin(),
np.minimum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummin()[1::2]
expected = np.minimum.accumulate(ts.valid())
self.assert_numpy_array_equal(result, expected)
def test_cummax(self):
self.assert_numpy_array_equal(self.ts.cummax(),
np.maximum.accumulate(np.array(self.ts)))
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.cummax()[1::2]
expected = np.maximum.accumulate(ts.valid())
self.assert_numpy_array_equal(result, expected)
def test_cummin_datetime64(self):
s = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1']))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
s = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_datetime(
['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3']))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
s = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))
result = s.cummin(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))
result = s.cummin(skipna=False)
self.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
s = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))
result = s.cummax(skipna=True)
self.assert_series_equal(expected, result)
expected = pd.Series(pd.to_timedelta(
['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))
result = s.cummax(skipna=False)
self.assert_series_equal(expected, result)
def test_npdiff(self):
raise nose.SkipTest("skipping due to Series no longer being an "
"ndarray")
        # no longer works as the return type of np.diff is now ndarray
s = Series(np.arange(5))
r = np.diff(s)
assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, check_objects=False):
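        # Shared helper: compares the Series reduction `name` against the
        # `alternate` (numpy-based) implementation, covering NaNs, all-NaN
        # input, object dtype with None, large int64 values, optional date
        # ranges, an invalid axis, and a pass with bottleneck disabled.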
import pandas.core.nanops as nanops
def testit():
f = getattr(Series, name)
# add some NaNs
self.series[5:15] = np.NaN
# idxmax, idxmin, min, and max are valid for dates
if name not in ['max','min']:
ds = Series(date_range('1/1/2001', periods=10))
self.assertRaises(TypeError, f, ds)
# skipna or no
self.assertTrue(notnull(f(self.series)))
self.assertTrue(isnull(f(self.series, skipna=False)))
# check the result is correct
nona = self.series.dropna()
assert_almost_equal(f(nona), alternate(nona.values))
assert_almost_equal(f(self.series), alternate(nona.values))
allna = self.series * nan
self.assertTrue(np.isnan(f(allna)))
# dtype=object with None, it works!
s = Series([1, 2, 3, None, 5])
f(s)
        # GH 2888
l = [0]
l.extend(lrange(2 ** 40, 2 ** 40+1000))
s = Series(l, dtype='int64')
assert_almost_equal(float(f(s)), float(alternate(s.values)))
# check date range
if check_objects:
s = Series(bdate_range('1/1/2000', periods=10))
res = f(s)
exp = alternate(s)
self.assertEqual(res, exp)
# Invalid axis.
self.assertRaises(ValueError, f, self.series, axis=1)
# Unimplemented numeric_only parameter.
if 'numeric_only' in getargspec(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
self.series, numeric_only=True)
testit()
try:
import bottleneck as bn
nanops._USE_BOTTLENECK = False
testit()
nanops._USE_BOTTLENECK = True
except ImportError:
pass
def _check_accum_op(self, name):
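        # Shared helper: applying the numpy function `name` to the Series
        # should match applying it to the underlying ndarray, including
        # when NaNs are present.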
func = getattr(np, name)
self.assert_numpy_array_equal(func(self.ts), func(np.array(self.ts)))
# with missing values
ts = self.ts.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.valid()))
self.assert_numpy_array_equal(result, expected)
def test_round(self):
# numpy.round doesn't preserve metadata, probably a numpy bug,
# re: GH #314
result = np.round(self.ts, 2)
expected = Series(np.round(self.ts.values, 2), index=self.ts.index)
assert_series_equal(result, expected)
self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
s = Series([1., 1., 1.], index=lrange(3))
result = s.prod()
self.assertNotIsInstance(result, Series)
def test_quantile(self):
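        # quantile() should agree with numpy.percentile on the non-missing
        # values and also work for object, datetime64 and timedelta64 dtypes.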
from numpy import percentile
q = self.ts.quantile(0.1)
self.assertEqual(q, percentile(self.ts.valid(), 10))
q = self.ts.quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# object dtype
q = Series(self.ts,dtype=object).quantile(0.9)
self.assertEqual(q, percentile(self.ts.valid(), 90))
# datetime64[ns] dtype
dts = self.ts.index.to_series()
q = dts.quantile(.2)
self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(.25)
self.assertEqual(q, pd.to_timedelta('24:00:00'))
# GH7661
result = Series([np.timedelta64('NaT')]).sum()
self.assertTrue(result is pd.NaT)
def test_quantile_multi(self):
from numpy import percentile
qs = [.1, .9]
result = self.ts.quantile(qs)
expected = pd.Series([percentile(self.ts.valid(), 10),
percentile(self.ts.valid(), 90)],
index=qs)
assert_series_equal(result, expected)
dts = self.ts.index.to_series()
result = dts.quantile((.2, .2))
assert_series_equal(result, Series([Timestamp('2000-01-10 19:12:00'),
Timestamp('2000-01-10 19:12:00')],
index=[.2, .2]))
def test_append(self):
appendedSeries = self.series.append(self.objSeries)
for idx, value in compat.iteritems(appendedSeries):
if idx in self.series.index:
self.assertEqual(value, self.series[idx])
elif idx in self.objSeries.index:
self.assertEqual(value, self.objSeries[idx])
else:
self.fail("orphaned index!")
self.assertRaises(ValueError, self.ts.append, self.ts,
verify_integrity=True)
def test_append_many(self):
pieces = [self.ts[:5], self.ts[5:10], self.ts[10:]]
result = pieces[0].append(pieces[1:])
assert_series_equal(result, self.ts)
def test_all_any(self):
ts = tm.makeTimeSeries()
bool_series = ts > 0
self.assertFalse(bool_series.all())
self.assertTrue(bool_series.any())
# Alternative types, with implicit 'object' dtype.
s = Series(['abc', True])
self.assertEqual('abc', s.any()) # 'abc' || True => 'abc'
def test_all_any_params(self):
# Check skipna, with implicit 'object' dtype.
s1 = Series([np.nan, True])
s2 = Series([np.nan, False])
self.assertTrue(s1.all(skipna=False)) # nan && True => True
self.assertTrue(s1.all(skipna=True))
self.assertTrue(np.isnan(s2.any(skipna=False))) # nan || False => nan
self.assertFalse(s2.any(skipna=True))
# Check level.
s = pd.Series([False, False, True, True, False, True],
index=[0, 0, 1, 1, 2, 2])
assert_series_equal(s.all(level=0), Series([False, True, False]))
assert_series_equal(s.any(level=0), Series([False, True, True]))
# bool_only is not implemented with level option.
self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)
self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)
# bool_only is not implemented alone.
self.assertRaises(NotImplementedError, s.any, bool_only=True)
self.assertRaises(NotImplementedError, s.all, bool_only=True)
def test_op_method(self):
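        # Named arithmetic methods (Series.add, .sub, ...) should match the
        # corresponding operators, and the r-prefixed variants the reflected
        # operations.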
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
                if opname == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_modulo(self):
# GH3590, modulo as ints
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] % p['second']
expected = Series(p['first'].values %
p['second'].values, dtype='float64')
expected.iloc[0:3] = np.nan
assert_series_equal(result, expected)
result = p['first'] % 0
expected = Series(np.nan, index=p.index)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
expected = Series(p['first'].values % p['second'].values)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] % p['second']
result2 = p['second'] % p['first']
self.assertFalse(np.array_equal(result, result2))
# GH 9144
s = Series([0, 1])
result = s % 0
expected = Series([nan, nan])
assert_series_equal(result, expected)
result = 0 % s
expected = Series([nan, 0.0])
assert_series_equal(result, expected)
def test_div(self):
        # no longer do integer division for any ops, but handle the zeros
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values, dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index)
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'))
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan,1.,-1.])
result = s / 0
expected = Series([np.nan,np.inf,-np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)})
expected = Series([-0.01,-np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
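        # The fast (cython/numpy) arithmetic path should agree with a pure
        # Python combine() of the two operands, for both arithmetic and
        # comparison operators.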
def _check_op(series, other, op, pos_only=False):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_almost_equal(cython_or_numpy, python)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other):
_check_op(series, other, operator.gt)
_check_op(series, other, operator.ge)
_check_op(series, other, operator.eq)
_check_op(series, other, operator.lt)
_check_op(series, other, operator.le)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_constructor_dtype_timedelta64(self):
# basic
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# mixed with NaT
from pandas import tslib
td = Series([timedelta(days=1),tslib.NaT ], dtype='m8[ns]' )
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([timedelta(days=1),np.nan ], dtype='m8[ns]' )
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), pd.NaT],dtype='m8[ns]')
self.assertEqual(td.dtype, 'timedelta64[ns]')
# improved inference
# GH5689
td = Series([np.timedelta64(300000000), pd.NaT])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), tslib.iNaT])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(300000000), np.nan])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([pd.NaT, np.timedelta64(300000000)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
td = Series([np.timedelta64(1,'s')])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# these are frequency conversion astypes
#for t in ['s', 'D', 'us', 'ms']:
# self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)
# valid astype
td.astype('int64')
# invalid casting
self.assertRaises(TypeError, td.astype, 'int32')
# this is an invalid casting
def f():
Series([timedelta(days=1), 'foo'],dtype='m8[ns]')
self.assertRaises(Exception, f)
# leave as object here
td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
self.assertEqual(td.dtype, 'object')
# these will correctly infer a timedelta
s = Series([None, pd.NaT, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([np.nan, pd.NaT, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([pd.NaT, None, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
s = Series([pd.NaT, np.nan, '1 Day'])
self.assertEqual(s.dtype,'timedelta64[ns]')
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(
Exception, self.objSeries.__add__, np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(
Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24, rs.index).astype(
'int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assert_isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
expected = Series(
[Timestamp('20111230'), Timestamp('20120101'), Timestamp('20120103')])
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series([timedelta(days=4017 + i) for i in range(3)])
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5,seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5,seconds=1))
self.assertEqual(rs[2], value)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series(
[Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series(
[Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
expected = Series(
[Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])
assert_series_equal(result, expected)
# operate with np.timedelta64 correctly
result = s + np.timedelta64(1, 's')
result2 = np.timedelta64(1, 's') + s
expected = Series(
[Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + np.timedelta64(5, 'ms')
result2 = np.timedelta64(5, 'ms') + s
expected = Series(
[Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# valid DateOffsets
for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
'Milli', 'Nano' ]:
op = getattr(pd.offsets,do)
s + op(5)
op(5) + s
# invalid DateOffsets
for do in [ 'Week', 'BDay', 'BQuarterEnd', 'BMonthEnd', 'BYearEnd',
'BYearBegin','BQuarterBegin', 'BMonthBegin',
'MonthEnd','YearBegin', 'YearEnd',
'MonthBegin', 'QuarterBegin' ]:
op = getattr(pd.offsets,do)
self.assertRaises(TypeError, s.__add__, op(5))
self.assertRaises(TypeError, s.__radd__, op(5))
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) -Series(
[timedelta(seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2,td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = Series([timedelta(seconds=0)] * 3) -Series(
[timedelta(seconds=1)] * 3)
self.assertEqual(result.dtype, 'm8[ns]')
assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
assert_series_equal(result2, expected)
# roundtrip
assert_series_equal(result + td2,td1)
def test_timedelta64_operations_with_integers(self):
# GH 4521
# divide/multiply by integers
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
s2 = Series([2, 3, 4])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result,expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 / s2
assert_series_equal(result,expected)
result = s1 / 2
expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
s2 = Series([20, 30, 40])
expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result,expected)
for dtype in ['int32','int16','uint32','uint64','uint32','uint16','uint8']:
s2 = Series([20, 30, 40],dtype=dtype)
expected = Series(s1.values.astype(np.int64) * s2.astype(np.int64), dtype='m8[ns]')
expected[2] = np.nan
result = s1 * s2
assert_series_equal(result,expected)
result = s1 * 2
expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
result = s1 * -1
expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
expected[2] = np.nan
assert_series_equal(result,expected)
# invalid ops
for op in ['__true_div__','__div__','__mul__']:
sop = getattr(s1,op,None)
if sop is not None:
self.assertRaises(TypeError, sop, s2.astype(float))
self.assertRaises(TypeError, sop, 2.)
for op in ['__add__','__sub__']:
sop = getattr(s1,op,None)
if sop is not None:
self.assertRaises(TypeError, sop, 1)
self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
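        # Dividing a timedelta64 Series by np.timedelta64(m, unit) should
        # match an element-wise apply; for m == 1 (and unit != 'ns') an
        # astype to timedelta64[unit] is expected to give the same result.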
startdate = Series(date_range('2013-01-01', '2013-01-03'))
enddate = Series(date_range('2013-03-01', '2013-03-03'))
s1 = enddate - startdate
s1[2] = np.nan
for m in [1, 3, 10]:
for unit in ['D','h','m','s','ms','us','ns']:
# op
expected = s1.apply(lambda x: x / np.timedelta64(m,unit))
result = s1 / np.timedelta64(m,unit)
assert_series_equal(result, expected)
if m == 1 and unit != 'ns':
# astype
result = s1.astype("timedelta64[{0}]".format(unit))
assert_series_equal(result, expected)
# reverse op
expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)
result = np.timedelta64(m,unit) / s1
# astype
s = Series(date_range('20130101',periods=3))
result = s.astype(object)
self.assertIsInstance(result.iloc[0],datetime)
self.assertTrue(result.dtype == np.object_)
result = s1.astype(object)
self.assertIsInstance(result.iloc[0],timedelta)
self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
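        # Adding or subtracting an equivalent np.timedelta64 and
        # datetime.timedelta should give identical results for every
        # combination of day/hour/minute/second/microsecond components.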
ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
Timestamp('20130228 22:00:00'),
Timestamp('20130228 21:00:00')])
intervals = 'D', 'h', 'm', 's', 'us'
npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60 * 60 * 1000000,
'm': 60 * 1000000, 's': 1000000, 'us': 1}
def timedelta64(*args):
return sum(starmap(np.timedelta64, zip(args, intervals)))
for op, d, h, m, s, us in product([operator.add, operator.sub],
*([range(2)] * 5)):
nptd = timedelta64(d, h, m, s, us)
pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
microseconds=us)
lhs = op(ser, nptd)
rhs = op(ser, pytd)
try:
assert_series_equal(lhs, rhs)
except:
                raise AssertionError(
                    "invalid comparison [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs))
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'),index=['A','B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
def test_operators_datetimelike(self):
def run_ops(ops, get_ser, test_ser):
for op in ops:
try:
op = getattr(get_ser, op, None)
if op is not None:
self.assertRaises(TypeError, op, test_ser)
except:
com.pprint_thing("Failed on op %r" % op)
raise
### timedelta64 ###
td1 = Series([timedelta(minutes=5,seconds=3)]*3)
td2 = timedelta(minutes=5,seconds=4)
ops = ['__mul__','__floordiv__','__pow__',
'__rmul__','__rfloordiv__','__rpow__']
run_ops(ops, td1, td2)
td1 + td2
td2 + td1
td1 - td2
td2 - td1
td1 / td2
td2 / td1
### datetime64 ###
dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')])
dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
Timestamp('20120104')])
ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__radd__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, dt1, dt2)
dt1 - dt2
dt2 - dt1
        ### datetime64 with timedelta ###
ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
'__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
'__rpow__']
run_ops(ops, dt1, td1)
dt1 + td1
td1 + dt1
dt1 - td1
# TODO: Decide if this ought to work.
# td1 - dt1
        ### timedelta with datetime64 ###
ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
'__pow__', '__rsub__', '__rmul__', '__rfloordiv__',
'__rtruediv__', '__rdiv__', '__rpow__']
run_ops(ops, td1, dt1)
td1 + dt1
dt1 + td1
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0),timedelta(0),pd.NaT])
result = dt2-dt
assert_series_equal(result,expected)
result = (dt2.to_frame()-dt.to_frame())[0]
assert_series_equal(result,expected)
def test_timedelta64_functions(self):
from datetime import timedelta
from pandas import date_range
# index min/max
td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
Timestamp('20120101')
result = td.idxmin()
self.assertEqual(result, 0)
result = td.idxmax()
self.assertEqual(result, 2)
# GH 2982
# with NaT
td[0] = np.nan
result = td.idxmin()
self.assertEqual(result, 1)
result = td.idxmax()
self.assertEqual(result, 2)
# abs
s1 = Series(date_range('20120101', periods=3))
s2 = Series(date_range('20120102', periods=3))
expected = Series(s2 - s1)
# this fails as numpy returns timedelta64[us]
#result = np.abs(s1-s2)
# assert_frame_equal(result,expected)
result = (s1 - s2).abs()
assert_series_equal(result, expected)
# max/min
result = td.max()
expected = Timedelta('2 days')
self.assertEqual(result, expected)
result = td.min()
expected = Timedelta('1 days')
self.assertEqual(result, expected)
def test_ops_consistency_on_empty(self):
# GH 7869
# consistency on empty
# float
result = Series(dtype=float).sum()
self.assertEqual(result,0)
result = Series(dtype=float).mean()
self.assertTrue(isnull(result))
result = Series(dtype=float).median()
self.assertTrue(isnull(result))
# timedelta64[ns]
result = Series(dtype='m8[ns]').sum()
self.assertEqual(result, Timedelta(0))
result = Series(dtype='m8[ns]').mean()
self.assertTrue(result is pd.NaT)
result = Series(dtype='m8[ns]').median()
self.assertTrue(result is pd.NaT)
def test_timedelta_fillna(self):
#GH 3371
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
td = s.diff()
# reg fillna
result = td.fillna(0)
expected = Series([timedelta(0), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
        # interpreted as seconds
result = td.fillna(1)
expected = Series([timedelta(seconds=1), timedelta(0),
timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
result = td.fillna(timedelta(days=1, seconds=1))
expected = Series([timedelta(days=1, seconds=1), timedelta(0),
timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
result = td.fillna(np.timedelta64(int(1e9)))
expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)])
assert_series_equal(result, expected)
from pandas import tslib
result = td.fillna(tslib.NaT)
expected = Series([tslib.NaT, timedelta(0), timedelta(1),
timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]')
assert_series_equal(result, expected)
# ffill
td[2] = np.nan
result = td.ffill()
expected = td.fillna(0)
expected[0] = np.nan
assert_series_equal(result, expected)
# bfill
td[2] = np.nan
result = td.bfill()
expected = td.fillna(0)
expected[2] = timedelta(days=1, seconds=9*3600+60+1)
assert_series_equal(result, expected)
def test_datetime64_fillna(self):
s = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130102'), Timestamp('20130103 9:01:01')])
s[2] = np.nan
# reg fillna
result = s.fillna(Timestamp('20130104'))
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130104'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
from pandas import tslib
result = s.fillna(tslib.NaT)
expected = s
assert_series_equal(result, expected)
# ffill
result = s.ffill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130101'), Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# bfill
result = s.bfill()
expected = Series([Timestamp('20130101'), Timestamp('20130101'),
Timestamp('20130103 9:01:01'),
Timestamp('20130103 9:01:01')])
assert_series_equal(result, expected)
# GH 6587
        # make sure that we are treating the values as integers when filling
# this also tests inference of a datetime-like with NaT's
s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]')
result = s.fillna(method='backfill')
assert_series_equal(result, expected)
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
assert_series_equal(s.fillna(method='ffill', inplace=False), s)
def test_fillna_raise(self):
s = Series(np.random.randint(-100, 100, 50))
self.assertRaises(TypeError, s.fillna, [1, 2])
self.assertRaises(TypeError, s.fillna, (1, 2))
def test_raise_on_info(self):
s = Series(np.random.randn(10))
with tm.assertRaises(AttributeError):
s.info()
def test_isnull_for_inf(self):
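        # With "mode.use_inf_as_null" enabled, inf counts as missing for
        # both isnull() and dropna().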
s = Series(['a', np.inf, np.nan, 1.0])
with pd.option_context('mode.use_inf_as_null', True):
r = s.isnull()
dr = s.dropna()
e = Series([False, True, True, False])
de = Series(['a', 1.0], index=[0, 3])
tm.assert_series_equal(r, e)
tm.assert_series_equal(dr, de)
# TimeSeries-specific
def test_fillna(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))
ts[2] = np.NaN
self.assert_numpy_array_equal(ts.fillna(method='ffill'),
[0., 1., 1., 3., 4.])
self.assert_numpy_array_equal(ts.fillna(method='backfill'),
[0., 1., 3., 3., 4.])
self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])
self.assertRaises(ValueError, ts.fillna)
self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')
# GH 5703
s1 = Series([np.nan])
s2 = Series([1])
result = s1.fillna(s2)
expected = Series([1.])
assert_series_equal(result,expected)
result = s1.fillna({})
assert_series_equal(result,s1)
result = s1.fillna(Series(()))
assert_series_equal(result,s1)
result = s2.fillna(s1)
assert_series_equal(result,s2)
result = s1.fillna({ 0 : 1})
assert_series_equal(result,expected)
result = s1.fillna({ 1 : 1})
assert_series_equal(result,Series([np.nan]))
result = s1.fillna({ 0 : 1, 1 : 1})
assert_series_equal(result,expected)
result = s1.fillna(Series({ 0 : 1, 1 : 1}))
assert_series_equal(result,expected)
result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))
assert_series_equal(result,s1)
s1 = Series([0, 1, 2], list('abc'))
s2 = Series([0, np.nan, 2], list('bac'))
result = s2.fillna(s1)
expected = Series([0,0,2.], list('bac'))
assert_series_equal(result,expected)
# limit
s = Series(np.nan,index=[0,1,2])
result = s.fillna(999,limit=1)
expected = Series([999,np.nan,np.nan],index=[0,1,2])
assert_series_equal(result,expected)
result = s.fillna(999,limit=2)
expected = Series([999,999,np.nan],index=[0,1,2])
assert_series_equal(result,expected)
def test_fillna_bug(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
filled = x.fillna(method='ffill')
expected = Series([nan, 1., 1., 3., 3.], x.index)
assert_series_equal(filled, expected)
filled = x.fillna(method='bfill')
expected = Series([1., 1., 3., 3., nan], x.index)
assert_series_equal(filled, expected)
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
try:
self.ts.fillna(method='ffil')
except ValueError as inst:
self.assertIn('ffil', str(inst))
def test_ffill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.ffill(), ts.fillna(method='ffill'))
def test_bfill(self):
ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))
ts[2] = np.NaN
assert_series_equal(ts.bfill(), ts.fillna(method='bfill'))
def test_sub_of_datetime_from_TimeSeries(self):
from pandas.tseries.timedeltas import to_timedelta
from datetime import datetime
        a = Timestamp(datetime(1993, 1, 7, 13, 30, 0))
b = datetime(1993, 6, 22, 13, 30)
a = Series([a])
result = to_timedelta(np.abs(a - b))
self.assertEqual(result.dtype, 'timedelta64[ns]')
def test_datetime64_with_index(self):
# arithmetic integer ops with an index
s = Series(np.random.randn(5))
expected = s-s.index.to_series()
result = s-s.index
assert_series_equal(result,expected)
# GH 4629
# arithmetic datetime64 ops with an index
s = Series(date_range('20130101',periods=5),index=date_range('20130101',periods=5))
expected = s-s.index.to_series()
result = s-s.index
assert_series_equal(result,expected)
result = s-s.index.to_period()
assert_series_equal(result,expected)
df = DataFrame(np.random.randn(5,2),index=date_range('20130101',periods=5))
df['date'] = Timestamp('20130102')
df['expected'] = df['date'] - df.index.to_series()
df['result'] = df['date'] - df.index
assert_series_equal(df['result'],df['expected'])
def test_timedelta64_nan(self):
from pandas import tslib
td = Series([timedelta(days=i) for i in range(10)])
# nan ops on timedeltas
td1 = td.copy()
td1[0] = np.nan
self.assertTrue(isnull(td1[0]))
self.assertEqual(td1[0].value, tslib.iNaT)
td1[0] = td[0]
self.assertFalse(isnull(td1[0]))
td1[1] = tslib.iNaT
self.assertTrue(isnull(td1[1]))
self.assertEqual(td1[1].value, tslib.iNaT)
td1[1] = td[1]
self.assertFalse(isnull(td1[1]))
td1[2] = tslib.NaT
self.assertTrue(isnull(td1[2]))
self.assertEqual(td1[2].value, tslib.iNaT)
td1[2] = td[2]
self.assertFalse(isnull(td1[2]))
# boolean setting
# this doesn't work, not sure numpy even supports it
#result = td[(td>np.timedelta64(timedelta(days=3))) & (td<np.timedelta64(timedelta(days=7)))] = np.nan
#self.assertEqual(isnull(result).sum(), 7)
    # NumPy limitation =(
# def test_logical_range_select(self):
# np.random.seed(12345)
# selector = -0.5 <= self.ts <= 0.5
# expected = (self.ts >= -0.5) & (self.ts <= 0.5)
# assert_series_equal(selector, expected)
def test_operators_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series([Decimal('1.3'), Decimal('2.3')],
index=[date(2012, 1, 1), date(2012, 1, 2)])
result = s + s.shift(1)
result2 = s.shift(1) + s
self.assertTrue(isnull(result[0]))
self.assertTrue(isnull(result2[0]))
s = Series(['foo', 'bar', 'baz', np.nan])
result = 'prefix_' + s
expected = Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan])
assert_series_equal(result, expected)
result = s + '_suffix'
expected = Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan])
assert_series_equal(result, expected)
def test_object_comparisons(self):
s = Series(['a', 'b', np.nan, 'c', 'a'])
result = s == 'a'
expected = Series([True, False, False, False, True])
assert_series_equal(result, expected)
result = s < 'a'
expected = Series([False, False, False, False, False])
assert_series_equal(result, expected)
result = s != 'a'
expected = -(s == 'a')
assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
# test that comparisons work
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
val = s[5]
f = getattr(operator, op)
result = f(s, val)
expected = f(s.dropna(), val).reindex(s.index)
if op == 'ne':
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
assert_series_equal(result, expected)
        # reversed comparisons (scalar on the left-hand side) are not exercised yet:
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# assert_series_equal(result, expected)
# boolean &, |, ^ should work with object arrays and propagate NAs
ops = ['and_', 'or_', 'xor']
mask = s.isnull()
for bool_op in ops:
f = getattr(operator, bool_op)
filled = s.fillna(s[0])
result = f(s < s[9], s > s[3])
expected = f(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
# GH4968
# invalid date/int comparisons
s = Series(range(5))
s2 = Series(date_range('20010101', periods=5))
for (x, y) in [(s,s2),(s2,s)]:
self.assertRaises(TypeError, lambda : x == y)
self.assertRaises(TypeError, lambda : x != y)
self.assertRaises(TypeError, lambda : x >= y)
self.assertRaises(TypeError, lambda : x > y)
self.assertRaises(TypeError, lambda : x < y)
self.assertRaises(TypeError, lambda : x <= y)
def test_more_na_comparisons(self):
left = Series(['a', np.nan, 'c'])
right = Series(['a', np.nan, 'd'])
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_comparison_different_length(self):
a = Series(['a', 'b', 'c'])
b = Series(['b', 'a'])
self.assertRaises(ValueError, a.__lt__, b)
a = Series([1, 2])
b = Series([2, 3, 4])
self.assertRaises(ValueError, a.__eq__, b)
def test_comparison_label_based(self):
# GH 4947
# comparisons should be label based
a = Series([True, False, True], list('bca'))
b = Series([False, True, False], list('abc'))
expected = Series([True, False, False], list('bca'))
result = a & b
assert_series_equal(result,expected)
expected = Series([True, False, True], list('bca'))
result = a | b
assert_series_equal(result,expected)
expected = Series([False, False, True], list('bca'))
result = a ^ b
assert_series_equal(result,expected)
# rhs is bigger
a = Series([True, False, True], list('bca'))
b = Series([False, True, False, True], list('abcd'))
expected = Series([True, False, False], list('bca'))
result = a & b
assert_series_equal(result,expected)
expected = Series([True, False, True], list('bca'))
result = a | b
assert_series_equal(result,expected)
# filling
# vs empty
result = a & Series([])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result,expected)
result = a | Series([])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result,expected)
# vs non-matching
result = a & Series([1],['z'])
expected = Series([False, False, False], list('bca'))
assert_series_equal(result,expected)
result = a | Series([1],['z'])
expected = Series([True, False, True], list('bca'))
assert_series_equal(result,expected)
# identity
# we would like s[s|e] == s to hold for any e, whether empty or not
for e in [Series([]),Series([1],['z']),Series(['z']),Series(np.nan,b.index),Series(np.nan,a.index)]:
result = a[a | e]
assert_series_equal(result,a[a])
# vs scalars
index = list('bca')
t = Series([True,False,True])
for v in [True,1,2]:
result = Series([True,False,True],index=index) | v
expected = Series([True,True,True],index=index)
assert_series_equal(result,expected)
for v in [np.nan,'foo']:
self.assertRaises(TypeError, lambda : t | v)
for v in [False,0]:
result = Series([True,False,True],index=index) | v
expected = Series([True,False,True],index=index)
assert_series_equal(result,expected)
for v in [True,1]:
result = Series([True,False,True],index=index) & v
expected = Series([True,False,True],index=index)
assert_series_equal(result,expected)
for v in [False,0]:
result = Series([True,False,True],index=index) & v
expected = Series([False,False,False],index=index)
assert_series_equal(result,expected)
for v in [np.nan]:
self.assertRaises(TypeError, lambda : t & v)
def test_operators_bitwise(self):
# GH 9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
s_0101 = Series([0,1,0,1])
s_0123 = Series(range(4),dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4),dtype='int64')
assert_series_equal(res, expected)
res = s_0123 | s_4444
expected = Series(range(4, 8),dtype='int64')
assert_series_equal(res, expected)
s_a0b1c0 = Series([1], list('b'))
res = s_tft & s_a0b1c0
expected = s_tff
assert_series_equal(res, expected)
res = s_tft | s_a0b1c0
expected = s_tft
assert_series_equal(res, expected)
n0 = 0
res = s_tft & n0
expected = s_fff
assert_series_equal(res, expected)
res = s_0123 & n0
expected = Series([0] * 4)
assert_series_equal(res, expected)
n1 = 1
res = s_tft & n1
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & n1
expected = Series([0, 1, 0, 1])
assert_series_equal(res, expected)
s_1111 = Series([1]*4, dtype='int8')
res = s_0123 & s_1111
expected = Series([0, 1, 0, 1], dtype='int64')
assert_series_equal(res, expected)
res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
expected = Series([1, 1, 3, 3], dtype='int32')
assert_series_equal(res, expected)
self.assertRaises(TypeError, lambda: s_1111 & 'a')
self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d'])
self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
self.assertRaises(TypeError, lambda: s_0123 & 3.14)
self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])
# s_0123 will be all false now because of reindexing like s_tft
assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))
# s_tft will be all false now because of reindexing like s_0123
assert_series_equal(s_0123 & s_tft, Series([False] * 4))
assert_series_equal(s_0123 & False, Series([False] * 4))
assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
assert_series_equal(s_0123 & [False], Series([False] * 4))
assert_series_equal(s_0123 & (False), Series([False] * 4))
assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4))
s_ftft = Series([False, True, False, True])
assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)
s_abNd = Series(['a','b',np.NaN,'d'])
res = s_0123 & s_abNd
expected = s_ftft
assert_series_equal(res, expected)
def test_between(self):
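        # between() includes both endpoints by default; inclusive=False
        # excludes them.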
s = Series(bdate_range('1/1/2000', periods=20).asobject)
s[::2] = np.nan
result = s[s.between(s[3], s[17])]
expected = s[3:18].dropna()
assert_series_equal(result, expected)
result = s[s.between(s[3], s[17], inclusive=False)]
expected = s[5:16].dropna()
assert_series_equal(result, expected)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
        # gets coerced to float
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_scalar_na_cmp_corners(self):
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
def tester(a, b):
return a & b
self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))
s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
s[::2] = np.nan
expected = Series(True,index=s.index)
expected[::2] = False
assert_series_equal(tester(s, list(s)), expected)
d = DataFrame({'A': s})
        # TODO: fix this exception (see GH 5035); previously this was a
        # TypeError because Series returned NotImplemented
self.assertRaises(ValueError, tester, s, d)
def test_idxmin(self):
# test idxmin
        # the _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmin()], self.series.min())
self.assertTrue(isnull(self.series.idxmin(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmin()], nona.min())
self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
nona.values.argmin())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmin()))
# datetime64[ns]
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmin()
self.assertEqual(result, 0)
s[0] = np.nan
result = s.idxmin()
self.assertEqual(result, 1)
def test_idxmax(self):
# test idxmax
        # the _check_stat_op approach cannot be used here because of the isnull check.
# add some NaNs
self.series[5:15] = np.NaN
# skipna or no
self.assertEqual(self.series[self.series.idxmax()], self.series.max())
self.assertTrue(isnull(self.series.idxmax(skipna=False)))
# no NaNs
nona = self.series.dropna()
self.assertEqual(nona[nona.idxmax()], nona.max())
self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
nona.values.argmax())
# all NaNs
allna = self.series * nan
self.assertTrue(isnull(allna.idxmax()))
from pandas import date_range
s = Series(date_range('20130102', periods=6))
result = s.idxmax()
self.assertEqual(result, 5)
s[5] = np.nan
result = s.idxmax()
self.assertEqual(result, 4)
# Float64Index
# GH 5914
s = pd.Series([1,2,3],[1.1,2.1,3.1])
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
s = pd.Series(s.index, s.index)
result = s.idxmax()
self.assertEqual(result, 3.1)
result = s.idxmin()
self.assertEqual(result, 1.1)
def test_ndarray_compat(self):
# test numpy compat with Series as sub-class of NDFrame
tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
index=date_range('1/1/2000', periods=1000))
def f(x):
return x[x.argmax()]
result = tsdf.apply(f)
expected = tsdf.max()
assert_series_equal(result,expected)
# .item()
s = Series([1])
result = s.item()
self.assertEqual(result, 1)
self.assertEqual(s.item(), s.iloc[0])
# using an ndarray like function
s = Series(np.random.randn(10))
result = np.ones_like(s)
expected = Series(1,index=range(10),dtype='float64')
#assert_series_equal(result,expected)
# ravel
s = Series(np.random.randn(10))
tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))
# compress
# GH 6658
s = Series([0,1.,-1],index=list('abc'))
result = np.compress(s>0,s)
assert_series_equal(result, Series([1.],index=['b']))
result = np.compress(s<-1,s)
assert_series_equal(result, Series([],dtype='float64'))
    def test_complex(self):
# GH4819
# complex access for ndarray compat
a = np.arange(5)
b = Series(a + 4j*a)
tm.assert_almost_equal(a,b.real)
tm.assert_almost_equal(4*a,b.imag)
b.real = np.arange(5)+5
tm.assert_almost_equal(a+5,b.real)
tm.assert_almost_equal(4*a,b.imag)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1,2,3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2,2,2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(dict(a = [1,2,3], b = [1,2,3], c = [1,2,3], val = [0,1,0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df,expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment',None)
df = DataFrame({ "aa":range(5), "bb":[2.2]*5})
df["cc"] = 0.0
ck = [True]*len(df)
df["bb"].iloc[0] = .13
df_tmp = df.iloc[ck]
df["bb"].iloc[0] = .15
self.assertEqual(df['bb'].iloc[0], 0.15)
pd.set_option('chained_assignment','raise')
# GH 3217
df = DataFrame(dict(a = [1,3], b = [np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'],index=[0]))
expected = DataFrame(dict(a = [1,3], b = [np.nan, 2], c = ['foo',np.nan]))
tm.assert_frame_equal(df,expected)
def test_operators_corner(self):
series = self.ts
empty = Series([], index=Index([]))
result = series + empty
self.assertTrue(np.isnan(result).all())
result = empty + Series([], index=Index([]))
self.assertEqual(len(result), 0)
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = self.ts.astype(int)[:-5]
added = self.ts + int_ts
expected = self.ts.values[:-5] + int_ts.values
self.assert_numpy_array_equal(added[:-5], expected)
def test_operators_reverse_object(self):
# GH 56
arr = Series(np.random.randn(10), index=np.arange(10),
dtype=object)
def _check_op(arr, op):
result = op(1., arr)
expected = op(1., arr.astype(float))
assert_series_equal(result.astype(float), expected)
_check_op(arr, operator.add)
_check_op(arr, operator.sub)
_check_op(arr, operator.mul)
_check_op(arr, operator.truediv)
_check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
import operator
# GH 353
vals = Series(tm.rands_array(5, 10))
result = 'foo_' + vals
expected = vals.map(lambda x: 'foo_' + x)
assert_series_equal(result, expected)
frame = DataFrame({'vals': vals})
result = 'foo_' + frame
expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
tm.assert_frame_equal(result, expected)
# really raise this time
self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
# rpow does not work with DataFrame
df = DataFrame({'A': self.ts})
tm.assert_almost_equal(self.ts + self.ts, (self.ts + df)['A'])
tm.assert_almost_equal(self.ts ** self.ts, (self.ts ** df)['A'])
tm.assert_almost_equal(self.ts < self.ts, (self.ts < df)['A'])
tm.assert_almost_equal(self.ts / self.ts, (self.ts / df)['A'])
def test_operators_combine(self):
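        # _check_fill: with a fill_value, a value missing on one side is
        # replaced by fill_value before the op, while positions missing on
        # both sides stay NaN.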
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isnull(a)
bmask = isnull(b)
exp_values = []
for i in range(len(exp_index)):
if amask[i]:
if bmask[i]:
exp_values.append(nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
assert_series_equal(result, expected)
a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))
pairings = []
for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, 'r' + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
if compat.PY3:
pairings.append((Series.div, operator.truediv, 1))
pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))
else:
pairings.append((Series.div, operator.div, 1))
pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))
for op, equiv_op, fv in pairings:
result = op(a, b)
exp = equiv_op(a, b)
assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
def test_combine_first(self):
values = tm.makeIntIndex(20).values.astype(float)
series = Series(values, index=tm.makeIntIndex(20))
series_copy = series * 2
series_copy[::2] = np.NaN
# nothing used from the input
combined = series.combine_first(series_copy)
self.assert_numpy_array_equal(combined, series)
# Holes filled from input
combined = series_copy.combine_first(series)
self.assertTrue(np.isfinite(combined).all())
self.assert_numpy_array_equal(combined[::2], series[::2])
self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])
# mixed types
index = tm.makeStringIndex(20)
floats = Series(tm.randn(20), index=index)
strings = Series(tm.makeStringIndex(10), index=index[::2])
combined = strings.combine_first(floats)
tm.assert_dict_equal(strings, combined, compare_keys=False)
tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)
# corner case
s = Series([1., 2, 3], index=[0, 1, 2])
result = s.combine_first(Series([], index=[]))
assert_series_equal(s, result)
def test_update(self):
s = Series([1.5, nan, 3., 4., nan])
s2 = Series([nan, 3.5, nan, 5.])
s.update(s2)
expected = Series([1.5, 3.5, 3., 5., np.nan])
assert_series_equal(s, expected)
# GH 3217
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df['c'] = np.nan
# this will fail as long as series is a sub-class of ndarray
# df['c'].update(Series(['foo'],index=[0])) #####
def test_corr(self):
tm._skip_if_no_scipy()
import scipy.stats as stats
# full overlap
self.assertAlmostEqual(self.ts.corr(self.ts), 1)
# partial overlap
self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)
self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))
# No overlap
self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.corr(cp)))
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
result = A.corr(B)
expected, _ = stats.pearsonr(A, B)
self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
tm._skip_if_no_scipy()
import scipy
import scipy.stats as stats
# kendall and spearman
A = tm.makeTimeSeries()
B = tm.makeTimeSeries()
A[-5:] = A[:5]
result = A.corr(B, method='kendall')
expected = stats.kendalltau(A, B)[0]
self.assertAlmostEqual(result, expected)
result = A.corr(B, method='spearman')
expected = stats.spearmanr(A, B)[0]
self.assertAlmostEqual(result, expected)
# these methods got rewritten in 0.8
if scipy.__version__ < LooseVersion('0.9'):
raise nose.SkipTest("skipping corr rank because of scipy version "
"{0}".format(scipy.__version__))
# results from R
A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
0.76910310, -0.06430576, -2.09704447, 0.40660407,
-0.89926396, 0.94209606])
B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
-0.01680292, 1.17258718, -1.06009347, -0.10222060,
-0.89076239, 0.89372375])
kexp = 0.4319297
sexp = 0.5853767
self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
# full overlap
self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)
# partial overlap
self.assertAlmostEqual(
self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)
# No overlap
self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))
# all NA
cp = self.ts[:10].copy()
cp[:] = np.nan
self.assertTrue(isnull(cp.cov(cp)))
# min_periods
self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))
ts1 = self.ts[:15].reindex(self.ts.index)
ts2 = self.ts[5:].reindex(self.ts.index)
self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))
def test_copy(self):
ts = self.ts.copy()
ts[::2] = np.NaN
# Did not modify original Series
self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
self.assertEqual(self.ts.count(), len(self.ts))
self.ts[::2] = np.NaN
self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())
def test_dtype(self):
self.assertEqual(self.ts.dtype, np.dtype('float64'))
self.assertEqual(self.ts.dtypes, np.dtype('float64'))
self.assertEqual(self.ts.ftype, 'float64:dense')
self.assertEqual(self.ts.ftypes, 'float64:dense')
assert_series_equal(self.ts.get_dtype_counts(),Series(1,['float64']))
assert_series_equal(self.ts.get_ftype_counts(),Series(1,['float64:dense']))
def test_dot(self):
a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
columns=['p', 'q', 'r', 's']).T
result = a.dot(b)
expected = Series(np.dot(a.values, b.values),
index=['1', '2', '3'])
assert_series_equal(result, expected)
# Check index alignment
b2 = b.reindex(index=reversed(b.index))
        result = a.dot(b2)
assert_series_equal(result, expected)
# Check ndarray argument
result = a.dot(b.values)
self.assertTrue(np.all(result == expected.values))
assert_almost_equal(a.dot(b['2'].values), expected['2'])
# Check series argument
assert_almost_equal(a.dot(b['1']), expected['1'])
assert_almost_equal(a.dot(b2['1']), expected['1'])
self.assertRaises(Exception, a.dot, a.values[:3])
self.assertRaises(ValueError, a.dot, b.T)
def test_value_counts_nunique(self):
# basics.rst doc example
series = Series(np.random.randn(500))
series[20:500] = np.nan
series[10:20] = 5000
result = series.nunique()
self.assertEqual(result, 11)
def test_unique(self):
# 714 also, dtype=float
s = Series([1.2345] * 100)
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
s = Series([1.2345] * 100, dtype='f4')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# NAs in object arrays #714
s = Series(['foo'] * 100, dtype='O')
s[::2] = np.nan
result = s.unique()
self.assertEqual(len(result), 2)
# decision about None
s = Series([1, 2, 3, None, None, None], dtype=object)
result = s.unique()
expected = np.array([1, 2, 3, None], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_dropna_empty(self):
s = Series([])
self.assertEqual(len(s.dropna()), 0)
s.dropna(inplace=True)
self.assertEqual(len(s), 0)
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
self.assertEqual(s.dropna().sum('rows'), 3)
self.assertEqual(s._get_axis_number('rows'), 0)
self.assertEqual(s._get_axis_name('rows'), 'index')
def test_drop_duplicates(self):
s = Series([1, 2, 3, 3])
result = s.duplicated()
expected = Series([False, False, False, True])
assert_series_equal(result, expected)
result = s.duplicated(take_last=True)
expected = Series([False, False, True, False])
assert_series_equal(result, expected)
result = s.drop_duplicates()
expected = s[[True, True, True, False]]
assert_series_equal(result, expected)
sc = s.copy()
sc.drop_duplicates(inplace=True)
assert_series_equal(sc, expected)
result = s.drop_duplicates(take_last=True)
expected = s[[True, True, False, True]]
assert_series_equal(result, expected)
sc = s.copy()
sc.drop_duplicates(take_last=True, inplace=True)
assert_series_equal(sc, expected)
def test_sort(self):
ts = self.ts.copy()
ts.sort()
self.assert_numpy_array_equal(ts, self.ts.order())
self.assert_numpy_array_equal(ts.index, self.ts.order().index)
ts.sort(ascending=False)
self.assert_numpy_array_equal(ts, self.ts.order(ascending=False))
self.assert_numpy_array_equal(ts.index,
self.ts.order(ascending=False).index)
# GH 5856/5853
# Series.sort operating on a view
df = DataFrame(np.random.randn(10,4))
s = df.iloc[:,0]
def f():
s.sort()
self.assertRaises(ValueError, f)
# test order/sort inplace
# GH6859
ts1 = self.ts.copy()
ts1.sort(ascending=False)
ts2 = self.ts.copy()
ts2.order(ascending=False,inplace=True)
assert_series_equal(ts1,ts2)
ts1 = self.ts.copy()
ts1 = ts1.sort(ascending=False,inplace=False)
ts2 = self.ts.copy()
        ts2 = ts2.order(ascending=False)
assert_series_equal(ts1,ts2)
def test_sort_index(self):
import random
rindex = list(self.ts.index)
random.shuffle(rindex)
random_order = self.ts.reindex(rindex)
sorted_series = random_order.sort_index()
assert_series_equal(sorted_series, self.ts)
# descending
sorted_series = random_order.sort_index(ascending=False)
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
def test_order(self):
ts = self.ts.copy()
ts[:5] = np.NaN
vals = ts.values
result = ts.order()
self.assertTrue(np.isnan(result[-5:]).all())
self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:]))
result = ts.order(na_position='first')
self.assertTrue(np.isnan(result[:5]).all())
self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))
# something object-type
ser = Series(['A', 'B'], [1, 2])
# no failure
ser.order()
# ascending=False
ordered = ts.order(ascending=False)
expected = np.sort(ts.valid().values)[::-1]
assert_almost_equal(expected, ordered.valid().values)
ordered = ts.order(ascending=False, na_position='first')
assert_almost_equal(expected, ordered.valid().values)
def test_nsmallest_nlargest(self):
# float, int, datetime64 (use i8), timedelts64 (same),
# object that are numbers, object that are strings
base = [3, 2, 1, 2, 5]
s_list = [
Series(base, dtype='int8'),
Series(base, dtype='int16'),
Series(base, dtype='int32'),
Series(base, dtype='int64'),
Series(base, dtype='float32'),
Series(base, dtype='float64'),
Series(base, dtype='uint8'),
Series(base, dtype='uint16'),
Series(base, dtype='uint32'),
Series(base, dtype='uint64'),
Series(base).astype('timedelta64[ns]'),
Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),
]
raising = [
Series([3., 2, 1, 2, '5'], dtype='object'),
Series([3., 2, 1, 2, 5], dtype='object'),
# not supported on some archs
# Series([3., 2, 1, 2, 5], dtype='complex256'),
Series([3., 2, 1, 2, 5], dtype='complex128'),
]
for r in raising:
dt = r.dtype
msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt
args = 2, len(r), 0, -1
methods = r.nlargest, r.nsmallest
for method, arg in product(methods, args):
with tm.assertRaisesRegexp(TypeError, msg):
method(arg)
for s in s_list:
assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])
assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])
assert_series_equal(s.nlargest(3, take_last=True),
s.iloc[[4, 0, 3]])
empty = s.iloc[0:0]
assert_series_equal(s.nsmallest(0), empty)
assert_series_equal(s.nsmallest(-1), empty)
assert_series_equal(s.nlargest(0), empty)
assert_series_equal(s.nlargest(-1), empty)
assert_series_equal(s.nsmallest(len(s)), s.order())
assert_series_equal(s.nsmallest(len(s) + 1), s.order())
assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
assert_series_equal(s.nlargest(len(s) + 1),
s.iloc[[4, 0, 1, 3, 2]])
s = Series([3., np.nan, 1, 2, 5])
assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])
def test_rank(self):
tm._skip_if_no_scipy()
from scipy.stats import rankdata
self.ts[::2] = np.nan
self.ts[:10][::3] = 4.
ranks = self.ts.rank()
oranks = self.ts.astype('O').rank()
assert_series_equal(ranks, oranks)
mask = np.isnan(self.ts)
filled = self.ts.fillna(np.inf)
# rankdata returns a ndarray
exp = Series(rankdata(filled),index=filled.index)
exp[mask] = np.nan
assert_almost_equal(ranks, exp)
iseries = Series(np.arange(5).repeat(2))
iranks = iseries.rank()
exp = iseries.astype(float).rank()
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
exp = iseries / 5.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(1, 100))
exp = Series(np.repeat(0.505, 100))
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries[1] = np.nan
exp = Series(np.repeat(50.0 / 99.0, 100))
exp[1] = np.nan
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1.0
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.repeat(np.nan, 100))
exp = iseries.copy()
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series(np.arange(5)) + 1
iseries[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
rng = date_range('1/1/1990', periods=5)
iseries = Series(np.arange(5), rng) + 1
iseries.ix[4] = np.nan
exp = iseries / 4.0
iranks = iseries.rank(pct=True)
assert_series_equal(iranks, exp)
iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20+1e-30, 1e-1])
exp = Series([2, 1, 3.5, 5, 3.5, 6])
iranks = iseries.rank()
assert_series_equal(iranks, exp)
values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
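        # `values` is already sorted ascending, so the rank of values[i] is i + 1
        # and ranking the permuted series should give back random_order + 1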
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_rank_inf(self):
        raise nose.SkipTest('Series.rank does not currently rank np.inf and -np.inf properly')
values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64')
random_order = np.random.permutation(len(values))
iseries = Series(values[random_order])
exp = Series(random_order + 1.0, dtype='float64')
iranks = iseries.rank()
assert_series_equal(iranks, exp)
def test_from_csv(self):
with ensure_clean() as path:
self.ts.to_csv(path)
ts = Series.from_csv(path)
assert_series_equal(self.ts, ts)
self.assertTrue(ts.index.name is None)
self.series.to_csv(path)
series = Series.from_csv(path)
self.assertIsNone(series.name)
self.assertIsNone(series.index.name)
assert_series_equal(self.series, series)
outfile = open(path, 'w')
outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
outfile.close()
series = Series.from_csv(path, sep='|')
checkseries = Series(
{datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})
assert_series_equal(checkseries, series)
series = Series.from_csv(path, sep='|', parse_dates=False)
checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
assert_series_equal(checkseries, series)
def test_to_csv(self):
import io
with ensure_clean() as path:
self.ts.to_csv(path)
lines = io.open(path, newline=None).readlines()
assert(lines[1] != '\n')
self.ts.to_csv(path, index=False)
arr = np.loadtxt(path)
assert_almost_equal(arr, self.ts.values)
def test_to_csv_unicode_index(self):
buf = StringIO()
s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")])
s.to_csv(buf, encoding='UTF-8')
buf.seek(0)
s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')
assert_series_equal(s, s2)
def test_tolist(self):
rs = self.ts.tolist()
xp = self.ts.values.tolist()
assert_almost_equal(rs, xp)
# datetime64
s = Series(self.ts.index)
rs = s.tolist()
self.assertEqual(self.ts.index[0], rs[0])
def test_to_frame(self):
self.ts.name = None
rs = self.ts.to_frame()
xp = pd.DataFrame(self.ts.values, index=self.ts.index)
assert_frame_equal(rs, xp)
self.ts.name = 'testname'
rs = self.ts.to_frame()
xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
rs = self.ts.to_frame(name='testdifferent')
xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index)
assert_frame_equal(rs, xp)
def test_to_dict(self):
self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts)
def test_to_csv_float_format(self):
with ensure_clean() as filename:
ser = Series([0.123456, 0.234567, 0.567567])
ser.to_csv(filename, float_format='%.2f')
rs = Series.from_csv(filename)
xp = Series([0.12, 0.23, 0.57])
assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
s = Series(['jack and jill', 'jesse and frank'])
split = s.str.split(r'\s+and\s+')
buf = StringIO()
split.to_csv(buf)
def test_to_csv_path_is_none(self):
# GH 8215
# Series.to_csv() was returning None, inconsistent with
# DataFrame.to_csv() which returned string
s = Series([1, 2, 3])
csv_str = s.to_csv(path=None)
self.assertIsInstance(csv_str, str)
def test_clip(self):
val = self.ts.median()
self.assertEqual(self.ts.clip_lower(val).min(), val)
self.assertEqual(self.ts.clip_upper(val).max(), val)
self.assertEqual(self.ts.clip(lower=val).min(), val)
self.assertEqual(self.ts.clip(upper=val).max(), val)
result = self.ts.clip(-0.5, 0.5)
expected = np.clip(self.ts, -0.5, 0.5)
assert_series_equal(result, expected)
tm.assert_isinstance(expected, Series)
def test_clip_types_and_nulls(self):
sers = [Series([np.nan, 1.0, 2.0, 3.0]),
Series([None, 'a', 'b', 'c']),
Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))]
for s in sers:
thresh = s[2]
l = s.clip_lower(thresh)
u = s.clip_upper(thresh)
self.assertEqual(l[notnull(l)].min(), thresh)
self.assertEqual(u[notnull(u)].max(), thresh)
self.assertEqual(list(isnull(s)), list(isnull(l)))
self.assertEqual(list(isnull(s)), list(isnull(u)))
def test_valid(self):
ts = self.ts.copy()
ts[::2] = np.NaN
result = ts.valid()
self.assertEqual(len(result), ts.count())
tm.assert_dict_equal(result, ts, compare_keys=False)
def test_isnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
        self.assertTrue(np.array_equal(
            ser.isnull(), Series([False, False, False, True, False]).values))
        ser = Series(["hi", "", nan])
        self.assertTrue(np.array_equal(
            ser.isnull(), Series([False, False, True]).values))
def test_notnull(self):
ser = Series([0, 5.4, 3, nan, -0.001])
np.array_equal(
ser.notnull(), Series([True, True, True, False, True]).values)
ser = Series(["hi", "", nan])
np.array_equal(ser.notnull(), Series([True, True, False]).values)
def test_shift(self):
shifted = self.ts.shift(1)
unshifted = shifted.shift(-1)
tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)
offset = datetools.bday
shifted = self.ts.shift(1, freq=offset)
unshifted = shifted.shift(-1, freq=offset)
assert_series_equal(unshifted, self.ts)
unshifted = self.ts.shift(0, freq=offset)
assert_series_equal(unshifted, self.ts)
shifted = self.ts.shift(1, freq='B')
unshifted = shifted.shift(-1, freq='B')
assert_series_equal(unshifted, self.ts)
# corner case
unshifted = self.ts.shift(0)
assert_series_equal(unshifted, self.ts)
# Shifting with PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.shift(1)
unshifted = shifted.shift(-1)
tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False)
shifted2 = ps.shift(1, 'B')
shifted3 = ps.shift(1, datetools.bday)
assert_series_equal(shifted2, shifted3)
assert_series_equal(ps, shifted2.shift(-1, 'B'))
self.assertRaises(ValueError, ps.shift, freq='D')
# legacy support
shifted4 = ps.shift(1, timeRule='B')
assert_series_equal(shifted2, shifted4)
shifted5 = ps.shift(1, offset=datetools.bday)
assert_series_equal(shifted5, shifted4)
# 32-bit taking
# GH 8129
index=date_range('2000-01-01',periods=5)
for dtype in ['int32','int64']:
s1 = Series(np.arange(5,dtype=dtype),index=index)
p = s1.iloc[1]
result = s1.shift(periods=p)
expected = Series([np.nan,0,1,2,3],index=index)
assert_series_equal(result,expected)
def test_tshift(self):
# PeriodIndex
ps = tm.makePeriodSeries()
shifted = ps.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(unshifted, ps)
shifted2 = ps.tshift(freq='B')
assert_series_equal(shifted, shifted2)
shifted3 = ps.tshift(freq=datetools.bday)
assert_series_equal(shifted, shifted3)
self.assertRaises(ValueError, ps.tshift, freq='M')
# DatetimeIndex
shifted = self.ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(self.ts, unshifted)
shifted2 = self.ts.tshift(freq=self.ts.index.freq)
assert_series_equal(shifted, shifted2)
inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)))
shifted = inferred_ts.tshift(1)
unshifted = shifted.tshift(-1)
assert_series_equal(shifted, self.ts.tshift(1))
assert_series_equal(unshifted, inferred_ts)
no_freq = self.ts[[0, 5, 7]]
self.assertRaises(ValueError, no_freq.tshift)
def test_shift_int(self):
ts = self.ts.astype(int)
shifted = ts.shift(1)
expected = ts.astype(float).shift(1)
assert_series_equal(shifted, expected)
def test_truncate(self):
offset = datetools.bday
ts = self.ts[::3]
start, end = self.ts.index[3], self.ts.index[6]
start_missing, end_missing = self.ts.index[2], self.ts.index[7]
# neither specified
truncated = ts.truncate()
assert_series_equal(truncated, ts)
# both specified
expected = ts[1:3]
truncated = ts.truncate(start, end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(start_missing, end_missing)
assert_series_equal(truncated, expected)
# start specified
expected = ts[1:]
truncated = ts.truncate(before=start)
assert_series_equal(truncated, expected)
truncated = ts.truncate(before=start_missing)
assert_series_equal(truncated, expected)
# end specified
expected = ts[:3]
truncated = ts.truncate(after=end)
assert_series_equal(truncated, expected)
truncated = ts.truncate(after=end_missing)
assert_series_equal(truncated, expected)
# corner case, empty series returned
truncated = ts.truncate(after=self.ts.index[0] - offset)
assert(len(truncated) == 0)
truncated = ts.truncate(before=self.ts.index[-1] + offset)
assert(len(truncated) == 0)
self.assertRaises(ValueError, ts.truncate,
before=self.ts.index[-1] + offset,
after=self.ts.index[0] - offset)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
self.assertEqual(np.ptp(ser), np.ptp(arr))
def test_asof(self):
# array or list or dates
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
val = result[result.index[result.index >= ub][0]]
self.assertEqual(ts[ub], val)
self.ts[5:10] = np.NaN
self.ts[15:20] = np.NaN
val1 = self.ts.asof(self.ts.index[7])
val2 = self.ts.asof(self.ts.index[19])
self.assertEqual(val1, self.ts[4])
self.assertEqual(val2, self.ts[14])
# accepts strings
val1 = self.ts.asof(str(self.ts.index[7]))
self.assertEqual(val1, self.ts[4])
# in there
self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])
# no as of value
d = self.ts.index[0] - datetools.bday
self.assertTrue(np.isnan(self.ts.asof(d)))
def test_getitem_setitem_datetimeindex(self):
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
self.assertEqual(result, expected)
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_pytz(self):
        tm._skip_if_no_pytz()
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
        tm._skip_if_no_dateutil()
from dateutil.tz import tzutc
from dateutil.zoneinfo import gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
self.assertEqual(result, expected)
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
self.assertEqual(result, expected)
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_asof_periodindex(self):
from pandas import period_range, PeriodIndex
# array or list or dates
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
self.assertTrue(notnull(result).all())
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
self.assertTrue((rs == ts[lb]).all())
ts[5:10] = np.NaN
ts[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
self.assertEqual(val1, ts[4])
self.assertEqual(val2, ts[14])
# accepts strings
val1 = ts.asof(str(ts.index[7]))
self.assertEqual(val1, ts[4])
# in there
self.assertEqual(ts.asof(ts.index[3]), ts[3])
# no as of value
d = ts.index[0].to_timestamp() - datetools.bday
self.assertTrue(np.isnan(ts.asof(d)))
def test_asof_more(self):
from pandas import date_range
s = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5],
index=date_range('1/1/2000', periods=9))
dates = s.index[[4, 5, 6, 2, 1]]
result = s.asof(dates)
expected = Series([2, 2, 3, 1, np.nan], index=dates)
assert_series_equal(result, expected)
s = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5],
index=date_range('1/1/2000', periods=9))
result = s.asof(s.index[0])
self.assertEqual(result, s[0])
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100,0,-1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0 ])
mask = s > 0
s2 = s[ mask ].map( str )
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
s = Series([0, 'foo', 'bar', 0 ])
mask = Series([False, True, True, False])
s2 = s[ mask ]
s[mask] = s2
assert_series_equal(s, Series([0, 'foo','bar', 0]))
def test_astype_cast_nan_int(self):
df = Series([1.0, 2.0, 3.0, np.nan])
self.assertRaises(ValueError, df.astype, np.int64)
def test_astype_cast_object_int(self):
arr = Series(["car", "house", "tree", "1"])
self.assertRaises(ValueError, arr.astype, int)
self.assertRaises(ValueError, arr.astype, np.int64)
self.assertRaises(ValueError, arr.astype, np.int8)
arr = Series(['1', '2', '3', '4'], dtype=object)
result = arr.astype(int)
self.assert_numpy_array_equal(result, np.arange(1, 5))
def test_astype_datetimes(self):
import pandas.tslib as tslib
s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
self.assertEqual(s.dtype, 'M8[ns]')
s = s.astype('O')
self.assertEqual(s.dtype, np.object_)
def test_astype_str(self):
# GH4405
digits = string.digits
s1 = Series([digits * 10, tm.rands(63), tm.rands(64),
tm.rands(1000)])
s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])
types = (compat.text_type, np.str_)
for typ in types:
for s in (s1, s2):
res = s.astype(typ)
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
def test_astype_unicode(self):
# GH7758
        # a bit of magic is required to set the default encoding to utf-8
digits = string.digits
test_series = [
Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
Series([u('データーサイエンス、お前はもう死んでいる')]),
]
former_encoding = None
if not compat.PY3:
# in python we can force the default encoding
# for this test
former_encoding = sys.getdefaultencoding()
reload(sys)
sys.setdefaultencoding("utf-8")
if sys.getdefaultencoding() == "utf-8":
test_series.append(Series([u('野菜食べないとやばい').encode("utf-8")]))
for s in test_series:
res = s.astype("unicode")
expec = s.map(compat.text_type)
assert_series_equal(res, expec)
# restore the former encoding
if former_encoding is not None and former_encoding != "utf-8":
reload(sys)
sys.setdefaultencoding(former_encoding)
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_numpy_array_equal(result, self.ts * 2)
def test_map_compat(self):
# related GH 8024
s = Series([True,True,False],index=[1,2,3])
result = s.map({ True : 'foo', False : 'bar' })
expected = Series(['foo','foo','bar'],index=[1,2,3])
assert_series_equal(result,expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
self.assertEqual(left.dtype, np.float_)
self.assertTrue(issubclass(right.dtype.type, np.integer))
merged = left.map(right)
self.assertEqual(merged.dtype, np.float_)
self.assertTrue(isnull(merged['d']))
self.assertTrue(not isnull(merged['c']))
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
self.assertTrue(issubclass(s2.dtype.type, np.integer))
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
self.assertEqual(result.dtype, np.object_)
tm.assert_isinstance(result[0], Decimal)
def test_map_na_exclusion(self):
s = Series([1.5, np.nan, 3, np.nan, 5])
result = s.map(lambda x: x * 2, na_action='ignore')
exp = s * 2
assert_series_equal(result, exp)
def test_map_dict_with_tuple_keys(self):
'''
Due to new MultiIndex-ing behaviour in v0.14.0,
dicts with tuple keys passed to map were being
converted to a multi-index, preventing tuple values
from being mapped properly.
'''
df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})
label_mappings = {
(1,): 'A',
(2,): 'B',
(3, 4): 'A',
(5, 6): 'B'
}
df['labels'] = df['a'].map(label_mappings)
df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
# All labels should be filled now
tm.assert_series_equal(df['labels'], df['expected_labels'])
def test_apply(self):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series([x, x ** 2],
index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_convert_objects(self):
s = Series([1., 2, 3], index=['a', 'b', 'c'])
result = s.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
# force numeric conversion
r = s.copy().astype('O')
r['a'] = '1'
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = '1.'
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, s)
r = s.copy().astype('O')
r['a'] = 'garbled'
expected = s.copy()
expected['a'] = np.nan
result = r.convert_objects(convert_dates=False, convert_numeric=True)
assert_series_equal(result, expected)
# GH 4119, not converting a mixed type (e.g.floats and object)
s = Series([1, 'na', 3, 4])
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
s = Series([1, '', 3, 4])
result = s.convert_objects(convert_numeric=True)
expected = Series([1, np.nan, 3, 4])
assert_series_equal(result, expected)
# dates
s = Series(
[datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])
s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(
2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')
result = s.convert_objects(convert_dates=True, convert_numeric=False)
expected = Series(
[Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')
assert_series_equal(result, expected)
result = s.convert_objects(
convert_dates='coerce', convert_numeric=False)
result = s.convert_objects(
convert_dates='coerce', convert_numeric=True)
assert_series_equal(result, expected)
expected = Series(
[Timestamp(
'20010101'), Timestamp('20010102'), Timestamp('20010103'),
lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')
result = s2.convert_objects(
convert_dates='coerce', convert_numeric=False)
assert_series_equal(result, expected)
result = s2.convert_objects(
convert_dates='coerce', convert_numeric=True)
assert_series_equal(result, expected)
        # preserve all-nans (if convert_dates='coerce')
s = Series(['foo', 'bar', 1, 1.0], dtype='O')
result = s.convert_objects(
convert_dates='coerce', convert_numeric=False)
assert_series_equal(result, s)
        # preserve if non-object
s = Series([1], dtype='float32')
result = s.convert_objects(
convert_dates='coerce', convert_numeric=False)
assert_series_equal(result, s)
#r = s.copy()
#r[0] = np.nan
#result = r.convert_objects(convert_dates=True,convert_numeric=False)
#self.assertEqual(result.dtype, 'M8[ns]')
# dateutil parses some single letters into today's value as a date
for x in 'abcdefghijklmnopqrstuvwxyz':
s = Series([x])
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
s = Series([x.upper()])
result = s.convert_objects(convert_dates='coerce')
assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
s = Series([1, True, 3, 5], dtype=object)
r = s.convert_objects(convert_numeric=True)
e = Series([1, 1, 3, 5], dtype='i8')
tm.assert_series_equal(r, e)
def test_convert_objects_preserve_all_bool(self):
s = Series([False, True, False, False], dtype=object)
r = s.convert_objects(convert_numeric=True)
e = Series([False, True, False, False], dtype=bool)
tm.assert_series_equal(r, e)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',',))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assert_isinstance(result[0], list)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
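            # align with the requested join/fill and compare against manually
            # reindexing both sides onto the joined index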
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
self.assertTrue((aa.reindex(diff_a) == fill).all())
if len(diff_b) > 0:
self.assertTrue((ab.reindex(diff_b) == fill).all())
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
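            # aligning with a fill method should equal manual reindexing onto the
            # joined index followed by fillna with the same method/limit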
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind,
method=meth, limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
self.assertFalse((a[:5] == 5).any())
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
self.assertTrue((a[:5] == 5).all())
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
self.assertFalse((b[:3] == 5).any())
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
a, b = self.ts.align(self.ts, copy=False)
self.assertIs(a.index, self.ts.index)
self.assertIs(b.index, self.ts.index)
# a, b = self.ts.align(self.ts, copy=True)
# self.assertIsNot(a.index, self.ts.index)
# self.assertIsNot(b.index, self.ts.index)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
self.assertTrue(np.may_share_memory(self.series.index, identity.index))
except (AttributeError):
pass
self.assertTrue(identity.index.is_(self.series.index))
self.assertTrue(identity.index.identical(self.series.index))
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
self.assertEqual(val, self.series[idx])
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
self.assertEqual(val, self.ts[idx])
stuffSeries = self.ts.reindex(subIndex)
self.assertTrue(np.isnan(stuffSeries).all())
# This is extremely important for the Cython code to not screw up
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
self.assertEqual(val, self.ts[idx])
# return a copy the same index here
result = self.ts.reindex()
self.assertFalse((result is self.ts))
def test_reindex_corner(self):
# (don't forget to fix this) I think it's fixed
reindexed_dep = self.empty.reindex(self.ts.index, method='pad')
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10),dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1,2,3,4,5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a','g','c','f']
expected = Series([1,1,3,3],index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
# invalid because we can't forward fill on this type of index
self.assertRaises(ValueError, lambda : s.reindex(new_index, method='ffill'))
        # inference of new dtype
s = Series([True,False,False,True],index=list('abcd'))
new_index='agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True,True,False],index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False,index=lrange(0,5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False,index=lrange(0,5))
assert_series_equal(result, expected)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_int.dtype, np.float_)
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
self.assertEqual(reindexed_int.dtype, np.int_)
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
self.assertEqual(reindexed_bool.dtype, np.object_)
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
self.assertEqual(reindexed_bool.dtype, np.bool_)
def test_reindex_bool_pad(self):
# fail
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
self.assertTrue(isnull(filled_bool[:5]).all())
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013,3,5)
day2 = datetime(2013,5,5)
day3 = datetime(2014,3,5)
series1 = Series([5, None, None],[day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
#------------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
#------------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
self.assertTrue(issubclass(result.dtype.type, np.integer))
assert_series_equal(result, expected)
#------------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
#------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_rename(self):
renamer = lambda x: x.strftime('%Y%m%d')
renamed = self.ts.rename(renamer)
self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))
# dict
rename_dict = dict(zip(self.ts.index, renamed.index))
renamed2 = self.ts.rename(rename_dict)
assert_series_equal(renamed, renamed2)
# partial dict
s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
renamed = s.rename({'b': 'foo', 'd': 'bar'})
self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])
# index with name
renamer = Series(
np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'), dtype='int64')
renamed = renamer.rename({})
self.assertEqual(renamed.index.name, renamer.index.name)
def test_rename_inplace(self):
renamer = lambda x: x.strftime('%Y%m%d')
expected = renamer(self.ts.index[0])
self.ts.rename(renamer, inplace=True)
self.assertEqual(self.ts.index[0], expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
self.assertFalse(np.isnan(self.ts[10]))
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
self.assertTrue(tm.equalContents(ts.index != 5, expected))
self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
def test_pad_nan(self):
x = Series([np.nan, 1., np.nan, 3., np.nan],
['z', 'a', 'b', 'c', 'd'], dtype=float)
x.fillna(method='pad', inplace=True)
expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
['z', 'a', 'b', 'c', 'd'], dtype=float)
assert_series_equal(x[1:], expected[1:])
        self.assertTrue(np.isnan(x[0]))
        self.assertTrue(np.isnan(expected[0]))
def test_unstack(self):
from numpy import nan
from pandas.util.testing import assert_frame_equal
index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
s = Series(np.arange(4.), index=index)
unstacked = s.unstack()
expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
index=['bar', 'foo'],
columns=['one', 'three', 'two'])
assert_frame_equal(unstacked, expected)
unstacked = s.unstack(level=0)
assert_frame_equal(unstacked, expected.T)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
labels=[[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)
unstacked = s.unstack(0)
assert_frame_equal(unstacked, expected)
# GH5873
idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
ts = pd.Series([1,2], index=idx)
left = ts.unstack()
right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
columns=[nan, 3.5])
assert_frame_equal(left, right)
idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],
['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]])
ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
columns=['cat', 'dog'])
tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
right.index = pd.MultiIndex.from_tuples(tpls)
assert_frame_equal(ts.unstack(level=0), right)
def test_sortlevel(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
res = s.sortlevel('A')
assert_series_equal(backwards, res)
res = s.sortlevel(['A', 'B'])
assert_series_equal(backwards, res)
res = s.sortlevel('A', sort_remaining=False)
assert_series_equal(s, res)
res = s.sortlevel(['A', 'B'], sort_remaining=False)
assert_series_equal(s, res)
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
assert_series_equal(self.series.tail(), self.series[-5:])
def test_isin(self):
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
result = s.isin(['A', 'C'])
expected = Series([True, False, True, False, False, False, True, True])
assert_series_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
with tm.assertRaises(TypeError):
s.isin('a')
with tm.assertRaises(TypeError):
s = Series(['aaa', 'b', 'c'])
s.isin('aaa')
def test_isin_with_i8(self):
# GH 5021
expected = Series([True,True,False,False,False])
expected2 = Series([False,True,False,False,False])
# datetime64[ns]
s = Series(date_range('jan-01-2013','jan-05-2013'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(s[0:2].values.astype('datetime64[D]'))
assert_series_equal(result, expected)
result = s.isin([s[1]])
assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
assert_series_equal(result, expected2)
# timedelta64[ns]
s = Series(pd.to_timedelta(lrange(5),unit='d'))
result = s.isin(s[0:2])
assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# TimeSeries-specific
def test_cummethods_bool(self):
# GH 6270
# looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
def cummin(x):
return np.minimum.accumulate(x)
def cummax(x):
return np.maximum.accumulate(x)
a = pd.Series([False, False, False, True, True, False, False])
b = ~a
c = pd.Series([False] * len(b))
d = ~c
methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,
'cummin': cummin, 'cummax': cummax}
args = product((a, b, c, d), methods)
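        # each cumulative method on a boolean Series should match its NumPy
        # counterpart applied to the raw values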
for s, method in args:
expected = Series(methods[method](s.values))
result = getattr(s, method)()
assert_series_equal(result, expected)
e = pd.Series([False, True, nan, False])
cse = pd.Series([0, 1, nan, 1], dtype=object)
cpe = pd.Series([False, 0, nan, 0])
cmin = pd.Series([False, False, nan, False])
cmax = pd.Series([False, True, nan, True])
expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,
'cummax': cmax}
for method in methods:
res = getattr(e, method)()
assert_series_equal(res, expecteds[method])
def test_replace(self):
N = 100
ser = Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
ser.replace([np.nan], -1, inplace=True)
exp = ser.fillna(-1)
assert_series_equal(ser, exp)
rs = ser.replace(0., np.nan)
ser[ser == 0.] = np.nan
assert_series_equal(rs, ser)
ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
ser = Series([np.nan, 0, np.inf])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
assert_series_equal(ser.replace(np.inf, 0), filled)
ser = Series(self.ts.index)
assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
# make sure that we aren't just masking a TypeError because bools don't
# implement indexing
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
ser.replace([1, 2], [np.nan, 0])
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
assert_series_equal(result, Series([4, 3, 2, 1, 0]))
# API change from 0.12?
# GH 5319
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
assert_series_equal(result, expected)
ser = Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
assert_series_equal(result, expected)
#GH 5797
ser = Series(date_range('20130101', periods=5))
expected = ser.copy()
expected.loc[2] = Timestamp('20120101')
result = ser.replace({Timestamp('20130103'):
Timestamp('20120101')})
assert_series_equal(result, expected)
result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = Series([0, 1, 2, 3, 4])
result = ser.replace([1,2,3])
assert_series_equal(result, Series([0,0,0,0,4]))
s = ser.copy()
s.replace([1,2,3],inplace=True)
assert_series_equal(s, Series([0,0,0,0,4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
with tm.assertRaises(ValueError):
s.replace([1,2,3],inplace=True,method='crash_cymbal')
assert_series_equal(s, ser)
def test_replace_mixed_types(self):
s = Series(np.arange(5),dtype='int64')
def check_replace(to_rep, val, expected):
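            # exercise replace both out-of-place and in-place and require the
            # same result from each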
sc = s.copy()
r = s.replace(to_rep, val)
sc.replace(to_rep, val, inplace=True)
assert_series_equal(expected, r)
assert_series_equal(expected, sc)
# should NOT upcast to float
e = Series([0,1,2,3,4])
tr, v = [3], [3.0]
check_replace(tr, v, e)
# MUST upcast to float
e = Series([0,1,2,3.5,4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = Series([0,1,2,3.5,'a'])
tr, v = [3,4], [3.5,'a']
check_replace(tr, v, e)
# again casts to object
e = Series([0,1,2,3.5,Timestamp('20130101')])
tr, v = [3,4],[3.5,Timestamp('20130101')]
check_replace(tr, v, e)
# casts to float
e = Series([0,1,2,3.5,1])
tr, v = [3,4],[3.5,True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = date_range('1/1/2001', '1/10/2001',
freq='D').to_series().reset_index(drop=True)
r = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])
assert_series_equal(r, Series([1.0,2,'a'] +
dr[3:].tolist(),dtype=object))
def test_replace_bool_with_string_no_op(self):
s = Series([True, False, True])
result = s.replace('fun', 'in-the-sun')
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = Series([True, False, True])
result = s.replace(True, '2u')
expected = Series(['2u', False, '2u'])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = Series([True, False, True])
result = s.replace(True, False)
expected = Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = Series([True, False, True])
with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
s.replace({'asdf': 'asdb', True: 'yes'})
def test_asfreq(self):
ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),
datetime(2009, 11, 30),
datetime(2009, 12, 31)])
daily_ts = ts.asfreq('B')
monthly_ts = daily_ts.asfreq('BM')
self.assert_numpy_array_equal(monthly_ts, ts)
daily_ts = ts.asfreq('B', method='pad')
monthly_ts = daily_ts.asfreq('BM')
self.assert_numpy_array_equal(monthly_ts, ts)
daily_ts = ts.asfreq(datetools.bday)
monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)
self.assert_numpy_array_equal(monthly_ts, ts)
result = ts[:0].asfreq('M')
self.assertEqual(len(result), 0)
self.assertIsNot(result, ts)
def test_diff(self):
# Just run the function
self.ts.diff()
# int dtype
a = 10000000000000000
b = a + 1
s = Series([a, b])
rs = s.diff()
self.assertEqual(rs[1], 1)
# neg n
rs = self.ts.diff(-1)
xp = self.ts - self.ts.shift(-1)
assert_series_equal(rs, xp)
# 0
rs = self.ts.diff(0)
xp = self.ts - self.ts
assert_series_equal(rs, xp)
# datetime diff (GH3100)
s = Series(date_range('20130102', periods=5))
rs = s - s.shift(1)
xp = s.diff()
assert_series_equal(rs, xp)
# timedelta diff
nrs = rs - rs.shift(1)
nxp = xp.diff()
assert_series_equal(nrs, nxp)
def test_pct_change(self):
rs = self.ts.pct_change(fill_method=None)
assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
rs = self.ts.pct_change(2)
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(2) - 1)
rs = self.ts.pct_change(fill_method='bfill', limit=1)
filled = self.ts.fillna(method='bfill', limit=1)
assert_series_equal(rs, filled / filled.shift(1) - 1)
rs = self.ts.pct_change(freq='5D')
filled = self.ts.fillna(method='pad')
assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
s = Series([1., 1.5, np.nan, 2.5, 3.])
chg = s.pct_change()
expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
assert_series_equal(chg, expected)
def test_autocorr(self):
# Just run the function
corr1 = self.ts.autocorr()
# Now run it with the lag parameter
corr2 = self.ts.autocorr(lag=1)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
self.assertTrue(np.isnan(corr1))
self.assertTrue(np.isnan(corr2))
else:
self.assertEqual(corr1, corr2)
# Choose a random lag between 1 and length of Series - 2
# and compare the result with the Series corr() function
n = 1 + np.random.randint(max(1, len(self.ts) - 2))
corr1 = self.ts.corr(self.ts.shift(n))
corr2 = self.ts.autocorr(lag=n)
# corr() with lag needs Series of at least length 2
if len(self.ts) <= 2:
self.assertTrue(np.isnan(corr1))
self.assertTrue(np.isnan(corr2))
else:
self.assertEqual(corr1, corr2)
def test_first_last_valid(self):
ts = self.ts.copy()
ts[:5] = np.NaN
index = ts.first_valid_index()
self.assertEqual(index, ts.index[5])
ts[-5:] = np.NaN
index = ts.last_valid_index()
self.assertEqual(index, ts.index[-6])
ts[:] = np.nan
self.assertIsNone(ts.last_valid_index())
self.assertIsNone(ts.first_valid_index())
ser = Series([], index=[])
self.assertIsNone(ser.last_valid_index())
self.assertIsNone(ser.first_valid_index())
def test_mpl_compat_hack(self):
result = self.ts[:, np.newaxis]
expected = self.ts.values[:, np.newaxis]
assert_almost_equal(result, expected)
#------------------------------------------------------------------------------
# GroupBy
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# Misc not safe for sparse
def test_dropna_preserve_name(self):
self.ts[:5] = np.nan
result = self.ts.dropna()
self.assertEqual(result.name, self.ts.name)
name = self.ts.name
ts = self.ts.copy()
ts.dropna(inplace=True)
self.assertEqual(ts.name, name)
def test_numpy_unique(self):
# it works!
result = np.unique(self.ts)
def test_concat_empty_series_dtypes_roundtrips(self):
# round-tripping with self & like self
dtypes = map(np.dtype,['float64','int8','uint8','bool','m8[ns]','M8[ns]'])
for dtype in dtypes:
self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype)
self.assertEqual(pd.concat([Series(dtype=dtype),
Series(dtype=dtype)]).dtype, dtype)
def int_result_type(dtype, dtype2):
typs = set([dtype.kind,dtype2.kind])
if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'):
return 'i'
elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'):
return 'u'
return None
def float_result_type(dtype, dtype2):
typs = set([dtype.kind,dtype2.kind])
if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'):
return 'f'
return None
def get_result_type(dtype, dtype2):
result = float_result_type(dtype, dtype2)
if result is not None:
return result
result = int_result_type(dtype, dtype2)
if result is not None:
return result
return 'O'
for dtype in dtypes:
for dtype2 in dtypes:
if dtype == dtype2:
continue
expected = get_result_type(dtype, dtype2)
result = pd.concat([Series(dtype=dtype),
Series(dtype=dtype2)]).dtype
self.assertEqual(result.kind, expected)
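    # Worked illustration of the promotion rule exercised above (reasoning
    # only, derived from the helpers, no extra assertions): int8+bool and
    # int8+uint8 stay integer ('i'), uint8+bool stays unsigned ('u'),
    # float64+int8 widens to float ('f'), and any combination involving
    # m8[ns]/M8[ns] falls back to object ('O').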
def test_concat_empty_series_dtypes(self):
# bools
self.assertEqual(pd.concat([Series(dtype=np.bool_),
Series(dtype=np.int32)]).dtype, np.int32)
self.assertEqual(pd.concat([Series(dtype=np.bool_),
Series(dtype=np.float32)]).dtype, np.object_)
# datetimelike
self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.bool)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
Series(dtype=np.int64)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.int64)]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
Series(dtype=np.bool_),
Series(dtype=np.int64)]).dtype, np.object_)
# categorical
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='category')]).dtype, 'category')
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='float64')]).dtype, np.object_)
self.assertEqual(pd.concat([Series(dtype='category'),
Series(dtype='object')]).dtype, 'category')
# sparse
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='float64').to_sparse()])
self.assertEqual(result.dtype,np.float64)
self.assertEqual(result.ftype,'float64:sparse')
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='float64')])
self.assertEqual(result.dtype,np.float64)
self.assertEqual(result.ftype,'float64:sparse')
result = pd.concat([Series(dtype='float64').to_sparse(),
Series(dtype='object')])
self.assertEqual(result.dtype,np.object_)
self.assertEqual(result.ftype,'object:dense')
def test_searchsorted_numeric_dtypes_scalar(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted(30)
e = 2
tm.assert_equal(r, e)
r = s.searchsorted([30])
e = np.array([2])
tm.assert_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
s = Series([1, 2, 90, 1000, 3e9])
r = s.searchsorted([91, 2e6])
e = np.array([3, 4])
tm.assert_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = pd.Timestamp('20120102')
r = s.searchsorted(v)
e = 1
tm.assert_equal(r, e)
def test_search_sorted_datetime64_list(self):
s = Series(pd.date_range('20120101', periods=10, freq='2D'))
v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
r = s.searchsorted(v)
e = np.array([1, 2])
tm.assert_array_equal(r, e)
def test_searchsorted_sorter(self):
# GH8490
s = Series([3, 1, 2])
r = s.searchsorted([0, 3], sorter=np.argsort(s))
e = np.array([0, 2])
tm.assert_array_equal(r, e)
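    # Reasoning behind the expected [0, 2] above (illustration only): with
    # sorter=np.argsort(s) the lookup runs against the sorted view [1, 2, 3],
    # so 0 would be inserted at position 0 and 3 at position 2, i.e. to the
    # left of the existing 3.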
class TestSeriesNonUnique(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
pass
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
self.assertRaises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
self.assertRaises(IndexError, s.__getitem__, 5)
self.assertRaises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
self.assertRaises(KeyError, s.__getitem__, 5)
self.assertRaises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
self.assertRaises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
self.assertEqual(s[stamp], 0)
def test_reset_index(self):
df = tm.makeDataFrame()[:5]
ser = df.stack()
ser.index.names = ['hash', 'category']
ser.name = 'value'
df = ser.reset_index()
self.assertIn('value', df)
df = ser.reset_index(name='value2')
self.assertIn('value2', df)
# check inplace
s = ser.reset_index(drop=True)
s2 = ser
s2.reset_index(drop=True, inplace=True)
assert_series_equal(s, s2)
# level
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
s = Series(np.random.randn(6), index=index)
rs = s.reset_index(level=1)
self.assertEqual(len(rs.columns), 2)
rs = s.reset_index(level=[0, 2], drop=True)
self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))
tm.assert_isinstance(rs, Series)
def test_set_index_makes_timeseries(self):
idx = tm.makeDateIndex(10)
s = Series(lrange(10))
s.index = idx
self.assertTrue(s.is_time_series == True)
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
self.assertTrue(ser.is_time_series)
self.assertIsInstance(ser.index, DatetimeIndex)
def test_replace(self):
N = 100
ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -1).all())
self.assertTrue((rs[20:30] == -1).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
self.assertTrue((rs[:5] == -1).all())
self.assertTrue((rs[6:10] == -2).all())
self.assertTrue((rs[20:30] == -3).all())
self.assertTrue((isnull(ser[:5])).all())
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
self.assertTrue((ser[:5] == -1).all())
self.assertTrue((ser[6:10] == -1).all())
self.assertTrue((ser[20:30] == -1).all())
def test_repeat(self):
s = Series(np.random.randn(3), index=['a', 'b', 'c'])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep),
index=s.index.values.repeat(to_rep))
assert_series_equal(reps, exp)
def test_unique_data_ownership(self):
# it works! #1807
Series(Series(["a", "c", "b"]).unique()).sort()
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
py | 1a2fc5fef9537c0af5cbc7caa7253d9e1dc9a7cc | import os
from os import path
from pathlib import Path
from shutil import rmtree
from typing import Union
from pyspark.sql.types import DataType
from pyspark.sql.types import StructType
from spark_fhir_schemas.r4.complex_types.address import AddressSchema
from spark_fhir_schemas.r4.resources.explanationofbenefit import (
ExplanationOfBenefitSchema,
)
from spark_fhir_schemas.r4.resources.patient import PatientSchema
def test_simple() -> None:
data_dir: Path = Path(__file__).parent.joinpath("./")
temp_folder = data_dir.joinpath("./temp")
if path.isdir(temp_folder):
rmtree(temp_folder)
os.mkdir(temp_folder)
schema: Union[StructType, DataType] = PatientSchema.get_schema()
assert isinstance(schema, StructType)
# print(schema)
print("------- Patient --------")
print(schema.json())
with open(temp_folder.joinpath("patient_schema.json"), "w+") as file:
file.write(schema.json())
print("------- Address --------")
schema = AddressSchema.get_schema()
print(schema.json())
with open(temp_folder.joinpath("address_schema.json"), "w+") as file:
file.write(schema.json())
print("------- ExplanationOfBenefitSchema --------")
schema = ExplanationOfBenefitSchema.get_schema()
print(schema.json())
# noinspection SpellCheckingInspection
with open(temp_folder.joinpath("explanationofbenefit_schema.json"), "w") as file:
file.write(schema.json())
assert 1 == 1
|
py | 1a2fc664e4da7f455d63942dd2554fa8b531bffc | from __future__ import division
from __future__ import print_function
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import argparse
import glob
import logging
logging.basicConfig(level=logging.INFO)
import time
import numpy as np
import mxnet as mx
from tqdm import tqdm
from mxnet import nd
from mxnet import gluon
import gluoncv as gcv
gcv.utils.check_version('0.6.0')
from gluoncv import data as gdata
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultValTransform
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
def parse_args():
parser = argparse.ArgumentParser(description='Validate Faster-RCNN networks.')
parser.add_argument('--network', type=str, default='resnet50_v1b',
help="Base feature extraction network name")
parser.add_argument('--dataset', type=str, default='voc',
help='Training dataset.')
parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
default=4, help='Number of data workers')
parser.add_argument('--gpus', type=str, default='0',
help='Training with GPUs, you can specify 1,3 for example.')
parser.add_argument('--pretrained', type=str, default='True',
help='Load weights from previously saved parameters.')
parser.add_argument('--save-prefix', type=str, default='',
help='Saving parameter prefix')
parser.add_argument('--save-json', action='store_true',
help='Save coco output json')
parser.add_argument('--eval-all', action='store_true',
help='Eval all models begins with save prefix. Use with pretrained.')
parser.add_argument('--norm-layer', type=str, default=None,
help='Type of normalization layer to use. '
'If set to None, backbone normalization layer will be fixed,'
' and no normalization layer will be used. '
'Currently supports \'bn\', and None, default is None')
parser.add_argument('--use-fpn', action='store_true',
help='Whether to use feature pyramid network.')
args = parser.parse_args()
return args
def get_dataset(dataset, args):
if dataset.lower() == 'voc':
val_dataset = gdata.VOCDetection(
splits=[(2007, 'test')])
val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
elif dataset.lower() == 'coco':
val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval',
cleanup=not args.save_json)
else:
raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
return val_dataset, val_metric
def get_dataloader(net, val_dataset, batch_size, num_workers):
"""Get dataloader."""
val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])
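    # The Append collate above keeps every sample as its own length-1 ndarray
    # instead of stacking, so validation images of different shapes can share
    # a batch; batchify.Tuple applies that collate function to each of the
    # three fields (image, label, im_scale) produced by the transform.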
val_loader = mx.gluon.data.DataLoader(
val_dataset.transform(FasterRCNNDefaultValTransform(net.short, net.max_size)),
batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
return val_loader
def split_and_load(batch, ctx_list):
"""Split data to 1 batch each device."""
num_ctx = len(ctx_list)
new_batch = []
for i, data in enumerate(batch):
new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]
new_batch.append(new_data)
return new_batch
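def _demo_split_and_load():
    # Illustrative sketch only (not used by the script, all values made up):
    # mimic a batch collated by batchify.Append, i.e. one length-1 ndarray per
    # device for each field, and scatter it over two CPU contexts (GPUs behave
    # the same way).
    ctx_list = [mx.cpu(0), mx.cpu(0)]
    batch = [
        [mx.nd.zeros((1, 3, 8, 8)), mx.nd.zeros((1, 3, 8, 8))],  # images
        [mx.nd.zeros((1, 1, 6)), mx.nd.zeros((1, 1, 6))],        # labels
        [mx.nd.ones((1, 1)), mx.nd.ones((1, 1))],                # im_scales
    ]
    images, labels, im_scales = split_and_load(batch, ctx_list)
    assert len(images) == 2 and images[0].context == ctx_list[0]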
def validate(net, val_data, ctx, eval_metric, size):
"""Test on validation dataset."""
clipper = gcv.nn.bbox.BBoxClipToImage()
eval_metric.reset()
net.hybridize(static_alloc=True)
with tqdm(total=size) as pbar:
for ib, batch in enumerate(val_data):
batch = split_and_load(batch, ctx_list=ctx)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y, im_scale in zip(*batch):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(clipper(bboxes, x))
# rescale to original resolution
im_scale = im_scale.reshape((-1)).asscalar()
det_bboxes[-1] *= im_scale
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_bboxes[-1] *= im_scale
gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
# update metric
for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
pbar.update(len(ctx))
return eval_metric.get()
if __name__ == '__main__':
args = parse_args()
# contexts
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
args.batch_size = len(ctx) # 1 batch per device
# network
kwargs = {}
module_list = []
if args.use_fpn:
module_list.append('fpn')
if args.norm_layer is not None:
module_list.append(args.norm_layer)
if args.norm_layer == 'bn':
kwargs['num_devices'] = len(args.gpus.split(','))
net_name = '_'.join(('faster_rcnn', *module_list, args.network, args.dataset))
args.save_prefix += net_name
if args.pretrained.lower() in ['true', '1', 'yes', 't']:
net = gcv.model_zoo.get_model(net_name, pretrained=True, **kwargs)
else:
net = gcv.model_zoo.get_model(net_name, pretrained=False, **kwargs)
net.load_parameters(args.pretrained.strip(), cast_dtype=True)
net.collect_params().reset_ctx(ctx)
# validation data
val_dataset, eval_metric = get_dataset(args.dataset, args)
val_data = get_dataloader(
net, val_dataset, args.batch_size, args.num_workers)
# validation
if not args.eval_all:
names, values = validate(net, val_data, ctx, eval_metric, len(val_dataset))
for k, v in zip(names, values):
print(k, v)
else:
saved_models = glob.glob(args.save_prefix + '*.params')
for epoch, saved_model in enumerate(sorted(saved_models)):
print('[Epoch {}] Validating from {}'.format(epoch, saved_model))
net.load_parameters(saved_model)
net.collect_params().reset_ctx(ctx)
map_name, mean_ap = validate(net, val_data, ctx, eval_metric, len(val_dataset))
val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
print('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[-1])
with open(args.save_prefix+'_best_map.log', 'a') as f:
f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map))
|
py | 1a2fc6a45177c4fb910e60a72147540646329b18 | # -*- coding: utf-8 -*-
"""
pygments.styles.rrt
~~~~~~~~~~~~~~~~~~~
pygments "rrt" theme, based on Zap and Emacs defaults.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Comment, Name, Keyword, String
class RrtStyle(Style):
"""
Minimalistic "rrt" theme, based on Zap and Emacs defaults.
"""
background_color = '#000000'
highlight_color = '#0000ff'
styles = {
Comment: '#00ff00',
Name.Function: '#ffff00',
Name.Variable: '#eedd82',
Name.Constant: '#7fffd4',
Keyword: '#ff0000',
Comment.Preproc: '#e5e5e5',
String: '#87ceeb',
Keyword.Type: '#ee82ee',
}
|
py | 1a2fc6d60a0e0c27df3ace927b3016c87b619670 | from scraping.funtion import html_convert_python
def get_data_page_locate(url):
soup = html_convert_python( url )
data = []
for row in soup.find("ul", {"id": "postcode-list"}).find_all("li"):
url = row.find('a').attrs['href']
data.append(url)
return data
def get_data_page_region(url):
soup = html_convert_python( url )
data = []
for row in soup.find_all("div", {"class": "col-md-3 col-xs-4"}):
url = row.a.get('href')
print(url)
data.append(url)
return data
def get_data_page_postcode(url):
soup = html_convert_python( url )
data = []
for row in soup.find_all("div", {"class": "col-md-3 col-xs-12"}):
url = row.a.string
print(url)
data.append(url)
return data
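# Hypothetical usage sketch (the URL below is a placeholder, not taken from the
# original project): the three scrapers are meant to be chained, locate page ->
# region pages -> postcode strings.
#
#   for region_url in get_data_page_locate("https://example.com/postcodes"):
#       for page_url in get_data_page_region(region_url):
#           postcodes = get_data_page_postcode(page_url)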
|
py | 1a2fc6f2e7b702b78f689e41f3dc9f0edb25c1c4 | #!/usr/bin/env python
#encoding: utf8
#
# Copyright © Burak Arslan <burak at arskom dot com dot tr>,
# Arskom Ltd. http://www.arskom.com.tr
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the owner nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import logging
import random
import sys
import base64
from Cookie import SimpleCookie
# bcrypt seems to be the current consensus in cryptographic circles for
# storing passwords.
# You need the package from http://code.google.com/p/py-bcrypt/
# You can install it by running easy_install py-bcrypt.
try:
import bcrypt
except ImportError:
print('easy_install --user py-bcrypt to get it.')
raise
from spyne.application import Application
from spyne.decorator import rpc
from spyne.error import ResourceNotFoundError
from spyne.model.complex import ComplexModel
from spyne.model.fault import Fault
from spyne.model.primitive import Mandatory
from spyne.model.primitive import String
from spyne.protocol.soap import Soap11
from spyne.server.wsgi import WsgiApplication
from spyne.service import ServiceBase
class PublicKeyError(ResourceNotFoundError):
__namespace__ = 'spyne.examples.authentication'
def __init__(self, value):
Fault.__init__(self,
faultcode='Client.KeyError',
faultstring='Value %r not found' % value
)
class AuthenticationError(Fault):
__namespace__ = 'spyne.examples.authentication'
def __init__(self, user_name):
# TODO: self.transport.http.resp_code = HTTP_401
Fault.__init__(self,
faultcode='Client.AuthenticationError',
faultstring='Invalid authentication request for %r' % user_name
)
class AuthorizationError(Fault):
__namespace__ = 'spyne.examples.authentication'
def __init__(self):
# TODO: self.transport.http.resp_code = HTTP_401
Fault.__init__(self,
faultcode='Client.AuthorizationError',
faultstring='You are not authorized to access this resource.'
)
class UnauthenticatedError(Fault):
__namespace__ = 'spyne.examples.authentication'
def __init__(self):
Fault.__init__(self,
faultcode='Client.UnauthenticatedError',
faultstring='This resource can only be accessed after authentication.'
)
class SpyneDict(dict):
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
raise PublicKeyError(key)
class Preferences(ComplexModel):
__namespace__ = 'spyne.examples.authentication'
language = String(max_len=2)
time_zone = String
user_db = {
'neo': bcrypt.hashpw('Wh1teR@bbit', bcrypt.gensalt()),
}
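# Illustration of the check used in authenticate() below (the password is shown
# only because it already appears above): bcrypt stores the salt inside the
# hash, so re-hashing the candidate password with the stored hash as salt and
# comparing the results is the verification step.
#
#   stored = user_db['neo']
#   bcrypt.hashpw('Wh1teR@bbit', stored) == stored   # -> True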
session_db = set()
preferences_db = SpyneDict({
'neo': Preferences(language='en', time_zone='Underground/Zion'),
'smith': Preferences(language='xx', time_zone='Matrix/Core'),
})
class UserService(ServiceBase):
__tns__ = 'spyne.examples.authentication'
@rpc(Mandatory.String, Mandatory.String, _returns=None,
_throws=AuthenticationError)
def authenticate(ctx, user_name, password):
password_hash = user_db.get(user_name, None)
if password_hash is None:
raise AuthenticationError(user_name)
if bcrypt.hashpw(password, password_hash) != password_hash:
raise AuthenticationError(user_name)
session_id = (user_name, '%x' % random.randint(1<<128, (1<<132)-1))
session_db.add(session_id)
cookie = SimpleCookie()
cookie["session-id"] = base64.urlsafe_b64encode(str(session_id[0]) + "\0" + str(session_id[1]))
cookie["session-id"]["max-age"] = 3600
header_name, header_value = cookie.output().split(":", 1)
ctx.transport.resp_headers[header_name] = header_value.strip()
from pprint import pprint
pprint(ctx.transport.resp_headers)
@rpc(Mandatory.String, _throws=PublicKeyError, _returns=Preferences)
def get_preferences(ctx, user_name):
# Only allow access to the users own preferences.
if user_name != ctx.udc:
raise AuthorizationError()
retval = preferences_db[user_name]
return retval
def _on_method_call(ctx):
if ctx.descriptor.name == "authenticate":
# No checking of session cookie for call to authenticate
return
cookie = SimpleCookie()
http_cookie = ctx.transport.req_env.get("HTTP_COOKIE")
if http_cookie:
cookie.load(http_cookie)
if "session-id" not in cookie:
raise UnauthenticatedError()
session_cookie = cookie["session-id"].value
session_id = tuple(base64.urlsafe_b64decode(session_cookie).split("\0", 1))
if not session_id in session_db:
raise AuthenticationError(session_id[0])
ctx.udc = session_id[0] # user name
UserService.event_manager.add_listener('method_call', _on_method_call)
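# Sketch for illustration only (values are made up): the session cookie is just
# the URL-safe base64 encoding of "<user_name>\0<token>", so decoding reverses
# the steps used in authenticate() and _on_method_call() above.
def _demo_session_cookie_roundtrip():
    session_id = ('neo', 'deadbeef')
    cookie_value = base64.urlsafe_b64encode(session_id[0] + "\0" + session_id[1])
    decoded = tuple(base64.urlsafe_b64decode(cookie_value).split("\0", 1))
    assert decoded == session_id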
if __name__=='__main__':
from spyne.util.wsgi_wrapper import run_twisted
logging.basicConfig(level=logging.DEBUG)
logging.getLogger('spyne.protocol.xml').setLevel(logging.DEBUG)
logging.getLogger('twisted').setLevel(logging.DEBUG)
application = Application([UserService],
tns='spyne.examples.authentication',
in_protocol=Soap11(validator='lxml'),
out_protocol=Soap11()
)
twisted_apps = [
(WsgiApplication(application), 'app'),
]
sys.exit(run_twisted(twisted_apps, 7789))
|
py | 1a2fc7ee57cb4ab1366ee1f2bd9e223a48a4f673 | __author__ = 'dereyly'
import sys
#sys.path.append('/home/dereyly/progs/caffe_cudnn33/python_33')
#sys.path.append('/home/dereyly/progs/caffe-master-triplet/python')
import caffe
import numpy as np
'''
layer {
name: 'rcls_lost_my'
type: 'Python'
bottom: 'feats'
bottom: 'labels'
top: 'cls_lost_my'
python_param {
module: 'fast_rcnn.skip_softmax_loss'
layer: 'SoftmaxLossLayer'
#param_str: "{'ratios': [0.5, 1, 2], 'scales': [2, 4, 8, 16, 32]}"
}
loss_weight: 1
}
'''
def softmax(x):
    """Compute row-wise softmax values for each set of scores in x."""
    sf = np.exp(x)
    sum_sf = np.sum(sf, axis=1)
    for i in range(x.shape[0]):
        sf[i] /= sum_sf[i]
    return sf
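# Side note (illustrative sketch, not part of the original layer): np.exp can
# overflow for large logits. A numerically safer row-wise softmax subtracts the
# per-row maximum before exponentiating; the resulting probabilities are
# identical.
def softmax_stable(x):
    shifted = x - np.max(x, axis=1, keepdims=True)
    e = np.exp(shifted)
    return e / np.sum(e, axis=1, keepdims=True)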
class SoftmaxLossLayer(caffe.Layer):
def setup(self, bottom, top):
# check input pair
if len(bottom) != 2:
raise Exception("Need two inputs to compute distance.")
# DBG
self.count = 0
self.skip_count = 0
top[0].reshape(1)
def reshape(self, bottom, top):
# check input dimensions match
# difference is shape of inputs
sz=bottom[0].data.shape
self.batch_sz=sz[0]
self.diff = np.zeros((sz[0],sz[1]),dtype=np.float32)
self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)
# loss output is scalar
#top[1].reshape(self.batch_sz)
def forward(self, bottom, top):
self.count+=1
sz=bottom[0].data.shape
self.lbl_gt=np.zeros((sz[0],sz[1]),dtype=np.float32)
lbl_idx=bottom[1].data
lbl_idx=lbl_idx.astype(dtype= int)
for i in range(self.batch_sz):
self.lbl_gt[i,lbl_idx[i]]=1
soft_max=softmax(bottom[0].data)
#loss = -self.lbl_gt*np.log(np.maximum(soft_max,np.finfo(np.float32).eps))
loss=0
for i in range(self.batch_sz):
loss -= np.log(np.maximum(soft_max[i][lbl_idx[i]],np.finfo(np.float32).eps))
#loss2=-np.log(soft_max)
#for i in range(self.batch_sz):
# loss[i,lbl_idx[i]]=0
#print bottom[1].data.shape
self.diff[...] = soft_max-self.lbl_gt
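        # Each sample's gradient is scaled by the predicted probability of its
        # ground-truth class, so samples the model gets badly wrong (probability
        # near zero) are damped, i.e. effectively skipped; skip_count below
        # tracks the running average of these weights.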
for i in range(self.batch_sz):
coeff=soft_max[i,lbl_idx[i]]
self.diff[i]*=coeff
self.skip_count+=coeff
if self.count%100==0:
print('-- skip count -- ',self.skip_count/(100.0*self.batch_sz))
self.skip_count=0
top[0].data[...] = np.sum(loss) / bottom[0].num
#top[1].data[...] = loss
def backward(self, top, propagate_down, bottom):
#pass
bottom[0].diff[...] = self.diff / bottom[0].num
|
py | 1a2fc822be9317ed37f6e3aa7339504f9dd0a7a2 | from typing import Optional, Tuple
################################################################
# Zulip Server settings.
#
# This file controls settings that affect the whole Zulip server.
# See our documentation at:
# https://zulip.readthedocs.io/en/latest/production/settings.html
#
# For developer documentation on the Zulip settings system, see:
# https://zulip.readthedocs.io/en/latest/subsystems/settings.html
#
# Remember to restart the server after making changes here!
# su zulip -c /home/zulip/deployments/current/scripts/restart-server
################################
# Mandatory settings.
#
# These settings MUST be set in production. In a development environment,
# sensible default values will be used.
# The email address for the person or team who maintains the Zulip
# installation. Note that this is a public-facing email address; it may
# appear on 404 pages, is used as the sender's address for many automated
# emails, and is advertised as a support address. An email address like
# [email protected] is totally reasonable, as is [email protected].
# Do not put a display name; e.g. '[email protected]', not
# 'Zulip Support <[email protected]>'.
ZULIP_ADMINISTRATOR = '[email protected]'
# The user-accessible Zulip hostname for this installation, e.g.
# zulip.example.com. This should match what users will put in their
# web browser. If you want to allow multiple hostnames, add the rest
# to ALLOWED_HOSTS.
#
# If you need to access the server on a specific port, you should set
# EXTERNAL_HOST to e.g. zulip.example.com:1234 here.
EXTERNAL_HOST = 'zulip.example.com'
# Alternative hostnames. A comma-separated list of strings
# representing the host/domain names that your users can enter in
# their browsers to access Zulip. This is a security measure; for
# details, see the Django documentation:
# https://docs.djangoproject.com/en/1.11/ref/settings/#allowed-hosts
#
# Zulip automatically adds to this list 'localhost', '127.0.0.1', and
# patterns representing EXTERNAL_HOST and subdomains of it. If you are
# accessing your server by other hostnames, list them here.
#
# Note that these should just be hostnames, without port numbers.
#ALLOWED_HOSTS = ['zulip-alias.example.com', '192.0.2.1']
# If EXTERNAL_HOST is not a valid domain name (e.g. an IP address),
# set FAKE_EMAIL_DOMAIN below to a domain that Zulip can use when
# generating (fake) email addresses for bots, dummy users, etc.
#FAKE_EMAIL_DOMAIN = 'fake-domain.example.com'
################
# Outgoing email (SMTP) settings.
#
# Zulip needs to be able to send email (that is, use SMTP) so it can
# confirm new users' email addresses and send notifications.
#
# If you don't already have an SMTP provider, free ones are available.
#
# For more details, including a list of free SMTP providers and
# advice for troubleshooting, see the Zulip documentation:
# https://zulip.readthedocs.io/en/latest/production/email.html
# EMAIL_HOST and EMAIL_HOST_USER are generally required.
#EMAIL_HOST = 'smtp.example.com'
#EMAIL_HOST_USER = ''
# Passwords and secrets are not stored in this file. The password
# for user EMAIL_HOST_USER goes in `/etc/zulip/zulip-secrets.conf`.
# In that file, set `email_password`. For example:
# email_password = abcd1234
# EMAIL_USE_TLS and EMAIL_PORT are required for most SMTP providers.
#EMAIL_USE_TLS = True
#EMAIL_PORT = 587
# The noreply address to be used as the sender for certain generated
# emails. Messages sent to this address could contain sensitive user
# data and should not be delivered anywhere. The default is
# e.g. noreply-{random_token}@zulip.example.com (if EXTERNAL_HOST is
# zulip.example.com). There are potential security issues if you set
# ADD_TOKENS_TO_NOREPLY_ADDRESS=False to remove the token; see
# https://zulip.readthedocs.io/en/latest/production/email.html for details.
#ADD_TOKENS_TO_NOREPLY_ADDRESS = True
#TOKENIZED_NOREPLY_EMAIL_ADDRESS = "noreply-{token}@example.com"
# NOREPLY_EMAIL_ADDRESS is the sender for noreply emails that don't
# contain confirmation links (where the security problem fixed by
# ADD_TOKENS_TO_NOREPLY_ADDRESS does not exist), as well as for
# confirmation emails when ADD_TOKENS_TO_NOREPLY_ADDRESS=False.
#NOREPLY_EMAIL_ADDRESS = '[email protected]'
# Many countries and bulk mailers require certain types of email to display
# a physical mailing address to comply with anti-spam legislation.
# Non-commercial and non-public-facing installations are unlikely to need
# this setting.
# The address should have no newlines.
#PHYSICAL_ADDRESS = ''
################
# Authentication settings.
# Enable at least one of the following authentication backends.
# See https://zulip.readthedocs.io/en/latest/production/authentication-methods.html
# for documentation on our authentication backends.
#
# The install process requires EmailAuthBackend (the default) to be
# enabled. If you want to disable it, do so after creating the
# initial realm and user.
AUTHENTICATION_BACKENDS = (
'zproject.backends.EmailAuthBackend', # Email and password; just requires SMTP setup
# 'zproject.backends.GoogleAuthBackend', # Google auth, setup below
# 'zproject.backends.GitHubAuthBackend', # GitHub auth, setup below
# 'zproject.backends.AzureADAuthBackend', # Microsoft Azure Active Directory auth, setup below
# 'zproject.backends.SAMLAuthBackend', # SAML, setup below
# 'zproject.backends.ZulipLDAPAuthBackend', # LDAP, setup below
# 'zproject.backends.ZulipRemoteUserBackend', # Local SSO, setup docs on readthedocs
) # type: Tuple[str, ...]
########
# Google OAuth.
#
# To set up Google authentication, you'll need to do the following:
#
# (1) Visit https://console.developers.google.com/ , navigate to
# "APIs & Services" > "Credentials", and create a "Project" which will
# correspond to your Zulip instance.
#
# (2) Navigate to "APIs & services" > "Library", and find the
# "Identity Toolkit API". Choose "Enable".
#
# (3) Return to "Credentials", and select "Create credentials".
# Choose "OAuth client ID", and follow prompts to create a consent
# screen. Fill in "Authorized redirect URIs" with a value like
# https://zulip.example.com/accounts/login/google/done/
# based on your value for EXTERNAL_HOST.
#
# (4) You should get a client ID and a client secret. Copy them.
# Use the client ID as `SOCIAL_AUTH_GOOGLE_KEY` here, and put the
# client secret in zulip-secrets.conf as `social_auth_google_secret`.
#SOCIAL_AUTH_GOOGLE_KEY = <your client ID from Google>
########
# GitHub OAuth.
#
# To set up GitHub authentication, you'll need to do the following:
#
# (1) Register an OAuth2 application with GitHub at one of:
# https://github.com/settings/developers
# https://github.com/organizations/ORGNAME/settings/developers
# Fill in "Callback URL" with a value like
# https://zulip.example.com/complete/github/ as
# based on your values for EXTERNAL_HOST and SOCIAL_AUTH_SUBDOMAIN.
#
# (2) You should get a page with settings for your new application,
# showing a client ID and a client secret. Use the client ID as
# `SOCIAL_AUTH_GITHUB_KEY` here, and put the client secret in
# zulip-secrets.conf as `social_auth_github_secret`.
#SOCIAL_AUTH_GITHUB_KEY = <your client ID from GitHub>
# (3) Optionally, you can configure the GitHub integration to only
# allow members of a particular GitHub team or organization to log
# into your Zulip server through GitHub authentication. To enable
# this, set one of the two parameters below:
#SOCIAL_AUTH_GITHUB_TEAM_ID = <your team id>
#SOCIAL_AUTH_GITHUB_ORG_NAME = <your org name>
# (4) If you are serving multiple Zulip organizations on different
# subdomains, you need to set SOCIAL_AUTH_SUBDOMAIN. You can set it
# to any subdomain on which you do not plan to host a Zulip
# organization. The default recommendation, `auth`, is a reserved
# subdomain; if you're using this setting, the "Callback URL" should be e.g.:
# https://auth.zulip.example.com/complete/github/
#
# If you end up using a subdomain other than the default
# recommendation, you must also set the 'ROOT_SUBDOMAIN_ALIASES' list
# to include this subdomain.
#
#SOCIAL_AUTH_SUBDOMAIN = 'auth'
########
# SAML Authentication
#
# For SAML authentication, you will need to configure the settings
# below using information from your SAML Identity Provider, as
# explained in:
#
# https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#saml
#
# You will need to modify these SAML settings:
SOCIAL_AUTH_SAML_ORG_INFO = {
"en-US": {
"displayname": "Example, Inc. Zulip",
"name": "zulip",
"url": "%s%s" % ('https://', EXTERNAL_HOST),
}
}
SOCIAL_AUTH_SAML_ENABLED_IDPS = {
# The fields are explained in detail here:
# https://python-social-auth-docs.readthedocs.io/en/latest/backends/saml.html
"idp_name": {
# Configure entity_id and url according to information provided to you by your IdP:
"entity_id": "https://idp.testshib.org/idp/shibboleth",
"url": "https://idp.testshib.org/idp/profile/SAML2/Redirect/SSO",
# The part below corresponds to what's likely referred to as something like
# "Attribute Statements" (with Okta as your IdP) or "Attribute Mapping" (with G Suite).
# The names on the right side need to correspond to the names under which
# the IdP will send the user attributes. With these defaults, it's expected
# that the user's email will be sent with the "email" attribute name,
# the first name and the last name with the "first_name", "last_name" attribute names.
"attr_user_permanent_id": "email",
"attr_first_name": "first_name",
"attr_last_name": "last_name",
"attr_username": "email",
"attr_email": "email",
# The "x509cert" attribute is automatically read from
# /etc/zulip/saml/idps/{idp_name}.crt; don't specify it here.
# Optionally, you can edit display_name and display_icon
# settings below to change the name and icon that will show on
# the login button.
"display_name": "SAML",
# Path to a square image file containing a logo to appear at
# the left end of the login/register buttons for this IDP.
# The default of None results in a text-only button.
# "display_icon": "/path/to/icon.png",
}
}
SOCIAL_AUTH_SAML_SECURITY_CONFIG = {
# If you've set up the optional private and public server keys,
# set this to True to enable signing of SAMLRequests using the
# private key.
"authnRequestsSigned": False,
}
# These SAML settings you likely won't need to modify.
SOCIAL_AUTH_SAML_SP_ENTITY_ID = 'https://' + EXTERNAL_HOST
SOCIAL_AUTH_SAML_TECHNICAL_CONTACT = {
"givenName": "Technical team",
"emailAddress": ZULIP_ADMINISTRATOR,
}
SOCIAL_AUTH_SAML_SUPPORT_CONTACT = {
"givenName": "Support team",
"emailAddress": ZULIP_ADMINISTRATOR,
}
########
# Azure Active Directory OAuth.
#
# To set up Microsoft Azure AD authentication, you'll need to do the following:
#
# (1) Register an OAuth2 application with Microsoft at:
# https://apps.dev.microsoft.com
# Generate a new password under Application Secrets
# Generate a new platform (web) under Platforms. For Redirect URL, enter:
# https://zulip.example.com/complete/azuread-oauth2/
# Add User.Read permission under Microsoft Graph Permissions
#
# (2) Enter the application ID for the app as SOCIAL_AUTH_AZUREAD_OAUTH2_KEY here
# (3) Put the application password in zulip-secrets.conf as 'azure_oauth2_secret'.
#SOCIAL_AUTH_AZUREAD_OAUTH2_KEY = ''
########
# SSO via REMOTE_USER.
#
# If you are using the ZulipRemoteUserBackend authentication backend,
# set this to your domain (e.g. if REMOTE_USER is "username" and the
# corresponding email address is "[email protected]", set
# SSO_APPEND_DOMAIN = "example.com")
SSO_APPEND_DOMAIN = None # type: Optional[str]
################
# Miscellaneous settings.
# Support for mobile push notifications. Setting controls whether
# push notifications will be forwarded through a Zulip push
# notification bouncer server to the mobile apps. See
# https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html
# for information on how to sign up for and configure this.
#PUSH_NOTIFICATION_BOUNCER_URL = 'https://push.zulipchat.com'
# Whether to redact the content of push notifications. This is less
# usable, but avoids sending message content over the wire. In the
# future, we're likely to replace this with an end-to-end push
# notification encryption feature.
#PUSH_NOTIFICATION_REDACT_CONTENT = False
# Whether to submit basic usage statistics to help the Zulip core team. Details at
#
# https://zulip.readthedocs.io/en/latest/production/mobile-push-notifications.html
#
# Defaults to True if and only if the Mobile Push Notifications Service is enabled.
#SUBMIT_USAGE_STATISTICS = True
# Controls whether session cookies expire when the browser closes
SESSION_EXPIRE_AT_BROWSER_CLOSE = False
# Session cookie expiry in seconds after the last page load
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # 2 weeks
# Password strength requirements; learn about configuration at
# https://zulip.readthedocs.io/en/latest/production/security-model.html.
# PASSWORD_MIN_LENGTH = 6
# PASSWORD_MIN_GUESSES = 10000
# Controls whether Zulip sends "new login" email notifications.
#SEND_LOGIN_EMAILS = True
# Controls whether or not there is a feedback button in the UI.
ENABLE_FEEDBACK = False
# Feedback sent by your users will be sent to this email address.
FEEDBACK_EMAIL = ZULIP_ADMINISTRATOR
# Controls whether or not error reports (tracebacks) are emailed to the
# server administrators.
#ERROR_REPORTING = True
# For frontend (JavaScript) tracebacks
#BROWSER_ERROR_REPORTING = False
# If True, each log message in the server logs will identify the
# Python module where it came from. Useful for tracking down a
# mysterious log message, but a little verbose.
#LOGGING_SHOW_MODULE = False
# If True, each log message in the server logs will identify the
# process ID. Useful for correlating logs with information from
# system-level monitoring tools.
#LOGGING_SHOW_PID = False
# Controls whether or not Zulip will provide inline image preview when
# a link to an image is referenced in a message. Note: this feature
# can also be disabled in a realm's organization settings.
#INLINE_IMAGE_PREVIEW = True
# Controls whether or not Zulip will provide inline previews of
# websites that are referenced in links in messages. Note: this feature
# can also be disabled in a realm's organization settings.
#INLINE_URL_EMBED_PREVIEW = True
# Controls whether or not Zulip will parse links starting with
# "file:///" as a hyperlink (useful if you have e.g. an NFS share).
ENABLE_FILE_LINKS = False
# By default, files uploaded by users and profile pictures are stored
# directly on the Zulip server. You can configure files being instead
# stored in Amazon S3 or another scalable data store here. See docs at:
#
# https://zulip.readthedocs.io/en/latest/production/upload-backends.html
#
# If you change LOCAL_UPLOADS_DIR to a different path, you will also
# need to manually edit Zulip's nginx configuration to use the new
# path. For that reason, we recommend replacing /home/zulip/uploads
# with a symlink instead of changing LOCAL_UPLOADS_DIR.
LOCAL_UPLOADS_DIR = "/home/zulip/uploads"
#S3_AUTH_UPLOADS_BUCKET = ""
#S3_AVATAR_BUCKET = ""
#S3_REGION = ""
# Maximum allowed size of uploaded files, in megabytes. DO NOT SET
# ABOVE 80MB. The file upload implementation doesn't support chunked
# uploads, so browsers will crash if you try uploading larger files.
# Set MAX_FILE_UPLOAD_SIZE to 0 to disable file uploads completely
# (including hiding upload-related options from UI).
MAX_FILE_UPLOAD_SIZE = 25
# Controls whether name changes are completely disabled for this
# installation. This is useful when you're syncing names from an
# integrated LDAP/Active Directory.
NAME_CHANGES_DISABLED = False
# Controls whether avatar changes are completely disabled for this
# installation. This is useful when you're syncing avatars from an
# integrated LDAP/Active Directory.
AVATAR_CHANGES_DISABLED = False
# Controls whether users who have not uploaded an avatar will receive an avatar
# from gravatar.com.
ENABLE_GRAVATAR = True
# To override the default avatar image if ENABLE_GRAVATAR is False, place your
# custom default avatar image at /home/zulip/local-static/default-avatar.png
# and uncomment the following line.
#DEFAULT_AVATAR_URI = '/local-static/default-avatar.png'
# To access an external postgres database you should define the host name in
# REMOTE_POSTGRES_HOST, you can define the password in the secrets file in the
# property postgres_password, and the SSL connection mode in REMOTE_POSTGRES_SSLMODE
# Valid values for REMOTE_POSTGRES_SSLMODE are documented in the
# "SSL Mode Descriptions" table in
# https://www.postgresql.org/docs/9.5/static/libpq-ssl.html
#REMOTE_POSTGRES_HOST = 'dbserver.example.com'
#REMOTE_POSTGRES_SSLMODE = 'require'
# If you want to set a Terms of Service for your server, set the path
# to your markdown file, and uncomment the following line.
#TERMS_OF_SERVICE = '/etc/zulip/terms.md'
# Similarly if you want to set a Privacy Policy.
#PRIVACY_POLICY = '/etc/zulip/privacy.md'
################
# Twitter integration.
# Zulip supports showing inline Tweet previews when a tweet is linked
# to in a message. To support this, Zulip must have access to the
# Twitter API via OAuth. To obtain the various access tokens needed
# below, you must register a new application under your Twitter
# account by doing the following:
#
# 1. Log in to http://dev.twitter.com.
# 2. In the menu under your username, click My Applications. From this page, create a new application.
# 3. Click on the application you created and click "create my access token".
# 4. Fill in the values for twitter_consumer_key, twitter_consumer_secret, twitter_access_token_key,
# and twitter_access_token_secret in /etc/zulip/zulip-secrets.conf.
################
# Email gateway integration.
#
# The Email gateway integration supports sending messages into Zulip
# by sending an email.
# For details, see the documentation:
# https://zulip.readthedocs.io/en/latest/production/settings.html#email-gateway
EMAIL_GATEWAY_PATTERN = ""
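# For example, a pattern with a single %s placeholder (hypothetical address;
# adjust the local part and domain to your installation):
#EMAIL_GATEWAY_PATTERN = "emailgateway+%[email protected]"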
# If you are using polling, edit the IMAP settings below:
#
# The IMAP login; username here and password as email_gateway_password in
# zulip-secrets.conf.
EMAIL_GATEWAY_LOGIN = ""
# The IMAP server & port to connect to
EMAIL_GATEWAY_IMAP_SERVER = ""
EMAIL_GATEWAY_IMAP_PORT = 993
# The IMAP folder name to check for emails. All emails sent to EMAIL_GATEWAY_PATTERN above
# must be delivered to this folder
EMAIL_GATEWAY_IMAP_FOLDER = "INBOX"
################
# LDAP integration.
#
# Zulip supports retrieving information about users via LDAP, and
# optionally using LDAP as an authentication mechanism.
import ldap
from django_auth_ldap.config import LDAPSearch
########
# LDAP integration, part 1: Connecting to the LDAP server.
#
# For detailed instructions, see the Zulip documentation:
# https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap
# The LDAP server to connect to. Setting this enables Zulip
# automatically fetching each new user's name from LDAP.
# Example: "ldaps://ldap.example.com"
AUTH_LDAP_SERVER_URI = ""
# The DN of the user to bind as (i.e., authenticate as) in order to
# query LDAP. If unset, Zulip does an anonymous bind.
AUTH_LDAP_BIND_DN = ""
# Passwords and secrets are not stored in this file. The password
# corresponding to AUTH_LDAP_BIND_DN goes in `/etc/zulip/zulip-secrets.conf`.
# In that file, set `auth_ldap_bind_password`. For example:
# auth_ldap_bind_password = abcd1234
########
# LDAP integration, part 2: Mapping user info from LDAP to Zulip.
#
# For detailed instructions, see the Zulip documentation:
# https://zulip.readthedocs.io/en/latest/production/authentication-methods.html#ldap
# The LDAP search query to find a given user.
#
# The arguments to `LDAPSearch` are (base DN, scope, filter). In the
# filter, the string `%(user)s` is a Python placeholder. The Zulip
# server will replace this with the user's Zulip username, i.e. the
# name they type into the Zulip login form.
#
# For more details and alternatives, see the documentation linked above.
AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
ldap.SCOPE_SUBTREE, "(uid=%(user)s)")
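# For example, with Active Directory you would typically match on
# sAMAccountName instead (an illustration only; adjust the base DN and filter
# to your directory layout):
#AUTH_LDAP_USER_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
#                                   ldap.SCOPE_SUBTREE, "(sAMAccountName=%(user)s)")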
# Configuration to lookup a user's LDAP data given their email address
# (For Zulip reverse mapping). If users log in as e.g. "sam" when
# their email address is "[email protected]", set LDAP_APPEND_DOMAIN to
# "example.com". Otherwise, leave LDAP_APPEND_DOMAIN=None and set
# AUTH_LDAP_REVERSE_EMAIL_SEARCH and AUTH_LDAP_USERNAME_ATTR below.
LDAP_APPEND_DOMAIN = None # type: Optional[str]
# LDAP attribute to find a user's email address.
#
# Leave as None if users log in with their email addresses,
# or if using LDAP_APPEND_DOMAIN.
LDAP_EMAIL_ATTR = None # type: Optional[str]
# AUTH_LDAP_REVERSE_EMAIL_SEARCH works like AUTH_LDAP_USER_SEARCH and
# should query an LDAP user given their email address. It and
# AUTH_LDAP_USERNAME_ATTR are required when LDAP_APPEND_DOMAIN is None.
#AUTH_LDAP_REVERSE_EMAIL_SEARCH = LDAPSearch("ou=users,dc=example,dc=com",
# ldap.SCOPE_SUBTREE, "(email=%(email)s)")
# AUTH_LDAP_USERNAME_ATTR should be the Zulip username attribute
# (defined in AUTH_LDAP_USER_SEARCH).
#AUTH_LDAP_USERNAME_ATTR = "uid"
# This map defines how to populate attributes of a Zulip user from LDAP.
#
# The format is `zulip_name: ldap_name`; each entry maps a Zulip
# concept (on the left) to the LDAP attribute name (on the right) your
# LDAP database uses for the same concept.
AUTH_LDAP_USER_ATTR_MAP = {
# full_name is required; common values include "cn" or "displayName".
# If names are encoded in your LDAP directory as first and last
# name, you can instead specify first_name and last_name, and
# Zulip will combine those to construct a full_name automatically.
"full_name": "cn",
# "first_name": "fn",
# "last_name": "ln",
# Profile pictures can be pulled from the LDAP "thumbnailPhoto"/"jpegPhoto" field.
# "avatar": "thumbnailPhoto",
# This line is for having Zulip to automatically deactivate users
# who are disabled in LDAP/Active Directory (and reactivate users who are not).
# See docs for usage details and precise semantics.
# "userAccountControl": "userAccountControl",
}
# Whether to automatically deactivate users not found in LDAP. If LDAP
# is the only authentication method, then this setting defaults to
# True. If other authentication methods are enabled, it defaults to
# False.
#LDAP_DEACTIVATE_NON_MATCHING_USERS = True
################
# Miscellaneous settings.
# The default CAMO_URI of '/external_content/' is served by the camo
# setup in the default Voyager nginx configuration. Setting CAMO_URI
# to '' will disable the Camo integration.
CAMO_URI = '/external_content/'
# RabbitMQ configuration
#
# By default, Zulip connects to rabbitmq running locally on the machine,
# but Zulip also supports connecting to RabbitMQ over the network;
# to use a remote RabbitMQ instance, set RABBITMQ_HOST to the hostname here.
# RABBITMQ_HOST = "127.0.0.1"
# To use another rabbitmq user than the default 'zulip', set RABBITMQ_USERNAME here.
# RABBITMQ_USERNAME = 'zulip'
# Memcached configuration
#
# By default, Zulip connects to memcached running locally on the machine,
# but Zulip also supports connecting to memcached over the network;
# to use a remote Memcached instance, set MEMCACHED_LOCATION here.
# Format HOST:PORT
# MEMCACHED_LOCATION = 127.0.0.1:11211
# Redis configuration
#
# By default, Zulip connects to redis running locally on the machine,
# but Zulip also supports connecting to redis over the network;
# to use a remote Redis instance, set REDIS_HOST here.
# REDIS_HOST = '127.0.0.1'
# For a different redis port set the REDIS_PORT here.
# REDIS_PORT = 6379
# If you set redis_password in zulip-secrets.conf, Zulip will use that password
# to connect to the redis server.
# Controls whether Zulip will rate-limit user requests.
# RATE_LIMITING = True
# By default, Zulip connects to the thumbor (the thumbnailing software
# we use) service running locally on the machine. If you're running
# thumbor on a different server, you can configure that by setting
# THUMBOR_URL here. Setting THUMBOR_URL='' will let Zulip server know that
# thumbor is not running or configured.
#THUMBOR_URL = 'http://127.0.0.1:9995'
#
# This setting controls whether images shown in Zulip's inline image
# previews should be thumbnailed by thumbor, which saves bandwidth but
# can modify the image's appearance.
#THUMBNAIL_IMAGES = True
# Controls the Jitsi Meet video call integration. By default, the
# integration uses the SaaS meet.jit.si server. You can specify
# your own Jitsi Meet server, or if you'd like to disable the
# integration, set JITSI_SERVER_URL = None.
#JITSI_SERVER_URL = 'jitsi.example.com'
|
py | 1a2fc8666597869658455165ed41e12d1dcdac07 |
"""
Runs one instance of the Atari environment and optimizes using DQN algorithm.
Can use a GPU for the agent (applies to both sample and train). No parallelism
employed, so everything happens in one python process; can be easier to debug.
The kwarg snapshot_mode="last" to logger context will save the latest model at
every log point (see inside the logger for other options).
In viskit, whatever (nested) key-value pairs appear in config will become plottable
keys for showing several experiments. If you need to add more after an experiment,
use rlpyt.utils.logging.context.add_exp_param().
"""
from rlpyt.samplers.serial.sampler import SerialSampler
from rlpyt.envs.atari.atari_env import AtariEnv, AtariTrajInfo
from rlpyt.algos.dqn.dqn import DQN
from rlpyt.agents.dqn.atari.atari_dqn_agent import AtariDqnAgent
from rlpyt.runners.minibatch_rl import MinibatchRlEval
from rlpyt.utils.logging.context import logger_context
from polyaxon_client.tracking import get_outputs_path
def build_and_train(game="pong", run_ID=0, cuda_idx=None):
sampler = SerialSampler(
EnvCls=AtariEnv,
TrajInfoCls=AtariTrajInfo, # default traj info + GameScore
env_kwargs=dict(game=game),
eval_env_kwargs=dict(game=game),
batch_T=4, # Four time-steps per sampler iteration.
batch_B=1,
max_decorrelation_steps=0,
eval_n_envs=10,
eval_max_steps=int(10e3),
eval_max_trajectories=5,
)
algo = DQN(min_steps_learn=1e3) # Run with defaults.
agent = AtariDqnAgent()
runner = MinibatchRlEval(
algo=algo,
agent=agent,
sampler=sampler,
n_steps=50e6,
log_interval_steps=1e3,
affinity=dict(cuda_idx=cuda_idx),
)
config = dict(game=game)
name = "dqn_" + game
#log_dir = "example_1"
log_dir = get_outputs_path()
with logger_context(log_dir, run_ID, name, config, snapshot_mode="last"):
runner.train()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--game', help='Atari game', default='pong')
parser.add_argument('--run_ID', help='run identifier (logging)', type=int, default=0)
parser.add_argument('--cuda_idx', help='gpu to use ', type=int, default=1)
args = parser.parse_args()
build_and_train(
game=args.game,
run_ID=args.run_ID,
cuda_idx=args.cuda_idx,
)
|
py | 1a2fc8b876c35404d5fc43bc110195fcfed87476 | """distutils.command.build_scripts
Implements the Distutils 'build_scripts' command."""
__revision__ = "$Id: build_scripts.py 77704 2010-01-23 09:23:15Z tarek.ziade $"
import os, re
from stat import ST_MODE
from distutils.core import Command
from distutils.dep_util import newer
from distutils.util import convert_path
from distutils import log
# check if Python is called on the first line with this expression
first_line_re = re.compile('^#!.*python[0-9.]*([ \t].*)?$')
class build_scripts (Command):
description = "\"build\" scripts (copy and fixup #! line)"
user_options = [
('build-dir=', 'd', "directory to \"build\" (copy) to"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
('executable=', 'e', "specify final destination interpreter path"),
]
boolean_options = ['force']
def initialize_options (self):
self.build_dir = None
self.scripts = None
self.force = None
self.executable = None
self.outfiles = None
def finalize_options (self):
self.set_undefined_options('build',
('build_scripts', 'build_dir'),
('force', 'force'),
('executable', 'executable'))
self.scripts = self.distribution.scripts
def get_source_files(self):
return self.scripts
def run (self):
if not self.scripts:
return
self.copy_scripts()
def copy_scripts (self):
"""Copy each script listed in 'self.scripts'; if it's marked as a
Python script in the Unix way (first line matches 'first_line_re',
i.e. starts with "#!" and contains "python"), then adjust the first
line to refer to the current Python interpreter as we copy.
"""
_sysconfig = __import__('sysconfig')
self.mkpath(self.build_dir)
outfiles = []
for script in self.scripts:
adjust = 0
script = convert_path(script)
outfile = os.path.join(self.build_dir, os.path.basename(script))
outfiles.append(outfile)
if not self.force and not newer(script, outfile):
log.debug("not copying %s (up-to-date)", script)
continue
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, "r")
except IOError:
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line:
self.warn("%s is an empty file (skipping)" % script)
continue
match = first_line_re.match(first_line)
if match:
adjust = 1
post_interp = match.group(1) or ''
if adjust:
log.info("copying and adjusting %s -> %s", script,
self.build_dir)
if not self.dry_run:
outf = open(outfile, "w")
if not _sysconfig.is_python_build():
outf.write("#!%s%s\n" %
(self.executable,
post_interp))
else:
outf.write("#!%s%s\n" %
(os.path.join(
_sysconfig.get_config_var("BINDIR"),
"python%s%s" % (_sysconfig.get_config_var("VERSION"),
_sysconfig.get_config_var("EXE"))),
post_interp))
outf.writelines(f.readlines())
outf.close()
if f:
f.close()
else:
if f:
f.close()
self.copy_file(script, outfile)
if os.name == 'posix':
for file in outfiles:
if self.dry_run:
log.info("changing mode of %s", file)
else:
oldmode = os.stat(file)[ST_MODE] & 07777
newmode = (oldmode | 0555) & 07777
if newmode != oldmode:
log.info("changing mode of %s from %o to %o",
file, oldmode, newmode)
os.chmod(file, newmode)
# copy_scripts ()
# class build_scripts
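# Usage sketch (illustrative, not part of distutils): this command is normally
# driven by a setup.py that lists entry scripts, e.g.
#
#   from distutils.core import setup
#   setup(name="mytool", version="1.0", scripts=["bin/mytool"])
#
# and is then run as
#
#   python setup.py build_scripts --executable=/usr/bin/python
#
# which copies each script into the build directory, rewriting a leading
# "#!...python..." line (matched by first_line_re above) to the chosen interpreter.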
|
py | 1a2fc8c2af2a2668dba79d4cb1d14088e7ae64ca | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkPeeringsOperations:
"""VirtualNetworkPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
virtual_network_peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
virtual_network_peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
virtual_network_peering_name: str,
**kwargs
) -> "models.VirtualNetworkPeering":
"""Gets the specified virtual network peering.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the virtual network peering.
:type virtual_network_peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
virtual_network_peering_name: str,
virtual_network_peering_parameters: "models.VirtualNetworkPeering",
**kwargs
) -> "models.VirtualNetworkPeering":
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'virtualNetworkPeeringName': self._serialize.url("virtual_network_peering_name", virtual_network_peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_network_peering_parameters, 'VirtualNetworkPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
virtual_network_peering_name: str,
virtual_network_peering_parameters: "models.VirtualNetworkPeering",
**kwargs
) -> AsyncLROPoller["models.VirtualNetworkPeering"]:
"""Creates or updates a peering in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param virtual_network_peering_name: The name of the peering.
:type virtual_network_peering_name: str
:param virtual_network_peering_parameters: Parameters supplied to the create or update virtual
network peering operation.
:type virtual_network_peering_parameters: ~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetworkPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
virtual_network_peering_name=virtual_network_peering_name,
virtual_network_peering_parameters=virtual_network_peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}'} # type: ignore
def list(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> AsyncIterable["models.VirtualNetworkPeeringListResult"]:
"""Gets all virtual network peerings in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkPeeringListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.VirtualNetworkPeeringListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkPeeringListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkPeeringListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings'} # type: ignore
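# Illustrative usage sketch (not generated code; resource names are placeholders):
# these operations are normally reached through the async NetworkManagementClient.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.network.aio import NetworkManagementClient
#
#   async def example(peering_params):  # peering_params: a VirtualNetworkPeering model
#       async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           poller = await client.virtual_network_peerings.begin_create_or_update(
#               "my-rg", "vnet-a", "peer-to-b", peering_params)
#           peering = await poller.result()   # wait for the long-running operation
#           async for p in client.virtual_network_peerings.list("my-rg", "vnet-a"):
#               print(p.name)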
|
py | 1a2fc8d575d251990336801076d181ea0608fc89 | from datetime import datetime
from pathlib import Path
from tkinter import *
from tkinter import filedialog
from docxtpl import DocxTemplate
import xlrd
import os
import configparser
import sys
def resource_path(relative_path):
if getattr(sys, 'frozen', False):
base_path = sys._MEIPASS
else:
base_path = os.path.dirname(os.path.abspath(__file__))
print(os.path.join(base_path, relative_path))
return os.path.join(base_path, relative_path)
def valid_count():
config = configparser.ConfigParser()
config.read(resource_path(os.path.join('res', 'conf.ini')), encoding="utf8")
return config.getint("sys_config", "totalCount"), config.getint("sys_config", "usedCount")
def update_valid(count):
config = configparser.ConfigParser()
config.read(resource_path(os.path.join('res', 'conf.ini')), encoding="utf8")
config.set("sys_config", "usedCount", repr(count))
config.write(open(resource_path(os.path.join('res', 'conf.ini')), "w"))
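# Expected layout of res/conf.ini, inferred from valid_count()/update_valid()
# above (values are illustrative): a [sys_config] section holding the report
# quota and the running count of reports already generated.
#
#   [sys_config]
#   totalCount = 1000
#   usedCount = 0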
class Application(Frame):
def __init__(self, master=None):
Frame.__init__(self, master, bg='white')
self.pack(expand=YES, fill=BOTH)
self.window_init()
self.createWidgets()
def window_init(self):
self.master.title('报告批处理系统')
self.master.bg = 'white'
width, height = self.master.maxsize()
self.master.geometry("{}x{}".format(500, 500))
def createWidgets(self):
# # fm1
self.fm1 = Frame(self, bg='white')
self.openButton = Button(self.fm1, text='选择表格文件', bg='#e4e4e5', fg='black', font=('微软雅黑', 12),
command=self.fileOpen)
self.openButton.pack(expand=YES)
self.fm1.pack(side=TOP, pady=10, expand=NO, fill='x')
# fm2
self.fm2 = Frame(self, bg='white')
self.predictEntry = Text(self.fm2, font=('微软雅黑', 10), fg='#FF4081', state=DISABLED)
self.predictEntry.pack(side=LEFT, fill='y', padx=20, expand=YES)
self.fm2.pack(side=TOP, expand=YES, fill="y")
def output_predict_sentence(self, r):
# self.predictEntry.delete(0, END)
self.predictEntry.config(state=NORMAL)
self.predictEntry.insert(INSERT, r + "\n")
self.predictEntry.config(state=DISABLED)
def fileOpen(self):
fileName = filedialog.askopenfilename(title='选择表格文件', filetypes=[('Excel', '*.xlsx')])
self.read_excel(fileName)
# self.output_predict_sentence("结束")
def read_excel(self, fileName):
try:
self.output_predict_sentence("选择文件为:" + fileName)
my_file = Path(fileName)
if not my_file.exists():
    # stop early instead of falling through with a missing file
    self.output_predict_sentence("文件不存在,重新选择文件!")
    return
my_dir_name = fileName.replace('.xlsx', '')
my_dir = Path(my_dir_name)
if my_dir.exists():
pass
else:
os.makedirs(my_dir)
# self.output_predict_sentence("创建存储目录")
# Open the Excel workbook
x1 = xlrd.open_workbook(fileName)
# Open the first sheet
table = x1.sheet_by_index(0)
nrows = table.nrows
validCount = valid_count()
if nrows - 2 + validCount[1] > validCount[0]:
self.output_predict_sentence('数据异常,联系开发人员!')
return
self.output_predict_sentence('预计生成报告数:' + str(nrows - 2))
self.output_predict_sentence("开始生成报告!")
for i in range(nrows - 2):
reqTimeStr = str(table.cell_value(i + 2, 0)).strip()
companyName = table.cell_value(i + 2, 1)
if companyName is None:
break
productNumber = str(table.cell_value(i + 2, 2)).strip()
SCCJ = str(table.cell_value(i + 2, 3)).strip()
productName = str(table.cell_value(i + 2, 4)).strip()
productTime = table.cell_value(i + 2, 5)
PH = table.cell_value(i + 2, 6)
LC = str(table.cell_value(i + 2, 7)).strip()
GCZCH = table.cell_value(i + 2, 8)
YJZH = table.cell_value(i + 2, 9)
CYWZ = str(table.cell_value(i + 2, 10)).strip()
GH = str(table.cell_value(i + 2, 11)).strip()
reportTime = str(table.cell_value(i + 2, 12)).strip()
# Convert the date strings into datetime objects
reqTime = datetime.strptime(reqTimeStr, '%Y.%m.%d')
reportTime = datetime.strptime(reportTime, '%Y.%m.%d')
tpl = DocxTemplate(resource_path(os.path.join('res', 'tempdoc.docx')))
context = {
'companyName': companyName,
'productNumber': productNumber,
# 'SCCJ': SCCJ,
# 'productName': productName,
# 'productTime': productTime,
# 'PH': PH,
# 'LC': LC,
# 'GCZCH': GCZCH,
# 'YJZH': YJZH,
'CYWZ': CYWZ,
'GH': GH,
'reqTime': "{0:%Y}.{0:%m}.{0:%d}".format(reqTime),
'checkTime': "{0:%Y}.{0:%m}.{0:%d}".format(reqTime),
'reportTime': "{0:%Y}.{0:%m}.{0:%d}".format(reportTime),
}
if productName == 'None':
context['productName'] = ''
else:
context['productName'] = productName
if LC == 'None':
context['LC'] = ''
else:
context['LC'] = LC
if productTime is None:
context['productTime'] = ''
else:
if isinstance(productTime, float):
context['productTime'] = int(float(productTime))
elif isinstance(productTime, int):
context['productTime'] = int(productTime)
else:
context['productTime'] = str(
productTime).replace('00:00:00+00:00', '')
if PH is None:
context['PH'] = ''
else:
if isinstance(PH, float):
context['PH'] = int(float(PH))
else:
context['PH'] = PH
if SCCJ == 'None':
context['SCCJ'] = ''
else:
context['SCCJ'] = SCCJ
if YJZH is None:
context['YJZH'] = ''
else:
if isinstance(YJZH, float):
context['YJZH'] = int(float(YJZH))
else:
context['YJZH'] = YJZH
if GCZCH is None:
context['GCZCH'] = ''
else:
if isinstance(GCZCH, float):
context['GCZCH'] = int(float(GCZCH))
else:
context['GCZCH'] = GCZCH
temp = str(i + 1)
saveFileName = my_dir_name + '/' + \
companyName.replace('有限公司', '').strip() + '_' + \
GH + "_" + temp + '.docx'
# self.output_predict_sentence("第" + temp + "文件:" + saveFileName)
tpl.render(context)
tpl.save(saveFileName)
update_valid(nrows - 2 + validCount[1])
self.output_predict_sentence("报告生成结束,共生成报告:" + repr(nrows - 2))
except Exception as err:
blogpath = resource_path(os.path.join('res', 'log_err.txt'))
f = open(blogpath, 'w+')
f.writelines(repr(err))
f.close()
self.output_predict_sentence("报告生成失败,原因:" + repr(err))
if __name__ == '__main__':
app = Application()
app.mainloop()
|
py | 1a2fc9698da2f9cf357773e84edacbb806889435 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
# pylint: disable=E1101,E1103,W0231
from numpy import nan, ndarray
import numpy as np
import warnings
import operator
from pandas.core.common import isnull, _values_from_object, _maybe_match_name
from pandas.core.index import Index, _ensure_index
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.internals import SingleBlockManager
from pandas.core import generic
import pandas.core.common as com
import pandas.core.ops as ops
import pandas.index as _index
from pandas.sparse.array import (make_sparse, _sparse_array_op, SparseArray)
from pandas._sparse import BlockIndex, IntIndex
import pandas._sparse as splib
from pandas.sparse.scipy_sparse import (_sparse_series_to_coo,
_coo_to_sparse_series)
# -----------------------------------------------------------------------------
# Wrapper function for Series arithmetic methods
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
str_rep, default_axis, fill_zeros and eval_kwargs are not used, but are
present for compatibility.
"""
def wrapper(self, other):
if isinstance(other, Series):
if not isinstance(other, SparseSeries):
other = other.to_sparse(fill_value=self.fill_value)
return _sparse_series_op(self, other, op, name)
elif isinstance(other, DataFrame):
return NotImplemented
elif np.isscalar(other):
if isnull(other) or isnull(self.fill_value):
new_fill_value = np.nan
else:
new_fill_value = op(np.float64(self.fill_value),
np.float64(other))
return SparseSeries(op(self.sp_values, other),
index=self.index,
sparse_index=self.sp_index,
fill_value=new_fill_value,
name=self.name)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
wrapper.__name__ = name
if name.startswith("__"):
# strip special method names, e.g. `__add__` needs to be `add` when
# passed to _sparse_series_op
name = name[2:-2]
return wrapper
def _sparse_series_op(left, right, op, name):
left, right = left.align(right, join='outer', copy=False)
new_index = left.index
new_name = _maybe_match_name(left, right)
result = _sparse_array_op(left, right, op, name)
return SparseSeries(result, index=new_index, name=new_name)
class SparseSeries(Series):
"""Data structure for labeled, sparse floating point data
Parameters
----------
data : {array-like, Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Defaults to NaN (code for missing)
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseSeries objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
"""
_subtyp = 'sparse_series'
def __init__(self, data=None, index=None, sparse_index=None, kind='block',
fill_value=None, name=None, dtype=None, copy=False,
fastpath=False):
# we are called internally, so short-circuit
if fastpath:
# data is an ndarray, index is defined
data = SingleBlockManager(data, index, fastpath=True)
if copy:
data = data.copy()
else:
if data is None:
data = []
if isinstance(data, Series) and name is None:
name = data.name
is_sparse_array = isinstance(data, SparseArray)
if fill_value is None:
if is_sparse_array:
fill_value = data.fill_value
else:
fill_value = nan
if is_sparse_array:
if isinstance(data, SparseSeries) and index is None:
index = data.index.view()
elif index is not None:
assert (len(index) == len(data))
sparse_index = data.sp_index
data = np.asarray(data)
elif isinstance(data, SparseSeries):
if index is None:
index = data.index.view()
# extract the SingleBlockManager
data = data._data
elif isinstance(data, (Series, dict)):
if index is None:
index = data.index.view()
data = Series(data)
data, sparse_index = make_sparse(data, kind=kind,
fill_value=fill_value)
elif isinstance(data, (tuple, list, np.ndarray)):
# array-like
if sparse_index is None:
data, sparse_index = make_sparse(data, kind=kind,
fill_value=fill_value)
else:
assert (len(data) == sparse_index.npoints)
elif isinstance(data, SingleBlockManager):
if dtype is not None:
data = data.astype(dtype)
if index is None:
index = data.index.view()
else:
data = data.reindex(index, copy=False)
else:
length = len(index)
if data == fill_value or (isnull(data) and isnull(fill_value)):
if kind == 'block':
sparse_index = BlockIndex(length, [], [])
else:
sparse_index = IntIndex(length, [])
data = np.array([])
else:
if kind == 'block':
locs, lens = ([0], [length]) if length else ([], [])
sparse_index = BlockIndex(length, locs, lens)
else:
sparse_index = IntIndex(length, index)
v = data
data = np.empty(length)
data.fill(v)
if index is None:
index = com._default_index(sparse_index.length)
index = _ensure_index(index)
# create/copy the manager
if isinstance(data, SingleBlockManager):
if copy:
data = data.copy()
else:
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype,
copy=copy)
data = SingleBlockManager(data, index)
generic.NDFrame.__init__(self, data)
self.index = index
self.name = name
@property
def values(self):
""" return the array """
return self.block.values
def __array__(self, result=None):
""" the array interface, return my values """
return self.block.values
def get_values(self):
""" same as values """
return self.block.to_dense().view()
@property
def block(self):
return self._data._block
@property
def fill_value(self):
return self.block.fill_value
@fill_value.setter
def fill_value(self, v):
self.block.fill_value = v
@property
def sp_index(self):
return self.block.sp_index
@property
def sp_values(self):
return self.values.sp_values
@property
def npoints(self):
return self.sp_index.npoints
@classmethod
def from_array(cls, arr, index=None, name=None, copy=False,
fill_value=None, fastpath=False):
"""
Simplified alternate constructor
"""
return cls(arr, index=index, name=name, copy=copy,
fill_value=fill_value, fastpath=fastpath)
@property
def _constructor(self):
return SparseSeries
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def as_sparse_array(self, kind=None, fill_value=None, copy=False):
""" return my self as a sparse array, do not copy by default """
if fill_value is None:
fill_value = self.fill_value
if kind is None:
kind = self.kind
return SparseArray(self.values, sparse_index=self.sp_index,
fill_value=fill_value, kind=kind, copy=copy)
def __len__(self):
return len(self.block)
def __unicode__(self):
# currently, unicode is same as repr...fixes infinite loop
series_rep = Series.__unicode__(self)
rep = '%s\n%s' % (series_rep, repr(self.sp_index))
return rep
def __array_wrap__(self, result):
"""
Gets called prior to a ufunc (and after)
"""
return self._constructor(result, index=self.index,
sparse_index=self.sp_index,
fill_value=self.fill_value,
copy=False).__finalize__(self)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.name = getattr(obj, 'name', None)
self.fill_value = getattr(obj, 'fill_value', None)
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
""" perform a reduction operation """
return op(self.get_values(), skipna=skipna, **kwds)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
fill_value=self.fill_value, name=self.name)
def _unpickle_series_compat(self, state):
nd_state, own_state = state
# recreate the ndarray
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
index, fill_value, sp_index = own_state[:3]
name = None
if len(own_state) > 3:
name = own_state[3]
# create a sparse array
if not isinstance(data, SparseArray):
data = SparseArray(data, sparse_index=sp_index,
fill_value=fill_value, copy=False)
# recreate
data = SingleBlockManager(data, index, fastpath=True)
generic.NDFrame.__init__(self, data)
self._set_axis(0, index)
self.name = name
def __iter__(self):
""" forward to the array """
return iter(self.values)
def _set_subtyp(self, is_all_dates):
if is_all_dates:
object.__setattr__(self, '_subtyp', 'sparse_time_series')
else:
object.__setattr__(self, '_subtyp', 'sparse_series')
def _get_val_at(self, loc):
""" forward to the array """
return self.block.values._get_val_at(loc)
def __getitem__(self, key):
"""
"""
try:
return self._get_val_at(self.index.get_loc(key))
except KeyError:
if isinstance(key, (int, np.integer)):
return self._get_val_at(key)
raise Exception('Requested index not in this series!')
except TypeError:
# Could not hash item, must be array-like?
pass
# is there a case where this would NOT be an ndarray?
# need to find an example, I took out the case for now
key = _values_from_object(key)
dataSlice = self.values[key]
new_index = Index(self.index.view(ndarray)[key])
return self._constructor(dataSlice, index=new_index).__finalize__(self)
def _set_with_engine(self, key, value):
return self.set_value(key, value)
def abs(self):
"""
Return an object with absolute value taken. Only applicable to objects
that are all numeric
Returns
-------
abs: type of caller
"""
res_sp_values = np.abs(self.sp_values)
return self._constructor(res_sp_values, index=self.index,
sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
def get(self, label, default=None):
"""
Returns value occupying requested label, default to specified
missing value if not present. Analogous to dict.get
Parameters
----------
label : object
Label value looking for
default : object, optional
Value to return if label not in index
Returns
-------
y : scalar
"""
if label in self.index:
loc = self.index.get_loc(label)
return self._get_val_at(loc)
else:
return default
def get_value(self, label, takeable=False):
"""
Retrieve single value at passed index label
Parameters
----------
index : label
takeable : interpret the index as indexers, default False
Returns
-------
value : scalar value
"""
loc = label if takeable is True else self.index.get_loc(label)
return self._get_val_at(loc)
def set_value(self, label, value, takeable=False):
"""
Quickly set single value at passed label. If label is not contained, a
new object is created with the label placed at the end of the result
index
Parameters
----------
label : object
Partial indexing with MultiIndex not allowed
value : object
Scalar value
takeable : interpret the index as indexers, default False
Notes
-----
This method *always* returns a new object. It is not particularly
efficient but is provided for API compatibility with Series
Returns
-------
series : SparseSeries
"""
values = self.to_dense()
# if the label doesn't exist, we will create a new object here
# and possibily change the index
new_values = values.set_value(label, value, takeable=takeable)
if new_values is not None:
values = new_values
new_index = values.index
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, new_index)
self._index = new_index
def _set_values(self, key, value):
# this might be inefficient as we have to recreate the sparse array
# rather than setting individual elements, but have to convert
# the passed slice/boolean that's in dense space into a sparse indexer
# not sure how to do that!
if isinstance(key, Series):
key = key.values
values = self.values.to_dense()
values[key] = _index.convert_scalar(values, value)
values = SparseArray(values, fill_value=self.fill_value,
kind=self.kind)
self._data = SingleBlockManager(values, self.index)
def to_dense(self, sparse_only=False):
"""
Convert SparseSeries to (dense) Series
"""
if sparse_only:
int_index = self.sp_index.to_int_index()
index = self.index.take(int_index.indices)
return Series(self.sp_values, index=index, name=self.name)
else:
return Series(self.values.to_dense(), index=self.index,
name=self.name)
@property
def density(self):
r = float(self.sp_index.npoints) / float(self.sp_index.length)
return r
def copy(self, deep=True):
"""
Make a copy of the SparseSeries. Only the actual sparse values need to
be copied
"""
new_data = self._data
if deep:
new_data = self._data.copy()
return self._constructor(new_data, sparse_index=self.sp_index,
fill_value=self.fill_value).__finalize__(self)
def reindex(self, index=None, method=None, copy=True, limit=None):
"""
Conform SparseSeries to new Index
See Series.reindex docstring for general behavior
Returns
-------
reindexed : SparseSeries
"""
new_index = _ensure_index(index)
if self.index.equals(new_index):
if copy:
return self.copy()
else:
return self
return self._constructor(self._data.reindex(new_index, method=method,
limit=limit, copy=copy),
index=new_index).__finalize__(self)
def sparse_reindex(self, new_index):
"""
Conform sparse values to new SparseIndex
Parameters
----------
new_index : {BlockIndex, IntIndex}
Returns
-------
reindexed : SparseSeries
"""
if not isinstance(new_index, splib.SparseIndex):
raise TypeError('new index must be a SparseIndex')
block = self.block.sparse_reindex(new_index)
new_data = SingleBlockManager(block, self.index)
return self._constructor(new_data, index=self.index,
sparse_index=new_index,
fill_value=self.fill_value).__finalize__(self)
def take(self, indices, axis=0, convert=True):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
new_values = SparseArray.take(self.values, indices)
new_index = self.index.take(indices)
return self._constructor(new_values,
index=new_index).__finalize__(self)
def cumsum(self, axis=0, dtype=None, out=None):
"""
Cumulative sum of values. Preserves locations of NaN values
Returns
-------
cumsum : Series or SparseSeries
"""
new_array = SparseArray.cumsum(self.values)
if isinstance(new_array, SparseArray):
return self._constructor(
new_array, index=self.index,
sparse_index=new_array.sp_index).__finalize__(self)
return Series(new_array, index=self.index).__finalize__(self)
def dropna(self, axis=0, inplace=False, **kwargs):
"""
Analogous to Series.dropna. If fill_value=NaN, returns a dense Series
"""
# TODO: make more efficient
axis = self._get_axis_number(axis or 0)
dense_valid = self.to_dense().valid()
if inplace:
raise NotImplementedError("Cannot perform inplace dropna"
" operations on a SparseSeries")
if isnull(self.fill_value):
return dense_valid
else:
dense_valid = dense_valid[dense_valid != self.fill_value]
return dense_valid.to_sparse(fill_value=self.fill_value)
def shift(self, periods, freq=None):
"""
Analogous to Series.shift
"""
# no special handling of fill values yet
if not isnull(self.fill_value):
# the original code passed an undefined ``kwds`` here, which raised a NameError
# at runtime; forward only the known arguments
dense_shifted = self.to_dense().shift(periods, freq=freq)
return dense_shifted.to_sparse(fill_value=self.fill_value,
kind=self.kind)
if periods == 0:
return self.copy()
if freq is not None:
return self._constructor(
self.sp_values, sparse_index=self.sp_index,
index=self.index.shift(periods, freq),
fill_value=self.fill_value).__finalize__(self)
int_index = self.sp_index.to_int_index()
new_indices = int_index.indices + periods
start, end = new_indices.searchsorted([0, int_index.length])
new_indices = new_indices[start:end]
new_sp_index = IntIndex(len(self), new_indices)
if isinstance(self.sp_index, BlockIndex):
new_sp_index = new_sp_index.to_block_index()
return self._constructor(self.sp_values[start:end].copy(),
index=self.index, sparse_index=new_sp_index,
fill_value=self.fill_value).__finalize__(self)
def combine_first(self, other):
"""
Combine Series values, choosing the calling Series's values
first. Result index will be the union of the two indexes
Parameters
----------
other : Series
Returns
-------
y : Series
"""
if isinstance(other, SparseSeries):
other = other.to_dense()
dense_combined = self.to_dense().combine_first(other)
return dense_combined.to_sparse(fill_value=self.fill_value)
def to_coo(self, row_levels=(0, ), column_levels=(1, ), sort_labels=False):
"""
Create a scipy.sparse.coo_matrix from a SparseSeries with MultiIndex.
Use row_levels and column_levels to determine the row and column
coordinates respectively. row_levels and column_levels are the names
(labels) or numbers of the levels. {row_levels, column_levels} must be
a partition of the MultiIndex level names (or numbers).
.. versionadded:: 0.16.0
Parameters
----------
row_levels : tuple/list
column_levels : tuple/list
sort_labels : bool, default False
Sort the row and column labels before forming the sparse matrix.
Returns
-------
y : scipy.sparse.coo_matrix
rows : list (row labels)
columns : list (column labels)
Examples
--------
>>> from numpy import nan
>>> s = Series([3.0, nan, 1.0, 3.0, nan, nan])
>>> s.index = MultiIndex.from_tuples([(1, 2, 'a', 0),
(1, 2, 'a', 1),
(1, 1, 'b', 0),
(1, 1, 'b', 1),
(2, 1, 'b', 0),
(2, 1, 'b', 1)],
names=['A', 'B', 'C', 'D'])
>>> ss = s.to_sparse()
>>> A, rows, columns = ss.to_coo(row_levels=['A', 'B'],
column_levels=['C', 'D'],
sort_labels=True)
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 3.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> rows
[(1, 1), (1, 2), (2, 1)]
>>> columns
[('a', 0), ('a', 1), ('b', 0), ('b', 1)]
"""
A, rows, columns = _sparse_series_to_coo(self, row_levels,
column_levels,
sort_labels=sort_labels)
return A, rows, columns
@classmethod
def from_coo(cls, A, dense_index=False):
"""
Create a SparseSeries from a scipy.sparse.coo_matrix.
.. versionadded:: 0.16.0
Parameters
----------
A : scipy.sparse.coo_matrix
dense_index : bool, default False
If False (default), the SparseSeries index consists of only the
coords of the non-null entries of the original coo_matrix.
If True, the SparseSeries index consists of the full sorted
(row, col) coordinates of the coo_matrix.
Returns
-------
s : SparseSeries
Examples
---------
>>> from scipy import sparse
>>> A = sparse.coo_matrix(([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])),
shape=(3, 4))
>>> A
<3x4 sparse matrix of type '<class 'numpy.float64'>'
with 3 stored elements in COOrdinate format>
>>> A.todense()
matrix([[ 0., 0., 1., 2.],
[ 3., 0., 0., 0.],
[ 0., 0., 0., 0.]])
>>> ss = SparseSeries.from_coo(A)
>>> ss
0 2 1
3 2
1 0 3
dtype: float64
BlockIndex
Block locations: array([0], dtype=int32)
Block lengths: array([3], dtype=int32)
"""
return _coo_to_sparse_series(A, dense_index=dense_index)
# overwrite series methods with unaccelerated versions
ops.add_special_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_special_funcs)
ops.add_flex_arithmetic_methods(SparseSeries, use_numexpr=False,
**ops.series_flex_funcs)
# overwrite basic arithmetic to use SparseSeries version
# force methods to overwrite previous definitions.
ops.add_special_arithmetic_methods(SparseSeries, _arith_method,
radd_func=operator.add, comp_method=None,
bool_method=None, use_numexpr=False,
force=True)
# backwards compatibility
class SparseTimeSeries(SparseSeries):
def __init__(self, *args, **kwargs):
# deprecation TimeSeries, #10890
warnings.warn("SparseTimeSeries is deprecated. Please use "
"SparseSeries", FutureWarning, stacklevel=2)
super(SparseTimeSeries, self).__init__(*args, **kwargs)
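# Minimal usage sketch (illustrative; this legacy API was removed in pandas 1.0):
#
#   import numpy as np
#   from pandas import SparseSeries
#
#   s = SparseSeries([1.0, np.nan, np.nan, 3.0], kind='block')
#   s.sp_values    # array([1., 3.]) -- only non-fill values are stored
#   s.density      # 0.5
#   s.to_dense()   # back to a regular (dense) Series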
|
py | 1a2fca522ef32aec771028190c04f3120ce95cb1 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""BYOL tasks."""
import random
from typing import Any, Callable, Dict, Optional, Tuple, cast
import torch
import torch.nn.functional as F
from kornia import augmentation as K
from kornia import filters
from kornia.geometry import transform as KorniaTransform
from pytorch_lightning.core.lightning import LightningModule
from torch import Tensor, optim
from torch.autograd import Variable
from torch.nn.modules import BatchNorm1d, Conv2d, Linear, Module, ReLU, Sequential
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchvision.models import resnet18
from torchvision.models.resnet import resnet50
# https://github.com/pytorch/pytorch/issues/60979
# https://github.com/pytorch/pytorch/pull/61045
Module.__module__ = "torch.nn"
def normalized_mse(x: Tensor, y: Tensor) -> Tensor:
"""Computes the normalized mean squared error between x and y.
Args:
x: tensor x
y: tensor y
Returns:
the normalized MSE between x and y
"""
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
mse = torch.mean(2 - 2 * (x * y).sum(dim=-1))
return mse
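# Note: because x and y are L2-normalized above, 2 - 2 * (x * y).sum(-1) equals
# ||x - y||^2, so this is the per-sample squared distance used in the BYOL paper,
# averaged over the batch.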
# TODO: Move this to transforms
class RandomApply(Module):
"""Applies augmentation function (augm) with probability p."""
def __init__(self, augm: Callable[[Tensor], Tensor], p: float) -> None:
"""Initialize RandomApply.
Args:
augm: augmentation function to apply
p: probability with which the augmentation function is applied
"""
super().__init__()
self.augm = augm
self.p = p
def forward(self, x: Tensor) -> Tensor:
"""Applies an augmentation to the input with some probability.
Args:
x: a batch of imagery
Returns
augmented version of ``x`` with probability ``self.p`` else an un-augmented
version
"""
return x if random.random() > self.p else self.augm(x)
# TODO: This isn't _really_ applying the augmentations from SimCLR as we have
# multispectral imagery and thus can't naively apply color jittering or grayscale
# conversions. We should think more about what makes sense here.
class SimCLRAugmentation(Module):
"""A module for applying SimCLR augmentations.
SimCLR was one of the first papers to show the effectiveness of random data
augmentation in self-supervised-learning setups. See
https://arxiv.org/pdf/2002.05709.pdf for more details.
"""
def __init__(self, image_size: Tuple[int, int] = (256, 256)) -> None:
"""Initialize a module for applying SimCLR augmentations.
Args:
image_size: Tuple of integers defining the image size
"""
super().__init__()
self.size = image_size
self.augmentation = Sequential(
KorniaTransform.Resize(size=image_size, align_corners=False),
# Not suitable for multispectral adapt
# RandomApply(K.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
# K.RandomGrayscale(p=0.2),
K.RandomHorizontalFlip(),
RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
K.RandomResizedCrop(size=image_size),
)
def forward(self, x: Tensor) -> Tensor:
"""Applys SimCLR augmentations to the input tensor.
Args:
x: a batch of imagery
Returns:
an augmented batch of imagery
"""
return cast(Tensor, self.augmentation(x))
class MLP(Module):
"""MLP used in the BYOL projection head."""
def __init__(
self, dim: int, projection_size: int = 256, hidden_size: int = 4096
) -> None:
"""Initializes the MLP projection head.
Args:
dim: size of layer to project
projection_size: size of the output layer
hidden_size: size of the hidden layer
"""
super().__init__()
self.mlp = Sequential(
Linear(dim, hidden_size),
BatchNorm1d(hidden_size), # type: ignore[no-untyped-call]
ReLU(inplace=True),
Linear(hidden_size, projection_size),
)
def forward(self, x: Tensor) -> Tensor:
"""Forward pass of the MLP model.
Args:
x: batch of imagery
Returns:
embedded version of the input
"""
return cast(Tensor, self.mlp(x))
class EncoderWrapper(Module):
"""Encoder wrapper for joining a model and a projection head.
When we call .forward() on this module the following steps happen:
* The input is passed through the base model
* When the encoding layer is reached a hook is called
* The output of the encoding layer is passed through the projection head
* The forward call returns the output of the projection head
"""
def __init__(
self,
model: Module,
projection_size: int = 256,
hidden_size: int = 4096,
layer: int = -2,
) -> None:
"""Initializes EncoderWrapper.
Args:
model: model to encode
projection_size: size of the output layer of the projector MLP
hidden_size: size of hidden layer of the projector MLP
layer: layer from model to project
"""
super().__init__()
self.model = model
self.projection_size = projection_size
self.hidden_size = hidden_size
self.layer = layer
self._projector: Optional[Module] = None
self._projector_dim: Optional[int] = None
self._encoded = torch.empty(0)
self._register_hook()
@property
def projector(self) -> Module:
"""Wrapper module for the projector head."""
assert self._projector_dim is not None
if self._projector is None:
self._projector = MLP(
self._projector_dim, self.projection_size, self.hidden_size
)
return self._projector
def _hook(self, module: Any, input: Any, output: Tensor) -> None:
"""Hook to record the activations at the projection layer.
See the following docs page for more details on hooks:
https://pytorch.org/docs/stable/generated/torch.nn.modules.module.register_module_forward_hook.html
Args:
module: the calling module
input: input to the module this hook was registered to
output: output from the module this hook was registered to
"""
output = output.flatten(start_dim=1)
if self._projector_dim is None:
# If we haven't already, measure the output size
self._projector_dim = output.shape[-1]
# Project the output to get encodings, the projector model is created the first
# time this is called
self._encoded = self.projector(output)
def _register_hook(self) -> None:
"""Register a hook for layer that we will extract features from."""
layer = list(self.model.children())[self.layer]
layer.register_forward_hook(self._hook)
def forward(self, x: Tensor) -> Tensor:
"""Pass through the model, and collect the representation from our forward hook.
Args:
x: tensor of data to run through the model
Returns:
output from the model
"""
_ = self.model(x)
return self._encoded
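# Illustrative sketch (not part of torchgeo): the registered forward hook means a
# call to the wrapper returns the projected activations of the chosen layer rather
# than the backbone's usual output.
#
#   import torch
#   from torchvision.models import resnet18
#
#   wrapper = EncoderWrapper(resnet18(), projection_size=256, layer=-2)
#   z = wrapper(torch.zeros(2, 3, 256, 256))  # first call also builds the MLP projector
#   z.shape                                   # torch.Size([2, 256])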
class BYOL(Module):
"""BYOL implementation.
BYOL contains two identical encoder networks. The first is trained as usual, and its
weights are updated with each training batch. The second, "target" network, is
updated using a running average of the first encoder's weights.
See https://arxiv.org/abs/2006.07733 for more details (and please cite it if you
use it in your own work).
"""
def __init__(
self,
model: Module,
image_size: Tuple[int, int] = (256, 256),
hidden_layer: int = -2,
in_channels: int = 4,
projection_size: int = 256,
hidden_size: int = 4096,
augment_fn: Optional[Module] = None,
beta: float = 0.99,
**kwargs: Any,
) -> None:
"""Sets up a model for pre-training with BYOL using projection heads.
Args:
model: the model to pretrain using BYOL
image_size: the size of the training images
hidden_layer: the hidden layer in ``model`` to attach the projection
head to, can be the name of the layer or index of the layer
in_channels: number of input channels to the model
projection_size: size of first layer of the projection MLP
hidden_size: size of the hidden layer of the projection MLP
augment_fn: an instance of a module that performs data augmentation
beta: the speed at which the target encoder is updated using the main
encoder
"""
super().__init__()
self.augment: Module
if augment_fn is None:
self.augment = SimCLRAugmentation(image_size)
else:
self.augment = augment_fn
self.beta = beta
self.in_channels = in_channels
self.encoder = EncoderWrapper(
model, projection_size, hidden_size, layer=hidden_layer
)
self.predictor = MLP(projection_size, projection_size, hidden_size)
self.target = EncoderWrapper(
model, projection_size, hidden_size, layer=hidden_layer
)
# Perform a single forward pass to initialize the wrapper correctly
self.encoder(torch.zeros(2, self.in_channels, *image_size))
def forward(self, x: Tensor) -> Tensor:
"""Forward pass of the encoder model through the MLP and prediction head.
Args:
x: tensor of data to run through the model
Returns:
output from the model
"""
return cast(Tensor, self.predictor(self.encoder(x)))
def update_target(self) -> None:
"""Method to update the "target" model weights."""
for p, pt in zip(self.encoder.parameters(), self.target.parameters()):
pt.data = self.beta * pt.data + (1 - self.beta) * p.data
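# Illustrative sketch (not part of torchgeo): one manual BYOL step, mirroring what
# BYOLTask.training_step below does; the RGB backbone and channel count are assumptions.
#
#   import torch
#   from torchvision.models import resnet18
#
#   byol = BYOL(resnet18(), in_channels=3, image_size=(256, 256))
#   x = torch.rand(2, 3, 256, 256)
#   x1, x2 = byol.augment(x), byol.augment(x)
#   pred1, pred2 = byol(x1), byol(x2)                    # online encoder + predictor
#   with torch.no_grad():
#       targ1, targ2 = byol.target(x1), byol.target(x2)  # target encoder
#   loss = torch.mean(normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1))
#   loss.backward()
#   byol.update_target()                                 # EMA update of the target weights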
class BYOLTask(LightningModule):
"""Class for pre-training any PyTorch model using BYOL."""
def config_task(self) -> None:
"""Configures the task based on kwargs parameters passed to the constructor."""
in_channels = self.hyperparams["in_channels"]
pretrained = self.hyperparams["imagenet_pretraining"]
encoder = None
if self.hyperparams["encoder_name"] == "resnet18":
encoder = resnet18(pretrained=pretrained)
elif self.hyperparams["encoder_name"] == "resnet50":
encoder = resnet50(pretrained=pretrained)
else:
raise ValueError(
f"Encoder type '{self.hyperparams['encoder_name']}' is not valid."
)
layer = encoder.conv1
# Creating new Conv2d layer
new_layer = Conv2d(
in_channels=in_channels,
out_channels=layer.out_channels,
kernel_size=layer.kernel_size,
stride=layer.stride,
padding=layer.padding,
bias=layer.bias,
).requires_grad_()
# initialize the weights from new channel with the red channel weights
copy_weights = 0
# Copying the weights from the old to the new layer
new_layer.weight[:, : layer.in_channels, :, :].data[:] = Variable(
layer.weight.clone(), requires_grad=True
)
# Copying the weights of the old layer to the extra channels
for i in range(in_channels - layer.in_channels):
channel = layer.in_channels + i
new_layer.weight[:, channel : channel + 1, :, :].data[:] = Variable(
layer.weight[:, copy_weights : copy_weights + 1, ::].clone(),
requires_grad=True,
)
encoder.conv1 = new_layer
self.model = BYOL(encoder, in_channels=in_channels, image_size=(256, 256))
def __init__(self, **kwargs: Any) -> None:
"""Initialize a LightningModule for pre-training a model with BYOL.
Keyword Args:
in_channels: number of channels on the input imagery
encoder_name: either "resnet18" or "resnet50"
imagenet_pretraining: bool indicating whether to use imagenet pretrained
weights
Raises:
ValueError: if kwargs arguments are invalid
"""
super().__init__()
# Creates `self.hparams` from kwargs
self.save_hyperparameters() # type: ignore[operator]
self.hyperparams = cast(Dict[str, Any], self.hparams)
self.config_task()
def forward(self, *args: Any, **kwargs: Any) -> Any:
"""Forward pass of the model.
Args:
x: tensor of data to run through the model
Returns:
output from the model
"""
return self.model(*args, **kwargs)
def configure_optimizers(self) -> Dict[str, Any]:
"""Initialize the optimizer and learning rate scheduler.
Returns:
a "lr dict" according to the pytorch lightning documentation --
https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
"""
optimizer_class = getattr(optim, self.hyperparams.get("optimizer", "Adam"))
lr = self.hyperparams.get("lr", 1e-4)
weight_decay = self.hyperparams.get("weight_decay", 1e-6)
optimizer = optimizer_class(self.parameters(), lr=lr, weight_decay=weight_decay)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": ReduceLROnPlateau(
optimizer,
patience=self.hyperparams["learning_rate_schedule_patience"],
),
"monitor": "val_loss",
},
}
def training_step(self, *args: Any, **kwargs: Any) -> Tensor:
"""Compute and return the training loss.
Args:
batch: the output of your DataLoader
Returns:
training loss
"""
batch = args[0]
x = batch["image"]
with torch.no_grad():
x1, x2 = self.model.augment(x), self.model.augment(x)
pred1, pred2 = self.forward(x1), self.forward(x2)
with torch.no_grad():
targ1, targ2 = self.model.target(x1), self.model.target(x2)
loss = torch.mean(normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1))
self.log("train_loss", loss, on_step=True, on_epoch=False)
self.model.update_target()
return loss
def validation_step(self, *args: Any, **kwargs: Any) -> None:
"""Compute validation loss.
Args:
batch: the output of your DataLoader
"""
batch = args[0]
x = batch["image"]
x1, x2 = self.model.augment(x), self.model.augment(x)
pred1, pred2 = self.forward(x1), self.forward(x2)
targ1, targ2 = self.model.target(x1), self.model.target(x2)
loss = torch.mean(normalized_mse(pred1, targ2) + normalized_mse(pred2, targ1))
self.log("val_loss", loss, on_step=False, on_epoch=True)
def test_step(self, *args: Any, **kwargs: Any) -> Any:
"""No-op, does nothing."""
|
py | 1a2fca7df226f2bba9bf64f240edd3ed5bdefb39 | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Subnet(object):
def __init__(self, region=None, az=None, subnetId=None, name=None, cidr=None, vpcId=None, vpcName=None, availableIpCount=None, totalIpCount=None, networkType=None, description=None, createTime=None):
"""
:param region: (Optional) 地域代码, 如cn-east-1
:param az: (Optional) 可用区, 如cn-east-1a
:param subnetId: (Optional) 子网ID
:param name: (Optional) 子网名称
:param cidr: (Optional) 子网CIDR
:param vpcId: (Optional) 私有网络Id
:param vpcName: (Optional) 私有网络名称
:param availableIpCount: (Optional) 可用ip数量
:param totalIpCount: (Optional) 总ip数量
:param networkType: (Optional) 网络类型
:param description: (Optional) 描述
:param createTime: (Optional) 创建时间
"""
self.region = region
self.az = az
self.subnetId = subnetId
self.name = name
self.cidr = cidr
self.vpcId = vpcId
self.vpcName = vpcName
self.availableIpCount = availableIpCount
self.totalIpCount = totalIpCount
self.networkType = networkType
self.description = description
self.createTime = createTime
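# Usage sketch (illustrative, not part of the generated SDK): build a Subnet
# locally; all field values below are made-up example data.
if __name__ == "__main__":
    demo_subnet = Subnet(
        region="cn-east-1", az="cn-east-1a", subnetId="subnet-demo",
        name="demo-subnet", cidr="10.0.0.0/24",
        availableIpCount=250, totalIpCount=256)
    print(demo_subnet.__dict__)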
|
py | 1a2fcb1b2b6c20c380d179e48985d1ca52ef3f7b | expected_output = {
"1":{
"name":"1",
"type":"ipv4-acl-type",
"acl_type": "standard",
"aces":{
"10":{
"name":"10",
"actions":{
"forwarding":"deny",
"logging":"log-syslog"
},
"matches":{
"l3":{
"ipv4":{
"protocol":"ipv4",
"source_network":{
"10.9.3.4 0.0.0.0":{
"source_network":"10.9.3.4 0.0.0.0"
}
}
}
}
},
"statistics":{
"matched_packets":"18"
}
},
"20":{
"name":"20",
"actions":{
"forwarding":"permit"
},
"matches":{
"l3":{
"ipv4":{
"protocol":"ipv4",
"source_network":{
"any":{
"source_network":"any"
}
}
}
}
},
"statistics":{
"matched_packets":"58"
}
}
}
},
"meraki-fqdn-dns":{
"name":"meraki-fqdn-dns",
"type":"ipv4-acl-type",
"acl_type": "extended",
}
} |
py | 1a2fcc45162161d008efc1854c54852b37f3933d | #!/usr/bin/env python
import sys
entries = []
total_size = 0
for line in sys.stdin:
words = line.split('\t')
if len(words) != 2:
continue
sizeStr = words[1].strip('\n')
if not sizeStr.isdigit():
continue
size = float(sizeStr)
elem = {'table':words[0], 'size':size}
entries.append(elem)
total_size += size
print "Total: %d" % total_size
print "Total: %.2f (MB)" % (total_size / 1024 /1024)
for elem in entries:
print "%02.2f %s" % (elem['size']/total_size*100, elem['table'])
|
py | 1a2fcd0e841ab12011ae0ea23c66a6dcd8429797 | # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow_hub.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint:disable=g-import-not-at-top,g-statement-before-imports
try:
import mock as mock
except ImportError:
import unittest.mock as mock
# pylint:disable=g-import-not-at-top,g-statement-before-imports
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow_hub import test_utils
from tensorflow_hub import tf_v1
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.ops.lookup_ops import HashTable
from tensorflow.python.ops.lookup_ops import KeyValueTensorInitializer
# pylint: enable=g-direct-tensorflow-import
_dense_features_module = test_utils.get_dense_features_module()
def text_module_fn():
embeddings = [
("", [0, 0, 0, 0]), # OOV items are mapped to this embedding.
("hello world", [1, 2, 3, 4]),
("pair-programming", [5, 5, 5, 5]),
]
keys = tf.constant([item[0] for item in embeddings], dtype=tf.string)
indices = tf.constant(list(range(len(embeddings))), dtype=tf.int64)
tbl_init = KeyValueTensorInitializer(keys, indices)
table = HashTable(tbl_init, 0)
weights_initializer = tf.cast(
tf.constant(list([item[1] for item in embeddings])), tf.float32)
weights = tf_v1.get_variable(
"weights", dtype=tf.float32, initializer=weights_initializer)
text_tensor = tf_v1.placeholder(dtype=tf.string, name="text", shape=[None])
indices_tensor = table.lookup(text_tensor)
embedding_tensor = tf.gather(weights, indices_tensor)
hub.add_signature(inputs=text_tensor, outputs=embedding_tensor)
def invalid_text_module_fn():
text = tf_v1.placeholder(tf.string, shape=[10])
hub.add_signature(inputs=text, outputs=tf.zeros([10, 3]))
class CommonColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(text_module_fn)
@mock.patch.object(feature_column_v2._StateManagerImpl, "add_resource")
def testFeatureColumnsWithResources(self, mock_add_resource):
feature_column = hub.text_embedding_column("text_a", self.spec)
if not isinstance(feature_column, feature_column_v2.FeatureColumn):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
self.assertTrue(feature_column_v2.is_feature_column_v2([feature_column]))
@mock.patch.object(feature_column_v2._StateManagerImpl, "add_resource")
def testFeatureColumnsWithNoResources(self, mock_add_resource):
mock_add_resource.side_effect = NotImplementedError
feature_column = hub.text_embedding_column("text_a", self.spec)
self.assertFalse(feature_column_v2.is_feature_column_v2([feature_column]))
class TextEmbeddingColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(text_module_fn)
def testVariableShape(self):
text_column = hub.text_embedding_column("text", self.spec, trainable=False)
self.assertEqual(text_column._variable_shape, [4])
def testParents(self):
text_column = hub.text_embedding_column("text", self.spec, trainable=False)
self.assertEqual(["text"], text_column.parents)
def testMakeParseExampleSpec(self):
text_column = hub.text_embedding_column("text", self.spec, trainable=False)
parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])
self.assertEqual(parsing_spec,
{"text": tf_v1.FixedLenFeature([1], dtype=tf.string)})
def testInputLayer(self):
features = {
"text_a": ["hello world", "pair-programming"],
"text_b": ["hello world", "oov token"],
}
feature_columns = [
hub.text_embedding_column("text_a", self.spec, trainable=False),
hub.text_embedding_column("text_b", self.spec, trainable=False),
]
with tf.Graph().as_default():
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(
output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])
def testDenseFeatures(self):
features = {
"text_a": ["hello world", "pair-programming"],
"text_b": ["hello world", "oov token"],
}
feature_columns = [
hub.text_embedding_column("text_a", self.spec, trainable=False),
hub.text_embedding_column("text_b", self.spec, trainable=False),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out = feature_layer(features)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(feature_layer_out)
self.assertAllEqual(
output, [[1, 2, 3, 4, 1, 2, 3, 4], [5, 5, 5, 5, 0, 0, 0, 0]])
def testDenseFeatures_shareAcrossApplication(self):
features = {
"text": ["hello world", "pair-programming"],
}
feature_columns = [
hub.text_embedding_column("text", self.spec, trainable=True),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out_1 = feature_layer(features)
feature_layer_out_2 = feature_layer(features)
# We define loss only on the first layer. Since layers should have shared
# weights, we expect the second layer will change too.
loss = feature_layer_out_1 - tf.constant(0.005)
optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.7)
train_op = optimizer.minimize(loss)
with tf_v1.train.MonitoredSession() as sess:
before_update_1 = sess.run(feature_layer_out_1)
sess.run(train_op)
after_update_1 = sess.run(feature_layer_out_1)
after_update_2 = sess.run(feature_layer_out_2)
self.assertAllEqual(before_update_1, [[1, 2, 3, 4],
[5, 5, 5, 5]])
self.assertAllEqual(after_update_1, after_update_2)
def testWorksWithCannedEstimator(self):
comment_embedding_column = hub.text_embedding_column(
"comment", self.spec, trainable=False)
upvotes = tf_v1.feature_column.numeric_column("upvotes")
feature_columns = [comment_embedding_column, upvotes]
estimator = tf_v1.estimator.DNNClassifier(
hidden_units=[10],
feature_columns=feature_columns,
model_dir=self.get_temp_dir())
# This only tests that estimator apis are working with the feature
# column without throwing exceptions.
features = {
"comment": np.array([
["the quick brown fox"],
["spam spam spam"],
]),
"upvotes": np.array([
[20],
[1],
]),
}
labels = np.array([[1], [0]])
if hasattr(tf.compat, "v1"):
numpy_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn
else:
numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn
input_fn = numpy_input_fn(features, labels, shuffle=True)
estimator.train(input_fn, max_steps=1)
estimator.evaluate(input_fn, steps=1)
estimator.predict(input_fn)
def testTrainableEmbeddingColumn(self):
feature_columns = [
hub.text_embedding_column("text", self.spec, trainable=True),
]
with tf.Graph().as_default():
features = {
"text": ["hello world", "pair-programming"],
}
target = [[1, 1, 1, 1], [4, 3, 2, 1]]
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
loss = tf.cast(
tf_v1.losses.mean_squared_error(input_layer, target), tf.float64)
optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)
train_op = optimizer.minimize(loss)
with tf_v1.train.MonitoredSession() as sess:
self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])
for _ in range(10):
sess.run(train_op)
self.assertAllClose(sess.run(input_layer), target, atol=0.5)
def testInvalidTextModule(self):
spec = hub.create_module_spec(invalid_text_module_fn)
with self.assertRaisesRegexp(ValueError, "only one input"):
hub.text_embedding_column("coment", spec, trainable=False)
def create_image_module_fn(randomly_initialized=False):
def image_module_fn():
"""Maps 1x2 images to sums of each color channel."""
images = tf_v1.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3])
if randomly_initialized:
initializer = tf_v1.random_uniform_initializer(
minval=-1, maxval=1, dtype=tf.float32)
else:
initializer = tf_v1.constant_initializer(1.0, dtype=tf.float32)
weight = tf_v1.get_variable(
name="weight", shape=[1], initializer=initializer)
sum_channels = tf.reduce_sum(images, axis=[1, 2]) * weight
hub.add_signature(inputs={"images": images}, outputs=sum_channels)
return image_module_fn
class ImageEmbeddingColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(create_image_module_fn())
self.randomly_initialized_spec = hub.create_module_spec(
create_image_module_fn(randomly_initialized=True))
def testExpectedImageSize(self):
image_column = hub.image_embedding_column("image", self.spec)
# The usage comment recommends this code pattern, so we test it here.
self.assertSequenceEqual(
hub.get_expected_image_size(image_column.module_spec), [1, 2])
def testVariableShape(self):
image_column = hub.image_embedding_column("image", self.spec)
self.assertEqual(image_column.variable_shape, [3])
def testParents(self):
image_column = hub.image_embedding_column("image", self.spec)
self.assertEqual(["image"], image_column.parents)
def testMakeParseExampleSpec(self):
image_column = hub.image_embedding_column("image", self.spec)
parsing_spec = tf_v1.feature_column.make_parse_example_spec([image_column])
self.assertEqual(
parsing_spec,
{"image": tf_v1.FixedLenFeature([1, 2, 3], dtype=tf.float32)})
def testInputLayer(self):
features = {
"image_a": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
"image_b": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],
[[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],
}
feature_columns = [
hub.image_embedding_column("image_a", self.spec),
hub.image_embedding_column("image_b", self.spec),
]
with tf.Graph().as_default():
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllClose(
output,
[[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])
def testDenseFeatures(self):
features = {
"image_a": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
"image_b": [[[[0.1, 0.2, 0.1], [0.2, 0.1, 0.2]]],
[[[0.1, 0.2, 0.3], [0.3, 0.2, 0.1]]]],
}
feature_columns = [
hub.image_embedding_column("image_a", self.spec),
hub.image_embedding_column("image_b", self.spec),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out = feature_layer(features)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(feature_layer_out)
self.assertAllClose(
output,
[[0.5, 0.7, 0.9, 0.3, 0.3, 0.3], [0.8, 0.9, 1.0, 0.4, 0.4, 0.4]])
def testDenseFeatures_shareAcrossApplication(self):
features = {
"image": [[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
}
feature_columns = [
hub.image_embedding_column("image", self.randomly_initialized_spec),
]
if not feature_column_v2.is_feature_column_v2(feature_columns):
self.skipTest("Resources not implemented in the state manager of feature "
"column v2.")
with tf.Graph().as_default():
feature_layer = _dense_features_module.DenseFeatures(feature_columns)
feature_layer_out_1 = feature_layer(features)
feature_layer_out_2 = feature_layer(features)
with tf_v1.train.MonitoredSession() as sess:
output_1 = sess.run(feature_layer_out_1)
output_2 = sess.run(feature_layer_out_2)
self.assertAllClose(output_1, output_2)
def testWorksWithCannedEstimator(self):
image_column = hub.image_embedding_column("image", self.spec)
other_column = tf_v1.feature_column.numeric_column("number")
feature_columns = [image_column, other_column]
estimator = tf_v1.estimator.DNNClassifier(
hidden_units=[10],
feature_columns=feature_columns,
model_dir=self.get_temp_dir())
# This only tests that estimator apis are working with the feature
# column without throwing exceptions.
features = {
"image":
np.array([[[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]],
[[[0.7, 0.7, 0.7], [0.1, 0.2, 0.3]]]],
dtype=np.float32),
"number":
np.array([[20], [1]]),
}
labels = np.array([[1], [0]])
if hasattr(tf.compat, "v1"):
numpy_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn
else:
numpy_input_fn = tf_v1.estimator.inputs.numpy_input_fn
input_fn = numpy_input_fn(features, labels, shuffle=True)
estimator.train(input_fn, max_steps=1)
estimator.evaluate(input_fn, steps=1)
estimator.predict(input_fn)
class SparseTextEmbeddingColumnTest(tf.test.TestCase):
def setUp(self):
self.spec = hub.create_module_spec(text_module_fn)
def testVariableShape(self):
text_column = hub.sparse_text_embedding_column(
"text", self.spec, combiner="mean", default_value=None, trainable=False)
self.assertEqual(text_column._variable_shape, [4])
def testMakeParseExampleSpec(self):
text_column = hub.sparse_text_embedding_column(
"text", self.spec, combiner="mean", default_value=None, trainable=False)
parsing_spec = tf_v1.feature_column.make_parse_example_spec([text_column])
self.assertEqual(parsing_spec, {"text": tf_v1.VarLenFeature(tf.string)})
def testParents(self):
text_column = hub.sparse_text_embedding_column(
"text", self.spec, "sum", "", trainable=False)
self.assertEqual(["text"], text_column.parents)
def testInputLayer(self):
with tf.Graph().as_default():
text_a = tf.SparseTensor(
values=["hello world", "pair-programming", "hello world"],
indices=[[0, 0], [0, 1], [1, 0]],
dense_shape=[2, 2])
text_b = tf.SparseTensor(
values=["hello world", "oov token"],
indices=[[0, 0], [0, 1]],
dense_shape=[2, 3])
features = {
"text_a": text_a,
"text_b": text_b,
}
feature_columns = [
hub.sparse_text_embedding_column(
"text_a",
self.spec,
combiner="mean",
default_value="__UNKNOWN__",
trainable=False),
hub.sparse_text_embedding_column(
"text_b",
self.spec,
combiner="mean",
default_value="__UNKNOWN__",
trainable=False),
]
input_layer = tf_v1.feature_column.input_layer(features, feature_columns)
with tf_v1.train.MonitoredSession() as sess:
output = sess.run(input_layer)
self.assertAllEqual(
output,
[[3, 3.5, 4, 4.5, 0.5, 1, 1.5, 2], [1, 2, 3, 4, 0, 0, 0, 0]])
# ([1, 2, 3, 4] + [5, 5, 5, 5])/2 extend ([1, 2, 3, 4] + [0, 0, 0, 0])/2
# [1, 2, 3, 4] extend [0, 0, 0, 0]
def testTrainableEmbeddingColumn(self):
feature_columns = [
hub.sparse_text_embedding_column(
"text",
self.spec,
combiner="mean",
default_value=None,
trainable=True),
]
with tf.Graph().as_default():
text = tf.SparseTensor(
values=["hello world", "pair-programming"],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 2])
target = [[1, 1, 1, 1], [4, 3, 2, 1]]
input_layer = tf_v1.feature_column.input_layer({"text": text},
feature_columns)
loss = tf_v1.losses.mean_squared_error(input_layer, target)
optimizer = tf_v1.train.GradientDescentOptimizer(learning_rate=0.97)
train_op = optimizer.minimize(loss)
with tf_v1.train.MonitoredSession() as sess:
self.assertAllEqual(sess.run(input_layer), [[1, 2, 3, 4], [5, 5, 5, 5]])
for _ in range(10):
sess.run(train_op)
self.assertAllClose(sess.run(input_layer), target, atol=0.5)
def testEmptySparseTensorBatch(self):
feature_columns = [
hub.sparse_text_embedding_column(
"text",
self.spec,
combiner="mean",
default_value="default",
trainable=True),
]
with tf.Graph().as_default():
text = tf.SparseTensor(
values=tf_v1.constant([], dtype=tf_v1.string, shape=[0]),
indices=tf_v1.constant([], dtype=tf_v1.int64, shape=[0, 2]),
dense_shape=[3, 0])
input_layer = tf_v1.feature_column.input_layer({"text": text},
feature_columns)
with tf_v1.train.MonitoredSession() as sess:
embeddings = sess.run(input_layer)
self.assertAllEqual(embeddings,
[[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
def testEmptySparseTensorRow(self):
feature_columns = [
hub.sparse_text_embedding_column(
"text",
self.spec,
combiner="mean",
default_value="default",
trainable=True),
]
with tf.Graph().as_default():
text = tf.SparseTensor(
values=tf_v1.constant(["hello world"], dtype=tf_v1.string, shape=[1]),
indices=tf_v1.constant([[0, 0]], dtype=tf_v1.int64, shape=[1, 2]),
dense_shape=[2, 1])
input_layer = tf_v1.feature_column.input_layer({"text": text},
feature_columns)
with tf_v1.train.MonitoredSession() as sess:
embeddings = sess.run(input_layer)
self.assertAllEqual(embeddings, [[1, 2, 3, 4], [0, 0, 0, 0]])
if __name__ == "__main__":
tf.test.main()
|
py | 1a2fcd9d775da7ad6e0fbffbd09813c027c38413 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from django.template import TemplateDoesNotExist
from django.template.loader import select_template
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from js_services import models, forms
from .constants import (
IS_THERE_COMPANIES,
)
if IS_THERE_COMPANIES:
from js_companies.models import Company
@plugin_pool.register_plugin
class RelatedServicesPlugin(CMSPluginBase):
TEMPLATE_NAME = 'js_services/plugins/related_services__%s.html'
module = 'Services'
render_template = 'js_services/plugins/related_services.html'
name = _('Related Services')
model = models.RelatedServicesPlugin
form = forms.RelatedServicesPluginForm
def render(self, context, instance, placeholder):
request = context.get('request')
context['instance'] = instance
context['title'] = instance.title
context['icon'] = instance.icon
context['image'] = instance.image
context['background_color'] = instance.background_color
context['full_screen'] = instance.full_screen
qs = instance.related_services.published()
related_sections = instance.related_sections.all()
related_people = instance.related_people.all()
if IS_THERE_COMPANIES:
related_companies = instance.related_companies.all()
related_categories = instance.related_categories.all()
if not qs.exists():
selected = False
qs = models.Service.objects.published().distinct()
if related_sections.exists():
selected = True
qs = qs.filter(sections__in=related_sections)
if related_people.exists():
selected = True
qs = qs.filter(person__in=related_people)
if IS_THERE_COMPANIES and related_companies.exists():
selected = True
qs = qs.filter(companies__in=related_companies)
if related_categories.exists():
selected = True
qs = qs.filter(categories__in=related_categories)
if not selected:
qs = models.Service.objects.none()
context['related_services_all'] = qs
context['related_services'] = qs[:int(instance.count)]
return context
def get_render_template(self, context, instance, placeholder):
if instance.layout:
template = self.TEMPLATE_NAME % instance.layout
try:
select_template([template])
return template
except TemplateDoesNotExist:
pass
return self.render_template
def save_model(self, request, obj, form, change):
super().save_model(request, obj, form, change)
if IS_THERE_COMPANIES:
obj.related_companies.set(Company.objects.filter(pk__in=form.cleaned_data.get('related_companies')))
|
py | 1a2fcdd43365079c42475bbfe897908520e51365 | # -*- encoding: utf-8 -*-
def log(*args, **kwargs):
print(args, kwargs) |
py | 1a2fcde7cf410f5cc6185fb6821140640453ed7a | import sys
import yaml
import os
def getcsv(argv):
if len(argv) == 0:
print("No input files given.")
else:
flag = True
out_string = ''
keys = ['L1c', 'L1b', 'L1a', 'L2c', 'L2b', 'L2a', 'L2prf',
'TLBe', 'TLBp', 'TLBa', 'IPC',
'Total_Instructions', 'Total_Cycles',
'L1-Total-Misses', 'L1-Load-Misses', 'L1-Store-Misses',
'L2-Total-Misses', 'L2-Load-Misses', 'L2-Store-Misses',
'Tlb-Total-Misses', 'Tlb-Load-Misses', 'Tlb-Store-Misses']
header = ''
for key in keys:
header += key
header += ';'
out_string = out_string + header + '\n'
for i in range(0, len(argv)):
if os.path.exists(argv[i]):
with open(argv[i], 'r') as in_file:
l_key = ''
try:
in_stream = yaml.safe_load(in_file)
line = ''
for key in keys:
l_key = key
line += str(in_stream[key])
line += ';'
out_string = out_string + line + '\n'
except KeyError:
sys.stderr.write("--Error-- {} does not contain key: {}.\n".format(argv[i], l_key))
flag = False
else:
sys.stderr.write("File {} does not exist.".format(argv[i]))
if flag:
print('Process finished without errors.')
return out_string
else:
sys.stderr.write('Process finished with errors.' + '\n')
return False
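if __name__ == '__main__':
    # Minimal CLI sketch (assumed usage, not part of the original script):
    #   python this_script.py run1.yaml run2.yaml
    result = getcsv(sys.argv[1:])
    if result:
        print(result)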
|
py | 1a2fce9e7dc4970a5b9f7925ba30476dbc1fe827 | import random
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from .auto_augment import cutout, apply_policy
from .utils import *
class Cifar10ImageDataGenerator:
def __init__(self, args):
self.datagen = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, fill_mode='constant', cval=0, horizontal_flip=True)
self.means = np.array([0.4914009 , 0.48215896, 0.4465308])
self.stds = np.array([0.24703279, 0.24348423, 0.26158753])
self.args = args
if args.auto_augment:
self.policies = [
['Invert', 0.1, 7, 'Contrast', 0.2, 6],
['Rotate', 0.7, 2, 'TranslateX', 0.3, 9],
['Sharpness', 0.8, 1, 'Sharpness', 0.9, 3],
['ShearY', 0.5, 8, 'TranslateY', 0.7, 9],
['AutoContrast', 0.5, 8, 'Equalize', 0.9, 2],
['ShearY', 0.2, 7, 'Posterize', 0.3, 7],
['Color', 0.4, 3, 'Brightness', 0.6, 7],
['Sharpness', 0.3, 9, 'Brightness', 0.7, 9],
['Equalize', 0.6, 5, 'Equalize', 0.5, 1],
['Contrast', 0.6, 7, 'Sharpness', 0.6, 5],
['Color', 0.7, 7, 'TranslateX', 0.5, 8],
['Equalize', 0.3, 7, 'AutoContrast', 0.4, 8],
['TranslateY', 0.4, 3, 'Sharpness', 0.2, 6],
['Brightness', 0.9, 6, 'Color', 0.2, 8],
                ['Solarize', 0.5, 2, 'Invert', 0.0, 3],
['Equalize', 0.2, 0, 'AutoContrast', 0.6, 0],
['Equalize', 0.2, 8, 'Equalize', 0.6, 4],
['Color', 0.9, 9, 'Equalize', 0.6, 6],
['AutoContrast', 0.8, 4, 'Solarize', 0.2, 8],
['Brightness', 0.1, 3, 'Color', 0.7, 0],
['Solarize', 0.4, 5, 'AutoContrast', 0.9, 3],
['TranslateY', 0.9, 9, 'TranslateY', 0.7, 9],
['AutoContrast', 0.9, 2, 'Solarize', 0.8, 3],
['Equalize', 0.8, 8, 'Invert', 0.1, 3],
['TranslateY', 0.7, 9, 'AutoContrast', 0.9, 1],
]
def standardize(self, x):
x = x.astype('float32') / 255
means = self.means.reshape(1, 1, 1, 3)
stds = self.stds.reshape(1, 1, 1, 3)
x -= means
x /= (stds + 1e-6)
return x
def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None,
seed=None, save_to_dir=None, save_prefix='', save_format='png', subset=None):
batches = self.datagen.flow(x, y, batch_size, shuffle, sample_weight,
seed, save_to_dir, save_prefix, save_format, subset)
while True:
x_batch, y_batch = next(batches)
if self.args.cutout:
for i in range(x_batch.shape[0]):
x_batch[i] = cutout(x_batch[i])
if self.args.auto_augment:
x_batch = x_batch.astype('uint8')
for i in range(x_batch.shape[0]):
x_batch[i] = apply_policy(x_batch[i], self.policies[random.randrange(len(self.policies))])
x_batch = self.standardize(x_batch)
yield x_batch, y_batch
def main():
import argparse
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
parser = argparse.ArgumentParser()
parser.add_argument('--cutout', default=True, type=str2bool)
parser.add_argument('--auto-augment', default=True, type=str2bool)
args = parser.parse_args()
datagen = Cifar10ImageDataGenerator(args)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
for imgs, _ in datagen.flow(x_train, y_train):
plt.imshow(imgs[0].astype('uint8'))
plt.axis('off')
plt.show()
if __name__ == '__main__':
main()
|
py | 1a2fcead660347520d44efaf9167beda4251c435 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsersTodoListsOperations(object):
"""UsersTodoListsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_functions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def delta(
self,
user_id, # type: str
**kwargs # type: Any
):
# type: (...) -> List["models.MicrosoftGraphTodoTaskList"]
"""Invoke function delta.
Invoke function delta.
:param user_id: key: id of user.
:type user_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of MicrosoftGraphTodoTaskList, or the result of cls(response)
:rtype: list[~users_functions.models.MicrosoftGraphTodoTaskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["models.MicrosoftGraphTodoTaskList"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.delta.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[MicrosoftGraphTodoTaskList]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delta.metadata = {'url': '/users/{user-id}/todo/lists/microsoft.graph.delta()'} # type: ignore
|
py | 1a2fd193df9535e53e3f04f4eb03da31d7714631 | #!/usr/bin/env python
"""Implementation of various cryptographic types."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import binascii
import hashlib
import logging
import os
from cryptography import exceptions
from cryptography import x509
from cryptography.hazmat.backends import openssl
from cryptography.hazmat.primitives import ciphers
from cryptography.hazmat.primitives import constant_time
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import hmac
from cryptography.hazmat.primitives import padding as sym_padding
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import modes
from cryptography.hazmat.primitives.kdf import pbkdf2
from cryptography.x509 import oid
from future.builtins import str
from future.utils import string_types
from typing import Text
from grr_response_core.lib import config_lib
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import type_info
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import standard as rdf_standard
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import precondition
from grr_response_core.lib.util import random
from grr_response_proto import jobs_pb2
class Error(Exception):
pass
class VerificationError(Error):
pass
class InvalidSignature(Error):
pass
class CipherError(rdfvalue.DecodeError):
"""Raised when decryption failed."""
class Certificate(rdf_structs.RDFProtoStruct):
protobuf = jobs_pb2.Certificate
class RDFX509Cert(rdfvalue.RDFPrimitive):
"""X509 certificates used to communicate with this client."""
def __init__(self, initializer=None, age=None):
super(RDFX509Cert, self).__init__(initializer=initializer, age=age)
if self._value is None and initializer is not None:
if isinstance(initializer, x509.Certificate):
self._value = initializer
elif isinstance(initializer, bytes):
self.ParseFromString(initializer)
else:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def GetRawCertificate(self):
return self._value
def GetCN(self):
subject = self._value.subject
try:
cn_attributes = subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)
if len(cn_attributes) > 1:
raise rdfvalue.DecodeError("Cert has more than 1 CN entries.")
cn_attribute = cn_attributes[0]
except IndexError:
raise rdfvalue.DecodeError("Cert has no CN")
return cn_attribute.value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def GetSerialNumber(self):
return self._value.serial_number
def GetIssuer(self):
return self._value.issuer
def ParseFromString(self, string):
try:
self._value = x509.load_pem_x509_certificate(
string, backend=openssl.backend)
except (ValueError, TypeError) as e:
raise rdfvalue.DecodeError("Invalid certificate %s: %s" % (string, e))
# This can also raise if there isn't exactly one CN entry.
self.GetCN()
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self.ParseFromString(string.encode("ascii"))
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def SerializeToString(self):
if self._value is None:
return ""
return self._value.public_bytes(encoding=serialization.Encoding.PEM)
def AsPEM(self):
return self.SerializeToString()
def __str__(self):
return self.SerializeToString()
def Verify(self, public_key):
"""Verifies the certificate using the given key.
Args:
public_key: The public key to use.
Returns:
True: Everything went well.
Raises:
VerificationError: The certificate did not verify.
"""
# TODO(amoser): We have to do this manually for now since cryptography does
# not yet support cert verification. There is PR 2460:
# https://github.com/pyca/cryptography/pull/2460/files
# that will add it, once it's in we should switch to using this.
# Note that all times here are in UTC.
now = rdfvalue.RDFDatetime.Now().AsDatetime()
if now > self._value.not_valid_after:
raise VerificationError("Certificate expired!")
if now < self._value.not_valid_before:
raise VerificationError("Certificate not yet valid!")
public_key.Verify(
self._value.tbs_certificate_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
@classmethod
def ClientCertFromCSR(cls, csr):
"""Creates a new cert for the given common name.
Args:
csr: A CertificateSigningRequest.
Returns:
The signed cert.
"""
builder = x509.CertificateBuilder()
# Use the client CN for a cert serial_id. This will ensure we do
# not have clashing cert id.
common_name = csr.GetCN()
serial = int(common_name.split(".")[1], 16)
builder = builder.serial_number(serial)
builder = builder.subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME, str(common_name))]))
now = rdfvalue.RDFDatetime.Now()
now_plus_year = now + rdfvalue.Duration("52w")
builder = builder.not_valid_after(now_plus_year.AsDatetime())
now_minus_ten = now - rdfvalue.Duration("10s")
builder = builder.not_valid_before(now_minus_ten.AsDatetime())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_cert = config_lib._CONFIG["CA.certificate"]
# pylint: enable=protected-access
builder = builder.issuer_name(ca_cert.GetIssuer())
builder = builder.public_key(csr.GetPublicKey().GetRawPublicKey())
# TODO(user): dependency loop with
# grr/core/grr_response_core/config/client.py.
# pylint: disable=protected-access
ca_key = config_lib._CONFIG["PrivateKeys.ca_key"]
# pylint: enable=protected-access
return RDFX509Cert(
builder.sign(
private_key=ca_key.GetRawPrivateKey(),
algorithm=hashes.SHA256(),
backend=openssl.backend))
class CertificateSigningRequest(rdfvalue.RDFValue):
"""A CSR Rdfvalue."""
def __init__(self,
initializer=None,
common_name=None,
private_key=None,
age=None):
super(CertificateSigningRequest, self).__init__(
initializer=initializer, age=age)
if self._value is None:
if isinstance(initializer, x509.CertificateSigningRequest):
self._value = initializer
elif isinstance(initializer, string_types):
self.ParseFromString(initializer)
elif common_name and private_key:
self._value = x509.CertificateSigningRequestBuilder().subject_name(
x509.Name(
[x509.NameAttribute(oid.NameOID.COMMON_NAME,
str(common_name))])).sign(
private_key.GetRawPrivateKey(),
hashes.SHA256(),
backend=openssl.backend)
elif initializer is not None:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def ParseFromString(self, csr_as_pem):
self._value = x509.load_pem_x509_csr(csr_as_pem, backend=openssl.backend)
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def SerializeToString(self):
if self._value is None:
return ""
return self._value.public_bytes(serialization.Encoding.PEM)
def AsPEM(self):
return self.SerializeToString()
def __str__(self):
return self.SerializeToString()
def GetCN(self):
subject = self._value.subject
try:
cn_attributes = subject.get_attributes_for_oid(oid.NameOID.COMMON_NAME)
if len(cn_attributes) > 1:
raise rdfvalue.DecodeError("CSR has more than 1 CN entries.")
cn_attribute = cn_attributes[0]
except IndexError:
raise rdfvalue.DecodeError("CSR has no CN")
return cn_attribute.value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def Verify(self, public_key):
public_key.Verify(
self._value.tbs_certrequest_bytes,
self._value.signature,
hash_algorithm=self._value.signature_hash_algorithm)
return True
class RSAPublicKey(rdfvalue.RDFPrimitive):
"""An RSA public key."""
def __init__(self, initializer=None, age=None):
super(RSAPublicKey, self).__init__(initializer=initializer, age=age)
if self._value is None and initializer is not None:
if isinstance(initializer, rsa.RSAPublicKey):
self._value = initializer
elif isinstance(initializer, bytes):
self.ParseFromString(initializer)
elif isinstance(initializer, Text):
self.ParseFromString(initializer.encode("ascii"))
else:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def GetRawPublicKey(self):
return self._value
def ParseFromString(self, pem_string):
precondition.AssertType(pem_string, bytes)
try:
self._value = serialization.load_pem_public_key(
pem_string, backend=openssl.backend)
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
raise type_info.TypeValueError("Public key invalid: %s" % e)
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self.ParseFromString(string.encode("ascii"))
def SerializeToString(self):
if self._value is None:
return ""
return self._value.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
def GetN(self):
return self._value.public_numbers().n
def __str__(self):
return self.SerializeToString()
def AsPEM(self):
return self.SerializeToString()
def KeyLen(self):
if self._value is None:
return 0
return self._value.key_size
def Encrypt(self, message):
if self._value is None:
raise ValueError("Can't Encrypt with empty key.")
try:
return self._value.encrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
except ValueError as e:
raise CipherError(e)
def Verify(self, message, signature, hash_algorithm=None):
"""Verifies a given message."""
# This method accepts both PSS and PKCS1v15 padding. PSS is preferred but
# old clients only support PKCS1v15.
if hash_algorithm is None:
hash_algorithm = hashes.SHA256()
last_e = None
for padding_algorithm in [
padding.PSS(
mgf=padding.MGF1(hash_algorithm),
salt_length=padding.PSS.MAX_LENGTH),
padding.PKCS1v15()
]:
try:
self._value.verify(signature, message, padding_algorithm,
hash_algorithm)
return True
except exceptions.InvalidSignature as e:
last_e = e
raise VerificationError(last_e)
class RSAPrivateKey(rdfvalue.RDFPrimitive):
"""An RSA private key."""
def __init__(self, initializer=None, age=None, allow_prompt=None):
self.allow_prompt = allow_prompt
super(RSAPrivateKey, self).__init__(initializer=initializer, age=age)
if self._value is None and initializer is not None:
if isinstance(initializer, rsa.RSAPrivateKey):
self._value = initializer
elif isinstance(initializer, bytes):
self.ParseFromString(initializer)
elif isinstance(initializer, Text):
self.ParseFromString(initializer.encode("ascii"))
else:
raise rdfvalue.InitializeError(
"Cannot initialize %s from %s." % (self.__class__, initializer))
def ParseFromHumanReadable(self, string):
precondition.AssertType(string, Text)
self.ParseFromString(string.encode("ascii"))
def GetRawPrivateKey(self):
return self._value
def GetPublicKey(self):
return RSAPublicKey(self._value.public_key())
def Sign(self, message, use_pss=False):
"""Sign a given message."""
precondition.AssertType(message, bytes)
# TODO(amoser): This should use PSS by default at some point.
if not use_pss:
padding_algorithm = padding.PKCS1v15()
else:
padding_algorithm = padding.PSS(
mgf=padding.MGF1(hashes.SHA256()), salt_length=padding.PSS.MAX_LENGTH)
return self._value.sign(message, padding_algorithm, hashes.SHA256())
def Decrypt(self, message):
if self._value is None:
raise ValueError("Can't Decrypt with empty key.")
try:
return self._value.decrypt(
message,
padding.OAEP(
mgf=padding.MGF1(algorithm=hashes.SHA1()),
algorithm=hashes.SHA1(),
label=None))
except ValueError as e:
raise CipherError(e)
@classmethod
def GenerateKey(cls, bits=2048, exponent=65537):
key = rsa.generate_private_key(
public_exponent=exponent, key_size=bits, backend=openssl.backend)
return cls(key)
def ParseFromString(self, pem_string):
precondition.AssertType(pem_string, bytes)
try:
self._value = serialization.load_pem_private_key(
pem_string, password=None, backend=openssl.backend)
return
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
if "private key is encrypted" not in str(e):
raise type_info.TypeValueError("Private key invalid: %s" % e)
# pylint: disable=g-explicit-bool-comparison, g-equals-none
# The private key is passphrase protected, we need to see if we are
# allowed to ask the user.
#
# If allow_prompt is False, we are explicitly told that we are not.
if self.allow_prompt == False:
raise type_info.TypeValueError("Private key invalid: %s" % e)
# allow_prompt was not set, we use the context we are in to see if it
# makes sense to ask.
elif self.allow_prompt == None:
# TODO(user): dependency loop with
# core/grr_response_core/grr/config/client.py.
# pylint: disable=protected-access
if "Commandline Context" not in config_lib._CONFIG.context:
raise type_info.TypeValueError("Private key invalid: %s" % e)
# pylint: enable=protected-access
# pylint: enable=g-explicit-bool-comparison, g-equals-none
try:
# The private key is encrypted and we can ask the user for the passphrase.
password = utils.PassphraseCallback()
self._value = serialization.load_pem_private_key(
pem_string, password=password, backend=openssl.backend)
except (TypeError, ValueError, exceptions.UnsupportedAlgorithm) as e:
raise type_info.TypeValueError("Unable to load private key: %s" % e)
def ParseFromDatastore(self, value):
precondition.AssertType(value, bytes)
self.ParseFromString(value)
def SerializeToString(self):
if self._value is None:
return ""
return self._value.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption())
def __str__(self):
digest = hashlib.sha256(self.AsPEM()).hexdigest()
return "%s (%s)" % (self.__class__.__name__, digest)
def AsPEM(self):
return self.SerializeToString()
def AsPassphraseProtectedPEM(self, passphrase):
if self._value is None:
return ""
return self._value.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.BestAvailableEncryption(passphrase))
def KeyLen(self):
if self._value is None:
return 0
return self._value.key_size
# TODO(amoser): Get rid of those.
# Conserve old names for backwards compatibility.
class PEMPrivateKey(RSAPrivateKey):
pass
class PEMPublicKey(RSAPublicKey):
pass
class Hash(rdf_structs.RDFProtoStruct):
"""A hash object containing multiple digests."""
protobuf = jobs_pb2.Hash
rdf_deps = [
rdf_standard.AuthenticodeSignedData,
rdfvalue.HashDigest,
]
class SignedBlob(rdf_structs.RDFProtoStruct):
"""A signed blob.
The client can receive and verify a signed blob (e.g. driver or executable
binary). Once verified, the client may execute this.
"""
protobuf = jobs_pb2.SignedBlob
def Verify(self, public_key):
"""Verify the data in this blob.
Args:
public_key: The public key to use for verification.
Returns:
True when verification succeeds.
Raises:
rdfvalue.DecodeError if the data is not suitable verified.
"""
if self.digest_type != self.HashType.SHA256:
raise rdfvalue.DecodeError("Unsupported digest.")
if self.signature_type not in [
self.SignatureType.RSA_PKCS1v15, self.SignatureType.RSA_PSS
]:
raise rdfvalue.DecodeError("Unsupported signature type.")
try:
public_key.Verify(self.data, self.signature)
except InvalidSignature as e:
raise rdfvalue.DecodeError("Could not verify blob. Error: %s" % e)
return True
def Sign(self, data, signing_key, verify_key=None):
"""Use the data to sign this blob.
Args:
data: String containing the blob data.
signing_key: The key to sign with.
verify_key: Key to verify with. If None we assume the signing key also
contains the public key.
Returns:
self for call chaining.
"""
if signing_key.KeyLen() < 2048:
logging.warning("signing key is too short.")
self.signature = signing_key.Sign(data)
self.signature_type = self.SignatureType.RSA_PKCS1v15
self.digest = hashlib.sha256(data).digest()
self.digest_type = self.HashType.SHA256
self.data = data
# Test we can verify before we send it off.
if verify_key is None:
verify_key = signing_key.GetPublicKey()
# Verify our own data.
self.Verify(verify_key)
return self
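# Usage sketch (illustrative): signing and verifying a blob with the classes
# defined in this module.
#   signing_key = RSAPrivateKey.GenerateKey(bits=2048)
#   blob = SignedBlob().Sign(b"payload", signing_key)
#   blob.Verify(signing_key.GetPublicKey())  # True, or raises DecodeError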
class EncryptionKey(rdfvalue.RDFBytes):
"""Base class for encryption keys."""
# Size of the key in bits.
length = 0
def ParseFromString(self, string):
if len(string) % 8:
raise CipherError(
"Invalid key length %d (%s)." % (len(string) * 8, string))
self._value = string
self.length = 8 * len(self._value)
if self.length < 128:
raise CipherError("Key too short (%d): %s" % (self.length, string))
def __str__(self):
digest = hashlib.sha256(self.AsHexDigest()).hexdigest()
return "%s (%s)" % (self.__class__.__name__, digest)
def AsHexDigest(self):
return binascii.hexlify(self._value)
@classmethod
def FromHex(cls, hex_string):
precondition.AssertType(hex_string, Text)
return cls(binascii.unhexlify(hex_string))
def SerializeToString(self):
return self._value
@classmethod
def GenerateKey(cls, length=128):
return cls(os.urandom(length // 8))
@classmethod
def GenerateRandomIV(cls, length=128):
return cls.GenerateKey(length=length)
def RawBytes(self):
return self._value
# TODO(amoser): Size is now flexible, this class makes no sense anymore.
class AES128Key(EncryptionKey):
length = 128
class AutoGeneratedAES128Key(AES128Key):
"""Like AES128Key, but its UI edit box is prefilled with generated key."""
def __init__(self, initializer=None, **kwargs):
if isinstance(initializer, AES128Key):
super(AutoGeneratedAES128Key, self).__init__(
initializer=initializer.RawBytes(), **kwargs)
else:
super(AutoGeneratedAES128Key, self).__init__(
initializer=initializer, **kwargs)
class StreamingCBCEncryptor(object):
"""A class to stream data to a CBCCipher object."""
def __init__(self, cipher):
self._cipher = cipher
self._encryptor = cipher.GetEncryptor()
self._overflow_buffer = b""
self._block_size = len(cipher.key)
def Update(self, data):
data = self._overflow_buffer + data
overflow_count = len(data) % self._block_size
length_to_encrypt = len(data) - overflow_count
to_encrypt = data[:length_to_encrypt]
self._overflow_buffer = data[length_to_encrypt:]
return self._encryptor.update(to_encrypt)
def Finalize(self):
res = self._encryptor.update(self._cipher.Pad(self._overflow_buffer))
res += self._encryptor.finalize()
return res
class AES128CBCCipher(object):
"""A Cipher using AES128 in CBC mode and PKCS7 for padding."""
algorithm = None
def __init__(self, key, iv):
"""Init.
Args:
key: The key, a rdf_crypto.EncryptionKey instance.
iv: The iv, a rdf_crypto.EncryptionKey instance.
"""
self.key = key.RawBytes()
self.iv = iv.RawBytes()
def Pad(self, data):
padder = sym_padding.PKCS7(128).padder()
return padder.update(data) + padder.finalize()
def UnPad(self, padded_data):
unpadder = sym_padding.PKCS7(128).unpadder()
return unpadder.update(padded_data) + unpadder.finalize()
def GetEncryptor(self):
return ciphers.Cipher(
algorithms.AES(self.key), modes.CBC(self.iv),
backend=openssl.backend).encryptor()
def Encrypt(self, data):
"""A convenience method which pads and encrypts at once."""
encryptor = self.GetEncryptor()
padded_data = self.Pad(data)
try:
return encryptor.update(padded_data) + encryptor.finalize()
except ValueError as e:
raise CipherError(e)
def GetDecryptor(self):
return ciphers.Cipher(
algorithms.AES(self.key), modes.CBC(self.iv),
backend=openssl.backend).decryptor()
def Decrypt(self, data):
"""A convenience method which pads and decrypts at once."""
decryptor = self.GetDecryptor()
try:
padded_data = decryptor.update(data) + decryptor.finalize()
return self.UnPad(padded_data)
except ValueError as e:
raise CipherError(e)
class SymmetricCipher(rdf_structs.RDFProtoStruct):
"""Abstract symmetric cipher operations."""
protobuf = jobs_pb2.SymmetricCipher
rdf_deps = [
EncryptionKey,
]
@classmethod
def Generate(cls, algorithm):
if algorithm != cls.Algorithm.AES128CBC:
raise RuntimeError("Algorithm not supported.")
return cls(
_algorithm=algorithm,
_key=EncryptionKey.GenerateKey(length=128),
_iv=EncryptionKey.GenerateKey(length=128))
def _get_cipher(self):
if self._algorithm != self.Algorithm.AES128CBC:
raise CipherError("Unknown cipher type %s" % self._algorithm)
return AES128CBCCipher(self._key, self._iv)
def Encrypt(self, data):
if self._algorithm == self.Algorithm.NONE:
raise TypeError("Empty encryption is not allowed.")
return self._get_cipher().Encrypt(data)
def Decrypt(self, data):
if self._algorithm == self.Algorithm.NONE:
raise TypeError("Empty encryption is not allowed.")
return self._get_cipher().Decrypt(data)
class HMAC(object):
"""A wrapper for the cryptography HMAC object."""
def __init__(self, key, use_sha256=False):
# We store the raw key from cryptography.io.
if isinstance(key, EncryptionKey):
key = key.RawBytes()
self.key = key
self._hmac = self._NewHMAC(use_sha256=use_sha256)
def _NewHMAC(self, use_sha256=False):
if use_sha256:
hash_algorithm = hashes.SHA256()
else:
hash_algorithm = hashes.SHA1()
return hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)
def Update(self, data):
self._hmac.update(data)
def Finalize(self):
return self._hmac.finalize()
def HMAC(self, message, use_sha256=False):
"""Calculates the HMAC for a given message."""
h = self._NewHMAC(use_sha256=use_sha256)
h.update(message)
return h.finalize()
def Verify(self, message, signature):
"""Verifies the signature for a given message."""
siglen = len(signature)
if siglen == 20:
hash_algorithm = hashes.SHA1()
elif siglen == 32:
hash_algorithm = hashes.SHA256()
else:
raise VerificationError("Invalid signature length %d." % siglen)
h = hmac.HMAC(self.key, hash_algorithm, backend=openssl.backend)
h.update(message)
try:
h.verify(signature)
return True
except exceptions.InvalidSignature as e:
raise VerificationError(e)
class Password(rdf_structs.RDFProtoStruct):
"""A password stored in the database."""
protobuf = jobs_pb2.Password
def _CalculateHash(self, password, salt, iteration_count):
kdf = pbkdf2.PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=iteration_count,
backend=openssl.backend)
return kdf.derive(password)
def SetPassword(self, password):
self.salt = b"%016x" % random.UInt64()
self.iteration_count = 100000
# prevent non-descriptive 'key_material must be bytes' error later
if isinstance(password, string_types):
password = password.encode("utf-8")
self.hashed_pwd = self._CalculateHash(password, self.salt,
self.iteration_count)
def CheckPassword(self, password):
# prevent non-descriptive 'key_material must be bytes' error later
if isinstance(password, string_types):
password = password.encode("utf-8")
h = self._CalculateHash(password, self.salt, self.iteration_count)
return constant_time.bytes_eq(h, self.hashed_pwd)
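if __name__ == "__main__":
  # Usage sketch (illustrative, not part of GRR): AES-128-CBC round trip with
  # the wrappers defined above.
  demo_key = EncryptionKey.GenerateKey(length=128)
  demo_iv = EncryptionKey.GenerateRandomIV(length=128)
  demo_cipher = AES128CBCCipher(demo_key, demo_iv)
  assert demo_cipher.Decrypt(demo_cipher.Encrypt(b"hello world")) == b"hello world"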
|
bzl | 1a2fd22700cafa76f737a3dd29aba393dd1c839e | # Copyright 2019 Google LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google LLC nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@com_google_api_gax_java_properties//:dependencies.properties.bzl", "PROPERTIES")
def com_google_api_gax_java_repositories():
# Import dependencies shared between Gradle and Bazel (i.e. maven dependencies)
for name, artifact in PROPERTIES.items():
_maybe(
native.maven_jar,
name = name,
strip_repo_prefix = "maven.",
artifact = _fix_bazel_artifact_format(artifact),
)
# Import Bazel-only dependencies (Gradle version will import maven artifacts of same
# version, while Bazel will depend on Bazel workspaces). The versions are shared in the
# properties file.
_protobuf_version = PROPERTIES["version.com_google_protobuf"]
_protobuf_version_in_link = "v%s" % _protobuf_version
_maybe(
http_archive,
name = "com_google_protobuf",
urls = ["https://github.com/protocolbuffers/protobuf/archive/%s.zip" % _protobuf_version_in_link],
strip_prefix = "protobuf-%s" % _protobuf_version,
)
_grpc_version = PROPERTIES["version.io_grpc"]
_grpc_version_in_link = "v%s" % _grpc_version
_maybe(
http_archive,
name = "io_grpc_grpc_java",
urls = ["https://github.com/grpc/grpc-java/archive/%s.zip" % _grpc_version_in_link],
strip_prefix = "grpc-java-%s" % _grpc_version,
)
_maybe(
http_archive,
name = "bazel_skylib",
strip_prefix = "bazel-skylib-0.7.0",
urls = ["https://github.com/bazelbuild/bazel-skylib/archive/0.7.0.zip"],
)
_maybe(
native.maven_jar,
name = "io_grpc_grpc_netty_shaded",
artifact = "io.grpc:grpc-netty-shaded:%s" % PROPERTIES["version.io_grpc"],
)
_maybe(
native.maven_jar,
name = "google_java_format_all_deps",
artifact = "com.google.googlejavaformat:google-java-format:jar:all-deps:%s" % PROPERTIES["version.google_java_format"],
)
_maybe(
native.bind,
name = "guava",
actual = "@com_google_guava_guava//jar",
)
_maybe(
native.bind,
name = "gson",
actual = "@com_google_code_gson_gson//jar",
)
_maybe(
native.bind,
name = "error_prone_annotations",
actual = "@com_google_errorprone_error_prone_annotations//jar",
)
def _maybe(repo_rule, name, strip_repo_prefix = "", **kwargs):
if not name.startswith(strip_repo_prefix):
return
repo_name = name[len(strip_repo_prefix):]
if repo_name in native.existing_rules():
return
repo_rule(name = repo_name, **kwargs)
def _fix_bazel_artifact_format(artifact_id):
# Fix the artifact id format discrepancy between Bazel & Gradle.
# This is relevant only when classifier is specified explicitly.
# Bazel format: groupId:artifactId:jar:classifier:version
# Gradle format: groupId:artifactId:version:classifier
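# For example, a hypothetical Gradle-style coordinate "com.example:foo:1.2.3:tests"
# (4 segments) is rewritten to "com.example:foo:jar:tests:1.2.3"; any other shape
# is returned unchanged.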
ids = artifact_id.split(":")
if len(ids) != 4:
return artifact_id
return "%s:%s:%s:%s:%s" % (ids[0], ids[1], "jar", ids[3], ids[2])
|
py | 1a2fd23a0e2f3b5198b6e8ac5f62026fc8f32429 | from torch.autograd import Variable
from Model import Decoder
import torchsnooper
# Batches and Masking
class Batch:
"此对象用于在训练时进行已屏蔽的批数据处理"
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"创建一个mask来隐藏填充和将来的单词"
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
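# Illustrative sketch (not part of the original code; assumes torch is importable
# and pad index 0): for padded index tensors src of shape (2, 6) and trg of shape
# (2, 5), Batch(src, trg, pad=0) exposes src_mask of shape (2, 1, 6), keeps
# trg[:, :-1] as the decoder input and trg[:, 1:] as the gold labels (trg_y), and
# builds trg_mask of shape (2, 4, 4), combining the padding mask with the
# subsequent-position mask.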
# We will use torchtext for batching. Batches are created in a TorchText function, ensuring the maximum padded batch size does not exceed a threshold (25000 if we have 8 GPUs).
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
"持续扩大批处理并计算标识+填充的总数"
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(new.src))
max_tgt_in_batch = max(max_tgt_in_batch, len(new.src) + 2)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
class Batch_kg:
"此对象用于在训练时进行已屏蔽的批数据处理"
def __init__(self, src, ent, trg=None, pad=0):
self.src = src
self.ent = ent
self.trg = trg
self.src_mask = (src != pad).unsqueeze(-2)
self.ent_mask = None
if self.trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"创建一个mask来隐藏填充和将来的单词"
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
Decoder.subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
class Batch_ast:
"此对象用于在训练时进行已屏蔽的批数据处理"
def __init__(self, src, ent, ast, trg=None, pad=0):
self.src = src
self.ent = ent
self.trg = trg
self.ast = ast
self.src_mask = (src != pad).unsqueeze(-2)
self.ent_mask = None
self.ast_mask = (src != pad).unsqueeze(-2)
if self.trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"创建一个mask来隐藏填充和将来的单词"
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
Decoder.subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask |
py | 1a2fd2a6564dbb3f0de3e5aea345a7e5ec29fa0a | #coding=utf-8
HOST = ''
PORT = 50008
# maximum sleep time while there is no connection for an SMV process
MAX_SLEEP_TIME = 5
# time out in seconds
TIME_OUT = 5
MU_CHECK_TIMEOUT = 600
MU_CHECK_MEMORY = 1024
# path to NuSMV
SMV_PATH = '/home/lyj238/Downloads/NuSMV/bin/NuSMV'
MU_PATH = '/home/lyj238/Downloads/cmurphi5.4.9/src/mu'
MU_INCLUDE = '/home/lyj238/Downloads/cmurphi5.4.9/include'
GXX_PATH = '/usr/bin/g++'
# path for storing smv files
SMV_FILE_DIR = '/tmp/NuSMV/'
MU_FILE_DIR = '/tmp/cmurphi/'
dirs = [SMV_FILE_DIR, MU_FILE_DIR]
import os
for d in dirs:
if not os.path.isdir(d):
os.makedirs(d)
|
py | 1a2fd3343a3364a08960b27e84d5f7c4cfcef834 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ImportJobRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'body': 'ImportFileReq'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None):
"""ImportJobRequest - a model defined in huaweicloud sdk"""
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this ImportJobRequest.
:return: The body of this ImportJobRequest.
:rtype: ImportFileReq
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this ImportJobRequest.
:param body: The body of this ImportJobRequest.
:type: ImportFileReq
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ImportJobRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py | 1a2fd3acdd0c8c8e703a6777cb4c6c209d8f9c7f | import cv2
import os
import numpy as np
import random
# Example: this script operates on the NEU-CLS dataset.
# Randomly select a number of images from the synthesized data as the new dataset.
image_dir = '/content/drive/MyDrive/colab/multiClass/NEU-CLS'
# Shuffle the order of the original dataset
img_path = []
for name in os.listdir(image_dir):
img_path.append(os.path.join(image_dir, name))
random.shuffle(img_path)
new_types = ['PS', 'RS', 'Cr', 'In', 'Pa', 'Sc']
# process the type labels
def str_to_defect_types(s):
defect_types = []
for t in new_types:
defect_types.append(s.count(t))
return defect_types
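# For example, with new_types as defined above, the label list ['Cr', 'Cr', 'PS', 'In']
# maps to the count vector [1, 0, 2, 1, 0, 0].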
s = []
y = []
dataset_list = img_path # modify this list for training or testing. training: train_dataset; testing: test_dataset
# size_4_1 = int(len(dataset_list)/4) # number of synthesized images new_dataset_path
# randvector = list(range(len(dataset_list)))
randvector = list(range(1000)) # 3400 2800 1440
for i in randvector:
# img2 = dataset_list[i]
img2 = random.choice(dataset_list) # image path
imgx = img2.split("/")[-1].split("_")[0] # class label
s.append(imgx)
y.append(img2)
def to_matrix(x_y, n):
ls_4 = []
for i in range(0, len(x_y), n):
ls_4.append(x_y[i: i + n])
return ls_4
s = to_matrix(s, 4)
y = to_matrix(y, 4)
# Synthesize images: 4 -> 1
img_data = []
img_type = []
num = 0
for i in range(250):
x1 = cv2.imread(y[i][0]) # ,as_gray=True)
x2 = cv2.imread(y[i][1]) # ,as_gray=True)
x3 = cv2.imread(y[i][2]) # ,as_gray=True)
x4 = cv2.imread(y[i][3]) # ,as_gray=True)
im_h1 = cv2.hconcat([x1, x2]) # concatenation function
im_h2 = cv2.hconcat([x3, x4])
im_f = cv2.vconcat([im_h1, im_h2])
img_data.append(np.array(im_f))
img_type.append(str_to_defect_types(s[i])) # process the type labels
root_path = '/content/drive/MyDrive/colab/multiClass/Defects' # save under this folder
# type conversion
img_data_np = np.array(img_data)
img_type_np = np.array(img_type)
# absolute paths of the saved output files
img_data_file = os.path.join(root_path, 'data文件名.npy')
img_types = os.path.join(root_path, 'type文件名.npy')
# save
np.save(img_data_file, img_data_np)
np.save(img_types, img_type_np)
|
py | 1a2fd3c90aadf9f684f07ba1d0ba1cea7b840d49 | import discord
import random
import asyncio
import discord
from discord.ext import commands, tasks
class Prescence(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.prescence_default.start()
self.ctfu_rgblighting.start()
def cog_unload(self):
self.prescence_default.cancel()
self.ctfu_rgblighting.cancel()
@tasks.loop(seconds=60.0)
async def prescence_default(self):
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=f'{len(self.bot.users)} users.'))
@tasks.loop(seconds=600.0)
async def ctfu_rgblighting(self):
ctfuserver = self.bot.get_guild(694217343173394432)
role = ctfuserver.get_role(701007133994647622)
await role.edit(colour=discord.Colour(random.randint(0, 0xFFFFFF)))
@prescence_default.before_loop
async def before_running(self):
print('Bot setting up... Adding presence...')
await self.bot.wait_until_ready()
@ctfu_rgblighting.before_loop
async def before_running_rgb(self):
print('Bot setting up... Adding RGB Lighting for CTFU...')
await self.bot.wait_until_ready()
def setup(bot):
bot.add_cog(Prescence(bot))
|
py | 1a2fd42c4c82234852097562aea1f7ebd15fb317 | import random
import string
from time import time
from settings import URL, CHATS_COLLECTION_NAME
from .base import CommandBase
class CommandStart(CommandBase):
async def __call__(self, payload):
self.set_bot(payload)
registered_chat = self.sdk.db.find_one(CHATS_COLLECTION_NAME, {'chat': payload['chat'], 'bot': self.bot})
if registered_chat:
user_token = registered_chat['user']
else:
user_token = self.generate_user_token()
new_chat = {
'chat': payload['chat'],
'user': user_token,
'dt_register': time(),
'bot': self.bot
}
self.sdk.db.insert(CHATS_COLLECTION_NAME, new_chat)
self.sdk.log("New user registered with token {}".format(user_token))
message = "Use this webhook for sending notifications to the chat:\n" \
"\n" \
"<code>{}/u/{}</code>\n" \
"\n" \
"Make a POST request with text in «message» param."
await self.send(
payload["chat"],
message.format(URL, user_token),
"HTML"
)
@staticmethod
def generate_user_token():
return ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(8))
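# Illustrative client-side call (hypothetical host and token) matching the webhook
# described in the start message above:
# import requests
# requests.post("https://bot.example.com/u/AB12CD34", data={"message": "deploy finished"})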
|
py | 1a2fd4e0b8c7ae7228d7768503d24b8a68d0a440 | import unittest
from .framework import selenium_test, SeleniumTestCase
class ToolDescribingToursTestCase(SeleniumTestCase):
def setUp(self):
super().setUp()
self.home()
@selenium_test
def test_generate_tour_no_data(self):
"""Ensure a tour without data is generated and pops up."""
self._ensure_tdt_available()
self.tool_open('environment_variables')
self.tool_form_generate_tour()
popover_component = self.components.tour.popover._
popover_component.wait_for_visible()
title = popover_component.title.wait_for_visible().text
assert title == "environment_variables Tour", title
# Run tool
self.tool_form_execute()
self.history_panel_wait_for_hid_ok(1)
@selenium_test
def test_generate_tour_with_data(self):
"""Ensure a tour with data populates history."""
self._ensure_tdt_available()
self.tool_open('md5sum')
self.tool_form_generate_tour()
self.history_panel_wait_for_hid_ok(1)
popover_component = self.components.tour.popover._
popover_component.wait_for_visible()
title = popover_component.title.wait_for_visible().text
assert title == "md5sum Tour", title
self.screenshot("tool_describing_tour_0_start")
popover_component.next.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
text = popover_component.content.wait_for_visible().text
assert "Select dataset" in text, text
self.screenshot("tool_describing_tour_1_select")
popover_component.next.wait_for_and_click()
self.sleep_for(self.wait_types.UX_RENDER)
title = popover_component.title.wait_for_visible().text
assert title == "Execute tool"
self.screenshot("tool_describing_tour_2_execute")
popover_component.end.wait_for_and_click()
popover_component.wait_for_absent_or_hidden()
# Run tool
self.tool_form_execute()
self.history_panel_wait_for_hid_ok(2)
self.screenshot("tool_describing_tour_3_after_execute")
def _ensure_tdt_available(self):
""" Skip a test if the webhook TDT doesn't appear. """
response = self.api_get('webhooks', raw=True)
self.assertEqual(response.status_code, 200)
data = response.json()
webhooks = [x['id'] for x in data]
if 'tour_generator' not in webhooks:
raise unittest.SkipTest('Skipping test, webhook "Tool-Describing-Tours" doesn\'t appear to be configured.')
|
py | 1a2fd5114444d7e16b700dc1146c193c859ccf76 | #!/usr/bin/env python
import fileinput
jumps = [int(jump) for jump in fileinput.input()]
clock, pc, max_pc = 0, 0, 0
while pc < len(jumps):
jump = jumps[pc]
jumps[pc] += 1
pc += jump
clock += 1
if pc > max_pc:
max_pc = pc
print("%09d: %04d" % (clock, pc))
print(clock)
|
py | 1a2fd5154fb5ea99a0ee7df2a192dfa2d82c21de | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.shortcuts import redirect
from django.views.generic import CreateView, FormView
from ..forms import CreateBoaLeagueForm, JoinBoaLeagueForm
from ..models import League, Manager
class JoinLeague(LoginRequiredMixin, CreateView):
model = Manager
form_class = JoinBoaLeagueForm
http_method_names = [u'get', u'post']
template_name = 'boa/join_league.html'
object = None
def get(self, request, *args, **kwargs):
context = {}
my_league_id = kwargs['pk']
league = League.objects.get(id=my_league_id)
if league.password:
form = JoinBoaLeagueForm(pw=True)
else:
form = JoinBoaLeagueForm(pw=False)
context.update({
'league': league,
'form': form,
})
return self.render_to_response(context)
def get_success_url(self):
return self.object.league.get_absolute_url()
def post(self, request, *args, **kwargs):
print(request.POST)
my_league_id = kwargs['pk']
league = League.objects.get(id=my_league_id)
has_password = bool(league.password)
form = JoinBoaLeagueForm(request.POST, pw=has_password)
if form.is_valid():
manager = form.save(commit=False)
manager.league = league
manager.user = self.request.user
# check password
if league.password and not('password' in form.cleaned_data
and form.cleaned_data['password'] == league.password):
form.add_error(
None,
f'Invalid password.'
)
return self.form_invalid(form)
# check team count
if Manager.objects.filter(league=league).count() >= league.max_teams_per_league:
form.add_error(
None,
f'This league is full.'
)
return self.form_invalid(form)
# check if already in league and team name
for other_manager in Manager.objects.filter(league=league):
if manager.user == other_manager.user:
form.add_error(
None,
f'You already manage a team in this league'
)
return self.form_invalid(form)
if manager.name == other_manager.name:
form.add_error(
None,
f'There is already a team named {manager.name} in this league'
)
return self.form_invalid(form)
manager.save()
messages.success(
request,
f'Successfully joined league {league}'
)
return self.form_valid(form)
else:
return self.form_invalid(form)
|
py | 1a2fd57ef5f3ecb75738f551fdb7c30b59a4b6ff | # vim:ts=4:sts=4:sw=4:expandtab
"""Matching Clients with event queues.
"""
import collections
from satori.objects import Object
from satori.events.misc import Namespace
class Dispatcher(Object):
"""Abstract. Dispatches Events to Clients.
"""
def __init__(self):
self.queues = dict()
self.clients = dict()
def _qdata(self, queue_id):
if queue_id not in self.queues:
qdata = Namespace()
qdata.references = 0
qdata.events = collections.deque()
qdata.clients = collections.deque()
self.queues[queue_id] = qdata
return self.queues[queue_id]
def _cdata(self, client):
if client not in self.clients:
cdata = Namespace()
cdata.queue_ids = set()
cdata.active = False
self.clients[client] = cdata
return self.clients[client]
def attach(self, client, queue_id):
"""Declare Client's interest in events from a given queue.
"""
qdata = self._qdata(queue_id)
cdata = self._cdata(client)
if queue_id not in cdata.queue_ids:
cdata.queue_ids.add(queue_id)
qdata.references += 1
def detach(self, client, queue_id):
"""Revoke Client's interest in events from a given queue.
"""
qdata = self._qdata(queue_id)
cdata = self._cdata(client)
if queue_id in cdata.queue_ids:
cdata.queue_ids.remove(queue_id)
qdata.references -= 1
if qdata.references == 0:
yield queue_id
del self.queues[queue_id]
def activate(self, client):
"""Mark a Client as ready to receive a (single) event.
"""
cdata = self._cdata(client)
best = None
for queue_id in cdata.queue_ids:
qdata = self._qdata(queue_id)
if len(qdata.events) > 0:
event = qdata.events[0]
if best is None or best[1] > event.serial:
best = (queue_id, event.serial)
if best is not None:
qdata = self._qdata(best[0])
client.sendResponse((best[0], qdata.events.popleft()))
return
for queue_id in cdata.queue_ids:
qdata = self._qdata(queue_id)
qdata.clients.append(client)
cdata.active = True
def enqueue(self, queue_id, event):
"""Add a new event to a given queue.
"""
qdata = self._qdata(queue_id)
qdata.events.append(event)
while len(qdata.clients) > 0:
client = qdata.clients.popleft()
cdata = self._cdata(client)
if not cdata.active:
continue
if queue_id not in cdata.queue_ids:
continue
cdata.active = False
client.sendResponse((queue_id, qdata.events.popleft()))
return
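# Minimal usage sketch (illustrative only; a client is any object providing the
# sendResponse() method assumed above):
# dispatcher = Dispatcher()
# dispatcher.attach(client, "builds")
# dispatcher.activate(client) # no event queued yet, so the client waits
# dispatcher.enqueue("builds", event) # the waiting client receives ("builds", event)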
|
py | 1a2fd5b5d2ad163433446939e5563b3bda14e61b | import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion(
'1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
## pretrain
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
## extract layers
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = tf.get_default_graph().get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_out = tf.get_default_graph().get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_out = tf.get_default_graph().get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_out = tf.get_default_graph().get_tensor_by_name(vgg_layer7_out_tensor_name)
return image_input, keep_prob, layer3_out, layer4_out, layer7_out
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
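# The decoder below follows the FCN-8s pattern: 1x1 convolutions project each VGG
# feature map to num_classes channels, transposed convolutions upsample by 2x, 2x
# and 8x (32x overall), and skip connections from vgg layers 4 and 3 are added in
# before the second and third upsampling steps.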
# 1x1 convolution of vgg layer 7
layer7_conv_1x1 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1,
padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# upsample
layer7_out = tf.layers.conv2d_transpose(layer7_conv_1x1, num_classes, 4,
strides=(2, 2),
padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# 1x1 convolution of vgg layer 4
layer4_conv_1x1 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1,
padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
layer4_skip_conns = tf.add(layer7_out, layer4_conv_1x1)
layer4_out = tf.layers.conv2d_transpose(layer4_skip_conns, num_classes, 4,
strides=(2, 2),
padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
# 1x1 convolution of vgg layer 3
layer3_conv_1x1 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1,
padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
layer3_skip_conns = tf.add(layer4_out, layer3_conv_1x1)
layer3_out = tf.layers.conv2d_transpose(layer3_skip_conns, num_classes, 16,
strides=(8, 8),
padding='same',
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(1e-3))
return layer3_out
tests.test_layers(layers)
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
correct_label = tf.reshape(correct_label, (-1, num_classes))
# Loss function
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
# Training operation
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
sess.run(tf.global_variables_initializer())
print("Training Neural Network\n\n")
for itr in range(epochs):
print("Epoch No. {}".format(itr + 1))
for image, label in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss],
feed_dict={input_image: image, correct_label: label, keep_prob: 0.5,
learning_rate: 0.0009})
print("Training Loss: {:.3f}".format(loss))
print()
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576)
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
# Build NN using load_vgg, layers, and optimize function
BATCH_SIZE = 5
EPOCHS = 50
# Placeholders for Tensorflow
correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
input_image, keep_prob, layer3_out, layer4_out, layer7_out = load_vgg(sess, vgg_path)
last_layer_out = layers(layer3_out, layer4_out, layer7_out, num_classes)
logits, train_op, cross_entropy_loss = optimize(last_layer_out, correct_label, learning_rate, num_classes)
# Train NN using the train_nn function
train_nn(sess, EPOCHS, BATCH_SIZE, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate)
# Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run() |
py | 1a2fd60e4d1beee6888ddba7ed6af1def3c7d255 | import logging
log = logging.getLogger('onegov.form') # noqa
log.addHandler(logging.NullHandler()) # noqa
from translationstring import TranslationStringFactory
_ = TranslationStringFactory('onegov.form') # noqa
from onegov.form.collection import (
FormCollection,
FormSubmissionCollection,
FormDefinitionCollection
)
from onegov.form.core import (
FieldDependency,
Form,
merge_forms,
move_fields,
)
from onegov.form.display import render_field
from onegov.form.extensions import FormExtension, Extendable
from onegov.form.integration import FormApp
from onegov.form.models import (
FormDefinition,
FormFile,
FormSubmission,
FormRegistrationWindow,
PendingFormSubmission,
CompleteFormSubmission
)
from onegov.form.parser import find_field
from onegov.form.parser import flatten_fieldsets
from onegov.form.parser import parse_form
from onegov.form.parser import parse_formcode
from onegov.form.parser import WTFormsClassBuilder
from onegov.form.utils import decimal_range, as_internal_id, with_options
__all__ = [
'as_internal_id',
'CompleteFormSubmission',
'decimal_range',
'find_field',
'flatten_fieldsets',
'Extendable',
'FieldDependency',
'Form',
'FormApp',
'FormCollection',
'FormDefinition',
'FormDefinitionCollection',
'FormExtension',
'FormFile',
'FormRegistrationWindow',
'FormSubmission',
'FormSubmissionCollection',
'merge_forms',
'move_fields',
'parse_form',
'parse_formcode',
'PendingFormSubmission',
'render_field',
'with_options',
'WTFormsClassBuilder',
]
|
py | 1a2fd613b9bdb475afb052b7c1456d1191c09bb1 | """Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self, callback=False):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def send_messages(self, email_messages, callback=False):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError
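# A minimal sketch of a concrete backend (illustrative only, not part of this
# module); it assumes each item in email_messages exposes a message() method:
#
# class ConsoleEmailBackend(BaseEmailBackend):
#     def send_messages(self, email_messages, callback=False):
#         for message in email_messages:
#             print(message.message())
#         return len(email_messages)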
|
py | 1a2fd7440c6a08e2a23e68232183b39370f64bad | # -*- coding: utf-8 -*-
#
import numpy
from . import cielab
from .illuminants import whitepoints_cie1931
class CIELCH(object):
def __init__(self, whitepoint=whitepoints_cie1931['D65']):
self.cielab = cielab.CIELAB(whitepoint=whitepoint)
return
def from_xyz100(self, xyz):
L, u, v = self.cielab.from_xyz100(xyz)
C = numpy.hypot(u, v)
h = numpy.mod(numpy.arctan2(v, u), 2*numpy.pi) / numpy.pi * 180
return numpy.array([L, C, h])
def to_xyz100(self, lch):
L, C, h = lch
h_ = h * numpy.pi / 180
lab = numpy.array([L, C * numpy.cos(h_), C * numpy.sin(h_)])
return self.cielab.to_xyz100(lab)
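# Illustrative round trip (arbitrary XYZ100 values): cs = CIELCH();
# lch = cs.from_xyz100(numpy.array([19.01, 20.0, 21.78])) yields (L, C, h) with h
# in degrees, and cs.to_xyz100(lch) reproduces the input up to floating point error.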
|
py | 1a2fd90970a101f64c169389990865b6d19a55bc | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2019 Snowflake Computing Inc. All right reserved.
#
import os
import random
import re
import string
import time
import pytest
from conftest import get_engine
from mock import patch
from parameters import CONNECTION_PARAMETERS
from snowflake.connector import ProgrammingError, connect
from snowflake.sqlalchemy import URL, MergeInto, dialect
from sqlalchemy import (
REAL,
Boolean,
Column,
DateTime,
ForeignKey,
Integer,
LargeBinary,
MetaData,
Numeric,
Sequence,
String,
Table,
create_engine,
dialects,
inspect,
text,
)
from sqlalchemy.sql import and_, not_, or_, select
try:
from parameters import (CONNECTION_PARAMETERS2)
except ImportError:
CONNECTION_PARAMETERS2 = CONNECTION_PARAMETERS
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
def _create_users_addresses_tables(engine_testaccount, metadata, fk=None):
users = Table('users', metadata,
Column('id', Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name', String),
Column('fullname', String),
)
addresses = Table('addresses', metadata,
Column('id', Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id', None,
ForeignKey('users.id', name=fk)),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine_testaccount)
return users, addresses
def _create_users_addresses_tables_without_sequence(engine_testaccount,
metadata):
users = Table('users', metadata,
Column('id', Integer, primary_key=True),
Column('name', String),
Column('fullname', String),
)
addresses = Table('addresses', metadata,
Column('id', Integer, primary_key=True),
Column('user_id', None, ForeignKey('users.id')),
Column('email_address', String, nullable=False)
)
metadata.create_all(engine_testaccount)
return users, addresses
def test_connect_args():
"""
Tests connect string
Snowflake connect string supports account name as a replacement of
host:port
"""
from sqlalchemy import create_engine
engine = create_engine(
'snowflake://{user}:{password}@{account}/{database}/{schema}'.format(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
database=CONNECTION_PARAMETERS2['database'],
schema=CONNECTION_PARAMETERS2['schema'],
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
engine = create_engine(
'snowflake://{user}:{password}@{account}/'.format(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
engine = create_engine(URL(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
engine = create_engine(URL(
user=CONNECTION_PARAMETERS2['user'],
password=CONNECTION_PARAMETERS2['password'],
account=CONNECTION_PARAMETERS2['account'],
warehouse='testwh'
)
)
try:
results = engine.execute('select current_version()').fetchone()
assert results is not None
finally:
engine.dispose()
def test_simple_sql(engine_testaccount):
"""
Simple SQL by SQLAlchemy
"""
result = engine_testaccount.execute('show databases')
rows = [row for row in result]
assert len(rows) >= 0, 'show database results'
def test_create_drop_tables(engine_testaccount):
"""
Creates and Drops tables
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount, metadata)
try:
# validate the tables exists
results = engine_testaccount.execute('desc table users')
assert len([row for row in results]) > 0, "users table doesn't exist"
# validate the tables exists
results = engine_testaccount.execute('desc table addresses')
assert len([row for row in results]) > 0, \
"addresses table doesn't exist"
finally:
# drop tables
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_insert_tables(engine_testaccount):
"""
Inserts data into tables
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables(
engine_testaccount, metadata)
conn = engine_testaccount.connect()
try:
# inserts data with an implicitly generated id
ins = users.insert().values(name='jack', fullname='Jack Jones')
results = engine_testaccount.execute(ins)
assert results.inserted_primary_key == [1], 'sequence value'
results.close()
# inserts data with the given id
ins = users.insert()
conn.execute(ins, id=2, name='wendy', fullname='Wendy Williams')
# verify the results
s = select([users])
results = conn.execute(s)
assert len([row for row in results]) == 2, \
'number of rows from users table'
results.close()
# fetchone
s = select([users]).order_by('id')
results = conn.execute(s)
row = results.fetchone()
results.close()
assert row[2] == 'Jack Jones', 'user name'
assert row['fullname'] == 'Jack Jones', "user name by dict"
assert row[users.c.fullname] == 'Jack Jones', \
'user name by Column object'
conn.execute(addresses.insert(), [
{'user_id': 1, 'email_address': '[email protected]'},
{'user_id': 1, 'email_address': '[email protected]'},
{'user_id': 2, 'email_address': '[email protected]'},
{'user_id': 2, 'email_address': '[email protected]'},
])
# more records
s = select([addresses])
results = conn.execute(s)
assert len([row for row in results]) == 4, \
'number of rows from addresses table'
results.close()
# select specified column names
s = select([users.c.name, users.c.fullname]).order_by('name')
results = conn.execute(s)
results.fetchone()
row = results.fetchone()
assert row['name'] == 'wendy', 'name'
# join
s = select([users, addresses]).where(users.c.id == addresses.c.user_id)
results = conn.execute(s)
results.fetchone()
results.fetchone()
results.fetchone()
row = results.fetchone()
assert row['email_address'] == '[email protected]', 'email address'
# Operator
assert str(users.c.id == addresses.c.user_id) == \
'users.id = addresses.user_id', 'equal operator'
assert str(users.c.id == 7) == 'users.id = :id_1', \
'equal to a static number'
assert str(users.c.name == None) # NOQA
assert str(users.c.id + addresses.c.id) == 'users.id + addresses.id', \
'number + number'
assert str(users.c.name + users.c.fullname) == \
'users.name || users.fullname', 'str + str'
# Conjunctions
# example 1
obj = and_(
users.c.name.like('j%'),
users.c.id == addresses.c.user_id,
or_(
addresses.c.email_address == '[email protected]',
addresses.c.email_address == '[email protected]'
),
not_(users.c.id > 5)
)
expected_sql = """users.name LIKE :name_1
AND users.id = addresses.user_id
AND (addresses.email_address = :email_address_1
OR addresses.email_address = :email_address_2)
AND users.id <= :id_1"""
assert str(obj) == ''.join(expected_sql.split('\n')), \
"complex condition"
# example 2
obj = users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & \
(
(addresses.c.email_address == '[email protected]') |
(addresses.c.email_address == '[email protected]')
) \
& ~(users.c.id > 5)
assert str(obj) == ''.join(expected_sql.split('\n')), \
"complex condition using python operators"
# example 3
s = select([(users.c.fullname +
", " + addresses.c.email_address).
label('title')]). \
where(
and_(
users.c.id == addresses.c.user_id,
users.c.name.between('m', 'z'),
or_(
addresses.c.email_address.like('%@aol.com'),
addresses.c.email_address.like('%@msn.com')
)
)
)
results = engine_testaccount.execute(s).fetchall()
assert results[0][0] == 'Wendy Williams, [email protected]'
# Aliases
a1 = addresses.alias()
a2 = addresses.alias()
s = select([users]).where(and_(
users.c.id == a1.c.user_id,
users.c.id == a2.c.user_id,
a1.c.email_address == '[email protected]',
a2.c.email_address == '[email protected]'))
results = engine_testaccount.execute(s).fetchone()
assert results == (1, 'jack', 'Jack Jones')
# Joins
assert str(users.join(addresses)) == 'users JOIN addresses ON ' \
'users.id = addresses.user_id'
assert str(users.join(addresses,
addresses.c.email_address.like(
users.c.name + '%'))) == \
'users JOIN addresses ' \
'ON addresses.email_address LIKE users.name || :name_1'
s = select([users.c.fullname]).select_from(
users.join(addresses,
addresses.c.email_address.like(users.c.name + '%')))
results = engine_testaccount.execute(s).fetchall()
assert results[1] == ('Jack Jones',)
s = select([users.c.fullname]).select_from(users.outerjoin(
addresses)).order_by(users.c.fullname)
results = engine_testaccount.execute(s).fetchall()
assert results[-1] == ('Wendy Williams',)
finally:
conn.close()
# drop tables
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
@pytest.mark.skip("""
Reflection is not implemented yet.
""")
def test_reflection(engine_testaccount):
"""
Tests Reflection
"""
engine_testaccount.execute("""
CREATE OR REPLACE TABLE user (
id Integer primary key,
name String,
fullname String
)
""")
try:
meta = MetaData()
user_reflected = Table('user', meta, autoload=True,
autoload_with=engine_testaccount)
assert user_reflected.c == ['user.id', 'user.name', 'user.fullname']
finally:
engine_testaccount.execute("""
DROP TABLE IF EXISTS user
""")
def test_inspect_column(engine_testaccount):
"""
Tests Inspect
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
inspector = inspect(engine_testaccount)
all_table_names = inspector.get_table_names()
assert 'users' in all_table_names
assert 'addresses' in all_table_names
columns_in_users = inspector.get_columns('users')
assert columns_in_users[0]['autoincrement'], 'autoincrement'
assert columns_in_users[0]['default'] is None, 'default'
assert columns_in_users[0]['name'] == 'id', 'name'
assert columns_in_users[0]['primary_key'], 'primary key'
assert not columns_in_users[1]['autoincrement'], 'autoincrement'
assert columns_in_users[1]['default'] is None, 'default'
assert columns_in_users[1]['name'] == 'name', 'name'
assert not columns_in_users[1]['primary_key'], 'primary key'
assert not columns_in_users[2]['autoincrement'], 'autoincrement'
assert columns_in_users[2]['default'] is None, 'default'
assert columns_in_users[2]['name'] == 'fullname', 'name'
assert not columns_in_users[2]['primary_key'], 'primary key'
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_indexes(engine_testaccount):
"""
Tests get indexes
NOTE: Snowflake doesn't support indexes
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
inspector = inspect(engine_testaccount)
assert inspector.get_indexes("users") == []
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_primary_keys(engine_testaccount):
"""
Tests get primary keys
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
inspector = inspect(engine_testaccount)
primary_keys = inspector.get_pk_constraint('users')
assert primary_keys['constrained_columns'] == ['id']
primary_keys = inspector.get_pk_constraint('addresses')
assert primary_keys['constrained_columns'] == ['id']
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_foreign_keys(engine_testaccount):
"""
Tests foreign keys
"""
metadata = MetaData()
fk_name = 'fk_users_id_from_addresses'
users, addresses = _create_users_addresses_tables(
engine_testaccount,
metadata, fk=fk_name)
try:
inspector = inspect(engine_testaccount)
foreign_keys = inspector.get_foreign_keys('addresses')
assert foreign_keys[0]['name'] == fk_name
assert foreign_keys[0]['constrained_columns'] == ['user_id']
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
def test_get_multiple_column_primary_key(engine_testaccount):
"""
Tests multicolumn primary key with and without autoincrement
"""
metadata = MetaData()
mytable = Table('mytable', metadata,
Column('gid',
Integer,
primary_key=True,
autoincrement=False),
Column('id',
Integer,
primary_key=True,
autoincrement=True))
metadata.create_all(engine_testaccount)
try:
inspector = inspect(engine_testaccount)
columns_in_mytable = inspector.get_columns('mytable')
assert not columns_in_mytable[0]['autoincrement'], 'autoincrement'
assert columns_in_mytable[0]['default'] is None, 'default'
assert columns_in_mytable[0]['name'] == 'gid', 'name'
assert columns_in_mytable[0]['primary_key'], 'primary key'
assert columns_in_mytable[1]['autoincrement'], 'autoincrement'
assert columns_in_mytable[1]['default'] is None, 'default'
assert columns_in_mytable[1]['name'] == 'id', 'name'
assert columns_in_mytable[1]['primary_key'], 'primary key'
primary_keys = inspector.get_pk_constraint('mytable')
assert primary_keys['constrained_columns'] == ['gid', 'id']
finally:
mytable.drop(engine_testaccount)
def test_create_table_with_cluster_by(engine_testaccount):
# Test case for https://github.com/snowflakedb/snowflake-sqlalchemy/pull/14
metadata = MetaData()
user = Table('clustered_user', metadata,
Column('Id', Integer, primary_key=True),
Column('name', String),
snowflake_clusterby=['Id', 'name'])
metadata.create_all(engine_testaccount)
try:
inspector = inspect(engine_testaccount)
columns_in_table = inspector.get_columns('clustered_user')
assert columns_in_table[0]['name'] == 'Id', 'name'
finally:
user.drop(engine_testaccount)
def test_view_names(engine_testaccount):
"""
Tests all views
"""
inspector = inspect(engine_testaccount)
information_schema_views = inspector.get_view_names(
schema='information_schema')
assert 'columns' in information_schema_views
assert 'table_constraints' in information_schema_views
def test_view_definition(engine_testaccount, db_parameters):
"""
Tests view definition
"""
test_table_name = "test_table_sqlalchemy"
test_view_name = "testview_sqlalchemy"
engine_testaccount.execute("""
CREATE OR REPLACE TABLE {0} (
id INTEGER,
name STRING
)
""".format(test_table_name))
sql = """
CREATE OR REPLACE VIEW {0} AS
SELECT * FROM {1} WHERE id > 10""".format(
test_view_name, test_table_name)
engine_testaccount.execute(text(sql).execution_options(
autocommit=True))
try:
inspector = inspect(engine_testaccount)
assert inspector.get_view_definition(test_view_name) == sql.strip()
assert inspector.get_view_definition(test_view_name,
db_parameters['schema']) == \
sql.strip()
assert inspector.get_view_names() == [test_view_name]
finally:
engine_testaccount.execute(text(
"DROP TABLE IF EXISTS {0}".format(test_table_name)))
engine_testaccount.execute(text(
"DROP VIEW IF EXISTS {0}".format(test_view_name)))
def test_view_comment_reading(engine_testaccount, db_parameters):
"""
Tests reading a comment from a view once it's defined
"""
test_table_name = "test_table_sqlalchemy"
test_view_name = "testview_sqlalchemy"
engine_testaccount.execute("""
CREATE OR REPLACE TABLE {} (
id INTEGER,
name STRING
)
""".format(test_table_name))
sql = """
CREATE OR REPLACE VIEW {} AS
SELECT * FROM {} WHERE id > 10""".format(
test_view_name, test_table_name)
engine_testaccount.execute(text(sql).execution_options(
autocommit=True))
comment_text = "hello my viewing friends"
sql = "COMMENT ON VIEW {} IS '{}';".format(
test_view_name, comment_text)
engine_testaccount.execute(text(sql).execution_options(
autocommit=True))
try:
inspector = inspect(engine_testaccount)
# NOTE: sqlalchemy doesn't have a way to get view comments specifically,
# but the code to get table comments should work for views too
assert inspector.get_table_comment(test_view_name) == {'text': comment_text}
assert inspector.get_table_comment(test_table_name) == {'text': None}
assert str(inspector.get_columns(test_table_name)) == str(inspector.get_columns(test_view_name))
finally:
engine_testaccount.execute(text(
"DROP TABLE IF EXISTS {0}".format(test_table_name)))
engine_testaccount.execute(text(
"DROP VIEW IF EXISTS {0}".format(test_view_name)))
@pytest.mark.skip("Temp table cannot be viewed for some reason")
def test_get_temp_table_names(engine_testaccount):
num_of_temp_tables = 2
temp_table_name = "temp_table"
for idx in range(num_of_temp_tables):
engine_testaccount.execute(text("""
CREATE TEMPORARY TABLE {0} (col1 integer, col2 string)
""".format(temp_table_name + str(idx))).execution_options(
autocommit=True))
for row in engine_testaccount.execute("SHOW TABLES"):
print(row)
try:
inspector = inspect(engine_testaccount)
temp_table_names = inspector.get_temp_table_names()
assert len(temp_table_names) == num_of_temp_tables
finally:
pass
def test_create_table_with_schema(engine_testaccount, db_parameters):
metadata = MetaData()
new_schema = db_parameters['schema'] + "_NEW"
engine_testaccount.execute(text(
"CREATE OR REPLACE SCHEMA \"{0}\"".format(new_schema)))
Table('users', metadata,
Column('id', Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name', String),
Column('fullname', String),
schema=new_schema
)
metadata.create_all(engine_testaccount)
try:
inspector = inspect(engine_testaccount)
columns_in_users = inspector.get_columns('users', schema=new_schema)
assert columns_in_users is not None
finally:
metadata.drop_all(engine_testaccount)
engine_testaccount.execute(
text("DROP SCHEMA IF EXISTS \"{0}\"".format(new_schema)))
@pytest.mark.skipif(os.getenv("SNOWFLAKE_GCP") is not None, reason="PUT and GET is not supported for GCP yet")
def test_copy(engine_testaccount):
"""
COPY must be in a transaction
"""
metadata = MetaData()
users, addresses = _create_users_addresses_tables_without_sequence(
engine_testaccount,
metadata)
try:
engine_testaccount.execute(
"PUT file://{file_name} @%users".format(
file_name=os.path.join(THIS_DIR, "data", "users.txt")))
engine_testaccount.execute("COPY INTO users")
results = engine_testaccount.execute("SELECT * FROM USERS").fetchall()
assert results is not None and len(results) > 0
finally:
addresses.drop(engine_testaccount)
users.drop(engine_testaccount)
@pytest.mark.skip("""
No transaction works yet in the core API. Use orm API or Python Connector
directly if needed at the moment.
Note Snowflake DB supports DML transaction natively, but we have not figured out
how to integrate with SQLAlchemy core API yet.
""")
def test_transaction(engine_testaccount, db_parameters):
engine_testaccount.execute(text("""
CREATE TABLE {0} (c1 number)""".format(db_parameters['name'])))
trans = engine_testaccount.connect().begin()
try:
engine_testaccount.execute(text("""
INSERT INTO {0} VALUES(123)
""".format(db_parameters['name'])))
trans.commit()
engine_testaccount.execute(text("""
INSERT INTO {0} VALUES(456)
""".format(db_parameters['name'])))
trans.rollback()
results = engine_testaccount.execute("""
SELECT * FROM {0}
""".format(db_parameters['name'])).fetchall()
assert results == [(123,)]
finally:
engine_testaccount.execute(text("""
DROP TABLE IF EXISTS {0}
""".format(db_parameters['name'])))
def test_get_schemas(engine_testaccount):
"""
Tests get schemas from inspect.
Although the method get_schema_names is not part of DefaultDialect,
inspect() may call the method if exists.
"""
inspector = inspect(engine_testaccount)
schemas = inspector.get_schema_names()
assert 'information_schema' in schemas
def test_column_metadata(engine_testaccount):
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Appointment(Base):
__tablename__ = 'appointment'
id = Column(Numeric(38, 3), primary_key=True)
string_with_len = Column(String(100))
binary_data = Column(LargeBinary)
real_data = Column(REAL)
Base.metadata.create_all(engine_testaccount)
metadata = Base.metadata
t = Table('appointment', metadata)
inspector = inspect(engine_testaccount)
inspector.reflecttable(t, None)
assert str(t.columns['id'].type) == 'DECIMAL(38, 3)'
assert str(t.columns['string_with_len'].type) == 'VARCHAR(100)'
assert str(t.columns['binary_data'].type) == 'BINARY'
assert str(t.columns['real_data'].type) == 'FLOAT'
def _get_engine_with_columm_metadata_cache(
db_parameters, user=None, password=None, account=None):
"""
Creates a connection with column metadata cache
"""
if user is not None:
db_parameters['user'] = user
if password is not None:
db_parameters['password'] = password
if account is not None:
db_parameters['account'] = account
from sqlalchemy.pool import NullPool
from sqlalchemy import create_engine
from snowflake.sqlalchemy import URL
engine = create_engine(URL(
user=db_parameters['user'],
password=db_parameters['password'],
host=db_parameters['host'],
port=db_parameters['port'],
database=db_parameters['database'],
schema=db_parameters['schema'],
account=db_parameters['account'],
protocol=db_parameters['protocol'],
cache_column_metadata=True,
), poolclass=NullPool)
return engine
def test_many_table_column_metadata(db_parameters):
"""
Get dozens of table metadata with column metadata cache.
cache_column_metadata=True will cache all column metadata for all tables
in the schema.
"""
engine = _get_engine_with_columm_metadata_cache(db_parameters)
RE_SUFFIX_NUM = re.compile(r'.*(\d+)$')
metadata = MetaData()
total_objects = 10
for idx in range(total_objects):
Table('mainusers' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name' + str(idx), String),
Column('fullname', String),
Column('password', String)
)
Table('mainaddresses' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id' + str(idx), None,
ForeignKey('mainusers' + str(idx) + '.id' + str(idx))),
Column('email_address' + str(idx), String, nullable=False)
)
metadata.create_all(engine)
inspector = inspect(engine)
cnt = 0
schema = inspector.default_schema_name
for table_name in inspector.get_table_names(schema):
m = RE_SUFFIX_NUM.match(table_name)
if m:
suffix = m.group(1)
cs = inspector.get_columns(table_name, schema)
if table_name.startswith("mainusers"):
assert len(cs) == 4
assert cs[1]['name'] == 'name' + suffix
cnt += 1
elif table_name.startswith("mainaddresses"):
assert len(cs) == 3
assert cs[2]['name'] == 'email_address' + suffix
cnt += 1
ps = inspector.get_pk_constraint(table_name, schema)
if table_name.startswith("mainusers"):
assert ps['constrained_columns'] == ['id' + suffix]
elif table_name.startswith("mainaddresses"):
assert ps['constrained_columns'] == ['id' + suffix]
fs = inspector.get_foreign_keys(table_name, schema)
if table_name.startswith("mainusers"):
assert len(fs) == 0
elif table_name.startswith("mainaddresses"):
assert len(fs) == 1
assert fs[0]['constrained_columns'] == ['user_id' + suffix]
assert fs[0]['referred_table'] == 'mainusers' + suffix
assert cnt == total_objects * 2, 'total number of test objects'
def test_cache_time(engine_testaccount, db_parameters):
"""Check whether Inspector cache is working"""
# Set up necessary tables
metadata = MetaData()
total_objects = 10
for idx in range(total_objects):
Table('mainusers' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name' + str(idx), String),
Column('fullname', String),
Column('password', String)
)
Table('mainaddresses' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id' + str(idx), None,
ForeignKey('mainusers' + str(idx) + '.id' + str(idx))),
Column('email_address' + str(idx), String, nullable=False)
)
metadata.create_all(engine_testaccount)
inspector = inspect(engine_testaccount)
schema = db_parameters['schema']
def harass_inspector():
for table_name in inspector.get_table_names(schema):
inspector.get_columns(table_name, schema)
inspector.get_pk_constraint(table_name, schema)
inspector.get_foreign_keys(table_name, schema)
outcome = False
# Allow up to 5 times for the speed test to pass to avoid flaky test
for _ in range(5):
# Python 2.7 has no timeit.timeit with globals and locals parameters
s_time = time.time()
harass_inspector()
m_time = time.time()
harass_inspector()
time2 = time.time() - m_time
time1 = m_time - s_time
print("Ran inspector through tables twice, times:\n\tfirst: {0}\n\tsecond: {1}".format(time1, time2))
if time2 < time1 * 0.01:
outcome = True
break
else:
# Reset inspector to reset cache
inspector = inspect(engine_testaccount)
metadata.drop_all(engine_testaccount)
assert outcome
@pytest.mark.timeout(15)
def test_region():
from sqlalchemy import create_engine
engine = create_engine(URL(
user='testuser',
password='testpassword',
account='testaccount',
region='eu-central-1',
login_timeout=5
))
try:
engine.execute('select current_version()').fetchone()
pytest.fail('should not run')
except Exception as ex:
assert ex.orig.errno == 250001
assert 'Failed to connect to DB' in ex.orig.msg
assert 'testaccount.eu-central-1.snowflakecomputing.com' in ex.orig.msg
@pytest.mark.timeout(15)
def test_azure():
from sqlalchemy import create_engine
engine = create_engine(URL(
user='testuser',
password='testpassword',
account='testaccount',
region='east-us-2.azure',
login_timeout=5
))
try:
engine.execute('select current_version()').fetchone()
pytest.fail('should not run')
except Exception as ex:
assert ex.orig.errno == 250001
assert 'Failed to connect to DB' in ex.orig.msg
assert 'testaccount.east-us-2.azure.snowflakecomputing.com' in \
ex.orig.msg
def test_load_dialect():
"""
Test loading Snowflake SQLAlchemy dialect class
"""
assert isinstance(dialects.registry.load('snowflake')(), dialect)
@pytest.mark.parametrize('conditional_flag', [True, False])
@pytest.mark.parametrize('update_flag,insert_flag,delete_flag', [
(True, False, False),
(False, True, False),
(False, False, True),
(False, True, True),
(True, True, False)])
def test_upsert(engine_testaccount, update_flag, insert_flag, delete_flag, conditional_flag):
meta = MetaData()
users = Table('users', meta,
Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String))
onboarding_users = Table('onboarding_users', meta,
Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String),
Column('delete', Boolean))
meta.create_all(engine_testaccount)
conn = engine_testaccount.connect()
try:
conn.execute(users.insert(), [
{'id': 1, 'name': 'mark', 'fullname': 'Mark Keller'},
{'id': 4, 'name': 'luke', 'fullname': 'Luke Lorimer'},
{'id': 2, 'name': 'amanda', 'fullname': 'Amanda Harris'}])
conn.execute(onboarding_users.insert(), [
{'id': 2, 'name': 'amanda', 'fullname': 'Amanda Charlotte Harris', 'delete': True},
{'id': 3, 'name': 'jim', 'fullname': 'Jim Wang', 'delete': False},
{'id': 4, 'name': 'lukas', 'fullname': 'Lukas Lorimer', 'delete': False},
{'id': 5, 'name': 'andras', 'fullname': None, 'delete': False}
])
merge = MergeInto(users, onboarding_users, users.c.id == onboarding_users.c.id)
if update_flag:
clause = merge.when_matched_then_update().values(name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname)
if conditional_flag:
clause.where(onboarding_users.c.name != 'amanda')
if insert_flag:
clause = merge.when_not_matched_then_insert().values(
id=onboarding_users.c.id,
name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname,
)
if conditional_flag:
clause.where(onboarding_users.c.fullname != None) # NOQA
if delete_flag:
clause = merge.when_matched_then_delete()
if conditional_flag:
clause.where(onboarding_users.c.delete == True) # NOQA
conn.execute(merge)
users_tuples = {tuple(row) for row in conn.execute(select([users]))}
onboarding_users_tuples = {tuple(row) for row in conn.execute(select([onboarding_users]))}
expected_users = {
(1, 'mark', 'Mark Keller'),
(2, 'amanda', 'Amanda Harris'),
(4, 'luke', 'Luke Lorimer')
}
if update_flag:
if not conditional_flag:
expected_users.remove((2, 'amanda', 'Amanda Harris'))
expected_users.add((2, 'amanda', 'Amanda Charlotte Harris'))
expected_users.remove((4, 'luke', 'Luke Lorimer'))
expected_users.add((4, 'lukas', 'Lukas Lorimer'))
elif delete_flag:
if not conditional_flag:
expected_users.remove((4, 'luke', 'Luke Lorimer'))
expected_users.remove((2, 'amanda', 'Amanda Harris'))
if insert_flag:
if not conditional_flag:
expected_users.add((5, 'andras', None))
expected_users.add((3, 'jim', 'Jim Wang'))
expected_onboarding_users = {
(2, 'amanda', 'Amanda Charlotte Harris', True),
(3, 'jim', 'Jim Wang', False),
(4, 'lukas', 'Lukas Lorimer', False),
(5, 'andras', None, False)
}
assert users_tuples == expected_users
assert onboarding_users_tuples == expected_onboarding_users
finally:
conn.close()
users.drop(engine_testaccount)
onboarding_users.drop(engine_testaccount)
def test_deterministic_merge_into(sql_compiler):
meta = MetaData()
users = Table('users', meta,
Column('id', Integer, Sequence('user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String))
onboarding_users = Table('onboarding_users', meta,
Column('id', Integer, Sequence('new_user_id_seq'), primary_key=True),
Column('name', String),
Column('fullname', String),
Column('delete', Boolean))
merge = MergeInto(users, onboarding_users, users.c.id == onboarding_users.c.id)
merge.when_matched_then_update().values(name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname)
merge.when_not_matched_then_insert().values(
id=onboarding_users.c.id,
name=onboarding_users.c.name,
fullname=onboarding_users.c.fullname,
).where(onboarding_users.c.fullname != None) # NOQA
assert sql_compiler(merge) == "MERGE INTO users USING onboarding_users ON users.id = onboarding_users.id " \
"WHEN MATCHED THEN UPDATE SET fullname = onboarding_users.fullname, " \
"name = onboarding_users.name WHEN NOT MATCHED AND onboarding_users.fullname " \
"IS NOT NULL THEN INSERT (fullname, id, name) VALUES (onboarding_users.fullname, " \
"onboarding_users.id, onboarding_users.name)"
def test_comments(engine_testaccount):
"""Tests strictly reading column comment through SQLAlchemy"""
table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
try:
engine_testaccount.execute("create table public.{} (\"col1\" text);".format(table_name))
engine_testaccount.execute("alter table public.{} alter \"col1\" comment 'this is my comment'".format(table_name))
engine_testaccount.execute("select comment from information_schema.columns where table_name='{}'".format(table_name)).fetchall()
inspector = inspect(engine_testaccount)
columns = inspector.get_columns(table_name, schema='PUBLIC')
assert columns[0].get('comment') == u'this is my comment'
finally:
engine_testaccount.execute("drop table public.{}".format(table_name))
def test_comment_sqlalchemy(db_parameters, engine_testaccount, on_public_ci):
"""Testing adding/reading column and table comments through SQLAlchemy"""
new_schema = db_parameters['schema'] + '2'
# Use same table name in 2 different schemas to make sure comment retrieval works properly
table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
table_comment1 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
column_comment1 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
table_comment2 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
column_comment2 = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
engine2, _ = get_engine(schema=new_schema)
con2 = None
if not on_public_ci:
con2 = engine2.connect()
con2.execute("CREATE SCHEMA IF NOT EXISTS {0}".format(new_schema))
inspector = inspect(engine_testaccount)
metadata1 = MetaData()
metadata2 = MetaData()
mytable1 = Table(table_name,
metadata1,
Column("tstamp", DateTime, comment=column_comment1),
comment=table_comment1)
mytable2 = Table(table_name,
metadata2,
Column("tstamp", DateTime, comment=column_comment2),
comment=table_comment2)
metadata1.create_all(engine_testaccount, tables=[mytable1])
if not on_public_ci:
metadata2.create_all(engine2, tables=[mytable2])
try:
assert inspector.get_columns(table_name)[0]['comment'] == column_comment1
assert inspector.get_table_comment(table_name)['text'] == table_comment1
if not on_public_ci:
assert inspector.get_columns(table_name, schema=new_schema)[0]['comment'] == column_comment2
assert inspector.get_table_comment(
table_name,
                schema=new_schema.upper()  # Note: since we did not quote the schema name, it was uppercased
)['text'] == table_comment2
finally:
mytable1.drop(engine_testaccount)
if not on_public_ci:
mytable2.drop(engine2)
con2.execute("DROP SCHEMA IF EXISTS {0}".format(new_schema))
con2.close()
engine2.dispose()
def test_special_schema_character(db_parameters, on_public_ci):
"""Make sure we decode special characters correctly"""
if on_public_ci:
pytest.skip("Public CIs cannot create Schemas and Databases")
# Constants
    database = "a/b/c"  # '/'.join([choice(ascii_lowercase) for _ in range(3)])
schema = "d/e/f" # '/'.join([choice(ascii_lowercase) for _ in range(3)])
# Setup
options = dict(**db_parameters)
conn = connect(**options)
conn.cursor().execute("CREATE OR REPLACE DATABASE \"{0}\"".format(database))
conn.cursor().execute("CREATE OR REPLACE SCHEMA \"{0}\"".format(schema))
conn.close()
# Test
options.update({'database': '"' + database + '"',
'schema': '"' + schema + '"'})
sf_conn = connect(**options)
sf_connection = [res for res in sf_conn.cursor().execute("select current_database(), "
"current_schema();")]
sa_conn = create_engine(URL(**options)).connect()
sa_connection = [res for res in sa_conn.execute("select current_database(), "
"current_schema();")]
sa_conn.close()
sf_conn.close()
# Teardown
conn = connect(**options)
conn.cursor().execute("DROP DATABASE IF EXISTS \"{0}\"".format(database))
conn.close()
assert [(database, schema)] == sf_connection == sa_connection
def test_autoincrement(engine_testaccount):
metadata = MetaData()
users = Table('users', metadata,
Column('uid', Integer, Sequence('id_seq'), primary_key=True),
Column('name', String(39)))
try:
users.create(engine_testaccount)
connection = engine_testaccount.connect()
connection.execute(users.insert(), [{'name': 'sf1'}])
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1')
]
connection.execute(users.insert(), {'name': 'sf2'}, {'name': 'sf3'})
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1'),
(2, 'sf2'),
(3, 'sf3')
]
connection.execute(users.insert(), {'name': 'sf4'})
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1'),
(2, 'sf2'),
(3, 'sf3'),
(4, 'sf4')
]
seq = Sequence('id_seq')
nextid = connection.execute(seq)
connection.execute(users.insert(), [{'uid': nextid, 'name': 'sf5'}])
assert connection.execute(select([users])).fetchall() == [
(1, 'sf1'),
(2, 'sf2'),
(3, 'sf3'),
(4, 'sf4'),
(5, 'sf5')
]
finally:
users.drop(engine_testaccount)
def test_get_too_many_columns(engine_testaccount, db_parameters):
    """Check whether the Inspector cache is working when there are too many columns to cache the whole schema's columns."""
# Set up necessary tables
metadata = MetaData()
total_objects = 10
for idx in range(total_objects):
Table('mainuserss' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name' + str(idx), String),
Column('fullname', String),
Column('password', String)
)
Table('mainaddressess' + str(idx), metadata,
Column('id' + str(idx), Integer, Sequence('address_id_seq'),
primary_key=True),
Column('user_id' + str(idx), None,
ForeignKey('mainuserss' + str(idx) + '.id' + str(idx))),
Column('email_address' + str(idx), String, nullable=False)
)
metadata.create_all(engine_testaccount)
inspector = inspect(engine_testaccount)
schema = db_parameters['schema']
# Emulate error
with patch.object(inspector.dialect, '_get_schema_columns', return_value=None) as mock_method:
def harass_inspector():
for table_name in inspector.get_table_names(schema):
column_metadata = inspector.get_columns(table_name, schema)
inspector.get_pk_constraint(table_name, schema)
inspector.get_foreign_keys(table_name, schema)
                assert 3 <= len(column_metadata) <= 4  # Each of these tables should have either 3 or 4 columns
outcome = False
# Allow up to 5 times for the speed test to pass to avoid flaky test
for _ in range(5):
# Python 2.7 has no timeit.timeit with globals and locals parameters
s_time = time.time()
harass_inspector()
m_time = time.time()
harass_inspector()
time2 = time.time() - m_time
time1 = m_time - s_time
print("Ran inspector through tables twice, times:\n\tfirst: {0}\n\tsecond: {1}".format(time1, time2))
if time2 < time1 * 0.01:
outcome = True
break
else:
# Reset inspector to reset cache
inspector = inspect(engine_testaccount)
metadata.drop_all(engine_testaccount)
assert mock_method.call_count > 0 # Make sure we actually mocked the issue happening
assert outcome
def test_too_many_columns_detection(engine_testaccount, db_parameters):
    """This tests whether a 'too many columns' error actually triggers the more granular per-table version."""
# Set up a single table
metadata = MetaData()
Table('users', metadata,
Column('id', Integer, Sequence('user_id_seq'),
primary_key=True),
Column('name', String),
Column('fullname', String),
Column('password', String)
)
metadata.create_all(engine_testaccount)
inspector = inspect(engine_testaccount)
# Do test
original_execute = inspector.bind.execute
def mock_helper(command, *args, **kwargs):
if '_get_schema_columns' in command:
raise ProgrammingError("Information schema query returned too much data. Please repeat query with more "
"selective predicates.", 90030)
else:
return original_execute(command, *args, **kwargs)
with patch.object(inspector.bind, 'execute', side_effect=mock_helper):
column_metadata = inspector.get_columns('users', db_parameters['schema'])
assert len(column_metadata) == 4
# Clean up
metadata.drop_all(engine_testaccount)
def test_empty_comments(engine_testaccount):
"""Test that no comment returns None"""
table_name = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
try:
engine_testaccount.execute("create table public.{} (\"col1\" text);".format(table_name))
engine_testaccount.execute("select comment from information_schema.columns where table_name='{}'".format(table_name)).fetchall()
inspector = inspect(engine_testaccount)
columns = inspector.get_columns(table_name, schema='PUBLIC')
assert inspector.get_table_comment(table_name, schema='PUBLIC') == {'text': None}
assert all([c['comment'] is None for c in columns])
finally:
engine_testaccount.execute("drop table public.{}".format(table_name))
|
py | 1a2fd9b881bcb20c6a7d72e836aff2941ba30a0c | """
ZetCode PyQt5 tutorial
This example shows an icon
in the titlebar of the window.
Author: Jan Bodnar
Website: zetcode.com
Last edited: August 2017
"""
import sys
from PyQt5.QtWidgets import QApplication, QWidget
from PyQt5.QtGui import QIcon
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 300, 220)
self.setWindowTitle('Icon')
self.setWindowIcon(QIcon('web.png'))
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_()) |
py | 1a2fdb0ecb858371470a0abd13011cb82b8f8dc1 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import json
import freezegun
import pytest
import update_ext_version
TEST_DATETIME = "2022-03-14 01:23:45"
# The build ID is calculated via:
# "1" + datetime.datetime.strptime(TEST_DATETIME,"%Y-%m-%d %H:%M:%S").strftime('%j%H%M')
EXPECTED_BUILD_ID = "10730123"
def create_package_json(directory, version):
    """Create `package.json` in `directory` with its version set to `version`."""
package_json = directory / "package.json"
package_json.write_text(json.dumps({"version": version}), encoding="utf-8")
return package_json
def run_test(tmp_path, version, args, expected):
package_json = create_package_json(tmp_path, version)
update_ext_version.main(package_json, args)
package = json.loads(package_json.read_text(encoding="utf-8"))
assert expected == update_ext_version.parse_version(package["version"])
@pytest.mark.parametrize(
"version, args",
[
("1.0.0-rc", []),
("1.1.0-rc", ["--release"]),
("1.0.0-rc", ["--release", "--build-id", "-1"]),
("1.0.0-rc", ["--release", "--for-publishing", "--build-id", "-1"]),
("1.0.0-rc", ["--release", "--for-publishing", "--build-id", "999999999999"]),
("1.1.0-rc", ["--build-id", "-1"]),
("1.1.0-rc", ["--for-publishing", "--build-id", "-1"]),
("1.1.0-rc", ["--for-publishing", "--build-id", "999999999999"]),
],
)
def test_invalid_args(tmp_path, version, args):
with pytest.raises(ValueError):
run_test(tmp_path, version, args, None)
@pytest.mark.parametrize(
"version, args, expected",
[
("1.1.0-rc", ["--build-id", "12345"], ("1", "1", "12345", "rc")),
("1.0.0-rc", ["--release", "--build-id", "12345"], ("1", "0", "12345", "")),
(
"1.1.0-rc",
["--for-publishing", "--build-id", "12345"],
("1", "1", "12345", ""),
),
(
"1.0.0-rc",
["--release", "--for-publishing", "--build-id", "12345"],
("1", "0", "12345", ""),
),
(
"1.0.0-rc",
["--release", "--build-id", "999999999999"],
("1", "0", "999999999999", ""),
),
(
"1.1.0-rc",
["--build-id", "999999999999"],
("1", "1", "999999999999", "rc"),
),
("1.1.0-rc", [], ("1", "1", EXPECTED_BUILD_ID, "rc")),
(
"1.0.0-rc",
["--release"],
("1", "0", "0", ""),
),
(
"1.1.0-rc",
["--for-publishing"],
("1", "1", EXPECTED_BUILD_ID, ""),
),
(
"1.0.0-rc",
["--release", "--for-publishing"],
("1", "0", "0", ""),
),
(
"1.0.0-rc",
["--release"],
("1", "0", "0", ""),
),
(
"1.1.0-rc",
[],
("1", "1", EXPECTED_BUILD_ID, "rc"),
),
],
)
@freezegun.freeze_time("2022-03-14 01:23:45")
def test_update_ext_version(tmp_path, version, args, expected):
run_test(tmp_path, version, args, expected)
|
py | 1a2fdb91f2a3778bbd6edcb216c365f3b450590c | import simplejson as json
from collections import namedtuple
def json2obj(data):
return json.loads(data, object_hook=_json_object_hook)
def _json_object_hook(dobj):
dobj['json_dict'] = dobj.copy()
X = namedtuple('X', dobj.keys(), rename=True)
X.remove = lambda x: None
return(X(*dobj.values()))
|
py | 1a2fdccfb072d8e8a94167ef8fee28278425beb9 | from core.project.project import Project
from core.graph.region_chunk import RegionChunk
import numpy as np
import matplotlib.pyplot as plt
from skimage.segmentation import random_walker
from skimage.data import binary_blobs
import skimage
if __name__ == '__main__':
p = Project()
p.load('/Users/flipajs/Documents/wd/FERDA/Cam1_')
ch = p.chm[257]
rch = RegionChunk(ch, p.gm, p.rm)
start_vertex = ch.start_vertex()
in_regions = []
for n in start_vertex.in_neighbors():
r = p.gm.region(n)
in_regions.append(r)
r = rch[0]
r.frame()
from utils.video_manager import get_auto_video_manager
from skimage.morphology import skeletonize_3d
import cv2
vm = get_auto_video_manager(p)
    # TODO: idea - label erosion before each next iteration...
from scipy.ndimage.morphology import binary_erosion
whole_labels = None
for r1 in rch.regions_gen():
markers = np.zeros((1000, 1000), dtype=np.int32)
r1_im = np.zeros((1000, 1000), dtype=np.bool)
r1_im[r1.pts()[:, 0], r1.pts()[:, 1]] = True
markers[np.logical_not(r1_im)] = -1
img = vm.get_frame(r1.frame())
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
for i, r in enumerate(in_regions):
if whole_labels is None:
im2 = np.zeros((1000, 1000), dtype=np.bool)
im2[r.pts()[:, 0], r.pts()[:, 1]] = True
markers[np.logical_and(r1_im, im2)] = i+1
else:
l_ = whole_labels==i+1
l_ = binary_erosion(l_, iterations=5)
markers[np.logical_and(r1_im, l_)] = i+1
tl = r1.roi().top_left_corner()
br = r1.roi().bottom_right_corner()
gray = gray[tl[0]:br[0], tl[1]:br[1]].copy()
markers = markers[tl[0]:br[0], tl[1]:br[1]].copy()
r1_im = r1_im[tl[0]:br[0], tl[1]:br[1]].copy()
skel = skeletonize_3d(r1_im)
        data = np.asarray(r1_im, dtype=np.uint8) * 255  # region mask as a uint8 image (not used further below)
labels = random_walker(gray, markers, beta=500000, mode='bf')
whole_labels = np.zeros((1000, 1000), dtype=np.int32)
whole_labels[tl[0]:br[0], tl[1]:br[1]] = labels.copy()
# Plot results
fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(8, 3.2), sharex=True, sharey=True)
ax1.imshow(gray, cmap='gray', interpolation='nearest')
ax1.axis('off')
ax1.set_adjustable('box-forced')
ax1.set_title('Noisy data')
ax2.imshow(markers, cmap='hot', interpolation='nearest')
ax2.axis('off')
ax2.set_adjustable('box-forced')
ax2.set_title('Markers')
ax3.imshow(labels, cmap='hot', interpolation='nearest')
ax3.axis('off')
ax3.set_adjustable('box-forced')
ax3.set_title('Segmentation')
ax4.imshow(skel)
ax4.axis('off')
ax4.set_adjustable('box-forced')
ax4.set_title('skeleton')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
plt.ion()
plt.waitforbuttonpress()
plt.close()
|
py | 1a2fdcd2a8cab6de6db5933a0757a8beb063ac74 | from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from datetime import datetime
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
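    # Hedged usage sketch (not part of the generated stub), mirroring the
    # 'create_foo' example in the docstring above; the operation name is an
    # assumption chosen for illustration:
    #
    #     if client.can_paginate('list_deliverability_test_reports'):
    #         paginator = client.get_paginator('list_deliverability_test_reports')
    #         for page in paginator.paginate():
    #             ...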
def create_configuration_set(self, ConfigurationSetName: str = None, TrackingOptions: Dict = None, DeliveryOptions: Dict = None, ReputationOptions: Dict = None, SendingOptions: Dict = None, Tags: List = None) -> Dict:
"""
Create a configuration set. *Configuration sets* are groups of rules that you can apply to the emails you send using Amazon Pinpoint. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/CreateConfigurationSet>`_
**Request Syntax**
::
response = client.create_configuration_set(
ConfigurationSetName='string',
TrackingOptions={
'CustomRedirectDomain': 'string'
},
DeliveryOptions={
'SendingPoolName': 'string'
},
ReputationOptions={
'ReputationMetricsEnabled': True|False,
'LastFreshStart': datetime(2015, 1, 1)
},
SendingOptions={
'SendingEnabled': True|False
},
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName:
The name of the configuration set.
:type TrackingOptions: dict
:param TrackingOptions:
An object that defines the open and click tracking options for emails that you send using the configuration set.
- **CustomRedirectDomain** *(string) --* **[REQUIRED]**
The domain that you want to use for tracking open and click events.
:type DeliveryOptions: dict
:param DeliveryOptions:
An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.
- **SendingPoolName** *(string) --*
The name of the dedicated IP pool that you want to associate with the configuration set.
:type ReputationOptions: dict
:param ReputationOptions:
An object that defines whether or not Amazon Pinpoint collects reputation metrics for the emails that you send that use the configuration set.
- **ReputationMetricsEnabled** *(boolean) --*
If ``true`` , tracking of reputation metrics is enabled for the configuration set. If ``false`` , tracking of reputation metrics is disabled for the configuration set.
- **LastFreshStart** *(datetime) --*
The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.
:type SendingOptions: dict
:param SendingOptions:
An object that defines whether or not Amazon Pinpoint can send email that you send using the configuration set.
- **SendingEnabled** *(boolean) --*
If ``true`` , email sending is enabled for the configuration set. If ``false`` , email sending is disabled for the configuration set.
:type Tags: list
:param Tags:
An object that defines the tags (keys and values) that you want to associate with the configuration set.
- *(dict) --*
An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.
A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
* Tag keys and values are case sensitive.
* For each associated resource, each tag key must be unique and it can have only one value.
* The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.
* You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.
- **Key** *(string) --* **[REQUIRED]**
One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.
- **Value** *(string) --* **[REQUIRED]**
The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.
:rtype: dict
:returns:
"""
pass
def create_configuration_set_event_destination(self, ConfigurationSetName: str, EventDestinationName: str, EventDestination: Dict) -> Dict:
"""
Create an event destination. In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
A single configuration set can include more than one event destination.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/CreateConfigurationSetEventDestination>`_
**Request Syntax**
::
response = client.create_configuration_set_event_destination(
ConfigurationSetName='string',
EventDestinationName='string',
EventDestination={
'Enabled': True|False,
'MatchingEventTypes': [
'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',
],
'KinesisFirehoseDestination': {
'IamRoleArn': 'string',
'DeliveryStreamArn': 'string'
},
'CloudWatchDestination': {
'DimensionConfigurations': [
{
'DimensionName': 'string',
'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',
'DefaultDimensionValue': 'string'
},
]
},
'SnsDestination': {
'TopicArn': 'string'
},
'PinpointDestination': {
'ApplicationArn': 'string'
}
}
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that you want to add an event destination to.
:type EventDestinationName: string
:param EventDestinationName: **[REQUIRED]**
A name that identifies the event destination within the configuration set.
:type EventDestination: dict
:param EventDestination: **[REQUIRED]**
An object that defines the event destination.
- **Enabled** *(boolean) --*
If ``true`` , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this ``EventDestinationDefinition`` .
If ``false`` , the event destination is disabled. When the event destination is disabled, events aren\'t sent to the specified destinations.
- **MatchingEventTypes** *(list) --*
An array that specifies which events Amazon Pinpoint should send to the destinations in this ``EventDestinationDefinition`` .
- *(string) --*
An email sending event type. For example, email sends, opens, and bounces are all email events.
- **KinesisFirehoseDestination** *(dict) --*
An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.
- **IamRoleArn** *(string) --* **[REQUIRED]**
The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.
- **DeliveryStreamArn** *(string) --* **[REQUIRED]**
The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.
- **CloudWatchDestination** *(dict) --*
An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.
- **DimensionConfigurations** *(list) --* **[REQUIRED]**
An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.
- *(dict) --*
An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.
- **DimensionName** *(string) --* **[REQUIRED]**
The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:
* It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **DimensionValueSource** *(string) --* **[REQUIRED]**
The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose ``messageTag`` . If you want Amazon Pinpoint to use your own email headers, choose ``emailHeader`` . If you want Amazon Pinpoint to use link tags, choose ``linkTags`` .
- **DefaultDimensionValue** *(string) --* **[REQUIRED]**
The default value of the dimension that is published to Amazon CloudWatch if you don\'t provide the value of the dimension when you send an email. This value has to meet the following criteria:
* It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **SnsDestination** *(dict) --*
An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
- **TopicArn** *(string) --* **[REQUIRED]**
The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the `Amazon SNS Developer Guide <https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html>`__ .
- **PinpointDestination** *(dict) --*
An object that defines a Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.
- **ApplicationArn** *(string) --*
The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.
:rtype: dict
:returns:
"""
pass
def create_dedicated_ip_pool(self, PoolName: str, Tags: List = None) -> Dict:
"""
Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your Amazon Pinpoint account. You can associate a pool with a configuration set. When you send an email that uses that configuration set, Amazon Pinpoint sends it using only the IP addresses in the associated pool.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/CreateDedicatedIpPool>`_
**Request Syntax**
::
response = client.create_dedicated_ip_pool(
PoolName='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type PoolName: string
:param PoolName: **[REQUIRED]**
The name of the dedicated IP pool.
:type Tags: list
:param Tags:
An object that defines the tags (keys and values) that you want to associate with the pool.
- *(dict) --*
An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.
A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
* Tag keys and values are case sensitive.
* For each associated resource, each tag key must be unique and it can have only one value.
* The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.
* You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.
- **Key** *(string) --* **[REQUIRED]**
One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.
- **Value** *(string) --* **[REQUIRED]**
The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.
:rtype: dict
:returns:
"""
pass
def create_deliverability_test_report(self, FromEmailAddress: str, Content: Dict, ReportName: str = None, Tags: List = None) -> Dict:
"""
Create a new predictive inbox placement test. Predictive inbox placement tests can help you predict how your messages will be handled by various email providers around the world. When you perform a predictive inbox placement test, you provide a sample message that contains the content that you plan to send to your customers. Amazon Pinpoint then sends that message to special email addresses spread across several major email providers. After about 24 hours, the test is complete, and you can use the ``GetDeliverabilityTestReport`` operation to view the results of the test.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/CreateDeliverabilityTestReport>`_
**Request Syntax**
::
response = client.create_deliverability_test_report(
ReportName='string',
FromEmailAddress='string',
Content={
'Simple': {
'Subject': {
'Data': 'string',
'Charset': 'string'
},
'Body': {
'Text': {
'Data': 'string',
'Charset': 'string'
},
'Html': {
'Data': 'string',
'Charset': 'string'
}
}
},
'Raw': {
'Data': b'bytes'
}
},
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'ReportId': 'string',
'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'
}
**Response Structure**
- *(dict) --*
Information about the predictive inbox placement test that you created.
- **ReportId** *(string) --*
A unique string that identifies the predictive inbox placement test.
- **DeliverabilityTestStatus** *(string) --*
The status of the predictive inbox placement test. If the status is ``IN_PROGRESS`` , then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is ``COMPLETE`` , then the test is finished, and you can use the ``GetDeliverabilityTestReport`` to view the results of the test.
:type ReportName: string
:param ReportName:
A unique name that helps you to identify the predictive inbox placement test when you retrieve the results.
:type FromEmailAddress: string
:param FromEmailAddress: **[REQUIRED]**
The email address that the predictive inbox placement test email was sent from.
:type Content: dict
:param Content: **[REQUIRED]**
The HTML body of the message that you sent when you performed the predictive inbox placement test.
- **Simple** *(dict) --*
The simple email message. The message consists of a subject and a message body.
- **Subject** *(dict) --* **[REQUIRED]**
The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in `RFC 2047 <https://tools.ietf.org/html/rfc2047>`__ .
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Body** *(dict) --* **[REQUIRED]**
The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.
- **Text** *(dict) --*
An object that represents the version of the message that is displayed in email clients that don\'t support HTML, or clients where the recipient has disabled HTML rendering.
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Html** *(dict) --*
An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Raw** *(dict) --*
The raw email message. The message has to meet the following criteria:
* The message has to contain a header and a body, separated by one blank line.
* All of the required header fields must be present in the message.
* Each part of a multipart MIME message must be formatted properly.
* If you include attachments, they must be in a file format that Amazon Pinpoint supports.
* The entire message must be Base64 encoded.
* If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\' email clients render the message properly.
* The length of any single line of text in the message can\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 <https://tools.ietf.org/html/rfc5321>`__ .
- **Data** *(bytes) --* **[REQUIRED]**
The raw email message. The message has to meet the following criteria:
* The message has to contain a header and a body, separated by one blank line.
* All of the required header fields must be present in the message.
* Each part of a multipart MIME message must be formatted properly.
* Attachments must be in a file format that Amazon Pinpoint supports.
* The entire message must be Base64 encoded.
* If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\' email clients render the message properly.
* The length of any single line of text in the message can\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 <https://tools.ietf.org/html/rfc5321>`__ .
:type Tags: list
:param Tags:
An object that defines the tags (keys and values) that you want to associate with the predictive inbox placement test.
- *(dict) --*
An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.
A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
* Tag keys and values are case sensitive.
* For each associated resource, each tag key must be unique and it can have only one value.
* The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.
* You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.
- **Key** *(string) --* **[REQUIRED]**
One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.
- **Value** *(string) --* **[REQUIRED]**
The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.
:rtype: dict
:returns:
"""
pass
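    # Hedged sketch (not part of the generated stub): one way to build the Raw.Data
    # payload described in the docstring above using only the standard library. The
    # addresses are placeholders, and whether the SDK expects the bytes to be
    # base64-encoded already or encodes them itself should be verified against the
    # service documentation; this follows the docstring's criteria literally.
    #
    #     import base64
    #     from email.mime.text import MIMEText
    #     msg = MIMEText('Hello from the placement test', 'plain')
    #     msg['Subject'] = 'Deliverability test'
    #     msg['From'] = 'sender@example.com'
    #     msg['To'] = 'recipient@example.com'
    #     raw_data = base64.b64encode(msg.as_bytes())
    #     # Content={'Raw': {'Data': raw_data}}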
def create_email_identity(self, EmailIdentity: str, Tags: List = None) -> Dict:
"""
Verifies an email identity for use with Amazon Pinpoint. In Amazon Pinpoint, an identity is an email address or domain that you use when you send email. Before you can use an identity to send email with Amazon Pinpoint, you first have to verify it. By verifying an address, you demonstrate that you're the owner of the address, and that you've given Amazon Pinpoint permission to send email from the address.
When you verify an email address, Amazon Pinpoint sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.
When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon Pinpoint detects these records in the DNS configuration for your domain. It usually takes around 72 hours to complete the domain verification process.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/CreateEmailIdentity>`_
**Request Syntax**
::
response = client.create_email_identity(
EmailIdentity='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{
'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',
'VerifiedForSendingStatus': True|False,
'DkimAttributes': {
'SigningEnabled': True|False,
'Status': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE'|'NOT_STARTED',
'Tokens': [
'string',
]
}
}
**Response Structure**
- *(dict) --*
          If the email identity is a domain, this object contains tokens that you can use to create a set of CNAME records. To successfully verify your domain, you have to add these records to the DNS configuration for your domain.
If the email identity is an email address, this object is empty.
- **IdentityType** *(string) --*
The email identity type.
- **VerifiedForSendingStatus** *(boolean) --*
Specifies whether or not the identity is verified. In Amazon Pinpoint, you can only send email from verified email addresses or domains. For more information about verifying identities, see the `Amazon Pinpoint User Guide <https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-email-manage-verify.html>`__ .
- **DkimAttributes** *(dict) --*
An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.
- **SigningEnabled** *(boolean) --*
If the value is ``true`` , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. If the value is ``false`` , then the messages that Amazon Pinpoint sends from the identity aren't DKIM-signed.
- **Status** *(string) --*
Describes whether or not Amazon Pinpoint has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:
* ``PENDING`` – Amazon Pinpoint hasn't yet located the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them.
* ``SUCCESS`` – Amazon Pinpoint located the DKIM records in the DNS configuration for the domain and determined that they're correct. Amazon Pinpoint can now send DKIM-signed email from the identity.
* ``FAILED`` – Amazon Pinpoint was unable to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them.
* ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the DKIM status for the domain.
              * ``NOT_STARTED`` – Amazon Pinpoint hasn't yet started searching for the DKIM records in the DNS configuration for the domain.
- **Tokens** *(list) --*
A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon Pinpoint detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon Pinpoint usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.
- *(string) --*
:type EmailIdentity: string
:param EmailIdentity: **[REQUIRED]**
The email address or domain that you want to verify.
:type Tags: list
:param Tags:
An object that defines the tags (keys and values) that you want to associate with the email identity.
- *(dict) --*
An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.
A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
* Tag keys and values are case sensitive.
* For each associated resource, each tag key must be unique and it can have only one value.
* The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.
* You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.
- **Key** *(string) --* **[REQUIRED]**
One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.
- **Value** *(string) --* **[REQUIRED]**
The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.
:rtype: dict
:returns:
"""
pass
def delete_configuration_set(self, ConfigurationSetName: str) -> Dict:
"""
Delete an existing configuration set.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/DeleteConfigurationSet>`_
**Request Syntax**
::
response = client.delete_configuration_set(
ConfigurationSetName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that you want to delete.
:rtype: dict
:returns:
"""
pass
def delete_configuration_set_event_destination(self, ConfigurationSetName: str, EventDestinationName: str) -> Dict:
"""
Delete an event destination.
In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/DeleteConfigurationSetEventDestination>`_
**Request Syntax**
::
response = client.delete_configuration_set_event_destination(
ConfigurationSetName='string',
EventDestinationName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that contains the event destination that you want to delete.
:type EventDestinationName: string
:param EventDestinationName: **[REQUIRED]**
The name of the event destination that you want to delete.
:rtype: dict
:returns:
"""
pass
def delete_dedicated_ip_pool(self, PoolName: str) -> Dict:
"""
Delete a dedicated IP pool.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/DeleteDedicatedIpPool>`_
**Request Syntax**
::
response = client.delete_dedicated_ip_pool(
PoolName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type PoolName: string
:param PoolName: **[REQUIRED]**
The name of the dedicated IP pool that you want to delete.
:rtype: dict
:returns:
"""
pass
def delete_email_identity(self, EmailIdentity: str) -> Dict:
"""
Deletes an email identity that you previously verified for use with Amazon Pinpoint. An identity can be either an email address or a domain name.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/DeleteEmailIdentity>`_
**Request Syntax**
::
response = client.delete_email_identity(
EmailIdentity='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type EmailIdentity: string
:param EmailIdentity: **[REQUIRED]**
The identity (that is, the email address or domain) that you want to delete from your Amazon Pinpoint account.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_account(self) -> Dict:
"""
Obtain information about the email-sending status and capabilities of your Amazon Pinpoint account in the current AWS Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetAccount>`_
**Request Syntax**
::
response = client.get_account()
**Response Syntax**
::
{
'SendQuota': {
'Max24HourSend': 123.0,
'MaxSendRate': 123.0,
'SentLast24Hours': 123.0
},
'SendingEnabled': True|False,
'DedicatedIpAutoWarmupEnabled': True|False,
'EnforcementStatus': 'string',
'ProductionAccessEnabled': True|False
}
**Response Structure**
- *(dict) --*
A list of details about the email-sending capabilities of your Amazon Pinpoint account in the current AWS Region.
- **SendQuota** *(dict) --*
An object that contains information about the per-day and per-second sending limits for your Amazon Pinpoint account in the current AWS Region.
- **Max24HourSend** *(float) --*
The maximum number of emails that you can send in the current AWS Region over a 24-hour period. This value is also called your *sending quota* .
- **MaxSendRate** *(float) --*
The maximum number of emails that you can send per second in the current AWS Region. This value is also called your *maximum sending rate* or your *maximum TPS (transactions per second) rate* .
- **SentLast24Hours** *(float) --*
The number of emails sent from your Amazon Pinpoint account in the current AWS Region over the past 24 hours.
- **SendingEnabled** *(boolean) --*
Indicates whether or not email sending is enabled for your Amazon Pinpoint account in the current AWS Region.
- **DedicatedIpAutoWarmupEnabled** *(boolean) --*
Indicates whether or not the automatic warm-up feature is enabled for dedicated IP addresses that are associated with your account.
- **EnforcementStatus** *(string) --*
The reputation status of your Amazon Pinpoint account. The status can be one of the following:
* ``HEALTHY`` – There are no reputation-related issues that currently impact your account.
* ``PROBATION`` – We've identified some issues with your Amazon Pinpoint account. We're placing your account under review while you work on correcting these issues.
* ``SHUTDOWN`` – Your account's ability to send email is currently paused because of an issue with the email sent from your account. When you correct the issue, you can contact us and request that your account's ability to send email is resumed.
- **ProductionAccessEnabled** *(boolean) --*
Indicates whether or not your account has production access in the current AWS Region.
If the value is ``false`` , then your account is in the *sandbox* . When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.
If the value is ``true`` , then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.
:rtype: dict
:returns:
"""
pass
def get_blacklist_reports(self, BlacklistItemNames: List) -> Dict:
"""
Retrieve a list of the blacklists that your dedicated IP addresses appear on.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetBlacklistReports>`_
**Request Syntax**
::
response = client.get_blacklist_reports(
BlacklistItemNames=[
'string',
]
)
**Response Syntax**
::
{
'BlacklistReport': {
'string': [
{
'RblName': 'string',
'ListingTime': datetime(2015, 1, 1),
'Description': 'string'
},
]
}
}
**Response Structure**
- *(dict) --*
An object that contains information about blacklist events.
- **BlacklistReport** *(dict) --*
An object that contains information about a blacklist that one of your dedicated IP addresses appears on.
- *(string) --*
An IP address that you want to obtain blacklist information for.
- *(list) --*
- *(dict) --*
An object that contains information about a blacklisting event that impacts one of the dedicated IP addresses that is associated with your account.
- **RblName** *(string) --*
The name of the blacklist that the IP address appears on.
- **ListingTime** *(datetime) --*
The time when the blacklisting event occurred, shown in Unix time format.
- **Description** *(string) --*
Additional information about the blacklisting event, as provided by the blacklist maintainer.
:type BlacklistItemNames: list
:param BlacklistItemNames: **[REQUIRED]**
A list of IP addresses that you want to retrieve blacklist information about. You can only specify the dedicated IP addresses that you use to send email using Amazon Pinpoint or Amazon SES.
- *(string) --*
An IP address that you want to obtain blacklist information for.
:rtype: dict
:returns:
"""
pass
def get_configuration_set(self, ConfigurationSetName: str) -> Dict:
"""
Get information about an existing configuration set, including the dedicated IP pool that it's associated with, whether or not it's enabled for sending email, and more.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetConfigurationSet>`_
**Request Syntax**
::
response = client.get_configuration_set(
ConfigurationSetName='string'
)
**Response Syntax**
::
{
'ConfigurationSetName': 'string',
'TrackingOptions': {
'CustomRedirectDomain': 'string'
},
'DeliveryOptions': {
'SendingPoolName': 'string'
},
'ReputationOptions': {
'ReputationMetricsEnabled': True|False,
'LastFreshStart': datetime(2015, 1, 1)
},
'SendingOptions': {
'SendingEnabled': True|False
}
}
**Response Structure**
- *(dict) --*
Information about a configuration set.
- **ConfigurationSetName** *(string) --*
The name of the configuration set.
- **TrackingOptions** *(dict) --*
An object that defines the open and click tracking options for emails that you send using the configuration set.
- **CustomRedirectDomain** *(string) --*
The domain that you want to use for tracking open and click events.
- **DeliveryOptions** *(dict) --*
An object that defines the dedicated IP pool that is used to send emails that you send using the configuration set.
- **SendingPoolName** *(string) --*
The name of the dedicated IP pool that you want to associate with the configuration set.
- **ReputationOptions** *(dict) --*
An object that defines whether or not Amazon Pinpoint collects reputation metrics for the emails that you send that use the configuration set.
- **ReputationMetricsEnabled** *(boolean) --*
If ``true`` , tracking of reputation metrics is enabled for the configuration set. If ``false`` , tracking of reputation metrics is disabled for the configuration set.
- **LastFreshStart** *(datetime) --*
The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.
- **SendingOptions** *(dict) --*
An object that defines whether or not Amazon Pinpoint can send email that you send using the configuration set.
- **SendingEnabled** *(boolean) --*
If ``true`` , email sending is enabled for the configuration set. If ``false`` , email sending is disabled for the configuration set.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that you want to obtain more information about.
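**Usage Sketch**
An illustrative sketch, not part of the generated reference. ``my-configuration-set`` is a placeholder for an existing configuration set; optional sections such as ``SendingOptions`` are read defensively because they only appear when they have been configured.
::
import boto3
client = boto3.client('pinpoint-email')
config = client.get_configuration_set(ConfigurationSetName='my-configuration-set')
sending = config.get('SendingOptions', {})
# Report whether email sending is currently enabled for this configuration set.
print(config['ConfigurationSetName'], 'sending enabled:', sending.get('SendingEnabled', False))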
:rtype: dict
:returns:
"""
pass
def get_configuration_set_event_destinations(self, ConfigurationSetName: str) -> Dict:
"""
Retrieve a list of event destinations that are associated with a configuration set.
In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetConfigurationSetEventDestinations>`_
**Request Syntax**
::
response = client.get_configuration_set_event_destinations(
ConfigurationSetName='string'
)
**Response Syntax**
::
{
'EventDestinations': [
{
'Name': 'string',
'Enabled': True|False,
'MatchingEventTypes': [
'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',
],
'KinesisFirehoseDestination': {
'IamRoleArn': 'string',
'DeliveryStreamArn': 'string'
},
'CloudWatchDestination': {
'DimensionConfigurations': [
{
'DimensionName': 'string',
'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',
'DefaultDimensionValue': 'string'
},
]
},
'SnsDestination': {
'TopicArn': 'string'
},
'PinpointDestination': {
'ApplicationArn': 'string'
}
},
]
}
**Response Structure**
- *(dict) --*
Information about an event destination for a configuration set.
- **EventDestinations** *(list) --*
An array that includes all of the event destinations that have been configured for the configuration set.
- *(dict) --*
In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
- **Name** *(string) --*
A name that identifies the event destination.
- **Enabled** *(boolean) --*
If ``true`` , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this ``EventDestinationDefinition`` .
If ``false`` , the event destination is disabled. When the event destination is disabled, events aren't sent to the specified destinations.
- **MatchingEventTypes** *(list) --*
The types of events that Amazon Pinpoint sends to the specified event destinations.
- *(string) --*
An email sending event type. For example, email sends, opens, and bounces are all email events.
- **KinesisFirehoseDestination** *(dict) --*
An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.
- **IamRoleArn** *(string) --*
The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.
- **DeliveryStreamArn** *(string) --*
The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.
- **CloudWatchDestination** *(dict) --*
An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.
- **DimensionConfigurations** *(list) --*
An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.
- *(dict) --*
An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.
- **DimensionName** *(string) --*
The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:
* It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **DimensionValueSource** *(string) --*
The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose ``messageTag`` . If you want Amazon Pinpoint to use your own email headers, choose ``emailHeader`` . If you want Amazon Pinpoint to use link tags, choose ``linkTags`` .
- **DefaultDimensionValue** *(string) --*
The default value of the dimension that is published to Amazon CloudWatch if you don't provide the value of the dimension when you send an email. This value has to meet the following criteria:
* It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **SnsDestination** *(dict) --*
An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
- **TopicArn** *(string) --*
The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the `Amazon SNS Developer Guide <https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html>`__ .
- **PinpointDestination** *(dict) --*
An object that defines an Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.
- **ApplicationArn** *(string) --*
The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that contains the event destination.
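**Usage Sketch**
An illustrative sketch, not part of the generated reference. ``my-configuration-set`` is a placeholder for an existing configuration set.
::
import boto3
client = boto3.client('pinpoint-email')
response = client.get_configuration_set_event_destinations(
    ConfigurationSetName='my-configuration-set'
)
for destination in response['EventDestinations']:
    # Show which event types each destination receives and whether it is enabled.
    print(destination['Name'], destination['Enabled'], destination['MatchingEventTypes'])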
:rtype: dict
:returns:
"""
pass
def get_dedicated_ip(self, Ip: str) -> Dict:
"""
Get information about a dedicated IP address, including the name of the dedicated IP pool that it's associated with, as well information about the automatic warm-up process for the address.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetDedicatedIp>`_
**Request Syntax**
::
response = client.get_dedicated_ip(
Ip='string'
)
**Response Syntax**
::
{
'DedicatedIp': {
'Ip': 'string',
'WarmupStatus': 'IN_PROGRESS'|'DONE',
'WarmupPercentage': 123,
'PoolName': 'string'
}
}
**Response Structure**
- *(dict) --*
Information about a dedicated IP address.
- **DedicatedIp** *(dict) --*
An object that contains information about a dedicated IP address.
- **Ip** *(string) --*
An IP address that is reserved for use by your Amazon Pinpoint account.
- **WarmupStatus** *(string) --*
The warm-up status of a dedicated IP address. The status can have one of the following values:
* ``IN_PROGRESS`` – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.
* ``DONE`` – The dedicated IP warm-up process is complete, and the IP address is ready to use.
- **WarmupPercentage** *(integer) --*
Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.
- **PoolName** *(string) --*
The name of the dedicated IP pool that the IP address is associated with.
:type Ip: string
:param Ip: **[REQUIRED]**
The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that\'s associated with your Amazon Pinpoint account.
:rtype: dict
:returns:
"""
pass
def get_dedicated_ips(self, PoolName: str = None, NextToken: str = None, PageSize: int = None) -> Dict:
"""
List the dedicated IP addresses that are associated with your Amazon Pinpoint account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetDedicatedIps>`_
**Request Syntax**
::
response = client.get_dedicated_ips(
PoolName='string',
NextToken='string',
PageSize=123
)
**Response Syntax**
::
{
'DedicatedIps': [
{
'Ip': 'string',
'WarmupStatus': 'IN_PROGRESS'|'DONE',
'WarmupPercentage': 123,
'PoolName': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
Information about the dedicated IP addresses that are associated with your Amazon Pinpoint account.
- **DedicatedIps** *(list) --*
A list of dedicated IP addresses that are reserved for use by your Amazon Pinpoint account.
- *(dict) --*
Contains information about a dedicated IP address that is associated with your Amazon Pinpoint account.
- **Ip** *(string) --*
An IP address that is reserved for use by your Amazon Pinpoint account.
- **WarmupStatus** *(string) --*
The warm-up status of a dedicated IP address. The status can have one of the following values:
* ``IN_PROGRESS`` – The IP address isn't ready to use because the dedicated IP warm-up process is ongoing.
* ``DONE`` – The dedicated IP warm-up process is complete, and the IP address is ready to use.
- **WarmupPercentage** *(integer) --*
Indicates how complete the dedicated IP warm-up process is. When this value equals 1, the address has completed the warm-up process and is ready for use.
- **PoolName** *(string) --*
The name of the dedicated IP pool that the IP address is associated with.
- **NextToken** *(string) --*
A token that indicates that there are additional dedicated IP addresses to list. To view additional addresses, issue another request to ``GetDedicatedIps`` , passing this token in the ``NextToken`` parameter.
:type PoolName: string
:param PoolName:
The name of the IP pool that the dedicated IP address is associated with.
:type NextToken: string
:param NextToken:
A token returned from a previous call to ``GetDedicatedIps`` to indicate the position of the dedicated IP pool in the list of IP pools.
:type PageSize: integer
:param PageSize:
The number of results to show in a single call to ``GetDedicatedIpsRequest`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.
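**Usage Sketch**
An illustrative pagination sketch, not part of the generated reference. ``my-ip-pool`` is a placeholder pool name; the loop keeps requesting pages until ``NextToken`` is no longer returned.
::
import boto3
client = boto3.client('pinpoint-email')
kwargs = {'PoolName': 'my-ip-pool', 'PageSize': 25}
while True:
    page = client.get_dedicated_ips(**kwargs)
    for dedicated_ip in page['DedicatedIps']:
        print(dedicated_ip['Ip'], dedicated_ip['WarmupStatus'], dedicated_ip['WarmupPercentage'])
    if 'NextToken' not in page:
        break
    # Pass the token back to continue from where the previous page ended.
    kwargs['NextToken'] = page['NextToken']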
:rtype: dict
:returns:
"""
pass
def get_deliverability_dashboard_options(self) -> Dict:
"""
Show the status of the Deliverability dashboard. When the Deliverability dashboard is enabled, you gain access to reputation metrics for the domains that you use to send email using Amazon Pinpoint. You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly charge of USD$1,250.00, in addition to any other fees that you accrue by using Amazon Pinpoint. If you enable the Deliverability dashboard after the first day of a calendar month, AWS prorates the monthly charge based on how many days have elapsed in the current calendar month.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetDeliverabilityDashboardOptions>`_
**Request Syntax**
::
response = client.get_deliverability_dashboard_options()
**Response Syntax**
::
{
'DashboardEnabled': True|False
}
**Response Structure**
- *(dict) --*
An object that shows the status of the Deliverability dashboard for your Amazon Pinpoint account.
- **DashboardEnabled** *(boolean) --*
Indicates whether the Deliverability dashboard is enabled. If the value is ``true`` , then the dashboard is enabled.
:rtype: dict
:returns:
"""
pass
def get_deliverability_test_report(self, ReportId: str) -> Dict:
"""
Retrieve the results of a predictive inbox placement test.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetDeliverabilityTestReport>`_
**Request Syntax**
::
response = client.get_deliverability_test_report(
ReportId='string'
)
**Response Syntax**
::
{
'DeliverabilityTestReport': {
'ReportId': 'string',
'ReportName': 'string',
'Subject': 'string',
'FromEmailAddress': 'string',
'CreateDate': datetime(2015, 1, 1),
'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'
},
'OverallPlacement': {
'InboxPercentage': 123.0,
'SpamPercentage': 123.0,
'MissingPercentage': 123.0,
'SpfPercentage': 123.0,
'DkimPercentage': 123.0
},
'IspPlacements': [
{
'IspName': 'string',
'PlacementStatistics': {
'InboxPercentage': 123.0,
'SpamPercentage': 123.0,
'MissingPercentage': 123.0,
'SpfPercentage': 123.0,
'DkimPercentage': 123.0
}
},
],
'Message': 'string'
}
**Response Structure**
- *(dict) --*
The results of the predictive inbox placement test.
- **DeliverabilityTestReport** *(dict) --*
An object that contains the results of the predictive inbox placement test.
- **ReportId** *(string) --*
A unique string that identifies the predictive inbox placement test.
- **ReportName** *(string) --*
A name that helps you identify a predictive inbox placement test report.
- **Subject** *(string) --*
The subject line for an email that you submitted in a predictive inbox placement test.
- **FromEmailAddress** *(string) --*
The sender address that you specified for the predictive inbox placement test.
- **CreateDate** *(datetime) --*
The date and time when the predictive inbox placement test was created, in Unix time format.
- **DeliverabilityTestStatus** *(string) --*
The status of the predictive inbox placement test. If the status is ``IN_PROGRESS`` , then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is ``COMPLETED`` , then the test is finished, and you can use the ``GetDeliverabilityTestReport`` operation to view the results of the test.
- **OverallPlacement** *(dict) --*
An object that specifies how many test messages that were sent during the predictive inbox placement test were delivered to recipients' inboxes, how many were sent to recipients' spam folders, and how many weren't delivered.
- **InboxPercentage** *(float) --*
The percentage of emails that arrived in recipients' inboxes during the predictive inbox placement test.
- **SpamPercentage** *(float) --*
The percentage of emails that arrived in recipients' spam or junk mail folders during the predictive inbox placement test.
- **MissingPercentage** *(float) --*
The percentage of emails that didn't arrive in recipients' inboxes at all during the predictive inbox placement test.
- **SpfPercentage** *(float) --*
The percentage of emails that were authenticated by using Sender Policy Framework (SPF) during the predictive inbox placement test.
- **DkimPercentage** *(float) --*
The percentage of emails that were authenticated by using DomainKeys Identified Mail (DKIM) during the predictive inbox placement test.
- **IspPlacements** *(list) --*
An object that describes how the test email was handled by several email providers, including Gmail, Hotmail, Yahoo, AOL, and others.
- *(dict) --*
An object that describes how email sent during the predictive inbox placement test was handled by a certain email provider.
- **IspName** *(string) --*
The name of the email provider that the inbox placement data applies to.
- **PlacementStatistics** *(dict) --*
An object that contains inbox placement metrics for a specific email provider.
- **InboxPercentage** *(float) --*
The percentage of emails that arrived in recipients' inboxes during the predictive inbox placement test.
- **SpamPercentage** *(float) --*
The percentage of emails that arrived in recipients' spam or junk mail folders during the predictive inbox placement test.
- **MissingPercentage** *(float) --*
The percentage of emails that didn't arrive in recipients' inboxes at all during the predictive inbox placement test.
- **SpfPercentage** *(float) --*
The percentage of emails that were authenticated by using Sender Policy Framework (SPF) during the predictive inbox placement test.
- **DkimPercentage** *(float) --*
The percentage of emails that were authenticated by using DomainKeys Identified Mail (DKIM) during the predictive inbox placement test.
- **Message** *(string) --*
An object that contains the message that you sent when you performed this predictive inbox placement test.
:type ReportId: string
:param ReportId: **[REQUIRED]**
A unique string that identifies the predictive inbox placement test.
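**Usage Sketch**
An illustrative sketch, not part of the generated reference. ``example-report-id`` is a placeholder for a report identifier returned when you created the predictive inbox placement test.
::
import boto3
client = boto3.client('pinpoint-email')
report = client.get_deliverability_test_report(ReportId='example-report-id')
test = report['DeliverabilityTestReport']
if test['DeliverabilityTestStatus'] == 'COMPLETED':
    overall = report['OverallPlacement']
    # Summarize where the test messages ended up.
    print('inbox:', overall['InboxPercentage'], 'spam:', overall['SpamPercentage'], 'missing:', overall['MissingPercentage'])
else:
    print('Test', test['ReportId'], 'is still in progress')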
:rtype: dict
:returns:
"""
pass
def get_domain_statistics_report(self, Domain: str, StartDate: datetime, EndDate: datetime) -> Dict:
"""
Retrieve inbox placement and engagement rates for the domains that you use to send email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetDomainStatisticsReport>`_
**Request Syntax**
::
response = client.get_domain_statistics_report(
Domain='string',
StartDate=datetime(2015, 1, 1),
EndDate=datetime(2015, 1, 1)
)
**Response Syntax**
::
{
'OverallVolume': {
'VolumeStatistics': {
'InboxRawCount': 123,
'SpamRawCount': 123,
'ProjectedInbox': 123,
'ProjectedSpam': 123
},
'ReadRatePercent': 123.0,
'DomainIspPlacements': [
{
'IspName': 'string',
'InboxRawCount': 123,
'SpamRawCount': 123,
'InboxPercentage': 123.0,
'SpamPercentage': 123.0
},
]
},
'DailyVolumes': [
{
'StartDate': datetime(2015, 1, 1),
'VolumeStatistics': {
'InboxRawCount': 123,
'SpamRawCount': 123,
'ProjectedInbox': 123,
'ProjectedSpam': 123
},
'DomainIspPlacements': [
{
'IspName': 'string',
'InboxRawCount': 123,
'SpamRawCount': 123,
'InboxPercentage': 123.0,
'SpamPercentage': 123.0
},
]
},
]
}
**Response Structure**
- *(dict) --*
An object that includes statistics that are related to the domain that you specified.
- **OverallVolume** *(dict) --*
An object that contains deliverability metrics for the domain that you specified. The data in this object is a summary of all of the data that was collected from the ``StartDate`` to the ``EndDate`` .
- **VolumeStatistics** *(dict) --*
An object that contains information about the numbers of messages that arrived in recipients' inboxes and junk mail folders.
- **InboxRawCount** *(integer) --*
The total number of emails that arrived in recipients' inboxes.
- **SpamRawCount** *(integer) --*
The total number of emails that arrived in recipients' spam or junk mail folders.
- **ProjectedInbox** *(integer) --*
An estimate of the percentage of emails sent from the current domain that will arrive in recipients' inboxes.
- **ProjectedSpam** *(integer) --*
An estimate of the percentage of emails sent from the current domain that will arrive in recipients' spam or junk mail folders.
- **ReadRatePercent** *(float) --*
The percentage of emails that were sent from the domain that were read by their recipients.
- **DomainIspPlacements** *(list) --*
An object that contains inbox and junk mail placement metrics for individual email providers.
- *(dict) --*
An object that contains inbox placement data for email sent from one of your email domains to a specific email provider.
- **IspName** *(string) --*
The name of the email provider that the inbox placement data applies to.
- **InboxRawCount** *(integer) --*
The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.
- **SpamRawCount** *(integer) --*
The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.
- **InboxPercentage** *(float) --*
The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.
- **SpamPercentage** *(float) --*
The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.
- **DailyVolumes** *(list) --*
An object that contains deliverability metrics for the domain that you specified. This object contains data for each day, starting on the ``StartDate`` and ending on the ``EndDate`` .
- *(dict) --*
An object that contains information about the volume of email sent on each day of the analysis period.
- **StartDate** *(datetime) --*
The date that the DailyVolume metrics apply to, in Unix time.
- **VolumeStatistics** *(dict) --*
An object that contains inbox placement metrics for a specific day in the analysis period.
- **InboxRawCount** *(integer) --*
The total number of emails that arrived in recipients' inboxes.
- **SpamRawCount** *(integer) --*
The total number of emails that arrived in recipients' spam or junk mail folders.
- **ProjectedInbox** *(integer) --*
An estimate of the percentage of emails sent from the current domain that will arrive in recipients' inboxes.
- **ProjectedSpam** *(integer) --*
An estimate of the percentage of emails sent from the current domain that will arrive in recipients' spam or junk mail folders.
- **DomainIspPlacements** *(list) --*
An object that contains inbox placement metrics for a specific day in the analysis period, broken out by the recipient's email provider.
- *(dict) --*
An object that contains inbox placement data for email sent from one of your email domains to a specific email provider.
- **IspName** *(string) --*
The name of the email provider that the inbox placement data applies to.
- **InboxRawCount** *(integer) --*
The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.
- **SpamRawCount** *(integer) --*
The total number of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.
- **InboxPercentage** *(float) --*
The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' inboxes.
- **SpamPercentage** *(float) --*
The percentage of messages that were sent from the selected domain to the specified email provider that arrived in recipients' spam or junk mail folders.
:type Domain: string
:param Domain: **[REQUIRED]**
The domain that you want to obtain deliverability metrics for.
:type StartDate: datetime
:param StartDate: **[REQUIRED]**
The first day (in Unix time) that you want to obtain domain deliverability metrics for.
:type EndDate: datetime
:param EndDate: **[REQUIRED]**
The last day (in Unix time) that you want to obtain domain deliverability metrics for. The ``EndDate`` that you specify has to be less than or equal to 30 days after the ``StartDate`` .
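**Usage Sketch**
An illustrative sketch, not part of the generated reference. ``example.com`` is a placeholder for a domain that you send email from, and this operation generally assumes that the Deliverability dashboard is enabled for your account.
::
import boto3
from datetime import datetime, timedelta
client = boto3.client('pinpoint-email')
end_date = datetime.utcnow()
start_date = end_date - timedelta(days=7)
stats = client.get_domain_statistics_report(
    Domain='example.com',
    StartDate=start_date,
    EndDate=end_date
)
print('read rate:', stats['OverallVolume']['ReadRatePercent'])
for day in stats['DailyVolumes']:
    # Daily inbox and spam counts for the analysis period.
    print(day['StartDate'], day['VolumeStatistics']['InboxRawCount'], day['VolumeStatistics']['SpamRawCount'])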
:rtype: dict
:returns:
"""
pass
def get_email_identity(self, EmailIdentity: str) -> Dict:
"""
Provides information about a specific identity associated with your Amazon Pinpoint account, including the identity's verification status, its DKIM authentication status, and its custom Mail-From settings.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/GetEmailIdentity>`_
**Request Syntax**
::
response = client.get_email_identity(
EmailIdentity='string'
)
**Response Syntax**
::
{
'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',
'FeedbackForwardingStatus': True|False,
'VerifiedForSendingStatus': True|False,
'DkimAttributes': {
'SigningEnabled': True|False,
'Status': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE'|'NOT_STARTED',
'Tokens': [
'string',
]
},
'MailFromAttributes': {
'MailFromDomain': 'string',
'MailFromDomainStatus': 'PENDING'|'SUCCESS'|'FAILED'|'TEMPORARY_FAILURE',
'BehaviorOnMxFailure': 'USE_DEFAULT_VALUE'|'REJECT_MESSAGE'
}
}
**Response Structure**
- *(dict) --*
Details about an email identity.
- **IdentityType** *(string) --*
The email identity type.
- **FeedbackForwardingStatus** *(boolean) --*
The feedback forwarding configuration for the identity.
If the value is ``true`` , Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.
When you set this value to ``false`` , Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic or another event destination. You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).
- **VerifiedForSendingStatus** *(boolean) --*
Specifies whether or not the identity is verified. In Amazon Pinpoint, you can only send email from verified email addresses or domains. For more information about verifying identities, see the `Amazon Pinpoint User Guide <https://docs.aws.amazon.com/pinpoint/latest/userguide/channels-email-manage-verify.html>`__ .
- **DkimAttributes** *(dict) --*
An object that contains information about the DKIM attributes for the identity. This object includes the tokens that you use to create the CNAME records that are required to complete the DKIM verification process.
- **SigningEnabled** *(boolean) --*
If the value is ``true`` , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. If the value is ``false`` , then the messages that Amazon Pinpoint sends from the identity aren't DKIM-signed.
- **Status** *(string) --*
Describes whether or not Amazon Pinpoint has successfully located the DKIM records in the DNS records for the domain. The status can be one of the following:
* ``PENDING`` – Amazon Pinpoint hasn't yet located the DKIM records in the DNS configuration for the domain, but will continue to attempt to locate them.
* ``SUCCESS`` – Amazon Pinpoint located the DKIM records in the DNS configuration for the domain and determined that they're correct. Amazon Pinpoint can now send DKIM-signed email from the identity.
* ``FAILED`` – Amazon Pinpoint was unable to locate the DKIM records in the DNS settings for the domain, and won't continue to search for them.
* ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the DKIM status for the domain.
* ``NOT_STARTED`` – Amazon Pinpoint hasn't yet started searching for the DKIM records in the DNS configuration for the domain.
- **Tokens** *(list) --*
A set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon Pinpoint detects these records in the DNS configuration for your domain, the DKIM authentication process is complete. Amazon Pinpoint usually detects these records within about 72 hours of adding them to the DNS configuration for your domain.
- *(string) --*
- **MailFromAttributes** *(dict) --*
An object that contains information about the Mail-From attributes for the email identity.
- **MailFromDomain** *(string) --*
The name of a domain that an email identity uses as a custom MAIL FROM domain.
- **MailFromDomainStatus** *(string) --*
The status of the MAIL FROM domain. This status can have the following values:
* ``PENDING`` – Amazon Pinpoint hasn't started searching for the MX record yet.
* ``SUCCESS`` – Amazon Pinpoint detected the required MX record for the MAIL FROM domain.
* ``FAILED`` – Amazon Pinpoint can't find the required MX record, or the record no longer exists.
* ``TEMPORARY_FAILURE`` – A temporary issue occurred, which prevented Amazon Pinpoint from determining the status of the MAIL FROM domain.
- **BehaviorOnMxFailure** *(string) --*
The action that Amazon Pinpoint takes if it can't read the required MX record for a custom MAIL FROM domain. When you set this value to ``UseDefaultValue`` , Amazon Pinpoint uses *amazonses.com* as the MAIL FROM domain. When you set this value to ``RejectMessage`` , Amazon Pinpoint returns a ``MailFromDomainNotVerified`` error, and doesn't attempt to deliver the email.
These behaviors are taken when the custom MAIL FROM domain configuration is in the ``Pending`` , ``Failed`` , and ``TemporaryFailure`` states.
:type EmailIdentity: string
:param EmailIdentity: **[REQUIRED]**
The email identity that you want to retrieve details for.
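**Usage Sketch**
An illustrative sketch, not part of the generated reference. ``example.com`` is a placeholder identity; each DKIM token printed here corresponds to a CNAME record that you add to the domain's DNS configuration.
::
import boto3
client = boto3.client('pinpoint-email')
identity = client.get_email_identity(EmailIdentity='example.com')
print('verified for sending:', identity['VerifiedForSendingStatus'])
print('DKIM status:', identity['DkimAttributes']['Status'])
for token in identity['DkimAttributes'].get('Tokens', []):
    print('DKIM token:', token)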
:rtype: dict
:returns:
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_configuration_sets(self, NextToken: str = None, PageSize: int = None) -> Dict:
"""
List all of the configuration sets associated with your Amazon Pinpoint account in the current region.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/ListConfigurationSets>`_
**Request Syntax**
::
response = client.list_configuration_sets(
NextToken='string',
PageSize=123
)
**Response Syntax**
::
{
'ConfigurationSets': [
'string',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
A list of configuration sets in your Amazon Pinpoint account in the current AWS Region.
- **ConfigurationSets** *(list) --*
An array that contains all of the configuration sets in your Amazon Pinpoint account in the current AWS Region.
- *(string) --*
The name of a configuration set.
In Amazon Pinpoint, *configuration sets* are groups of rules that you can apply to the emails you send. You apply a configuration set to an email by including a reference to the configuration set in the headers of the email. When you apply a configuration set to an email, all of the rules in that configuration set are applied to the email.
- **NextToken** *(string) --*
A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ``ListConfigurationSets`` , and pass this token in the ``NextToken`` parameter.
:type NextToken: string
:param NextToken:
A token returned from a previous call to ``ListConfigurationSets`` to indicate the position in the list of configuration sets.
:type PageSize: integer
:param PageSize:
The number of results to show in a single call to ``ListConfigurationSets`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.
:rtype: dict
:returns:
"""
pass
def list_dedicated_ip_pools(self, NextToken: str = None, PageSize: int = None) -> Dict:
"""
List all of the dedicated IP pools that exist in your Amazon Pinpoint account in the current AWS Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/ListDedicatedIpPools>`_
**Request Syntax**
::
response = client.list_dedicated_ip_pools(
NextToken='string',
PageSize=123
)
**Response Syntax**
::
{
'DedicatedIpPools': [
'string',
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
A list of dedicated IP pools.
- **DedicatedIpPools** *(list) --*
A list of all of the dedicated IP pools that are associated with your Amazon Pinpoint account.
- *(string) --*
The name of a dedicated IP pool.
- **NextToken** *(string) --*
A token that indicates that there are additional IP pools to list. To view additional IP pools, issue another request to ``ListDedicatedIpPools`` , passing this token in the ``NextToken`` parameter.
:type NextToken: string
:param NextToken:
A token returned from a previous call to ``ListDedicatedIpPools`` to indicate the position in the list of dedicated IP pools.
:type PageSize: integer
:param PageSize:
The number of results to show in a single call to ``ListDedicatedIpPools`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.
:rtype: dict
:returns:
"""
pass
def list_deliverability_test_reports(self, NextToken: str = None, PageSize: int = None) -> Dict:
"""
Show a list of the predictive inbox placement tests that you've performed, regardless of their statuses. For predictive inbox placement tests that are complete, you can use the ``GetDeliverabilityTestReport`` operation to view the results.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/ListDeliverabilityTestReports>`_
**Request Syntax**
::
response = client.list_deliverability_test_reports(
NextToken='string',
PageSize=123
)
**Response Syntax**
::
{
'DeliverabilityTestReports': [
{
'ReportId': 'string',
'ReportName': 'string',
'Subject': 'string',
'FromEmailAddress': 'string',
'CreateDate': datetime(2015, 1, 1),
'DeliverabilityTestStatus': 'IN_PROGRESS'|'COMPLETED'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
A list of the predictive inbox placement test reports that are available for your account, regardless of whether or not those tests are complete.
- **DeliverabilityTestReports** *(list) --*
An object that contains a list of predictive inbox placement tests that you've performed.
- *(dict) --*
An object that contains metadata related to a predictive inbox placement test.
- **ReportId** *(string) --*
A unique string that identifies the predictive inbox placement test.
- **ReportName** *(string) --*
A name that helps you identify a predictive inbox placement test report.
- **Subject** *(string) --*
The subject line for an email that you submitted in a predictive inbox placement test.
- **FromEmailAddress** *(string) --*
The sender address that you specified for the predictive inbox placement test.
- **CreateDate** *(datetime) --*
The date and time when the predictive inbox placement test was created, in Unix time format.
- **DeliverabilityTestStatus** *(string) --*
The status of the predictive inbox placement test. If the status is ``IN_PROGRESS`` , then the predictive inbox placement test is currently running. Predictive inbox placement tests are usually complete within 24 hours of creating the test. If the status is ``COMPLETED`` , then the test is finished, and you can use the ``GetDeliverabilityTestReport`` operation to view the results of the test.
- **NextToken** *(string) --*
A token that indicates that there are additional predictive inbox placement tests to list. To view additional predictive inbox placement tests, issue another request to ``ListDeliverabilityTestReports`` , and pass this token in the ``NextToken`` parameter.
:type NextToken: string
:param NextToken:
A token returned from a previous call to ``ListDeliverabilityTestReports`` to indicate the position in the list of predictive inbox placement tests.
:type PageSize: integer
:param PageSize:
The number of results to show in a single call to ``ListDeliverabilityTestReports`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.
The value you specify has to be at least 0, and can be no more than 1000.
:rtype: dict
:returns:
"""
pass
def list_email_identities(self, NextToken: str = None, PageSize: int = None) -> Dict:
"""
Returns a list of all of the email identities that are associated with your Amazon Pinpoint account. An identity can be either an email address or a domain. This operation returns identities that are verified as well as those that aren't.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/ListEmailIdentities>`_
**Request Syntax**
::
response = client.list_email_identities(
NextToken='string',
PageSize=123
)
**Response Syntax**
::
{
'EmailIdentities': [
{
'IdentityType': 'EMAIL_ADDRESS'|'DOMAIN'|'MANAGED_DOMAIN',
'IdentityName': 'string',
'SendingEnabled': True|False
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
A list of all of the identities that you've attempted to verify for use with Amazon Pinpoint, regardless of whether or not those identities were successfully verified.
- **EmailIdentities** *(list) --*
An array that includes all of the identities associated with your Amazon Pinpoint account.
- *(dict) --*
Information about an email identity.
- **IdentityType** *(string) --*
The email identity type. The identity type can be one of the following:
* ``EMAIL_ADDRESS`` – The identity is an email address.
* ``DOMAIN`` – The identity is a domain.
* ``MANAGED_DOMAIN`` – The identity is a domain that is managed by AWS.
- **IdentityName** *(string) --*
The address or domain of the identity.
- **SendingEnabled** *(boolean) --*
Indicates whether or not you can send email from the identity.
In Amazon Pinpoint, an identity is an email address or domain that you send email from. Before you can send email from an identity, you have to demonstrate that you own the identity, and that you authorize Amazon Pinpoint to send email from that identity.
- **NextToken** *(string) --*
A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ``ListEmailIdentities`` , and pass this token in the ``NextToken`` parameter.
:type NextToken: string
:param NextToken:
A token returned from a previous call to ``ListEmailIdentities`` to indicate the position in the list of identities.
:type PageSize: integer
:param PageSize:
The number of results to show in a single call to ``ListEmailIdentities`` . If the number of results is larger than the number you specified in this parameter, then the response includes a ``NextToken`` element, which you can use to obtain additional results.
The value you specify has to be at least 0, and can be no more than 1000.
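**Usage Sketch**
An illustrative pagination sketch, not part of the generated reference; it simply follows ``NextToken`` until no further pages are returned.
::
import boto3
client = boto3.client('pinpoint-email')
kwargs = {'PageSize': 100}
while True:
    page = client.list_email_identities(**kwargs)
    for identity in page['EmailIdentities']:
        print(identity['IdentityName'], identity['IdentityType'], identity['SendingEnabled'])
    if 'NextToken' not in page:
        break
    kwargs['NextToken'] = page['NextToken']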
:rtype: dict
:returns:
"""
pass
def list_tags_for_resource(self, ResourceArn: str) -> Dict:
"""
Retrieve a list of the tags (keys and values) that are associated with a specific resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Each tag consists of a required *tag key* and an optional associated *tag value* . A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/ListTagsForResource>`_
**Request Syntax**
::
response = client.list_tags_for_resource(
ResourceArn='string'
)
**Response Syntax**
::
{
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
]
}
**Response Structure**
- *(dict) --*
- **Tags** *(list) --*
An array that lists all the tags that are associated with the resource. Each tag consists of a required tag key (``Key`` ) and an associated tag value (``Value`` )
- *(dict) --*
An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.
A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
* Tag keys and values are case sensitive.
* For each associated resource, each tag key must be unique and it can have only one value.
* The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can't edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.
* You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.
- **Key** *(string) --*
One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.
- **Value** *(string) --*
The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the resource that you want to retrieve tag information for.
:rtype: dict
:returns:
"""
pass
def put_account_dedicated_ip_warmup_attributes(self, AutoWarmupEnabled: bool = None) -> Dict:
"""
Enable or disable the automatic warm-up feature for dedicated IP addresses.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutAccountDedicatedIpWarmupAttributes>`_
**Request Syntax**
::
response = client.put_account_dedicated_ip_warmup_attributes(
AutoWarmupEnabled=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type AutoWarmupEnabled: boolean
:param AutoWarmupEnabled:
Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon Pinpoint account in the current AWS Region. Set to ``true`` to enable the automatic warm-up feature, or set to ``false`` to disable it.
:rtype: dict
:returns:
"""
pass
def put_account_sending_attributes(self, SendingEnabled: bool = None) -> Dict:
"""
Enable or disable the ability of your account to send email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutAccountSendingAttributes>`_
**Request Syntax**
::
response = client.put_account_sending_attributes(
SendingEnabled=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type SendingEnabled: boolean
:param SendingEnabled:
Enables or disables your account\'s ability to send email. Set to ``true`` to enable email sending, or set to ``false`` to disable email sending.
.. note::
If AWS paused your account\'s ability to send email, you can\'t use this operation to resume your account\'s ability to send email.
:rtype: dict
:returns:
"""
pass
def put_configuration_set_delivery_options(self, ConfigurationSetName: str, SendingPoolName: str = None) -> Dict:
"""
Associate a configuration set with a dedicated IP pool. You can use dedicated IP pools to create groups of dedicated IP addresses for sending specific types of email.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutConfigurationSetDeliveryOptions>`_
**Request Syntax**
::
response = client.put_configuration_set_delivery_options(
ConfigurationSetName='string',
SendingPoolName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that you want to associate with a dedicated IP pool.
:type SendingPoolName: string
:param SendingPoolName:
The name of the dedicated IP pool that you want to associate with the configuration set.
:rtype: dict
:returns:
"""
pass
def put_configuration_set_reputation_options(self, ConfigurationSetName: str, ReputationMetricsEnabled: bool = None) -> Dict:
"""
Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific AWS Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutConfigurationSetReputationOptions>`_
**Request Syntax**
::
response = client.put_configuration_set_reputation_options(
ConfigurationSetName='string',
ReputationMetricsEnabled=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that you want to enable or disable reputation metric tracking for.
:type ReputationMetricsEnabled: boolean
:param ReputationMetricsEnabled:
If ``true`` , tracking of reputation metrics is enabled for the configuration set. If ``false`` , tracking of reputation metrics is disabled for the configuration set.
:rtype: dict
:returns:
"""
pass
def put_configuration_set_sending_options(self, ConfigurationSetName: str, SendingEnabled: bool = None) -> Dict:
"""
Enable or disable email sending for messages that use a particular configuration set in a specific AWS Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutConfigurationSetSendingOptions>`_
**Request Syntax**
::
response = client.put_configuration_set_sending_options(
ConfigurationSetName='string',
SendingEnabled=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that you want to enable or disable email sending for.
:type SendingEnabled: boolean
:param SendingEnabled:
If ``true`` , email sending is enabled for the configuration set. If ``false`` , email sending is disabled for the configuration set.
:rtype: dict
:returns:
"""
pass
def put_configuration_set_tracking_options(self, ConfigurationSetName: str, CustomRedirectDomain: str = None) -> Dict:
"""
Specify a custom domain to use for open and click tracking elements in email that you send using Amazon Pinpoint.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutConfigurationSetTrackingOptions>`_
**Request Syntax**
::
response = client.put_configuration_set_tracking_options(
ConfigurationSetName='string',
CustomRedirectDomain='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that you want to add a custom tracking domain to.
:type CustomRedirectDomain: string
:param CustomRedirectDomain:
The domain that you want to use to track open and click events.
:rtype: dict
:returns:
"""
pass
def put_dedicated_ip_in_pool(self, Ip: str, DestinationPoolName: str) -> Dict:
"""
Move a dedicated IP address to an existing dedicated IP pool.
.. note::
The dedicated IP address that you specify must already exist, and must be associated with your Amazon Pinpoint account.
The dedicated IP pool you specify must already exist. You can create a new pool by using the ``CreateDedicatedIpPool`` operation.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutDedicatedIpInPool>`_
**Request Syntax**
::
response = client.put_dedicated_ip_in_pool(
Ip='string',
DestinationPoolName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type Ip: string
:param Ip: **[REQUIRED]**
The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that\'s associated with your Amazon Pinpoint account.
:type DestinationPoolName: string
:param DestinationPoolName: **[REQUIRED]**
The name of the IP pool that you want to add the dedicated IP address to. You have to specify an IP pool that already exists.
:rtype: dict
:returns:
"""
pass
def put_dedicated_ip_warmup_attributes(self, Ip: str, WarmupPercentage: int) -> Dict:
"""
Update the warm-up attributes for a dedicated IP address.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutDedicatedIpWarmupAttributes>`_
**Request Syntax**
::
response = client.put_dedicated_ip_warmup_attributes(
Ip='string',
WarmupPercentage=123
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type Ip: string
:param Ip: **[REQUIRED]**
The dedicated IP address that you want to update the warm-up attributes for.
:type WarmupPercentage: integer
:param WarmupPercentage: **[REQUIRED]**
The warm-up percentage that you want to associate with the dedicated IP address.
:rtype: dict
:returns:
"""
pass
def put_deliverability_dashboard_option(self, DashboardEnabled: bool) -> Dict:
"""
Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation metrics for the domains that you use to send email using Amazon Pinpoint. You also gain the ability to perform predictive inbox placement tests.
When you use the Deliverability dashboard, you pay a monthly charge of USD$1,250.00, in addition to any other fees that you accrue by using Amazon Pinpoint. If you enable the Deliverability dashboard after the first day of a calendar month, we prorate the monthly charge based on how many days have elapsed in the current calendar month.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutDeliverabilityDashboardOption>`_
**Request Syntax**
::
response = client.put_deliverability_dashboard_option(
DashboardEnabled=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
A response that indicates whether the Deliverability dashboard is enabled for your Amazon Pinpoint account.
:type DashboardEnabled: boolean
:param DashboardEnabled: **[REQUIRED]**
Indicates whether the Deliverability dashboard is enabled. If the value is ``true`` , then the dashboard is enabled.
:rtype: dict
:returns:
"""
pass
def put_email_identity_dkim_attributes(self, EmailIdentity: str, SigningEnabled: bool = None) -> Dict:
"""
Used to enable or disable DKIM authentication for an email identity.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutEmailIdentityDkimAttributes>`_
**Request Syntax**
::
response = client.put_email_identity_dkim_attributes(
EmailIdentity='string',
SigningEnabled=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type EmailIdentity: string
:param EmailIdentity: **[REQUIRED]**
The email identity that you want to change the DKIM settings for.
:type SigningEnabled: boolean
:param SigningEnabled:
Sets the DKIM signing configuration for the identity.
When you set this value ``true`` , then the messages that Amazon Pinpoint sends from the identity are DKIM-signed. When you set this value to ``false`` , then the messages that Amazon Pinpoint sends from the identity aren\'t DKIM-signed.
:rtype: dict
:returns:
"""
pass
def put_email_identity_feedback_attributes(self, EmailIdentity: str, EmailForwardingEnabled: bool = None) -> Dict:
"""
Used to enable or disable feedback forwarding for an identity. This setting determines what happens when an identity is used to send an email that results in a bounce or complaint event.
When you enable feedback forwarding, Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.
When you disable feedback forwarding, Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic. You're required to have a method of tracking bounces and complaints. If you haven't set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutEmailIdentityFeedbackAttributes>`_
**Request Syntax**
::
response = client.put_email_identity_feedback_attributes(
EmailIdentity='string',
EmailForwardingEnabled=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type EmailIdentity: string
:param EmailIdentity: **[REQUIRED]**
The email identity that you want to configure bounce and complaint feedback forwarding for.
:type EmailForwardingEnabled: boolean
:param EmailForwardingEnabled:
Sets the feedback forwarding configuration for the identity.
If the value is ``true`` , Amazon Pinpoint sends you email notifications when bounce or complaint events occur. Amazon Pinpoint sends this notification to the address that you specified in the Return-Path header of the original email.
When you set this value to ``false`` , Amazon Pinpoint sends notifications through other mechanisms, such as by notifying an Amazon SNS topic or another event destination. You\'re required to have a method of tracking bounces and complaints. If you haven\'t set up another mechanism for receiving bounce or complaint notifications, Amazon Pinpoint sends an email notification when these events occur (even if this setting is disabled).
:rtype: dict
:returns:
"""
pass
def put_email_identity_mail_from_attributes(self, EmailIdentity: str, MailFromDomain: str = None, BehaviorOnMxFailure: str = None) -> Dict:
"""
Used to enable or disable the custom Mail-From domain configuration for an email identity.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/PutEmailIdentityMailFromAttributes>`_
**Request Syntax**
::
response = client.put_email_identity_mail_from_attributes(
EmailIdentity='string',
MailFromDomain='string',
BehaviorOnMxFailure='USE_DEFAULT_VALUE'|'REJECT_MESSAGE'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type EmailIdentity: string
:param EmailIdentity: **[REQUIRED]**
The verified email identity that you want to set up the custom MAIL FROM domain for.
:type MailFromDomain: string
:param MailFromDomain:
The custom MAIL FROM domain that you want the verified identity to use. The MAIL FROM domain must meet the following criteria:
* It has to be a subdomain of the verified identity.
* It can\'t be used to receive email.
* It can\'t be used in a \"From\" address if the MAIL FROM domain is a destination for feedback forwarding emails.
:type BehaviorOnMxFailure: string
:param BehaviorOnMxFailure:
The action that you want Amazon Pinpoint to take if it can\'t read the required MX record when you send an email. When you set this value to ``UseDefaultValue`` , Amazon Pinpoint uses *amazonses.com* as the MAIL FROM domain. When you set this value to ``RejectMessage`` , Amazon Pinpoint returns a ``MailFromDomainNotVerified`` error, and doesn\'t attempt to deliver the email.
These behaviors are taken when the custom MAIL FROM domain configuration is in the ``Pending`` , ``Failed`` , and ``TemporaryFailure`` states.
:rtype: dict
:returns:
"""
pass
def send_email(self, Destination: Dict, Content: Dict, FromEmailAddress: str = None, ReplyToAddresses: List = None, FeedbackForwardingEmailAddress: str = None, EmailTags: List = None, ConfigurationSetName: str = None) -> Dict:
"""
Sends an email message. You can use the Amazon Pinpoint Email API to send two types of messages:
* **Simple** – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and Amazon Pinpoint assembles the message for you.
* **Raw** – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/SendEmail>`_
**Request Syntax**
::
response = client.send_email(
FromEmailAddress='string',
Destination={
'ToAddresses': [
'string',
],
'CcAddresses': [
'string',
],
'BccAddresses': [
'string',
]
},
ReplyToAddresses=[
'string',
],
FeedbackForwardingEmailAddress='string',
Content={
'Simple': {
'Subject': {
'Data': 'string',
'Charset': 'string'
},
'Body': {
'Text': {
'Data': 'string',
'Charset': 'string'
},
'Html': {
'Data': 'string',
'Charset': 'string'
}
}
},
'Raw': {
'Data': b'bytes'
}
},
EmailTags=[
{
'Name': 'string',
'Value': 'string'
},
],
ConfigurationSetName='string'
)
**Response Syntax**
::
{
'MessageId': 'string'
}
**Response Structure**
- *(dict) --*
A unique message ID that you receive when Amazon Pinpoint accepts an email for sending.
- **MessageId** *(string) --*
A unique identifier for the message that is generated when Amazon Pinpoint accepts the message.
.. note::
It is possible for Amazon Pinpoint to accept a message without sending it. This can happen when the message you're trying to send has an attachment that doesn't pass a virus check, or when you send a templated email that contains invalid personalization content, for example.
:type FromEmailAddress: string
:param FromEmailAddress:
The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.
:type Destination: dict
:param Destination: **[REQUIRED]**
An object that contains the recipients of the email message.
- **ToAddresses** *(list) --*
An array that contains the email addresses of the \"To\" recipients for the email.
- *(string) --*
- **CcAddresses** *(list) --*
An array that contains the email addresses of the \"CC\" (carbon copy) recipients for the email.
- *(string) --*
- **BccAddresses** *(list) --*
An array that contains the email addresses of the \"BCC\" (blind carbon copy) recipients for the email.
- *(string) --*
:type ReplyToAddresses: list
:param ReplyToAddresses:
The \"Reply-to\" email addresses for the message. When the recipient replies to the message, each Reply-to address receives the reply.
- *(string) --*
:type FeedbackForwardingEmailAddress: string
:param FeedbackForwardingEmailAddress:
The address that Amazon Pinpoint should send bounce and complaint notifications to.
:type Content: dict
:param Content: **[REQUIRED]**
An object that contains the body of the message. You can send either a Simple message or a Raw message.
- **Simple** *(dict) --*
The simple email message. The message consists of a subject and a message body.
- **Subject** *(dict) --* **[REQUIRED]**
The subject line of the email. The subject line can only contain 7-bit ASCII characters. However, you can specify non-ASCII characters in the subject line by using encoded-word syntax, as described in `RFC 2047 <https://tools.ietf.org/html/rfc2047>`__ .
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Body** *(dict) --* **[REQUIRED]**
The body of the message. You can specify an HTML version of the message, a text-only version of the message, or both.
- **Text** *(dict) --*
An object that represents the version of the message that is displayed in email clients that don\'t support HTML, or clients where the recipient has disabled HTML rendering.
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Html** *(dict) --*
An object that represents the version of the message that is displayed in email clients that support HTML. HTML messages can include formatted text, hyperlinks, images, and more.
- **Data** *(string) --* **[REQUIRED]**
The content of the message itself.
- **Charset** *(string) --*
The character set for the content. Because of the constraints of the SMTP protocol, Amazon Pinpoint uses 7-bit ASCII by default. If the text includes characters outside of the ASCII range, you have to specify a character set. For example, you could specify ``UTF-8`` , ``ISO-8859-1`` , or ``Shift_JIS`` .
- **Raw** *(dict) --*
The raw email message. The message has to meet the following criteria:
* The message has to contain a header and a body, separated by one blank line.
* All of the required header fields must be present in the message.
* Each part of a multipart MIME message must be formatted properly.
* If you include attachments, they must be in a file format that Amazon Pinpoint supports.
* The entire message must be Base64 encoded.
* If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\' email clients render the message properly.
* The length of any single line of text in the message can\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 <https://tools.ietf.org/html/rfc5321>`__ .
- **Data** *(bytes) --* **[REQUIRED]**
The raw email message. The message has to meet the following criteria:
* The message has to contain a header and a body, separated by one blank line.
* All of the required header fields must be present in the message.
* Each part of a multipart MIME message must be formatted properly.
* Attachments must be in a file format that Amazon Pinpoint supports.
* The entire message must be Base64 encoded.
* If any of the MIME parts in your message contain content that is outside of the 7-bit ASCII character range, you should encode that content to ensure that recipients\' email clients render the message properly.
* The length of any single line of text in the message can\'t exceed 1,000 characters. This restriction is defined in `RFC 5321 <https://tools.ietf.org/html/rfc5321>`__ .
:type EmailTags: list
:param EmailTags:
A list of tags, in the form of name/value pairs, to apply to an email that you send using the ``SendEmail`` operation. Tags correspond to characteristics of the email that you define, so that you can publish email sending events.
- *(dict) --*
Contains the name and value of a tag that you apply to an email. You can use message tags when you publish email sending events.
- **Name** *(string) --* **[REQUIRED]**
The name of the message tag. The message tag name has to meet the following criteria:
* It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **Value** *(string) --* **[REQUIRED]**
The value of the message tag. The message tag value has to meet the following criteria:
* It can only contain ASCII letters (a–z, A–Z), numbers (0–9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
:type ConfigurationSetName: string
:param ConfigurationSetName:
The name of the configuration set that you want to use when sending the email.
:rtype: dict
:returns:
"""
pass
def tag_resource(self, ResourceArn: str, Tags: List) -> Dict:
"""
Add one or more tags (keys and values) to one or more specified resources. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for more specific tag values. A tag value acts as a descriptor within a tag key.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/TagResource>`_
**Request Syntax**
::
response = client.tag_resource(
ResourceArn='string',
Tags=[
{
'Key': 'string',
'Value': 'string'
},
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the resource that you want to add one or more tags to.
:type Tags: list
:param Tags: **[REQUIRED]**
A list of the tags that you want to add to the resource. A tag consists of a required tag key (``Key`` ) and an associated tag value (``Value`` ). The maximum length of a tag key is 128 characters. The maximum length of a tag value is 256 characters.
- *(dict) --*
An object that defines the tags that are associated with a resource. A *tag* is a label that you optionally define and associate with a resource in Amazon Pinpoint. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.
Each tag consists of a required *tag key* and an associated *tag value* , both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. For example, if you have two versions of an Amazon Pinpoint project, one for internal testing and another for external use, you might assign a ``Stack`` tag key to both projects. The value of the ``Stack`` tag key might be ``Test`` for one project and ``Production`` for the other project.
A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:
* Tag keys and values are case sensitive.
* For each associated resource, each tag key must be unique and it can have only one value.
* The ``aws:`` prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can\'t edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.
* You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.
- **Key** *(string) --* **[REQUIRED]**
One part of a key-value pair that defines a tag. The maximum length of a tag key is 128 characters. The minimum length is 1 character.
- **Value** *(string) --* **[REQUIRED]**
The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don’t want a resource to have a specific tag value, don’t specify a value for this parameter. Amazon Pinpoint will set the value to an empty string.
:rtype: dict
:returns:
"""
pass
def untag_resource(self, ResourceArn: str, TagKeys: List) -> Dict:
"""
Remove one or more tags (keys and values) from a specified resource.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/UntagResource>`_
**Request Syntax**
::
response = client.untag_resource(
ResourceArn='string',
TagKeys=[
'string',
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type ResourceArn: string
:param ResourceArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the resource that you want to remove one or more tags from.
:type TagKeys: list
:param TagKeys: **[REQUIRED]**
The tags (tag keys) that you want to remove from the resource. When you specify a tag key, the action removes both that key and its associated tag value.
To remove more than one tag from the resource, append the ``TagKeys`` parameter and argument for each additional tag to remove, separated by an ampersand. For example: ``/v1/email/tags?ResourceArn=ResourceArn&TagKeys=Key1&TagKeys=Key2``
- *(string) --*
:rtype: dict
:returns:
"""
pass
def update_configuration_set_event_destination(self, ConfigurationSetName: str, EventDestinationName: str, EventDestination: Dict) -> Dict:
"""
Update the configuration of an event destination for a configuration set.
In Amazon Pinpoint, *events* include message sends, deliveries, opens, clicks, bounces, and complaints. *Event destinations* are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/pinpoint-email-2018-07-26/UpdateConfigurationSetEventDestination>`_
**Request Syntax**
::
response = client.update_configuration_set_event_destination(
ConfigurationSetName='string',
EventDestinationName='string',
EventDestination={
'Enabled': True|False,
'MatchingEventTypes': [
'SEND'|'REJECT'|'BOUNCE'|'COMPLAINT'|'DELIVERY'|'OPEN'|'CLICK'|'RENDERING_FAILURE',
],
'KinesisFirehoseDestination': {
'IamRoleArn': 'string',
'DeliveryStreamArn': 'string'
},
'CloudWatchDestination': {
'DimensionConfigurations': [
{
'DimensionName': 'string',
'DimensionValueSource': 'MESSAGE_TAG'|'EMAIL_HEADER'|'LINK_TAG',
'DefaultDimensionValue': 'string'
},
]
},
'SnsDestination': {
'TopicArn': 'string'
},
'PinpointDestination': {
'ApplicationArn': 'string'
}
}
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
An HTTP 200 response if the request succeeds, or an error message if the request fails.
:type ConfigurationSetName: string
:param ConfigurationSetName: **[REQUIRED]**
The name of the configuration set that contains the event destination that you want to modify.
:type EventDestinationName: string
:param EventDestinationName: **[REQUIRED]**
The name of the event destination that you want to modify.
:type EventDestination: dict
:param EventDestination: **[REQUIRED]**
An object that defines the event destination.
- **Enabled** *(boolean) --*
If ``true`` , the event destination is enabled. When the event destination is enabled, the specified event types are sent to the destinations in this ``EventDestinationDefinition`` .
If ``false`` , the event destination is disabled. When the event destination is disabled, events aren\'t sent to the specified destinations.
- **MatchingEventTypes** *(list) --*
An array that specifies which events Amazon Pinpoint should send to the destinations in this ``EventDestinationDefinition`` .
- *(string) --*
An email sending event type. For example, email sends, opens, and bounces are all email events.
- **KinesisFirehoseDestination** *(dict) --*
An object that defines an Amazon Kinesis Data Firehose destination for email events. You can use Amazon Kinesis Data Firehose to stream data to other services, such as Amazon S3 and Amazon Redshift.
- **IamRoleArn** *(string) --* **[REQUIRED]**
The Amazon Resource Name (ARN) of the IAM role that Amazon Pinpoint uses when sending email events to the Amazon Kinesis Data Firehose stream.
- **DeliveryStreamArn** *(string) --* **[REQUIRED]**
The Amazon Resource Name (ARN) of the Amazon Kinesis Data Firehose stream that Amazon Pinpoint sends email events to.
- **CloudWatchDestination** *(dict) --*
An object that defines an Amazon CloudWatch destination for email events. You can use Amazon CloudWatch to monitor and gain insights on your email sending metrics.
- **DimensionConfigurations** *(list) --* **[REQUIRED]**
An array of objects that define the dimensions to use when you send email events to Amazon CloudWatch.
- *(dict) --*
An object that defines the dimension configuration to use when you send Amazon Pinpoint email events to Amazon CloudWatch.
- **DimensionName** *(string) --* **[REQUIRED]**
The name of an Amazon CloudWatch dimension associated with an email sending metric. The name has to meet the following criteria:
* It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **DimensionValueSource** *(string) --* **[REQUIRED]**
The location where Amazon Pinpoint finds the value of a dimension to publish to Amazon CloudWatch. If you want Amazon Pinpoint to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail/SendRawEmail API, choose ``messageTag`` . If you want Amazon Pinpoint to use your own email headers, choose ``emailHeader`` . If you want Amazon Pinpoint to use link tags, choose ``linkTags`` .
- **DefaultDimensionValue** *(string) --* **[REQUIRED]**
The default value of the dimension that is published to Amazon CloudWatch if you don\'t provide the value of the dimension when you send an email. This value has to meet the following criteria:
* It can only contain ASCII letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-).
* It can contain no more than 256 characters.
- **SnsDestination** *(dict) --*
An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
- **TopicArn** *(string) --* **[REQUIRED]**
The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the `Amazon SNS Developer Guide <https://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html>`__ .
- **PinpointDestination** *(dict) --*
An object that defines an Amazon Pinpoint destination for email events. You can use Amazon Pinpoint events to create attributes in Amazon Pinpoint projects. You can use these attributes to create segments for your campaigns.
- **ApplicationArn** *(string) --*
The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.
:rtype: dict
:returns:
"""
pass
|
py | 1a2fdcd5621eb97edc60c038cd9ce943d5bb93d9 | # -*- coding: utf-8 -*-
"""
Contains generic utility functions used in various parts of
Drogulus.
"""
from .constants import K
def distance(key_one, key_two):
"""
Calculate the XOR result between two string representations of hex values.
Returned as an int.
"""
val_key_one = int(key_one, 16)
val_key_two = int(key_two, 16)
return val_key_one ^ val_key_two
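# Illustrative worked example, added as a comment and not part of the original
# module: distance('a1', 'b2') parses the keys as hex (0xa1 = 161, 0xb2 = 178)
# and returns 161 ^ 178 == 19. The metric is symmetric and distance(k, k) == 0,
# which is what the Kademlia-style sorting below relies on.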
def sort_peer_nodes(peer_nodes, target_key):
"""
Given a list of peer nodes, efficiently sorts it so that the peers closest
to the target key are at the head. If the list is longer than K then only
the K closest contacts will be returned.
"""
def node_key(node, target_key=target_key):
"""
Returns the node's distance to the target key.
"""
return distance(node.network_id, target_key)
peer_nodes.sort(key=node_key)
return peer_nodes[:K]
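# Illustrative usage sketch (comment only, not in the original module). Assuming
# peer objects that expose a hex ``network_id`` attribute, as node_key requires:
#
#     nearest = sort_peer_nodes(list_of_peers, target_hex_key)
#
# sorts the list in place by XOR distance to the target and returns at most K
# entries, with K taken from .constants.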
|
py | 1a2fdd0290f3811fe758653f2b1b5980f836138d | def imc (peso,altura):
valor = peso / altura **2
if valor <18:
return "Delgadez"
elif valor <25:
return "Normal"
elif valor <29:
return "Sobrepeso"
else:
return "Obesidad"
valor_imc = imc(58, 1.55)
print(valor_imc)
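# Ejemplo adicional / additional worked example (added for illustration, not part
# of the original script): 58 / 1.55 ** 2 = 58 / 2.4025 ≈ 24.14, which is below 25,
# so the call above prints "Normal". The cutoffs used here (18, 25, 29) are close
# to, but not exactly, the usual WHO values (18.5, 25, 30).
print(imc(90, 1.70))  # 90 / 2.89 ≈ 31.1 -> "Obesidad" (obesity)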
|
py | 1a2fddce03107c1b6356b81174debceb05409fdc | #!/usr/bin/env python
#
# Public Domain 2014-2018 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# WiredTigerTestCase
# parent class for all test cases
#
# If unittest2 is available, use it in preference to (the old) unittest
try:
import unittest2 as unittest
except ImportError:
import unittest
from contextlib import contextmanager
import glob, os, re, shutil, sys, time, traceback
import wiredtiger, wtscenario
def shortenWithEllipsis(s, maxlen):
if len(s) > maxlen:
s = s[0:maxlen-3] + '...'
return s
class CapturedFd(object):
"""
CapturedFd encapsulates a file descriptor (e.g. 1 or 2) that is diverted
to a file. We use this to capture and check the C stdout/stderr.
Meanwhile we reset Python's sys.stdout, sys.stderr, using duped copies
of the original 1, 2 fds. The end result is that Python's sys.stdout and
sys.stderr behave normally (e.g. go to the tty), while the C stdout/stderr
ends up in a file that we can verify.
"""
def __init__(self, filename, desc):
self.filename = filename
self.desc = desc
self.expectpos = 0
self.file = None
def readFileFrom(self, filename, pos, maxchars):
"""
Read a file starting at a given position,
returning the beginning of its contents
"""
with open(filename, 'r') as f:
f.seek(pos)
return shortenWithEllipsis(f.read(maxchars+1), maxchars)
def capture(self):
"""
Start capturing the file descriptor.
Note that the original targetFd is closed, we expect
that the caller has duped it and passed the dup to us
in the constructor.
"""
self.file = open(self.filename, 'w')
return self.file
def release(self):
"""
Stop capturing.
"""
self.file.close()
self.file = None
def check(self, testcase):
"""
Check to see that there is no unexpected output in the captured output
file. If there is, raise it as a test failure.
This is generally called after 'release' is called.
"""
if self.file != None:
self.file.flush()
filesize = os.path.getsize(self.filename)
if filesize > self.expectpos:
contents = self.readFileFrom(self.filename, self.expectpos, 10000)
WiredTigerTestCase.prout('ERROR: ' + self.filename +
' unexpected ' + self.desc +
', contains:\n"' + contents + '"')
testcase.fail('unexpected ' + self.desc + ', contains: "' +
contents + '"')
self.expectpos = filesize
def checkAdditional(self, testcase, expect):
"""
Check to see that an additional string has been added to the
output file. If it has not, raise it as a test failure.
In any case, reset the expected pos to account for the new output.
"""
if self.file != None:
self.file.flush()
gotstr = self.readFileFrom(self.filename, self.expectpos, 1000)
testcase.assertEqual(gotstr, expect, 'in ' + self.desc +
', expected "' + expect + '", but got "' +
gotstr + '"')
self.expectpos = os.path.getsize(self.filename)
def checkAdditionalPattern(self, testcase, pat):
"""
Check to see that an additional string has been added to the
output file. If it has not, raise it as a test failure.
In any case, reset the expected pos to account for the new output.
"""
if self.file != None:
self.file.flush()
gotstr = self.readFileFrom(self.filename, self.expectpos, 1000)
if re.search(pat, gotstr) == None:
testcase.fail('in ' + self.desc +
', expected pattern "' + pat + '", but got "' +
gotstr + '"')
self.expectpos = os.path.getsize(self.filename)
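# Illustrative sketch of how CapturedFd is typically driven (comment only, added
# for clarity; it mirrors fdSetUp/fdTearDown and the expectedStdout* helpers in
# WiredTigerTestCase below):
#
#     captured = CapturedFd('stdout.txt', 'standard output')
#     sys.stdout = captured.capture()      # writes to sys.stdout now go to the file
#     ... run the code under test ...
#     captured.checkAdditional(testcase, 'expected line\n')
#     captured.release()
#     sys.stdout = original_stdout         # original_stdout is the saved dup, see globalSetup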
class TestSuiteConnection(object):
def __init__(self, conn, connlist):
connlist.append(conn)
self._conn = conn
self._connlist = connlist
def close(self, config=''):
self._connlist.remove(self._conn)
return self._conn.close(config)
# Proxy everything except what we explicitly define to the
# wrapped connection
def __getattr__(self, attr):
if attr in self.__dict__:
return getattr(self, attr)
else:
return getattr(self._conn, attr)
# Just like a list of strings, but with a convenience function
class ExtensionList(list):
skipIfMissing = False
def extension(self, dirname, name, extarg=None):
if name != None and name != 'none':
ext = '' if extarg == None else '=' + extarg
self.append(dirname + '/' + name + ext)
class WiredTigerTestCase(unittest.TestCase):
_globalSetup = False
_printOnceSeen = {}
# conn_config can be overridden to add to basic connection configuration.
# Can be a string or a callable function or lambda expression.
conn_config = ''
# session_config can be overridden to add to basic session configuration.
# Can be a string or a callable function or lambda expression.
session_config = ''
# conn_extensions can be overridden to add a list of extensions to load.
# Each entry is a string (directory and extension name) and optional config.
# Example:
# conn_extensions = ('extractors/csv_extractor',
# 'test/fail_fs={allow_writes=100}')
conn_extensions = ()
@staticmethod
def globalSetup(preserveFiles = False, useTimestamp = False,
gdbSub = False, lldbSub = False, verbose = 1, builddir = None, dirarg = None,
longtest = False):
WiredTigerTestCase._preserveFiles = preserveFiles
d = 'WT_TEST' if dirarg == None else dirarg
if useTimestamp:
d += '.' + time.strftime('%Y%m%d-%H%M%S', time.localtime())
shutil.rmtree(d, ignore_errors=True)
os.makedirs(d)
wtscenario.set_long_run(longtest)
WiredTigerTestCase._parentTestdir = d
WiredTigerTestCase._builddir = builddir
WiredTigerTestCase._origcwd = os.getcwd()
WiredTigerTestCase._resultfile = open(os.path.join(d, 'results.txt'), "w", 0) # unbuffered
WiredTigerTestCase._gdbSubprocess = gdbSub
WiredTigerTestCase._lldbSubprocess = lldbSub
WiredTigerTestCase._longtest = longtest
WiredTigerTestCase._verbose = verbose
WiredTigerTestCase._dupout = os.dup(sys.stdout.fileno())
WiredTigerTestCase._stdout = sys.stdout
WiredTigerTestCase._stderr = sys.stderr
WiredTigerTestCase._concurrent = False
WiredTigerTestCase._globalSetup = True
WiredTigerTestCase._ttyDescriptor = None
def fdSetUp(self):
self.captureout = CapturedFd('stdout.txt', 'standard output')
self.captureerr = CapturedFd('stderr.txt', 'error output')
sys.stdout = self.captureout.capture()
sys.stderr = self.captureerr.capture()
def fdTearDown(self):
# restore stderr/stdout
self.captureout.release()
self.captureerr.release()
sys.stdout = WiredTigerTestCase._stdout
sys.stderr = WiredTigerTestCase._stderr
def __init__(self, *args, **kwargs):
if hasattr(self, 'scenarios'):
assert(len(self.scenarios) == len(dict(self.scenarios)))
unittest.TestCase.__init__(self, *args, **kwargs)
if not self._globalSetup:
WiredTigerTestCase.globalSetup()
def __str__(self):
# when running with scenarios, if the number_scenarios() method
# is used, then each scenario is given a number, which can
# help distinguish tests.
scen = ''
if hasattr(self, 'scenario_number') and hasattr(self, 'scenario_name'):
scen = ' -s ' + str(self.scenario_number) + \
' (' + self.scenario_name + ')'
return self.simpleName() + scen
def shortDesc(self):
ret_str = ''
if hasattr(self, 'scenario_number'):
ret_str = ' -s ' + str(self.scenario_number)
return self.simpleName() + ret_str
def simpleName(self):
return "%s.%s.%s" % (self.__module__,
self.className(), self._testMethodName)
# Return the wiredtiger_open extension argument for
# any needed shared library.
def extensionsConfig(self):
exts = self.conn_extensions
if hasattr(exts, '__call__'):
exts = ExtensionList()
self.conn_extensions(exts)
result = ''
extfiles = {}
skipIfMissing = False
if hasattr(exts, 'skip_if_missing'):
skipIfMissing = exts.skip_if_missing
for ext in exts:
extconf = ''
if '=' in ext:
splits = ext.split('=', 1)
ext = splits[0]
extconf = '=' + splits[1]
splits = ext.split('/')
if len(splits) != 2:
raise Exception(self.shortid() +
": " + ext +
": extension is not named <dir>/<name>")
libname = splits[1]
dirname = splits[0]
pat = os.path.join(WiredTigerTestCase._builddir, 'ext',
dirname, libname, '.libs', 'libwiredtiger_*.so')
filenames = glob.glob(pat)
if len(filenames) == 0:
if skipIfMissing:
self.skipTest('extension "' + ext + '" not built')
continue
else:
raise Exception(self.shortid() +
": " + ext +
": no extensions library found matching: " + pat)
elif len(filenames) > 1:
raise Exception(self.shortid() +
": " + ext +
": multiple extensions libraries found matching: " + pat)
complete = '"' + filenames[0] + '"' + extconf
if ext in extfiles:
if extfiles[ext] != complete:
raise Exception(self.shortid() +
": non-matching extension arguments in " +
str(exts))
else:
extfiles[ext] = complete
if len(extfiles) != 0:
result = ',extensions=[' + ','.join(extfiles.values()) + ']'
return result
# Can be overridden, but first consider setting self.conn_config
# or self.conn_extensions
def setUpConnectionOpen(self, home):
self.home = home
config = self.conn_config
if hasattr(config, '__call__'):
config = self.conn_config()
config += self.extensionsConfig()
# In case the open starts additional threads, flush first to
# avoid confusion.
sys.stdout.flush()
conn_param = 'create,error_prefix="%s",%s' % (self.shortid(), config)
try:
conn = self.wiredtiger_open(home, conn_param)
except wiredtiger.WiredTigerError as e:
print "Failed wiredtiger_open: dir '%s', config '%s'" % \
(home, conn_param)
raise e
self.pr(`conn`)
return conn
# Replacement for wiredtiger.wiredtiger_open that returns
# a proxied connection that knows to close it itself at the
# end of the run, unless it was already closed.
def wiredtiger_open(self, home=None, config=''):
conn = wiredtiger.wiredtiger_open(home, config)
return TestSuiteConnection(conn, self._connections)
# Can be overridden, but first consider setting self.session_config
def setUpSessionOpen(self, conn):
config = self.session_config
if hasattr(config, '__call__'):
config = self.session_config()
return conn.open_session(config)
# Can be overridden
def close_conn(self, config=''):
"""
Close the connection if already open.
"""
if self.conn != None:
self.conn.close(config)
self.conn = None
def open_conn(self, directory=".", config=None):
"""
Open the connection if already closed.
"""
if self.conn == None:
if config != None:
self._old_config = self.conn_config
self.conn_config = config
self.conn = self.setUpConnectionOpen(directory)
if config != None:
self.conn_config = self._old_config
self.session = self.setUpSessionOpen(self.conn)
def reopen_conn(self, directory=".", config=None):
"""
Reopen the connection.
"""
self.close_conn()
self.open_conn(directory, config)
def setUp(self):
if not hasattr(self.__class__, 'wt_ntests'):
self.__class__.wt_ntests = 0
if WiredTigerTestCase._concurrent:
self.testsubdir = self.shortid() + '.' + str(self.__class__.wt_ntests)
else:
self.testsubdir = self.className() + '.' + str(self.__class__.wt_ntests)
self.testdir = os.path.join(WiredTigerTestCase._parentTestdir, self.testsubdir)
self.__class__.wt_ntests += 1
self.starttime = time.time()
if WiredTigerTestCase._verbose > 2:
self.prhead('started in ' + self.testdir, True)
# tearDown needs connections list, set it here in case the open fails.
self._connections = []
self.origcwd = os.getcwd()
shutil.rmtree(self.testdir, ignore_errors=True)
if os.path.exists(self.testdir):
raise Exception(self.testdir + ": cannot remove directory")
os.makedirs(self.testdir)
os.chdir(self.testdir)
with open('testname.txt', 'w+') as namefile:
namefile.write(str(self) + '\n')
self.fdSetUp()
# tearDown needs a conn field, set it here in case the open fails.
self.conn = None
try:
self.conn = self.setUpConnectionOpen(".")
self.session = self.setUpSessionOpen(self.conn)
except:
self.tearDown()
raise
def tearDown(self):
excinfo = sys.exc_info()
passed = (excinfo == (None, None, None))
if passed:
skipped = False
else:
skipped = (excinfo[0] == unittest.SkipTest)
self.pr('finishing')
# Close all connections that weren't explicitly closed.
# Connections left open (as a result of a test failure)
# can result in cascading errors. We also make sure
# self.conn is on the list of active connections.
if not self.conn in self._connections:
self._connections.append(self.conn)
for conn in self._connections:
try:
conn.close()
except:
pass
self._connections = []
try:
self.fdTearDown()
# Only check for unexpected output if the test passed
if passed:
self.captureout.check(self)
self.captureerr.check(self)
finally:
# always get back to original directory
os.chdir(self.origcwd)
# Make sure no read-only files or directories were left behind
os.chmod(self.testdir, 0777)
for root, dirs, files in os.walk(self.testdir):
for d in dirs:
os.chmod(os.path.join(root, d), 0777)
for f in files:
os.chmod(os.path.join(root, f), 0666)
# Clean up unless there's a failure
if (passed or skipped) and not WiredTigerTestCase._preserveFiles:
shutil.rmtree(self.testdir, ignore_errors=True)
else:
self.pr('preserving directory ' + self.testdir)
elapsed = time.time() - self.starttime
if elapsed > 0.001 and WiredTigerTestCase._verbose >= 2:
print "%s: %.2f seconds" % (str(self), elapsed)
if not passed and not skipped:
print "ERROR in " + str(self)
self.pr('FAIL')
self.prexception(excinfo)
self.pr('preserving directory ' + self.testdir)
if WiredTigerTestCase._verbose > 2:
self.prhead('TEST COMPLETED')
def backup(self, backup_dir, session=None):
if session is None:
session = self.session
shutil.rmtree(backup_dir, ignore_errors=True)
os.mkdir(backup_dir)
bkp_cursor = session.open_cursor('backup:', None, None)
while True:
ret = bkp_cursor.next()
if ret != 0:
break
shutil.copy(bkp_cursor.get_key(), backup_dir)
self.assertEqual(ret, wiredtiger.WT_NOTFOUND)
bkp_cursor.close()
@contextmanager
def expectedStdout(self, expect):
self.captureout.check(self)
yield
self.captureout.checkAdditional(self, expect)
@contextmanager
def expectedStderr(self, expect):
self.captureerr.check(self)
yield
self.captureerr.checkAdditional(self, expect)
@contextmanager
def expectedStdoutPattern(self, pat):
self.captureout.check(self)
yield
self.captureout.checkAdditionalPattern(self, pat)
@contextmanager
def expectedStderrPattern(self, pat):
self.captureerr.check(self)
yield
self.captureerr.checkAdditionalPattern(self, pat)
def assertRaisesWithMessage(self, exceptionType, expr, message):
"""
Like TestCase.assertRaises(), but also checks to see
that a message is printed on stderr. If message starts
and ends with a slash, it is considered a pattern that
must appear in stderr (it need not encompass the entire
error output). Otherwise, the message must match verbatim,
including any trailing newlines.
"""
if len(message) > 2 and message[0] == '/' and message[-1] == '/':
with self.expectedStderrPattern(message[1:-1]):
self.assertRaises(exceptionType, expr)
else:
with self.expectedStderr(message):
self.assertRaises(exceptionType, expr)
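# Illustrative (hypothetical) call, added as a comment: wrapping the message in
# slashes makes it a pattern that only needs to appear somewhere in stderr rather
# than a verbatim match of the whole output.
#
#     self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
#         lambda: self.session.drop('table:does_not_exist', None),
#         '/does not exist/')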
def assertRaisesException(self, exceptionType, expr,
exceptionString=None, optional=False):
"""
Like TestCase.assertRaises(), with some additional options.
If the exceptionString argument is used, the exception's string
must match it. If optional is set, then no assertion occurs
if the exception doesn't occur.
Returns true if the assertion is raised.
"""
raised = False
try:
expr()
except BaseException, err:
if not isinstance(err, exceptionType):
self.fail('Exception of incorrect type raised, got type: ' + \
str(type(err)))
if exceptionString != None and exceptionString != str(err):
self.fail('Exception with incorrect string raised, got: "' + \
str(err) + '"')
raised = True
if not raised and not optional:
self.fail('no assertion raised')
return raised
def raisesBusy(self, expr):
"""
Execute the expression, returning true if a 'Resource busy'
exception is raised, returning false if no exception is raised.
Any other exception raises a test suite failure.
"""
return self.assertRaisesException(wiredtiger.WiredTigerError, \
expr, exceptionString='Resource busy', optional=True)
def assertTimestampsEqual(self, ts1, ts2):
"""
TestCase.assertEqual() for timestamps
"""
self.assertEqual(int(ts1, 16), int(ts2, 16))
def exceptionToStderr(self, expr):
"""
Used by assertRaisesHavingMessage to convert an expression
that throws an error to an expression that throws the
same error but also has the exception string on stderr.
"""
try:
expr()
except BaseException, err:
sys.stderr.write('Exception: ' + str(err))
raise
def assertRaisesHavingMessage(self, exceptionType, expr, message):
"""
Like TestCase.assertRaises(), but also checks to see
that the assert exception, when string-ified, includes a message.
If message starts and ends with a slash, it is considered a pattern that
must appear (it need not encompass the entire message).
Otherwise, the message must match verbatim.
"""
self.assertRaisesWithMessage(
exceptionType, lambda: self.exceptionToStderr(expr), message)
@staticmethod
def printOnce(msg):
# There's a race condition with multiple threads,
# but we won't worry about it. We err on the side
# of printing the message too many times.
if not msg in WiredTigerTestCase._printOnceSeen:
WiredTigerTestCase._printOnceSeen[msg] = msg
WiredTigerTestCase.prout(msg)
def KNOWN_FAILURE(self, name):
myname = self.simpleName()
msg = '**** ' + myname + ' HAS A KNOWN FAILURE: ' + name + ' ****'
self.printOnce(msg)
self.skipTest('KNOWN FAILURE: ' + name)
def KNOWN_LIMITATION(self, name):
myname = self.simpleName()
msg = '**** ' + myname + ' HAS A KNOWN LIMITATION: ' + name + ' ****'
self.printOnce(msg)
@staticmethod
def printVerbose(level, message):
if level <= WiredTigerTestCase._verbose:
WiredTigerTestCase.prout(message)
def verbose(self, level, message):
WiredTigerTestCase.printVerbose(level, message)
def prout(self, s):
WiredTigerTestCase.prout(s)
@staticmethod
def prout(s):
os.write(WiredTigerTestCase._dupout, s + '\n')
def pr(self, s):
"""
print a progress line for testing
"""
msg = ' ' + self.shortid() + ': ' + s
WiredTigerTestCase._resultfile.write(msg + '\n')
def prhead(self, s, *beginning):
"""
print a header line for testing, something important
"""
msg = ''
if len(beginning) > 0:
msg += '\n'
msg += ' ' + self.shortid() + ': ' + s
self.prout(msg)
WiredTigerTestCase._resultfile.write(msg + '\n')
def prexception(self, excinfo):
WiredTigerTestCase._resultfile.write('\n')
traceback.print_exception(excinfo[0], excinfo[1], excinfo[2], None, WiredTigerTestCase._resultfile)
WiredTigerTestCase._resultfile.write('\n')
# print directly to tty, useful for debugging
def tty(self, message):
WiredTigerTestCase.tty(message)
@staticmethod
def tty(message):
if WiredTigerTestCase._ttyDescriptor == None:
WiredTigerTestCase._ttyDescriptor = open('/dev/tty', 'w')
WiredTigerTestCase._ttyDescriptor.write(message + '\n')
def ttyVerbose(self, level, message):
WiredTigerTestCase.ttyVerbose(level, message)
@staticmethod
def ttyVerbose(level, message):
if level <= WiredTigerTestCase._verbose:
WiredTigerTestCase.tty(message)
def shortid(self):
return self.id().replace("__main__.","")
def className(self):
return self.__class__.__name__
def longtest(description):
"""
Used as a function decorator, for example, @wttest.longtest("description").
The decorator indicates that this test function should only be included
when running the test suite with the --long option.
"""
def runit_decorator(func):
return func
if not WiredTigerTestCase._longtest:
return unittest.skip(description + ' (enable with --long)')
else:
return runit_decorator
def islongtest():
return WiredTigerTestCase._longtest
def runsuite(suite, parallel):
suite_to_run = suite
if parallel > 1:
from concurrencytest import ConcurrentTestSuite, fork_for_tests
if not WiredTigerTestCase._globalSetup:
WiredTigerTestCase.globalSetup()
WiredTigerTestCase._concurrent = True
suite_to_run = ConcurrentTestSuite(suite, fork_for_tests(parallel))
try:
return unittest.TextTestRunner(
verbosity=WiredTigerTestCase._verbose).run(suite_to_run)
except BaseException as e:
# This should not happen for regular test errors, unittest should catch everything
print('ERROR: running test: ', e)
raise e
def run(name='__main__'):
result = runsuite(unittest.TestLoader().loadTestsFromName(name), False)
sys.exit(0 if result.wasSuccessful() else 1)
|
py | 1a2fde60eade4ec61d78ac03634d61eca65b86a7 | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import base64
import collections
import datetime
import hashlib
import imghdr
import json
import os
import random
import re
import string
import sys
import time
import unicodedata
import zlib
from constants import constants
import feconf
import python_utils
_YAML_PATH = os.path.join(os.getcwd(), '..', 'oppia_tools', 'pyyaml-5.1.2')
sys.path.insert(0, _YAML_PATH)
import yaml # isort:skip #pylint: disable=wrong-import-position
DATETIME_FORMAT = '%m/%d/%Y, %H:%M:%S:%f'
PNG_DATA_URL_PREFIX = 'data:image/png;base64,'
SECONDS_IN_HOUR = 60 * 60
SECONDS_IN_MINUTE = 60
class InvalidInputException(Exception):
"""Error class for invalid input."""
pass
class ValidationError(Exception):
"""Error class for when a domain object fails validation."""
pass
class ExplorationConversionError(Exception):
"""Error class for when an exploration fails to convert from a certain
version to a certain version.
"""
pass
def create_enum(*sequential, **names):
"""Creates a enumerated constant.
Args:
*sequential: *. Sequence list to generate the enumerations.
**names: *. Names of the enumeration.
Returns:
dict. Dictionary containing the enumerated constants.
"""
enums = dict(python_utils.ZIP(sequential, sequential), **names)
return type(b'Enum', (), enums)
def get_file_contents(filepath, raw_bytes=False, mode='r'):
"""Gets the contents of a file, given a relative filepath from oppia/.
Args:
filepath: str. A full path to the file.
raw_bytes: bool. Flag for the raw_bytes output.
mode: str. File opening mode, default is in read mode.
Returns:
*. Either the raw_bytes stream if the flag is set or the
decoded stream in utf-8 format.
"""
if raw_bytes:
mode = 'rb'
encoding = None
else:
encoding = 'utf-8'
with python_utils.open_file(filepath, mode, encoding=encoding) as f:
return f.read()
def get_exploration_components_from_dir(dir_path):
"""Gets the (yaml, assets) from the contents of an exploration data dir.
Args:
dir_path: str. A full path to the exploration root directory.
Returns:
*. A 2-tuple, the first element of which is a yaml string, and the
second element of which is a list of (filepath, content) 2-tuples.
The filepath does not include the assets/ prefix.
Raises:
Exception. If the following condition doesn't hold: "There
is exactly one file not in assets/, and this file has a
.yaml suffix".
"""
yaml_content = None
assets_list = []
dir_path_array = dir_path.split('/')
while dir_path_array[-1] == '':
dir_path_array = dir_path_array[:-1]
dir_path_length = len(dir_path_array)
for root, directories, files in os.walk(dir_path):
for directory in directories:
if root == dir_path and directory != 'assets':
raise Exception(
'The only directory in %s should be assets/' % dir_path)
for filename in files:
filepath = os.path.join(root, filename)
if root == dir_path:
# These files are added automatically by Mac OS X systems.
# We ignore them.
if not filepath.endswith('.DS_Store'):
if yaml_content is not None:
raise Exception(
'More than one non-asset file specified '
'for %s' % dir_path)
elif not filepath.endswith('.yaml'):
raise Exception(
'Found invalid non-asset file %s. There '
'should only be a single non-asset file, '
'and it should have a .yaml suffix.' % filepath)
else:
yaml_content = get_file_contents(filepath)
else:
filepath_array = filepath.split('/')
# The additional offset is to remove the 'assets/' prefix.
filename = '/'.join(filepath_array[dir_path_length + 1:])
assets_list.append((filename, get_file_contents(
filepath, raw_bytes=True)))
if yaml_content is None:
raise Exception('No yaml file specifed for %s' % dir_path)
return yaml_content, assets_list
def get_comma_sep_string_from_list(items):
"""Turns a list of items into a comma-separated string.
Args:
items: list(str). List of the items.
Returns:
str. String containing the items in the list separated by commas.
"""
if not items:
return ''
if len(items) == 1:
return items[0]
return '%s and %s' % (', '.join(items[:-1]), items[-1])
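# Worked example (comment only, not part of the original source):
# get_comma_sep_string_from_list(['apples', 'pears', 'plums']) returns
# 'apples, pears and plums'; a single item is returned unchanged and an empty
# list yields ''.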
def to_ascii(input_string):
"""Change unicode characters in a string to ascii if possible.
Args:
input_string: str. String to convert.
Returns:
str. String containing the ascii representation of the input string.
"""
return unicodedata.normalize(
'NFKD', python_utils.UNICODE(input_string)).encode('ascii', 'ignore')
def dict_from_yaml(yaml_str):
"""Gets the dict representation of a YAML string.
Args:
yaml_str: str. Yaml string for conversion into dict.
Returns:
dict. Parsed dict representation of the yaml string.
Raises:
InvalidInputException. If the yaml string sent as the
parameter cannot be parsed, then this error is raised.
"""
try:
retrieved_dict = yaml.safe_load(yaml_str)
assert isinstance(retrieved_dict, dict)
return retrieved_dict
except (AssertionError, yaml.YAMLError) as e:
raise InvalidInputException(e)
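# Worked example (comment only, not part of the original source):
# dict_from_yaml('title: Intro\ncategory: Algebra') returns
# {'title': 'Intro', 'category': 'Algebra'}, while a YAML document whose top level
# is not a mapping (e.g. '- a\n- b') fails the isinstance check above and is
# surfaced as an InvalidInputException.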
def recursively_remove_key(obj, key_to_remove):
"""Recursively removes keys from a list or dict.
Args:
obj: *. List or dict passed for which the keys has to
be removed.
key_to_remove: str. Key value that has to be removed.
Returns:
*. Dict or list with a particular key value removed.
"""
if isinstance(obj, list):
for item in obj:
recursively_remove_key(item, key_to_remove)
elif isinstance(obj, dict):
if key_to_remove in obj:
del obj[key_to_remove]
for key, unused_value in obj.items():
recursively_remove_key(obj[key], key_to_remove)
def get_random_int(upper_bound):
"""Returns a random integer in [0, upper_bound).
Args:
upper_bound: int. Upper limit for generation of random
integer.
Returns:
int. Randomly generated integer less than the upper_bound.
"""
assert upper_bound >= 0 and isinstance(upper_bound, int)
generator = random.SystemRandom()
return generator.randrange(0, stop=upper_bound)
def get_random_choice(alist):
"""Gets a random element from a list.
Args:
alist: list(*). Input to get a random choice.
Returns:
*. Random element choosen from the passed input list.
"""
assert isinstance(alist, list) and len(alist) > 0
index = get_random_int(len(alist))
return alist[index]
def convert_png_data_url_to_binary(image_data_url):
"""Converts a PNG base64 data URL to a PNG binary data.
Args:
image_data_url: str. A string that is to be interpreted as a PNG
data URL.
Returns:
str. Binary content of the PNG created from the data URL.
Raises:
Exception. The given string does not represent a PNG data URL.
"""
if image_data_url.startswith(PNG_DATA_URL_PREFIX):
return base64.b64decode(
python_utils.urllib_unquote(
image_data_url[len(PNG_DATA_URL_PREFIX):]))
else:
raise Exception('The given string does not represent a PNG data URL.')
def convert_png_binary_to_data_url(content):
"""Converts a PNG image string (represented by 'content') to a data URL.
Args:
content: str. PNG binary file content.
Returns:
str. Data URL created from the binary content of the PNG.
Raises:
Exception. The given binary string does not represent a PNG image.
"""
if imghdr.what(None, h=content) == 'png':
return '%s%s' % (
PNG_DATA_URL_PREFIX,
python_utils.url_quote(base64.b64encode(content))
)
else:
raise Exception('The given string does not represent a PNG image.')
def convert_png_to_data_url(filepath):
"""Converts the png file at filepath to a data URL.
Args:
filepath: str. A full path to the file.
Returns:
str. Data url created from the filepath of the PNG.
"""
file_contents = get_file_contents(filepath, raw_bytes=True, mode='rb')
return convert_png_binary_to_data_url(file_contents)
def camelcase_to_hyphenated(camelcase_str):
Camelcase to hyphenated conversion of the passed string.
Args:
camelcase_str: str. Camelcase string representation.
Returns:
str. Hyphenated string representation of the camelcase string.
"""
intermediate_str = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', camelcase_str)
return re.sub('([a-z0-9])([A-Z])', r'\1-\2', intermediate_str).lower()
def camelcase_to_snakecase(camelcase_str):
"""Camelcase to snake case conversion of the passed string.
Args:
camelcase_str: str. Camelcase string representation.
Returns:
str. Snakecase representation of the passed camelcase string.
"""
intermediate_str = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camelcase_str)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', intermediate_str).lower()
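# Worked examples for the two converters above (comments only, not part of the
# original source):
# camelcase_to_hyphenated('CamelCaseString') -> 'camel-case-string'
# camelcase_to_snakecase('CamelCaseString') -> 'camel_case_string'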
def set_url_query_parameter(url, param_name, param_value):
"""Set or replace a query parameter, and return the modified URL.
Args:
url: str. URL string which contains the query parameter.
param_name: str. Name of the query parameter to set or replace.
param_value: str. Value to assign to the query parameter.
Returns:
str. Formatted URL that has the query parameter set or replaced.
Raises:
Exception. If the query parameter name passed in is not a string,
then this exception is raised.
"""
if not isinstance(param_name, python_utils.BASESTRING):
raise Exception(
'URL query parameter name must be a string, received %s'
% param_name)
scheme, netloc, path, query_string, fragment = python_utils.url_split(url)
query_params = python_utils.parse_query_string(query_string)
query_params[param_name] = [param_value]
new_query_string = python_utils.url_encode(query_params, doseq=True)
return python_utils.url_unsplit(
(scheme, netloc, path, new_query_string, fragment))
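# Illustrative call (comment only, not part of the original source):
# set_url_query_parameter('http://www.example.com/search?q=old', 'q', 'new')
# rebuilds the URL with q=new; a parameter that is not present yet is simply
# appended to the existing query string.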
class JSONEncoderForHTML(json.JSONEncoder):
"""Encodes JSON that is safe to embed in HTML."""
def encode(self, o):
chunks = self.iterencode(o, True)
return ''.join(chunks) if self.ensure_ascii else u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(
JSONEncoderForHTML, self).iterencode(o, _one_shot=_one_shot)
for chunk in chunks:
yield chunk.replace('&', '\\u0026').replace(
'<', '\\u003c').replace('>', '\\u003e')
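# Worked example (comment only, not part of the original source):
# JSONEncoderForHTML().encode({'snippet': '<b>&</b>'}) produces
# '{"snippet": "\u003cb\u003e\u0026\u003c/b\u003e"}', so the JSON can be embedded
# inside a <script> tag without a stray '</script>' or '&' breaking the page.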
def convert_to_hash(input_string, max_length):
"""Convert a string to a SHA1 hash.
Args:
input_string: str. Input string for conversion to hash.
max_length: int. Maximum Length of the generated hash.
Returns:
str. Hash Value generated from the input_String of the
specified length.
Raises:
Exception. If the input string is not an instance of str,
then this exception is raised.
"""
if not isinstance(input_string, python_utils.BASESTRING):
raise Exception(
'Expected string, received %s of type %s' %
(input_string, type(input_string)))
# Encodes strings using the character set [A-Za-z0-9].
# Prefixing altchars with b' to ensure that all characters in encoded_string
# remain encoded (otherwise encoded_string would be of type unicode).
encoded_string = base64.b64encode(
hashlib.sha1(python_utils.convert_to_bytes(input_string)).digest(),
altchars=b'ab'
).replace('=', 'c')
return encoded_string[:max_length]
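# Illustrative call (comment only, not part of the original source):
# convert_to_hash('exploration_id_1', 12) returns a deterministic 12-character
# string over [A-Za-z0-9] (thanks to the altchars and '=' substitutions above),
# which makes it usable as a short URL-safe id.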
def base64_from_int(value):
"""Converts the number into base64 representation.
Args:
value: int. Integer value for conversion into base64.
Returns:
*. Returns the base64 representation of the number passed.
"""
return base64.b64encode(bytes([value]))
def get_time_in_millisecs(datetime_obj):
"""Returns time in milliseconds since the Epoch.
Args:
datetime_obj: datetime. An object of type datetime.datetime.
Returns:
float. The time in milliseconds since the Epoch.
"""
msecs = time.mktime(datetime_obj.timetuple()) * 1000.0
return msecs + python_utils.divide(datetime_obj.microsecond, 1000.0)
def convert_naive_datetime_to_string(datetime_obj):
"""Returns a human-readable string representing the naive datetime object.
Args:
datetime_obj: datetime. An object of type datetime.datetime. Must be a
naive datetime object.
Returns:
str. The string representing the naive datetime object.
"""
return datetime_obj.strftime(DATETIME_FORMAT)
def convert_string_to_naive_datetime_object(date_time_string):
"""Returns the naive datetime object equivalent of the date string.
Args:
date_time_string: str. The string format representing the datetime
object in the format: Month/Day/Year,
Hour:Minute:Second:MicroSecond.
Returns:
datetime. An object of type naive datetime.datetime corresponding to
that string.
"""
return datetime.datetime.strptime(date_time_string, DATETIME_FORMAT)
def get_current_time_in_millisecs():
"""Returns time in milliseconds since the Epoch.
Returns:
float. The time in milliseconds since the Epoch.
"""
return get_time_in_millisecs(datetime.datetime.utcnow())
def get_human_readable_time_string(time_msec):
"""Given a time in milliseconds since the epoch, get a human-readable
time string for the admin dashboard.
Args:
time_msec: float. Time in milliseconds since the Epoch.
Returns:
str. A string representing the time.
"""
return time.strftime(
'%B %d %H:%M:%S', time.gmtime(python_utils.divide(time_msec, 1000.0)))
def create_string_from_largest_unit_in_timedelta(timedelta_obj):
"""Given the timedelta object, find the largest nonzero time unit and
return that value, along with the time unit, as a human readable string.
The returned string is not localized.
Args:
timedelta_obj: datetime.timedelta. A datetime timedelta object. Datetime
timedelta objects are created when you subtract two datetime
objects.
Returns:
str. A human readable string representing the value of the largest
nonzero time unit, along with the time units. If the largest time unit
is seconds, 1 minute is returned. The value is represented as an integer
in the string.
Raises:
Exception. If the provided timedelta is not positive.
"""
total_seconds = timedelta_obj.total_seconds()
if total_seconds <= 0:
raise Exception(
'Expected a positive timedelta, received: %s.' % total_seconds)
elif timedelta_obj.days != 0:
return '%s day%s' % (
int(timedelta_obj.days), 's' if timedelta_obj.days > 1 else '')
else:
number_of_hours, remainder = divmod(total_seconds, SECONDS_IN_HOUR)
number_of_minutes, _ = divmod(remainder, SECONDS_IN_MINUTE)
if number_of_hours != 0:
return '%s hour%s' % (
int(number_of_hours), 's' if number_of_hours > 1 else '')
elif number_of_minutes > 1:
return '%s minutes' % int(number_of_minutes)
# Round any seconds up to one minute.
else:
return '1 minute'
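# Illustrative examples (not part of the original code):
#   timedelta(days=2, hours=3) -> '2 days'    (largest nonzero unit wins)
#   timedelta(minutes=5)       -> '5 minutes'
#   timedelta(seconds=90)      -> '1 minute'  (only one full minute)
#   timedelta(seconds=30)      -> '1 minute'  (seconds are rounded up to one minute)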
def are_datetimes_close(later_datetime, earlier_datetime):
"""Given two datetimes, determines whether they are separated by less than
feconf.PROXIMAL_TIMEDELTA_SECS seconds.
Args:
later_datetime: datetime. The later datetime.
earlier_datetime: datetime. The earlier datetime.
Returns:
bool. True if difference between two datetimes is less than
feconf.PROXIMAL_TIMEDELTA_SECS seconds otherwise false.
"""
difference_in_secs = (later_datetime - earlier_datetime).total_seconds()
return difference_in_secs < feconf.PROXIMAL_TIMEDELTA_SECS
def generate_random_string(length):
"""Generates a random string of the specified length.
Args:
length: int. Length of the string to be generated.
Returns:
str. Random string of specified length.
"""
return base64.urlsafe_b64encode(os.urandom(length))[:length]
def generate_new_session_id():
"""Generates a new session id.
Returns:
str. Random string of length 24.
"""
return generate_random_string(24)
def vfs_construct_path(base_path, *path_components):
"""Mimics behavior of os.path.join on Posix machines.
Args:
base_path: str. The initial path upon which components would be added.
*path_components: list(str). Components that would be added to the path.
Returns:
str. The path that is obtained after adding the components.
"""
return os.path.join(base_path, *path_components)
def vfs_normpath(path):
"""Normalize path from posixpath.py, eliminating double slashes, etc.
Args:
path: str. Path that is to be normalized.
Returns:
str. Path if it is not null else a dot string.
"""
return os.path.normpath(path)
def require_valid_name(name, name_type, allow_empty=False):
"""Generic name validation.
Args:
name: str. The name to validate.
name_type: str. A human-readable string, like 'the exploration title' or
'a state name'. This will be shown in error messages.
allow_empty: bool. If True, empty strings are allowed.
Raises:
Exception. Name isn't a string.
        Exception. The length of the name isn't between
            1 and 50 characters.
        Exception. Name starts or ends with whitespace.
        Exception. Adjacent whitespace in the name isn't collapsed.
Exception. Invalid character is present in name.
"""
if not isinstance(name, python_utils.BASESTRING):
raise ValidationError('%s must be a string.' % name)
if allow_empty and name == '':
return
# This check is needed because state names are used in URLs and as ids
# for statistics, so the name length should be bounded above.
if len(name) > 50 or len(name) < 1:
raise ValidationError(
'The length of %s should be between 1 and 50 '
'characters; received %s' % (name_type, name))
if name[0] in string.whitespace or name[-1] in string.whitespace:
raise ValidationError(
'Names should not start or end with whitespace.')
if re.search(r'\s\s+', name):
raise ValidationError(
'Adjacent whitespace in %s should be collapsed.' % name_type)
for character in constants.INVALID_NAME_CHARS:
if character in name:
raise ValidationError(
'Invalid character %s in %s: %s' %
(character, name_type, name))
def require_valid_url_fragment(name, name_type, allowed_length):
"""Generic URL fragment validation.
Args:
name: str. The name to validate.
name_type: str. A human-readable string, like 'topic url fragment'.
This will be shown in error messages.
allowed_length: int. Allowed length for the name.
Raises:
Exception. Name is not a string.
Exception. Name is empty.
        Exception. The length of the name exceeds the allowed length.
Exception. Invalid character is present in the name.
"""
if not isinstance(name, python_utils.BASESTRING):
raise ValidationError(
'%s field must be a string. Received %s.' % (name_type, name))
if name == '':
raise ValidationError(
'%s field should not be empty.' % name_type)
if len(name) > allowed_length:
raise ValidationError(
'%s field should not exceed %d characters, '
'received %s.' % (name_type, allowed_length, name))
if not re.match(constants.VALID_URL_FRAGMENT_REGEX, name):
raise ValidationError(
'%s field contains invalid characters. Only lowercase words'
' separated by hyphens are allowed. Received %s.' % (
name_type, name))
def require_valid_thumbnail_filename(thumbnail_filename):
"""Generic thumbnail filename validation.
Args:
thumbnail_filename: str. The thumbnail filename to validate.
"""
if thumbnail_filename is not None:
if not isinstance(thumbnail_filename, python_utils.BASESTRING):
raise ValidationError(
'Expected thumbnail filename to be a string, received %s'
% thumbnail_filename)
if thumbnail_filename.rfind('.') == 0:
raise ValidationError(
'Thumbnail filename should not start with a dot.')
if '/' in thumbnail_filename or '..' in thumbnail_filename:
raise ValidationError(
'Thumbnail filename should not include slashes or '
'consecutive dot characters.')
if '.' not in thumbnail_filename:
raise ValidationError(
'Thumbnail filename should include an extension.')
dot_index = thumbnail_filename.rfind('.')
extension = thumbnail_filename[dot_index + 1:].lower()
if extension != 'svg':
raise ValidationError(
'Expected a filename ending in svg, received %s' %
thumbnail_filename)
def require_valid_meta_tag_content(meta_tag_content):
"""Generic meta tag content validation.
Args:
meta_tag_content: str. The meta tag content to validate.
"""
if not isinstance(meta_tag_content, python_utils.BASESTRING):
raise ValidationError(
'Expected meta tag content to be a string, received %s'
% meta_tag_content)
if len(meta_tag_content) > constants.MAX_CHARS_IN_META_TAG_CONTENT:
raise ValidationError(
'Meta tag content should not be longer than %s characters.'
% constants.MAX_CHARS_IN_META_TAG_CONTENT)
def require_valid_page_title_fragment_for_web(page_title_fragment_for_web):
"""Generic page title fragment validation.
Args:
page_title_fragment_for_web: str. The page title fragment to validate.
Raises:
Exception. Page title fragment is not a string.
Exception. Page title fragment is too lengthy.
"""
max_chars_in_page_title_frag_for_web = (
constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB)
if not isinstance(page_title_fragment_for_web, python_utils.BASESTRING):
raise ValidationError(
'Expected page title fragment to be a string, received %s'
% page_title_fragment_for_web)
if len(page_title_fragment_for_web) > max_chars_in_page_title_frag_for_web:
raise ValidationError(
'Page title fragment should not be longer than %s characters.'
% constants.MAX_CHARS_IN_PAGE_TITLE_FRAGMENT_FOR_WEB)
def capitalize_string(input_string):
"""Converts the first character of a string to its uppercase equivalent (if
it's a letter), and returns the result.
Args:
input_string: str. String to process (to capitalize).
Returns:
str. Capitalizes the string.
"""
# This guards against empty strings.
if input_string:
return input_string[0].upper() + input_string[1:]
else:
return input_string
def get_hex_color_for_category(category):
"""Returns the category, it returns the color associated with the category,
if the category is present in the app constants else given a default color.
Args:
category: str. Category to get color.
Returns:
str. Color assigned to that category.
"""
return (
constants.CATEGORIES_TO_COLORS[category]
if category in constants.CATEGORIES_TO_COLORS
else constants.DEFAULT_COLOR)
def get_thumbnail_icon_url_for_category(category):
"""Returns the category, it returns the associated thumbnail icon, if the
category is present in the app constants else given a default thumbnail.
Args:
category: str. Category to get Thumbnail icon.
Returns:
str. Path to the Thumbnail Icon assigned to that category.
"""
icon_name = (
category if category in constants.CATEGORIES_TO_COLORS
else constants.DEFAULT_THUMBNAIL_ICON)
# Remove all spaces from the string.
return '/subjects/%s.svg' % (icon_name.replace(' ', ''))
def is_supported_audio_language_code(language_code):
"""Checks if the given language code is a supported audio language code.
Args:
language_code: str. The language code.
Returns:
        bool. Whether the language code is a supported audio language code.
"""
language_codes = [lc['id'] for lc in constants.SUPPORTED_AUDIO_LANGUAGES]
return language_code in language_codes
def is_valid_language_code(language_code):
"""Checks if the given language code is a valid language code.
Args:
language_code: str. The language code.
Returns:
bool. Whether the language code is valid or not.
"""
language_codes = [
lc['code'] for lc in constants.SUPPORTED_CONTENT_LANGUAGES]
return language_code in language_codes
def get_supported_audio_language_description(language_code):
"""Returns the language description for the given language code.
Args:
language_code: str. The language code for which the description is
required.
Returns:
str. The language description for the given language code.
Raises:
Exception. If the given language code is unsupported.
"""
for language in constants.SUPPORTED_AUDIO_LANGUAGES:
if language['id'] == language_code:
return language['description']
raise Exception('Unsupported audio language code: %s' % language_code)
def is_user_id_valid(
user_id, allow_system_user_id=False, allow_pseudonymous_id=False):
"""Verify that the user ID is in a correct format or that it belongs to
a system user.
Args:
user_id: str. The user ID to be checked.
allow_system_user_id: bool. Whether to allow system user ID.
allow_pseudonymous_id: bool. Whether to allow pseudonymized ID.
Returns:
bool. True when the ID is in a correct format or if the ID belongs to
a system user, False otherwise.
"""
if allow_system_user_id and user_id in feconf.SYSTEM_USERS.keys():
return True
if allow_pseudonymous_id and is_pseudonymous_id(user_id):
return True
return bool(re.match(feconf.USER_ID_REGEX, user_id))
def is_pseudonymous_id(user_id):
"""Check that the ID is a pseudonymous one.
Args:
user_id: str. The ID to be checked.
Returns:
bool. Whether the ID represents a pseudonymous user.
"""
return bool(re.match(feconf.PSEUDONYMOUS_ID_REGEX, user_id))
def unescape_encoded_uri_component(escaped_string):
"""Unescape a string that is encoded with encodeURIComponent.
Args:
escaped_string: str. String that is encoded with encodeURIComponent.
Returns:
str. Decoded string that was initially encoded with encodeURIComponent.
"""
return python_utils.urllib_unquote(escaped_string).decode('utf-8')
def snake_case_to_camel_case(snake_str):
"""Converts a string in snake_case to camelCase.
Args:
snake_str: str. String that is in snake_case.
Returns:
str. Converted string that is in camelCase.
"""
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return components[0] + ''.join(x.title() for x in components[1:])
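# Illustrative example (not part of the original code):
#   snake_case_to_camel_case('snake_case_to_camel_case') -> 'snakeCaseToCamelCase'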
def get_asset_dir_prefix():
"""Returns prefix for asset directory depending whether dev or prod.
It is used as a prefix in urls for images, css and script files.
Returns:
str. Prefix '/build' if constants.DEV_MODE is false, otherwise
null string.
"""
asset_dir_prefix = ''
if not constants.DEV_MODE:
asset_dir_prefix = '/build'
return asset_dir_prefix
def get_hashable_value(value):
"""This function returns a hashable version of the input JSON-like value.
It converts the built-in sequences into their hashable counterparts
{list: tuple, dict: (sorted tuple of pairs)}. Additionally, their
elements are converted to hashable values through recursive calls. All
other value types are assumed to already be hashable.
Args:
value: *. Some JSON-like object, that is, an object made-up of only:
lists, dicts, strings, ints, bools, None. Types can be nested in
each other.
Returns:
*. A new object that will always have the same hash for "equivalent"
values.
"""
if isinstance(value, list):
return tuple(get_hashable_value(e) for e in value)
elif isinstance(value, dict):
return tuple(sorted(
# Dict keys are already hashable, only values need converting.
(k, get_hashable_value(v)) for k, v in value.items()))
else:
return value
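# Illustrative example (not part of the original code):
#   get_hashable_value({'b': [1, 2], 'a': 3}) -> (('a', 3), ('b', (1, 2)))
# so "equivalent" JSON-like values always map to the same hashable (and hence
# equally-hashing) tuple structure.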
def compress_to_zlib(data):
"""Compress the data to zlib format for efficient storage and communication.
Args:
data: str. Data to be compressed.
Returns:
str. Compressed data string.
"""
return zlib.compress(data)
def decompress_from_zlib(data):
"""Decompress the zlib compressed data.
Args:
data: str. Data to be decompressed.
Returns:
str. Decompressed data string.
"""
return zlib.decompress(data)
def compute_list_difference(list_a, list_b):
"""Returns the set difference of two lists.
Args:
list_a: list. The first list.
list_b: list. The second list.
Returns:
list. List of the set difference of list_a - list_b.
"""
return list(set(list_a) - set(list_b))
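# Illustrative example (not part of the original code):
#   compute_list_difference([1, 2, 3], [2]) -> [1, 3]
# (element order is not guaranteed, since the computation goes through sets)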
class OrderedCounter(collections.Counter, collections.OrderedDict):
"""Counter that remembers the order elements are first encountered."""
pass
|
py | 1a2fdf100886bc09e642bf3cf8b1059a3f23e0c7 | from .ai import *
from .condition import *
from .debug import *
from .diplomacy import *
from .effect import *
from .player import *
from .resource import *
from .tile import *
from .trigger import *
from .unit import *
from .units import * |
py | 1a2fe071196781a69531862b2a578f9971f3bd29 | """Provides the 'OffshoreSubstationDesign` class."""
__author__ = "Jake Nunemaker"
__copyright__ = "Copyright 2020, National Renewable Energy Laboratory"
__maintainer__ = "Jake Nunemaker"
__email__ = "[email protected]"
import numpy as np
from ORBIT.phases.design import DesignPhase
class OffshoreSubstationDesign(DesignPhase):
"""Offshore Substation Design Class."""
expected_config = {
"site": {"depth": "m"},
"plant": {"num_turbines": "int"},
"turbine": {"turbine_rating": "MW"},
"substation_design": {
"mpt_cost_rate": "USD/MW (optional)",
"topside_fab_cost_rate": "USD/t (optional)",
"topside_design_cost": "USD (optional)",
"shunt_cost_rate": "USD/MW (optional)",
"switchgear_cost": "USD (optional)",
"backup_gen_cost": "USD (optional)",
"workspace_cost": "USD (optional)",
"other_ancillary_cost": "USD (optional)",
"topside_assembly_factor": "float (optional)",
"oss_substructure_cost_rate": "USD/t (optional)",
"oss_pile_cost_rate": "USD/t (optional)",
"num_substations": "int (optional)",
},
}
output_config = {
"num_substations": "int",
"offshore_substation_topside": "dict",
"offshore_substation_substructure": "dict",
}
def __init__(self, config, **kwargs):
"""
Creates an instance of OffshoreSubstationDesign.
Parameters
----------
config : dict
"""
config = self.initialize_library(config, **kwargs)
self.config = self.validate_config(config)
self._outputs = {}
def run(self):
"""Main run function."""
self.calc_substructure_length()
self.calc_substructure_deck_space()
self.calc_topside_deck_space()
self.calc_num_mpt_and_rating()
self.calc_mpt_cost()
self.calc_topside_mass_and_cost()
self.calc_shunt_reactor_cost()
self.calc_switchgear_cost()
self.calc_ancillary_system_cost()
self.calc_assembly_cost()
self.calc_substructure_mass_and_cost()
self._outputs["offshore_substation_substructure"] = {
"type": "Monopile", # Substation install only supports monopiles
"deck_space": self.substructure_deck_space,
"mass": self.substructure_mass,
"length": self.substructure_length,
"unit_cost": self.substructure_cost,
}
self._outputs["offshore_substation_topside"] = {
"deck_space": self.topside_deck_space,
"mass": self.topside_mass,
"unit_cost": self.substation_cost,
}
self._outputs["num_substations"] = self.num_substations
@property
def substation_cost(self):
"""Returns total procuremet cost of the topside."""
return (
self.mpt_cost
+ self.topside_cost
+ self.shunt_reactor_cost
+ self.switchgear_costs
+ self.ancillary_system_costs
+ self.land_assembly_cost
)
@property
def total_cost(self):
"""Returns total procurement cost of the substation(s)."""
if not self._outputs:
raise Exception("Has OffshoreSubstationDesign been ran yet?")
return (
self.substructure_cost + self.substation_cost
) * self.num_substations
def calc_substructure_length(self):
"""
Calculates substructure length as the site depth + 10m
"""
self.substructure_length = self.config["site"]["depth"] + 10
def calc_substructure_deck_space(self):
"""
Calculates required deck space for the substation substructure.
Coming soon!
"""
self.substructure_deck_space = 1
def calc_topside_deck_space(self):
"""
Calculates required deck space for the substation topside.
Coming soon!
"""
self.topside_deck_space = 1
def calc_num_mpt_and_rating(self):
"""
Calculates the number of main power transformers (MPTs) and their rating.
Parameters
----------
num_turbines : int
turbine_rating : float
"""
_design = self.config.get("substation_design", {})
num_turbines = self.config["plant"]["num_turbines"]
turbine_rating = self.config["turbine"]["turbine_rating"]
capacity = num_turbines * turbine_rating
self.num_substations = _design.get(
"num_substations", int(np.ceil(capacity / 500))
)
self.num_mpt = np.ceil(
num_turbines * turbine_rating / (250 * self.num_substations)
)
self.mpt_rating = (
round(
(
(num_turbines * turbine_rating * 1.15)
/ (self.num_mpt * self.num_substations)
)
/ 10.0
)
* 10.0
)
def calc_mpt_cost(self):
"""
Calculates the total cost for all MPTs.
Parameters
----------
mpt_cost_rate : float
"""
_design = self.config.get("substation_design", {})
mpt_cost_rate = _design.get("mpt_cost_rate", 12500)
self.mpt_cost = self.mpt_rating * self.num_mpt * mpt_cost_rate
def calc_topside_mass_and_cost(self):
"""
Calculates the mass and cost of the substation topsides.
Parameters
----------
topside_fab_cost_rate : int | float
topside_design_cost: int | float
"""
_design = self.config.get("substation_design", {})
topside_fab_cost_rate = _design.get("topside_fab_cost_rate", 14500)
topside_design_cost = _design.get("topside_design_cost", 4.5e6)
self.topside_mass = 3.85 * self.mpt_rating * self.num_mpt + 285
self.topside_cost = (
self.topside_mass * topside_fab_cost_rate + topside_design_cost
)
def calc_shunt_reactor_cost(self):
"""
Calculates the cost of the shunt reactor.
Parameters
----------
shunt_cost_rate : int | float
"""
_design = self.config.get("substation_design", {})
shunt_cost_rate = _design.get("shunt_cost_rate", 35000)
self.shunt_reactor_cost = (
self.mpt_rating * self.num_mpt * shunt_cost_rate * 0.5
)
def calc_switchgear_cost(self):
"""
Calculates the cost of the switchgear.
Parameters
----------
switchgear_cost : int | float
"""
_design = self.config.get("substation_design", {})
switchgear_cost = _design.get("switchgear_cost", 14.5e5)
self.switchgear_costs = self.num_mpt * switchgear_cost
def calc_ancillary_system_cost(self):
"""
Calculates cost of ancillary systems.
Parameters
----------
backup_gen_cost : int | float
workspace_cost : int | float
other_ancillary_cost : int | float
"""
_design = self.config.get("substation_design", {})
backup_gen_cost = _design.get("backup_gen_cost", 1e6)
workspace_cost = _design.get("workspace_cost", 2e6)
other_ancillary_cost = _design.get("other_ancillary_cost", 3e6)
self.ancillary_system_costs = (
backup_gen_cost + workspace_cost + other_ancillary_cost
)
def calc_assembly_cost(self):
"""
Calculates the cost of assembly on land.
Parameters
----------
topside_assembly_factor : int | float
"""
_design = self.config.get("substation_design", {})
topside_assembly_factor = _design.get("topside_assembly_factor", 0.075)
self.land_assembly_cost = (
self.switchgear_costs
+ self.shunt_reactor_cost
+ self.ancillary_system_costs
) * topside_assembly_factor
def calc_substructure_mass_and_cost(self):
"""
Calculates the mass and associated cost of the substation substructure.
Parameters
----------
oss_substructure_cost_rate : int | float
oss_pile_cost_rate : int | float
"""
_design = self.config.get("substation_design", {})
oss_substructure_cost_rate = _design.get(
"oss_substructure_cost_rate", 3000
)
oss_pile_cost_rate = _design.get("oss_pile_cost_rate", 0)
substructure_mass = 0.4 * self.topside_mass
substructure_pile_mass = 8 * substructure_mass ** 0.5574
self.substructure_cost = (
substructure_mass * oss_substructure_cost_rate
+ substructure_pile_mass * oss_pile_cost_rate
)
self.substructure_mass = substructure_mass + substructure_pile_mass
@property
def design_result(self):
"""
Returns the results of self.run().
"""
if not self._outputs:
raise Exception("Has OffshoreSubstationDesign been ran yet?")
return self._outputs
@property
def detailed_output(self):
"""Returns detailed phase information."""
_outputs = {
"num_substations": self.num_substations,
"substation_mpt_rating": self.mpt_rating,
"substation_topside_mass": self.topside_mass,
"substation_topside_cost": self.topside_cost,
"substation_substructure_mass": self.substructure_mass,
"substation_substructure_cost": self.substructure_cost,
}
return _outputs
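# Illustrative usage sketch (not part of the original module; the exact config
# handling depends on how ORBIT initializes its design library):
#
#     config = {
#         "site": {"depth": 25},             # m
#         "plant": {"num_turbines": 60},
#         "turbine": {"turbine_rating": 8},  # MW
#     }
#     osd = OffshoreSubstationDesign(config)
#     osd.run()
#     print(osd.design_result["num_substations"])
#     print(osd.total_cost)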
|
py | 1a2fe0cee1c38ac5f162959121d9c4410c81ef8d | # -------------------------------------------------------------------------------------------------
# Embroidery
# -------------------------------------------------------------------------------------------------
#
# The program is to design simple ornament matrices for Christmas fair. It creates a matrix
# where 0 means an empty pixel, and positive integers mean different colors.
#
# -------------------------------------------------------------------------------------------------
NULL_CELL = "" # null cell is a cell to fill with some color in the next step of create triangle function
ZERO_CELL = 0
# ---------------------------------------- main functions -----------------------------------------
def draw_rectangle(width, height, border_color=1, fill_color=1, border_width=1):
'''
    Creates a rectangle matrix like this:
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 2 2 2 1 1
1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 2 2 2 1 1
1 1 1 1 1 1 1 1 2 2 2 2 2 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
'''
matrix = []
for row_no in range(height):
row = []
for col_no in range(width):
fill_pattern = border_color if any([
row_no + 1 <= border_width, # first row
height - row_no <= border_width, # last row
col_no + 1 <= border_width, # first column
                width - col_no <= border_width # last column
]) else fill_color
row.append(fill_pattern)
matrix.append(row)
return matrix
def draw_triangle(height, border_color=1, fill_color=1):
'''
    Creates a triangle matrix like this:
0 0 0 1 0 0 0 0 0 0 1 0 0 0
0 0 1 1 1 0 0 0 0 1 2 1 0 0
0 1 1 1 1 1 0 0 1 2 2 2 1 0
1 1 1 1 1 1 1 1 1 1 1 1 1 1
'''
width = 2 * height - 1
return create_triangle(height, width, fill_color, border_color)
def draw_christmas_tree(blocks, border_color=1, fill_color=1):
'''
    Creates a Christmas tree matrix like this:
0 0 0 0 0 1 0 0 0 0 0
0 0 0 0 1 2 1 0 0 0 0
0 0 0 1 2 2 2 1 0 0 0
0 0 0 0 1 2 1 0 0 0 0
0 0 0 1 2 2 2 1 0 0 0
0 0 1 2 2 2 2 2 1 0 0
0 0 0 1 2 2 2 1 0 0 0
0 0 1 2 2 2 2 2 1 0 0
0 1 2 2 2 2 2 2 2 1 0
0 0 1 2 2 2 2 2 1 0 0
0 1 2 2 2 2 2 2 2 1 0
1 1 1 1 1 1 1 1 1 1 1
'''
height = 3
width = 5 + (blocks - 1) * 2 # 5 - the base of the tree in the first block; 2 - distance between the bases of the trees of the next blocks
base_width = 5
matrix = []
for block in range(blocks - 1): # -1 -> without the last row
matrix.extend(create_triangle(height, width, fill_color, border_color, False, base_width + block * 2))
matrix.extend(create_triangle(height, width, fill_color, border_color, True, base_width + (block + 1) * 2)) # the last row with border
return matrix
def draw_circle(radius, border_color=1, fill_color=1, half=True):
'''
    Creates a circle matrix (half circle by default) like this:
0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 0 0 1 1 1 2 2 2 2 2 2 2 1 1 1 0 0 0 0 0 0 0 0 0
0 0 0 0 0 0 0 1 1 2 2 2 2 2 2 2 2 2 2 2 2 2 1 1 0 0 0 0 0 0 0
0 0 0 0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0 0 0 0
0 0 0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0 0 0
0 0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0 0
0 0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0 0
0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0
0 0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0 0
0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0
0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0
0 1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1 0
1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1
1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1
1 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 1
'''
import math
def get_distance_from_center(center_x, center_y, point_x, point_y):
'''
        Based on Pythagoras' theorem, it calculates the distance of a point from the center of the circle:
        |AB| = sqrt((Xa - Xb)^2 + (Ya - Yb)^2)
'''
return round(math.sqrt(math.pow(abs(center_x - point_x), 2) + math.pow(abs(center_y - point_y), 2)), 0)
def create_matrix(size_x, size_y):
'''Sets matrix.'''
matrix = []
for row_no in range(size_x):
row = []
for col_no in range(size_y):
row.append(NULL_CELL)
matrix.append(row)
return matrix
def fill_empty(matrix, radius):
'''Fills the matrix with empty cells located outside the circle.'''
center_x, center_y = 0, 1
circle_center = (radius - 1, radius - 1)
for x in range(size_x):
for y in range(size_y):
distance = get_distance_from_center(circle_center[center_x], circle_center[center_y], x, y)
if distance >= radius:
matrix[x][y] = ZERO_CELL
return matrix
def fill_border(matrix, border_color):
'''Fills matrix with border cells.'''
for x in range(size_x):
for y in range(size_y):
if matrix[x][y] == NULL_CELL: # cell to fill
if x == 0 or x == size_x - 1 or y == 0 or y == size_y - 1: # the first and the last row and column
matrix[x][y] = border_color
                    elif matrix[x][y - 1] == ZERO_CELL or matrix[x][y + 1] == ZERO_CELL or matrix[x - 1][y] == ZERO_CELL or matrix[x + 1][y] == ZERO_CELL: # checks whether it is adjacent to zero (before, after, above, under)
matrix[x][y] = border_color
return matrix
# ------- draw_circle main code -------
size_x = radius - 1 if half is True else radius * 2 - 1
size_y = radius * 2 - 1
matrix = create_matrix(size_x, size_y)
matrix = fill_empty(matrix, radius)
matrix = fill_border(matrix, border_color)
matrix = fill_normal(matrix, fill_color)
return matrix
def embroider(matrix, color_scheme):
    '''Prints the created pattern matrix to the screen (console).'''
for row in matrix:
for cell in row:
print(color_scheme[cell], end=' ')
print()
print()
# -------------------------------------- internal functions ---------------------------------------
def create_triangle(matrix_height, matrix_width, fill_color, border_color, border_last_row=True, base_width=None):
'''
Returns matrix filled with triangle pattern. Default values: base_width = matrix_width
0 0 0 1 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 2 2 2 2 2 1 0 0
0 0 1 2 1 0 0 0 0 1 2 1 0 0 0 0 0 0 1 2 1 0 0 0 0 0 1 2 2 2 2 2 2 2 1 0
0 1 2 2 2 1 0 0 1 2 2 2 1 0 0 0 0 1 1 1 1 1 0 0 0 1 2 2 2 2 2 2 2 2 2 1
1 1 1 1 1 1 1 1 2 2 2 2 2 1
height = 4 matrix_height = 4 matrix_height = None matrix_height = None
matrix_width = 7 matrix_width = 7 matrix_width = 11 matrix_width = 11
base_width=None base_width=None base_width=5 base_width=None
border_last_row=True border_last_row=False border_last_row=True border_last_row=False
'''
def fill_empty(matrix_height, matrix_width, base_width):
'''Fills matrix with empty cells (zeros).'''
matrix = []
empty_cell = (matrix_width - base_width) / 2 + (matrix_height - 1) # number of empty cells one side from the middle column
for row_no in range(matrix_height):
row = []
for col_no in range(matrix_width):
row.append(ZERO_CELL) if (col_no + 1 <= empty_cell or matrix_width - col_no <= empty_cell) else row.append(NULL_CELL) # empty cells from left or right side
matrix.append(row)
empty_cell -= 1
return matrix
def fill_border(matrix, border_color, border_last_row):
'''Fills matrix with border cells.'''
for row_no in range(matrix_height):
for col_no in range(matrix_width):
if matrix[row_no][col_no] == NULL_CELL: # cell to fill
if col_no == 0 or matrix[row_no][col_no - 1] == ZERO_CELL or col_no == matrix_width - 1 or matrix[row_no][col_no + 1] == ZERO_CELL:
matrix[row_no][col_no] = border_color
if border_last_row:
for col_no in range(matrix_width): # fills the last row border cell
if matrix[row_no][col_no] == NULL_CELL: # cell to fill
matrix[matrix_height - 1][col_no] = border_color
return matrix
# ------- create_triangle main code -------
if base_width is None: # base_width default value
base_width = matrix_width
elif base_width < matrix_height: # the minimum base fills the entire height
base_width = (matrix_height * 2) - 1
matrix = fill_empty(matrix_height, matrix_width, base_width)
matrix = fill_border(matrix, border_color, border_last_row)
matrix = fill_normal(matrix, fill_color)
return matrix
def fill_normal(matrix, fill_color):
    '''Fills the matrix's remaining cells with the normal fill color.'''
matrix_height, matrix_width = len(matrix), len(matrix[0])
for row_no in range(matrix_height):
for col_no in range(matrix_width):
if matrix[row_no][col_no] == NULL_CELL: # cell to fill
matrix[row_no][col_no] = fill_color
return matrix
# ------------------------------------------- main code -------------------------------------------
if __name__ == '__main__':
color_scheme = {ZERO_CELL: '0', 1: '1', 2: '2'}
print("Rectangle:")
embroider(draw_rectangle(19, 19, 1, 2, 3), color_scheme)
print("Triangle:")
embroider(draw_triangle(10, border_color=1, fill_color=2), color_scheme)
print("Christmas tree:")
embroider(draw_christmas_tree(8, 1, 2), color_scheme)
print("Circle:")
embroider(draw_circle(15, 1, 2), color_scheme)
|
py | 1a2fe258a7a0651b5613d4a8e173a6e631ac491d | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Banner.disabled'
db.add_column(u'mx_banner', 'disabled',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Banner.disabled'
db.delete_column(u'mx_banner', 'disabled')
models = {
u'downtime.period': {
'Meta': {'object_name': 'Period'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'mx.banner': {
'Meta': {'object_name': 'Banner'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days': ('django.db.models.fields.PositiveIntegerField', [], {}),
'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['downtime.Period']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '25'})
}
}
complete_apps = ['mx'] |
py | 1a2fe4209f60b2aa39a190e6e08b53c970a9149a | from __future__ import absolute_import
from mock import MagicMock, patch
from sentry.testutils.cases import RuleTestCase
from sentry.rules.actions.notify_event_service import NotifyEventServiceAction
from sentry.tasks.sentry_apps import notify_sentry_app
class NotifyEventServiceActionTest(RuleTestCase):
rule_cls = NotifyEventServiceAction
def test_applies_correctly_for_plugins(self):
event = self.get_event()
plugin = MagicMock()
plugin.is_enabled.return_value = True
plugin.should_notify.return_value = True
rule = self.get_rule(data={"service": "mail"})
with patch("sentry.plugins.plugins.get") as get_plugin:
get_plugin.return_value = plugin
results = list(rule.after(event=event, state=self.get_state()))
        assert len(results) == 1
        assert plugin.should_notify.call_count == 1
assert results[0].callback is plugin.rule_notify
def test_applies_correctly_for_sentry_apps(self):
event = self.get_event()
self.create_sentry_app(
organization=event.organization, name="Test Application", is_alertable=True
)
rule = self.get_rule(data={"service": "test-application"})
results = list(rule.after(event=event, state=self.get_state()))
        assert len(results) == 1
assert results[0].callback is notify_sentry_app
|
py | 1a2fe48ebdb2b88fc6f3dd24f3c29d964431cc31 | import pytest
import torch as to
import torch.nn as nn
from functools import partial
from tqdm import tqdm
from pyrado.sampling.utils import gen_batches, gen_ordered_batches
from pyrado.utils.data_types import *
from pyrado.utils.functions import noisy_nonlin_fcn
from pyrado.utils.math import cosine_similarity, cov
from pyrado.environments.pysim.ball_on_beam import BallOnBeamSim
from pyrado.policies.dummy import DummyPolicy
from pyrado.sampling.rollout import rollout
from pyrado.sampling.step_sequence import StepSequence
from pyrado.utils.nn_layers import IndiNonlinLayer
from pyrado.utils.optimizers import GSS
from pyrado.utils.averaging import RunningExpDecayingAverage, RunningMemoryAverage
from pyrado.utils.standardizing import RunningStandardizer, Standardizer
from pyrado.utils.normalizing import RunningNormalizer, normalize
@pytest.mark.parametrize(
'x, data_along_rows', [
(np.random.rand(100, 4), True),
(np.random.rand(4, 100), False)
], ids=['100_4', '4_100']
)
def test_cov(x, data_along_rows):
rowvar = not data_along_rows
cov_np = np.cov(x, rowvar=rowvar)
cov_pyrado = cov(to.from_numpy(x), data_along_rows=data_along_rows).numpy()
assert cov_pyrado.shape[0] == cov_pyrado.shape[1]
if data_along_rows:
assert cov_np.shape[0] == x.shape[1]
assert cov_pyrado.shape[0] == x.shape[1]
else:
assert cov_np.shape[0] == x.shape[0]
assert cov_pyrado.shape[0] == x.shape[0]
assert np.allclose(cov_np, cov_pyrado)
@pytest.mark.parametrize(
'env, expl_strat', [
(BallOnBeamSim(dt=0.02, max_steps=100),
DummyPolicy(BallOnBeamSim(dt=0.02, max_steps=100).spec)),
], ids=['bob_dummy']
)
def test_concat_rollouts(env, expl_strat):
ro1 = rollout(env, expl_strat)
ro2 = rollout(env, expl_strat)
ro_cat = StepSequence.concat([ro1, ro2])
assert isinstance(ro_cat, StepSequence)
assert ro_cat.length == ro1.length + ro2.length
@pytest.mark.parametrize(
'x, y', [
(to.tensor([1., 2., 3.]), to.tensor([1., 2., 3.])),
(to.tensor([1., 0., 1.]), to.tensor([1., 1e12, 1.])),
(to.tensor([0., 0., 0.]), to.tensor([1., 2, 3.])),
(to.tensor([1., 2., 3.]), to.tensor([2., 4., 6.])),
(to.tensor([1., 2., 3.]), to.tensor([-1., -2., -3.])),
], ids=['same', 'similarity_1', 'similarity_0', 'colinear_scaled', 'colinear_opposite']
)
def test_cosine_similarity(x, y):
# Only tested for vector inputs
d_cos = cosine_similarity(x, y)
assert isinstance(d_cos, to.Tensor)
# The examples are chosen to result in 0, 1, or -1
assert to.isclose(d_cos, to.tensor(0.)) or to.isclose(d_cos, to.tensor(1.)) or to.isclose(d_cos, to.tensor(-1.))
@pytest.mark.parametrize(
'x, y', [
({'a': 1, 'b': 2}, {'c': 1, 'd': 4}),
({'a': 1, 'b': 2}, {'b': 3, 'd': 4}),
], ids=['disjoint', 'overlapping']
)
def test_merge_lod_var_dtype(x, y):
z = merge_lod_var_dtype([x, y])
assert z['a'] == 1
if z['b'] == 2: # disjoint
assert z['c'] == 1
elif z['b'] == 3: # overlapping
assert len(z) == 3
else:
assert False
assert z['d'] == 4
@pytest.mark.parametrize(
'batch_size, data_size', [
(3, 30),
(3, 29),
(3, 28),
(2, 2)
], ids=['division_mod0', 'division_mod1', 'division_mod2', 'edge_case']
)
def test_gen_ordered_batches(batch_size, data_size):
from math import ceil
generator = gen_batches(batch_size, data_size)
unordered_batches = list(generator)
assert len(unordered_batches) == ceil(data_size/batch_size)
assert all(len(uob) <= batch_size for uob in unordered_batches)
generator = gen_ordered_batches(batch_size, data_size)
ordered_batches = list(generator)
assert len(ordered_batches) == ceil(data_size/batch_size)
assert all(len(ob) <= batch_size for ob in ordered_batches)
# Check if each mini-batch is sorted
assert all(all(ob[i] <= ob[i + 1] for i in range(len(ob) - 1)) for ob in ordered_batches)
@pytest.mark.parametrize('dtype', ['torch', 'numpy'], ids=['to', 'np'])
@pytest.mark.parametrize('axis', [0, 1], ids=['ax_0', 'ax_1'])
def test_normalize(dtype, axis):
for _ in range(10):
x = to.rand(5, 3) if dtype == 'torch' else np.random.rand(5, 3)
x_norm = normalize(x, axis=axis, order=1)
if isinstance(x_norm, to.Tensor):
x_norm = x_norm.numpy() # for easier checking with pytest.approx
assert np.sum(x_norm, axis=axis) == pytest.approx(1.)
@pytest.mark.parametrize(
'data_seq, axis', [
([np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])], 0),
([np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])], None),
([np.array([1, 1, 2, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, 10, -20, 20])], 0),
([np.array([1, 1, 2, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, 10, -20, 20])], None),
(
[to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]),
to.tensor([10., -20., 20.])],
0),
(
[to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]),
to.tensor([10., -20., 20.])],
-1),
(
[to.tensor([1., 1, 2, 2]), to.tensor([1., 6, 3]), to.tensor([1., 6, 3]),
to.tensor([10., 10, -20, 20])],
0),
(
[to.tensor([1., 1, 2, 2]), to.tensor([1., 6, 3]), to.tensor([1., 6, 3]),
to.tensor([10., 10, -20, 20])],
-1),
], ids=['np_same_length_0', 'np_same_length_None', 'np_mixed_length_0', 'np_mixed_length_None',
'to_same_length_0', 'to_same_length_-1', 'to_mixed_length_0', 'to_mixed_length_-1']
)
def test_running_standardizer(data_seq, axis):
rs = RunningStandardizer()
for data in data_seq:
z = rs(data, axis)
assert z is not None
rs.reset()
assert rs._mean is None and rs._sum_sq_diffs is None and rs._iter == 0
@pytest.mark.parametrize(
'data_seq, alpha', [
(
[np.array([1, 1, 2]), np.array([1, 6, 3]), np.array([1, 6, 3]), np.array([10, -20, 20])],
0.9
),
(
[to.tensor([1., 1., 2]), to.tensor([1., 6., 3.]), to.tensor([1., 6., 3.]), to.tensor([10., -20., 20.])],
0.1
),
], ids=['np', 'to']
)
def test_running_expdecay_average(data_seq, alpha):
reda = RunningExpDecayingAverage(alpha)
for data in data_seq:
z = reda(data)
assert z is not None
reda.reset(alpha=0.5)
assert reda._alpha == 0.5 and reda._prev_est is None
@pytest.mark.parametrize(
'data_seq, capacity', [
(
[np.array([1., 1, 2]), np.array([1., 1, 2]), np.array([1., 1, 2]), np.array([-2., -2, -4])],
3
),
(
[to.tensor([1., 1, 2]), to.tensor([1., 1, 2]), to.tensor([1., 1, 2]), to.tensor([-2., -2, -4])],
3
),
], ids=['np', 'to']
)
def test_running_mem_average(data_seq, capacity):
rma = RunningMemoryAverage(capacity)
for i, data in enumerate(data_seq):
z = rma(data)
if i <= 2:
to.testing.assert_allclose(z, to.tensor([1., 1, 2])) # works with PyTorch Tensors and numpy arrays
elif i == 3:
to.testing.assert_allclose(z, to.tensor([0., 0, 0])) # works with PyTorch Tensors and numpy arrays
rma.reset(capacity=5)
assert rma.capacity == 5 and rma.memory is None
@pytest.mark.parametrize(
'data_seq', [
[5*np.random.rand(25, 3), 0.1*np.random.rand(5, 3), 20*np.random.rand(70, 3)],
[5*to.rand(25, 3), 0.1*to.rand(5, 3), 20*to.rand(70, 3)]
], ids=['np', 'to']
)
def test_running_normalizer(data_seq):
rn = RunningNormalizer()
for data in data_seq:
data_norm = rn(data)
assert (-1 <= data_norm).all()
assert (data_norm <= 1).all()
@pytest.mark.parametrize(
'x', [
to.rand(1000, 1),
to.rand(1, 1000),
to.rand(1000, 1000),
np.random.rand(1, 1000),
np.random.rand(1000, 1),
np.random.rand(1000, 1000)
], ids=['to_1x1000', 'to_1000x1', 'to_1000x1000', 'np_1x1000', 'np_1000x1', 'np_1000x1000']
)
def test_stateful_standardizer(x):
ss = Standardizer()
if isinstance(x, to.Tensor):
x_stdized = ss.standardize(x)
assert x_stdized.shape == x.shape
assert to.allclose(x_stdized.mean(), to.zeros(1))
assert to.allclose(x_stdized.std(), to.ones(1))
x_restrd = ss.unstandardize(x_stdized)
assert x_restrd.shape == x.shape
assert to.allclose(x_restrd, x, rtol=1e-02, atol=1e-05)
elif isinstance(x, np.ndarray):
x_stdized = ss.standardize(x)
assert x_stdized.shape == x.shape
assert np.allclose(x_stdized.mean(), np.zeros(1))
assert np.allclose(x_stdized.std(), np.ones(1))
x_restrd = ss.unstandardize(x_stdized)
assert x_restrd.shape == x.shape
assert np.allclose(x_restrd, x, rtol=1e-02, atol=1e-05)
@pytest.mark.parametrize(
'g, ed', [
(1., 2.),
(np.array([-1., 2.]), np.eye(2))
], ids=['scalar', 'array']
)
def test_ds_spec(g, ed):
# Base class
dss = DSSpec(function='name', goal=g)
assert isinstance(dss, dict)
assert dss['function'] == 'name'
if isinstance(g, np.ndarray):
assert np.all(dss['goal'] == g)
else:
assert dss['goal'] == g
# Linear first order subclass
lds = LinDSSpec(function='lin', goal=g, errorDynamics=ed)
assert isinstance(dss, dict)
assert lds['function'] == 'lin'
if isinstance(g, np.ndarray):
assert np.all(lds['goal'] == g)
assert np.all(lds['errorDynamics'] == ed)
else:
assert lds['goal'] == g
assert lds['errorDynamics'] == ed
# Mass-Spring-Damper subclass
msds = MSDDSSpec(function='msd', goal=g, damping=2., attractorStiffness=3., mass=4.)
assert isinstance(dss, dict)
assert msds['function'] == 'msd'
if isinstance(g, np.ndarray):
assert np.all(msds['goal'] == g)
else:
assert msds['goal'] == g
assert msds['damping'] == 2.
assert msds['attractorStiffness'] == 3.
assert msds['mass'] == 4.
@pytest.mark.optim
@pytest.mark.visualization
@pytest.mark.parametrize(
'identical_bounds', [
True, False
], ids=['identical', 'separate']
)
def test_gss_optimizer_identical_bounds(identical_bounds):
class Dummy:
def loss_fcn(self):
# Some function to minimize
return (self.x + self.y + 4)**2
def __init__(self):
# Test with different lower and upper bounds
self.x, self.y = to.tensor([0.]), to.tensor([4.])
x_min, x_max = to.tensor([-10.]), to.tensor([5.])
if identical_bounds:
self.optim = GSS([{'params': self.x}, {'params': self.y}], x_min, x_max)
else:
x_min_override = to.tensor([-6.])
self.optim = GSS([{'params': self.x, 'param_min': x_min_override}, {'params': self.y}], x_min, x_max)
print(self.optim)
dummy = Dummy()
for i in range(2):
dummy.optim.step(dummy.loss_fcn)
assert dummy.x != dummy.y
print(f'x = {dummy.x.item()} \t y = {dummy.y.item()}')
@pytest.mark.optim
def test_gss_optimizer_functional():
class Dummy:
def loss_fcn(self):
# Some function to minimize
return (self.x + 4)**2
def __init__(self):
# Test with different lower and upper bounds
self.x = to.tensor([0.])
x_min, x_max = to.tensor([-10.]), to.tensor([10.])
self.optim = GSS([{'params': self.x}], x_min, x_max)
dummy = Dummy()
for i in range(100):
dummy.optim.step(dummy.loss_fcn)
assert to.norm(dummy.x + 4) < 1e-4
@pytest.mark.optim
@pytest.mark.visualization
def test_gss_optimizer_nlin_fcn():
from matplotlib import pyplot as plt
# Parameters
x_grid = to.linspace(-2., 3., 200)
f = 1.
noise_std = 0.1
# Init param and optimizer
x_init = to.rand(1)*(x_grid.max() - x_grid.min())/2 + x_grid.min() + (x_grid.max() - x_grid.min())/4 # [.25, .75]
x = nn.Parameter(to.tensor([x_init]), requires_grad=False)
optim = GSS([x], param_min=x_grid.min().unsqueeze(0), param_max=x_grid.max().unsqueeze(0))
obj_fcn = partial(noisy_nonlin_fcn, x=x, f=f, noise_std=noise_std)
num_epochs = 10
# Init plotting
fig = plt.figure()
plt.plot(x_grid, noisy_nonlin_fcn(x=x_grid, f=f), label='noise free fcn')
plt.scatter(x.data.numpy(), obj_fcn().numpy(), s=40, marker='x', color='k', label='init guess')
colors = plt.get_cmap('inferno')(np.linspace(0, 1, num_epochs))
for e in tqdm(range(num_epochs), total=num_epochs):
# Evaluate at a the current point
optim.step(obj_fcn)
# Plot current evaluation
plt.plot(x_grid, noisy_nonlin_fcn(x=x_grid, f=f, noise_std=noise_std), alpha=0.2)
plt.scatter(x.data.numpy(), obj_fcn().numpy(), s=16, color=colors[e])
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.legend()
plt.show()
assert noisy_nonlin_fcn(x, f=f, noise_std=noise_std) < noisy_nonlin_fcn(x_init, f=f, noise_std=noise_std)
@pytest.mark.parametrize('in_features', [1, 3], ids=['1dim', '3dim'])
@pytest.mark.parametrize('same_nonlin', [True, False], ids=['same_nonlin', 'different_nonlin'])
@pytest.mark.parametrize('bias', [True, False], ids=['bias', 'no_bias'])
@pytest.mark.parametrize('weight', [True, False], ids=['weight', 'no_weight'])
def test_indi_nonlin_layer(in_features, same_nonlin, bias, weight):
if not same_nonlin and in_features > 1:
nonlin = in_features*[to.tanh]
else:
nonlin = to.sigmoid
layer = IndiNonlinLayer(in_features, nonlin, bias, weight)
assert isinstance(layer, nn.Module)
i = to.randn(in_features)
o = layer(i)
assert isinstance(o, to.Tensor)
assert i.shape == o.shape
|
py | 1a2fe54d88c1e5c718bd8552ab2e975dc2d3be6e | # --------------
# import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import re
from nltk.corpus import stopwords
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score ,confusion_matrix
# Code starts here
# load data
news = pd.read_csv(path)
# subset data
news = news[['TITLE','CATEGORY']]
# distribution of classes
dist = news['CATEGORY'].value_counts()
# display class distribution
print(dist.head())
# display data
print(news.head())
# Code ends here
# --------------
# Code starts here
# stopwords
stop = (set(stopwords.words('english')))
# retain only alphabets
news['TITLE'] = news['TITLE'].apply(lambda x : re.sub("[^a-zA-Z]", " ",x) )
# convert to lowercase and tokenize
news['TITLE'] = news['TITLE'].apply(lambda x : x.lower().split())
# remove stopwords
news['TITLE'] = news['TITLE'].apply(lambda x : [i for i in x if i not in stop])
# join list elements
print(news['TITLE'].head(2))
news['TITLE'] = news['TITLE'].apply(lambda x : ' '.join(x))
print(news['TITLE'].head(2))
# split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(news['TITLE'], news['CATEGORY'], test_size=0.2, random_state=3)
# Code ends here
# --------------
# Code starts here
# initialize count vectorizer
count_vectorizer = CountVectorizer()
# initialize tfidf vectorizer
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,3))
# fit and transform with count vectorizer
X_train_count= count_vectorizer.fit_transform(X_train)
X_test_count = count_vectorizer.transform(X_test)
# fit and transform with tfidf vectorizer
X_train_tfidf= tfidf_vectorizer.fit_transform(X_train)
X_test_tfidf = tfidf_vectorizer.transform(X_test)
# Code ends here
# --------------
# Code starts here
# initialize multinomial naive bayes
nb_1 = MultinomialNB()
nb_2 = MultinomialNB()
# fit on count vectorizer training data
nb_1.fit(X_train_count, y_train)
# fit on tfidf vectorizer training data
nb_2.fit(X_train_tfidf, y_train)
# accuracy with count vectorizer
acc_count_nb = accuracy_score(nb_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_nb = accuracy_score(nb_2.predict(X_test_tfidf), y_test)
# display accuracies
print('Count Vectorizer accuracy is', acc_count_nb)
print('TFIDF accuracy is', acc_tfidf_nb)
# Code ends here
# --------------
import warnings
warnings.filterwarnings('ignore')
# initialize logistic regression
logreg_1 = OneVsRestClassifier(LogisticRegression(random_state=10))
logreg_2 = OneVsRestClassifier(LogisticRegression(random_state=10))
# fit on count vectorizer training data
logreg_1.fit(X_train_count, y_train)
# fit on tfidf vectorizer training data
logreg_2.fit(X_train_tfidf, y_train)
# accuracy with count vectorizer
acc_count_logreg = accuracy_score(logreg_1.predict(X_test_count), y_test)
# accuracy with tfidf vectorizer
acc_tfidf_logreg = accuracy_score(logreg_2.predict(X_test_tfidf), y_test)
# display accuracies
print('Count vectorizer accuracy is', acc_count_logreg)
print('TFIDF accuracy is', acc_tfidf_logreg)
# Code ends here
|
py | 1a2fe588feedd1327744d61141ef0189c19be486 | from bisect import bisect_right
from itertools import accumulate
from math import inf, sqrt
from numbers import Number
class ApproximateHistogram:
"""
Streaming, approximate histogram
Based on http://jmlr.org/papers/volume11/ben-haim10a/ben-haim10a.pdf
Performance of adding a point is about 5x faster than
https://github.com/carsonfarmer/streamhist (unmaintained).
The output of quantile() will match numpy.quantile() exactly until
the number of points reaches max_bins, and then gracefully transition
to an approximation.
"""
def __init__(self, max_bins):
self._max_bins = max_bins
self._bins = [] # (point, count)
self._costs = [] # item i is _bins[i+1].point - _bins[i].point
self._count = 0
# TODO: maintain min/max as bin entries with infinite merge cost
self._min = inf
self._max = -inf
@staticmethod
def _update_costs(costs, l, i, val):
"""update costs array to reflect l.insert(i, val)"""
if i > 0:
new_cost = val[0] - l[i - 1][0]
costs.insert(i - 1, new_cost)
if i < len(costs):
costs[i] = l[i + 1][0] - val[0]
elif len(l) > 1:
costs.insert(0, l[1][0] - val[0])
# assert costs == approx([b - a for (a, _), (b, _) in zip(l, l[1:])], rel=1e-4)
@staticmethod
def _update_costs_for_merge(costs, l, i, val):
"""update costs array to reflect l[i:i+2] = (val, )"""
# TODO: combine with update_costs()
if 0 < i < len(costs) - 1:
costs[i - 1:i + 2] = val[0] - l[i - 1][0], l[i + 1][0] - val[0]
elif i > 0:
costs[i - 1:i + 1] = (val[0] - l[i - 1][0], )
else:
costs[i:i + 2] = (l[i + 1][0] - val[0], )
# assert costs == approx([b - a for (a, _), (b, _) in zip(l, l[1:])], rel=1e-4)
@classmethod
def _insert_with_cost(cls, costs, l, val):
i = bisect_right(l, val)
l.insert(i, val)
cls._update_costs(costs, l, i, val)
def add(self, point):
"""Add point to histogram"""
# optimization: maintain cost array
self._count += 1
self._min = min(self._min, point)
self._max = max(self._max, point)
bins = self._bins
costs = self._costs
self._insert_with_cost(costs, bins, (point, 1))
if len(bins) > self._max_bins:
i = costs.index(min(costs))
(q0, k0), (q1, k1) = bins[i:i+2]
_count = k0 + k1
median = (q0 * k0 + q1 * k1) / _count
bins[i:i+2] = ((median, _count), )
self._update_costs_for_merge(costs, bins, i, (median, _count))
@property
def count(self):
"""Return number of points represented by this histogram."""
return self._count
@property
def min(self):
"""Return minimum point represented by this histogram"""
return self._min
@property
def max(self):
"""Return maximum point represented by this histogram"""
return self._max
def mean(self):
"""Return mean; O(max_bins) complexity."""
return sum(p * count for p, count in self._bins) / self._count
def std(self):
"""Return standard deviation; O(max_bins) complexity."""
mean = self.mean()
sum_squares = sum((p - mean) ** 2 * count for p, count in self._bins)
return sqrt(sum_squares / self._count)
def _quantile(self, sums, q):
if q <= 0:
return self._min
if q >= 1:
return self._max
bins = self._bins
target_sum = q * (self._count - 1) + 1
i = bisect_right(sums, target_sum) - 1
left = bins[i] if i >= 0 else (self._min, 0)
right = bins[i+1] if i+1 < len(bins) else (self._max, 0)
l0, r0 = left[0], right[0]
l1, r1 = left[1], right[1]
s = target_sum - (sums[i] if i >= 0 else 1)
if l1 <= 1 and r1 <= 1:
# We have exact info at this quantile. Match linear interpolation
# strategy of numpy.quantile().
b = l0 + (r0 - l0) * s / r1 if r1 > 0 else l0
else:
if r1 == 1:
# For exact bin on RHS, compensate for trapezoid interpolation using
# only half of count.
r1 = 2
if l1 == r1:
bp_ratio = s / l1
else:
bp_ratio = (l1 - (l1 ** 2 - 2 * s * (l1 - r1)) ** .5) / (l1 - r1)
assert bp_ratio.imag == 0
b = bp_ratio * (r0 - l0) + l0
return b
def sum(self):
"""Return sum of points; O(max_bins) complexity."""
return sum(x * count for x, count in self._bins)
def quantile(self, q):
"""Return list of values at given quantile fraction(s); O(max_bins) complexity."""
# Deviation from Ben-Haim sum strategy:
# * treat count 1 bins as "exact" rather than dividing the count at the point
# * for neighboring exact bins, use simple linear interpolation matching
# numpy.quantile()
if isinstance(q, Number):
q = (q, )
bins = self._bins
sums = [x - (y/2 if y > 1 else 0) for x, (_, y) in \
zip(accumulate(bin[1] for bin in bins), bins)]
return list(self._quantile(sums, q_item) for q_item in q)
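# Minimal usage sketch (not part of the original module): with fewer points than
# max_bins the quantiles are exact; beyond that they become approximate, as the
# class docstring notes.
if __name__ == '__main__':
    import random
    hist = ApproximateHistogram(max_bins=64)
    for _ in range(10000):
        hist.add(random.gauss(0.0, 1.0))
    print(hist.count, hist.min, hist.max)
    print(hist.mean(), hist.std())
    print(hist.quantile([0.25, 0.5, 0.75]))  # approximate quartiles of N(0, 1)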
|
py | 1a2fe743e17247433559d43f10d9b87dd9e11ea7 | import numpy as np
def scroll(clip, h=None, w=None, x_speed=0, y_speed=0,
x_start=0, y_start=0, apply_to="mask"):
""" Scrolls horizontally or vertically a clip, e.g. to make fin
credits """
if h is None: h = clip.h
if w is None: w = clip.w
xmax = clip.w-w-1
ymax = clip.h-h-1
def f(gf,t):
        x = int(max(0, min(xmax, x_start + np.round(x_speed * t))))
        y = int(max(0, min(ymax, y_start + np.round(y_speed * t))))
return gf(t)[y:y+h, x:x+w]
return clip.fl(f, apply_to = apply_to)
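# Illustrative usage sketch (not part of the original module; assumes moviepy and a
# suitable video file are available):
#
#     from moviepy.editor import VideoFileClip
#     clip = VideoFileClip("credits.mp4")
#     scrolling = scroll(clip, h=clip.h // 2, y_speed=30)  # crop window drifts 30 px/s
#     scrolling.write_videofile("scrolling_credits.mp4")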
|
py | 1a2fe7ee1460e4c36f5e3ec530fe14354abcca3a | """Non-Maximum Suppression module."""
import numpy as np
import torch
def nms(detections, threshold):
"""Apply Non-Maximum Suppression over the detections.
The detections must be a tensor with two dimensions: (number of detections, 5).
Why 5? Because a detection has x1, y1, x2, y2 and score.
Heavily inspired by Adrian Rosebrock at:
https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
    Why not the GPU version? Because I couldn't make it work on my GPU.
Args:
detections (torch.Tensor): A tensor with all the detections. The shape must be
(number of detections, 5) with the score as the last value of the second
dimension.
threshold (float): The threshold for the IoU (intersection over union) to take
two detections as detecting the same object.
Returns:
torch.Tensor: A tensor with the indexes of the detections to keep.
"""
# If there aren't detections return empty
if detections.shape[0] == 0:
return torch.zeros((0))
# Get the numpy version
was_cuda = detections.is_cuda
detections = detections.cpu().numpy()
# Start the picked indexes list empty
picked = []
# Get the coordinates
x1 = detections[:, 0]
y1 = detections[:, 1]
x2 = detections[:, 2]
y2 = detections[:, 3]
scores = detections[:, 4]
# Compute the area of the bounding boxes
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# Get the indexes of the detections sorted by score
indexes = np.argsort(scores)
while len(indexes) > 0:
# Take the last index (highest score) and add it to the picked
last = len(indexes) - 1
actual = indexes[last]
picked.append(actual)
# We need to find the overlap of the bounding boxes with the actual picked bounding box
# Find the largest (more to the bottom-right) (x,y) coordinates for the start
# (top-left) of the bounding box
xx1 = np.maximum(x1[actual], x1[indexes[:last]])
yy1 = np.maximum(y1[actual], y1[indexes[:last]])
# Find the smallest (more to the top-left) (x,y) coordinates for the end (bottom-right)
# of the bounding box
xx2 = np.minimum(x2[actual], x2[indexes[:last]])
yy2 = np.minimum(y2[actual], y2[indexes[:last]])
# Compute width and height to compute the intersection over union
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
intersection = (w * h)
union = areas[actual] + areas[indexes[:last]] - intersection
iou = intersection / union
# Delete the last index and all that overlap is bigger than threshold
indexes = np.delete(indexes, np.concatenate(([last], np.where(iou > threshold)[0])))
# Return the picked indexes
picked = torch.Tensor(picked).long()
if was_cuda:
picked = picked.cuda()
return picked
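# Minimal usage sketch (not part of the original module): two heavily overlapping
# boxes and one separate box; only the higher-scoring overlapping box survives.
if __name__ == '__main__':
    example_detections = torch.tensor([
        [10., 10., 50., 50., 0.9],
        [12., 12., 52., 52., 0.8],     # IoU with the first box is well above 0.5
        [100., 100., 140., 140., 0.7],
    ])
    keep = nms(example_detections, threshold=0.5)
    print(keep)  # indexes of the kept detections (the 0.9 and 0.7 boxes)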
|
py | 1a2fe94d05b3883bd5969cc6fc5bcb6944bdaec8 | import os
from functools import partial
import numpy as np
import pandas as pd
import tables
import matplotlib
import warnings
from PyQt5.QtCore import Qt, QPointF
from PyQt5.QtGui import QPixmap, QPainter, QFont, QPen, QPolygonF, QColor, QKeySequence, QBrush
from PyQt5.QtWidgets import QApplication, QMessageBox
from tierpsy.analysis.ske_create.helperIterROI import getWormROI
from tierpsy.analysis.split_fov.FOVMultiWellsSplitter import FOVMultiWellsSplitter
from tierpsy.gui.MWTrackerViewer_ui import Ui_MWTrackerViewer
from tierpsy.gui.TrackerViewerAux import TrackerViewerAuxGUI
from tierpsy.gui.PlotFeatures import PlotFeatures
from tierpsy.helper.misc import WLAB, save_modified_table
from tierpsy.analysis.split_fov.helper import get_well_color
class WellsDrawer(TrackerViewerAuxGUI):
'''
Dummy class with the wells division drawer functions
'''
def __init__(self, ui):
super().__init__(ui)
# colour
self.fovsplitter_mask = None
self.fovsplitter_feat = None
self.fovsplitter = None
self.is_fov_tosplit = None
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
# check if /fov_wells exists in masked video
if self.fid is not None:
if '/fov_wells' not in self.fid:
self.is_fov_tosplit = False
else:
self.is_fov_tosplit = True
# if it exists, read it
if self.is_fov_tosplit:
# self.wells_in_mask = pd.DataFrame(
# self.fid.get_node('/fov_wells').read())
self.fovsplitter_mask = FOVMultiWellsSplitter(self.vfilename)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
# if no skeletons, skip
if not self.skeletons_file:
return
# check if /fov_wells exists in features video
with tables.File(self.skeletons_file, 'r') as fid:
if '/fov_wells' not in fid:
self.is_fov_tosplit = False
# print("didn't find fov wells though")
else:
self.is_fov_tosplit = True
# print("found fov wells in featuresN")
# if it exists, read it
if self.is_fov_tosplit:
# print('reading fov_wells from featuresN')
# print('pre-reading:')
# print(self.wells)
# self.wells_in_feat = pd.DataFrame(
# fid.get_node('/fov_wells').read())
self.fovsplitter_feat = FOVMultiWellsSplitter(self.skeletons_file)
def draw_wells(self, image):
'''
Draw wells.
'''
if self.is_fov_tosplit:
if self.fovsplitter_feat is not None:
self.fovsplitter = self.fovsplitter_feat
else: # fall back to mask ones
print('falling back')
self.fovsplitter = self.fovsplitter_mask
# prepare constants for drawing
self.fontsize = max(1, max(image.height(), image.width()) // 60)
penwidth = max(1, max(image.height(), image.width()) // 400)
self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1
# self.wellsC = QColor(250, 140, 0)
if 'is_good_well' in self.fovsplitter.wells.columns:
is_color_by_well = True
else:
is_color_by_well = False
# Qt drawing code
painter = QPainter()
painter.begin(image)
pen = QPen()
pen.setWidth(self.penwidth)
painter.setFont(QFont('Decorative', self.fontsize))
# loop on wells
for _, well in self.fovsplitter.wells.iterrows():
# update color every time
if is_color_by_well:
wellC = get_well_color(well['is_good_well'], forCV=True)
wellC = QColor(*wellC)
else:
wellC = QColor(250, 140, 0)
pen.setColor(wellC)
painter.setPen(pen)
# draw well name
painter.drawText(well['x_min'] + self.fontsize*0.4,
well['y_min'] + self.fontsize*1.2,
well['well_name'])
# draw rectangle
painter.drawRect(well['x_min'],
well['y_min'],
well['x_max'] - well['x_min'],
well['y_max'] - well['y_min'])
if well['is_good_well'] == False:
painter.drawLine(well['x_min'],
well['y_min'],
well['x_max'],
well['y_max'])
painter.end()
# super().keyPressEvent(event)
class ContourDrawer(TrackerViewerAuxGUI):
'''
Dummy class with the contour functions
'''
def __init__(self, ui):
super().__init__(ui)
self.food_coordinates = None
self.wlabC = {
WLAB['U']: Qt.white,
WLAB['WORM']: Qt.green,
WLAB['WORMS']: Qt.blue,
WLAB['BAD']: Qt.darkRed,
WLAB['GOOD_SKE']: Qt.darkCyan
}
self.ui.checkBox_showFood.stateChanged.connect(self.updateImage)
self.ui.checkBox_showFood.setEnabled(False)
self.ui.checkBox_showFood.setChecked(True)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
if not self.skeletons_file or self.trajectories_data is None:
self.food_coordinates = None
return
with tables.File(self.skeletons_file, 'r') as fid:
if not '/food_cnt_coord' in fid:
self.food_coordinates = None
self.ui.checkBox_showFood.setEnabled(False)
else:
#change from microns to pixels
self.food_coordinates = fid.get_node('/food_cnt_coord')[:]
self.food_coordinates /= self.microns_per_pixel
self.ui.checkBox_showFood.setEnabled(True)
def draw_food_contour(self, image):
if self.food_coordinates is None or not self.ui.checkBox_showFood.isChecked():
return
painter = QPainter()
painter.begin(image)
penwidth = max(1, max(image.height(), image.width()) // 800)
col = Qt.darkMagenta
p = QPolygonF()
for x,y in self.food_coordinates:
p.append(QPointF(x,y))
pen = QPen()
pen.setWidth(penwidth)
pen.setColor(col)
painter.setPen(pen)
painter.drawPolyline(p)
painter.end()
class IntensityLabeler(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.mean_intensity = None
        self.ui.intensity_label.setStyleSheet('') #avoid displaying color at the start of the program
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
if self.fid is not None:
#get mean intensity information.
#Useful for the optogenetic experiments.
try:
mean_int = self.fid.get_node('/mean_intensity')[:]
#calculate the intensity range and normalize the data.
#I am ignoring any value less than 1. The viewer only works with uint8 data.
dd = mean_int[mean_int>=1]
if dd.size == 0:
raise ValueError
bot = np.min(dd)
top = np.max(dd)
rr = top-bot
                # if the mean intensity change is less than 1 (likely a continuous image) do nothing
if rr <= 1:
raise ValueError
self.mean_intensity = (mean_int-bot)/(rr)
except (tables.exceptions.NoSuchNodeError, ValueError):
self.mean_intensity = None
self.ui.intensity_label.setStyleSheet('')
def display_intensity(self):
if self.mean_intensity is not None and self.frame_number < self.mean_intensity.size:
d = int(self.mean_intensity[self.frame_number]*255)
self.ui.intensity_label.setStyleSheet('QLabel {background-color: rgb(%i, %i, %i);}' % (0, 0, d))
class BlobLabeler(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.wlab = WLAB
self.label_type = 'worm_label'
self.ui.pushButton_U.clicked.connect(
partial(self._h_tag_worm, self.wlab['U']))
self.ui.pushButton_W.clicked.connect(
partial(self._h_tag_worm, self.wlab['WORM']))
self.ui.pushButton_WS.clicked.connect(
partial(self._h_tag_worm, self.wlab['WORMS']))
self.ui.pushButton_B.clicked.connect(
partial(self._h_tag_worm, self.wlab['BAD']))
self.ui.pushButton_W.setShortcut(QKeySequence(Qt.Key_W))
self.ui.pushButton_U.setShortcut(QKeySequence(Qt.Key_U))
self.ui.pushButton_WS.setShortcut(QKeySequence(Qt.Key_C))
self.ui.pushButton_B.setShortcut(QKeySequence(Qt.Key_B))
def enable_label_buttons(self, value):
self.ui.pushButton_U.setEnabled(value)
self.ui.pushButton_W.setEnabled(value)
self.ui.pushButton_WS.setEnabled(value)
self.ui.pushButton_B.setEnabled(value)
def _h_tag_worm(self, label_ind):
if not self.worm_index_type == 'worm_index_manual':
return
worm_ind = self.current_worm_index
if self.frame_data is None:
return
if not worm_ind in self.frame_data['worm_index_manual'].values:
QMessageBox.critical(
self,
'The selected worm is not in this frame.',
'Select a worm in the current frame to label.',
QMessageBox.Ok)
return
good = self.trajectories_data['worm_index_manual'] == worm_ind
self.trajectories_data.loc[good, 'worm_label'] = label_ind
self.updateImage()
class ROIWorm():
def __init__(self, wormCanvas, comboBox_ROI, checkBox_ROI):
self.worm_index = None
self.wormCanvas = wormCanvas
self.comboBox_ROI = comboBox_ROI
self.checkBox_ROI = checkBox_ROI
self.comboBox_ROI.activated.connect(self.selectROI)
def selectROI(self, index):
try:
self.worm_index = int(self.comboBox_ROI.itemText(index))
except ValueError:
self.worm_index = None
@property
def isDrawSkel(self):
return self.checkBox_ROI.isChecked()
class ROIManager(TrackerViewerAuxGUI):
def __init__(self, ui):
super().__init__(ui)
self.rois = [
ROIWorm(
self.ui.wormCanvas1,
self.ui.comboBox_ROI1,
self.ui.checkBox_ROI1
),
ROIWorm(
self.ui.wormCanvas2,
self.ui.comboBox_ROI2,
self.ui.checkBox_ROI2
)
]
self.ui.radioButton_ROI1.setShortcut(QKeySequence(Qt.Key_Up))
self.ui.radioButton_ROI2.setShortcut(QKeySequence(Qt.Key_Down))
self.ui.checkBox_ROI1.stateChanged.connect(partial(self._updateROI, self.rois[0]))
self.ui.checkBox_ROI2.stateChanged.connect(partial(self._updateROI, self.rois[1]))
self.ui.comboBox_ROI1.activated.connect(partial(self._updateROI, self.rois[0]))
self.ui.comboBox_ROI2.activated.connect(partial(self._updateROI, self.rois[1]))
# flags for RW and FF
self.RW, self.FF = 1, 2
self.ui.pushButton_ROI1_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[0]))
self.ui.pushButton_ROI1_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[0]))
self.ui.pushButton_ROI2_RW.clicked.connect(partial(self.roiRWFF, self.RW, self.rois[1]))
self.ui.pushButton_ROI2_FF.clicked.connect(partial(self.roiRWFF, self.FF, self.rois[1]))
@property
def current_roi(self):
if self.ui.radioButton_ROI1.isChecked():
return self.rois[0]
elif self.ui.radioButton_ROI2.isChecked():
return self.rois[1]
else:
raise ValueError("I shouldn't be here")
@property
def current_worm_index(self):
return self.current_roi.worm_index
def updateSkelFile(self, skeletons_file):
for roi in self.rois:
roi.worm_index = None
super().updateSkelFile(skeletons_file)
def keyPressEvent(self, event):
#MORE SHORTCUTS
        # go to the start or end of a trajectory
if event.key() == Qt.Key_BracketLeft:
self.roiRWFF(self.RW, self.current_roi)
elif event.key() == Qt.Key_BracketRight:
self.roiRWFF(self.FF, self.current_roi)
super().keyPressEvent(event)
def updateROIcomboBox(self, roi):
# update valid index for the comboBox
roi.comboBox_ROI.clear()
if roi.worm_index is not None:
roi.comboBox_ROI.addItem(str(int(roi.worm_index)))
for ind in self.frame_data[self.worm_index_type]:
roi.comboBox_ROI.addItem(str(int(ind)))
if roi.worm_index is None:
w_ind = float(roi.comboBox_ROI.itemText(0))
roi.worm_index = int(w_ind)
# function that generalized the updating of the ROI
def _updateROI(self, roi):
if self.frame_data is None or not self.worm_index_type:
# no trajectories data presented, nothing to do here
roi.wormCanvas.clear()
return
self.updateROIcomboBox(roi)
# extract individual worm ROI
good = self.frame_data[self.worm_index_type] == roi.worm_index
row_data = self.frame_data.loc[good].squeeze()
if row_data.size == 0 or \
np.isnan(row_data['coord_x']) or \
np.isnan(row_data['coord_y']):
# invalid data nothing to do here
roi.wormCanvas.clear()
return
worm_img, roi_corner = getWormROI(self.frame_img,
row_data['coord_x'],
row_data['coord_y'],
row_data['roi_size']
)
roi_ori_size = worm_img.shape
worm_img = np.ascontiguousarray(worm_img)
worm_qimg = self._convert2Qimg(worm_img)
canvas_size = min(roi.wormCanvas.height(), roi.wormCanvas.width())
worm_qimg = worm_qimg.scaled(
canvas_size, canvas_size, Qt.KeepAspectRatio)
worm_qimg = self.drawSkelResult(worm_img, worm_qimg, row_data, roi.isDrawSkel, roi_corner, read_center=False)
pixmap = QPixmap.fromImage(worm_qimg)
roi.wormCanvas.setPixmap(pixmap)
def updateROIs(self):
for roi in self.rois:
self._updateROI(roi)
def clearROIs(self):
for roi in self.rois:
roi.wormCanvas.clear()
# move to the first or the last frames of a trajectory
def roiRWFF(self, rwff, roi):
if self.frame_data is None:
return
        # use 1 for rewind (RW) or 2 for fast forward (FF)
good = self.trajectories_data[self.worm_index_type] == roi.worm_index
frames = self.trajectories_data.loc[good, 'frame_number']
if frames.size == 0:
return
if rwff == self.RW:
self.frame_number = frames.min()
elif rwff == self.FF:
self.frame_number = frames.max()
else:
raise ValueError('Invalid rwff value : {} '.format(rwff))
self.ui.spinBox_frame.setValue(self.frame_number)
class TrajectoryEditor(ROIManager):
def __init__(self, ui):
super().__init__(ui)
self.ui.pushButton_join.clicked.connect(self.joinTraj)
self.ui.pushButton_split.clicked.connect(self.splitTraj)
#SHORTCUTS
self.ui.pushButton_join.setShortcut(QKeySequence(Qt.Key_J))
self.ui.pushButton_split.setShortcut(QKeySequence(Qt.Key_S))
def enable_trajectories_buttons(self, value):
self.ui.pushButton_join.setEnabled(value)
self.ui.pushButton_split.setEnabled(value)
def joinTraj(self):
if self.worm_index_type != 'worm_index_manual' \
or self.frame_data is None:
return
worm_ind1 = self.rois[0].worm_index
worm_ind2 = self.rois[1].worm_index
if worm_ind1 == worm_ind2:
QMessageBox.critical(
self,
'Cannot join the same trajectory with itself',
'Cannot join the same trajectory with itself.',
QMessageBox.Ok)
return
index1 = (self.trajectories_data[
'worm_index_manual'] == worm_ind1).values
index2 = (self.trajectories_data[
'worm_index_manual'] == worm_ind2).values
# if the trajectories do not overlap they shouldn't have frame_number
        # indexes in common
frame_number = self.trajectories_data.loc[
index1 | index2, 'frame_number']
if frame_number.size != np.unique(frame_number).size:
QMessageBox.critical(
self,
                'Cannot join overlapping trajectories',
                'Cannot join overlapping trajectories.',
QMessageBox.Ok)
return
if not (worm_ind1 in self.frame_data[
'worm_index_manual'].values or worm_ind2 in self.frame_data['worm_index_manual'].values):
reply = QMessageBox.question(
self,
'Message',
"The none of the selected worms to join is not in this frame. Are you sure to continue?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if reply == QMessageBox.No:
return
# get the first row for each segment to extract some data
first_row1 = self.trajectories_data.loc[index1, :].iloc[0]
first_row2 = self.trajectories_data.loc[index2, :].iloc[0]
# join trajectories
self.trajectories_data.loc[
index2, 'worm_label'] = first_row1['worm_label']
self.trajectories_data.loc[index2, 'worm_index_manual'] = worm_ind1
self.rois[0].worm_index = worm_ind1
self.rois[1].worm_index = worm_ind1
#this might be too slow. I might need to change it
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
def splitTraj(self):
if self.worm_index_type != 'worm_index_manual' \
or self.frame_data is None:
return
worm_ind = self.current_worm_index
        if not worm_ind in self.frame_data['worm_index_manual'].values:
QMessageBox.critical(
self,
'Worm index is not in the current frame.',
'Worm index is not in the current frame. Select a valid index.',
QMessageBox.Ok)
return
last_index = self.trajectories_data['worm_index_manual'].max()
new_ind1 = last_index + 1
new_ind2 = last_index + 2
good = self.trajectories_data['worm_index_manual'] == worm_ind
frames = self.trajectories_data.loc[good, 'frame_number']
frames = frames.sort_values(inplace=False)
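        # frames strictly before the current frame keep one new index (new_ind1);
        # the current frame and everything after it get the other (new_ind2)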
good = frames < self.frame_number
index1 = frames[good].index
index2 = frames[~good].index
        self.trajectories_data.loc[index1, 'worm_index_manual'] = new_ind1
        self.trajectories_data.loc[index2, 'worm_index_manual'] = new_ind2
        self.rois[0].worm_index = new_ind1
        self.rois[1].worm_index = new_ind2
#this might be too slow. I might need to change it
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
class FeatureReaderBase(TrackerViewerAuxGUI):
index_cols = ['worm_index', 'timestamp', 'motion_modes', 'skeleton_id', 'well_name']
valid_fields = ['/timeseries_data', '/features_timeseries']
def __init__(self, ui):
self.timeseries_data = None
self.feat_column = ''
super().__init__(ui)
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
try:
self.traj_colors = {}
with pd.HDFStore(self.skeletons_file, 'r') as ske_file_id:
for field in self.valid_fields:
if field in ske_file_id:
self.timeseries_data = ske_file_id[field]
if field == '/timeseries_data':
blob_features = ske_file_id['/blob_features']
blob_features.columns = ['blob_' + x for x in blob_features.columns]
self.timeseries_data = pd.concat((self.timeseries_data, blob_features), axis=1)
break
else:
raise KeyError
                if len(self.timeseries_data) != len(self.trajectories_data):
                    raise ValueError('timeseries_data and trajectories_data do not match. You might be using an old version of featuresN.hdf5')
self.valid_features = [x for x in self.timeseries_data.columns if x not in self.index_cols]
except (TypeError, AttributeError, IOError, KeyError, tables.exceptions.HDF5ExtError):
self.valid_features = None
self.timeseries_data = None
class MarkersDrawer(FeatureReaderBase):
def __init__(self, ui):
super().__init__(ui)
self.traj_colors = {}
self.n_points_traj = 250
self.n_colors = 256
cmap = matplotlib.cm.get_cmap("bwr")
palette = [cmap(x) for x in np.linspace(0, 1, self.n_colors)]
#palette = sns.color_palette("RdBu_r", self.n_colors)
        palette = np.round(np.array(palette)*255).astype(int)
self.palette = [QColor(*x) for x in palette]
self.drawT = {x: self.ui.comboBox_drawType.findText(x , flags=Qt.MatchContains)
for x in ['boxes', 'traj']}
self.showT = {x: self.ui.comboBox_showLabels.findText(x , flags=Qt.MatchContains)
for x in ['hide', 'all', 'filter']}
self.ui.comboBox_showLabels.setCurrentIndex(self.showT['all'])
self.ui.comboBox_showLabels.currentIndexChanged.connect(self.updateImage)
self.ui.comboBox_drawType.currentIndexChanged.connect(self.updateImage)
self.ui.feature_column.currentIndexChanged.connect(self.change_feature)
self.ui.feat_max_value.valueChanged.connect(self.updateImage)
self.ui.feat_min_value.valueChanged.connect(self.updateImage)
self.ui.is_color_features.stateChanged.connect(self.updateImage)
self.enable_color_feats(False)
self.ui.spinBox_step.valueChanged.connect(self.updateImage)
def updateSkelFile(self, skeletons_file):
self.ui.is_color_features.setChecked(False)
super().updateSkelFile(skeletons_file)
self.ui.feature_column.clear()
if self.timeseries_data is None:
#no feature data
self.enable_color_feats(False)
else:
self.enable_color_feats(True)
self.ui.feature_column.addItems(self.valid_features)
self._h_find_feat_limits()
def change_feature(self):
self._h_find_feat_limits()
self.updateImage()
def _h_find_feat_limits(self):
self.feat_column = str(self.ui.feature_column.currentText())
print(self.feat_column)
if self.feat_column and self.timeseries_data is not None:
f_max = self.timeseries_data[self.feat_column].max()
f_min = self.timeseries_data[self.feat_column].min()
q1, q2 = self.timeseries_data[self.feat_column].quantile([0.02, 0.98])
else:
f_min, f_max, q1, q2 = 0,0,0,0
self.ui.feat_max_value.setRange(f_min, f_max)
self.ui.feat_min_value.setRange(f_min, f_max)
self.ui.feat_min_value.setValue(q1)
self.ui.feat_max_value.setValue(q2)
def enable_color_feats(self, value):
self.ui.feature_column.setEnabled(value)
self.ui.feat_min_value.setEnabled(value)
self.ui.feat_max_value.setEnabled(value)
self.ui.is_color_features.setEnabled(value)
def _h_assign_feat_color(self, irow):
feat_val = self.timeseries_data.loc[irow, self.feat_column]
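        # feat_val != feat_val is only True when feat_val is NaN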
if (feat_val != feat_val):
return Qt.black
#this function can and should be optimized
f_min = self.ui.feat_min_value.value()
f_max = self.ui.feat_max_value.value()
if f_min == f_max: #dummy range in case all the values are the same
f_min, f_max = -1, 1
elif f_min > f_max:
return Qt.black
nn = np.clip((feat_val - f_min)/(f_max - f_min), 0, 1)
ind = int(np.round(nn*(self.n_colors-1)))
col = self.palette[ind]
return col
def draw_worm_markers(self, image):
'''
        Draw the worm markers (index labels plus boxes or trajectories) for the current frame.
'''
if not self.worm_index_type in self.frame_data or \
self.ui.comboBox_showLabels.currentIndex() == self.showT['hide']:
return
if hasattr(self, 'current_worm_index'):
current_index = self.current_worm_index
else:
current_index = -1
painter = QPainter()
painter.begin(image)
self.fontsize = max(1, max(image.height(), image.width()) // 120)
penwidth = max(1, max(image.height(), image.width()) // 800)
self.penwidth = penwidth if penwidth % 2 == 1 else penwidth + 1
if not self.label_type in self.frame_data:
self.frame_data[self.label_type] = self.wlab['U']
for row_id, row_data in self.frame_data.iterrows():
# check if the coordinates are nan
if np.isnan(row_data['coord_x']) or np.isnan(row_data['coord_y']):
continue
            #select between showing the filtered indexes or not
if self.ui.comboBox_showLabels.currentIndex() == self.showT['filter']:
continue
is_current_index = current_index == int(row_data[self.worm_index_type])
cb_ind = self.ui.comboBox_drawType.currentIndex()
if cb_ind == self.drawT['boxes']:
self.draw_boxes(painter, row_id, row_data, is_current_index)
elif cb_ind == self.drawT['traj']:
self.draw_trajectories(painter, row_data, is_current_index)
painter.end()
def _h_get_trajectory(self, worm_index, current_frame):
worm_data = self.traj_worm_index_grouped.get_group(worm_index)
valid_index = worm_data.index[worm_data['frame_number']<= current_frame]
ini = max(0, valid_index.size - self.frame_step*self.n_points_traj)
traj_ind = valid_index.values[ini::self.frame_step]
traj_data = worm_data.loc[traj_ind]
return traj_data
def draw_trajectories(self, painter, row_data, is_current_index):
if self.traj_worm_index_grouped is None:
return
worm_index = int(row_data[self.worm_index_type])
current_frame = row_data['frame_number']
traj_data = self._h_get_trajectory(worm_index, current_frame)
traj_data = traj_data.dropna(subset=['coord_x', 'coord_y'])
x_v = traj_data['coord_x'].round()
y_v = traj_data['coord_y'].round()
points = [QPointF(*map(int, c)) for c in zip(x_v, y_v)]
if self.ui.is_color_features.isChecked():
vec_color = [self._h_assign_feat_color(x) for x in traj_data.index]
pen = QPen()
pen.setWidth(self.penwidth)
for p1, p2, c in zip(points[1:], points[:-1], vec_color):
pen.setColor(c)
painter.setPen(pen)
painter.drawLine(p1, p2)
else:
pol = QPolygonF()
for p in points:
pol.append(p)
if not worm_index in self.traj_colors:
self.traj_colors[worm_index] = QColor(*np.random.randint(50, 230, 3))
col = self.traj_colors[worm_index]
pen = QPen()
pen.setWidth(self.penwidth)
pen.setColor(col)
painter.setPen(pen)
painter.drawPolyline(pol)
def draw_boxes(self, painter, row_id, row_data, is_current_index):
'''
        Draw a labelled box around a single worm.
'''
worm_index = int(row_data[self.worm_index_type])
x = int(round(row_data['coord_x']))
y = int(round(row_data['coord_y']))
        if not self.ui.is_color_features.isChecked():
            label_color = self.wlabC[int(row_data[self.label_type])]
        else:
            label_color = self._h_assign_feat_color(row_id)
pen = QPen()
pen.setColor(label_color)
pen.setWidth(self.penwidth)
painter.setPen(pen)
painter.setFont(QFont('Decorative', self.fontsize))
painter.drawText(x, y, str(worm_index))
bb = row_data['roi_size']
painter.drawRect(x - bb / 2, y - bb / 2, bb, bb)
if is_current_index:
b_size = bb//5
offset = bb/2 - b_size
painter.fillRect(x + offset, y + offset, b_size, b_size, QBrush(label_color))
class PlotCommunicator(FeatureReaderBase, ROIManager):
def __init__(self, ui=''):
super().__init__(ui)
self.ui.pushButton_plot.setEnabled(False)
self.ui.pushButton_plot.clicked.connect(self.show_plot)
self.plotter = None
def closePrev(self):
if self.plotter is not None:
self.plotter.close()
self.plotter = None
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
self.closePrev()
if self.timeseries_data is None:
self.ui.pushButton_plot.setEnabled(False)
else:
self.ui.pushButton_plot.setEnabled(True)
def show_plot(self):
self.closePrev()
self.plotter = PlotFeatures(self.skeletons_file,
self.timeseries_data,
self.traj_worm_index_grouped,
self.time_units,
self.xy_units,
self.fps,
parent = self)
self.plotter.setWindowFlags(self.plotter.windowFlags() | Qt.WindowStaysOnTopHint)
self.plotter.show()
self.update_plot()
def update_plot(self):
if self.plotter:
self.plotter.plot(self.current_worm_index, self.feat_column)
class MWTrackerViewer_GUI( MarkersDrawer, PlotCommunicator,
ContourDrawer, BlobLabeler, IntensityLabeler, TrajectoryEditor, WellsDrawer):
def __init__(self, ui='', argv=''):
if not ui:
super().__init__(Ui_MWTrackerViewer())
else:
super().__init__(ui)
self.setWindowTitle("Multi-Worm Viewer")
self.vfilename = '' if len(argv) <= 1 else argv[1]
self.videos_dir = r"/Volumes/behavgenom$/GeckoVideo/MaskedVideos/"
self.results_dir = ''
self.skeletons_file = ''
self.worm_index_type = 'worm_index_manual'
self.frame_data = None
self.ui.comboBox_labelType.currentIndexChanged.connect(self.selectWormIndexType)
self.ui.pushButton_save.clicked.connect(self.saveData)
# select worm ROI when doubleclick a worm
self.mainImage._canvas.mouseDoubleClickEvent = self.selectWorm
self.mainImage._canvas.mouseRightClickEvent = self.toggleWellStatus
self.ui.comboBox_ROI1.activated.connect(self.update_plot)
self.ui.comboBox_ROI2.activated.connect(self.update_plot)
def saveData(self):
'''save data from manual labelling. pytables saving format is more convenient than pandas'''
if os.name == 'nt':
            # In Windows the paths returned by QFileDialog use / as the file
            # separator. We need to correct it.
for field_name in ['vfilename', 'skeletons_file']:
setattr(
self, field_name, getattr(
self, field_name).replace(
'/', os.sep))
has_skeletons_file = ((self.skeletons_file is not None)
and (self.skeletons_file != ''))
if has_skeletons_file:
save_modified_table(self.skeletons_file,
self.trajectories_data,
'trajectories_data')
if self.is_fov_tosplit:
if has_skeletons_file:
self.fovsplitter.write_fov_wells_to_file(self.skeletons_file)
else:
warnings.warn('No skeletons file. Saving wells info in masked video')
self.fid.close()
self.fovsplitter.write_fov_wells_to_file(self.vfilename)
# self.fid = tables.File(self.vfilename, 'r')
self.updateVideoFile(self.vfilename)
if has_skeletons_file:
self.updateSkelFile(self.skeletons_file)
def updateVideoFile(self, vfilename):
super().updateVideoFile(vfilename)
self.updateImage()
def updateSkelFile(self, skeletons_file):
super().updateSkelFile(skeletons_file)
if self.trajectories_data is None:
#empty file nothing to do here
self.updateImage()
return
#correct the `worm_index_N` to the actual name `worm_index_manual`
if 'worm_index_N' in self.trajectories_data:
self.trajectories_data = self.trajectories_data.rename(
columns={'worm_index_N': 'worm_index_manual'})
        #if this really is trajectories_data (not _features.hdf5) add `worm_index_manual` if it does not exist
if not 'worm_index_manual' in self.trajectories_data and not self.is_estimated_trajectories_data:
self.trajectories_data['worm_label'] = self.wlab['U']
self.trajectories_data['worm_index_manual'] = self.trajectories_data['worm_index_joined']
        #deactivate the save option if we are dealing with estimated data...
self.ui.pushButton_save.setEnabled(not self.is_estimated_trajectories_data)
#add this column if it does not exist
if not 'has_skeleton' in self.trajectories_data:
self.trajectories_data['has_skeleton'] = self.trajectories_data['skeleton_id'] >= 0
self.updateWormIndexTypeMenu()
self.updateImage()
def updateWormIndexTypeMenu(self):
possible_indexes = [x.replace('worm_index_', '') for x in self.trajectories_data.columns if x.startswith('worm_index_')]
        assert len(set(possible_indexes)) == len(possible_indexes) #all index suffixes must be different
menu_names = sorted([x + ' index' for x in possible_indexes])
self.ui.comboBox_labelType.clear()
self.ui.comboBox_labelType.addItems(menu_names)
if 'manual' in possible_indexes:
dd = self.ui.comboBox_labelType.findText('manual index')
self.ui.comboBox_labelType.setCurrentIndex(dd);
self.selectWormIndexType()
def selectWormIndexType(self):
index_option = self.ui.comboBox_labelType.currentText()
if not index_option:
return
assert index_option.endswith(' index')
self.worm_index_type = 'worm_index_' + index_option.replace(' index', '')
# select between automatic and manual worm indexing and label
if self.worm_index_type == 'worm_index_manual':
self.label_type = 'worm_label'
self.enable_trajectories_buttons(True)
self.enable_label_buttons(True)
else:
self.label_type = 'auto_label'
self.enable_trajectories_buttons(False)
self.enable_label_buttons(False)
#recalculate the grouped indexes
self.traj_worm_index_grouped = self.trajectories_data.groupby(self.worm_index_type)
self.updateImage()
# update image
def updateImage(self):
if (self.image_group is None) and (self.isimgstore is False):
return
super(TrackerViewerAuxGUI, self).readCurrentFrame()
# read the data of the particles that exists in the frame
self.frame_data = self.getFrameData(self.frame_number)
#draw extra info only if the worm_index_type is valid
if self.frame_data is not None and \
self.worm_index_type in self.frame_data:
#filter any -1 index
self.frame_data = self.frame_data[self.frame_data[self.worm_index_type]>=0]
if self.frame_data.size > 0:
self.draw_worm_markers(self.frame_qimg)
self.draw_food_contour(self.frame_qimg)
self.updateROIs()
else:
self.clearROIs()
# plot wells
self.draw_wells(self.frame_qimg)
# create the pixmap for the label
self.mainImage.setPixmap(self.frame_qimg)
self.display_intensity()
def selectWorm(self, event):
x = event.pos().x()
y = event.pos().y()
print(x,y)
if self.frame_data is None or self.frame_data.size == 0:
return
R = (x - self.frame_data['coord_x'])**2 + \
(y - self.frame_data['coord_y'])**2
ind = R.idxmin()
good_row = self.frame_data.loc[ind]
if np.sqrt(R.loc[ind]) < good_row['roi_size']:
self.current_roi.worm_index = int(good_row[self.worm_index_type])
self.update_plot()
self.updateImage()
def toggleWellStatus(self, event):
# abort if not multifov
if self.is_fov_tosplit != True:
return
# event is for sure a right click or this does not get called
x = event.pos().x()
y = event.pos().y()
# this will always return something. n/a if clicking outside a well
well_name = self.fovsplitter.find_well_of_xy(x, y)[0].decode('utf-8')
idx = self.fovsplitter.wells['well_name'] == str(well_name)
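        # flip is_good_well between 0 and 1 for the clicked well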
self.fovsplitter.wells.loc[idx, 'is_good_well'] = \
np.mod(self.fovsplitter.wells.loc[idx, 'is_good_well']+1, 2)
# print(self.fovsplitter.wells)
self.updateImage()
def joinTraj(self):
super().joinTraj()
self.update_plot()
def splitTraj(self):
super().splitTraj()
self.update_plot()
def change_feature(self):
super().change_feature()
self.update_plot()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
main = MWTrackerViewer_GUI(argv=sys.argv)
#mask_file = '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/mutliworm_example/BRC20067_worms10_food1-10_Set2_Pos5_Ch2_02062017_121709.hdf5'
#mask_file = '/Volumes/rescomp1/data/WormData/screenings/Pratheeban/First_Set/MaskedVideos/Old_Adult/16_07_22/W3_ELA_1.0_Ch1_22072016_131149.hdf5'
#mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/AVI_VIDEOS/MaskedVideos/AVI_VIDEOS_1.hdf5'
# mask_file = '/Users/avelinojaver/Documents/GitHub/tierpsy-tracker/tests/data/WT2/MaskedVideos/WT2.hdf5'
mask_file = '/Users/lferiani/Hackathon/multiwell_tierpsy/12_FEAT_TIERPSY_forGUI/MaskedVideos/20191205/syngenta_screen_run1_bluelight_20191205_151104.22956805/metadata.hdf5'
main.updateVideoFile(mask_file)
main.show()
sys.exit(app.exec_())
|
py | 1a2fe978c192b9bc4458f081aa6ee8b19d6746e0 | from typing import List
import numpy as np
class DNNLayer:
def __init__(self, out_shape, depends_on: List["DNNLayer"] = tuple(), param_count=0):
assert out_shape is not None # get around varargs restriction
self.extra_repr_params = {}
self.unique_idx = "{}{:02d}".format(self.__class__.__name__, id(self) % 100)
self.out_shape = out_shape
self.depends_on = depends_on
self.param_count = param_count
def __repr__(self):
args = self.extra_repr_params
args["out_shape"] = self.out_shape
args["param_count"] = self.param_count
args["depends_on"] = "[{}]".format(", ".join([x.unique_idx for x in self.depends_on]))
return "{}({})".format(self.unique_idx, ",".join(["{}={}".format(k, v) for k, v in args.items()]))
class QueryKeyValueMatrix(DNNLayer):
# Fusing Query, Key, And Value into 1
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
            out_shape=(3 * SEQ_LEN,I,ATTN_HEADS), # [seq_len X intermediate_vector_dim] for 12 heads
depends_on=[input] if input is not None else [],
param_count=3 * HIDDEN_DIM*I*ATTN_HEADS)
self.flop = 3 * SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS
class QKTMatrix(DNNLayer):
# Fusing Masking and Dropout
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
out_shape=(SEQ_LEN,I,ATTN_HEADS),
depends_on=[input] if input is not None else [], # Different to accept a list
param_count=0)
self.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS + np.prod(self.out_shape) + np.prod(self.out_shape) # QKT + mask + dropout
class Mask(DNNLayer):
def __init__(self, input: DNNLayer):
super().__init__(
out_shape=input.out_shape,
depends_on=[input] if input is not None else [],
param_count=0)
self.flop = np.prod(self.out_shape)
class QKTVMatrix(DNNLayer):
# QKTV + Concat
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
out_shape=(SEQ_LEN,I * ATTN_HEADS),
depends_on=[input] if input is not None else [],
param_count=0)
self.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS + SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS # QKTVMatrix + Concat
class Concat(DNNLayer):
def __init__(self, SEQ_LEN, HIDDEN_DIM, I, ATTN_HEADS, input):
super().__init__(
out_shape=(SEQ_LEN,I * ATTN_HEADS),
depends_on=[input] if input is not None else [],
param_count=HIDDEN_DIM*I*ATTN_HEADS)
# self.flop = SEQ_LEN*HIDDEN_DIM*I*ATTN_HEADS
self.flop = 0
class LinearLayerReLU(DNNLayer):
def __init__(self, in_features: int, out_features: int, input: DNNLayer):
super().__init__(
self.find_outshape(in_features, out_features, input),
[input] if input is not None else [],
param_count=((in_features + 1) * out_features),
)
self.extra_repr_params["in_features"] = in_features
self.extra_repr_params["out_features"] = out_features
self.in_features = in_features
self.out_features = out_features
self.flop = 2 * self.param_count + self.out_features + np.prod(self.out_shape) # (Linear) + ReLU
def find_outshape(self, in_features, out_features, input):
assert len(input.out_shape) == 2 and input.out_shape[1] == in_features, f"{input.out_shape}, {in_features}"
return (input.out_shape[0], out_features)
def selfattn_flop(B, H, K, Tc, Tg, cache_length=0):
assert cache_length >= 0, "cache_length should be non-negative"
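    # Tc is the prompt (context) length and Tg the number of generated tokens.
    # While i <= cache_length the KV cache is assumed to be reused, so only the new
    # token's attention is computed (SEQ_LEN=1); beyond that the full Tc + i sequence
    # is recomputed.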
x = DNNLayer(out_shape=(B, Tc, H))
qkt = QKTMatrix(SEQ_LEN=Tc, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)
mask = Mask(input=x)
flops = qkt.flop + mask.flop
for i in range(1, Tg):
x = DNNLayer(out_shape=(B, Tc + i, H))
if i <= cache_length:
qkt = QKTMatrix(SEQ_LEN=1, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)
else:
qkt = QKTMatrix(SEQ_LEN=Tc + i, HIDDEN_DIM=H, I=H//K, ATTN_HEADS=K, input=x)
flops += qkt.flop
print(f"selfattn_flop: {flops}")
return flops
if __name__ == "__main__":
hparams = {"117M": (12, 768), "345M": (24, 1024), "762M": (36, 1280), "1542M": (48, 1600)}
K = 4
B, H = hparams["117M"]
Tc = 128
Tg = 128
selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=0)
selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=64)
selfattn_flop(B=B, H=H, K=K, Tc=Tc, Tg=Tg, cache_length=128)
|
py | 1a2fe9c92a6d945dfd2ba4172ed35e87e3b9f918 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class TradeFundBillDetail(object):
def __init__(self):
self._amount = None
self._asset_type_code = None
self._asset_user_id = None
self._biz_pay_type = None
self._create_time = None
self._payment_no = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def asset_type_code(self):
return self._asset_type_code
@asset_type_code.setter
def asset_type_code(self, value):
self._asset_type_code = value
@property
def asset_user_id(self):
return self._asset_user_id
@asset_user_id.setter
def asset_user_id(self, value):
self._asset_user_id = value
@property
def biz_pay_type(self):
return self._biz_pay_type
@biz_pay_type.setter
def biz_pay_type(self, value):
self._biz_pay_type = value
@property
def create_time(self):
return self._create_time
@create_time.setter
def create_time(self, value):
self._create_time = value
@property
def payment_no(self):
return self._payment_no
@payment_no.setter
def payment_no(self, value):
self._payment_no = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.asset_type_code:
if hasattr(self.asset_type_code, 'to_alipay_dict'):
params['asset_type_code'] = self.asset_type_code.to_alipay_dict()
else:
params['asset_type_code'] = self.asset_type_code
if self.asset_user_id:
if hasattr(self.asset_user_id, 'to_alipay_dict'):
params['asset_user_id'] = self.asset_user_id.to_alipay_dict()
else:
params['asset_user_id'] = self.asset_user_id
if self.biz_pay_type:
if hasattr(self.biz_pay_type, 'to_alipay_dict'):
params['biz_pay_type'] = self.biz_pay_type.to_alipay_dict()
else:
params['biz_pay_type'] = self.biz_pay_type
if self.create_time:
if hasattr(self.create_time, 'to_alipay_dict'):
params['create_time'] = self.create_time.to_alipay_dict()
else:
params['create_time'] = self.create_time
if self.payment_no:
if hasattr(self.payment_no, 'to_alipay_dict'):
params['payment_no'] = self.payment_no.to_alipay_dict()
else:
params['payment_no'] = self.payment_no
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TradeFundBillDetail()
if 'amount' in d:
o.amount = d['amount']
if 'asset_type_code' in d:
o.asset_type_code = d['asset_type_code']
if 'asset_user_id' in d:
o.asset_user_id = d['asset_user_id']
if 'biz_pay_type' in d:
o.biz_pay_type = d['biz_pay_type']
if 'create_time' in d:
o.create_time = d['create_time']
if 'payment_no' in d:
o.payment_no = d['payment_no']
return o
|
py | 1a2fea07750d6014226f0bc33440cd6ceaed26d9 | from typing import TYPE_CHECKING
# typing doesnt understand aenum so im pretending its stdlib enum while type checking
if TYPE_CHECKING:
import enum
else:
import aenum as enum
__all__ = (
"ChannelType",
"PresenceType",
"RelationshipType",
"AssetType",
"SortType",
)
class ChannelType(enum.Enum):
saved_message = "SavedMessage"
direct_message = "DirectMessage"
group = "Group"
text_channel = "TextChannel"
voice_channel = "VoiceChannel"
class PresenceType(enum.Enum):
busy = "Busy"
idle = "Idle"
invisible = "Invisible"
online = "Online"
class RelationshipType(enum.Enum):
blocked = "Blocked"
blocked_other = "BlockedOther"
friend = "Friend"
incoming_friend_request = "Incoming"
none = "None"
outgoing_friend_request = "Outgoing"
user = "User"
class AssetType(enum.Enum):
image = "Image"
video = "Video"
text = "Text"
audio = "Audio"
file = "File"
class SortType(enum.Enum):
latest = "Latest"
oldest = "Oldest"
relevance = "Relevance"
|
py | 1a2fea9c884de04b9d8461dfabf1a0c9a9419dee | '''
There are a total of numCourses courses you have to take, labeled from 0 to numCourses-1.
Some courses may have prerequisites, for example to take course 0 you have to first take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible for you to finish all courses?
**Example 1**
`Input: numCourses = 2, prerequisites = [[1,0]]`
`Output: true`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0. So it is possible.
**Example 2**
`Input: numCourses = 2, prerequisites = [[1,0],[0,1]]`
`Output: false`
Explanation: There are a total of 2 courses to take.
To take course 1 you should have finished course 0, and to take course 0 you should
also have finished course 1. So it is impossible.
**Note**
You may assume that there are no duplicate edges in the input prerequisites.
'''
from collections import defaultdict
class Solution(object):
def __init__(self):
self.eligibleCourses = []
self.visited = []
def seedEligibleCourses(self, g):
for index, node in g.items():
if len(node) == 0 and index not in self.visited:
self.eligibleCourses.append(index)
def dfs(self, node, g):
if node in self.visited:
return
self.visited.append(node)
for _, n in g.items():
if node in n:
n.remove(node)
for successor in g[node]:
if successor not in self.visited:
self.eligibleCourses.append(successor)
def canFinish(self, numCourses, prerequisites):
if not prerequisites:
return True
graph = defaultdict(list)
for relation in prerequisites:
currentCourse, prerequisite = relation[0], relation[1]
graph[prerequisite].append(currentCourse) # post order!!
if currentCourse not in graph:
graph[currentCourse] = []
self.seedEligibleCourses(graph)
while self.eligibleCourses:
current = self.eligibleCourses.pop(0)
self.dfs(current, graph)
self.seedEligibleCourses(graph)
for _, n in graph.items():
if len(n) > 0:
return False
return True
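if __name__ == "__main__":
    # Quick sanity check mirroring the examples in the docstring above.
    print(Solution().canFinish(2, [[1, 0]]))          # expected: True
    print(Solution().canFinish(2, [[1, 0], [0, 1]]))  # expected: False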
|
py | 1a2fead4cdba019420ee37f32acc4de5822c2905 |
#%% load the background
from __future__ import print_function, division
import torch
from torchvision import datasets, transforms
import os
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import pandas as pd
import numpy as np
import torch.nn as nn
#%% define the datasets
list_datasets = ['/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/original',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_HE',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_tumorLymphnode_165',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_onlyH',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/original',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_HE_165',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_camelyon_165',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/tumorLymphnode/patches/size_165/normalized_to_onlyH_165'
]
list_dataset_names = ['camelyon_ori', 'camelyon_to_HE', 'camelyon_to_tL', 'camelyon_to_H',
'tumorLymphnode_ori', 'tumorLymphnode_to_HE', 'tumorLymphnode_to_ca', 'tumorLymphnode_to_H']
list_models = ['/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/original/model_ResNet152.pt',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_HE/model_ResNet152.pt',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_tumorLymphnode_165/model_ResNet152.pt',
'/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/patchCamelyon/patches/normalized_to_onlyH/model_ResNet152.pt' ]
list_model_names = ['ResNet_original', "ResNet_normalized_to_HE", "ResNet_normalized_to_tumorLymphnode", "ResNet_normalized_to_H"]
#%% iterate over all datasets (and later over all models)
list_model = []
list_dataset = []
list_kappa = []
list_accuracy = []
list_loss = []
for idataset, tdataset in enumerate(list_datasets):
#print(idataset)
#%% define the folder
if tdataset.find("patches") > 0:
dataset2use = "val"
else:
dataset2use = 'test'
# %%define the function to get the data
def get_datatransform(inputSize, data_dir):
data_transforms = {
dataset2use: transforms.Compose([
transforms.Resize([inputSize, inputSize]),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in [dataset2use]}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=False, num_workers=4)
for x in [dataset2use]}
return(data_transforms, image_datasets, dataloaders)
#%% prepare the transformations and the dataset
data_transforms , image_datasets, dataloaders= get_datatransform(259, tdataset)
class_names = dataloaders[dataset2use].dataset.classes
nb_classes = len(class_names)
confusion_matrix = torch.zeros(nb_classes, nb_classes)
    #%% visualize the input data (to check if every class is evenly represented)
class_names = ['normal', 'tumor']
df = pd.DataFrame(dataloaders[dataset2use].dataset.samples)
df.columns = ['file', 'class_nr']
df.class_nr = np.array(df.class_nr)
class_labels = ['NaN' for x in range(df.shape[0])]
for i in range(0,df.shape[0]):
class_labels[i] = class_names[df.class_nr[int(i)]]
df = df.assign(class_labels = class_labels)
sns.set_palette("Set1", n_colors = 12)
sns.countplot(df.class_labels)
plt.xlabel('Pattern')
plt.ylabel('Count [n]')
plt.savefig('DataBase_' + dataset2use + '.jpg')
plt.show()
plt.close()
n_normal = sum(map(lambda x : x == "normal", class_labels))
n_tumor = sum(map(lambda x: x == "tumor", class_labels))
print("n = " + str(n_normal) + " tiles without and n = " + str(n_tumor) + " tiles with tumor.")
#%% iterate over the models
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import accuracy_score
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
n = 0
df_values = pd.DataFrame(list(range(0,len(dataloaders[dataset2use].sampler.data_source.imgs))))
for imodel, tmodel in enumerate(list_models):
print(imodel)
#%% prepare the dataset
inputSize = 224
data_transforms, image_datasets, dataloaders = get_datatransform(inputSize, tdataset)
#%% apply model on test data set (and get a confusion matrix)
model_ft = torch.load(tmodel)
model_ft.eval()
vector_prd = []
vector_exp = []
with torch.no_grad():
for i, (inputs, classes) in enumerate(dataloaders[dataset2use]):
inputs = inputs.to(device)
classes = classes.to(device)
outputs = model_ft(inputs)
_, preds = torch.max(outputs, 1)
if i == 0:
outputs_matrix = outputs
else:
outputs_matrix = torch.cat((outputs_matrix, outputs), 0)
vector_prd = vector_prd + preds.view(-1).cpu().tolist()
vector_exp = vector_exp + classes.view(-1).cpu().tolist()
confusion_matrix = torch.zeros(nb_classes, nb_classes)
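        # rows are indexed by the predicted class, columns by the ground truth
        # (matching the heatmap below: x-axis = Ground Truth, y-axis = model prediction)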
for x, y in zip(vector_exp, vector_prd):
confusion_matrix[y, x] += 1
loss_function = nn.CrossEntropyLoss()
loss_value = loss_function(outputs_matrix.to('cpu'), torch.tensor(vector_exp))
print(confusion_matrix)
        #%% calculate the comparison values
list_model.append(list_model_names[imodel])
list_dataset.append(list_dataset_names[idataset])
list_kappa.append(cohen_kappa_score(vector_prd, vector_exp))
list_accuracy.append(accuracy_score(vector_prd, vector_exp))
list_loss.append(loss_value.tolist())
print('Kappa-value: ' + str(list_kappa[-1]))
print('Accurary-value: ' + str(list_accuracy[-1]))
#%% plot a confusion matrix
matrix2plot = confusion_matrix.numpy()
matrix2plot = matrix2plot.astype(int)
ax = sns.heatmap(matrix2plot,
annot = True, linewidths=5, annot_kws={"size": 10},
xticklabels=class_names, yticklabels=class_names,
cmap = "Blues")
plt.xlabel('Ground Truth')
plt.ylabel('Model ' + list_model[-1] + " on " + list_dataset[-1])
plt.savefig('ConfMat_' +'Model ' + list_model[-1] + " on " + list_dataset[-1] + '.jpg')
plt.show()
plt.close()
#%% make a dataframe
df = pd.DataFrame(list(zip(list_model, list_dataset, list_kappa)), columns=['model', 'data', 'kappa'])
df = df.pivot_table(index = ["model"], columns = ["data"], values = "kappa")
df.to_csv('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.csv')
df.to_excel('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.xlsx')
with open('/home/cw9/sds_hd/sd18a006/marlen/datasets/stainNormalization/table.tex', 'w') as tf:
    tf.write(df.to_latex())
|
py | 1a2feae71d87f1c61be44ff563c96ea2e1ff4ca0 | def is_element(element):
pass
|
py | 1a2feb1d38513c2e04722f0abbb4308498ded0bc | """
Limits
======
Implemented according to the PhD thesis
http://www.cybertester.com/data/gruntz.pdf, which contains very thorough
descriptions of the algorithm including many examples. We summarize here
the gist of it.
All functions are sorted according to how rapidly varying they are at
infinity using the following rules. Any two functions f and g can be
compared using the properties of L:
L=lim log|f(x)| / log|g(x)| (for x -> oo)
We define >, < ~ according to::
1. f > g .... L=+-oo
we say that:
- f is greater than any power of g
- f is more rapidly varying than g
- f goes to infinity/zero faster than g
2. f < g .... L=0
we say that:
- f is lower than any power of g
3. f ~ g .... L!=0, +-oo
we say that:
- both f and g are bounded from above and below by suitable integral
powers of the other
Examples
========
::
2 < x < exp(x) < exp(x**2) < exp(exp(x))
2 ~ 3 ~ -5
x ~ x**2 ~ x**3 ~ 1/x ~ x**m ~ -x
exp(x) ~ exp(-x) ~ exp(2x) ~ exp(x)**2 ~ exp(x+exp(-x))
f ~ 1/f
So we can divide all the functions into comparability classes (x and x^2
belong to one class, exp(x) and exp(-x) belong to some other class). In
principle, we could compare any two functions, but in our algorithm, we
do not compare anything below the class 2~3~-5 (for example log(x) is
below this), so we set 2~3~-5 as the lowest comparability class.
Given the function f, we find the list of most rapidly varying (mrv set)
subexpressions of it. This list belongs to the same comparability class.
Let's say it is {exp(x), exp(2x)}. Using the rule f ~ 1/f we find an
element "w" (either from the list or a new one) from the same
comparability class which goes to zero at infinity. In our example we
set w=exp(-x) (but we could also set w=exp(-2x) or w=exp(-3x) ...). We
rewrite the mrv set using w, in our case {1/w, 1/w^2}, and substitute it
into f. Then we expand f into a series in w::
f = c0*w^e0 + c1*w^e1 + ... + O(w^en), where e0<e1<...<en, c0!=0
but for x->oo, lim f = lim c0*w^e0, because all the other terms go to zero,
because w goes to zero faster than the ci and ei. So::
for e0>0, lim f = 0
for e0<0, lim f = +-oo (the sign depends on the sign of c0)
for e0=0, lim f = lim c0
We need to recursively compute limits at several places of the algorithm, but
as is shown in the PhD thesis, it always finishes.
Important functions from the implementation:
compare(a, b, x) compares "a" and "b" by computing the limit L.
mrv(e, x) returns list of most rapidly varying (mrv) subexpressions of "e"
rewrite(e, Omega, x, wsym) rewrites "e" in terms of w
leadterm(f, x) returns the lowest power term in the series of f
mrv_leadterm(e, x) returns the lead term (c0, e0) for e
limitinf(e, x) computes lim e (for x->oo)
limit(e, z, z0) computes any limit by converting it to the case x->oo
All the functions are really simple and straightforward except
rewrite(), which is the most difficult/complex part of the algorithm.
When the algorithm fails, the bugs are usually in the series expansion
(i.e. in SymPy) or in rewrite.
This code is an almost exact rewrite of the Maple code inside the Gruntz
thesis.
Debugging
---------
Because the gruntz algorithm is highly recursive, it's difficult to
figure out what went wrong inside a debugger. Instead, turn on nice
debug prints by defining the environment variable SYMPY_DEBUG. For
example:
[user@localhost]: SYMPY_DEBUG=True ./bin/isympy
In [1]: limit(sin(x)/x, x, 0)
limitinf(_x*sin(1/_x), _x) = 1
+-mrv_leadterm(_x*sin(1/_x), _x) = (1, 0)
| +-mrv(_x*sin(1/_x), _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| | +-mrv(sin(1/_x), _x) = set([_x])
| | +-mrv(1/_x, _x) = set([_x])
| | +-mrv(_x, _x) = set([_x])
| +-mrv_leadterm(exp(_x)*sin(exp(-_x)), _x, set([exp(_x)])) = (1, 0)
| +-rewrite(exp(_x)*sin(exp(-_x)), set([exp(_x)]), _x, _w) = (1/_w*sin(_w), -_x)
| +-sign(_x, _x) = 1
| +-mrv_leadterm(1, _x) = (1, 0)
+-sign(0, _x) = 0
+-limitinf(1, _x) = 1
And check manually which line is wrong. Then go to the source code and
debug this function to figure out the exact problem.
"""
from functools import reduce
from sympy.core import Basic, S, Mul, PoleError
from sympy.core.cache import cacheit
from sympy.core.numbers import ilcm, I, oo
from sympy.core.symbol import Dummy, Wild
from sympy.core.traversal import bottom_up
from sympy.functions import log, exp, sign as _sign
from sympy.series.order import Order
from sympy.simplify import logcombine
from sympy.simplify.powsimp import powsimp, powdenest
from sympy.utilities.misc import debug_decorator as debug
from sympy.utilities.timeutils import timethis
timeit = timethis('gruntz')
def compare(a, b, x):
"""Returns "<" if a<b, "=" for a == b, ">" for a>b"""
# log(exp(...)) must always be simplified here for termination
la, lb = log(a), log(b)
if isinstance(a, Basic) and (isinstance(a, exp) or (a.is_Pow and a.base == S.Exp1)):
la = a.exp
if isinstance(b, Basic) and (isinstance(b, exp) or (b.is_Pow and b.base == S.Exp1)):
lb = b.exp
c = limitinf(la/lb, x)
if c == 0:
return "<"
elif c.is_infinite:
return ">"
else:
return "="
class SubsSet(dict):
"""
Stores (expr, dummy) pairs, and how to rewrite expr-s.
Explanation
===========
The gruntz algorithm needs to rewrite certain expressions in term of a new
variable w. We cannot use subs, because it is just too smart for us. For
example::
> Omega=[exp(exp(_p - exp(-_p))/(1 - 1/_p)), exp(exp(_p))]
> O2=[exp(-exp(_p) + exp(-exp(-_p))*exp(_p)/(1 - 1/_p))/_w, 1/_w]
> e = exp(exp(_p - exp(-_p))/(1 - 1/_p)) - exp(exp(_p))
> e.subs(Omega[0],O2[0]).subs(Omega[1],O2[1])
-1/w + exp(exp(p)*exp(-exp(-p))/(1 - 1/p))
is really not what we want!
So we do it the hard way and keep track of all the things we potentially
want to substitute by dummy variables. Consider the expression::
exp(x - exp(-x)) + exp(x) + x.
The mrv set is {exp(x), exp(-x), exp(x - exp(-x))}.
We introduce corresponding dummy variables d1, d2, d3 and rewrite::
d3 + d1 + x.
This class first of all keeps track of the mapping expr->variable, i.e.
will at this stage be a dictionary::
{exp(x): d1, exp(-x): d2, exp(x - exp(-x)): d3}.
[It turns out to be more convenient this way round.]
But sometimes expressions in the mrv set have other expressions from the
mrv set as subexpressions, and we need to keep track of that as well. In
this case, d3 is really exp(x - d2), so rewrites at this stage is::
{d3: exp(x-d2)}.
The function rewrite uses all this information to correctly rewrite our
expression in terms of w. In this case w can be chosen to be exp(-x),
i.e. d2. The correct rewriting then is::
exp(-w)/w + 1/w + x.
"""
def __init__(self):
self.rewrites = {}
def __repr__(self):
return super().__repr__() + ', ' + self.rewrites.__repr__()
def __getitem__(self, key):
if not key in self:
self[key] = Dummy()
return dict.__getitem__(self, key)
def do_subs(self, e):
"""Substitute the variables with expressions"""
for expr, var in self.items():
e = e.xreplace({var: expr})
return e
def meets(self, s2):
"""Tell whether or not self and s2 have non-empty intersection"""
return set(self.keys()).intersection(list(s2.keys())) != set()
def union(self, s2, exps=None):
"""Compute the union of self and s2, adjusting exps"""
res = self.copy()
tr = {}
for expr, var in s2.items():
if expr in self:
if exps:
exps = exps.xreplace({var: res[expr]})
tr[var] = res[expr]
else:
res[expr] = var
for var, rewr in s2.rewrites.items():
res.rewrites[var] = rewr.xreplace(tr)
return res, exps
def copy(self):
"""Create a shallow copy of SubsSet"""
r = SubsSet()
r.rewrites = self.rewrites.copy()
for expr, var in self.items():
r[expr] = var
return r
@debug
def mrv(e, x):
"""Returns a SubsSet of most rapidly varying (mrv) subexpressions of 'e',
and e rewritten in terms of these"""
e = powsimp(e, deep=True, combine='exp')
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if not e.has(x):
return SubsSet(), e
elif e == x:
s = SubsSet()
return s, s[x]
elif e.is_Mul or e.is_Add:
i, d = e.as_independent(x) # throw away x-independent terms
if d.func != e.func:
s, expr = mrv(d, x)
return s, e.func(i, expr)
a, b = d.as_two_terms()
s1, e1 = mrv(a, x)
s2, e2 = mrv(b, x)
return mrv_max1(s1, s2, e.func(i, e1, e2), x)
elif e.is_Pow and e.base != S.Exp1:
e1 = S.One
while e.is_Pow:
b1 = e.base
e1 *= e.exp
e = b1
if b1 == 1:
return SubsSet(), b1
if e1.has(x):
base_lim = limitinf(b1, x)
if base_lim is S.One:
return mrv(exp(e1 * (b1 - 1)), x)
return mrv(exp(e1 * log(b1)), x)
else:
s, expr = mrv(b1, x)
return s, expr**e1
elif isinstance(e, log):
s, expr = mrv(e.args[0], x)
return s, log(expr)
elif isinstance(e, exp) or (e.is_Pow and e.base == S.Exp1):
# We know from the theory of this algorithm that exp(log(...)) may always
# be simplified here, and doing so is vital for termination.
if isinstance(e.exp, log):
return mrv(e.exp.args[0], x)
# if a product has an infinite factor the result will be
# infinite if there is no zero, otherwise NaN; here, we
# consider the result infinite if any factor is infinite
li = limitinf(e.exp, x)
if any(_.is_infinite for _ in Mul.make_args(li)):
s1 = SubsSet()
e1 = s1[e]
s2, e2 = mrv(e.exp, x)
su = s1.union(s2)[0]
su.rewrites[e1] = exp(e2)
return mrv_max3(s1, e1, s2, exp(e2), su, e1, x)
else:
s, expr = mrv(e.exp, x)
return s, exp(expr)
elif e.is_Function:
l = [mrv(a, x) for a in e.args]
l2 = [s for (s, _) in l if s != SubsSet()]
if len(l2) != 1:
# e.g. something like BesselJ(x, x)
raise NotImplementedError("MRV set computation for functions in"
" several variables not implemented.")
s, ss = l2[0], SubsSet()
args = [ss.do_subs(x[1]) for x in l]
return s, e.func(*args)
elif e.is_Derivative:
raise NotImplementedError("MRV set computation for derviatives"
" not implemented yet.")
raise NotImplementedError(
"Don't know how to calculate the mrv of '%s'" % e)
def mrv_max3(f, expsf, g, expsg, union, expsboth, x):
"""
Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. max() compares (two elements of)
f and g and returns either (f, expsf) [if f is larger], (g, expsg)
[if g is larger] or (union, expsboth) [if f, g are of the same class].
"""
if not isinstance(f, SubsSet):
raise TypeError("f should be an instance of SubsSet")
if not isinstance(g, SubsSet):
raise TypeError("g should be an instance of SubsSet")
if f == SubsSet():
return g, expsg
elif g == SubsSet():
return f, expsf
elif f.meets(g):
return union, expsboth
c = compare(list(f.keys())[0], list(g.keys())[0], x)
if c == ">":
return f, expsf
elif c == "<":
return g, expsg
else:
if c != "=":
raise ValueError("c should be =")
return union, expsboth
def mrv_max1(f, g, exps, x):
"""Computes the maximum of two sets of expressions f and g, which
are in the same comparability class, i.e. mrv_max1() compares (two elements of)
f and g and returns the set, which is in the higher comparability class
of the union of both, if they have the same order of variation.
Also returns exps, with the appropriate substitutions made.
"""
u, b = f.union(g, exps)
return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),
u, b, x)
@debug
@cacheit
@timeit
def sign(e, x):
"""
Returns a sign of an expression e(x) for x->oo.
::
e > 0 for x sufficiently large ... 1
e == 0 for x sufficiently large ... 0
e < 0 for x sufficiently large ... -1
The result of this function is currently undefined if e changes sign
arbitrarily often for arbitrarily large x (e.g. sin(x)).
Note that this returns zero only if e is *constantly* zero
for x sufficiently large. [If e is constant, of course, this is just
the same thing as the sign of e.]
"""
if not isinstance(e, Basic):
raise TypeError("e should be an instance of Basic")
if e.is_positive:
return 1
elif e.is_negative:
return -1
elif e.is_zero:
return 0
elif not e.has(x):
e = logcombine(e)
return _sign(e)
elif e == x:
return 1
elif e.is_Mul:
a, b = e.as_two_terms()
sa = sign(a, x)
if not sa:
return 0
return sa * sign(b, x)
elif isinstance(e, exp):
return 1
elif e.is_Pow:
if e.base == S.Exp1:
return 1
s = sign(e.base, x)
if s == 1:
return 1
if e.exp.is_Integer:
return s**e.exp
elif isinstance(e, log):
return sign(e.args[0] - 1, x)
# if all else fails, do it the hard way
c0, e0 = mrv_leadterm(e, x)
return sign(c0, x)
@debug
@timeit
@cacheit
def limitinf(e, x, leadsimp=False):
"""Limit e(x) for x-> oo.
Explanation
===========
If ``leadsimp`` is True, an attempt is made to simplify the leading
term of the series expansion of ``e``. That may succeed even if
``e`` cannot be simplified.
"""
# rewrite e in terms of tractable functions only
if not e.has(x):
return e # e is a constant
if e.has(Order):
e = e.expand().removeO()
if not x.is_positive or x.is_integer:
# We make sure that x.is_positive is True and x.is_integer is None
# so we get all the correct mathematical behavior from the expression.
# We need a fresh variable.
p = Dummy('p', positive=True)
e = e.subs(x, p)
x = p
e = e.rewrite('tractable', deep=True, limitvar=x)
e = powdenest(e)
c0, e0 = mrv_leadterm(e, x)
sig = sign(e0, x)
if sig == 1:
return S.Zero # e0>0: lim f = 0
elif sig == -1: # e0<0: lim f = +-oo (the sign depends on the sign of c0)
if c0.match(I*Wild("a", exclude=[I])):
return c0*oo
s = sign(c0, x)
# the leading term shouldn't be 0:
if s == 0:
raise ValueError("Leading term should not be 0")
return s*oo
elif sig == 0:
if leadsimp:
c0 = c0.simplify()
return limitinf(c0, x, leadsimp) # e0=0: lim f = lim c0
else:
raise ValueError("{} could not be evaluated".format(sig))
def moveup2(s, x):
r = SubsSet()
for expr, var in s.items():
r[expr.xreplace({x: exp(x)})] = var
for var, expr in s.rewrites.items():
r.rewrites[var] = s.rewrites[var].xreplace({x: exp(x)})
return r
def moveup(l, x):
return [e.xreplace({x: exp(x)}) for e in l]
@debug
@timeit
def calculate_series(e, x, logx=None):
""" Calculates at least one term of the series of ``e`` in ``x``.
This is a place that fails most often, so it is in its own function.
"""
from sympy.polys import cancel
for t in e.lseries(x, logx=logx):
# bottom_up function is required for a specific case - when e is
# -exp(p/(p + 1)) + exp(-p**2/(p + 1) + p). No current simplification
# methods reduce this to 0 while not expanding polynomials.
t = bottom_up(t, lambda w: getattr(w, 'normal', lambda: w)())
t = cancel(t, expand=False).factor()
if t.has(exp) and t.has(log):
t = powdenest(t)
if not t.is_zero:
break
return t
@debug
@timeit
@cacheit
def mrv_leadterm(e, x):
"""Returns (c0, e0) for e."""
Omega = SubsSet()
if not e.has(x):
return (e, S.Zero)
if Omega == SubsSet():
Omega, exps = mrv(e, x)
if not Omega:
# e really does not depend on x after simplification
return exps, S.Zero
if x in Omega:
# move the whole omega up (exponentiate each term):
Omega_up = moveup2(Omega, x)
exps_up = moveup([exps], x)[0]
# NOTE: there is no need to move this down!
Omega = Omega_up
exps = exps_up
#
# The positive dummy, w, is used here so log(w*2) etc. will expand;
# a unique dummy is needed in this algorithm
#
# For limits of complex functions, the algorithm would have to be
# improved, or just find limits of Re and Im components separately.
#
w = Dummy("w", real=True, positive=True)
f, logw = rewrite(exps, Omega, x, w)
series = calculate_series(f, w, logx=logw)
try:
lt = series.leadterm(w, logx=logw)
except (ValueError, PoleError):
lt = f.as_coeff_exponent(w)
# as_coeff_exponent won't always split in required form. It may simply
# return (f, 0) when a better form may be obtained. Example (-x)**(-pi)
    # can be written as ((-1)**(-pi), -pi), which as_coeff_exponent does not return
if lt[0].has(w):
base = f.as_base_exp()[0].as_coeff_exponent(w)
ex = f.as_base_exp()[1]
lt = (base[0]**ex, base[1]*ex)
return (lt[0].subs(log(w), logw), lt[1])
def build_expression_tree(Omega, rewrites):
r""" Helper function for rewrite.
We need to sort Omega (mrv set) so that we replace an expression before
we replace any expression in terms of which it has to be rewritten::
e1 ---> e2 ---> e3
\
-> e4
Here we can do e1, e2, e3, e4 or e1, e2, e4, e3.
To do this we assemble the nodes into a tree, and sort them by height.
This function builds the tree, rewrites then sorts the nodes.
"""
class Node:
def __init__(self):
self.before = []
self.expr = None
self.var = None
def ht(self):
return reduce(lambda x, y: x + y,
[x.ht() for x in self.before], 1)
nodes = {}
for expr, v in Omega:
n = Node()
n.var = v
n.expr = expr
nodes[v] = n
for _, v in Omega:
if v in rewrites:
n = nodes[v]
r = rewrites[v]
for _, v2 in Omega:
if r.has(v2):
n.before.append(nodes[v2])
return nodes
@debug
@timeit
def rewrite(e, Omega, x, wsym):
"""e(x) ... the function
Omega ... the mrv set
wsym ... the symbol which is going to be used for w
Returns the rewritten e in terms of w and log(w). See test_rewrite1()
for examples and correct results.
"""
if not isinstance(Omega, SubsSet):
raise TypeError("Omega should be an instance of SubsSet")
if len(Omega) == 0:
raise ValueError("Length cannot be 0")
# all items in Omega must be exponentials
for t in Omega.keys():
if not isinstance(t, exp):
raise ValueError("Value should be exp")
rewrites = Omega.rewrites
Omega = list(Omega.items())
nodes = build_expression_tree(Omega, rewrites)
Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True)
# make sure we know the sign of each exp() term; after the loop,
# g is going to be the "w" - the simplest one in the mrv set
for g, _ in Omega:
sig = sign(g.exp, x)
if sig != 1 and sig != -1:
raise NotImplementedError('Result depends on the sign of %s' % sig)
if sig == 1:
wsym = 1/wsym # if g goes to oo, substitute 1/w
# O2 is a list, which results by rewriting each item in Omega using "w"
O2 = []
denominators = []
for f, var in Omega:
c = limitinf(f.exp/g.exp, x)
if c.is_Rational:
denominators.append(c.q)
arg = f.exp
if var in rewrites:
if not isinstance(rewrites[var], exp):
raise ValueError("Value should be exp")
arg = rewrites[var].args[0]
O2.append((var, exp((arg - c*g.exp).expand())*wsym**c))
# Remember that Omega contains subexpressions of "e". So now we find
# them in "e" and substitute them for our rewriting, stored in O2
# the following powsimp is necessary to automatically combine exponentials,
# so that the .xreplace() below succeeds:
# TODO this should not be necessary
f = powsimp(e, deep=True, combine='exp')
for a, b in O2:
f = f.xreplace({a: b})
for _, var in Omega:
assert not f.has(var)
# finally compute the logarithm of w (logw).
logw = g.exp
if sig == 1:
logw = -logw # log(w)->log(1/w)=-log(w)
# Some parts of SymPy have difficulty computing series expansions with
# non-integral exponents. The following heuristic improves the situation:
exponent = reduce(ilcm, denominators, 1)
f = f.subs({wsym: wsym**exponent})
logw /= exponent
return f, logw
def gruntz(e, z, z0, dir="+"):
"""
Compute the limit of e(z) at the point z0 using the Gruntz algorithm.
Explanation
===========
``z0`` can be any expression, including oo and -oo.
For ``dir="+"`` (default) it calculates the limit from the right
(z->z0+) and for ``dir="-"`` the limit from the left (z->z0-). For infinite z0
(oo or -oo), the dir argument doesn't matter.
This algorithm is fully described in the module docstring in the gruntz.py
file. It relies heavily on the series expansion. Most frequently, gruntz()
is only used if the faster limit() function (which uses heuristics) fails.
"""
if not z.is_symbol:
raise NotImplementedError("Second argument must be a Symbol")
# convert all limits to the limit z->oo; sign of z is handled in limitinf
r = None
if z0 == oo:
e0 = e
elif z0 == -oo:
e0 = e.subs(z, -z)
else:
if str(dir) == "-":
e0 = e.subs(z, z0 - 1/z)
elif str(dir) == "+":
e0 = e.subs(z, z0 + 1/z)
else:
raise NotImplementedError("dir must be '+' or '-'")
try:
r = limitinf(e0, z)
except ValueError:
r = limitinf(e0, z, leadsimp=True)
# This is a bit of a heuristic for nice results... we always rewrite
# tractable functions in terms of familiar intractable ones.
    # It might be nicer to rewrite them exactly to what they were initially,
# but that would take some work to implement.
return r.rewrite('intractable', deep=True)
|
py | 1a2fed1e3ee2c697484cfb881c4f54cc98e55ef3 | from .control_sequence import ControlSequence
from .formatter import format_table
class DocumentClass(ControlSequence):
def __init__(self, name, descr=''):
ControlSequence.__init__(self, name, descr)
# A document class may have options.
self.has_opts = True
return
|
py | 1a2fed5be55c9adffa50acb62c7dc0bd99897820 | from unicodedata import name
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient,Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
    # Test the publicly available ingredients API
def setUp(self):
self.client = APIClient()
def test_login_required(self):
        # Test that login is required to access the endpoint
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
    # Test the private ingredients API
def setUp(self) -> None:
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'[email protected]',
'pss123'
)
self.client.force_authenticate(self.user)
def test_retrive_ingredient_list(self):
        # Test retrieving the list of ingredients
Ingredient.objects.create(user = self.user,name='Kale')
Ingredient.objects.create(user = self.user,name='Salt')
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(res.data,serializer.data)
def test_ingredients_limits_to_user(self):
        # Test that only ingredients for the authenticated user are returned
user2 = get_user_model().objects.create_user(
            '[email protected]',
'pass321'
)
Ingredient.objects.create(user=user2,name='Vinegar')
ingredient = Ingredient.objects.create(user = self.user,name = 'Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code,status.HTTP_200_OK)
self.assertEqual(len(res.data),1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
#Test creating a new ingredient
payload = {'name':'Cabbage'}
self.client.post(INGREDIENTS_URL,payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
#Test creating a new ingredient with invalid payload
payload = {'name':''}
res = self.client.post(INGREDIENTS_URL,payload)
self.assertEqual(res.status_code,status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipes(self):
#Test filtering ingredients by those assigned to recipes
ingredient1 = Ingredient.objects.create(user=self.user,name='Apples')
ingredient2 = Ingredient.objects.create(user=self.user,name='Turkey')
recipe = Recipe.objects.create(
title ='Apple crumble',
time_minutes=5,
price=10,
user = self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only':1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retreive_ingredients_assigned_unique(self):
        # Test that filtering ingredients by assigned_only returns unique items
ingredient = Ingredient.objects.create(user=self.user,name='Eggs')
Ingredient.objects.create(user=self.user,name='Cheese')
recipe1 = Recipe.objects.create(
title = 'Eggs benedict',
time_minutes = 30,
price = 12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title = 'Coriander eggs',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL,{'assigned_only':1})
self.assertEqual(len(res.data),1) |
py | 1a2fee16fc06e4ddbb7103d84074920037c39442 | import time
import numpy as np
from equipment.custom import mmwave_source
from equipment.hittite import signal_generator
from equipment.srs import lockin
from xystage import stepper
from kid_readout.interactive import *
from kid_readout.equipment import hardware
from kid_readout.measurement import mmw_source_sweep, core, acquire
logger.setLevel(logging.DEBUG)
# fg = FunctionGenerator()
#hittite = signal_generator.Hittite(ipaddr='192.168.0.200')
#hittite.set_power(0)
#hittite.on()
lockin = lockin.Lockin(LOCKIN_SERIAL_PORT)
tic = time.time()
# lockin.sensitivity = 17
print lockin.identification
print lockin.identification
# print time.time()-tic
# tic = time.time()
# print lockin.state(measurement_only=True)
# print time.time()-tic
source = mmwave_source.MMWaveSource()
source.set_attenuator_turns(6.0,6.0)
source.multiplier_input = 'thermal'
source.waveguide_twist_angle = 0
source.ttl_modulation_source = 'roach'
hwp_motor = stepper.SimpleStepper(port='/dev/ttyACM2')
setup = hardware.Hardware(hwp_motor, source, lockin)
ri = Roach2Baseband()
ri.set_modulation_output(7)
initial_f0s = np.load('/data/readout/resonances/2017-06-JPL-8x8-LF-N1_single_horn_4.npy')/1e6
nf = len(initial_f0s)
atonce = 4
if nf % atonce > 0:
print "extending list of resonators to make a multiple of ", atonce
initial_f0s = np.concatenate((initial_f0s, np.arange(1, 1 + atonce - (nf % atonce)) + initial_f0s.max()))
print len(initial_f0s)
nsamp = 2**20
offsets = np.arange(-16,16)*512./nsamp
last_f0s = initial_f0s
mmw_freqs = np.linspace(140e9, 165e9, 128)
ri.set_dac_atten(35)
tic = time.time()
f0s = initial_f0s
#setup.hittite.off()
# 'high' keeps the source off for the initial sweep
ri.set_modulation_output('high')
ncf_source_off = new_nc_file(suffix= 'mmw_broadband_source_off')
print f0s
swpa = acquire.run_sweep(ri,tone_banks=f0s[None,:]+offsets[:,None],num_tone_samples=nsamp,
length_seconds=0.2,
verbose=True, state=setup.state())
print "resonance sweep done", (time.time()-tic)/60.
print "sweep written", (time.time()-tic)/60.
current_f0s = []
for sidx in range(swpa.num_channels):
swp = swpa.sweep(sidx)
res = swp.resonator
print res.f_0, res.Q, res.delay*1e6, res.current_result.redchi, (f0s[sidx]*1e6-res.f_0)
if np.abs(f0s[sidx]*1e6-res.f_0) > 100e3:
current_f0s.append(f0s[sidx]*1e6)
logger.info("Resonator index %d moved more than 100 kHz, keeping original value %.1f MHz" % (sidx,
f0s[sidx]))
else:
current_f0s.append(res.f_0)
print "fits complete", (time.time()-tic)/60.
current_f0s = np.array(current_f0s)/1e6
current_f0s.sort()
bad_deltas = np.diff(current_f0s) < (256./2**14)*8
if bad_deltas.sum():
print "found bad deltas", bad_deltas.sum()
current_f0s[np.nonzero(bad_deltas)] -= 0.1
bad_deltas = np.diff(current_f0s) < (256./2**14)*8
if bad_deltas.sum():
print "found bad deltas", bad_deltas.sum()
current_f0s[np.nonzero(bad_deltas)] -= 0.1
ri.set_tone_freqs(current_f0s,nsamp=nsamp)
ri.select_fft_bins(range(len(current_f0s)))
print ri.fpga_fft_readout_indexes
print np.diff(ri.fpga_fft_readout_indexes.astype('float')).min()
meas = ri.get_measurement(num_seconds=30)
meas.state = setup.state(fast=True)
sweep_stream_array = basic.SweepStreamArray(sweep_array = swpa, stream_array = meas, state = meas.state, description= 'source off')
ncf_source_off.write(sweep_stream_array)
ncf_source_off.close()
turnlist = np.arange(9,-0.1,-0.5)
#turnlist = [9,5,1]
turnlist = [3]
for turn_num in turnlist:
ri.set_modulation_output(7)
raw_input('set attenuator knobs to %f turns & check lock-in range' %turn_num)
source.set_attenuator_turns(turn_num, turn_num)
#turn on source
ri.set_modulation_output('low')
ncf = new_nc_file(suffix='mmw_broadband_source_on_%.2f_turns' %turn_num)
swpa = acquire.run_sweep(ri, tone_banks=f0s[None, :] + offsets[:, None], num_tone_samples=nsamp,
length_seconds=0.2,
verbose=True, state=setup.state())
print "resonance sweep done", (time.time() - tic) / 60.
print "sweep written", (time.time() - tic) / 60.
current_f0s = []
for sidx in range(swpa.num_channels):
swp = swpa.sweep(sidx)
res = swp.resonator
print res.f_0, res.Q, res.delay * 1e6, res.current_result.redchi, (f0s[sidx] * 1e6 - res.f_0)
if np.abs(f0s[sidx] * 1e6 - res.f_0) > 100e3:
current_f0s.append(f0s[sidx] * 1e6)
logger.info("Resonator index %d moved more than 100 kHz, keeping original value %.1f MHz" % (sidx,
f0s[sidx]))
else:
current_f0s.append(res.f_0)
print "fits complete", (time.time() - tic) / 60.
current_f0s = np.array(current_f0s) / 1e6
current_f0s.sort()
bad_deltas = np.diff(current_f0s) < (256. / 2 ** 14) * 8
if bad_deltas.sum():
print "found bad deltas", bad_deltas.sum()
current_f0s[np.nonzero(bad_deltas)] -= 0.1
bad_deltas = np.diff(current_f0s) < (256. / 2 ** 14) * 8
if bad_deltas.sum():
print "found bad deltas", bad_deltas.sum()
current_f0s[np.nonzero(bad_deltas)] -= 0.1
ri.set_tone_freqs(current_f0s, nsamp=nsamp)
ri.select_fft_bins(range(len(current_f0s)))
print ri.fpga_fft_readout_indexes
print np.diff(ri.fpga_fft_readout_indexes.astype('float')).min()
meas = ri.get_measurement(num_seconds=30)
#turn on modulation to get zbd voltage
ri.set_modulation_output(7)
time.sleep(2)
sweep_stream_array = basic.SweepStreamArray(sweep_array=swpa, stream_array=meas, state=setup.state(fast=True),
description='source on')
ncf.write(sweep_stream_array)
meas = ri.get_measurement(num_seconds=30)
meas.state = setup.state(fast=True)
sweep_stream_array = basic.SweepStreamArray(sweep_array=swpa, stream_array=meas, state=meas.state,
description='chopped')
ncf.write(sweep_stream_array)
ncf.close() |
py | 1a2fef043bb60b57d450dba7201945e2b51ef1aa |
import random
from tqdm import tqdm
import glob
import numpy as np
import torch
from sparse_ct.reconstructor_2d.n2self import (
N2SelfReconstructor)
from sparse_ct.reconstructor_2d.dataset import (
DeepLesionDataset, EllipsesDataset)
if __name__ == "__main__":
params= {'batch_size': 8,
'shuffle': True,
'num_workers': 8}
N_PROJ = 64
pwd_train = '/external/CT_30_000/train'
pwd_test = '/external/CT_30_000/test'
file_list_train = glob.glob(pwd_train+'/*/*/*/*.png')
file_list_test = glob.glob(pwd_test+'/*/*/*/*.png')
print("file_list_train", len(file_list_train))
print("file_list_test", len(file_list_test))
# train_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# file_list_train,
# return_gt=False,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
# test_loader = torch.utils.data.DataLoader(
# DeepLesionDataset(
# random.choices(file_list_test, k=1000),
# return_gt=True,
# n_proj=N_PROJ,
# img_size=512),
# **params
# )
train_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='train',
return_gt=False,
n_proj=N_PROJ,
img_size=512),
**params
)
test_loader = torch.utils.data.DataLoader(
EllipsesDataset(
ellipses_type='validation',
return_gt=True,
n_proj=N_PROJ,
img_size=512),
**params
)
theta = np.linspace(0.0, 180.0, N_PROJ, endpoint=False)
recon_n2self = N2SelfReconstructor(
'N2SelfTrained',
net='unet', lr=0.0001,
n2self_weights=None,#'selfsuper-ellipses-64-l1-train1/iter_180000.pth',#'iter_15000.pth',
#'selfsuper-ellipses-64-train8/iter_58800.pth', #'self-super-train9/iter_199800.pth',
learnable_filter=False
)
recon_n2self.init_train(theta)
recon_n2self._eval(test_loader)
for i in range(50):
print('--------------- ',i)
recon_n2self._train_one_epoch(train_loader, test_loader)
recon_n2self._eval(test_loader)
recon_n2self._save('epoch_{}.pth'.format(i))
recon_n2self._save('end.pth')
|
py | 1a2feff3c24738113364bd8043fa2206d4733889 | # Generated by Django 4.0.1 on 2022-02-08 00:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('items', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='item',
name='ocultar',
field=models.BooleanField(default=True),
),
]
|
py | 1a2ff1878fb5b2ace84def751fd6818c96ee7e46 | # Hash Table; Bit Manipulation
# Given two strings s and t which consist of only lowercase letters.
#
# String t is generated by random shuffling string s and then add one more letter at a random position.
#
# Find the letter that was added in t.
#
# Example:
#
# Input:
# s = "abcd"
# t = "abcde"
#
# Output:
# e
#
# Explanation:
# 'e' is the letter that was added.
import collections


class Solution(object):
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
sMap = collections.Counter(s)
tMap = collections.Counter(t)
for x in tMap:
if x not in sMap or tMap[x] != sMap[x]:
return x
return None
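
    # The header above also lists "Bit Manipulation"; the sketch below is an
    # alternative single-pass approach (the method name is illustrative, not
    # part of the original problem): XOR the character codes of s and t, so
    # every letter present in both strings cancels and only the added letter
    # survives.
    def findTheDifferenceXor(self, s, t):
        """
        :type s: str
        :type t: str
        :rtype: str
        """
        out = 0
        for ch in s + t:
            out ^= ord(ch)
        return chr(out)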
|
py | 1a2ff1b61ed8184e6edaf2a24430eb7123687cc3 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Functional test
Case Name : overlay function with Chinese characters as input arguments
Description :
1. Use the overlay function to process Chinese characters
Expect :
1. The returned result is correct
History :
"""
import unittest
import sys
from yat.test import Node
from yat.test import macro
sys.path.append(sys.path[0] + "/../")
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
logger = Logger()
class Function(unittest.TestCase):
def setUp(self):
logger.info("--------Opengauss_Function_Innerfunc_Overlay_Case0004.py开始执行--------")
self.commonsh = CommonSH('dbuser')
self.userNode = Node('dbuser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
def test_right(self):
encoding = ['SQL_ASCII', 'UTF-8']
sql_cmd = "SELECT overlay('和卡拉和梵蒂冈' placing '猕猴桃666' from 4 for 9 );"
result = ["和猕猴桃666梵蒂冈", "和卡拉猕猴桃666"]
for i in range(2):
            # Create the database
db_create = f"""drop database if exists aaa;
create database aaa encoding = '{encoding[i]}';"""
msg1 = self.commonsh.execut_db_sql(db_create)
logger.info(msg1)
self.assertTrue('CREATE' in msg1)
            # Connect to the newly created database with this encoding and run the SQL statement
cmd1 = f'''source {self.DB_ENV_PATH};
gsql -d aaa -p {self.userNode.db_port} -c "{sql_cmd}"'''
msg2 = self.userNode.sh(cmd1).result()
logger.info(msg2)
self.assertTrue(msg2.splitlines()[-2].strip() == result[i])
            # Drop the database
db_drop = f'''drop database aaa;'''
msg3 = self.commonsh.execut_db_sql(db_drop)
logger.info(msg3)
self.assertTrue('DROP' in msg3)
def tearDown(self):
        logger.info('--------Opengauss_Function_Innerfunc_Overlay_Case0004.py execution finished--------')
py | 1a2ff2cf5841c7d94e23405e4f6c9a7b0ee641e6 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "gazebo_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "turtlebot3_description_reduced_mesh"
PROJECT_SPACE_DIR = "/home/ubuntu/environment/HelloWorld/simulation_ws/build/turtlebot3_description_reduced_mesh/devel"
PROJECT_VERSION = "1.1.0"
|
py | 1a2ff3a4303be91e30df11d584e36598bd4917c2 | GeometryConf={
'DBExtended' : 'DB:Extended',
'DBExtendedGFlash' : 'DB:ExtendedGFlash',
'DBExtendedLiMax' : 'DB:ExtendedLiMax',
'DBExtendedLiMin' : 'DB:ExtendedLiMin',
'DBExtendedX0Max' : 'DB:ExtendedX0Max',
'DBExtendedX0Min' : 'DB:ExtendedX0Min',
'DBExtended2015' : 'DB:Extended',
'DBExtended2017' : 'DB:Extended',
'DBExtended2017ZeroMaterial' : 'DB:Extended2017ZeroMaterial',
'DBExtended2018' : 'DB:Extended',
'DBExtended2021' : 'DB:Extended',
'DBExtended2026' : 'DB:Extended2026',
'DBIdeal2015' : 'DB:Ideal2015',
'DBIdeal2015dev' : 'DB:Ideal2015dev',
'Ideal2015' : 'Ideal2015,Ideal2015Reco',
'Ideal2015dev' : 'Ideal2015dev,Ideal2015devReco',
'Extended' : 'Extended,ExtendedReco',
'Extended2015' : 'Extended2015,Extended2015Reco',
'Extended2015dev': 'Extended2015dev,Extended2015devReco',
'Extended2016' : 'Extended2016,Extended2016Reco',
'Extended2017' : 'Extended2017,Extended2017Reco',
'Extended2018' : 'Extended2018,Extended2018Reco',
'Extended2017Plan1' : 'Extended2017Plan1,Extended2017Plan1Reco',
'Extended2017Plan1ZeroMaterial' : 'Extended2017Plan1ZeroMaterial,Extended2017Plan1ZeroMaterialReco',
'Extended2021' : 'Extended2021,Extended2021Reco',
'All' : 'Configuration.Geometry.GeometrySimAll_cff,Reco',
'ECALHCAL' : 'Configuration.Geometry.GeometrySimECALHCAL_cff,Configuration.Geometry.GeometryRecoECALHCAL_cff',
'TrackerOnly' : 'Configuration.Geometry.GeometrySimTracker_cff,Configuration.Geometry.GeometryRecoTracker_cff',
'HCal' : 'Configuration.Geometry.GeometrySimHCAL_cff,Configuration.Geometry.GeometryRecoHCAL_cff',
'Extended2026D35' : 'Extended2026D35,Extended2026D35Reco',
'Extended2026D41' : 'Extended2026D41,Extended2026D41Reco',
'Extended2026D43' : 'Extended2026D43,Extended2026D43Reco',
'Extended2026D44' : 'Extended2026D44,Extended2026D44Reco',
'Extended2026D45' : 'Extended2026D45,Extended2026D45Reco',
'Extended2026D46' : 'Extended2026D46,Extended2026D46Reco',
'Extended2026D47' : 'Extended2026D47,Extended2026D47Reco',
}
|
py | 1a2ff41ce31e65dadf81b74360bf9e1cdebdf445 | """
Ethereum Virtual Machine (EVM) Interpreter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
A straightforward interpreter that executes EVM code.
"""
from dataclasses import dataclass
from typing import Iterable, Set, Tuple, Union
from ethereum.base_types import U256, Bytes0, Uint
from ethereum.utils.ensure import EnsureError, ensure
from ..eth_types import Address, Log
from ..state import (
account_has_code_or_nonce,
begin_transaction,
commit_transaction,
get_account,
increment_nonce,
move_ether,
rollback_transaction,
set_code,
touch_account,
)
from ..utils.address import to_address
from ..vm import Message
from ..vm.error import (
InsufficientFunds,
InvalidJumpDestError,
InvalidOpcode,
OutOfGasError,
StackDepthLimitError,
StackOverflowError,
StackUnderflowError,
)
from ..vm.gas import GAS_CODE_DEPOSIT, REFUND_SELF_DESTRUCT, subtract_gas
from ..vm.precompiled_contracts.mapping import PRE_COMPILED_CONTRACTS
from . import Environment, Evm
from .instructions import Ops, op_implementation
from .runtime import get_valid_jump_destinations
STACK_DEPTH_LIMIT = U256(1024)
MAX_CODE_SIZE = 0x6000
RIPEMD160_ADDRESS = to_address(Uint(3))
@dataclass
class MessageCallOutput:
"""
Output of a particular message call
Contains the following:
1. `gas_left`: remaining gas after execution.
2. `refund_counter`: gas to refund after execution.
3. `logs`: list of `Log` generated during execution.
4. `accounts_to_delete`: Contracts which have self-destructed.
5. `touched_accounts`: Accounts that have been touched.
6. `has_erred`: True if execution has caused an error.
"""
gas_left: U256
refund_counter: U256
logs: Union[Tuple[()], Tuple[Log, ...]]
accounts_to_delete: Set[Address]
touched_accounts: Iterable[Address]
has_erred: bool
def process_message_call(
message: Message, env: Environment
) -> MessageCallOutput:
"""
If `message.current` is empty then it creates a smart contract
else it executes a call from the `message.caller` to the `message.target`.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
output : `MessageCallOutput`
Output of the message call
"""
if message.target == Bytes0(b""):
is_collision = account_has_code_or_nonce(
env.state, message.current_target
)
if is_collision:
return MessageCallOutput(
U256(0), U256(0), tuple(), set(), set(), True
)
else:
evm = process_create_message(message, env)
else:
evm = process_message(message, env)
accounts_to_delete = collect_accounts_to_delete(evm, set())
evm.refund_counter += len(accounts_to_delete) * REFUND_SELF_DESTRUCT
return MessageCallOutput(
gas_left=evm.gas_left,
refund_counter=evm.refund_counter,
logs=evm.logs,
accounts_to_delete=accounts_to_delete,
touched_accounts=collect_touched_accounts(evm),
has_erred=evm.has_erred,
)
def process_create_message(message: Message, env: Environment) -> Evm:
"""
Executes a call to create a smart contract.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.spurious_dragon.vm.Evm`
Items containing execution specific objects.
"""
# take snapshot of state before processing the message
begin_transaction(env.state)
increment_nonce(env.state, message.current_target)
evm = process_message(message, env)
if not evm.has_erred:
contract_code = evm.output
contract_code_gas = len(contract_code) * GAS_CODE_DEPOSIT
try:
evm.gas_left = subtract_gas(evm.gas_left, contract_code_gas)
ensure(len(contract_code) <= MAX_CODE_SIZE, OutOfGasError)
except OutOfGasError:
rollback_transaction(env.state)
evm.gas_left = U256(0)
evm.logs = ()
evm.accounts_to_delete = dict()
evm.refund_counter = U256(0)
evm.has_erred = True
else:
set_code(env.state, message.current_target, contract_code)
commit_transaction(env.state)
else:
rollback_transaction(env.state)
return evm
def process_message(message: Message, env: Environment) -> Evm:
"""
Executes a call to create a smart contract.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.spurious_dragon.vm.Evm`
Items containing execution specific objects
"""
if message.depth > STACK_DEPTH_LIMIT:
raise StackDepthLimitError("Stack depth limit reached")
# take snapshot of state before processing the message
begin_transaction(env.state)
touch_account(env.state, message.current_target)
sender_balance = get_account(env.state, message.caller).balance
if message.should_transfer_value and message.value != 0:
if sender_balance < message.value:
rollback_transaction(env.state)
raise InsufficientFunds(
f"Insufficient funds: {sender_balance} < {message.value}"
)
move_ether(
env.state, message.caller, message.current_target, message.value
)
evm = execute_code(message, env)
if evm.has_erred:
# revert state to the last saved checkpoint
# since the message call resulted in an error
rollback_transaction(env.state)
else:
commit_transaction(env.state)
return evm
def execute_code(message: Message, env: Environment) -> Evm:
"""
Executes bytecode present in the `message`.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: `ethereum.vm.EVM`
Items containing execution specific objects
"""
code = message.code
valid_jump_destinations = get_valid_jump_destinations(code)
evm = Evm(
pc=Uint(0),
stack=[],
memory=bytearray(),
code=code,
gas_left=message.gas,
env=env,
valid_jump_destinations=valid_jump_destinations,
logs=(),
refund_counter=U256(0),
running=True,
message=message,
output=b"",
accounts_to_delete=dict(),
has_erred=False,
children=[],
)
try:
if evm.message.code_address in PRE_COMPILED_CONTRACTS:
PRE_COMPILED_CONTRACTS[evm.message.code_address](evm)
return evm
while evm.running and evm.pc < len(evm.code):
try:
op = Ops(evm.code[evm.pc])
except ValueError:
raise InvalidOpcode(evm.code[evm.pc])
op_implementation[op](evm)
except (
OutOfGasError,
InvalidOpcode,
InvalidJumpDestError,
InsufficientFunds,
StackOverflowError,
StackUnderflowError,
StackDepthLimitError,
):
evm.gas_left = U256(0)
evm.logs = ()
evm.accounts_to_delete = dict()
evm.refund_counter = U256(0)
evm.has_erred = True
except (
EnsureError,
ValueError,
):
evm.has_erred = True
finally:
return evm
def collect_touched_accounts(
evm: Evm, ancestor_had_error: bool = False
) -> Iterable[Address]:
"""
Collect all of the accounts that *may* need to be deleted based on
`EIP-161 <https://eips.ethereum.org/EIPS/eip-161>`_.
Checking whether they *do* need to be deleted happens in the caller.
See also: https://github.com/ethereum/EIPs/issues/716
Parameters
----------
evm :
The current EVM frame.
ancestor_had_error :
True if the ancestors of the evm object erred else False
Returns
-------
touched_accounts: `typing.Iterable`
returns all the accounts that were touched and may need to be deleted.
"""
# collect the coinbase account if it was touched via zero-fee transfer
if (evm.message.caller == evm.env.origin) and evm.env.gas_price == 0:
yield evm.env.coinbase
# collect those explicitly marked for deletion
# ("beneficiary" is of SELFDESTRUCT)
for beneficiary in sorted(set(evm.accounts_to_delete.values())):
if evm.has_erred or ancestor_had_error:
# Special case to account for geth+parity bug
# https://github.com/ethereum/EIPs/issues/716
if beneficiary == RIPEMD160_ADDRESS:
yield beneficiary
continue
else:
yield beneficiary
# collect account directly addressed
if not isinstance(evm.message.target, Bytes0):
if evm.has_erred or ancestor_had_error:
# collect RIPEMD160 precompile even if ancestor evm had error.
# otherwise, skip collection from children of erred-out evm objects
if evm.message.target == RIPEMD160_ADDRESS:
yield evm.message.target
else:
yield evm.message.target
# recurse into nested computations
# (even erred ones, since looking for RIPEMD160)
for child in evm.children:
yield from collect_touched_accounts(
child, ancestor_had_error=(evm.has_erred or ancestor_had_error)
)
def collect_accounts_to_delete(
evm: Evm, accounts_to_delete: Set[Address]
) -> Set[Address]:
"""
Collects all the accounts that need to deleted from the `evm` object and
its children
Parameters
----------
evm :
The current EVM frame.
accounts_to_delete :
list of accounts that need to be deleted.
Note: An empty set should be passed to this parameter. This set
is used to store the results obtained by recursively iterating over the
child evm objects
Returns
-------
touched_accounts: `set`
returns all the accounts that were touched and may need to be deleted.
"""
if not evm.has_erred:
for address in evm.accounts_to_delete.keys():
accounts_to_delete.add(address)
for child in evm.children:
collect_accounts_to_delete(child, accounts_to_delete)
return accounts_to_delete
|
py | 1a2ff4f07e115816c75cad8a5fba6e76c1da9375 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="simplevae", # Replace with your own username
version="1.0.0",
author="Chenxi Wu, Yizi Zhang",
author_email="[email protected], [email protected]",
description="Final project of STA 663: Implementation of Variational Autoencoder",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/yizi0511/sta_663_vae",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'numpy',
'tensorflow',
],
)
|
py | 1a2ff60432bc1611b23789f0e3b556f4022df84f | from definitions import PATH_INPUT_CSV, PATH_OUTPUT_GEOJSON
from modules.geojson.df_to_geojson import df_to_geojson
from modules.geojson.export_geojson import export_geojson
from modules.init.init_program import init_program
import pandas as pd
def main():
# init
init_program()
# actions to perform...
df = pd.read_csv(PATH_INPUT_CSV)
cols = ["provnum","name","address","city","state","zip","county","bedcert","ownership","employee_cases",
"resident_cases", "resident_deaths","cna_hprd",'lpn_hprd',"rn_hprd","total_hprd"]
geojson_dict = df_to_geojson(df, cols, lon="google_long", lat="google_lat")
export_geojson(geojson_dict, PATH_OUTPUT_GEOJSON)
if __name__ == '__main__':
main() |
py | 1a2ff622703e4f13a0742c22b66b54ea0b634262 | from lstm import BilstmAttention
from config import LSTMConfig
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import directory
def load_model(weight_path):
print(weight_path)
model = BilstmAttention(embed_num=859)
    model.load_state_dict(torch.load(weight_path))  # torch.load returns an OrderedDict mapping the network's parameter names to their values
model.to(device)
model.eval()
return model
@torch.no_grad()
def predict(texts):
pres_all = []
for text in tqdm(texts):
text = [int(i) for i in text.split(' ')]
        # Unify the sample length (55 tokens here): longer samples are truncated, shorter ones are padded with 858
seq_len = LSTMConfig.seq_len
if len(text) > seq_len:
text = text[:seq_len]
else:
text = text + [858] * (seq_len - len(text))
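        # e.g. (illustrative) with seq_len = 5: [3, 7, 9] becomes [3, 7, 9, 858, 858],
        # while a 7-token sample is cut back to its first 5 tokens.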
text = torch.from_numpy(np.array(text))
text = text.unsqueeze(0)
text = text.type(torch.LongTensor).cuda()
#
for i in range(len(model_list)):
model = model_list[i]
outputs = model(text)
outputs = outputs.sigmoid().detach().cpu().numpy()[0]
if i == 0:
pres_fold = outputs / len(model_list)
else:
pres_fold += outputs / len(model_list)
# print("bilstm+attention_pres_fold:",pres_fold)
# print("bilstm+attention_pres_fold:",type(pres_fold))
pres_fold = [str(p) for p in pres_fold]
pres_fold = ' '.join(pres_fold)
pres_all.append(pres_fold)
return pres_all
if __name__ == "__main__":
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_list = []
n_splits = LSTMConfig.n_splits
for i in range(n_splits):
model_list.append(load_model('./dl/user_data/model_data/label1/LSTMfold_' + str(i + 1) + '_best.pth'))
test_df = pd.read_csv(directory.SEMI_TEST_SET_B_PATH, header=None)
test_df.columns = ['report_ID', 'description']
submit = test_df.copy()
print("test_df:{}".format(test_df.shape))
new_des = [i.strip('|').strip() for i in test_df['description'].values]
'''
    # Get the stopwords
stopwords_path = './dl/code/test/label1/stopwords.txt'
stopwords = []
with open(stopwords_path, 'r', encoding='utf-8') as f:
for line in f:
if len(line) > 0:
stopwords.append(line.strip())
    # Remove the stopwords from new_des_test
for j in range(0, len(new_des)):
str2lst = new_des[j].split()
copy = str2lst[:]
for i in copy:
if i in stopwords:
copy.remove(i)
str2lst = copy
lst2str = " ".join(str(i) for i in str2lst)
new_des[j] = lst2str
'''
test_df['description'] = new_des
sub_id = test_df['report_ID'].values
print(sub_id[0])
save_dir = './dl/prediction_result/label1/'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
pres_all = predict(new_des)
str_w = ''
with open(save_dir + 'submit_lstm.csv', 'w') as f:
for i in range(len(sub_id)):
str_w += sub_id[i] + ',' + '|' + pres_all[i] + '\n'
str_w = str_w.strip('\n')
f.write(str_w)
|