| content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang |
| stringlengths 7-928k | float64 3.5-33.8k | int64 6-139k | float64 0.08-0.96 | sequence | stringlengths 7-104 | stringlengths 4-230 | int64 7-928k | stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
from eingabe import EinsatzstoffeEingabe
from ausgabe import *
from verarbeitung import *
from plannung import *
if __name__ == '__main__':
eingabe1 = EinsatzstoffeEingabe(100000, "Erz", "Indien")
eingabe2 = EinsatzstoffeEingabe(59000, "Kochen", "Rumänien")
ausgabe1 = ProzessAusgabe(100, 200, "Schienen")
ausgabe2 = ProzessAusgabe(300, 1200, "Rohre")
verarbeitung0 = StahlVerarbeitung(1, "Walzwerk", 4)
verarbeitung1 = VorVerarbeitung(1, "Walzwerk", 2, ausgabe1)
verarbeitung2 = HauptVerarbeitung(1, "Stahlwerk", 3, ausgabe2)
verarbeitung0.getSchrittDauer()
verarbeitung1.getSchrittDauer()
verarbeitung2.getSchrittDauer()
plannung1 = StahlProzessPlannung(2, "Vorverarbeitung", verarbeitung1)
plannung2 = StahlProzessPlannung(2, "Hauptverarbeitung", verarbeitung2)
| 35.083333 | 76 | 0.723278 | ["MIT"] | caxenie/oom-oop-intro | main.py | 843 | Python |
from chatterbot.trainers import ListTrainer
from chatterbot import ChatBot
bot = ChatBot('Test')
conversa = ['oi', 'olá', 'Tudo bem?', 'Estou bem']
conversa2 = ['Gosta de futebol?','Eu adoro,sou tricolor Paulista e você','Qual seu filme favorito?' , 'O meu é Rocky 1']
bot.set_trainer(ListTrainer)
bot.train(conversa)
bot.train(conversa2)
while True:
quest = input ("Voce:")
resposta = bot.get_response(quest)
#if float (resposta.confidence) > 0.5:
print ('Bot:', resposta)
#else:
# print ("Eu não sei")
| 26.4 | 120 | 0.681818 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | Ageursilva/Bot | Bot.py | 532 | Python |
from peewee import *
import psycopg2
import datetime
db = PostgresqlDatabase("prueba", host="localhost", port=5432, user="postgres", password="P@ssw0rd")
class BaseModel(Model):
class Meta:
database = db
class User(BaseModel):
Username = CharField(unique = True)
email = CharField(unique = True)
created_date = DateTimeField(default= datetime.datetime.now)
class Meta:
db_table = 'Users'
if __name__== '__main__':
if not User.table_exists():
User.create_table()
query_1 = User.select().where( User.Username == "Raul").get()
print (query_1.email)
for all_users in User.select():
print (all_users.Username)
| 26.230769 | 100 | 0.670088 | ["MIT"] | Raul-Flores/ORM-example | Postgress-example/peewee-orm-test.py | 682 | Python |
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
import argparse
import pathlib
import pandas as pd
from jinja2 import Environment, PackageLoader, select_autoescape
parser = argparse.ArgumentParser(
description='Takes an input csv report from the run_designs.py script and creates an html summary for it')
parser.add_argument('--csv_file', '-i',required=True,
help='The input csv file')
parser.add_argument('--html_file', '-o', required=True,
help='The output html file')
args = parser.parse_args()
csv_file = args.csv_file
html_file = args.html_file
env = Environment(
loader=PackageLoader('csv2html', 'templates'),
autoescape=select_autoescape('html')
)
template = env.get_template('main.html')
def get_static_folder(file_name):
p = pathlib.Path('.')
return pathlib.PosixPath(str(p) +'/scripts/csv2html/static/'+str(file_name))
def read_csv(csv_file):
csv_file_opener = open(csv_file, 'r')
csv_data = csv.reader(csv_file_opener)
csv_headers = next(csv_data)
return csv_headers, csv_data
def create_output_html(csv_file, html_file):
colms = ['design','config','runtime','DIEAREA_mm^2','OpenDP_Util','cell_count','tritonRoute_violations',
'Short_violations', 'Magic_violations', 'antenna_violations', 'wns', 'CLOCK_PERIOD']
allData = pd.read_csv(csv_file, error_bad_lines=False)
dataFrame = pd.DataFrame(data=allData)
usedData = dataFrame[colms]
usedData.to_csv(csv_file.split(".csv")[0]+"_tmp_report.csv")
headers, data = read_csv(csv_file.split(".csv")[0]+"_tmp_report.csv")
with open(html_file, 'w') as output:
static_file = 'style.css'
output.write(template.render(headers=headers, rows=data, style_url=get_static_folder(static_file).resolve()))
os.remove(csv_file.split(".csv")[0]+"_tmp_report.csv")
if __name__ == '__main__':
create_output_html(csv_file, html_file)
| 32.75 | 117 | 0.723584 | ["Apache-2.0"] | Manarabdelaty/openlane | scripts/csv2html/csv2html.py | 2,489 | Python |
"""
Load volumes into vpv from a toml config file. Just load volumes and no overlays
Examples
--------
Example toml file
orientation = 'sagittal'
[top]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
[bottom]
specimens = [
'path1.nrrd',
'path2.nrrd',
'path3.nrrd']
"""
import sys
from pathlib import Path
from itertools import chain
import toml
from PyQt5 import QtGui
from vpv.vpv import Vpv
from vpv.common import Layers
from typing import Dict
def load(config: Dict):
top_vols = config['top']['specimens']
bottom = config['bottom']['specimens']
if bottom:
bottom_vols = config['bottom']['specimens']
else: # We allow only the top view to be visible
bottom_specs = []
bottom_vols = []
bottom_labels = []
app = QtGui.QApplication([])
ex = Vpv()
p2s = lambda x: [str(z) for z in x]
all_vols = top_vols + bottom_vols
ex.load_volumes(chain(p2s(top_vols), p2s(bottom_vols)), 'vol')
# Set the top row of views
for i in range(3):
try:
vol_id = Path(top_vols[i]).stem
ex.views[i].layers[Layers.vol1].set_volume(vol_id)
except IndexError:
continue
if bottom:
# Set the bottom row of views
for i in range(3):
try:
vol_id = Path(bottom_vols[i]).stem
ex.views[i + 3].layers[Layers.vol1].set_volume(vol_id)
except IndexError:
continue
print('Finished loading')
# Show two rows
ex.data_manager.show2Rows(True if bottom else False)
# Set orientation
ex.data_manager.on_orientation(config['orientation'])
sys.exit(app.exec_())
if __name__ == '__main__':
file_ = sys.argv[1]
config = toml.load(file_)
load(config)
| 19.326087 | 80 | 0.615298 | ["Apache-2.0"] | Dorky-Lever/vpv | utils/data_loader_2.py | 1,778 | Python |
from Person_1.project.person import Person
class Child(Person):
pass
| 12.5 | 42 | 0.76 | ["MIT"] | EmilianStoyanov/Projects-in-SoftUni | python__OOP/09.inheritance_exercise/01.person/child.py | 75 | Python |
# author rovo98
import os
import tensorflow as tf
from tensorflow.keras.utils import plot_model
from tensorflow.keras.callbacks import EarlyStopping
from model_data_input import load_processed_dataset
from models.fdconv1d_lstm.model import build_fdconv1d_lstm
from models.utils.misc import running_timer
from models.utils.misc import plot_training_history
# filter warning logs of tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# enable memory growth for every GPU.
# Using GPU devices to train the models is recommended.
# uncomment the following several lines of code to disable forcing using GPU.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
assert len(physical_devices) > 0, 'Not enough GPU hardware available'
for gpu in physical_devices:
tf.config.experimental.set_memory_growth(gpu, True)
# noinspection DuplicatedCode
@running_timer
def train_model(epochs=10,
batch_size=32,
training_verbose=1,
print_model_summary=False,
using_validation=False,
validation_split=0.2,
plot_history_data=False,
history_fig_name='default',
plot_model_arch=False,
plot_model_name='default',
save_model=False,
save_model_name='default'):
# num_of_faulty_type = 3
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-02-22 20:34:10_czE4OmZzNDphczE2OmZlczI=_processed_logs_rnn', num_of_faulty_type,
# location='../../dataset', for_rnn=True)
#
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2019-12-28 00:46:37_czc1OmZzNzphczE1OmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 1. single faulty mode(small state size): short logs (10 - 50)
num_of_faulty_type = 3
train_x, train_y, test_x, test_y = load_processed_dataset(
'2020-03-17 15:55:22_czE4OmZzNDphczE2OmZlczI=_processed_logs', num_of_faulty_type,
location='../../dataset')
# 2. single faulty mode(small state size): long logs (60 - 100)
# num_of_faulty_type = 3
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:00:22_czE4OmZzNDphczE2OmZlczI=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
# 3. single faulty mode(big state size): short logs (10 - 50)
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:16:04_czgwOmZzODphczE4OmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 4. single faulty mode(big state size): long logs (60 - 100)
# num_of_faulty_type = 5
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-19 17:09:05_czgwOmZzODphczE4OmZlczQ=_processed_logs_b_rg', num_of_faulty_type,
# location='../../dataset')
# 5. multi faulty mode (small state size): short logs
# num_of_faulty_type = 4
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:34:50_czE3OmZzNDphczE0OmZlczI=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 6. multi faulty mode (small state size): long logs
# num_of_faulty_type = 4
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:36:40_czE3OmZzNDphczE0OmZlczI=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
# 7. multi faulty mode (big state size): short logs
# num_of_faulty_type = 16
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:40:03_czgwOmZzODphczIwOmZlczQ=_processed_logs', num_of_faulty_type,
# location='../../dataset')
# 8. multi faulty mode (big state size): long logs
# num_of_faulty_type = 16
# train_x, train_y, test_x, test_y = load_processed_dataset(
# '2020-03-17 16:41:29_czgwOmZzODphczIwOmZlczQ=_processed_logs_b', num_of_faulty_type,
# location='../../dataset')
n_timesteps, n_features = train_x.shape[1], train_x.shape[2]
# building the model.
model = build_fdconv1d_lstm((n_timesteps, n_features), num_of_faulty_type, kernel_size=31)
# print out the model summary
if print_model_summary:
model.summary()
# plot and save the model architecture.
if plot_model_arch:
plot_model(model, to_file=plot_model_name, show_shapes=True)
# fit network
if plot_history_data:
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split)
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
elif using_validation:
es = EarlyStopping('val_categorical_accuracy', 1e-4, 3, 1, 'max')
history = model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size,
verbose=training_verbose, validation_split=validation_split, callbacks=[es])
plot_training_history(history, 'fdconv1d-lstm', history_fig_name, '../exper_imgs')
else:
model.fit(x=[train_x, train_x], y=train_y, epochs=epochs, batch_size=batch_size, verbose=training_verbose)
_, accuracy = model.evaluate(x=[test_x, test_x], y=test_y, batch_size=batch_size, verbose=0)
# saving the model
if save_model:
model.save(save_model_name)
print('>>> model saved: {}'.format(save_model_name))
print('\n>>> Accuracy on testing given testing dataset: {}'.format(accuracy * 100))
# Driver the program to test the methods above.
if __name__ == '__main__':
train_model(50,
print_model_summary=True,
using_validation=True,
history_fig_name='fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.png',
save_model=True,
save_model_name='../trained_saved/fdConv1d-lstm_czE4OmZzNDphczE2OmZlczI=_small.h5')
| 43.697842 | 114 | 0.689167 | ["Apache-2.0"] | rovo98/model-unkown-dfa-diagnosis-based-on-running-logs | models/fdconv1d_lstm/train.py | 6,074 | Python |
from app.crud.crud_crosswalk import *
from app.crud.crud_statistics import *
from app.crud.crud_users import *
| 27.75 | 38 | 0.810811 | ["MIT"] | Infam852/IoT-project | backend/app/crud/__init__.py | 111 | Python |
"""babyshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('products.urls')),
path('users/',include('users.urls')),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| 37.259259 | 77 | 0.724652 | ["MIT"] | MET-DEV/Django-E-Commerce | babyshop_app/babyshop/urls.py | 1,006 | Python |
# __author__ = 'clarkmatthew'
#
import json
class Namespace(object):
"""
Convert dict (if provided) into attributes and return a somewhat
generic object
"""
def __init__(self, newdict=None):
if newdict:
for key in newdict:
value = newdict[key]
try:
if isinstance(value, dict):
setattr(self, key, Namespace(value))
else:
setattr(self, key, value)
except:
print '"{0}" ---> "{1}" , type: "{2}"'.format(key,
value,
type(value))
raise
def _get_keys(self):
return vars(self).keys()
def _to_json(self):
return json.dumps(self,
default=lambda o: o.__dict__,
sort_keys=True,
indent=4)
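# A minimal usage sketch (illustrative only, not part of the original module;
# the dictionary below is a hypothetical input):
#
#   ns = Namespace({'host': 'localhost', 'limits': {'cpu': 2}})
#   print ns.host          # -> localhost
#   print ns._get_keys()   # e.g. ['host', 'limits'] (order not guaranteed)
#   print ns._to_json()    # pretty-printed JSON of all attributes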
| 30.323529 | 78 | 0.402522 | ["Apache-2.0"] | tbeckham/DeploymentManager | config_manager/namespace.py | 1,031 | Python |
import datetime
import json
import os
import sys
import urllib
import urlparse
from collections import OrderedDict
from time import mktime
import dateutil.parser
import feedparser
import requests
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from bs4 import BeautifulSoup
stations = {
'p00fzl68': {'name': 'BBC Asian Network', 'image': 'bbc_asian_network_colour'},
'p00fzl78': {'name': 'BBC Coventry & Warwickshire', 'image': 'bbc_radio_coventry_warwickshire_colour'},
'p00fzl7f': {'name': 'BBC Essex', 'image': 'bbc_radio_essex_colour'},
'p00fzl7q': {'name': 'BBC Hereford & Worcester', 'image': 'bbc_radio_hereford_worcester_colour'},
'p00fzl82': {'name': 'BBC Newcastle', 'image': 'bbc_radio_newcastle_colour'},
'p00fzl86': {'name': 'BBC Radio 1', 'image': 'bbc_radio_one_colour'},
'p00fzl64': {'name': 'BBC Radio 1Xtra', 'image': 'bbc_1xtra_colour'},
'p00fzl8v': {'name': 'BBC Radio 2', 'image': 'bbc_radio_two_colour'},
'p00fzl8t': {'name': 'BBC Radio 3', 'image': 'bbc_radio_three_colour'},
'p00fzl7j': {'name': 'BBC Radio 4 FM', 'image': 'bbc_radio_fourfm_colour'},
'p00fzl7k': {'name': 'BBC Radio 4 LW', 'image': 'bbc_radio_four_colour'},
'p00fzl7l': {'name': 'BBC Radio 4 Extra', 'image': 'bbc_radio_four_extra_colour'},
'p00fzl7g': {'name': 'BBC Radio 5 live', 'image': 'bbc_radio_five_live_colour'},
'p00fzl7h': {'name': 'BBC Radio 5 live sports extra', 'image': 'bbc_radio_five_live_sports_extra_colour'},
'p00fzl65': {'name': 'BBC Radio 6 Music', 'image': 'bbc_6music_colour'},
'p00fzl74': {'name': 'BBC Radio Berkshire', 'image': 'bbc_radio_berkshire_colour'},
'p00fzl75': {'name': 'BBC Radio Bristol', 'image': 'bbc_radio_bristol_colour'},
'p00fzl76': {'name': 'BBC Radio Cambridgeshire', 'image': 'bbc_radio_cambridge_colour'},
'p00fzl77': {'name': 'BBC Radio Cornwall', 'image': 'bbc_radio_cornwall_colour'},
'p00fzl79': {'name': 'BBC Radio Cumbria', 'image': 'bbc_radio_cumbria_colour'},
'p00fzl7b': {'name': 'BBC Radio Cymru', 'image': 'bbc_radio_cymru_colour'},
'p00fzl7c': {'name': 'BBC Radio Derby', 'image': 'bbc_radio_derby_colour'},
'p00fzl7d': {'name': 'BBC Radio Devon', 'image': 'bbc_radio_devon_colour'},
'p00fzl7m': {'name': 'BBC Radio Foyle', 'image': 'bbc_radio_foyle_colour'},
'p00fzl7n': {'name': 'BBC Radio Gloucestershire', 'image': 'bbc_radio_gloucestershire_colour'},
'p00fzl7p': {'name': 'BBC Radio Guernsey', 'image': 'bbc_radio_guernsey_colour'},
'p00fzl7r': {'name': 'BBC Radio Humberside', 'image': 'bbc_radio_humberside_colour'},
'p00fzl7s': {'name': 'BBC Radio Jersey', 'image': 'bbc_radio_jersey_colour'},
'p00fzl7t': {'name': 'BBC Radio Kent', 'image': 'bbc_radio_kent_colour'},
'p00fzl7v': {'name': 'BBC Radio Lancashire', 'image': 'bbc_radio_lancashire_colour'},
'p00fzl7w': {'name': 'BBC Radio Leeds', 'image': 'bbc_radio_leeds_colour'},
'p00fzl7x': {'name': 'BBC Radio Leicester', 'image': 'bbc_radio_leicester_colour'},
'p00fzl7y': {'name': 'BBC Radio Lincolnshire', 'image': 'bbc_radio_lincolnshire_colour'},
'p00fzl6f': {'name': 'BBC Radio London', 'image': 'bbc_london_colour'},
'p00fzl7z': {'name': 'BBC Radio Manchester', 'image': 'bbc_radio_manchester_colour'},
'p00fzl80': {'name': 'BBC Radio Merseyside', 'image': 'bbc_radio_merseyside_colour'},
'p00fzl81': {'name': 'BBC Radio Nan Gaidheal', 'image': 'bbc_radio_nan_gaidheal_colour'},
'p00fzl83': {'name': 'BBC Radio Norfolk', 'image': 'bbc_radio_norfolk_colour'},
'p00fzl84': {'name': 'BBC Radio Northampton', 'image': 'bbc_radio_northampton_colour'},
'p00fzl85': {'name': 'BBC Radio Nottingham', 'image': 'bbc_radio_nottingham_colour'},
'p00fzl8c': {'name': 'BBC Radio Oxford', 'image': 'bbc_radio_oxford_colour'},
'p00fzl8d': {'name': 'BBC Radio Scotland (FM)', 'image': 'bbc_radio_scotland_fm_colour'},
'p00fzl8g': {'name': 'BBC Radio Scotland (MW)', 'image': 'bbc_radio_scotland_colour'},
'p00fzl8b': {'name': 'BBC Radio Scotland (Orkney)', 'image': 'bbc_radio_scotland_colour'},
'p00fzl8j': {'name': 'BBC Radio Scotland (Shetland)', 'image': 'bbc_radio_scotland_colour'},
'p00fzl8h': {'name': 'BBC Radio Sheffield', 'image': 'bbc_radio_sheffield_colour'},
'p00fzl8k': {'name': 'BBC Radio Shropshire', 'image': 'bbc_radio_shropshire_colour'},
'p00fzl8l': {'name': 'BBC Radio Solent', 'image': 'bbc_radio_solent_colour'},
'p00fzl8n': {'name': 'BBC Radio Stoke', 'image': 'bbc_radio_stoke_colour'},
'p00fzl8p': {'name': 'BBC Radio Suffolk', 'image': 'bbc_radio_suffolk_colour'},
'p00fzl8w': {'name': 'BBC Radio Ulster', 'image': 'bbc_radio_ulster_colour'},
'p00fzl8y': {'name': 'BBC Radio Wales (FM)', 'image': 'bbc_radio_wales_fm_colour'},
'p00fzl8x': {'name': 'BBC Radio Wales (LW)', 'image': 'bbc_radio_wales_colour'},
'p00fzl90': {'name': 'BBC Radio York', 'image': 'bbc_radio_york_colour'},
'p00fzl8m': {'name': 'BBC Somerset', 'image': 'bbc_radio_somerset_sound_colour'},
'p00fzl8q': {'name': 'BBC Surrey', 'image': 'bbc_radio_surrey_colour'},
'p00fzl8r': {'name': 'BBC Sussex', 'image': 'bbc_radio_sussex_colour'},
'p00fzl93': {'name': 'BBC Tees', 'image': 'bbc_tees_colour'},
'p00fzl96': {'name': 'BBC Three Counties Radio', 'image': 'bbc_three_counties_radio_colour'},
'p00fzl8z': {'name': 'BBC Wiltshire', 'image': 'bbc_radio_wiltshire_colour'},
'p00fzl9f': {'name': 'BBC WM 95.6', 'image': 'bbc_wm_colour'},
'p02zbmb3': {'name': 'BBC World Service', 'image': 'bbc_world_service_colour'},
'p02jf21y': {'name': 'CBeebies Radio', 'image': 'cbeebies_radio_colour'},
}
stations_ordered = OrderedDict(sorted(stations.items(), key=lambda x: x[1]['name']))
def get_page(url):
# download the source HTML for the page using requests
# and parse the page using BeautifulSoup
return BeautifulSoup(requests.get(url).text, 'html.parser')
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
# Parse the stuff passed into the addon
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = dict(urlparse.parse_qsl(sys.argv[2][1:]))
xbmcplugin.setContent(addon_handle, 'audio')
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
def mode_default():
categories = {
'podcasts': 'Podcasts',
'stations': 'Stations'
}
for mode, category in categories.items():
url = build_url({'mode': mode})
li = xbmcgui.ListItem(category)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_episode(pid):
programme = requests.get('https://www.bbc.co.uk/programmes/' + pid + '.json')
programme_json = programme.json()["programme"]
picked_url = None
for version in programme_json["versions"]:
playlist = requests.get(
'https://open.live.bbc.co.uk/mediaselector/6/select/version/2.0/mediaset/iptv-all/vpid/' + version["pid"] + '/format/json')
playlist_json = playlist.json()
if "media" not in playlist_json:
# TODO
continue
# Filter by only audio items, and order with the highest bitrate first
audio_items = [item for item in playlist_json['media'] if item['kind'] == 'audio']
audio_items.sort(key=lambda x: x['bitrate'], reverse=True)
xbmc.log('Found {0} audio items for the programme version {1}'.format(len(audio_items), version['pid']), level=xbmc.LOGNOTICE)
# Pick the first stream available for the highest bitrate item
picked_stream = audio_items[0]
picked_url = picked_stream["connection"][1]["href"]
xbmc.log('Picked the {0} stream with the bitrate {1}'.format(picked_stream['encoding'], picked_stream['bitrate']), level=xbmc.LOGNOTICE)
play_item = xbmcgui.ListItem(path=picked_url)
play_item.setArt({
'thumb': 'https://ichef.bbci.co.uk/images/ic/480xn/' + programme_json["image"]["pid"] + '.jpg',
'icon': 'https://ichef.bbci.co.uk/images/ic/480xn/' + programme_json["image"]["pid"] + '.jpg'
})
play_item.setInfo('music', {
'title': programme_json["display_title"]["title"],
'artist': programme_json["display_title"]["subtitle"],
'album': programme_json["ownership"]["service"]["title"],
'comment': programme_json["short_synopsis"]
})
xbmcplugin.setResolvedUrl(addon_handle, True, listitem=play_item)
if picked_url is None:
xbmcgui.Dialog().notification(__addonname__, "Episode not available to stream", icon=xbmcgui.NOTIFICATION_ERROR)
def mode_podcasts():
podcasts = requests.get('https://www.bbc.co.uk/podcasts.json')
podcasts_json = podcasts.json()["podcasts"]
# Sort the podcasts by title
podcasts_ordered = sorted(podcasts_json, key=lambda x: x["title"])
for podcast in podcasts_ordered:
url = build_url({'mode': 'podcast', 'pid': podcast["shortTitle"]})
li = xbmcgui.ListItem(podcast["title"])
li.setInfo('video', {'plot': podcast["description"]})
if "imageUrl" in podcast:
li.setThumbnailImage(podcast["imageUrl"].replace('{recipe}', '624x624'))
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_podcast(pid):
podcast = feedparser.parse('https://podcasts.files.bbci.co.uk/' + pid + '.rss')
image_url = None
if "image" in podcast.feed:
image_url = podcast.feed.image.url
for entry in podcast.entries:
entry_pid = entry.ppg_canonical.split('/')
entry_date = datetime.datetime.fromtimestamp(mktime(entry.published_parsed)).strftime('%Y-%m-%d')
entry_title = entry_date + ": " + entry.title
if len(entry_pid) > 2:
url = build_url({'mode': 'episode', 'pid': entry_pid[2]})
li = xbmcgui.ListItem(entry_title)
li.setInfo('video', {'plot': entry.description})
li.setThumbnailImage(image_url)
li.setProperty('IsPlayable', 'true')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
else:
xbmc.log('No pid could be found for the item at ' + entry.link, level=xbmc.LOGERROR)
xbmcplugin.endOfDirectory(addon_handle)
def mode_stations():
for pid, station in stations_ordered.items():
url = build_url({'mode': 'station', 'pid': pid})
li = xbmcgui.ListItem(station['name'])
li.setThumbnailImage(xbmc.translatePath(os.path.join(__addon__.getAddonInfo('path'), 'resources', station['image'] + '.png')))
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_station(pid):
base = datetime.datetime.today()
# Create a range of the last 30 days
for delta in range(30):
date = base - datetime.timedelta(days=delta)
year = '%04d' % date.year
month = '%02d' % date.month
day = '%02d' % date.day
url = build_url({'mode': 'station_date', 'pid': pid, 'year': year, 'month': month, 'day': day})
list_item = xbmcgui.ListItem(date.strftime('%Y-%m-%d (%A)'))
xbmcplugin.addDirectoryItem(addon_handle, url, list_item, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
def mode_station_date(pid, year, month, day):
# Load the schedules for the station
schedule = get_page('https://www.bbc.co.uk/schedules/' + pid + '/' + year + '/' + month + '/' + day)
result = None
for tag in schedule.find_all('script', type='application/ld+json'):
if 'RadioEpisode' in tag.contents[0]:
result = json.loads(tag.contents[0])
if result is None:
xbmcgui.Dialog().notification(__addonname__, "Something went wrong parsing the station's schedule",
icon=xbmcgui.NOTIFICATION_ERROR)
return
for episode in result["@graph"]:
date = dateutil.parser.parse(episode["publication"]["startDate"])
time = date.strftime('%Y-%m-%d, %H:%M')
if "partOfSeries" in episode:
title = time + ": " + episode["partOfSeries"]["name"] + " - " + episode["name"]
else:
title = time + ": " + episode["name"]
url = build_url({'mode': 'episode', 'pid': episode["identifier"]})
list_item = xbmcgui.ListItem(title)
list_item.setInfo('video', {'plot': episode["description"]})
list_item.setPath(url)
list_item.setProperty('IsPlayable', "true")
list_item.setThumbnailImage(episode["image"])
xbmcplugin.addDirectoryItem(addon_handle, url, list_item, isFolder=False)
xbmcplugin.endOfDirectory(addon_handle)
mode = args.get('mode', None)
if mode is None:
mode_default()
elif mode == 'episode':
mode_episode(args['pid'])
elif mode == 'podcasts':
mode_podcasts()
elif mode == 'podcast':
mode_podcast(args['pid'])
elif mode == 'stations':
mode_stations()
elif mode == 'station':
mode_station(args['pid'])
elif mode == 'station_date':
mode_station_date(args['pid'], args['year'], args['month'], args['day'])
| 44.781145 | 144 | 0.656917 | ["MIT"] | jonjomckay/kodi-addon-bbcsounds | addon.py | 13,300 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
import struct
from test_framework.test_framework import BitcoinTestFramework
from test_framework.mininode import *
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
miner_reward = Decimal('0.005')
# All nodes should start with starting_balance:
starting_balance = BASE_CB_AMOUNT * 25
for i in range(self.num_nodes):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
burn_foo = -find_burned_amount_in_tx(fund_foo_tx)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
burn_bar = -find_burned_amount_in_tx(fund_bar_tx)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar)
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
outputs_count = 4 # dest, change, burn1, burn2
assert_equal(len(rawtx1['vout']), outputs_count)
tx1_cl = CTransaction()
tx1_cl.nVersion = 2
tx1_cl.vin = [CTxIn(COutPoint(int(rawtx1['vin'][0]['txid'], 16), rawtx1['vin'][0]['vout']), b'', 0xFFFFFFFE)]
for out in rawtx1['vout']:
tx1_cl.vout.append(CTxOut(ToSatoshi(out['value']), hex_str_to_bytes(out['scriptPubKey']['hex'])))
tx1_cl.nLockTime = rawtx1['locktime']
clone_raw = bytes_to_hex_str(tx1_cl.serialize())
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50 PLCU for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"] - burn_foo - burn_bar
if self.options.mine_block: expected += miner_reward
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
burned1 = -find_burned_amount_in_tx(tx1)
burned2 = -find_burned_amount_in_tx(tx2)
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + miner_reward * 2 PLCU for 2 matured,
# less possible orphaned matured subsidy
expected += miner_reward * 2
if (self.options.mine_block):
expected -= miner_reward
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"] - burn_foo
- 29
+ fund_bar_tx["fee"] - burn_bar
+ miner_reward * 2)
# Node1's "from0" account balance
burned1 = -find_burned_amount_in_tx(tx1)
burned2 = -find_burned_amount_in_tx(tx2)
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]) - burned1 - burned2)
if __name__ == '__main__':
TxnMallTest().main()
| 47.683544 | 117 | 0.623706 | ["MIT"] | plc-ultima/plcu | test/functional/txn_clone.py | 7,534 | Python |
import hydra
import os
import logging
import json
import numpy as np
import torch
import matplotlib.pyplot as plt
from collections import defaultdict
import json
from IPython import embed
# from AD_models import AD_Time_Series
# from AD_utils import AD_report, AD_dataset, plot_AD_dataset, AD_preprocessing
# import T_models, A_models
import stric.datasets as datasets
import stric.detection_models.time_series_models as models
import stric.detection_models.detector_models as detectors
from stric.detection_models.time_series_models.stric import InterpretableTCNFading
import stric.detection_models.detector_models.likelihood_ratio_estimators as likelihood_ratio_estimators
from stric.detection_models.detector_models.base_detector import Detector
@hydra.main(config_name="config/config_interpretable_model")
def main(cfg):
data_path = os.path.join(hydra.utils.get_original_cwd(), 'data')
dataset = datasets.__dict__[cfg.dataset.info.name](
past_len=cfg.t_model.info.memory_length,
fut_len=cfg.t_model.info.pred_length,
data_path=data_path,
dataset_subset=cfg.dataset.info.subname,
dataset_index=cfg.dataset.info.index,
normalize=cfg.dataset.preprocessing.normalize,
)
linear_kernel_sizes = cfg.t_model.info.linear_kernel_sizes
interpretable_kernel_sizes = cfg.t_model.info.memory_length if linear_kernel_sizes is None else linear_kernel_sizes
############# Trend parameters ################
HP_lams = np.logspace(8, 10, cfg.t_model.info.num_trends_filters) # Range of values of regularization parameter for HP filter (regulates the regularity of the trend component)
HP_Ts = [interpretable_kernel_sizes] * cfg.t_model.info.num_trends_filters # Lenght of the HP filter (here we could choose large numbers if we want to increase the memory of the HP filter)
############# Periodic part parameters ################
theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
r = np.random.uniform(1, 1, cfg.t_model.info.n_periodic_poles).reshape(-1, 1)
purely_periodic_poles = np.concatenate((r, theta), 1)
############# Linear part parameters ################
real_poles = np.random.uniform(-1, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
theta = np.random.uniform(2 * np.pi / 20, 2 * np.pi / 10, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
r = np.random.uniform(0, 1, cfg.t_model.info.n_complex_poles).reshape(-1, 1)
complex_poles = np.concatenate((r, theta), 1)
model = InterpretableTCNFading(data=dataset, test_portion=cfg.t_model.info.test_portion,
memory_length=cfg.t_model.info.memory_length, pred_length=cfg.t_model.info.pred_length,
input_channels=dataset.n_timeseries, output_channels=dataset.n_timeseries,
linear_kernel_sizes=interpretable_kernel_sizes,
HP_lams=HP_lams, HP_Ts=HP_Ts,
purely_periodic_poles=purely_periodic_poles,
real_poles=real_poles,
complex_poles=complex_poles,
num_channels_TCN=cfg.t_model.info.num_channels_TCN,
kernel_size_TCN=cfg.t_model.info.kernel_size_TCN,
dropout_TCN=cfg.t_model.info.dropout_TCN,
learnable_filters=False, random_init=False,
).to(cfg.device)
model.train_model(bs=cfg.t_model.info.bs, lr=cfg.t_model.info.lr, epochs=cfg.t_model.info.epochs)
# To visualize predictions per time-series (this plots all the available time-series)
model.visualize(save=cfg.save_images)
# Test predictive performance of the trained_model: see prediction errors across time-series for training and test
ind = 4
train_residuals, test_residuals = model.get_residuals(ind=ind)
# Save results
predictions_logs = defaultdict(list)
predictions_logs['train_residuals'] = train_residuals.tolist()
predictions_logs['test_residuals'] = test_residuals.tolist()
predictions_logs['train_residuals_stds'] = train_residuals.std(0).tolist()
predictions_logs['test_residuals_stds'] = test_residuals.std(0).tolist()
predictions_logs['train_residuals_stds_mean'] = train_residuals.std(0).mean().item()
predictions_logs['test_residuals_stds_mean'] = test_residuals.std(0).mean().item()
with open('predictions_logs.json', 'w') as file:
json.dump(predictions_logs, file)
# Plot Interepretable decomposition
_ = model.get_components(ind=None, save=cfg.save_images)
# Anomaly detection
####### Detector' HPs ########
kernel_length_scale = cfg.a_model.info.kernel_length_scale * test_residuals.std()
kernel_type = cfg.a_model.info.kernel_type
kernel_hps = {'length_scales': torch.tensor(kernel_length_scale), 'train_length_scales': False,
'scale_factor': torch.tensor(1.), 'train_scale_factor': False}
ones = np.ones(dataset.n_timeseries)
####### Detector' HPs ########
a_model = Detector(test_residuals, detectors.__dict__[cfg.a_model.type],
cfg.a_model.info.kernel_type, kernel_hps, win_length=cfg.a_model.info.k, n=cfg.a_model.info.n,
device=cfg.device)
a_model.fit()
log_lik = a_model.get_future_log_lik()
a_labels = a_model.get_anomaly_labels(cfg.a_model.info.threshold * ones)
a_model.visualize_anomaly_scores(save=cfg.save_images)
a_model.visualize_anomaly_labels(thresholds=cfg.a_model.info.threshold * ones, save=cfg.save_images)
# Save results
anomaly_logs = defaultdict(list)
anomaly_logs['log_lik'] = log_lik.tolist()
anomaly_logs['a_labels'] = a_labels.tolist()
with open('anomaly_logs.json', 'w') as file:
json.dump(anomaly_logs, file)
if __name__ == "__main__":
main()
| 48.892308 | 193 | 0.65922 | ["Apache-2.0"] | LucaZancato/stric | main.py | 6,356 | Python |
"""DATA STRUCTURES"""
# Algorithms are sets of rules used to solve problems
# Data structures are a way of organizing data in a computer
# colors = ['red', 'yellow', [5, 6], 'blue']
friends = ['Josh', 'Renee', 'Agnes']
# print(colors)
# print(colors[1])
# colors[2] = 'green' # mutability of lists
# print(colors)
# print(len(friends))
# print(len(colors)) # gives you the number of items in the list variable
# print(range(len(friends)))
# for i in range(len(friends)): # loops through list when you know position of items
# friend = friends[i]
# print('Happy new year,', friend)
# for friend in friends: # better for looping since you get to write less code
# print('Happy New Year, %s!' % friend)
numbers = [2, 4, 6, 8, 10]
for i in range(len(numbers)): # range can also be used as such to update elements using indices
numbers[i] = numbers[i] * 2
print(numbers)
| 35.36 | 96 | 0.676471 | ["MIT"] | Peabody29/Python_Projects-ST | Lists/lists-beg.py | 884 | Python |
import numpy as np
import random
import os
import json
import math
import cv2
def getPaddedROI(img, center_x, center_y, width, height):
#print(str(int(center_x)) + "," + str(int(center_y)))
paddingColor = [0,0,0]
top_left_x = center_x - int(width/2)-1
#print("top_left_x:")
#print(top_left_x)
top_left_y = center_y - int(height/2)-1
#print("top_left_y:")
#print(top_left_y)
bottom_right_x = center_x + int(width/2)
bottom_right_y = center_y + int(height/2)
#print ("bottom_right_x / y")
#print(str(bottom_right_x) + " / " + str(bottom_right_y))
img_height = np.size(img, 0)
img_width = np.size(img, 1)
if(top_left_x <0 or top_left_y <0 or bottom_right_x >img_width or bottom_right_y > img_height):
#border padding needed
border_left = 0
border_right = 0
border_top= 0
border_bottom= 0
if(top_left_x < 0):
width = width + top_left_x
border_left = -1 * top_left_x
top_left_x = 0
if(top_left_y < 0):
height = height + top_left_y
border_top = -1 * top_left_y
top_left_y = 0
if(bottom_right_x > img_width):
width = width -(bottom_right_x - img_width)
border_right = bottom_right_x - img_width
if(bottom_right_y> img_height):
height = height -(bottom_right_y - img_height)
border_bottom = bottom_right_y - img_height
#print(border_left)
#print(border_right)
#print(border_top)
#print(border_bottom)
img_roi = img[top_left_y : bottom_right_y ,top_left_x : bottom_right_x ]
#cv2.imshow("originalROI",img_roi)
img_roi = cv2.copyMakeBorder(img_roi, border_top,border_bottom,border_left, border_right, cv2.BORDER_CONSTANT,value=paddingColor)
else:
img_roi = img[top_left_y : bottom_right_y ,top_left_x : bottom_right_x ]
return img_roi
# similarity map converter
# converts the 16 target ground-truth labels (coordinates) into 16 distance maps
# Each map has value '0' on the keypoint and '32' (according to the length of the generated hash codes) on non-keypoint areas
def make_heatmap(emptymap ,joint_idx, point, sigma):
point_x,point_y = point
_, height, width = emptymap.shape[:3]
th= 4.605
delta = math.sqrt(th * 2)
x0 = int(max(0, point_x - delta * sigma))
y0 = int(max(0, point_y - delta * sigma))
x1 = int(min(width, point_x + delta * sigma))
y1 = int(min(height, point_y + delta * sigma))
for y in range(y0,y1):
for x in range(x0,x1):
d = (x - point_x)**2 + (y - point_y)**2
exp = d / 2.0 / sigma / sigma
if exp > th:
continue
emptymap[joint_idx][y][x] = max (emptymap[joint_idx][y][x], math.exp(-exp))
emptymap[joint_idx][y][x] = min (emptymap[joint_idx][y][x], 1.0)
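# Illustrative only (the coordinates and sigma below are assumptions, not part of
# the original code): stamp a Gaussian peak for joint 0 at pixel (x=30, y=40) into
# a 16-channel 76x76 map, mirroring how training_data_feeder() calls this below.
#   maps = np.zeros((16, 76, 76), dtype=np.float32)
#   make_heatmap(maps, 0, (30, 40), 1)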
def training_data_feeder(joint_data_path, train_val_path, imgpath, input_size, hint_roi_size):
#load trainvalset data,
train_val = open(train_val_path).readlines()
train_groups = json.loads(train_val[0].strip())["train_set"]
#print(train_groups)
#load one of train set indecies
index = random.choice(train_groups)
#print(index)
#create path object to the image directory( index "0" to dir_name "001")
dir_name = str(index+1)
if((index+1) < 100):
dir_name ="0"+ dir_name
if((index+1) < 10):
dir_name = "0" + dir_name
#print(dir_name)
dir_path = imgpath + dir_name + "/"
#print(dir_path)
# randomly load three images, get file names
# from "sample_name" the first two names are loaded as h_img1 and h_img2, the third as t_img
file_list = []
for file in os.listdir(dir_path):
if len(file) > 5:
file_list.append(file)
#print(file_list)
#print("selected: ")
sample_name = random.sample(file_list, 3)
#print(sample_name)
#load image files
h_img1 = cv2.imread(dir_path + sample_name[0])
h_img2 = cv2.imread(dir_path + sample_name[1])
t_img = cv2.imread(dir_path + sample_name[2])
#load corresponding joint data as labels
h_label1 = []
h_label2 = []
t_label = []
label_data = open(joint_data_path).readlines()
for i in range( len(label_data)):
datum = json.loads(label_data[i].strip())
if(datum["filename"] == sample_name[0]):
for joint in datum["joint_pos"]:
h_label1.append(joint[1])
#print(h_label1)
elif(datum["filename"] == sample_name[1]):
for joint in datum["joint_pos"]:
h_label2.append(joint[1])
elif(datum["filename"] == sample_name[2]):
for joint in datum["joint_pos"]:
t_label.append(joint[1])
#resize the two images and get resize ratios
resize_ratioh1 = (input_size / h_img1.shape[1] , input_size / h_img1.shape[0])
resize_ratioh2 = (input_size / h_img2.shape[1] , input_size / h_img2.shape[0])
resize_ratiot = (1 / t_img.shape[1] , 1 / t_img.shape[0])
h_img1= cv2.resize(h_img1,(input_size,input_size))
h_img2= cv2.resize(h_img2,(input_size,input_size))
t_img = cv2.resize(t_img,(input_size,input_size))
#Convert the joint position according to the resize ratios
#crop rois from two hint images to get the hintsets
#img_point = None
hintSet01 = []
hintSet02 = []
for joint in h_label1:
joint[0] = joint[0]*resize_ratioh1[0]
joint[1] = joint[1]*resize_ratioh1[1]
for i in range(len(h_label1)):
tmp = getPaddedROI(h_img1, int(h_label1[i][0]), int(h_label1[i][1]), hint_roi_size, hint_roi_size)
hintSet01.append(tmp)
#cv2.imshow("tmp",tmp)
#cv2.imshow("h_img1",h_img1)
#for tmp in hintSet01:
# cv2.imshow("tmp",tmp)
# cv2.waitKey(0)
for joint in h_label2:
joint[0] = joint[0]*resize_ratioh2[0]
joint[1] = joint[1]*resize_ratioh2[1]
for i in range(len(h_label2)):
tmp = getPaddedROI(h_img2, int(h_label2[i][0]), int(h_label2[i][1]), hint_roi_size, hint_roi_size)
hintSet02.append(tmp)
#Normalize the value by dividing with input_size
#
joint_idx = 0
heatmap = np.zeros((16, 76, 76) , dtype = np.float32)
for joint in t_label:
point =[ joint[0]*resize_ratiot[0] * 76, joint[1]*resize_ratiot[1] *76 ]
make_heatmap(heatmap, joint_idx, point, 1) #sigma = 1
joint_idx +=1
heatmap = 1 - heatmap
return hintSet01, hintSet02, t_img, heatmap
#cv2.imshow("img_point",img_point)
#cv2.waitKey(0)
#cv2.imshow("h_img1",h_img1)
#cv2.imshow("h_img2",h_img2)
#cv2.imshow("t_img",t_img)
#cv2.waitKey(0)
#define sub function crop roi
#return roi*16
#crop rois x 2 times to get 2 hintsets
#return hintset01,hintset02,target image, target label
#joint_data_path = "./custom_data.json"
#train_val_path = "./train_val_indices.json"
#imgpath = "./000/"
#input_size = 400
#hint_roi = 14
#hintSet01,hintSet02,t_img, heatmap = training_data_feeder(joint_data_path, train_val_path, imgpath, input_size, hint_roi )
#print(np.shape(heatmap))
#cv2.imshow('target_image',t_img)
#for i in range(16):
# cv2.imshow('heat map',heatmap[i])
# cv2.waitKey(0)
| 35.311005 | 137 | 0.628862 | ["Apache-2.0"] | gitpharm01/Parapose | imageLoader.py | 7,380 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for calculating loss, accuracy, and other model metrics.
Metrics:
- Padded loss, accuracy, and negative log perplexity. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/metrics.py
- BLEU approximation. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/bleu_hook.py
- ROUGE score. Source:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/rouge.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
def _pad_tensors_to_same_length(x, y):
"""Pad x and y so that the results have the same length (second dimension)."""
with tf.name_scope("pad_to_same_length"):
x_length = tf.shape(x)[1]
y_length = tf.shape(y)[1]
max_length = tf.maximum(x_length, y_length)
x = tf.pad(x, [[0, 0], [0, max_length - x_length], [0, 0]])
y = tf.pad(y, [[0, 0], [0, max_length - y_length]])
return x, y
def padded_cross_entropy_loss(logits, labels, smoothing, vocab_size):
"""Calculate cross entropy loss while ignoring padding.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch_size, length_labels]
smoothing: Label smoothing constant, used to determine the on and off values
vocab_size: int size of the vocabulary
Returns:
Returns the cross entropy loss and weight tensors: float32 tensors with
shape [batch_size, max(length_logits, length_labels)]
"""
with tf.name_scope("loss", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
# Calculate smoothing cross entropy
with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]):
confidence = 1.0 - smoothing
low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1)
soft_targets = tf.one_hot(
tf.cast(labels, tf.int32),
depth=vocab_size,
on_value=confidence,
off_value=low_confidence)
xentropy = tf.nn.softmax_cross_entropy_with_logits_v2(
logits=logits, labels=soft_targets)
# Calculate the best (lowest) possible value of cross entropy, and
# subtract from the cross entropy loss.
normalizing_constant = -(
confidence * tf.log(confidence) + tf.to_float(vocab_size - 1) *
low_confidence * tf.log(low_confidence + 1e-20))
xentropy -= normalizing_constant
weights = tf.to_float(tf.not_equal(labels, 0))
return xentropy * weights, weights
def _convert_to_eval_metric(metric_fn):
"""Wrap a metric fn that returns scores and weights as an eval metric fn.
The input metric_fn returns values for the current batch. The wrapper
aggregates the return values collected over all of the batches evaluated.
Args:
metric_fn: function that returns scores and weights for the current batch's
logits and predicted labels.
Returns:
function that aggregates the scores and weights from metric_fn.
"""
def problem_metric_fn(*args):
"""Returns an aggregation of the metric_fn's returned values."""
(scores, weights) = metric_fn(*args)
# The tf.metrics.mean function assures correct aggregation.
return tf.metrics.mean(scores, weights)
return problem_metric_fn
def get_eval_metrics(logits, labels, params):
"""Return dictionary of model evaluation metrics."""
metrics = {
"accuracy": _convert_to_eval_metric(padded_accuracy)(logits, labels),
"accuracy_top5": _convert_to_eval_metric(padded_accuracy_top5)(
logits, labels),
"accuracy_per_sequence": _convert_to_eval_metric(
padded_sequence_accuracy)(logits, labels),
"neg_log_perplexity": _convert_to_eval_metric(padded_neg_log_perplexity)(
logits, labels, params["vocab_size"]),
}
if not params["use_tpu"]:
# TPU does not support tf.py_func
metrics.update({
"approx_bleu_score": _convert_to_eval_metric(
bleu_score)(logits, labels),
"rouge_2_fscore": _convert_to_eval_metric(
rouge_2_fscore)(logits, labels),
"rouge_L_fscore": _convert_to_eval_metric(
rouge_l_fscore)(logits, labels),
})
# Prefix each of the metric names with "metrics/". This allows the metric
# graphs to display under the "metrics" category in TensorBoard.
metrics = {"metrics/%s" % k: v for k, v in six.iteritems(metrics)}
return metrics
def padded_accuracy(logits, labels):
"""Percentage of times that predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
return tf.to_float(tf.equal(outputs, padded_labels)), weights
def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights
def padded_accuracy_top5(logits, labels):
return padded_accuracy_topk(logits, labels, 5)
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0)
def padded_neg_log_perplexity(logits, labels, vocab_size):
"""Average log-perplexity excluding padding 0s. No smoothing."""
num, den = padded_cross_entropy_loss(logits, labels, 0, vocab_size)
return -num, den
def bleu_score(logits, labels):
"""Approximate BLEU score computation between labels and predictions.
An approximate BLEU scoring method since we do not glue word pieces or
decode the ids and tokenize the output. By default, we use ngram order of 4
and use brevity penalty. Also, this does not have beam search.
Args:
logits: Tensor of size [batch_size, length_logits, vocab_size]
labels: Tensor of size [batch-size, length_labels]
Returns:
bleu: int, approx bleu score
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
bleu = tf.py_func(compute_bleu, (labels, predictions), tf.float32)
return bleu, tf.constant(1.0)
def _get_ngrams_with_counter(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
max_order: maximum length in tokens of the n-grams returned by this
methods.
Returns:
The Counter containing all n-grams upto max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in xrange(1, max_order + 1):
for i in xrange(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in xrange(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu)
def rouge_2_fscore(logits, labels):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
logits: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
predictions = tf.to_int32(tf.argmax(logits, axis=-1))
# TODO: Look into removing use of py_func
rouge_2_f_score = tf.py_func(rouge_n, (predictions, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0)
def _get_ngrams(n, text):
"""Calculates n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def rouge_n(eval_sentences, ref_sentences, n=2):
"""Computes ROUGE-N f1 score of two text collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Args:
eval_sentences: Predicted sentences.
ref_sentences: Sentences from the reference set
n: Size of ngram. Defaults to 2.
Returns:
f1 score for ROUGE-N
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
eval_ngrams = _get_ngrams(n, eval_sentence)
ref_ngrams = _get_ngrams(n, ref_sentence)
ref_count = len(ref_ngrams)
eval_count = len(eval_ngrams)
# Count the overlapping ngrams between evaluated and reference
overlapping_ngrams = eval_ngrams.intersection(ref_ngrams)
overlapping_count = len(overlapping_ngrams)
# Handle edge case. This isn't mathematically correct, but it's good enough
if eval_count == 0:
precision = 0.0
else:
precision = float(overlapping_count) / eval_count
if ref_count == 0:
recall = 0.0
else:
recall = float(overlapping_count) / ref_count
f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8)))
# return overlapping_count / reference_count
return np.mean(f1_scores, dtype=np.float32)
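# --- Illustrative sketch (editor's addition, not part of the original module) ---
# rouge_n takes parallel collections of tokenized sentences; the toy pair below
# is invented only to show the call shape. Defined but never called.
def _example_rouge_n():
  """Returns the approximate ROUGE-2 F1 for one invented sentence pair."""
  eval_sentences = [["the", "cat", "sat", "on", "the", "mat"]]
  ref_sentences = [["the", "cat", "is", "on", "the", "mat"]]
  return rouge_n(eval_sentences, ref_sentences, n=2)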
def rouge_l_fscore(predictions, labels):
"""ROUGE scores computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge_l_fscore: approx rouge-l f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
tf.float32)
return rouge_l_f_score, tf.constant(1.0)
def rouge_l_sentence_level(eval_sentences, ref_sentences):
"""Computes ROUGE-L (sentence level) of two collections of sentences.
Source: https://www.microsoft.com/en-us/research/publication/
rouge-a-package-for-automatic-evaluation-of-summaries/
Calculated according to:
R_lcs = LCS(X,Y)/m
P_lcs = LCS(X,Y)/n
F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs)
where:
X = reference summary
Y = Candidate summary
m = length of reference summary
n = length of candidate summary
Args:
eval_sentences: The sentences that have been picked by the summarizer
ref_sentences: The sentences from the reference set
Returns:
A float: F_lcs
"""
f1_scores = []
for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences):
m = float(len(ref_sentence))
n = float(len(eval_sentence))
lcs = _len_lcs(eval_sentence, ref_sentence)
f1_scores.append(_f_lcs(lcs, m, n))
return np.mean(f1_scores, dtype=np.float32)
def _len_lcs(x, y):
"""Returns the length of the Longest Common Subsequence between two seqs.
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: sequence of words
y: sequence of words
Returns
integer: Length of LCS between x and y
"""
table = _lcs(x, y)
n, m = len(x), len(y)
return table[n, m]
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = dict()
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table
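# --- Illustrative sketch (editor's addition, not part of the original module) ---
# _len_lcs simply reads the bottom-right cell of the DP table built by _lcs.
# The two invented sequences below share the subsequence
# ("police", "the", "gunman"), so the helper returns 3.
def _example_len_lcs():
  """Returns 3, the LCS length of two invented token sequences."""
  x = ["police", "killed", "the", "gunman"]
  y = ["police", "kill", "the", "gunman"]
  return _len_lcs(x, y)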
def _f_lcs(llcs, m, n):
"""Computes the LCS-based F-measure score.
Source: http://research.microsoft.com/en-us/um/people/cyl/download/papers/
rouge-working-note-v1.3.1.pdf
Args:
llcs: Length of LCS
m: number of words in reference summary
n: number of words in candidate summary
Returns:
Float. LCS-based F-measure score
"""
r_lcs = llcs / m
p_lcs = llcs / n
beta = p_lcs / (r_lcs + 1e-12)
num = (1 + (beta ** 2)) * r_lcs * p_lcs
denom = r_lcs + ((beta ** 2) * p_lcs)
f_lcs = num / (denom + 1e-12)
return f_lcs
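# --- Worked example (editor's addition, not part of the original module) ---
# With invented values llcs = 3, m = 4 (reference length), n = 5 (candidate
# length): r_lcs = 0.75, p_lcs = 0.6, beta = p_lcs / r_lcs = 0.8, so
# _f_lcs(3.0, 4.0, 5.0) evaluates to 0.738 / 1.134 ~= 0.651 (ignoring the
# 1e-12 stabilizers). Floats are used so the example is division-safe on
# Python 2 as well.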
| 33.765784 | 93 | 0.701671 | ["Apache-2.0"] | 1110sillabo/models | official/nlp/transformer/utils/metrics.py | 16,579 | Python
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
resources_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
mnist_path = os.path.join(resources_path, 'mnist')
data_dir = os.path.join(mnist_path, 'data')
training_dir = os.path.join(data_dir, 'training')
cpu_sub_dir = 'model_cpu'
gpu_sub_dir = 'model_gpu'
eia_sub_dir = 'model_eia'
model_cpu_dir = os.path.join(mnist_path, cpu_sub_dir)
mnist_cpu_script = os.path.join(model_cpu_dir, 'mnist.py')
model_cpu_1d_dir = os.path.join(model_cpu_dir, '1d')
mnist_1d_script = os.path.join(model_cpu_1d_dir, 'mnist_1d.py')
model_gpu_dir = os.path.join(mnist_path, gpu_sub_dir)
mnist_gpu_script = os.path.join(model_gpu_dir, 'mnist.py')
model_gpu_1d_dir = os.path.join(model_gpu_dir, '1d')
model_eia_dir = os.path.join(mnist_path, eia_sub_dir)
mnist_eia_script = os.path.join(model_eia_dir, 'mnist.py')
call_model_fn_once_script = os.path.join(model_cpu_dir, 'call_model_fn_once.py')
ROLE = 'dummy/unused-role'
DEFAULT_TIMEOUT = 20
PYTHON3 = 'py3'
RESOURCE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'resources'))
# These regions have some p2 and p3 instances, but not enough for automated testing
NO_P2_REGIONS = ['ca-central-1', 'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3',
'eu-north-1', 'sa-east-1', 'ap-east-1']
NO_P3_REGIONS = ['ap-southeast-1', 'ap-southeast-2', 'ap-south-1', 'ca-central-1',
'eu-central-1', 'eu-west-2', 'us-west-1', 'eu-west-3', 'eu-north-1',
'sa-east-1', 'ap-east-1']
| 44.333333 | 92 | 0.724624 | ["Apache-2.0"] | YYStreet/sagemaker-pytorch-serving-container | test-toolkit/integration/__init__.py | 2,128 | Python
from config_utils import read_main_config
from deep_q_network import DeepQNetwork
from gym_wrapper import GymWrapper
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
config = read_main_config()
gym_wrapper = GymWrapper(config['general']['scenario'])
deep_q_network = DeepQNetwork(config, gym_wrapper)
deep_q_network.train()
deep_q_network.test(episodes=3)
| 31 | 67 | 0.848635 | ["MIT"] | tomjur/TF2.0DQN | main.py | 403 | Python
from spaceone.repository.model.plugin_model import *
from spaceone.repository.model.schema_model import *
from spaceone.repository.model.policy_model import *
from spaceone.repository.model.repository_model import *
| 43.2 | 56 | 0.851852 | ["Apache-2.0"] | gikang82/repository | src/spaceone/repository/model/__init__.py | 216 | Python
from setuptools import setup
from Cython.Build import cythonize
setup(
name='Fibonacci',
package_dir={'Fibonacci/functions_folder': ''},
ext_modules=cythonize("fib_module.pyx"),
)
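# Illustrative usage note (editor's addition): with this setup.py placed next to
# fib_module.pyx, the extension is typically compiled in place with
#     python setup.py build_ext --inplace
# after which `import fib_module` works from the same directory.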
| 21.444444 | 51 | 0.735751 | ["MIT"] | dalexa10/EngineeringDesignOptimization | Cython/Fibonacci/functions_folder/setup.py | 193 | Python
# -*- coding: utf-8 -*-
#
# This file is part of Flask-CLI
# Copyright (C) 2015 CERN.
#
# Flask-AppFactory is free software; you can redistribute it and/or
# modify it under the terms of the Revised BSD License; see LICENSE
# file for more details.
"""Flask extension to enable CLI."""
import types
from . import AppGroup
class FlaskCLI(object):
"""Flask-CLI extension.
Initialization of the extension:
>>> from flask import Flask
>>> from flask_cli import FlaskCLI
>>> app = Flask('myapp')
>>> FlaskCLI(app)
or alternatively using the factory pattern:
>>> app = Flask('myapp')
>>> ext = FlaskCLI()
>>> ext.init_app(app)
"""
def __init__(self, app=None):
"""Initialize the Flask-CLI."""
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Initialize a Flask application."""
# Follow the Flask guidelines on usage of app.extensions
if not hasattr(app, 'extensions'):
app.extensions = {}
if 'flask-cli' in app.extensions:
raise RuntimeError("Flask-CLI application already initialized")
app.extensions['flask-cli'] = self
self.setup_pre10(app)
def setup_pre10(self, app):
"""Setup Flask pre-1.0 application object."""
if hasattr(app, 'cli'):
return
from flask_cli.app import make_shell_context, shell_context_processor
app.cli = AppGroup(app.name)
app.shell_context_processors = []
app.make_shell_context = types.MethodType(make_shell_context, app)
app.shell_context_processor = types.MethodType(
shell_context_processor, app)
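# Illustrative usage sketch (editor's addition, not part of the extension): once
# FlaskCLI(app) has run on a pre-1.0 Flask app, the app exposes an ``app.cli``
# click group, so commands can be registered in the usual way -- ``initdb`` is a
# made-up example name:
#
#     @app.cli.command()
#     def initdb():
#         """Hypothetical command registered on the app's CLI group."""
#         print("database initialized")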
| 28.133333 | 77 | 0.635664 | ["MIT"] | muneneee/blog | virtual/lib/python3.6/site-packages/flask_cli/ext.py | 1,688 | Python
from django.http import HttpResponseRedirect
from django.views.generic import ListView,CreateView,UpdateView,DetailView,View
from django.shortcuts import render, redirect
from ecom import forms, models
from django.utils.decorators import method_decorator
def admin_required(function):
def wrap(request, *args, **kwargs):
if not request.user.groups.filter(name='Administrador').exists():
return redirect('')
return function(request, *args, **kwargs)
return wrap
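# Illustrative note (editor's addition): admin_required can also decorate a
# plain function-based view directly (``@admin_required``); the class-based
# views below apply it to dispatch() via method_decorator instead.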
class Agregar_paquete_view(CreateView):
# specify the model for create view
model = models.Paquete
form_class = forms.PaqueteForm
    # template used to render the create form
    template_name = 'ecom/paquetes/Agregar_paquete.html'
success_url = "/Ver-paquete"
@method_decorator(admin_required)
def dispatch(self, request, *args, **kwargs):
return super(Agregar_paquete_view, self).dispatch(request, *args, **kwargs)
class paquete_view(View):
@method_decorator(admin_required)
def dispatch(self, request, *args, **kwargs):
return super(paquete_view, self).dispatch(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
paquete = models.Paquete.objects.all()
return render(request, 'ecom/paquetes/Ver_paquete.html',{"paquete": paquete})
class Actualizar_paquete(UpdateView):
model = models.Paquete #model
fields = "__all__" # fields / if you want to select all fields, use "__all__"
template_name = 'ecom/paquetes/Actualizar_paquete.html' # templete for updating
success_url = "/Ver-paquete"
@method_decorator(admin_required)
def dispatch(self, request, *args, **kwargs):
return super(Actualizar_paquete, self).dispatch(request, *args, **kwargs)
def paquetes(request):
if 'product_ids' in request.COOKIES:
product_ids = request.COOKIES['product_ids']
counter = product_ids.split('|')
product_count_in_cart = len(set(counter))
else:
product_count_in_cart = 0
paquetes = models.Paquete.objects.all()
    return render(request, 'ecom/paquetes/paquete.html', {"paquetes": paquetes, "product_count_in_cart": product_count_in_cart})
| 36.916667 | 128 | 0.715576 | ["MIT"] | Gustolidel/Ikergust | ecom/paquetes/view_paquete.py | 2,215 | Python
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the JAX interface"""
import pytest
jax = pytest.importorskip("jax")
jnp = pytest.importorskip("jax.numpy")
import numpy as np
import pennylane as qml
from pennylane.tape import JacobianTape, qnode, QNode, QubitParamShiftTape
def test_qnode_intergration():
"""Test a simple use of qnode with a JAX interface and non-JAX device"""
dev = qml.device("default.mixed", wires=2) # A non-JAX device
@qml.qnode(dev, interface="jax")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
val = circuit(weights)
assert "DeviceArray" in val.__repr__()
def test_to_jax():
"""Test the to_jax method"""
dev = qml.device("default.mixed", wires=2)
@qml.qnode(dev, interface="autograd")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
circuit.to_jax()
weights = jnp.array([0.1, 0.2])
val = circuit(weights)
assert "DeviceArray" in val.__repr__()
def test_simple_jacobian():
"""Test the use of jax.jaxrev"""
dev = qml.device("default.mixed", wires=2) # A non-JAX device.
@qml.qnode(dev, interface="jax", diff_method="parameter-shift")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
grads = jax.jacrev(circuit)(weights)
# This is the easiest way to ensure our object is a DeviceArray instead
# of a numpy array.
assert "DeviceArray" in grads.__repr__()
assert grads.shape == (2,)
np.testing.assert_allclose(grads, np.array([-0.09784342, -0.19767685]))
def test_simple_grad():
"""Test the use of jax.grad"""
dev = qml.device("default.mixed", wires=2) # A non-JAX device.
@qml.qnode(dev, interface="jax", diff_method="parameter-shift")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RZ(weights[1], wires=1)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
weights = jnp.array([0.1, 0.2])
val = jax.grad(circuit)(weights)
assert "DeviceArray" in val.__repr__()
@pytest.mark.parametrize("diff_method", ['parameter-shift', 'finite-diff'])
def test_differentiable_expand(diff_method):
"""Test that operation and nested tapes expansion
is differentiable"""
class U3(qml.U3):
def expand(self):
theta, phi, lam = self.data
wires = self.wires
with JacobianTape() as tape:
qml.Rot(lam, theta, -lam, wires=wires)
qml.PhaseShift(phi + lam, wires=wires)
return tape
dev = qml.device("default.mixed", wires=1)
a = jnp.array(0.1)
p = jnp.array([0.1, 0.2, 0.3])
@qnode(dev, diff_method=diff_method, interface="jax")
def circuit(a, p):
qml.RX(a, wires=0)
U3(p[0], p[1], p[2], wires=0)
return qml.expval(qml.PauliX(0))
res = circuit(a, p)
expected = np.cos(a) * np.cos(p[1]) * np.sin(p[0]) + np.sin(a) * (
np.cos(p[2]) * np.sin(p[1]) + np.cos(p[0]) * np.cos(p[1]) * np.sin(p[2])
)
tol = 1e-5
assert np.allclose(res, expected, atol=tol, rtol=0)
res = jax.grad(circuit, argnums=1)(a, p)
expected = np.array(
[
np.cos(p[1]) * (np.cos(a) * np.cos(p[0]) - np.sin(a) * np.sin(p[0]) * np.sin(p[2])),
np.cos(p[1]) * np.cos(p[2]) * np.sin(a)
- np.sin(p[1])
* (np.cos(a) * np.sin(p[0]) + np.cos(p[0]) * np.sin(a) * np.sin(p[2])),
np.sin(a)
* (np.cos(p[0]) * np.cos(p[1]) * np.cos(p[2]) - np.sin(p[1]) * np.sin(p[2])),
]
)
assert np.allclose(res, expected, atol=tol, rtol=0)
def qtransform(qnode, a, framework=jnp):
"""Transforms every RY(y) gate in a circuit to RX(-a*cos(y))"""
def construct(self, args, kwargs):
"""New quantum tape construct method, that performs
the transform on the tape in a define-by-run manner"""
t_op = []
QNode.construct(self, args, kwargs)
new_ops = []
for o in self.qtape.operations:
# here, we loop through all tape operations, and make
# the transformation if a RY gate is encountered.
if isinstance(o, qml.RY):
t_op.append(qml.RX(-a * framework.cos(o.data[0]), wires=o.wires))
new_ops.append(t_op[-1])
else:
new_ops.append(o)
self.qtape._ops = new_ops
self.qtape._update()
import copy
new_qnode = copy.deepcopy(qnode)
new_qnode.construct = construct.__get__(new_qnode, QNode)
return new_qnode
@pytest.mark.parametrize(
"dev_name,diff_method",
[("default.mixed", "finite-diff"), ("default.qubit.autograd", "parameter-shift")],
)
def test_transform(dev_name, diff_method, monkeypatch, tol):
"""Test an example transform"""
monkeypatch.setattr(qml.operation.Operation, "do_check_domain", False)
dev = qml.device(dev_name, wires=1)
@qnode(dev, interface="jax", diff_method=diff_method)
def circuit(weights):
op1 = qml.RY(weights[0], wires=0)
op2 = qml.RX(weights[1], wires=0)
return qml.expval(qml.PauliZ(wires=0))
weights = np.array([0.32, 0.543])
a = np.array(0.5)
def loss(weights, a):
# transform the circuit QNode with trainable weight 'a'
new_circuit = qtransform(circuit, a)
# evaluate the transformed QNode
res = new_circuit(weights)
# evaluate the original QNode with pre-processed parameters
res2 = circuit(jnp.sin(weights))
# return the sum of the two QNode evaluations
return res + res2
res = loss(weights, a)
grad = jax.grad(loss, argnums=[0, 1])(weights, a)
assert len(grad) == 2
assert grad[0].shape == weights.shape
assert grad[1].shape == a.shape
# compare against the expected values
tol = 1e-5
assert np.allclose(res, 1.8244501889992706, atol=tol, rtol=0)
assert np.allclose(grad[0], [-0.26610258, -0.47053553], atol=tol, rtol=0)
assert np.allclose(grad[1], 0.06486032, atol=tol, rtol=0)
| 33.985294 | 97 | 0.617337 | ["Apache-2.0"] | PritishSehzpaul/pennylane | tests/tape/interfaces/test_qnode_jax.py | 6,933 | Python
"""Common DB report tests."""
import datetime
from pycounter.constants import METRICS
def test_version(db_report):
assert db_report.report_version == 4
def test_year(db_report):
assert db_report.year == 2012
def test_publisher(db_report):
for publication in db_report:
assert publication.publisher == u"Megadodo Publications"
def test_platform(db_report):
for publication in db_report:
assert publication.platform == u"HHGTTG Online"
def test_customer(db_report):
assert db_report.customer == u"University of Maximegalon"
def test_date_run(db_report):
assert db_report.date_run == datetime.date(2012, 7, 9)
def test_period(db_report):
assert db_report.period == (datetime.date(2012, 1, 1), datetime.date(2012, 6, 30))
def test_report_metric(db_report):
for metric in db_report.metric:
assert metric in METRICS[db_report.report_type]
| 22.65 | 86 | 0.737307 | ["MIT"] | beda42/pycounter | pycounter/test/test_db_common.py | 906 | Python
from . import is_palindrome
test_subjects = [
is_palindrome
]
complex_pali = '''Anita. .laVa,
:; la?
TINa!'''
def test_is_palindrome():
for subject in test_subjects:
assert subject.algorithm('')
assert subject.algorithm(' ')
assert subject.algorithm(complex_pali)
assert not subject.algorithm('Nope')
| 19.611111 | 46 | 0.648725 | ["MIT"] | IngCarlosPedroza/algorithms-and-data-structures-py | algorithms_and_data_structures/algorithms/string_processing/is_palindrome/test_is_palindrome.py | 353 | Python
# coding: utf-8
"""
convertapi
Convert API lets you effortlessly convert file formats and types. # noqa: E501
OpenAPI spec version: v1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import cloudmersive_convert_api_client
from cloudmersive_convert_api_client.models.docx_set_header_request import DocxSetHeaderRequest # noqa: E501
from cloudmersive_convert_api_client.rest import ApiException
class TestDocxSetHeaderRequest(unittest.TestCase):
"""DocxSetHeaderRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDocxSetHeaderRequest(self):
"""Test DocxSetHeaderRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudmersive_convert_api_client.models.docx_set_header_request.DocxSetHeaderRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.707317 | 117 | 0.741362 | ["Apache-2.0"] | Cloudmersive/Cloudmersive.APIClient.Python.Convert | test/test_docx_set_header_request.py | 1,013 | Python
import abc
import logging
import Sea
import FreeCAD as App  # App.Console is used below for user-facing warnings
import numpy as np
import itertools
from ..base import Base
class Connection(Base, Sea.model.connections.Connection):
"""
Abstract base class for all :mod:`Sea.adapter.connections` classes.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, obj, system, components):
Base.__init__(self, obj)
obj.addProperty("App::PropertyLink", "System", "Component", "System this connection belongs to.")
obj.System = system
obj.couplings = self.couplings
obj.makeCoupling = self.makeCoupling
obj.updateCouplings = self.updateCouplings
obj.addCouplings = self.addCouplings
#obj.addProperty("App::PropertyLinkList", "Couplings", "Connection", "List of all couplings.")
obj.addProperty("App::PropertyLinkList", "Components", "Connection", "Components that are connected via this connection.")
obj.Frequency = system.Frequency
#obj.addProperty("App::PropertyLink", "CouplingsGroup", "Groups", "Couplings that are part of System.")
#obj.CouplingsGroup = group.newObject("App::DocumentObjectGroup", "GroupCouplings")
#obj.CouplingsGroup.Label = "Couplings"
#obj.addProperty("Part::PropertyPartShape", "Shape", "Connection", "Shape of the connection.")
#obj.addProperty("App::PropertyBool", "UpdateCouplings", "Connection", "Update couplings when the connection changes.").UpdateCouplings = True
#obj.addProperty("App::PropertyString", "Sort", "Connection", "Is the connection described by a point, line or area.")
obj.addProperty("App::PropertyFloatList", "ImpedanceJunction", "Connection", "Total impedance at the junction.")
obj.setEditorMode("ImpedanceJunction", 1)
obj.Components = components
#obj.Shape = component_a.Shape.common(component_b.Shape)
obj.updateCouplings()
def onChanged(self, obj, prop):
Base.onChanged(self, obj, prop)
if prop == 'Components':
pass
#elif prop == 'Shape':
#self.updateCouplings(obj)
#if prop == 'Frequency':
#for coupling in obj.couplings():
#coupling.Frequency = obj.Frequency
def execute(self, obj):
Base.execute(self, obj)
@staticmethod
def couplings(obj):
return filter(Sea.actions.document.isCoupling, obj.InList)
@abc.abstractmethod
def updateComponents(self, obj):
pass
#@staticmethod
#def updateShape(obj):
#"""
#Update the common shape between the components.
#"""
#connection = Sea.adapter.connection.ShapeConnection([item.Shape for item in self.Components])
#shape = connection.shape()
#obj.Shape = shape
@staticmethod
def updateCouplings(connection):
"""
The shape has changed, which means couplings might have to change, be added or removed.
To be sure all couplings in this connection are deleted and then build up from scratch.
"""
"""Remove all old couplings."""
for coupling in connection.couplings():
connection.Document.removeObject(coupling.Name)
"""Add couplings for every shape."""
connection.addCouplings()
@staticmethod
def addCouplings(connection):
"""
Add couplings to the :attr:`connection`.
:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
"""
for comp_from, comp_to in itertools.permutations(connection.Components, 2):
coupling_sort = Connection.determineCouplingType(connection.ClassName, comp_from, comp_to)
if not coupling_sort:
App.Console.PrintWarning("Cannot add coupling.\n")
return
for sub_from, sub_to in itertools.product(comp_from.subsystems(), comp_to.subsystems()):
#print connection
#print 'From: ' + comp_from.ClassName + sub_from
#print 'To: ' + comp_to.ClassName + sub_to
connection.makeCoupling(sub_from, sub_to, coupling_sort)
coupling_options = {
('ConnectionPoint', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionLine', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionSurface', 'Component1DBeam', 'Component1DBeam') : 'Coupling1DStructural',
('ConnectionPoint', 'Component2DPlate', 'Component2DPlate') : 'Coupling1DStructural',
('ConnectionLine', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
('ConnectionSurface', 'Component2DPlate', 'Component2DPlate') : 'Coupling2DStructural',
('ConnectionSurface', 'Component2DPlate', 'Component3DCavity') : 'Coupling3DPlateCavity',
('ConnectionSurface', 'Component3DCavity', 'Component2DPlate') : 'Coupling3DCavityPlate',
}
@staticmethod
def determineCouplingType(connection_type, component_from, component_to):
"""
Determine the type of coupling. Detects what type of connection the components have.
Based on the type of connection and on the types of components a coupling is returned.
:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
"""
if connection_type:
item = (connection_type, component_from.ClassName, component_to.ClassName)
try:
return Connection.coupling_options[item]
except KeyError:
txt = 'Could not determine the type of coupling for ' + component_from.ClassName + ' to ' + component_to.ClassName + ' with ' + connection_type + '.\n'
App.Console.PrintWarning(txt)
return None
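    # Illustrative note (editor's addition): with two hypothetical plate
    # components whose ClassName is 'Component2DPlate', a line junction maps to
    #   Connection.determineCouplingType('ConnectionLine', plate_a, plate_b)
    #   == 'Coupling2DStructural'
    # per the coupling_options table above; unknown combinations return None.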
@staticmethod
def makeCoupling(connection, subsystem_from, subsystem_to, sort):
"""
Add a coupling to system.
:param connection: an instance of :class:`Sea.adapter.baseclasses.Connection`
:param component_from: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param subsystem_from: string representing the type of subsystem
:param component_to: an instance of a child of :class:`Sea.adapter.baseclasses.Component`
:param subsystem_to: string representing the type of subsystem
:param sort: sort of coupling as specified in :class:`Sea.adapter.couplings.couplings_map`
"""
#if connection.System == component_from.System == component_to.System:
from Sea.adapter.object_maps import couplings_map
obj = connection.Document.addObject("App::FeaturePython", 'Coupling')
couplings_map[sort](obj, connection, subsystem_from, subsystem_to)
try:
Sea.adapter.couplings.ViewProviderCoupling(obj.ViewObject)
except AttributeError:
pass
obj.Label = obj.ClassName + '_' + subsystem_from.ClassName.replace('Subsystem', '') + '_to_' + subsystem_to.ClassName.replace('Subsystem', '')
logging.info("Sea: Created %s.", obj.Name)
obj.Document.recompute()
return obj
| 42.848315 | 168 | 0.629343 | ["BSD-3-Clause"] | FRidh/Sea | Sea/adapter/connections/Connection.py | 7,627 | Python
import warnings
from contextlib import contextmanager
from numba.tests.support import override_config, TestCase
from numba.cuda.testing import skip_on_cudasim
from numba import cuda
from numba.core import types
from numba.cuda.testing import SerialMixin
import unittest
@skip_on_cudasim("Skipped on simulator")
class TestCudaDebugInfo(SerialMixin, TestCase):
"""Tests features that will be deprecated
"""
@contextmanager
def assert_deprecation_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
yield w
def test_autotune(self):
@cuda.jit("(int32[:],)")
def foo(xs):
xs[0] = 1
with self.assert_deprecation_warning() as w:
foo.autotune
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert ".autotune" in str(w[-1].message)
with self.assert_deprecation_warning() as w:
foo.occupancy
assert len(w) == 2
assert issubclass(w[0].category, DeprecationWarning)
assert ".occupancy" in str(w[0].message)
assert issubclass(w[1].category, DeprecationWarning)
assert ".autotune" in str(w[1].message)
if __name__ == '__main__':
unittest.main()
| 30.181818 | 65 | 0.653614 | ["BSD-2-Clause"] | aerusso/numba | numba/cuda/tests/cudapy/test_deprecation.py | 1,328 | Python
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase, parameterized.TestCase):
@test_util.run_deprecated_v1
def testDistributeStrategy(self):
v = variables.VariableV1(0.0)
self.assertIsNone(v._distribute_strategy)
@test_util.run_v1_only("b/120545219")
def testInitialization(self):
with self.cached_session():
var0 = variables.VariableV1(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual("Variable", var0._shared_name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.VariableV1(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual("Variable_1", var1._shared_name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var0)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testInitializationOrder(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
# Currently have to set the shape manually for Add.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
@test_util.run_deprecated_v1
def testCyclicInitializer(self):
with self.cached_session():
cyclic = control_flow_ops.while_loop(
cond=lambda i: i < 10,
body=lambda i: i + 1,
loop_vars=(constant_op.constant(0),))
initial_value = variables._try_guard_against_uninitialized_dependencies(
"test", cyclic)
self.assertIs(initial_value, cyclic)
def testIterable(self):
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegex(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
@test_util.run_deprecated_v1
def testAssignments(self):
with self.cached_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
self.assertAllClose(1.0, self.evaluate(var))
self.assertAllClose(-1.0, self.evaluate(minus_one))
self.assertAllClose(-1.0, self.evaluate(var))
self.assertAllClose(4.0, self.evaluate(four))
self.assertAllClose(4.0, self.evaluate(var))
@test_util.run_deprecated_v1
def testResourceAssignments(self):
with self.session(use_gpu=True):
var = resource_variable_ops.ResourceVariable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
self.assertAllClose(1.0, self.evaluate(var))
self.evaluate(minus_one)
self.assertAllClose(-1.0, self.evaluate(var))
self.evaluate(four)
self.assertAllClose(4.0, self.evaluate(var))
def testAssignDifferentShapesEagerNotAllowed(self):
with context.eager_mode():
var = variables.Variable(np.zeros(shape=[1, 1]))
with self.assertRaisesRegex(ValueError, "Shapes.*and.*are incompatible"):
var.assign(np.zeros(shape=[2, 2]))
@test_util.disable_tfrt("Graph is not supported yet. b/156187905")
@test_util.run_in_graph_and_eager_modes
def testAssignDifferentShapesAllowed(self):
var = variables.Variable(np.zeros(shape=[1, 1]),
shape=tensor_shape.TensorShape(None))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(np.zeros(shape=[1, 1]), var.read_value())
self.evaluate(var.assign(np.zeros(shape=[2, 2])))
self.assertAllEqual(np.zeros(shape=[2, 2]), var.read_value())
@test_util.disable_tfrt("GetHostSize() is not expected to be called with "
"string type. b/156761465")
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(
initial_value=array_ops.zeros((0,), dtype=dtypes.string),
name="foo",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self.evaluate(variables.local_variables_initializer())
old_value = array.value()
copy_op = array.assign(old_value)
self.assertEqual([], list(self.evaluate(copy_op)))
def _countUpToTest(self, dtype):
with self.cached_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
self.assertEqual(1, self.evaluate(var))
self.assertEqual(1, self.evaluate(count_up_to))
self.assertEqual(2, self.evaluate(var))
self.assertEqual(2, self.evaluate(count_up_to))
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
@test_util.run_deprecated_v1
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
@test_util.run_deprecated_v1
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
@test_util.run_v1_only("b/120545219")
def testControlDepsNone(self):
with self.cached_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
# d get the control dep.
d = constant_op.constant(2.0)
# variables do not.
var_x = variables.VariableV1(2.0)
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access
@test_util.run_v1_only("b/120545219")
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegex(errors_impl.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = variables.Variable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegex(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
with self.cached_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
def testCachingDevice(self):
with self.cached_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
@test_util.run_deprecated_v1
def testCollections(self):
with self.cached_session():
var_x = variables.VariableV1(2.0)
var_y = variables.VariableV1(2.0, trainable=False)
var_z = variables.VariableV1(2.0, trainable=True)
var_t = variables.VariableV1(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
@test_util.run_deprecated_v1
def testCollectionsWithScope(self):
with self.cached_session():
with ops.name_scope("scope_1"):
var_x = variables.VariableV1(2.0)
with ops.name_scope("scope_2"):
var_y = variables.VariableV1(2.0)
self.assertEqual([var_x, var_y], variables.global_variables())
self.assertEqual([var_x], variables.global_variables("scope_1"))
self.assertEqual([var_y], variables.global_variables("scope_2"))
self.assertEqual([var_x, var_y], variables.trainable_variables())
self.assertEqual([var_x], variables.trainable_variables("scope_1"))
self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
@test_util.run_deprecated_v1
def testOperators(self):
with self.cached_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
var_m = variables.Variable([[2.0, 3.0]])
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
self.assertAllClose([-1.0], self.evaluate(rsub))
self.assertAllClose([20.0], self.evaluate(mul))
self.assertAllClose([20.0], self.evaluate(rmul))
self.assertAllClose([0.2], self.evaluate(div))
self.assertAllClose([5.0], self.evaluate(rdiv))
self.assertAllClose([-2.0], self.evaluate(neg))
self.assertAllClose([2.0], self.evaluate(abs_v))
self.assertAllClose([True], self.evaluate(lt))
self.assertAllClose([False], self.evaluate(rlt))
self.assertAllClose([True], self.evaluate(le))
self.assertAllClose([True], self.evaluate(rle))
self.assertAllClose([False], self.evaluate(gt))
self.assertAllClose([True], self.evaluate(rgt))
self.assertAllClose([True], self.evaluate(ge))
self.assertAllClose([True], self.evaluate(rge))
self.assertAllClose([6], self.evaluate(mod))
self.assertAllClose([3], self.evaluate(rmod))
self.assertAllClose([True, False], self.evaluate(and_v))
self.assertAllClose([True, True], self.evaluate(or_v))
self.assertAllClose([True, False], self.evaluate(xor_v))
self.assertAllClose([False, True], self.evaluate(invert_v))
self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
self.assertAllClose([[80.0]], self.evaluate(matmul))
self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testColocation(self):
with ops.device("/job:ps"):
var = variables.VariableV1(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.cached_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
@test_util.run_v1_only("b/120545219")
def testNoRefDataRace(self):
with self.cached_session():
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
with self.cached_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = variables.Variable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = variables.Variable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = variables.Variable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
def testSynchronizationAndAggregationSaved(self):
with ops.Graph().as_default():
original_variable = variables.Variable(
initial_value=constant_op.constant(10.0),
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)
self.assertEqual(variables.VariableSynchronization.NONE,
original_variable.synchronization)
self.assertEqual(variables.VariableAggregation.ONLY_FIRST_REPLICA,
original_variable.aggregation)
laundered = variables.Variable(
variable_def=original_variable.to_proto())
self.assertEqual(
variables.VariableSynchronization.NONE,
laundered.synchronization)
self.assertEqual(variables.VariableAggregationV2.ONLY_FIRST_REPLICA,
laundered.aggregation)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testRepr(self):
var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
self.assertEqual(
"<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
repr(var))
def testVariableNamesPreserveNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope("foo"):
v = variables.Variable(0.0, name="bar")
self.assertEqual(v.name, "foo/bar:0")
with ops.get_default_graph().as_default():
create_variable()
@parameterized.parameters(variables.VariableV1, variables.Variable)
def testTrainableVariable(self, cls):
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2], name="v")
w = variables.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
v.initializer.run()
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
v.initializer.run() # not strictly necessary
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
v.initializer.run()
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], partitioned_variable.get_shape())
self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegex(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegex(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegex(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
def testPartitionedVariableAssignments(self):
with ops.Graph().as_default(), self.cached_session():
v0 = variables.Variable(initial_value=[0.0])
v1 = variables.Variable(initial_value=[1.0])
v2 = variables.Variable(initial_value=[20.0])
v3 = variables.Variable(initial_value=[30.0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
v3._set_save_slice_info(
variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
pv_0 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0, v1],
partitions=partitions)
pv_1 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v2, v3],
partitions=partitions)
deltas_a = constant_op.constant([1.0, 2.0])
deltas_b = constant_op.constant([3.0, 4.0])
ones = array_ops.ones([2])
plus_delta = pv_0.assign_add(deltas_a)
minus_delta = pv_0.assign_sub(deltas_b)
assign_ones = pv_0.assign(ones)
c_0 = constant_op.constant([2.0])
c_1 = constant_op.constant([3.0])
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
def testV1V2Equal(self):
v1 = variables.VariableAggregation
v2 = variables.VariableAggregationV2
self.assertEqual(v1.NONE, v2.NONE)
self.assertEqual(v1.SUM, v2.SUM)
self.assertEqual(v1.MEAN, v2.MEAN)
self.assertEqual(v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v2.NONE, v1.NONE)
self.assertEqual(v2.SUM, v1.SUM)
self.assertEqual(v2.MEAN, v1.MEAN)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_REPLICA)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_TOWER)
self.assertEqual(hash(v1.NONE), hash(v2.NONE))
self.assertEqual(hash(v1.SUM), hash(v2.SUM))
self.assertEqual(hash(v1.MEAN), hash(v2.MEAN))
self.assertEqual(hash(v1.ONLY_FIRST_REPLICA), hash(v2.ONLY_FIRST_REPLICA))
self.assertEqual(hash(v1.ONLY_FIRST_TOWER), hash(v2.ONLY_FIRST_REPLICA))
if __name__ == "__main__":
test.main()
| 38.926327 | 94 | 0.676863 | [ "Apache-2.0" ] | ArnovanHilten/tensorflow | tensorflow/python/kernel_tests/variables_test.py | 35,929 | Python
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include".split(';') if "/home/autolabor/catkin_ws/devel/include;/home/autolabor/catkin_ws/src/navigation/amcl/include" != "" else []
PROJECT_CATKIN_DEPENDS = "rosbag;roscpp;dynamic_reconfigure;tf;nav_msgs;std_srvs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lamcl_sensors;-lamcl_map;-lamcl_pf".split(';') if "-lamcl_sensors;-lamcl_map;-lamcl_pf" != "" else []
PROJECT_NAME = "amcl"
PROJECT_SPACE_DIR = "/home/autolabor/catkin_ws/devel"
PROJECT_VERSION = "1.14.3"
| 75.555556 | 253 | 0.777941 | [ "BSD-2-Clause" ] | lty1994/ros_project | build/navigation/amcl/catkin_generated/pkg.develspace.context.pc.py | 680 | Python
import re
from pygbif.gbifutils import gbif_baseurl, bool2str, requests_argset, gbif_GET
def search(
taxonKey=None,
repatriated=None,
kingdomKey=None,
phylumKey=None,
classKey=None,
orderKey=None,
familyKey=None,
genusKey=None,
subgenusKey=None,
scientificName=None,
country=None,
publishingCountry=None,
hasCoordinate=None,
typeStatus=None,
recordNumber=None,
lastInterpreted=None,
continent=None,
geometry=None,
recordedBy=None,
recordedByID=None,
identifiedByID=None,
basisOfRecord=None,
datasetKey=None,
eventDate=None,
catalogNumber=None,
year=None,
month=None,
decimalLatitude=None,
decimalLongitude=None,
elevation=None,
depth=None,
institutionCode=None,
collectionCode=None,
hasGeospatialIssue=None,
issue=None,
q=None,
spellCheck=None,
mediatype=None,
limit=300,
offset=0,
establishmentMeans=None,
facet=None,
facetMincount=None,
facetMultiselect=None,
timeout=60,
**kwargs
):
"""
Search GBIF occurrences
    :param taxonKey: [int] A taxon key from the GBIF backbone
:param q: [str] Simple search parameter. The value for this parameter can be a simple word or a phrase.
    :param spellCheck: [bool] If ``True`` ask GBIF to check your spelling of the value passed to the ``q`` parameter.
        IMPORTANT: This only checks the input to the ``q`` parameter, and no others. Default: ``False``
:param repatriated: [str] Searches for records whose publishing country is different to the country where the record was recorded in
:param kingdomKey: [int] Kingdom classification key
:param phylumKey: [int] Phylum classification key
:param classKey: [int] Class classification key
:param orderKey: [int] Order classification key
:param familyKey: [int] Family classification key
:param genusKey: [int] Genus classification key
:param subgenusKey: [int] Subgenus classification key
:param scientificName: [str] A scientific name from the GBIF backbone. All included and synonym taxa are included in the search.
:param datasetKey: [str] The occurrence dataset key (a uuid)
    :param catalogNumber: [str] An identifier of any form assigned by the source within a physical collection or digital dataset for the record, which may not be unique, but should be fairly unique in combination with the institution and collection code.
:param recordedBy: [str] The person who recorded the occurrence.
:param recordedByID: [str] Identifier (e.g. ORCID) for the person who recorded the occurrence
:param identifiedByID: [str] Identifier (e.g. ORCID) for the person who provided the taxonomic identification of the occurrence.
    :param collectionCode: [str] An identifier of any form assigned by the source to identify the physical collection or digital dataset uniquely within the context of an institution.
    :param institutionCode: [str] An identifier of any form assigned by the source to identify the institution the record belongs to. Not guaranteed to be unique.
:param country: [str] The 2-letter country code (as per ISO-3166-1) of the country in which the occurrence was recorded. See here http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in our BasisOfRecord enum here http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html Acceptable values are:
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``MATERIAL_CITATION`` An occurrence record based on a reference to a scholarly publication.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``OCCURRENCE`` An existence of an organism at a particular place and time. No more specific basis.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
:param eventDate: [date] Occurrence date in ISO 8601 format: yyyy, yyyy-MM, yyyy-MM-dd, or
MM-dd. Supports range queries, smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990``
wouldn't work)
:param year: [int] The 4 digit year. A year of 98 will be interpreted as AD 98. Supports range queries,
smaller,larger (e.g., ``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param month: [int] The month of the year, starting with 1 for January. Supports range queries,
smaller,larger (e.g., ``1,2``, whereas ``2,1`` wouldn't work)
:param decimalLatitude: [float] Latitude in decimals between -90 and 90 based on WGS 84.
Supports range queries, smaller,larger (e.g., ``25,30``, whereas ``30,25`` wouldn't work)
:param decimalLongitude: [float] Longitude in decimals between -180 and 180 based on WGS 84.
Supports range queries (e.g., ``-0.4,-0.2``, whereas ``-0.2,-0.4`` wouldn't work).
:param publishingCountry: [str] The 2-letter country code (as per ISO-3166-1) of the
country in which the occurrence was recorded.
:param elevation: [int/str] Elevation in meters above sea level. Supports range queries, smaller,larger
(e.g., ``5,30``, whereas ``30,5`` wouldn't work)
:param depth: [int/str] Depth in meters relative to elevation. For example 10 meters below a
lake surface with given elevation. Supports range queries, smaller,larger (e.g., ``5,30``,
whereas ``30,5`` wouldn't work)
:param geometry: [str] Searches for occurrences inside a polygon described in Well Known
        Text (WKT) format. A WKT shape written as either POINT, LINESTRING, LINEARRING,
        POLYGON, or MULTIPOLYGON. Example of a polygon: ``((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))`` would be queried as http://bit.ly/1BzNwDq.
Polygons must have counter-clockwise ordering of points.
:param hasGeospatialIssue: [bool] Includes/excludes occurrence records which contain spatial
issues (as determined in our record interpretation), i.e. ``hasGeospatialIssue=TRUE``
returns only those records with spatial issues while ``hasGeospatialIssue=FALSE`` includes
only records without spatial issues. The absence of this parameter returns any
record with or without spatial issues.
:param issue: [str] One or more of many possible issues with each occurrence record. See
Details. Issues passed to this parameter filter results by the issue.
    :param hasCoordinate: [bool] Return only occurrence records with lat/long data (``True``) or
all records (``False``, default).
:param typeStatus: [str] Type status of the specimen. One of many options. See ?typestatus
:param recordNumber: [int] Number recorded by collector of the data, different from GBIF record
        number. See http://rs.tdwg.org/dwc/terms/#recordNumber for more info
:param lastInterpreted: [date] Date the record was last modified in GBIF, in ISO 8601 format:
yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Supports range queries, smaller,larger (e.g.,
``1990,1991``, whereas ``1991,1990`` wouldn't work)
:param continent: [str] Continent. One of ``africa``, ``antarctica``, ``asia``, ``europe``, ``north_america``
        (North America includes the Caribbean and reaches down and includes Panama), ``oceania``,
or ``south_america``
:param fields: [str] Default (``all``) returns all fields. ``minimal`` returns just taxon name,
key, latitude, and longitude. Or specify each field you want returned by name, e.g.
        ``fields = ['name','latitude','elevation']``.
    :param mediatype: [str] Media type. Default is ``None``, so no filtering on mediatype. Options:
        ``None``, ``MovingImage``, ``Sound``, and ``StillImage``
:param limit: [int] Number of results to return. Default: ``300``
:param offset: [int] Record to start at. Default: ``0``
    :param facet: [str] a string or list of strings naming one or more facets
:param establishmentMeans: [str] EstablishmentMeans, possible values include: INTRODUCED,
INVASIVE, MANAGED, NATIVE, NATURALISED, UNCERTAIN
:param facetMincount: [int] minimum number of records to be included in the faceting results
:param facetMultiselect: [bool] Set to ``True`` to still return counts for values that are not currently
filtered. See examples. Default: ``False``
:return: A dictionary
Usage::
from pygbif import occurrences
occurrences.search(taxonKey = 3329049)
# Return 2 results, this is the default by the way
occurrences.search(taxonKey=3329049, limit=2)
# Instead of getting a taxon key first, you can search for a name directly
# However, note that using this approach (with `scientificName="..."`)
# you are getting synonyms too. The results for using `scientifcName` and
# `taxonKey` parameters are the same in this case, but I wouldn't be surprised if for some
# names they return different results
occurrences.search(scientificName = 'Ursus americanus')
from pygbif import species
key = species.name_backbone(name = 'Ursus americanus', rank='species')['usageKey']
occurrences.search(taxonKey = key)
# Search by dataset key
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', limit=20)
# Search by catalog number
occurrences.search(catalogNumber="49366", limit=20)
# occurrences.search(catalogNumber=["49366","Bird.27847588"], limit=20)
# Use paging parameters (limit and offset) to page. Note the different results
# for the two queries below.
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=10, limit=5)
occurrences.search(datasetKey='7b5d6a48-f762-11e1-a439-00145eb45e9a', offset=20, limit=5)
# Many dataset keys
# occurrences.search(datasetKey=["50c9509d-22c7-4a22-a47d-8c48425ef4a7", "7b5d6a48-f762-11e1-a439-00145eb45e9a"], limit=20)
# Search by collector name
res = occurrences.search(recordedBy="smith", limit=20)
[ x['recordedBy'] for x in res['results'] ]
# Many collector names
# occurrences.search(recordedBy=["smith","BJ Stacey"], limit=20)
# recordedByID
occurrences.search(recordedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)
# identifiedByID
occurrences.search(identifiedByID="https://orcid.org/0000-0003-1691-239X", limit = 3)
# Search for many species
splist = ['Cyanocitta stelleri', 'Junco hyemalis', 'Aix sponsa']
keys = [ species.name_suggest(x)[0]['key'] for x in splist ]
out = [ occurrences.search(taxonKey = x, limit=1) for x in keys ]
[ x['results'][0]['speciesKey'] for x in out ]
# Search - q parameter
occurrences.search(q = "kingfisher", limit=20)
        ## spell check - only works with the `q` parameter
### spelled correctly - same result as above call
occurrences.search(q = "kingfisher", limit=20, spellCheck = True)
### spelled incorrectly - stops with suggested spelling
occurrences.search(q = "kajsdkla", limit=20, spellCheck = True)
### spelled incorrectly - stops with many suggested spellings
### and number of results for each
occurrences.search(q = "helir", limit=20, spellCheck = True)
        # Search on latitude and longitude
occurrences.search(decimalLatitude=50, decimalLongitude=10, limit=2)
# Search on a bounding box
## in well known text format
occurrences.search(geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
from pygbif import species
key = species.name_suggest(q='Aesculus hippocastanum')[0]['key']
occurrences.search(taxonKey=key, geometry='POLYGON((30.1 10.1, 10 20, 20 40, 40 40, 30.1 10.1))', limit=20)
## multipolygon
wkt = 'MULTIPOLYGON(((-123 38, -123 43, -116 43, -116 38, -123 38)),((-97 41, -97 45, -93 45, -93 41, -97 41)))'
occurrences.search(geometry = wkt, limit = 20)
# Search on country
occurrences.search(country='US', limit=20)
occurrences.search(country='FR', limit=20)
occurrences.search(country='DE', limit=20)
# Get only occurrences with lat/long data
occurrences.search(taxonKey=key, hasCoordinate=True, limit=20)
# Get only occurrences that were recorded as living specimens
occurrences.search(taxonKey=key, basisOfRecord="LIVING_SPECIMEN", hasCoordinate=True, limit=20)
# Get occurrences for a particular eventDate
occurrences.search(taxonKey=key, eventDate="2013", limit=20)
occurrences.search(taxonKey=key, year="2013", limit=20)
occurrences.search(taxonKey=key, month="6", limit=20)
# Get occurrences based on depth
key = species.name_backbone(name='Salmo salar', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, depth="5", limit=20)
# Get occurrences based on elevation
key = species.name_backbone(name='Puma concolor', kingdom='animals')['usageKey']
occurrences.search(taxonKey=key, elevation=50, hasCoordinate=True, limit=20)
# Get occurrences based on institutionCode
occurrences.search(institutionCode="TLMF", limit=20)
# Get occurrences based on collectionCode
occurrences.search(collectionCode="Floristic Databases MV - Higher Plants", limit=20)
# Get only those occurrences with spatial issues
occurrences.search(taxonKey=key, hasGeospatialIssue=True, limit=20)
# Search using a query string
occurrences.search(q="kingfisher", limit=20)
# Range queries
## See Detail for parameters that support range queries
### this is a range depth, with lower/upper limits in character string
occurrences.search(depth='50,100')
## Range search with year
occurrences.search(year='1999,2000', limit=20)
## Range search with latitude
occurrences.search(decimalLatitude='29.59,29.6')
# Search by specimen type status
## Look for possible values of the typeStatus parameter looking at the typestatus dataset
occurrences.search(typeStatus = 'allotype')
# Search by specimen record number
## This is the record number of the person/group that submitted the data, not GBIF's numbers
## You can see that many different groups have record number 1, so not super helpful
occurrences.search(recordNumber = 1)
# Search by last time interpreted: Date the record was last modified in GBIF
## The lastInterpreted parameter accepts ISO 8601 format dates, including
## yyyy, yyyy-MM, yyyy-MM-dd, or MM-dd. Range queries are accepted for lastInterpreted
occurrences.search(lastInterpreted = '2014-04-01')
# Search by continent
## One of africa, antarctica, asia, europe, north_america, oceania, or south_america
occurrences.search(continent = 'south_america')
occurrences.search(continent = 'africa')
occurrences.search(continent = 'oceania')
occurrences.search(continent = 'antarctica')
# Search for occurrences with images
occurrences.search(mediatype = 'StillImage')
occurrences.search(mediatype = 'MovingImage')
x = occurrences.search(mediatype = 'Sound')
[z['media'] for z in x['results']]
# Query based on issues
occurrences.search(taxonKey=1, issue='DEPTH_UNLIKELY')
occurrences.search(taxonKey=1, issue=['DEPTH_UNLIKELY','COORDINATE_ROUNDED'])
# Show all records in the Arizona State Lichen Collection that cant be matched to the GBIF
# backbone properly:
occurrences.search(datasetKey='84c0e1a0-f762-11e1-a439-00145eb45e9a', issue=['TAXON_MATCH_NONE','TAXON_MATCH_HIGHERRANK'])
# If you pass in an invalid polygon you get hopefully informative errors
### the WKT string is fine, but GBIF says bad polygon
wkt = 'POLYGON((-178.59375 64.83258989321493,-165.9375 59.24622380205539,
-147.3046875 59.065977905449806,-130.78125 51.04484764446178,-125.859375 36.70806354647625,
-112.1484375 23.367471303759686,-105.1171875 16.093320185359257,-86.8359375 9.23767076398516,
-82.96875 2.9485268155066175,-82.6171875 -14.812060061226388,-74.8828125 -18.849111862023985,
-77.34375 -47.661687803329166,-84.375 -49.975955187343295,174.7265625 -50.649460483096114,
179.296875 -42.19189902447192,-176.8359375 -35.634976650677295,176.8359375 -31.835565983656227,
163.4765625 -6.528187613695323,152.578125 1.894796132058301,135.703125 4.702353722559447,
127.96875 15.077427674847987,127.96875 23.689804541429606,139.921875 32.06861069132688,
149.4140625 42.65416193033991,159.2578125 48.3160811030533,168.3984375 57.019804336633165,
178.2421875 59.95776046458139,-179.6484375 61.16708631440347,-178.59375 64.83258989321493))'
occurrences.search(geometry = wkt)
# Faceting
## return no occurrence records with limit=0
x = occurrences.search(facet = "country", limit = 0)
x['facets']
## also return occurrence records
x = occurrences.search(facet = "establishmentMeans", limit = 10)
x['facets']
x['results']
## multiple facet variables
x = occurrences.search(facet = ["country", "basisOfRecord"], limit = 10)
x['results']
x['facets']
x['facets']['country']
x['facets']['basisOfRecord']
x['facets']['basisOfRecord']['count']
## set a minimum facet count
        x = occurrences.search(facet = "country", facetMincount = 30000000, limit = 0)
x['facets']
## paging per each faceted variable
### do so by passing in variables like "country" + "_facetLimit" = "country_facetLimit"
### or "country" + "_facetOffset" = "country_facetOffset"
x = occurrences.search(
facet = ["country", "basisOfRecord", "hasCoordinate"],
country_facetLimit = 3,
basisOfRecord_facetLimit = 6,
limit = 0
)
x['facets']
# requests package options
## There's an acceptable set of requests options (['timeout', 'cookies', 'auth',
## 'allow_redirects', 'proxies', 'verify', 'stream', 'cert']) you can pass
## in via **kwargs, e.g., set a timeout. Default timeout set to 60 seconds.
x = occurrences.search(timeout = 1)
"""
url = gbif_baseurl + "occurrence/search"
args = {
"taxonKey": taxonKey,
"repatriated": repatriated,
"kingdomKey": kingdomKey,
"phylumKey": phylumKey,
"classKey": classKey,
"orderKey": orderKey,
"familyKey": familyKey,
"genusKey": genusKey,
"subgenusKey": subgenusKey,
"scientificName": scientificName,
"country": country,
"publishingCountry": publishingCountry,
"hasCoordinate": bool2str(hasCoordinate),
"typeStatus": typeStatus,
"recordNumber": recordNumber,
"lastInterpreted": lastInterpreted,
"continent": continent,
"geometry": geometry,
"recordedBy": recordedBy,
"recordedByID": recordedByID,
"identifiedByID": identifiedByID,
"basisOfRecord": basisOfRecord,
"datasetKey": datasetKey,
"eventDate": eventDate,
"catalogNumber": catalogNumber,
"year": year,
"month": month,
"decimalLatitude": decimalLatitude,
"decimalLongitude": decimalLongitude,
"elevation": elevation,
"depth": depth,
"institutionCode": institutionCode,
"collectionCode": collectionCode,
"hasGeospatialIssue": bool2str(hasGeospatialIssue),
"issue": issue,
"q": q,
"spellCheck": bool2str(spellCheck),
"mediatype": mediatype,
"limit": limit,
"offset": offset,
"establishmentMeans": establishmentMeans,
"facetMincount": facetMincount,
"facet": facet,
"facetMultiselect": bool2str(facetMultiselect),
}
gbif_kwargs = {key: kwargs[key] for key in kwargs if key not in requests_argset}
    if gbif_kwargs:
xx = dict(
zip([re.sub("_", ".", x) for x in gbif_kwargs.keys()], gbif_kwargs.values())
)
args.update(xx)
kwargs = {key: kwargs[key] for key in kwargs if key in requests_argset}
out = gbif_GET(url, args, **kwargs)
return out
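# Illustrative sketch (not part of the pygbif API): the kwargs handling above renames any
# extra keyword argument that is not a `requests` option by replacing underscores with dots
# before it is sent to GBIF, which is how per-facet paging arguments such as
# `country_facetLimit` become `country.facetLimit`. The helper below reproduces that
# translation on its own, with made-up sample values, so it can be checked offline.
def _illustrate_facet_kwarg_translation():
    extra_kwargs = {"country_facetLimit": 3, "basisOfRecord_facetOffset": 6}
    translated = dict(
        zip([re.sub("_", ".", x) for x in extra_kwargs.keys()], extra_kwargs.values())
    )
    # translated == {'country.facetLimit': 3, 'basisOfRecord.facetOffset': 6}
    return translated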
| 50.844282 | 250 | 0.676748 | [ "MIT" ] | livatras/pygbif | pygbif/occurrences/search.py | 20,897 | Python
# coding: utf-8
"""
SCORM Cloud Rest API
REST API used for SCORM Cloud integrations.
OpenAPI spec version: 2.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class DestinationIdSchema(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, id=None, data=None):
"""
DestinationIdSchema - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'str',
'data': 'DestinationSchema'
}
self.attribute_map = {
'id': 'id',
'data': 'data'
}
self._id = id
self._data = data
@property
def id(self):
"""
Gets the id of this DestinationIdSchema.
:return: The id of this DestinationIdSchema.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this DestinationIdSchema.
:param id: The id of this DestinationIdSchema.
:type: str
"""
self._id = id
@property
def data(self):
"""
Gets the data of this DestinationIdSchema.
:return: The data of this DestinationIdSchema.
:rtype: DestinationSchema
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this DestinationIdSchema.
:param data: The data of this DestinationIdSchema.
:type: DestinationSchema
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, DestinationIdSchema):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
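# Minimal usage sketch (an assumption, not produced by the code generator): constructing the
# model with only an id and round-tripping it through to_dict()/to_str(). The value "dest-1"
# is made up.
def _example_destination_id_schema():
    schema = DestinationIdSchema(id="dest-1")
    assert schema.to_dict() == {"id": "dest-1", "data": None}
    return schema.to_str()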
| 24.387324 | 77 | 0.524978 | [ "Apache-2.0" ] | ryanhope2/scormcloud-api-v2-client-python | rustici_software_cloud_v2/models/destination_id_schema.py | 3,463 | Python
from __future__ import print_function
import os
import time
import random
import datetime
import scipy.misc
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from datetime import datetime
from util.util import *
from util.BasicConvLSTMCell import *
class DEBLUR(object):
def __init__(self, args):
self.args = args
self.n_levels = 3
self.scale = 0.5
self.chns = 3 if self.args.model == 'color' else 1 # input / output channels
# if args.phase == 'train':
self.crop_size = 256
self.data_list = open(args.datalist, 'rt').read().splitlines()
self.data_list = list(map(lambda x: x.split(' '), self.data_list))
random.shuffle(self.data_list)
self.train_dir = os.path.join('./checkpoints', args.model)
if not os.path.exists(self.train_dir):
os.makedirs(self.train_dir)
self.batch_size = args.batch_size
self.epoch = args.epoch
self.data_size = (len(self.data_list)) // self.batch_size
self.max_steps = int(self.epoch * self.data_size)
self.learning_rate = args.learning_rate
def input_producer(self, batch_size=10):
def read_data():
img_a = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[0]])),
channels=3)
img_b = tf.image.decode_image(tf.read_file(tf.string_join(['./training_set/', self.data_queue[1]])),
channels=3)
img_a, img_b = preprocessing([img_a, img_b])
return img_a, img_b
def preprocessing(imgs):
imgs = [tf.cast(img, tf.float32) / 255.0 for img in imgs]
if self.args.model != 'color':
imgs = [tf.image.rgb_to_grayscale(img) for img in imgs]
img_crop = tf.unstack(tf.random_crop(tf.stack(imgs, axis=0), [2, self.crop_size, self.crop_size, self.chns]),
axis=0)
return img_crop
with tf.variable_scope('input'):
List_all = tf.convert_to_tensor(self.data_list, dtype=tf.string)
gt_list = List_all[:, 0]
in_list = List_all[:, 1]
self.data_queue = tf.train.slice_input_producer([in_list, gt_list], capacity=20)
image_in, image_gt = read_data()
batch_in, batch_gt = tf.train.batch([image_in, image_gt], batch_size=batch_size, num_threads=8, capacity=20)
return batch_in, batch_gt
def generator(self, inputs, reuse=False, scope='g_net'):
n, h, w, c = inputs.get_shape().as_list()
if self.args.model == 'lstm':
with tf.variable_scope('LSTM'):
cell = BasicConvLSTMCell([h / 4, w / 4], [3, 3], 128)
rnn_state = cell.zero_state(batch_size=self.batch_size, dtype=tf.float32)
x_unwrap = []
with tf.variable_scope(scope, reuse=reuse):
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
activation_fn=tf.nn.relu, padding='SAME', normalizer_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True),
biases_initializer=tf.constant_initializer(0.0)):
inp_pred = inputs
for i in xrange(self.n_levels):
scale = self.scale ** (self.n_levels - i - 1)
hi = int(round(h * scale))
wi = int(round(w * scale))
inp_blur = tf.image.resize_images(inputs, [hi, wi], method=0)
inp_pred = tf.stop_gradient(tf.image.resize_images(inp_pred, [hi, wi], method=0))
inp_all = tf.concat([inp_blur, inp_pred], axis=3, name='inp')
if self.args.model == 'lstm':
rnn_state = tf.image.resize_images(rnn_state, [hi // 4, wi // 4], method=0)
# encoder
conv1_1 = slim.conv2d(inp_all, 32, [5, 5], scope='enc1_1')
conv1_2 = ResnetBlock(conv1_1, 32, 5, scope='enc1_2')
conv1_3 = ResnetBlock(conv1_2, 32, 5, scope='enc1_3')
conv1_4 = ResnetBlock(conv1_3, 32, 5, scope='enc1_4')
conv2_1 = slim.conv2d(conv1_4, 64, [5, 5], stride=2, scope='enc2_1')
conv2_2 = ResnetBlock(conv2_1, 64, 5, scope='enc2_2')
conv2_3 = ResnetBlock(conv2_2, 64, 5, scope='enc2_3')
conv2_4 = ResnetBlock(conv2_3, 64, 5, scope='enc2_4')
conv3_1 = slim.conv2d(conv2_4, 128, [5, 5], stride=2, scope='enc3_1')
conv3_2 = ResnetBlock(conv3_1, 128, 5, scope='enc3_2')
conv3_3 = ResnetBlock(conv3_2, 128, 5, scope='enc3_3')
conv3_4 = ResnetBlock(conv3_3, 128, 5, scope='enc3_4')
if self.args.model == 'lstm':
deconv3_4, rnn_state = cell(conv3_4, rnn_state)
else:
deconv3_4 = conv3_4
# decoder
deconv3_3 = ResnetBlock(deconv3_4, 128, 5, scope='dec3_3')
deconv3_2 = ResnetBlock(deconv3_3, 128, 5, scope='dec3_2')
deconv3_1 = ResnetBlock(deconv3_2, 128, 5, scope='dec3_1')
deconv2_4 = slim.conv2d_transpose(deconv3_1, 64, [4, 4], stride=2, scope='dec2_4')
cat2 = deconv2_4 + conv2_4
deconv2_3 = ResnetBlock(cat2, 64, 5, scope='dec2_3')
deconv2_2 = ResnetBlock(deconv2_3, 64, 5, scope='dec2_2')
deconv2_1 = ResnetBlock(deconv2_2, 64, 5, scope='dec2_1')
deconv1_4 = slim.conv2d_transpose(deconv2_1, 32, [4, 4], stride=2, scope='dec1_4')
cat1 = deconv1_4 + conv1_4
deconv1_3 = ResnetBlock(cat1, 32, 5, scope='dec1_3')
deconv1_2 = ResnetBlock(deconv1_3, 32, 5, scope='dec1_2')
deconv1_1 = ResnetBlock(deconv1_2, 32, 5, scope='dec1_1')
inp_pred = slim.conv2d(deconv1_1, self.chns, [5, 5], activation_fn=None, scope='dec1_0')
if i >= 0:
x_unwrap.append(inp_pred)
if i == 0:
tf.get_variable_scope().reuse_variables()
return x_unwrap
def build_model(self):
img_in, img_gt = self.input_producer(self.batch_size)
tf.summary.image('img_in', im2uint8(img_in))
tf.summary.image('img_gt', im2uint8(img_gt))
print('img_in, img_gt', img_in.get_shape(), img_gt.get_shape())
# generator
x_unwrap = self.generator(img_in, reuse=False, scope='g_net')
# calculate multi-scale loss
self.loss_total = 0
for i in xrange(self.n_levels):
_, hi, wi, _ = x_unwrap[i].get_shape().as_list()
gt_i = tf.image.resize_images(img_gt, [hi, wi], method=0)
loss = tf.reduce_mean((gt_i - x_unwrap[i]) ** 2)
self.loss_total += loss
tf.summary.image('out_' + str(i), im2uint8(x_unwrap[i]))
tf.summary.scalar('loss_' + str(i), loss)
# losses
tf.summary.scalar('loss_total', self.loss_total)
# training vars
all_vars = tf.trainable_variables()
self.all_vars = all_vars
self.g_vars = [var for var in all_vars if 'g_net' in var.name]
self.lstm_vars = [var for var in all_vars if 'LSTM' in var.name]
for var in all_vars:
print(var.name)
def train(self):
def get_optimizer(loss, global_step=None, var_list=None, is_gradient_clip=False):
train_op = tf.train.AdamOptimizer(self.lr)
if is_gradient_clip:
grads_and_vars = train_op.compute_gradients(loss, var_list=var_list)
unchanged_gvs = [(grad, var) for grad, var in grads_and_vars if not 'LSTM' in var.name]
rnn_grad = [grad for grad, var in grads_and_vars if 'LSTM' in var.name]
rnn_var = [var for grad, var in grads_and_vars if 'LSTM' in var.name]
capped_grad, _ = tf.clip_by_global_norm(rnn_grad, clip_norm=3)
capped_gvs = list(zip(capped_grad, rnn_var))
train_op = train_op.apply_gradients(grads_and_vars=capped_gvs + unchanged_gvs, global_step=global_step)
else:
train_op = train_op.minimize(loss, global_step, var_list)
return train_op
global_step = tf.Variable(initial_value=0, dtype=tf.int32, trainable=False)
self.global_step = global_step
# build model
self.build_model()
# learning rate decay
self.lr = tf.train.polynomial_decay(self.learning_rate, global_step, self.max_steps, end_learning_rate=0.0,
power=0.3)
tf.summary.scalar('learning_rate', self.lr)
# training operators
train_gnet = get_optimizer(self.loss_total, global_step, self.all_vars)
# session and thread
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self.sess = sess
sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=50, keep_checkpoint_every_n_hours=1)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# training summary
summary_op = tf.summary.merge_all()
summary_writer = tf.summary.FileWriter(self.train_dir, sess.graph, flush_secs=30)
for step in xrange(sess.run(global_step), self.max_steps + 1):
start_time = time.time()
# update G network
_, loss_total_val = sess.run([train_gnet, self.loss_total])
duration = time.time() - start_time
# print loss_value
assert not np.isnan(loss_total_val), 'Model diverged with loss = NaN'
if step % 5 == 0:
num_examples_per_step = self.batch_size
examples_per_sec = num_examples_per_step / duration
sec_per_batch = float(duration)
format_str = ('%s: step %d, loss = (%.5f; %.5f, %.5f)(%.1f data/s; %.3f s/bch)')
print(format_str % (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), step, loss_total_val, 0.0,
0.0, examples_per_sec, sec_per_batch))
if step % 20 == 0:
# summary_str = sess.run(summary_op, feed_dict={inputs:batch_input, gt:batch_gt})
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, global_step=step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or step == self.max_steps:
checkpoint_path = os.path.join(self.train_dir, 'checkpoints')
self.save(sess, checkpoint_path, step)
def save(self, sess, checkpoint_dir, step):
model_name = "deblur.model"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(sess, os.path.join(checkpoint_dir, model_name), global_step=step)
def load(self, sess, checkpoint_dir, step=None):
print(" [*] Reading checkpoints...")
model_name = "deblur.model"
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if step is not None:
ckpt_name = model_name + '-' + str(step)
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading intermediate checkpoints... Success")
return str(step)
elif ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
ckpt_iter = ckpt_name.split('-')[1]
self.saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
print(" [*] Reading updated checkpoints... Success")
return ckpt_iter
else:
print(" [*] Reading checkpoints... ERROR")
return False
def test(self, height, width, input_path, output_path):
if not os.path.exists(output_path):
os.makedirs(output_path)
imgsName = sorted(os.listdir(input_path))
H, W = height, width
inp_chns = 3 if self.args.model == 'color' else 1
self.batch_size = 1 if self.args.model == 'color' else 3
inputs = tf.placeholder(shape=[self.batch_size, H, W, inp_chns], dtype=tf.float32)
outputs = self.generator(inputs, reuse=False)
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
self.saver = tf.train.Saver()
self.load(sess, self.train_dir, step=523000)
for imgName in imgsName:
blur = scipy.misc.imread(os.path.join(input_path, imgName))
h, w, c = blur.shape
# make sure the width is larger than the height
rot = False
if h > w:
blur = np.transpose(blur, [1, 0, 2])
rot = True
h = int(blur.shape[0])
w = int(blur.shape[1])
resize = False
if h > H or w > W:
scale = min(1.0 * H / h, 1.0 * W / w)
new_h = int(h * scale)
new_w = int(w * scale)
blur = scipy.misc.imresize(blur, [new_h, new_w], 'bicubic')
resize = True
blurPad = np.pad(blur, ((0, H - new_h), (0, W - new_w), (0, 0)), 'edge')
else:
blurPad = np.pad(blur, ((0, H - h), (0, W - w), (0, 0)), 'edge')
blurPad = np.expand_dims(blurPad, 0)
if self.args.model != 'color':
blurPad = np.transpose(blurPad, (3, 1, 2, 0))
start = time.time()
deblur = sess.run(outputs, feed_dict={inputs: blurPad / 255.0})
duration = time.time() - start
print('Saving results: %s ... %4.3fs' % (os.path.join(output_path, imgName), duration))
res = deblur[-1]
if self.args.model != 'color':
res = np.transpose(res, (3, 1, 2, 0))
res = im2uint8(res[0, :, :, :])
# crop the image into original size
if resize:
res = res[:new_h, :new_w, :]
res = scipy.misc.imresize(res, [h, w], 'bicubic')
else:
res = res[:h, :w, :]
if rot:
res = np.transpose(res, [1, 0, 2])
scipy.misc.imsave(os.path.join(output_path, imgName), res)
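# Illustrative sketch (not part of the original model): the generator runs a coarse-to-fine
# pyramid in which level i works at resolution round(size * scale ** (n_levels - i - 1)).
# The helper below only computes that schedule for the default settings (n_levels=3,
# scale=0.5); it builds no TensorFlow ops.
def _multiscale_schedule(height, width, n_levels=3, scale=0.5):
    sizes = []
    for i in range(n_levels):
        s = scale ** (n_levels - i - 1)
        sizes.append((int(round(height * s)), int(round(width * s))))
    # e.g. _multiscale_schedule(256, 256) -> [(64, 64), (128, 128), (256, 256)]
    return sizes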
| 46.30094 | 121 | 0.565471 | [ "MIT" ] | DagothHertil/NNVEP-SRN-Deblur | models/model.py | 14,770 | Python
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
import numpy as np
import time
import os
import copy
import argparse
from azureml.core.run import Run
from azureml.core import Dataset, Workspace
from azureml.core.model import Model
# get the Azure ML run object
run = Run.get_context()
ws = run.experiment.workspace
def load_data(data_dir):
"""Load the train/val data."""
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x),
data_transforms[x])
for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
shuffle=True, num_workers=4)
for x in ['train', 'val']}
dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
class_names = image_datasets['train'].classes
return dataloaders, dataset_sizes, class_names
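# Usage sketch (an assumption, not called by the training script): one way to sanity-check
# the loaders returned by load_data. The 'fowl_data' folder name matches download_data()
# below; the batch shape follows the batch_size=4 and 224x224 crops configured above.
def _peek_at_one_batch(data_dir='fowl_data'):
    dataloaders, dataset_sizes, class_names = load_data(data_dir)
    images, labels = next(iter(dataloaders['train']))
    print(images.shape, labels.shape, class_names)  # e.g. torch.Size([4, 3, 224, 224])
    return images, labels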
def train_model(model, criterion, optimizer, scheduler, num_epochs, data_dir):
"""Train the model."""
# load training/validation data
dataloaders, dataset_sizes, class_names = load_data(data_dir)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch, num_epochs - 1))
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
print('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = copy.deepcopy(model.state_dict())
# log the best val accuracy to AML run
run.log('best_val_acc', np.float(best_acc))
print()
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
print('Best val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model
def fine_tune_model(num_epochs, data_dir, learning_rate, momentum):
"""Load a pretrained model and reset the final fully connected layer."""
# log the hyperparameter metrics to the AML run
run.log('lr', np.float(learning_rate))
run.log('momentum', np.float(momentum))
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 2) # only 2 classes to predict
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model_ft = model_ft.to(device)
criterion = nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(),
lr=learning_rate, momentum=momentum)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(
optimizer_ft, step_size=7, gamma=0.1)
model = train_model(model_ft, criterion, optimizer_ft,
exp_lr_scheduler, num_epochs, data_dir)
return model
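# Illustrative sketch (not used by the training script): the StepLR schedule configured in
# fine_tune_model multiplies the learning rate by gamma=0.1 every 7 epochs. The helper below
# computes that decay arithmetically so it can be inspected without building a model.
def _step_lr_schedule(base_lr=0.001, step_size=7, gamma=0.1, num_epochs=25):
    # epochs 0-6 -> base_lr, epochs 7-13 -> base_lr * 0.1, epochs 14-20 -> base_lr * 0.01, ...
    return [base_lr * (gamma ** (epoch // step_size)) for epoch in range(num_epochs)]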
def download_data():
dataset = Dataset.get_by_name(ws, name='pytorchdataset')
dataset.download(target_path='fowl_data', overwrite=True)
return 'fowl_data'
# def download_data():
# """Download and extract the training data."""
# import urllib
# from zipfile import ZipFile
# # download data
# data_file = './fowl_data.zip'
# download_url = 'https://azureopendatastorage.blob.core.windows.net/testpublic/temp/fowl_data.zip'
# urllib.request.urlretrieve(download_url, filename=data_file)
# # extract files
# with ZipFile(data_file, 'r') as zip:
# print('extracting files...')
# zip.extractall()
# print('finished extracting')
# data_dir = zip.namelist()[0]
# # delete zip file
# os.remove(data_file)
# return data_dir
def main():
import torch
print("Torch version:", torch.__version__)
print(torch.cuda.is_available())
# get command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=25,
help='number of epochs to train')
parser.add_argument('--output_dir', type=str, help='output directory')
parser.add_argument('--learning_rate', type=float,
default=0.001, help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
args = parser.parse_args()
data_dir = download_data()
print("data directory is: " + data_dir)
model = fine_tune_model(args.num_epochs, data_dir,
args.learning_rate, args.momentum)
os.makedirs(args.output_dir, exist_ok=True)
torch.save(model, os.path.join(args.output_dir, 'model.pt'))
model = Model.register(model_name='my_model', model_path=os.path.join(args.output_dir, 'model.pt'), workspace = ws)
if __name__ == "__main__":
main()
| 34.215962 | 119 | 0.612514 | [ "MIT" ] | hudua/azureml | azure-ml-pipelines/pytorch/training-folder/pytorch_train.py | 7,288 | Python
from mooquant import bar, strategy
from mooquant.analyzer import drawdown, returns, sharpe, trades
from mooquant.broker.backtesting import TradePercentage
from mooquant.broker.fillstrategy import DefaultStrategy
from mooquant.technical import cross, ma
from mooquant.tools import tushare
class thrSMA(strategy.BacktestingStrategy):
def __init__(self, feed, instrument, short_l, mid_l, long_l, up_cum):
strategy.BacktestingStrategy.__init__(self, feed)
self.__instrument = instrument
self.getBroker().setFillStrategy(DefaultStrategy(None))
self.getBroker().setCommission(TradePercentage(0.001))
self.__position = None
self.__prices = feed[instrument].getPriceDataSeries()
self.__malength1 = int(short_l)
self.__malength2 = int(mid_l)
self.__malength3 = int(long_l)
self.__circ = int(up_cum)
self.__ma1 = ma.SMA(self.__prices, self.__malength1)
self.__ma2 = ma.SMA(self.__prices, self.__malength2)
self.__ma3 = ma.SMA(self.__prices, self.__malength3)
def getPrice(self):
return self.__prices
def getSMA(self):
return self.__ma1, self.__ma2, self.__ma3
def onEnterCanceled(self, position):
self.__position = None
def onEnterOK(self):
pass
def onExitOk(self, position):
self.__position = None
# self.info("long close")
def onExitCanceled(self, position):
self.__position.exitMarket()
def buyCon1(self):
if cross.cross_above(self.__ma1, self.__ma2) > 0:
return True
def buyCon2(self):
m1 = 0
m2 = 0
for i in range(self.__circ):
            # count how many of the last `up_cum` bars kept the short/mid MAs above the long MA
            if self.__ma1[-i - 1] > self.__ma3[-i - 1]:
m1 += 1
if self.__ma2[-i - 1] > self.__ma3[-i - 1]:
m2 += 1
if m1 >= self.__circ and m2 >= self.__circ:
return True
def sellCon1(self):
if cross.cross_below(self.__ma1, self.__ma2) > 0:
return True
def onBars(self, bars):
# If a position was not opened, check if we should enter a long
# position.
if self.__ma2[-1] is None:
return
if self.__position is not None:
if not self.__position.exitActive() and cross.cross_below(
self.__ma1, self.__ma2) > 0:
self.__position.exitMarket()
# self.info("sell %s" % (bars.getDateTime()))
if self.__position is None:
if self.buyCon1() and self.buyCon2():
shares = int(self.getBroker().getCash() * 0.2 / bars[self.__instrument].getPrice())
self.__position = self.enterLong(self.__instrument, shares)
print(bars[self.__instrument].getDateTime(),
bars[self.__instrument].getPrice())
# self.info("buy %s" % (bars.getDateTime()))
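# Illustrative sketch (not part of the strategy): buyCon2 above requires both the short and
# mid SMAs to have stayed above the long SMA for each of the last `up_cum` bars. The helper
# below reproduces that counting rule on plain Python lists so it can be checked without a
# data feed; pass in whatever toy series you like.
def _count_bars_above(short_ma, mid_ma, long_ma, up_cum):
    m1 = sum(1 for i in range(up_cum) if short_ma[-i - 1] > long_ma[-i - 1])
    m2 = sum(1 for i in range(up_cum) if mid_ma[-i - 1] > long_ma[-i - 1])
    return m1 >= up_cum and m2 >= up_cum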
def testStrategy():
strat = thrSMA
instrument = '600288'
market = 'SH'
fromDate = '20150101'
toDate = '20150601'
frequency = bar.Frequency.MINUTE
plot = True
paras = [2, 20, 60, 10]
feeds = tushare.build_feed([instrument], 2016, 2017, "histdata/tushare")
strat = strat(feeds, instrument, *paras)
retAnalyzer = returns.Returns()
strat.attachAnalyzer(retAnalyzer)
sharpeRatioAnalyzer = sharpe.SharpeRatio()
strat.attachAnalyzer(sharpeRatioAnalyzer)
drawDownAnalyzer = drawdown.DrawDown()
strat.attachAnalyzer(drawDownAnalyzer)
tradesAnalyzer = trades.Trades()
strat.attachAnalyzer(tradesAnalyzer)
strat.run()
    # Sharpe ratio
    sharp = sharpeRatioAnalyzer.getSharpeRatio(0.05)
    # Maximum drawdown
    maxdd = drawDownAnalyzer.getMaxDrawDown()
    # Cumulative return
    return_ = retAnalyzer.getCumulativeReturns()[-1]
    # Equity (cumulative return) curve
return_list = []
    for item in retAnalyzer.getCumulativeReturns():
        return_list.append(item)
    return sharp, maxdd, return_, return_list
def run_strategy(ticker, account_id, paras):
print(ticker)
print(account_id)
print(paras)
strat = testStrategy()
if __name__ == "__main__":
testStrategy()
| 29.075862 | 99 | 0.617173 | [ "Apache-2.0" ] | bopo/MooQuant | stratlib/sample_SMA.py | 4,244 | Python
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Glew(Package):
"""The OpenGL Extension Wrangler Library."""
homepage = "http://glew.sourceforge.net/"
url = "https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download"
version('2.0.0', '2a2cd7c98f13854d2fcddae0d2b20411')
depends_on("cmake", type='build')
depends_on("gl")
def install(self, spec, prefix):
options = []
options.extend(std_cmake_args)
with working_dir('build'):
cmake('./cmake/', *options)
# https://github.com/Homebrew/legacy-homebrew/issues/22025
# Note: This file is generated only after cmake is run
filter_file(r'Requires: glu',
(''), '../glew.pc')
make()
make("install")
| 38.415094 | 95 | 0.63556 | [ "BSD-3-Clause" ] | ctuning/ck-spack | package/spack-glew/package.py | 2,036 | Python
from django.db import models
from django.utils.timezone import now
# Create your models here.
# <HINT> Create a Car Make model `class CarMake(models.Model)`:
# - Name
# - Description
# - Any other fields you would like to include in car make model
# - __str__ method to print a car make object
class CarMake(models.Model):
name = models.CharField(null=False, max_length=30, default='')
description = models.CharField(max_length=1000)
def __str__(self):
return "Name: " + self.name + "," + \
"Description: " + self.description
# <HINT> Create a Car Model model `class CarModel(models.Model):`:
# - Many-To-One relationship to Car Make model (One Car Make has many Car Models, using ForeignKey field)
# - Name
# - Dealer id, used to refer a dealer created in cloudant database
# - Type (CharField with a choices argument to provide limited choices such as Sedan, SUV, WAGON, etc.)
# - Year (DateField)
# - Any other fields you would like to include in car model
# - __str__ method to print a car make object
class CarModel(models.Model):
SEDAN = 'sedan'
SUV= 'suv'
WAGON = 'wagon'
TYPE_CHOICES = [
(SEDAN, 'Sedan'),
(SUV, 'Suv'),
(WAGON, 'Wagon')
]
model = models.ForeignKey(CarMake, on_delete=models.CASCADE)
dealerId = models.IntegerField(default=0)
type = models.CharField(
null=False,
max_length=20,
choices=TYPE_CHOICES,
default=SEDAN
)
title = models.CharField(max_length=200, default="title")
date = models.DateField(null=True)
def __str__(self):
return "title: " + self.title
# <HINT> Create a plain Python class `CarDealer` to hold dealer data
class CarDealer:
def __init__(self, address, city, full_name, id, lat, long, short_name, st, zip):
# Dealer address
self.address = address
# Dealer city
self.city = city
# Dealer Full Name
self.full_name = full_name
# Dealer id
self.id = id
# Location lat
self.lat = lat
# Location long
self.long = long
# Dealer short name
self.short_name = short_name
# Dealer state
self.st = st
# Dealer zip
self.zip = zip
def __str__(self):
return "Dealer name: " + self.full_name
# <HINT> Create a plain Python class `DealerReview` to hold review data
class DealerReview:
def __init__(self, name, dealership, review, purchase, sentiment):
self.name = name
self.dealership = dealership
self.review = review
        self.purchase = purchase
        self.sentiment = sentiment
def __str__(self):
return "Review: " + self.review
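# Usage sketch (an assumption, not part of the app): how the CarMake/CarModel relationship
# could be exercised from a Django shell once migrations are applied. All example values
# are made up.
def _example_create_car(dealer_id=15):
    make = CarMake.objects.create(name="Audi", description="German automaker")
    car = CarModel.objects.create(
        model=make,              # ForeignKey back to the CarMake created above
        dealerId=dealer_id,
        type=CarModel.SEDAN,
        title="A6",
        date=now().date(),
    )
    return car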
| 31.611765 | 105 | 0.639375 | [ "Apache-2.0" ] | jahi-96/agfzb-CloudAppDevelopment_Capstone | server/djangoapp/models.py | 2,687 | Python
#!/usr/bin/python
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_user
short_description: Manage AWS IAM users
description:
- Manage AWS IAM users
version_added: "2.5"
author: Josh Souza, @joshsouza
options:
name:
description:
- The name of the user to create.
required: true
managed_policy:
description:
- A list of managed policy ARNs or friendly names to attach to the user. To embed an inline policy, use M(iam_policy).
required: false
state:
description:
- Create or remove the IAM user
required: true
choices: [ 'present', 'absent' ]
purge_policy:
description:
- Detach policies which are not included in managed_policy list
required: false
default: false
requirements: [ botocore, boto3 ]
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Note: This module does not allow management of groups that users belong to.
# Groups should manage their membership directly using `iam_group`,
# as users belong to them.
# Create a user
- iam_user:
name: testuser1
state: present
# Create a user and attach a managed policy using its ARN
- iam_user:
name: testuser1
managed_policy:
- arn:aws:iam::aws:policy/AmazonSNSFullAccess
state: present
# Remove all managed policies from an existing user with an empty list
- iam_user:
name: testuser1
state: present
purge_policy: true
# Delete the user
- iam_user:
name: testuser1
state: absent
'''
RETURN = '''
user:
description: dictionary containing all the user information
returned: success
type: complex
contains:
arn:
description: the Amazon Resource Name (ARN) specifying the user
type: string
sample: "arn:aws:iam::1234567890:user/testuser1"
create_date:
description: the date and time, in ISO 8601 date-time format, when the user was created
type: string
sample: "2017-02-08T04:36:28+00:00"
user_id:
description: the stable and unique string identifying the user
type: string
sample: AGPAIDBWE12NSFINE55TM
user_name:
description: the friendly name that identifies the user
type: string
sample: testuser1
path:
description: the path to the user
type: string
sample: /
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, ec2_argument_spec, get_aws_connection_info, boto3_conn
from ansible.module_utils.ec2 import HAS_BOTO3
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError
except ImportError:
pass # caught by imported HAS_BOTO3
def compare_attached_policies(current_attached_policies, new_attached_policies):
# If new_attached_policies is None it means we want to remove all policies
if len(current_attached_policies) > 0 and new_attached_policies is None:
return False
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)):
return True
else:
return False
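# Illustrative sketch (not used by the module): compare_attached_policies treats the current
# and desired attachments as equal only when their ARN sets match exactly. The ARN below is
# an AWS-managed policy used purely as sample data; nothing is called against AWS.
def _example_policy_comparison():
    current = [{'PolicyArn': 'arn:aws:iam::aws:policy/AmazonSNSFullAccess'}]
    assert compare_attached_policies(current, ['arn:aws:iam::aws:policy/AmazonSNSFullAccess'])
    assert not compare_attached_policies(current, [])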
def convert_friendly_names_to_arns(connection, module, policy_names):
# List comprehension that looks for any policy in the 'policy_names' list
# that does not begin with 'arn'. If there aren't any, short circuit.
# If there are, translate friendly name to the full arn
if not any([not policy.startswith('arn:') for policy in policy_names if policy is not None]):
return policy_names
allpolicies = {}
paginator = connection.get_paginator('list_policies')
policies = paginator.paginate().build_full_result()['Policies']
for policy in policies:
allpolicies[policy['PolicyName']] = policy['Arn']
allpolicies[policy['Arn']] = policy['Arn']
try:
return [allpolicies[policy] for policy in policy_names]
except KeyError as e:
module.fail_json(msg="Couldn't find policy: " + str(e))
def create_or_update_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
managed_policies = module.params.get('managed_policy')
purge_policy = module.params.get('purge_policy')
changed = False
if managed_policies:
managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies)
# Get user
user = get_user(connection, module, params['UserName'])
# If user is None, create it
if user is None:
# Check mode means we would create the user
if module.check_mode:
module.exit_json(changed=True)
try:
connection.create_user(**params)
changed = True
except ClientError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to create user: {0}".format(to_native(e)), exception=traceback.format_exc())
# Manage managed policies
current_attached_policies = get_attached_policy_list(connection, module, params['UserName'])
if not compare_attached_policies(current_attached_policies, managed_policies):
current_attached_policies_arn_list = []
for policy in current_attached_policies:
current_attached_policies_arn_list.append(policy['PolicyArn'])
# If managed_policies has a single empty element we want to remove all attached policies
if purge_policy:
# Detach policies not present
for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)):
changed = True
if not module.check_mode:
try:
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
# If there are policies to adjust that aren't in the current list, then things have changed
# Otherwise the only changes were in purging above
if set(managed_policies).difference(set(current_attached_policies_arn_list)):
changed = True
# If there are policies in managed_policies attach each policy
if managed_policies != [None] and not module.check_mode:
for policy_arn in managed_policies:
try:
connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn)
except ClientError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to attach policy {0} to user {1}: {2}".format(
policy_arn, params['UserName'], to_native(e)),
exception=traceback.format_exc())
if module.check_mode:
module.exit_json(changed=changed)
# Get the user again
user = get_user(connection, module, params['UserName'])
module.exit_json(changed=changed, iam_user=camel_dict_to_snake_dict(user))
def destroy_user(connection, module):
params = dict()
params['UserName'] = module.params.get('name')
if get_user(connection, module, params['UserName']):
# Check mode means we would remove this user
if module.check_mode:
module.exit_json(changed=True)
# Remove any attached policies otherwise deletion fails
try:
for policy in get_attached_policy_list(connection, module, params['UserName']):
connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy['PolicyArn'])
except ClientError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to detach policy {0} from user {1}: {2}".format(
policy['PolicyArn'], params['UserName'], to_native(e)),
exception=traceback.format_exc())
try:
connection.delete_user(**params)
except ClientError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except ParamValidationError as e:
module.fail_json(msg="Unable to delete user {0}: {1}".format(params['UserName'], to_native(e)),
exception=traceback.format_exc())
else:
module.exit_json(changed=False)
module.exit_json(changed=True)
def get_user(connection, module, name):
params = dict()
params['UserName'] = name
try:
return connection.get_user(**params)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def get_attached_policy_list(connection, module, name):
try:
return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchEntity':
return None
else:
module.fail_json(msg="Unable to get policies for user {0}: {1}".format(name, to_native(e)),
**camel_dict_to_snake_dict(e.response))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
managed_policy=dict(default=[], type='list'),
state=dict(choices=['present', 'absent'], required=True),
purge_policy=dict(default=False, type='bool')
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='iam', region=region, endpoint=ec2_url, **aws_connect_params)
state = module.params.get("state")
if state == 'present':
create_or_update_user(connection, module)
else:
destroy_user(connection, module)
if __name__ == '__main__':
main()
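# --- Illustrative standalone sketch (not part of the Ansible module above) ---
# Shows the same boto3 IAM read calls the module builds on (get_user /
# list_attached_user_policies). "example-user" is a made-up name, real use needs
# AWS credentials with IAM read permissions, and the function is never called
# by the module itself.
def _example_standalone_iam_check(user_name="example-user"):
    import boto3
    iam = boto3.client("iam")
    try:
        user = iam.get_user(UserName=user_name)["User"]
        attached = iam.list_attached_user_policies(UserName=user_name)["AttachedPolicies"]
        return user["Arn"], [p["PolicyArn"] for p in attached]
    except iam.exceptions.NoSuchEntityException:
        return None, []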
| 37.987692 | 126 | 0.640936 | ["Apache-2.0"] | aburan28/ansible-devops-pipeline | venv/lib/python2.7/site-packages/ansible/modules/cloud/amazon/iam_user.py | 12,346 | Python |
# encoding: utf-8
import pytest
from osf_tests.factories import (
RegistrationFactory,
RegistrationProviderFactory
)
from osf.models import (
RegistrationSchema,
)
from osf.management.commands.move_egap_regs_to_provider import (
main as move_egap_regs
)
from django.conf import settings
@pytest.mark.django_db
class TestEGAPMoveToProvider:
@pytest.fixture()
def egap_provider(self):
return RegistrationProviderFactory(name=settings.EGAP_PROVIDER_NAME)
@pytest.fixture()
def non_egap_provider(self):
return RegistrationProviderFactory()
@pytest.fixture()
def egap_reg(self):
egap_schema = RegistrationSchema.objects.filter(
name='EGAP Registration'
).order_by(
'-schema_version'
)[0]
cos = RegistrationProviderFactory(_id='osf')
return RegistrationFactory(schema=egap_schema, provider=cos)
@pytest.fixture()
def egap_non_reg(self, non_egap_provider):
return RegistrationFactory(provider=non_egap_provider)
def test_move_to_provider(self, egap_provider, egap_reg, non_egap_provider, egap_non_reg):
assert egap_reg.provider != egap_provider
assert egap_non_reg.provider != egap_provider
move_egap_regs(dry_run=False)
egap_reg.refresh_from_db()
assert egap_reg.provider == egap_provider
assert egap_non_reg.provider != egap_provider
| 26.481481 | 94 | 0.718881 | ["Apache-2.0"] | RCOSDP/RDM-osf.io | osf_tests/management_commands/test_move_egap_regs_to_provider.py | 1,430 | Python |
import torch
import torchvision
from torchvision import transforms, utils, datasets
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler
from sklearn.metrics import classification_report, confusion_matrix
def makeDataSet(IMAGE_SHAPE = 300,DATA_PATH = './data_after_splitting/'):
image_transforms = {
"train": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
]),
"val": transforms.Compose([
transforms.Resize((IMAGE_SHAPE, IMAGE_SHAPE)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])
])
}
train_dataset = datasets.ImageFolder(root = DATA_PATH + "train",
transform = image_transforms["train"]
)
val_dataset = datasets.ImageFolder(root = DATA_PATH + "val",
transform = image_transforms["val"]
)
train_dataloader = DataLoader(train_dataset, batch_size=4, num_workers=2, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=4, num_workers=2, shuffle=True)
return train_dataloader,val_dataloader
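# --- Illustrative usage sketch ---
# Assumes ./data_after_splitting/train and ./data_after_splitting/val exist with
# one sub-folder per class (the ImageFolder layout expected above).
if __name__ == "__main__":
    train_dataloader, val_dataloader = makeDataSet(IMAGE_SHAPE=300)
    images, labels = next(iter(train_dataloader))
    print(images.shape, labels)  # e.g. torch.Size([4, 3, 300, 300]) and a tensor of class indices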
| 37.166667 | 88 | 0.608371 | ["MIT"] | manhph2211/Fp-Classification | dataloader.py | 1,338 | Python |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import logging
import netifaces
def getIpWindows(adapteridx):
try:
import wmi
    except ImportError:
        logging.error("The wmi package (win32 extensions for Python) is required")
raise
adapters = wmi.WMI().Win32_NetworkAdapter()
wlan_int_id = adapters[adapteridx].Index
adaptername = adapters[adapteridx].NetConnectionID
ip = ''
for nic in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=1):
if nic.Index == wlan_int_id:
ip = nic.IPAddress[0]
logging.info("[Windows] Showing IP for adapter %d (%s): %s",
adapteridx, adaptername, ip)
return ip
def filtre(addrInfo):
for typ, addrList in addrInfo.iteritems():
if len(addrList) == 0:
continue
for addrDetails in addrList:
if len(addrDetails.get('addr', '').split('.')) != 4:
continue
if not addrDetails.get('addr').startswith('192.168') and\
addrDetails.get('addr') != '127.0.0.1' and not \
addrDetails.get('addr').startswith('0'):
return addrDetails.get('addr')
def getIp(adapteridx):
adapters = netifaces.interfaces()
addrInfo = [netifaces.ifaddresses(a) for a in adapters]
addrInfo = [filtre(info) for info in addrInfo]
addrInfo = [info for info in addrInfo if info is not None]
return addrInfo[adapteridx % len(addrInfo)]
Conf = {
'state': 'DEBUG',
'log': {
'fileLevel': logging.WARNING
},
'database': {
'name': 'db/miniboard-factorio.db'
},
'server': {
'port': 15000,
'ip': '',
'assets': {
'minifiedCleanups': [
'http/assets/custom/css/',
'http/assets/custom/js/'
],
'minifyOnDebug': False
},
},
'factorio': {
'allowedPorts': sorted(
[34197, 34190, 34191, 34192, 34193]),
'savesFolder': (
'/Users/romain/Library/Application Support/factorio/saves'),
'binary': '/Applications/factorio.app',
'configFolder': (
'/Users/romain/Library/Application Support/factorio/config'),
'autosaveInterval': 15 # in minutes
}
}
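# --- Illustrative usage sketch ---
# Fill in the server IP from the first suitable adapter and read a config value;
# getIpWindows() is only meaningful on Windows (it needs the wmi package).
if __name__ == '__main__':
    Conf['server']['ip'] = getIp(0)
    print("Serving on %s:%s" % (Conf['server']['ip'], Conf['server']['port']))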
| 29.6625 | 78 | 0.552465 | ["MIT"] | Hiestaa/miniboard-factorio-manager | conf.py | 2,373 | Python |
"""
@Author : liujianhan
@Date : 2018/5/15 上午10:48
@Project : KGE
@FileName : service.py
@Description : 服务接口模块
"""
import codecs
import json
import os
import time
from typing import Dict
import torch
from dotmap import DotMap
from .core.predict import get_entity_relation_with_id
from .layer.model import KGEModel
kge_model, entity2id, id2entity, relation2id, all_true_triples, args = None, None, None, None, None, None
def load_model(model_path: str) -> None:
"""
模型加载
@param model_path: 模型文件夹路径
@return:
"""
global kge_model, entity2id, id2entity, relation2id, all_true_triples, args
args = DotMap(json.load(codecs.open(os.path.join(model_path, 'config.json'), 'r', encoding='utf-8')))
entity2id, id2entity, relation2id, id2relation, all_true_triples = get_entity_relation_with_id(args.data_path)
kge_model = KGEModel(
model_name=args.model,
nentity=args.nentity,
nrelation=args.nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding
)
if args.cuda:
kge_model = kge_model.cuda()
checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
kge_model.load_state_dict(checkpoint['model_state_dict'])
def inference(target_triple: str) -> Dict:
"""
推理函数
@param target_triple: 目标需预测三元组:'头实体 关系 尾实体'
@return: 头尾实体的10个预测结果
"""
if kge_model is None:
        return {'prediction': 'Note: the model has not been loaded'}
try:
target_triple = target_triple.split()
head = entity2id[target_triple[0]]
tail = entity2id[target_triple[2]]
relation = relation2id[target_triple[1]]
target_triple = [(head, relation, tail)]
except KeyError as e:
        return {'prediction': f'Entity or relation <{e}> does not exist; please make sure the input entity or relation exists.'}
prediction = kge_model.test_step(kge_model, target_triple, all_true_triples, args, True)
head_entity_prediction = [id2entity[str(idx)] for idx in prediction['head_predict']]
tail_entity_prediction = [id2entity[str(idx)] for idx in prediction['tail_predict']]
    result = {'head_entity_predictions': head_entity_prediction, 'tail_entity_predictions': tail_entity_prediction}
return result
if __name__ == '__main__':
t1 = time.time()
load_model('data_path/model/DistMult_cn_military_300k_10')
test_cases = [
'摩耶号/Maya巡洋舰 建造时间 1928年',
'1949年2月28日 星座 双鱼座'
]
t2 = time.time()
res = inference(test_cases[0])
    print(f'Model loading time: {t2 - t1: .3}s')
    print(f'Inference time: {time.time() - t2: .3}s')
print(res)
| 30.732558 | 114 | 0.678396 | ["MIT"] | Jianhan-Liu/solid_ai_waddle | project/knowledge_graph_embedding/project_distmult_rotate_transe/service.py | 2,917 | Python |
from keras.layers import Activation, Reshape, Lambda, dot, add
from keras.layers import Conv1D, Conv2D, Conv3D
from keras.layers import MaxPool1D,GlobalAveragePooling2D,Dense,multiply,Activation,concatenate
from keras import backend as K
def squeeze_excitation_layer(x, out_dim, ratio=4, concate=True):
    # Squeeze: global average pooling reduces each channel of x to a single value
    squeeze = GlobalAveragePooling2D()(x)
    # Excitation: bottleneck MLP (out_dim // ratio -> out_dim) ending in a sigmoid gate
    excitation = Dense(units=out_dim // ratio)(squeeze)
    excitation = Activation('relu')(excitation)
    excitation = Dense(units=out_dim)(excitation)
    excitation = Activation('sigmoid')(excitation)
    excitation = Reshape((1, 1, out_dim))(excitation)
    # Re-weight the input feature map channel-wise
    scale = multiply([x, excitation])
    # Optionally concatenate the re-weighted map with the original input (doubles the channels)
    if concate:
        scale = concatenate([scale, x], axis=3)
    return scale
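# --- Illustrative usage sketch ---
# Attaches the squeeze-and-excitation block to a small convolutional feature map.
# The layer sizes are arbitrary examples; out_dim must match the channel count of x.
if __name__ == '__main__':
    from keras.layers import Input
    from keras.models import Model
    inp = Input(shape=(32, 32, 3))
    x = Conv2D(16, (3, 3), padding='same', activation='relu')(inp)
    y = squeeze_excitation_layer(x, out_dim=16, ratio=4, concate=True)  # -> 32 channels after concat
    model = Model(inputs=inp, outputs=y)
    model.summary()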
| 33.272727 | 95 | 0.73224 | ["MIT"] | ailabnjtech/B-CNN | channel_attention.py | 732 | Python |
# Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
class LeastSquaresBinaryClassifierLearner:
def __init__(self):
self.weights = None
def fit(self, X, Y, sample_weight):
sqrtW = np.sqrt(sample_weight)
matX = np.array(X) * sqrtW[:, np.newaxis]
vecY = Y * sqrtW
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=-1)
self.weights = pd.Series(self.lsqinfo[0], index=list(X))
def predict(self, X):
pred = X.dot(np.asarray(self.weights))
return 1 * (pred > 0.5)
class LeastSquaresRegressor:
def __init__(self):
self.weights = None
def fit(self, X, Y, sample_weight):
sqrtW = np.sqrt(sample_weight)
matX = np.array(X) * sqrtW[:, np.newaxis]
vecY = Y * sqrtW
self.lsqinfo = np.linalg.lstsq(matX, vecY, rcond=-1)
self.weights = pd.Series(self.lsqinfo[0], index=list(X))
def predict(self, X):
return X.dot(self.weights)
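# --- Illustrative usage sketch (not part of the test helpers' API) ---
# Fits the binary learner on a tiny, made-up DataFrame with uniform sample weights.
if __name__ == "__main__":
    X = pd.DataFrame({"x1": [0.0, 1.0, 2.0, 3.0], "x2": [1.0, 1.0, 1.0, 1.0]})
    Y = pd.Series([0, 0, 1, 1])
    learner = LeastSquaresBinaryClassifierLearner()
    learner.fit(X, Y, sample_weight=np.ones(len(Y)))
    print(learner.predict(X))  # 0/1 predictions, one per row of X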
| 28.108108 | 64 | 0.623077 | ["MIT"] | Acornagain/fair_regression_revised | test/unit/reductions/exponentiated_gradient/simple_learners.py | 1,040 | Python |
"""
An ASGI middleware.
Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
"""
import asyncio
import inspect
import urllib
from sentry_sdk._functools import partial
from sentry_sdk._types import MYPY
from sentry_sdk.hub import Hub, _should_send_default_pii
from sentry_sdk.integrations._wsgi_common import _filter_headers
from sentry_sdk.utils import (
ContextVar,
event_from_exception,
transaction_from_function,
HAS_REAL_CONTEXTVARS,
CONTEXTVARS_ERROR_MESSAGE,
)
from sentry_sdk.tracing import Transaction
if MYPY:
from typing import Dict
from typing import Any
from typing import Optional
from typing import Callable
from typing_extensions import Literal
from sentry_sdk._types import Event, Hint
_asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
_DEFAULT_TRANSACTION_NAME = "generic ASGI request"
def _capture_exception(hub, exc):
# type: (Hub, Any) -> None
# Check client here as it might have been unset while streaming response
if hub.client is not None:
event, hint = event_from_exception(
exc,
client_options=hub.client.options,
mechanism={"type": "asgi", "handled": False},
)
hub.capture_event(event, hint=hint)
def _looks_like_asgi3(app):
# type: (Any) -> bool
"""
Try to figure out if an application object supports ASGI3.
This is how uvicorn figures out the application version as well.
"""
if inspect.isclass(app):
return hasattr(app, "__await__")
elif inspect.isfunction(app):
return asyncio.iscoroutinefunction(app)
else:
call = getattr(app, "__call__", None) # noqa
return asyncio.iscoroutinefunction(call)
class SentryAsgiMiddleware:
__slots__ = ("app", "__call__")
def __init__(self, app, unsafe_context_data=False):
# type: (Any, bool) -> None
"""
Instrument an ASGI application with Sentry. Provides HTTP/websocket
data to sent events and basic handling for exceptions bubbling up
through the middleware.
:param unsafe_context_data: Disable errors when a proper contextvars installation could not be found. We do not recommend changing this from the default.
"""
if not unsafe_context_data and not HAS_REAL_CONTEXTVARS:
# We better have contextvars or we're going to leak state between
# requests.
raise RuntimeError(
"The ASGI middleware for Sentry requires Python 3.7+ "
"or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE
)
self.app = app
if _looks_like_asgi3(app):
self.__call__ = self._run_asgi3 # type: Callable[..., Any]
else:
self.__call__ = self._run_asgi2
def _run_asgi2(self, scope):
# type: (Any) -> Any
async def inner(receive, send):
# type: (Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope)(receive, send))
return inner
async def _run_asgi3(self, scope, receive, send):
# type: (Any, Any, Any) -> Any
return await self._run_app(scope, lambda: self.app(scope, receive, send))
async def _run_app(self, scope, callback):
# type: (Any, Any) -> Any
if _asgi_middleware_applied.get(False):
return await callback()
_asgi_middleware_applied.set(True)
try:
hub = Hub(Hub.current)
with hub:
with hub.configure_scope() as sentry_scope:
sentry_scope.clear_breadcrumbs()
sentry_scope._name = "asgi"
processor = partial(self.event_processor, asgi_scope=scope)
sentry_scope.add_event_processor(processor)
ty = scope["type"]
if ty in ("http", "websocket"):
transaction = Transaction.continue_from_headers(
dict(scope["headers"]),
op="{}.server".format(ty),
)
else:
transaction = Transaction(op="asgi.server")
transaction.name = _DEFAULT_TRANSACTION_NAME
transaction.set_tag("asgi.type", ty)
with hub.start_transaction(transaction):
# XXX: Would be cool to have correct span status, but we
# would have to wrap send(). That is a bit hard to do with
# the current abstraction over ASGI 2/3.
try:
return await callback()
except Exception as exc:
_capture_exception(hub, exc)
raise exc from None
finally:
_asgi_middleware_applied.set(False)
def event_processor(self, event, hint, asgi_scope):
# type: (Event, Hint, Any) -> Optional[Event]
request_info = event.get("request", {})
ty = asgi_scope["type"]
if ty in ("http", "websocket"):
request_info["method"] = asgi_scope.get("method")
request_info["headers"] = headers = _filter_headers(
self._get_headers(asgi_scope)
)
request_info["query_string"] = self._get_query(asgi_scope)
request_info["url"] = self._get_url(
asgi_scope, "http" if ty == "http" else "ws", headers.get("host")
)
client = asgi_scope.get("client")
if client and _should_send_default_pii():
request_info["env"] = {"REMOTE_ADDR": client[0]}
if (
event.get("transaction", _DEFAULT_TRANSACTION_NAME)
== _DEFAULT_TRANSACTION_NAME
):
endpoint = asgi_scope.get("endpoint")
# Webframeworks like Starlette mutate the ASGI env once routing is
# done, which is sometime after the request has started. If we have
# an endpoint, overwrite our generic transaction name.
if endpoint:
event["transaction"] = transaction_from_function(endpoint)
event["request"] = request_info
return event
# Helper functions for extracting request data.
#
# Note: Those functions are not public API. If you want to mutate request
# data to your liking it's recommended to use the `before_send` callback
# for that.
def _get_url(self, scope, default_scheme, host):
# type: (Dict[str, Any], Literal["ws", "http"], Optional[str]) -> str
"""
Extract URL from the ASGI scope, without also including the querystring.
"""
scheme = scope.get("scheme", default_scheme)
server = scope.get("server", None)
path = scope.get("root_path", "") + scope.get("path", "")
if host:
return "%s://%s%s" % (scheme, host, path)
if server is not None:
host, port = server
default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
if port != default_port:
return "%s://%s:%s%s" % (scheme, host, port, path)
return "%s://%s%s" % (scheme, host, path)
return path
def _get_query(self, scope):
# type: (Any) -> Any
"""
Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
"""
qs = scope.get("query_string")
if not qs:
return None
return urllib.parse.unquote(qs.decode("latin-1"))
def _get_headers(self, scope):
# type: (Any) -> Dict[str, str]
"""
Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
"""
headers = {} # type: Dict[str, str]
for raw_key, raw_value in scope["headers"]:
key = raw_key.decode("latin-1")
value = raw_value.decode("latin-1")
if key in headers:
headers[key] = headers[key] + ", " + value
else:
headers[key] = value
return headers
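# --- Illustrative usage sketch (not part of this integration module) ---
# Wraps a bare ASGI3 application with SentryAsgiMiddleware; the DSN below is a
# placeholder, and any ASGI server (e.g. uvicorn) could then serve the returned app.
def _example_wrapped_asgi_app():
    import sentry_sdk

    async def plain_app(scope, receive, send):
        # minimal "hello" HTTP responder
        await send({"type": "http.response.start", "status": 200,
                    "headers": [(b"content-type", b"text/plain")]})
        await send({"type": "http.response.body", "body": b"hello"})

    sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")
    return SentryAsgiMiddleware(plain_app)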
| 34.837607 | 161 | 0.59421 | ["BSD-2-Clause"] | cuenca-mx/sentry-python | sentry_sdk/integrations/asgi.py | 8,152 | Python |
# -*- coding: utf-8 -*-
import re
from packaging import version
import phonemizer
from phonemizer.phonemize import phonemize
from TTS.utils.text import cleaners
from TTS.utils.text.symbols import make_symbols, symbols, phonemes, _phoneme_punctuations, _bos, \
_eos
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
_phonemes_to_id = {s: i for i, s in enumerate(phonemes)}
_id_to_phonemes = {i: s for i, s in enumerate(phonemes)}
# Regular expression matching text enclosed in curly braces:
_CURLY_RE = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching punctuations, ignoring empty space
PHONEME_PUNCTUATION_PATTERN = r'['+_phoneme_punctuations+']+'
def text2phone(text, language):
'''
Convert graphemes to phonemes.
'''
seperator = phonemizer.separator.Separator(' |', '', '|')
#try:
punctuations = re.findall(PHONEME_PUNCTUATION_PATTERN, text)
if version.parse(phonemizer.__version__) < version.parse('2.1'):
ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language)
ph = ph[:-1].strip() # skip the last empty character
# phonemizer does not tackle punctuations. Here we do.
# Replace \n with matching punctuations.
if punctuations:
# if text ends with a punctuation.
if text[-1] == punctuations[-1]:
for punct in punctuations[:-1]:
ph = ph.replace('| |\n', '|'+punct+'| |', 1)
ph = ph + punctuations[-1]
else:
for punct in punctuations:
ph = ph.replace('| |\n', '|'+punct+'| |', 1)
elif version.parse(phonemizer.__version__) >= version.parse('2.1'):
ph = phonemize(text, separator=seperator, strip=False, njobs=1, backend='espeak', language=language, preserve_punctuation=True)
# this is a simple fix for phonemizer.
# https://github.com/bootphon/phonemizer/issues/32
if punctuations:
for punctuation in punctuations:
ph = ph.replace(f"| |{punctuation} ", f"|{punctuation}| |").replace(f"| |{punctuation}", f"|{punctuation}| |")
ph = ph[:-3]
else:
raise RuntimeError(" [!] Use 'phonemizer' version 2.1 or older.")
return ph
def pad_with_eos_bos(phoneme_sequence, tp=None):
# pylint: disable=global-statement
global _phonemes_to_id, _bos, _eos
if tp:
_bos = tp['bos']
_eos = tp['eos']
_, _phonemes = make_symbols(**tp)
_phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}
return [_phonemes_to_id[_bos]] + list(phoneme_sequence) + [_phonemes_to_id[_eos]]
def phoneme_to_sequence(text, cleaner_names, language, enable_eos_bos=False, tp=None):
# pylint: disable=global-statement
global _phonemes_to_id
if tp:
_, _phonemes = make_symbols(**tp)
_phonemes_to_id = {s: i for i, s in enumerate(_phonemes)}
sequence = []
text = text.replace(":", "")
clean_text = _clean_text(text, cleaner_names)
to_phonemes = text2phone(clean_text, language)
if to_phonemes is None:
print("!! After phoneme conversion the result is None. -- {} ".format(clean_text))
# iterate by skipping empty strings - NOTE: might be useful to keep it to have a better intonation.
for phoneme in filter(None, to_phonemes.split('|')):
sequence += _phoneme_to_sequence(phoneme)
# Append EOS char
if enable_eos_bos:
sequence = pad_with_eos_bos(sequence, tp=tp)
return sequence
def sequence_to_phoneme(sequence, tp=None):
# pylint: disable=global-statement
'''Converts a sequence of IDs back to a string'''
global _id_to_phonemes
result = ''
if tp:
_, _phonemes = make_symbols(**tp)
_id_to_phonemes = {i: s for i, s in enumerate(_phonemes)}
for symbol_id in sequence:
if symbol_id in _id_to_phonemes:
s = _id_to_phonemes[symbol_id]
result += s
return result.replace('}{', ' ')
def text_to_sequence(text, cleaner_names, tp=None):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
# pylint: disable=global-statement
global _symbol_to_id
if tp:
_symbols, _ = make_symbols(**tp)
_symbol_to_id = {s: i for i, s in enumerate(_symbols)}
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while text:
m = _CURLY_RE.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(
_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(sequence, tp=None):
'''Converts a sequence of IDs back to a string'''
# pylint: disable=global-statement
global _id_to_symbol
if tp:
_symbols, _ = make_symbols(**tp)
_id_to_symbol = {i: s for i, s in enumerate(_symbols)}
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(syms):
return [_symbol_to_id[s] for s in syms if _should_keep_symbol(s)]
def _phoneme_to_sequence(phons):
return [_phonemes_to_id[s] for s in list(phons) if _should_keep_phoneme(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s not in ['~', '^', '_']
def _should_keep_phoneme(p):
return p in _phonemes_to_id and p not in ['~', '^', '_']
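# --- Illustrative usage sketch ---
# Round-trips a short string through the character-based encoder; "basic_cleaners"
# is assumed to be one of the cleaner functions available in TTS.utils.text.cleaners.
if __name__ == '__main__':
    _seq = text_to_sequence("Hello, world.", ["basic_cleaners"])
    print(_seq)
    print(sequence_to_text(_seq))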
| 35.042328 | 135 | 0.641401 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | DanBmh/TTS | utils/text/__init__.py | 6,623 | Python |
# -*- coding: utf-8 -*-
'''
loadFromExcel.py is an example of a plug-in that will load an extension taxonomy from Excel
input and optionally save an (extension) DTS.
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import os, io, sys, time, re, traceback, json, posixpath
from fnmatch import fnmatch
from collections import defaultdict, OrderedDict
from arelle import PythonUtil, XbrlConst, ModelDocument, UrlUtil
from arelle.PythonUtil import OrderedDefaultDict, OrderedSet
from arelle.ModelDocument import Type, create as createModelDocument
from arelle.ModelValue import qname, QName
from arelle.XbrlConst import (qnLinkLabel, standardLabelRoles, qnLinkReference, standardReferenceRoles,
qnLinkPart, gen, link, defaultLinkRole,
conceptLabel, elementLabel, conceptReference, summationItem
)
qnXbrldtClosed = qname("{http://xbrl.org/2005/xbrldt}xbrldt:closed")
importColHeaderMap = defaultdict(list)
resourceParsePattern = re.compile(r"(label[s]?|reference[s]?|relationship to),?\s*([\w][\w\s#+-:/]+[\w#+-/])(\s*[(]([^)]+)[)])?$")
roleNumberPattern = re.compile(r"(.*)[#]([0-9][0-9A-Za-z]*)")
xlUnicodePattern = re.compile("_x([0-9A-F]{4})_")
excludeDesignatedEnumerations = False
annotateEnumerationsDocumentation = False
annotateElementDocumentation = False
saveXmlLang = None
NULLENTRY = ({},)
facetSortOrder = {
"fractionDigits" : "_00",
"length": "_01",
"minInclusive": "_02",
"maxInclusive": "_03",
"minExclusive": "_04",
"maxExclusive": "_05",
"minLength": "_06",
"maxLength": "_07",
"pattern": "_08",
"totalDigits": "_09",
"whiteSpace": "_10",
"enumeration": "_11"}
def loadFromExcel(cntlr, modelXbrl, excelFile, mappedUri):
from openpyxl import load_workbook
from arelle import ModelDocument, ModelXbrl, XmlUtil
from arelle.ModelDocument import ModelDocumentReference
from arelle.ModelValue import qname
def xlUnicodeChar(match):
return chr(int(match.group(1), 16))
    def xlValue(cell): # excel values may have encoded unicode, such as _x000D_
v = cell.value
if isinstance(v, str):
return xlUnicodePattern.sub(xlUnicodeChar, v).replace('\r\n','\n').replace('\r','\n')
return v
defaultLabelLang = saveXmlLang or "en"
importColumnHeaders = {
"名前空間プレフィックス": "prefix",
"prefix": "prefix",
"要素名": "name",
"name": "name",
"type": "type",
"typePrefix": "typePrefix", # usually part of type but optionally separate column
"substitutionGroup": "substitutionGroup",
"periodType": "periodType",
"balance": "balance",
"abstract": "abstract", # contains true if abstract
"nillable": "nillable",
"depth": "depth",
"minLength": "minLength",
"maxLength": "maxLength",
"minInclusive": "minInclusive",
"maxInclusive": "maxInclusive",
"length": "length",
"fixed": "fixed",
"pattern": "pattern",
"enumeration": "enumeration",
"excludedEnumeration": "excludedEnumeration",
"preferred label": "preferredLabel",
"preferredLabel": "preferredLabel",
"presentation parent": "presentationParent", # qname -- instead of label hierarchy and depth
"calculation parent": "calculationParent", # qname
"calculation weight": "calculationWeight",
# label col heading: ("label", role, lang [indented]),
"標準ラベル(日本語)": ("label", XbrlConst.standardLabel, "ja", "indented"),
"冗長ラベル(日本語)": ("label", XbrlConst.verboseLabel, "ja"),
"標準ラベル(英語)": ("label", XbrlConst.standardLabel, "en"),
"冗長ラベル(英語)": ("label", XbrlConst.verboseLabel, "en"),
"用途区分、財務諸表区分及び業種区分のラベル(日本語)": ("labels", XbrlConst.standardLabel, "ja"),
"用途区分、財務諸表区分及び業種区分のラベル(英語)": ("labels", XbrlConst.standardLabel, "en"),
# label [, role [(lang)]] : ("label", http resource role, lang [indented|overridePreferred])
"label": ("label", XbrlConst.standardLabel, defaultLabelLang, "indented"),
"label, standard": ("label", XbrlConst.standardLabel, defaultLabelLang, "overridePreferred"),
"label, terse": ("label", XbrlConst.terseLabel, defaultLabelLang),
"label, verbose": ("label", XbrlConst.verboseLabel, defaultLabelLang),
"label, documentation": ("label", XbrlConst.documentationLabel, defaultLabelLang),
"group": "linkrole",
"linkrole": "linkrole",
"ELR": "linkrole",
"dimension default": "dimensionDefault"
# reference ("reference", reference http resource role, reference part QName)
# reference, required": ("reference", "http://treasury.gov/dataact/role/taxonomyImplementationNote", qname("{http://treasury.gov/dataact/parts-2015-12-31}dataact-part:Required"))
# attribute, qname (attribute on element in xsd)
}
fatalLoadingErrors = []
startedAt = time.time()
if os.path.isabs(excelFile):
# allow relative filenames to loading directory
priorCWD = os.getcwd()
os.chdir(os.path.dirname(excelFile))
else:
priorCWD = None
importExcelBook = load_workbook(excelFile, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
dtsSheet = None
if "XBRL DTS" in sheetNames:
dtsSheet = "XBRL DTS"
elif "DTS" in sheetNames:
dtsSheet = "DTS"
elif "Sheet2" in sheetNames:
dtsSheet = "Sheet2"
if dtsSheet:
dtsWs = importExcelBook[dtsSheet]
else:
dtsWs = None
imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )} # xml of imports
importXmlns = {}
hasPreLB = hasCalLB = hasDefLB = hasRefLB = hasGenLB = False
# xxxLB structure [ (elr1, def1, "_ELR_", [roots]), (elr2, def2, "_ELR_", [rootw]) ...]
# roots = (rootHref, None, "_root_", [children])
# children = (childPrefix, childName, arcrole, [grandChildren])
preLB = []
defLB = []
calLB = []
refLB = []
genLB = []
def lbDepthList(lbStruct, depth, parentList=None):
if len(lbStruct) > 0:
if depth == topDepth or not hasDepthColumn:
return lbStruct[-1].childStruct
return lbDepthList(lbStruct[-1].childStruct, depth-1, list)
else:
if hasDepthColumn:
cntlr.addToLog("Depth error, Excel sheet: {excelSheet} row: {excelRow}"
.format(excelSheet=importSheetName, excelRow=iRow),
messageCode="importExcel:depth")
return None
splitString = None # to split repeating groups (order, depth)
importFileName = None # for alternate import file
importSheetNames = []
skipRows = [] # [(from,to),(from,to)] row number starting at 1
genDocs = {} # generated documents (schema + referenced linkbases)
genElementsDoc = None
def newDoc(name):
genDocs[name] = PythonUtil.attrdict(
name = name,
initialComment = None,
schemaDocumentation = None,
extensionSchemaPrefix = "",
extensionSchemaFilename = "",
extensionSchemaRelDirname = None, # only non-null for relative directory path
extensionSchemaNamespaceURI = "",
extensionSchemaVersion = None, # <schema @version>
extensionRoles = {}, # key is roleURI, value is role definition
extensionRoleLabels= defaultdict(set), # key is roleURI, value is set( (lang, label) )
extensionElements = {},
extensionTypes = {}, # attrs are name, base. has facets in separate dict same as elements
extensionLabels = {}, # key = (prefix, name, lang, role), value = label text
extensionReferences = OrderedDefaultDict(OrderedSet), # key = (prefix, name, role) values = (partQn, text)
hasEnumerationDocumentation = False,
imports = {"xbrli": ( ("namespace", XbrlConst.xbrli),
("schemaLocation", "http://www.xbrl.org/2003/xbrl-instance-2003-12-31.xsd") )}, # xml of imports
includes = [], # just schemaLocation
importXmlns = {},
importFilenames = {}, # file names relative to base
childGenDocs = [],
linkbaseRefs = [],
labelLinkbases = [],
referenceLinkbases = [],
hasPreLB = False,
hasCalLB = False,
hasDefLB = False,
hasRefLB = False,
hasGenLB = False,
generated = False
)
return genDocs[name]
thisDoc = newDoc(None)
excelDir = os.path.dirname(excelFile) + os.path.sep
def docRelpath(filename, baseDir=None):
if baseDir is None:
baseDir = thisDoc.extensionSchemaRelDirname
if (baseDir is not None and
not (UrlUtil.isAbsolute(filename) or os.path.isabs(filename))):
return posixpath.relpath(filename, baseDir)
return filename
isUSGAAP = False
isGenerateAndImport = True
extensionPrefixForCoreLabels = None
dtsActionColIndex = 0
dtsFiletypeColIndex = 1
dtsPrefixColIndex = 2
dtsFilenameColIndex = 3
dtsNamespaceURIColIndex = 4
for iRow, row in enumerate(dtsWs.rows if dtsWs else ()):
try:
if (len(row) < 1): # skip if col 1 is non-existent
continue
_col0 = row[0].value
if isinstance(_col0, str) and _col0.startswith("#"): # empty or "#"
continue
if iRow == 0:
# title row may have columns differently laid out
for i, col in enumerate(row):
v = xlValue(col)
if isinstance(v, str):
if v == "specification": dtsActionColIndex = i
if v.startswith("file type"): dtsFiletypeColIndex = i
if v.startswith("prefix"): dtsPrefixColIndex = i
if v.startswith("file, href or role definition"): dtsFilenameColIndex = i
if v.startswith("namespace URI"): dtsNamespaceURIColIndex = i
continue
action = filetype = prefix = filename = namespaceURI = None
if len(row) > dtsActionColIndex: action = xlValue(row[dtsActionColIndex])
if len(row) > dtsFiletypeColIndex: filetype = xlValue(row[dtsFiletypeColIndex])
if len(row) > dtsPrefixColIndex: prefix = xlValue(row[dtsPrefixColIndex])
if len(row) > dtsFilenameColIndex: filename = xlValue(row[dtsFilenameColIndex])
if len(row) > dtsNamespaceURIColIndex: namespaceURI = xlValue(row[dtsNamespaceURIColIndex])
lbType = lang = None
if action == "import":
if filetype in ("role", "arcrole"):
continue
elif filetype == "schema":
thisDoc.imports[prefix] = ( ("namespace", namespaceURI), ("schemaLocation", docRelpath(filename)) )
thisDoc.importXmlns[prefix] = namespaceURI
thisDoc.importFilenames[prefix] = filename
if re.match(r"http://[^/]+/us-gaap/", namespaceURI):
isUSGAAP = True
elif filetype == "linkbase":
typeLang = prefix.split()
if len(typeLang) > 0:
lbType = typeLang[0]
else:
lbType = "unknown"
thisDoc.linkbaseRefs.append( (lbType, filename, False) )
elif action == "include" and filename:
thisDoc.includes.append(docRelpath(filename))
elif action == "xmlns" and prefix and namespaceURI:
thisDoc.importXmlns[prefix] = namespaceURI
elif action in ("extension", "generate"):
if filetype == "schema":
if prefix:
# starts new document.
if not thisDoc.name:
del genDocs[thisDoc.name] # remove anonymous doc
thisDoc = newDoc(prefix) # new doc with prefix as its name
thisDoc.extensionSchemaPrefix = prefix
thisDoc.extensionSchemaFilename = filename
thisDoc.extensionSchemaNamespaceURI = namespaceURI
if not UrlUtil.isAbsolute(filename) and not os.path.isabs(filename):
thisDoc.extensionSchemaRelDirname = posixpath.dirname(filename)
else:
thisDoc.extensionSchemaRelDirname = None
elif filetype == "linkbase":
typeLang = prefix.split()
if len(typeLang) > 0:
lbType = typeLang[0]
else:
lbType = "unknown"
if len(typeLang) > 1:
lang = referenceRole = typeLang[1]
else:
lang = None
referenceRole = XbrlConst.standardReference
if lbType in ("label", "generic-label"):
# lang, if provided, is a regex pattern
thisDoc.labelLinkbases.append((lbType, lang, filename))
if action == "extension" and not extensionPrefixForCoreLabels:
extensionPrefixForCoreLabels = thisDoc.extensionSchemaPrefix
elif lbType in ("reference", "generic-reference"):
hasRefLB = True
thisDoc.referenceLinkbases.append((lbType, referenceRole, filename))
elif lbType == "presentation":
thisDoc.hasPreLB = hasPreLB = True
elif lbType == "definition":
thisDoc.hasDefLB = hasDefLB = True
elif lbType == "calculation":
thisDoc.hasCalLB = hasCalLB = True
elif lbType == "generic":
thisDoc.hasGenLB = hasGenLB = True
thisDoc.linkbaseRefs.append( (lbType, filename, True) )
elif filetype == "initialComment" and prefix:
thisDoc.initialComment = prefix
elif filetype == "schemaDocumentation" and prefix:
thisDoc.schemaDocumentation = prefix
elif filetype == "enumerationDocumentation":
thisDoc.hasEnumerationDocumentation = True
elif filetype == "role" and namespaceURI: # filename is definition, prefix is optional used-on QNames
thisDoc.extensionRoles[namespaceURI] = (filename, prefix)
elif filetype == "role label" and namespaceURI and prefix: # filename is label, prefix is language
thisDoc.extensionRoleLabels[namespaceURI].add( (filename, prefix) )
elif filetype == "schema-version" and filename:
thisDoc.extensionSchemaVersion = filename
elif filetype == "table-style" and filename == "xbrl-us":
isUSGAAP = True
elif filetype == "elements":
genElementsDoc = thisDoc
elif action == "meta" and filetype == "table-style" and filename == "xbrl-us":
isUSGAAP = True
elif action == "meta" and filetype == "generate-style" and filename == "import-separately":
isGenerateAndImport = False
elif action == "workbook" and filename:
importFileName = filename
elif action == "worksheet" and filename:
importSheetNames.append(filename)
elif action == "colheader" and filename and namespaceURI:
if namespaceURI == "split":
splitString = filename
else:
importColHeaderMap[filename].append(namespaceURI)
if namespaceURI not in importColumnHeaders:
fatalLoadingErrors.append("colheader {} definition {} not recognized.".format(filename, namespaceURI))
elif action == "skip rows" and filename:
fromRow, _sep, toRow = filename.partition("-")
try:
skipRows.append((int(fromRow), int(toRow) if toRow else int(fromRow)))
except (ValueError, TypeError):
fatalLoadingErrors.append("Exception (at skip rows): {error}, Excel sheet: {excelSheet} row: {excelRow}"
.format(error=err, excelSheet=dtsSheet, excelRow=iRow))
except Exception as err:
fatalLoadingErrors.append("Exception: {error}, Excel sheet: {excelSheet} row: {excelRow}, Traceback: {traceback}"
.format(error=err, excelSheet=dtsSheet, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2])))
# remove any imported linkbaseRefs that are also generated
for thisDoc in genDocs.values():
linkbaseRefsToRemove = [i
for i, (lbType, filename, generate) in enumerate(thisDoc.linkbaseRefs)
if not generate and (lbType, filename, True) in thisDoc.linkbaseRefs]
while len(linkbaseRefsToRemove):
i = linkbaseRefsToRemove.pop()
thisDoc.linkbaseRefs.pop(i)
dtsWs = None # dereference
genOrder = []
for name, doc in genDocs.items():
insertPos = len(genOrder)
for i, otherDoc in enumerate(genOrder):
if doc.name in otherDoc.imports:
insertPos = i # put this doc before any firstr doc that imports it
break
genOrder.insert(insertPos, doc)
if importFileName: # alternative workbook
importExcelBook = load_workbook(importFileName, read_only=True, data_only=True)
sheetNames = importExcelBook.get_sheet_names()
if importSheetNames:
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
else:
for s in sheetNames:
if s.endswith("Concepts"):
importSheetNames.append(s)
if not importSheetNames:
for s in sheetNames:
if "xbrl" in s.lower() and "dts" not in s:
importSheetNames.append(s)
if not importSheetNames:
fatalLoadingErrors.append("Worksheet {} specified for Excel importing, but not present in workbook.".format(importSheetName))
if not isUSGAAP and genOrder: # need extra namespace declaration
genOrder[0].importXmlns["iod"] = "http://disclosure.edinet-fsa.go.jp/taxonomy/common/2013-03-31/iod"
# find column headers row
headerCols = OrderedDict()
headerColsAllElrs = set()
hasLinkroleSeparateRow = True
hasPreferredLabelTextColumn = False
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
headerRows = set()
topDepth = 999999
for importSheetName in importSheetNames:
if importSheetName not in sheetNames:
continue
headerCols.clear()
headerRows.clear()
hasConceptAttributeColumn = False
hasDepthColumn = False
hasPresentationParentColumn = False
hasRelationshipToCol = False
hasrelationshipAttributeColumn = False
conceptsWs = importExcelBook[importSheetName]
def setHeaderCols(row):
headerCols.clear()
for iCol, colCell in enumerate(row):
v = xlValue(colCell)
if isinstance(v,str):
v = v.strip()
if v in importColHeaderMap:
for hdr in importColHeaderMap[v]:
if hdr in importColumnHeaders:
headerCols[importColumnHeaders[hdr]] = iCol
elif v in importColumnHeaders:
headerCols[importColumnHeaders[v]] = iCol
elif isinstance(v,str):
if any(v.startswith(r) for r in ("label,", "labels,", "reference,", "references,", "relationship to,")):
# custom/extension label/reference
m = resourceParsePattern.match(v)
if m:
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2) # last path seg of role
_resourceLangOrPart = m.group(4) # lang or part
headerCols[(_resourceType, _resourceRole, _resourceLangOrPart)] = iCol
else:
# custom/extension non-label/reference value column
headerCols[v] = iCol
# find out which rows are header rows
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
continue
#for iCol, colCell in enumerate(row):
setHeaderCols(row)
# must have some of these to be a header col
if (sum(1 for h in headerCols if h in ("name", "type", "depth", "periodType")) >= 3 or
sum(1 for h in headerCols if h == "name" or (isinstance(h, tuple) and h[0] == "relationship to")) >= 2):
# it's a header col
headerRows.add(iRow+1)
if 'linkrole' in headerCols:
hasLinkroleSeparateRow = False
if 'preferredLabel' in headerCols and any(isinstance(h, tuple) and h[0] == 'label' and h[1] == '/preferredLabel'
for h in headerCols):
hasPreferredLabelTextColumn = True
if 'depth' in headerCols:
hasDepthColumn = True
if 'presentationParent' in headerCols:
hasPresentationParentColumn = True
if not hasDepthColumn and hasPresentationParentColumn:
topDepth = 0
hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
headerCols.clear()
def cellHasValue(row, header, _type):
if header in headerCols:
iCol = headerCols[header]
return iCol < len(row) and isinstance(row[iCol].value, _type)
return False
def cellValue(row, header, strip=False, nameChars=False, default=None):
if header in headerCols:
iCol = headerCols[header]
if iCol < len(row):
v = xlValue(row[iCol])
if strip and isinstance(v, str):
v = v.strip()
if nameChars and isinstance(v, str):
v = ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
if v is None:
return default
return v
return default
def valueNameChars(v):
return ''.join(c for c in v if c.isalnum() or c in ('.', '_', '-'))
def rowPrefixNameValues(row):
prefix = cellValue(row, 'prefix', nameChars=True)
if cellHasValue(row, 'name', str):
if not prefix: # maybe name is a qname
prefix, _sep, _name = cellValue(row, 'name').partition(":")
if not _sep: # no prefix at all, whole string is name
prefix = ""
name = cellValue(row, 'name', nameChars=True)[len(prefix):]
else:
name = cellValue(row, 'name', nameChars=True)
else:
name = None
if not prefix and "prefix" not in headerCols and genElementsDoc is not None:
prefix = genElementsDoc.extensionSchemaPrefix
return prefix, name
def checkImport(thisDoc, qname):
prefix, sep, localName = qname.partition(":")
if sep:
if prefix not in thisDoc.imports:
if prefix == "xbrldt":
thisDoc.imports["xbrldt"] = ("namespace", XbrlConst.xbrldt), ("schemaLocation", "http://www.xbrl.org/2005/xbrldt-2005.xsd")
elif prefix == "nonnum":
thisDoc.imports["nonnum"] = ("namespace", "http://www.xbrl.org/dtr/type/non-numeric"), ("schemaLocation", "http://www.xbrl.org/dtr/type/nonNumeric-2009-12-16.xsd")
elif prefix != thisDoc.extensionSchemaPrefix and prefix != "xs":
cntlr.addToLog("Warning: prefix schema file is not imported for: {qname}"
.format(qname=qname),
messageCode="importExcel:warning", file=thisDoc.extensionSchemaFilename)
# find top depth
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
if (iRow + 1) in headerRows:
setHeaderCols(row)
hasConceptAttributeColumn = any(v.startswith("attribute, ") for v in headerCols if isinstance(v,str))
hasRelationshipAttributeColumn = any(v.startswith("relationship attribute, ") for v in headerCols if isinstance(v,str))
elif not (hasLinkroleSeparateRow and (iRow + 1) in headerRows) and 'depth' in headerCols:
depth = cellValue(row, 'depth')
if isinstance(depth, int) and depth < topDepth:
topDepth = depth
# find header rows
currentELR = currentELRdefinition = None
for iRow, row in enumerate(conceptsWs.rows if conceptsWs else ()):
useLabels = False
eltEnumRefsParts = None
if any(fromRow <= iRow+1 <= toRow for fromRow,toRow in skipRows):
continue
if (all(col.value is None for col in row) or
all(isinstance(row[i].value, str) and row[i].value.strip() == "n/a"
for i in (headerCols.get("name"), headerCols.get("type"), headerCols.get("value"))
if i is not None)):
continue # skip blank row
try:
isHeaderRow = (iRow + 1) in headerRows
isELRrow = hasLinkroleSeparateRow and (iRow + 2) in headerRows
if isHeaderRow:
setHeaderCols(row)
headerColsAllElrs |= _DICT_SET(headerCols.keys()) # accumulate all header cols for role checks
elif isELRrow:
currentELR = currentELRdefinition = None
for colCell in row:
v = str(xlValue(colCell) or '')
if v.startswith("http://"):
currentELR = v
elif not currentELRdefinition and v.endswith(" 科目一覧"):
currentELRdefinition = v[0:-5]
elif not currentELRdefinition:
currentELRdefinition = v
if currentELR or currentELRdefinition:
if hasPreLB:
preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasPresentationParentColumn:
preRels = set()
if hasDefLB:
defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasCalLB:
calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
calRels = set() # prevent duplications when same rel in different parts of tree
if hasGenLB:
genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
elif headerCols:
if "linkrole" in headerCols and cellHasValue(row, 'linkrole', str):
v = cellValue(row, 'linkrole', strip=True)
_trialELR = _trialELRdefinition = None
if v.startswith("http://"):
_trialELR = v
elif v.endswith(" 科目一覧"):
_trialELRdefinition = v[0:-5]
else:
_trialELRdefinition = v
if (_trialELR and _trialELR != currentELR) or (_trialELRdefinition and _trialELRdefinition != currentELRdefinition):
currentELR = _trialELR
currentELRdefinition = _trialELRdefinition
if currentELR or currentELRdefinition:
if hasPreLB:
preLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasDefLB:
defLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
if hasCalLB:
calLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
calRels = set() # prevent duplications when same rel in different parts of tree
if hasGenLB:
genLB.append( LBentry(role=currentELR, name=currentELRdefinition, isELR=True) )
prefix, name = rowPrefixNameValues(row)
if cellHasValue(row, 'depth', int):
depth = cellValue(row, 'depth')
elif hasDepthColumn:
depth = None # non-ELR section, no depth
else: # depth provided by parent reference
depth = 0
subsGrp = cellValue(row, 'substitutionGroup')
isConcept = subsGrp in ("xbrli:item", "xbrli:tuple",
"xbrldt:hypercubeItem", "xbrldt:dimensionItem")
if (prefix in genDocs) and name not in genDocs[prefix].extensionElements and name:
thisDoc = genDocs[prefix]
# elements row
eltType = cellValue(row, 'type')
eltTypePrefix = cellValue(row, 'typePrefix')
if not eltType:
eltType = 'xbrli:stringItemType'
elif eltTypePrefix and ':' not in eltType:
eltType = eltTypePrefix + ':' + eltType
elif ':' not in eltType and eltType.endswith("ItemType"):
eltType = 'xbrli:' + eltType
abstract = cellValue(row, 'abstract')
nillable = cellValue(row, 'nillable')
balance = cellValue(row, 'balance')
periodType = cellValue(row, 'periodType')
eltAttrs = {"name": name, "id": (prefix or "") + "_" + name}
if eltType:
eltAttrs["type"] = eltType
checkImport(thisDoc, eltType)
if subsGrp:
eltAttrs["substitutionGroup"] = subsGrp
checkImport(thisDoc, subsGrp)
if abstract or subsGrp in ("xbrldt:hypercubeItem", "xbrldt:dimensionItem"):
eltAttrs["abstract"] = abstract or "true"
if nillable:
eltAttrs["nillable"] = nillable
if balance:
eltAttrs["{http://www.xbrl.org/2003/instance}balance"] = balance
if periodType:
eltAttrs["{http://www.xbrl.org/2003/instance}periodType"] = periodType
if hasConceptAttributeColumn:
# custom attributes (attribute, prefix:localName in header)
for header in headerCols:
if isinstance(header, str) and header.startswith("attribute, "):
value = cellValue(row, header)
if value not in (None, ""):
eltAttrs[header[11:]] = value # fix QName later after schemaElt exists
eltFacets = None
eltEnumRefParts = None
if eltType not in ("nonnum:domainItemType", "xbrli:booleanItemType", "xbrli:positiveIntegerItemType", "xbrli:dateItemType",
"xbrli:gYearItemType"):
for facet in ("minLength", "maxLength", "minInclusive", "maxInclusive",
"length", "fixed", "pattern", "enumeration", "excludedEnumeration"):
v = cellValue(row, facet)
if v is not None:
if facet == "enumeration" and v.startswith("See tab "): # check for local or tab-contained enumeration
_match = re.match(r"See tab ([^!]+)([!]([0-9]+):([0-9]+))?", v)
if _match:
_tab, _dummy, _rowFrom, _rowTo = _match.groups()
if _tab in sheetNames:
enumWs = importExcelBook[_tab]
if _rowFrom and _rowTo:
# take cols named "enumeration" and "reference parts"
colHdrs = [enumWs.cell(row=1,column=i).value for i in range(1,enumWs.max_column+1)]
eltEnumValues = []
eltEnumRefsParts = []
for i in range(int(_rowFrom), int(_rowTo)+1):
_parts = []
eltEnumRefsParts.append(_parts)
for j, h in enumerate(colHdrs):
c = enumWs.cell(row=i,column=j+1).value
if c is not None:
if h == "enumeration":
eltEnumValues.append(str(c))
else:
m = resourceParsePattern.match(h)
if m:
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2) # last path seg of role
_resourceLangOrPart = m.group(4) # lang or part
_parts.append(((_resourceType, _resourceRole, _resourceLangOrPart), c))
v = "\n".join(eltEnumValues) if eltEnumValues else None
else: # cols 1 and 2 are enum and labels
v = "\n".join(" = ".join(xlValue(col) for col in row if xlValue(col))
for i, row in enumerate(enumWs.rows)
if i > 0) # skip heading row
if v is not None:
if eltFacets is None: eltFacets = {}
eltFacets[facet] = v
# if extension type is this schema, add extensionType for facets
if eltType and ':' in eltType:
_typePrefix, _sep, _typeName = eltType.rpartition(":")
baseType = cellValue(row, 'baseType')
baseTypePrefix = cellValue(row, 'baseTypePrefix')
if baseType and baseTypePrefix:
_baseType = "{}:{}".format(baseTypePrefix, baseType)
elif baseType:
_baseType = baseType
elif _typeName.endswith("ItemType"):
_baseType = "xbrli:tokenItemType" # should be a column??
else:
_baseType = "xs:token"
if _typePrefix in genDocs:
_typeDoc = genDocs[_typePrefix]
if _typeName not in _typeDoc.extensionTypes:
_typeDoc.extensionTypes[_typeName] = ({"name":_typeName, "base":_baseType},eltFacets)
thisDoc.extensionElements[name] = (eltAttrs, None)
else: # not declarable
thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
else:
thisDoc.extensionElements[name] = (eltAttrs, eltFacets)
thisDoc = None # deref for debugging
useLabels = True
if depth is not None or hasPresentationParentColumn:
if name is None:
_label = None
for colCell in row:
if colCell.value is not None:
_label = xlValue(colCell)
break
print ("Sheet {} row {} has relationships and no \"name\" field, label: {}".format(importSheetName, iRow+1, _label))
if hasPreLB:
preferredLabel = cellValue(row, 'preferredLabel')
if hasDepthColumn:
entryList = lbDepthList(preLB, depth)
if entryList is not None and isConcept:
if not name or not prefix:
_name = "none"
if depth == topDepth:
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
else:
entryList.append( LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
role=preferredLabel) )
elif hasPresentationParentColumn:
preParent = cellValue(row, 'presentationParent', default='') # only one top parent makes sense
if preParent:
preParentPrefix, _sep, preParentName = preParent.rpartition(":")
preParentName = valueNameChars(preParentName)
entryList = lbDepthList(preLB, topDepth)
if entryList is not None:
preRel = (preParentPrefix, preParentName, prefix, name, currentELR or currentELRdefinition)
if preRel not in preRels:
entryList.append( LBentry(prefix=preParentPrefix, name=preParentName, isRoot=True, childStruct=
[LBentry(prefix=prefix, name=name, arcrole=XbrlConst.parentChild,
preferredLabel=preferredLabel )]) )
preRels.add(preRel)
else:
pass
if hasDefLB and topDepth != 999999:
entryList = lbDepthList(defLB, depth)
if entryList is not None:
if depth == topDepth:
if isConcept:
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True) )
else:
if (not preferredLabel or # prevent start/end labels from causing duplicate dim-mem relationships
not any(lbEntry.prefix == prefix and lbEntry.name == name
for lbEntry in entryList)):
# check if entry is a typed dimension
eltAttrs = {}
parentLBentry = lbDepthList(defLB, depth - 1)[-1]
parentName = parentLBentry.name
parentEltAttrs = {}
for doc in genDocs.values():
if name in doc.extensionElements:
eltAttrs = doc.extensionElements.get(name, NULLENTRY)[0]
if parentName in doc.extensionElements:
parentEltAttrs = doc.extensionElements.get(parentName, NULLENTRY)[0]
if (isUSGAAP and # check for typed dimensions
parentEltAttrs.get("substitutionGroup") == "xbrldt:dimensionItem"
and eltAttrs.get("type") != "nonnum:domainItemType"):
# typed dimension, no LBentry
typedDomainRef = "#" + eltAttrs.get("id", "")
parentEltAttrs["{http://xbrl.org/2005/xbrldt}typedDomainRef"] = typedDomainRef
elif isConcept:
# explicit dimension
role = None # default for a default dimension
if "dimensionDefault" in headerCols and cellHasValue(row, 'dimensionDefault', (str,bool)):
v = cellValue(row, 'dimensionDefault', strip=True)
if v:
role = "_dimensionDefault_"
entryList.append( LBentry(prefix=prefix, name=name, arcrole="_dimensions_", role=role) )
if hasCalLB:
calcParents = cellValue(row, 'calculationParent', default='').split()
calcWeights = str(cellValue(row, 'calculationWeight', default='')).split() # may be float or string
if calcParents and calcWeights:
# may be multiple parents split by whitespace
for i, calcParent in enumerate(calcParents):
calcWeight = calcWeights[i] if i < len(calcWeights) else calcWeights[-1]
calcParentPrefix, _sep, calcParentName = calcParent.rpartition(":")
calcParentName = valueNameChars(calcParentName)
entryList = lbDepthList(calLB, topDepth)
if entryList is not None:
calRel = (calcParentPrefix, calcParentName, prefix, name)
if calRel not in calRels:
entryList.append( LBentry(prefix=calcParentPrefix, name=calcParentName, isRoot=True, childStruct=
[LBentry(prefix=prefix, name=name, arcrole=XbrlConst.summationItem, weight=calcWeight )]) )
calRels.add(calRel)
else:
pass
hasRelationshipToCol = any(h[0] == "relationship to" for h in headerCols if isinstance(h, tuple))
# accumulate extension labels and any reference parts
if useLabels or hasRelationshipToCol:
prefix, name = rowPrefixNameValues(row)
if name is not None and (prefix in genDocs or extensionPrefixForCoreLabels or hasRelationshipToCol):
thisDoc = genDocs.get(extensionPrefixForCoreLabels or prefix) # None for relationshipTo a imported concept
preferredLabel = cellValue(row, 'preferredLabel')
for colItem, iCol in headerCols.items():
if isinstance(colItem, tuple):
colItemType = colItem[0]
role = colItem[1]
lang = part = colItem[2] # lang for label, part for reference
cell = row[iCol]
v = xlValue(cell)
if v is None or (isinstance(v, str) and not v):
values = ()
else:
v = str(v) # may be an int or float instead of str
if colItemType in ("label", "reference", "relationship to"):
values = (v,)
elif colItemType in ("labels", "references"):
values = v.split('\n')
if preferredLabel and "indented" in colItem and not hasPreferredLabelTextColumn: # indented column sets preferredLabel if any
role = preferredLabel
for i, value in enumerate(values):
if colItemType == "relationship to": # doesn't require thisDoc
entryList = lbDepthList(genLB, topDepth)
if entryList is not None:
toName = value
if ":" in toName:
toPrefix, _sep, toName = value.partition(":")
else:
toPrefix = prefix
if hasRelationshipAttributeColumn:
# custom attributes (attribute, prefix:localName in header)
relAttrs = None
for header in headerCols:
if isinstance(header, str) and header.startswith("relationship attribute, "):
attrValue = cellValue(row, header)
if attrValue not in (None, ""):
if relAttrs is None: relAttrs = {}
relAttrs[header[24:]] = attrValue # fix QName later after schemaElt exists
entryList.append( LBentry(prefix=prefix, name=name, isRoot=True, childStruct=
[LBentry(prefix=toPrefix, name=toName, arcrole=role, relAttrs=relAttrs)]) )
elif thisDoc is None:
pass
# following options only apply to linkbases of generated taxonomies
elif colItemType in ("label", "labels"):
if isConcept:
if hasPreferredLabelTextColumn and role == "/preferredLabel":
role = preferredLabel
else:
if role == XbrlConst.standardLabel:
role = XbrlConst.genStandardLabel # must go in generic labels LB
elif role == XbrlConst.documentationLabel:
role = XbrlConst.genDocumentationLabel
else:
continue
thisDoc.extensionLabels[prefix, name, lang, role] = value.strip()
elif hasRefLB and colItemType == "reference":
if isConcept:
# keep parts in order and not duplicated
thisDoc.extensionReferences[prefix, name, role].add((part, value.strip()))
elif hasRefLB and colItemType == "references":
if isConcept:
# role ending in # is appended with the value ordinal
if role.endswith("#"):
_role = "{}{:05.0f}".format(role, i)
else:
_role = role
_value = value.strip().replace("\\n", "\n")
if part is None: # part space value
_part, _sep, _value = _value.partition(" ")
else:
_part = part
# keep parts in order and not duplicated
thisDoc.extensionReferences[prefix, name, _role].add((_part, _value))
if isConcept and eltEnumRefsParts and thisDoc is not None:
for i, _enumRefParts in enumerate(eltEnumRefsParts):
for (colItemType, role, part), value in _enumRefParts:
if colItemType == "reference":
_role = "{}#{:05.0f}".format(role, i+1)
thisDoc.extensionReferences[prefix, name, _role].add((part, value.strip()))
thisDoc = None # deref for debugging
except Exception as err:
fatalLoadingErrors.append("Excel sheet: {excelSheet}, row: {excelRow}, error: {error}, Traceback: {traceback}"
.format(error=err, excelSheet=importSheetName, excelRow=iRow, traceback=traceback.format_tb(sys.exc_info()[2]))) # uncomment to debug raise
if not headerCols:
if not conceptsWs:
fatalLoadingErrors.append("Neither control worksheet (XBRL DTS tab) nor standard columns found, no DTS imported.")
elif not currentELR:
fatalLoadingErrors.append("Extended link role not found, no DTS imported.")
if fatalLoadingErrors:
raise Exception(",\n ".join(fatalLoadingErrors))
if isUSGAAP and hasDefLB:
# move line items above table
def fixUsggapTableDims(lvl1Struct, level=0):
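            # Restructures US-GAAP style definition trees: each "...LineItems" node found
            # under a "...Table"/"...Cube" node is promoted above that table (the table
            # becomes a child of the line items); heading entries and empty links left
            # behind are dropped.  Returns True if a table was found at this level or below.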
foundTable = False
emptyLinks = []
foundHeadingItems = []
foundLineItems = []
for lvl1Entry in lvl1Struct:
for lvl2Entry in lvl1Entry.childStruct:
if any(lvl2Entry.name.endswith(suffix) for suffix in ("Table", "_table", "Cube", "_cube")):
for lvl3Entry in lvl2Entry.childStruct:
if any(lvl3Entry.name.endswith(suffix) for suffix in ("LineItems", "_line_items")):
foundLineItems.append((lvl1Entry, lvl2Entry, lvl3Entry))
foundTable = True
break
else:
foundHeadingItems.append((lvl1Entry, lvl2Entry))
if not foundLineItems:
foundNestedTable = fixUsggapTableDims(lvl1Entry.childStruct, level+1)
if level == 0 and not foundNestedTable:
emptyLinks.append(lvl1Entry)
foundTable |= foundNestedTable
del foundHeadingItems[:]
#if foundLineItems or foundHeadingItems:
# print("lvlentry {}\n headingITems {}\n emptyLinks {}\n\n".format(foundLineItems, foundHeadingItems, emptyLinks))
for lvl1Entry, lvl2Entry, lvl3Entry in foundLineItems:
i1 = lvl1Entry.childStruct.index(lvl2Entry)
lvl1Entry.childStruct.insert(i1, lvl3Entry) # must keep lvl1Rel if it is __root__
lvl3Entry.childStruct.insert(0, lvl2Entry)
if any(lvl1Entry.name.endswith(suffix)
for suffix in ("Abstract", "_abstract", "Root", "_root", "_package", "_heading")):
lvl1Entry.childStruct.remove(lvl2Entry)
lvl2Entry.childStruct.remove(lvl3Entry)
for lvl1Entry, lvl2Entry in foundHeadingItems:
lvl1Entry.childStruct.remove(lvl2Entry)
for emptyLink in emptyLinks:
lvl1Struct.remove(emptyLink)
return foundTable
fixUsggapTableDims(defLB)
modelDocuments = []
modelXbrl.blockDpmDBrecursion = True
def generateDoc(thisDoc, parentDoc, visitedDocNames):
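        # Creates the extension schema document for thisDoc (recursing first into any
        # imported documents that also have to be generated), then adds its role types,
        # elements and types and generates the associated label, reference and
        # relationship linkbases.  visitedDocNames guards against circular
        # generation-order dependencies; parentDoc is None for the entry document.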
if thisDoc.name in visitedDocNames:
modelXbrl.error("loadFromExcel:circularDependency",
"Generation order dependency is circular: %(circularDependency)s",
modelXbrl=modelXbrl, circularDependency=",".join(visitedDocNames) + ", " + thisDoc.name)
return
visitedDocNames.append(thisDoc.name)
if XbrlConst.xsd not in thisDoc.importXmlns.values():
eltName = 'schema xmlns="{}"'.format(XbrlConst.xsd)
else:
for k,v in thisDoc.importXmlns.items():
if v == XbrlConst.xsd:
eltName = "{}:schema".format(k)
break
doc = createModelDocument(
modelXbrl,
Type.SCHEMA,
thisDoc.extensionSchemaFilename,
isEntry=(parentDoc is None),
# initialComment="extracted from OIM {}".format(mappedUri),
documentEncoding="utf-8",
            base='', # block pathname from becoming absolute
initialXml='''
<{eltName}
targetNamespace="{targetNamespace}"
attributeFormDefault="unqualified"
elementFormDefault="qualified"
xmlns:xs="http://www.w3.org/2001/XMLSchema"
xmlns:{extensionPrefix}="{targetNamespace}"
{importXmlns}
xmlns:nonnum="http://www.xbrl.org/dtr/type/non-numeric"
xmlns:link="http://www.xbrl.org/2003/linkbase"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrldt="http://xbrl.org/2005/xbrldt"
{schemaVersion}{xmlLang} />
'''.format(eltName=eltName,
targetNamespace=thisDoc.extensionSchemaNamespaceURI,
extensionPrefix=thisDoc.extensionSchemaPrefix,
importXmlns=''.join('xmlns:{0}="{1}"\n'.format(prefix, namespaceURI)
for prefix, namespaceURI in thisDoc.importXmlns.items()),
schemaVersion='version="{}" '.format(thisDoc.extensionSchemaVersion) if thisDoc.extensionSchemaVersion else '',
xmlLang='\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
),
initialComment=thisDoc.initialComment
)
if parentDoc is None:
modelXbrl.modelDocument = doc
thisDoc.generated = True # prevent recursion
        doc.loadedFromExcel = True # signal to save generated taxonomy in saveToFile below
doc.inDTS = True # entry document always in DTS
        doc.targetNamespace = thisDoc.extensionSchemaNamespaceURI # set here; schemaDiscover would set it too late otherwise
schemaElt = doc.xmlRootElement
#foreach linkbase
annotationElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "annotation")
if thisDoc.schemaDocumentation:
XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=thisDoc.schemaDocumentation)
appinfoElt = XmlUtil.addChild(annotationElt, XbrlConst.xsd, "appinfo")
# add linkbaseRefs
appinfoElt = XmlUtil.descendant(schemaElt, XbrlConst.xsd, "appinfo")
# don't yet add linkbase refs, want to process imports first to get roleType definitions
# add includes
for filename in thisDoc.includes:
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "include", attributes=( ("schemaLocation", filename), ) )
# add imports
for importPrefix, importAttributes in sorted(thisDoc.imports.items(),
key=lambda item:item[1]):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes)
# is the import an xsd which we have to generate
if importPrefix in genDocs and not genDocs[importPrefix].generated:
generateDoc(genDocs[importPrefix], doc, visitedDocNames) # generate document
# add imports for gen LB if any role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
for importAttributes in ((("namespace", XbrlConst.gen), ("schemaLocation", "http://www.xbrl.org/2008/generic-link.xsd")),
(("namespace", XbrlConst.genLabel), ("schemaLocation", "http://www.xbrl.org/2008/generic-label.xsd"))):
XmlUtil.addChild(schemaElt, XbrlConst.xsd, "import", attributes=importAttributes )
_enumNum = [1] # must be inside an object to be referenced in a nested procedure
def addFacets(thisDoc, restrElt, facets):
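            # Adds xs:* facet children to the restriction element restrElt.  Enumeration
            # facet values are "value=label" lines (the label may be a JSON dict of
            # per-language labels); labels are stored as generic standard labels and,
            # when enabled, also written out as xs:documentation annotations.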
if facets:
excludedEnumeration = facets.get("excludedEnumeration")
if ((annotateEnumerationsDocumentation and excludedEnumeration == "X")
or excludedEnumeration == "D"):
                    # when enumerations are emitted as documentation only, the annotation must be the first child element
for facet, facetValue in facets.items():
if facet == "enumeration":
enumerationsDocumentation = []
for valLbl in facetValue.split("\n"):
val, _sep, _label = valLbl.partition("=")
val = val.strip()
if len(val):
if val == "(empty)":
val = ""
_label = _label.strip()
enumerationsDocumentation.append("{}: {}".format(val, _label) if _label else val)
XmlUtil.addChild(XmlUtil.addChild(restrElt, XbrlConst.xsd, "annotation"),
XbrlConst.xsd, "documentation", text=
" \n".join(enumerationsDocumentation))
for facet, facetValue in sorted(facets.items(), key=lambda i:facetSortOrder.get(i[0],i[0])):
if facet == "enumeration":
if not annotateEnumerationsDocumentation and not excludedEnumeration:
for valLbl in facetValue.split("\n"):
val, _sep, _label = valLbl.partition("=")
val = val.strip()
_label = _label.strip()
if len(val):
if val == "(empty)":
val = ""
_attributes = {"value":val}
if _label:
_labelsByLang = None
if _label.startswith("{") and _label.endswith("}"):
try:
# multi-lingual labels are json dict
_labelsByLang = json.loads(_label)
except json.decoder.JSONDecodeError:
_labelsByLang = None
_name = "enum{}".format(_enumNum[0])
_attributes["id"] = thisDoc.extensionSchemaPrefix + "_" + _name
_enumNum[0] += 1
if _labelsByLang: #multilingual
for _lang, _langLabel in _labelsByLang.items():
thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
else: # non-multi-lingual labels
thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, defaultLabelLang, XbrlConst.genStandardLabel] = _label
enumElt = XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes=_attributes)
if thisDoc.hasEnumerationDocumentation and _label:
if _labelsByLang: #multilingual
annotationElt = XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation")
for _lang, _langLabel in _labelsByLang.items():
thisDoc.extensionLabels[thisDoc.extensionSchemaPrefix, _name, _lang, XbrlConst.genStandardLabel] = _langLabel
XmlUtil.addChild(annotationElt, XbrlConst.xsd, "documentation", text=_langLabel,
attributes={"{http://www.w3.org/XML/1998/namespace}lang": _lang})
else: # non-multi-lingual labels
XmlUtil.addChild(XmlUtil.addChild(enumElt, XbrlConst.xsd, "annotation"),
XbrlConst.xsd, "documentation", text=_label)
elif facet != "excludedEnumeration":
XmlUtil.addChild(restrElt, XbrlConst.xsd, facet, attributes={"value":str(facetValue)})
# add elements
for eltName, eltDef in sorted(thisDoc.extensionElements.items(), key=lambda item: item[0]):
eltAttrs, eltFacets = eltDef
if eltFacets and "type" in eltAttrs:
eltType = eltAttrs["type"]
del eltAttrs["type"]
if any(':' in attrname for attrname in eltAttrs.keys()): # fix up any prefixed attr names to be clark notation
for attrname, attrvalue in eltAttrs.copy().items():
if not attrname.startswith('{') and ':' in attrname:
del eltAttrs[attrname]
eltAttrs[schemaElt.prefixedNameQname(attrname).clarkNotation] = attrvalue
isConcept = eltAttrs.get('substitutionGroup') in (
"xbrli:item", "xbrli:tuple", "xbrldt:hypercubeItem", "xbrldt:dimensionItem")
elt = XmlUtil.addChild(schemaElt,
XbrlConst.xsd, "element",
attributes=eltAttrs)
if annotateElementDocumentation:
for labelRole in (XbrlConst.documentationLabel, XbrlConst.genDocumentationLabel):
labelKey = (thisDoc.extensionSchemaPrefix, eltAttrs["name"], defaultLabelLang, labelRole)
if labelKey in thisDoc.extensionLabels:
XmlUtil.addChild(XmlUtil.addChild(elt, XbrlConst.xsd, "annotation"),
XbrlConst.xsd, "documentation", text=thisDoc.extensionLabels[labelKey])
                        break # if std doc label found, don't continue to look for generic doc label
if elt is not None and eltFacets and isConcept:
cmplxType = XmlUtil.addChild(elt, XbrlConst.xsd, "complexType")
cmplxCont = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
restrElt = XmlUtil.addChild(cmplxCont, XbrlConst.xsd, "restriction", attributes={"base": eltType})
addFacets(thisDoc, restrElt, eltFacets)
del eltType
for roleURI, (roleDefinition, usedOnRoles) in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[1]):
roleElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "roleType",
attributes=(("roleURI", roleURI),
("id", "roleType_" + roleURI.rpartition("/")[2])))
if roleDefinition:
XmlUtil.addChild(roleElt, XbrlConst.link, "definition", text=roleDefinition)
if usedOnRoles:
for usedOnRole in usedOnRoles.split():
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=usedOnRole)
else:
if hasPreLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in preLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:presentationLink")
if hasDefLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in defLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:definitionLink")
if hasCalLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in calLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text="link:calculationLink")
if hasGenLB and any(e.childStruct and e.isELR and (e.role == roleURI or e.name == roleDefinition) for e in genLB):
XmlUtil.addChild(roleElt, XbrlConst.link, "usedOn", text=qname("{http://xbrl.org/2008/generic}genlink:link"))
# add role definitions (for discovery) and generic labels
if any(roleURI in thisDoc.extensionRoleLabels for roleURI in thisDoc.extensionRoles.keys()):
# add appinfo generic linkbase for gen labels
genLabLB = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbase")
XmlUtil.addChild(genLabLB, XbrlConst.link, "roleRef",
attributes=(("roleURI", XbrlConst.genStandardLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd#standard-label"),
("{http://www.w3.org/1999/xlink}type", "simple")))
XmlUtil.addChild(genLabLB, XbrlConst.link, "arcroleRef",
attributes=(("arcroleURI", elementLabel),
("{http://www.w3.org/1999/xlink}href", "http://www.xbrl.org/2008/generic-label.xsd#element-label"),
("{http://www.w3.org/1999/xlink}type", "simple")))
linkElt = XmlUtil.addChild(genLabLB, qname("{http://xbrl.org/2008/generic}genlink:link"),
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
for roleURI, _defLabel in sorted(thisDoc.extensionRoles.items(), key=lambda rd: rd[0]):
if roleURI in thisDoc.extensionRoleLabels:
xlLabel = roleURI.rpartition("/")[2]
XmlUtil.addChild(linkElt, XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", "#roleType_" + xlLabel),
("{http://www.w3.org/1999/xlink}label", "loc_" + xlLabel)))
XmlUtil.addChild(linkElt, XbrlConst.qnGenArc,
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel),
("{http://www.w3.org/1999/xlink}from", "loc_" + xlLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + xlLabel)))
for (text, lang) in thisDoc.extensionRoleLabels[roleURI]:
XmlUtil.addChild(linkElt, qname("{http://xbrl.org/2008/label}genlabel:label"),
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + xlLabel),
("{http://www.w3.org/1999/xlink}role", XbrlConst.genStandardLabel),
("{http://www.w3.org/XML/1998/namespace}lang", lang)),
text=text)
def addLinkbaseRef(lbType, lbFilename, lbDoc):
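            # Appends a link:linkbaseRef for lbFilename to the schema's appinfo element,
            # using the standard .../{lbType}LinkbaseRef role (generic linkbase refs carry
            # no role); lbDoc, when given, is recorded as a referenced generated document.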
role = "http://www.xbrl.org/2003/role/{0}LinkbaseRef".format(lbType)
lbRefElt = XmlUtil.addChild(appinfoElt, XbrlConst.link, "linkbaseRef",
attributes=(("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(lbFilename, thisDoc.extensionSchemaRelDirname)),
("{http://www.w3.org/1999/xlink}arcrole", "http://www.w3.org/1999/xlink/properties/linkbase"),
# generic label ref has no role
) + (() if lbType.startswith("generic") else
(("{http://www.w3.org/1999/xlink}role", role),))
)
            if lbDoc: # provided for generated linkbase refs
doc.referencesDocument[lbDoc] = ModelDocumentReference("href", lbRefElt)
# add referenced (not generated) linkbases
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
if not generate:
# if linkbase is generated by another doc which isn't generated yet, generate it
for otherGenDoc in genDocs.values():
if not otherGenDoc.generated and any(
_otherLbRefType == lbRefType and _otherFilename == filename and _otherGenerate
for _otherLbRefType, _otherFilename, _otherGenerate in otherGenDoc.linkbaseRefs):
generateDoc(otherGenDoc, doc, visitedDocNames) # generate document
addLinkbaseRef(lbRefType, filename, None)
doc.schemaDiscover(schemaElt, False, thisDoc.extensionSchemaNamespaceURI)
# add types after include and import are discovered
# block creating any type which was previously provided by an include of the same namespace
for typeName, typeDef in sorted(thisDoc.extensionTypes.items(), key=lambda item: item[0]):
if qname(thisDoc.extensionSchemaNamespaceURI, typeName) in modelXbrl.qnameTypes:
continue # type already exists, don't duplicate
typeAttrs, typeFacets = typeDef
if typeName.endswith("ItemType") or typeAttrs.get("base", "").endswith("ItemType"):
cmplxType = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "complexType", attributes={"name": typeAttrs["name"]})
contElt = XmlUtil.addChild(cmplxType, XbrlConst.xsd, "simpleContent")
else:
contElt = XmlUtil.addChild(schemaElt, XbrlConst.xsd, "simpleType", attributes={"name": typeAttrs["name"]})
restrElt = XmlUtil.addChild(contElt, XbrlConst.xsd, "restriction", attributes={"base": typeAttrs["base"]})
            # remove duplicate facets already present in the base type
baseQn = qname(schemaElt, typeAttrs.get("base"))
if typeFacets:
if baseQn and baseQn.namespaceURI not in (XbrlConst.xsd, XbrlConst.xbrli) and baseQn in modelXbrl.qnameTypes:
# remove duplicated facets of underlying type
baseTypeFacets = modelXbrl.qnameTypes[baseQn].facets or () # allow iteration if None
typeFacets = dict((facet, value)
for facet, value in typeFacets.items()
if facet not in baseTypeFacets or str(baseTypeFacets[facet]) != value)
addFacets(thisDoc, restrElt, typeFacets)
# find extension label roles, reference roles and parts
extLabelRoles = {}
extReferenceRoles = {}
extReferenceParts = {}
extReferenceSchemaDocs = {}
extUnrecognizedRoles = set()
relationshipArcroles = {}
relationshipArcqnames = {}
def setExtRefPart(partLocalName):
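            # Resolves a reference part local name to its QName (a concept whose
            # substitution group head is link:part) and records that part's schema
            # document for inclusion in the reference linkbase's xsi:schemaLocation.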
if partLocalName not in extReferenceParts:
for partConcept in modelXbrl.nameConcepts.get(partLocalName, ()):
if partConcept is not None and partConcept.subGroupHeadQname == qnLinkPart:
extReferenceParts[partLocalName] = partConcept.qname
extReferenceSchemaDocs[partConcept.qname.namespaceURI] = (
partConcept.modelDocument.uri if partConcept.modelDocument.uri.startswith("http://") else
partConcept.modelDocument.basename)
break
for _headerColKey in headerColsAllElrs:
if isinstance(_headerColKey, tuple) and len(_headerColKey) >= 3 and not _headerColKey[1].startswith("http://"):
_resourceType = _headerColKey[0]
_resourceRole = _headerColKey[1]
_resourceLangOrPart = _headerColKey[2]
elif isinstance(_headerColKey, str) and "!reference" in _headerColKey:
m = resourceParsePattern.match(_headerColKey.partition("!")[2])
_resourceType = m.group(1)
_resourceRole = "/" + m.group(2)
_resourceLangOrPart = m.group(4)
else:
continue
_resourceQName, _standardRoles = {
"label": (qnLinkLabel, standardLabelRoles),
"labels": (qnLinkLabel, standardLabelRoles),
"reference": (qnLinkReference, standardReferenceRoles),
"references": (qnLinkReference, standardReferenceRoles)
}.get(_resourceType, (None,()))
_resourceRoleURI = None
# find resource role
for _roleURI in _standardRoles:
if _roleURI.endswith(_resourceRole):
_resourceRoleURI = _roleURI
_resourceRoleMatchPart = _resourceRole
break
if _resourceRoleURI is None: # try custom roles
_resourceRoleMatchPart = _resourceRole.partition("#")[0] # remove # part
for _roleURI in modelXbrl.roleTypes:
if _roleURI.endswith(_resourceRoleMatchPart):
for _roleType in modelXbrl.roleTypes[_roleURI]:
if _resourceQName in _roleType.usedOns:
_resourceRoleURI = _roleURI
break
if _resourceType in ("label", "labels"):
if _resourceRoleURI:
extLabelRoles[_resourceRoleMatchPart] = _resourceRoleURI
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionLabels.keys()):
modelXbrl.error("loadFromExcel:labelResourceRole",
"Label resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType in ("reference", "references"):
if _resourceRoleURI:
extReferenceRoles[_resourceRoleMatchPart] = _resourceRoleURI
# find part QName
setExtRefPart(_resourceLangOrPart)
elif any(_resourceRoleMatchPart == k[2] for k in thisDoc.extensionReferences.keys()):
modelXbrl.error("loadFromExcel:referenceResourceRole",
"Reference resource role not found: %(role)s",
modelXbrl=modelXbrl, role=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
elif _resourceType == "relationship to":
for _arcroleURI in modelXbrl.arcroleTypes:
if _arcroleURI.endswith(_resourceRoleMatchPart):
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
for _resourceQName in _arcroleType.usedOns:
break
break
if _resourceQName is None:
modelXbrl.error("loadFromExcel:relationshipArcrole",
"Relationship arcrole not found: %(arcrole)s",
modelXbrl=modelXbrl, arcrole=_resourceRoleMatchPart, filename=thisDoc.extensionSchemaNamespaceURI)
else:
relationshipArcroles[_resourceRoleMatchPart] = _arcroleURI
relationshipArcqnames[_arcroleURI] = _resourceQName
# label linkbase
for lbType, lang, filename in thisDoc.labelLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
langPattern = re.compile(lang or ".*")
_isGeneric = lbType.startswith("generic")
if _isGeneric and "http://xbrl.org/2008/label" not in modelXbrl.namespaceDocs:
# must pre-load generic linkbases in order to create properly typed elements (before discovery because we're creating elements by lxml)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-link.xsd", isDiscovered=True)
ModelDocument.load(modelXbrl, "http://www.xbrl.org/2008/generic-label.xsd", isDiscovered=True)
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
xmlns:genlabel="http://xbrl.org/2008/label"
""" if _isGeneric else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/label http://www.xbrl.org/2008/generic-label.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#element-label" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
gen if _isGeneric else link,
"link" if _isGeneric else "labelLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
for labelKey, text in thisDoc.extensionLabels.items():
prefix, name, labelLang, role = labelKey
labelLang = labelLang or defaultLabelLang
role = role.partition("#")[0] # remove # part
role = extLabelRoles.get(role, role) # get custom role, if any
if langPattern.match(labelLang) and _isGeneric == (role in (XbrlConst.genStandardLabel, XbrlConst.genDocumentationLabel)):
locLabel = prefix + "_" + name
if locLabel not in locs:
locs.add(locLabel)
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
gen if _isGeneric else link,
"arc" if _isGeneric else "labelArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", elementLabel if _isGeneric else conceptLabel),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", 1.0)))
XmlUtil.addChild(linkElt,
XbrlConst.genLabel if _isGeneric else XbrlConst.link,
"label",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)) + (
(("{http://www.w3.org/XML/1998/namespace}lang", labelLang),)
if True or lang != saveXmlLang else ()),
text=text)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedLabelRole",
"Label roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
# reference linkbase
for lbType, referenceRole, filename in thisDoc.referenceLinkbases:
thisDoc.thisLBdir = posixpath.dirname(filename)
_isGeneric = lbType.startswith("generic")
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base="", initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"
{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}{}"
{}>{}</linkbase>
""".format("""
xmlns:genlink="http://xbrl.org/2008/generic"
        xmlns:genreference="http://xbrl.org/2008/reference"
""" if _isGeneric else "",
"".join([" {} {}".format(_ns, _uri) for _ns, _uri in extReferenceSchemaDocs.items()]),
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
http://xbrl.org/2008/reference http://www.xbrl.org/2008/generic-reference.xsd
""" if _isGeneric else "",
'\n xml:lang="{}"'.format(saveXmlLang) if saveXmlLang else "",
"""
<roleRef roleURI="http://www.xbrl.org/2008/role/label" xlink:href="http://www.xbrl.org/2008/generic-label.xsd#standard-label" xlink:type="simple"/>
<arcroleRef arcroleURI="http://xbrl.org/arcrole/2008/element-reference" xlink:href="http://xbrl.org/2008/generic-reference.xsd#element-reference" xlink:type="simple"/>
""" if _isGeneric else ""),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
if isGenerateAndImport:
addLinkbaseRef(lbType, filename, lbDoc) # must be explicitly imported
lbElt = lbDoc.xmlRootElement
linkElt = XmlUtil.addChild(lbElt,
XbrlConst.gen if _isGeneric else XbrlConst.link,
"link" if _isGeneric else "referenceLink",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", defaultLinkRole)))
firstLinkElt = linkElt
locs = set()
roleRefs = set()
undefinedReferenceParts = set()
for referenceKey, references in thisDoc.extensionReferences.items():
prefix, name, role = referenceKey
role = role.partition("#")[0] # remove # part
role = extReferenceRoles.get(role, role) # get custom role, if any
if fnmatch(role, referenceRole):
locLabel = prefix + "_" + name
# must use separate arcs with order to force Altova to display parts in order
if locLabel not in locs:
locs.add(locLabel)
order = 1
else:
for order in range(2,1000):
_locLabel = "{}_{}".format(locLabel, order)
if _locLabel not in locs:
locLabel = _locLabel
locs.add(locLabel)
break
if order > 999:
print("resource order de-duplicate failure, too many reference parts")
XmlUtil.addChild(linkElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", LBHref(thisDoc, prefix, name)),
("{http://www.w3.org/1999/xlink}label", locLabel)))
XmlUtil.addChild(linkElt,
XbrlConst.link, "referenceArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", conceptReference),
("{http://www.w3.org/1999/xlink}from", locLabel),
("{http://www.w3.org/1999/xlink}to", "label_" + locLabel),
("order", order)))
referenceResource = XmlUtil.addChild(linkElt,
XbrlConst.genReference if _isGeneric else XbrlConst.link,
"reference",
attributes=(("{http://www.w3.org/1999/xlink}type", "resource"),
("{http://www.w3.org/1999/xlink}label", "label_" + locLabel),
("{http://www.w3.org/1999/xlink}role", role)))
for part, text in references: # list to preserve desired order
setExtRefPart(part)
if part in extReferenceParts:
partQn = extReferenceParts.get(part, part) # get part QName if any
XmlUtil.addChild(referenceResource, partQn, text=text)
else:
undefinedReferenceParts.add(part)
if role:
if role in XbrlConst.standardLabelRoles:
pass # no roleRef
elif role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[role][0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
elif role.startswith("http://www.xbrl.org/2009/role/negated"):
roleRefs.add(("roleRef", role, "http://www.xbrl.org/lrr/role/negated-2009-12-16.xsd#" + role.rpartition("/")[2]))
else:
extUnrecognizedRoles.add(role)
for part in sorted(undefinedReferenceParts):
print("reference part not defined: {}".format(part))
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
XbrlConst.link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href", href)),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedReferenceRole",
"Reference roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
prefixedNamespaces = modelXbrl.prefixedNamespaces
def hrefConcept(prefix, name):
qn = qname(prefixedNamespaces[prefix], name)
if qn in modelXbrl.qnameConcepts:
return modelXbrl.qnameConcepts[qn]
elif name in modelXbrl.nameConcepts: # prefix may be null or ambiguous to multiple documents, try concept local name
return modelXbrl.nameConcepts[name][0]
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipElementPrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return None
modelXbrl.error("loadFromExcel:undefinedRelationshipElement",
"QName not defined: %(prefix)s:%(localName)s",
modelXbrl=modelXbrl, prefix=prefix, localName=name)
return None
def prefixedNameQName(prefixedName):
if ":" not in prefixedName:
return prefixedName
prefix, _sep, name = prefixedName.rpartition(":")
if prefix not in prefixedNamespaces:
modelXbrl.error("loadFromExcel:undefinedRelationshipAttributePrefix",
"Prefix not defined: %(prefix)s",
modelXbrl=modelXbrl, prefix=prefix)
return prefixedName
return QName(prefix, prefixedNamespaces[prefix], name)
def lbTreeWalk(lbType, parentElt, lbStruct, roleRefs, dimDef=False, locs=None, arcsFromTo=None, fromPrefix=None, fromName=None):
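            # Recursively serializes an LBentry tree into extended links, locators and
            # arcs for the given linkbase type.  ELR entries open a new extended link;
            # the "_dimensions_" pseudo-arcrole is resolved to the appropriate dimensional
            # arcrole (all / hypercube-dimension / dimension-domain / domain-member, or
            # dimension-default on the dimDef pass).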
order = 1.0
for lbEntry in lbStruct:
if lbEntry.isELR:
if not lbEntry.childStruct: # skip empty ELRs
continue
role = "unspecified"
if lbEntry.role and lbEntry.role.startswith("http://"): # have a role specified
role = lbEntry.role
elif lbEntry.name: #may be a definition
for linkroleUri, modelRoleTypes in modelXbrl.roleTypes.items():
definition = modelRoleTypes[0].definition
if lbEntry.name == definition and linkroleUri in thisDoc.extensionRoles:
role = linkroleUri
break
if role == "unspecified":
# don't generate for roles not for this schema
continue
#
#modelXbrl.error("loadFromExcel:linkRoleDefinition",
# "Link role has no definition: %(role)s",
# modelXbrl=modelXbrl, role=lbEntry.name, filename=thisDoc.extensionSchemaNamespaceURI)
if role not in thisDoc.extensionRoles:
# don't generate for roles not for this schema
continue
if role == XbrlConst.defaultLinkRole:
pass
elif role in thisDoc.extensionRoles:
roleRefs.add(("roleRef", role, doc.uri + "#roleType_" + role.rpartition("/")[2]))
elif role in modelXbrl.roleTypes: # add roleRef
roleType = modelRoleTypes[0]
roleRefs.add(("roleRef", role, roleType.modelDocument.uri + "#" + roleType.id))
else:
extUnrecognizedRoles.add(role)
linkElt = XmlUtil.addChild(parentElt,
XbrlConst.gen if lbType == "generic" else XbrlConst.link,
"link" if lbType == "generic" else lbType + "Link",
attributes=(("{http://www.w3.org/1999/xlink}type", "extended"),
("{http://www.w3.org/1999/xlink}role", role)))
locs = set()
arcsFromTo = set()
lbTreeWalk(lbType, linkElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo)
else:
toPrefix = lbEntry.prefix
toName = lbEntry.name
toHref = LBHref(thisDoc, toPrefix, toName)
if toHref is None:
modelXbrl.error("loadFromExcel:invalidQName",
"%(linkbase)s relationship element with prefix '%(prefix)s' localName '%(localName)s' not found",
modelXbrl=modelXbrl, linkbase=lbType, prefix=lbEntry.prefix, localName=lbEntry.name)
continue
if not toPrefix and toName in modelXbrl.nameConcepts:
toPrefix = modelXbrl.nameConcepts[toName][0].qname.prefix
toLabel = "{}_{}".format(toPrefix, toName)
toLabelAlt = None
if not lbEntry.isRoot:
if not fromPrefix and fromName in modelXbrl.nameConcepts:
fromPrefix = modelXbrl.nameConcepts[fromName][0].qname.prefix
fromLabel = "{}_{}".format(fromPrefix, fromName)
if (fromLabel, toLabel) in arcsFromTo:
# need extra loc to prevent arc from/to duplication in ELR
for i in range(1, 1000):
toLabelAlt = "{}_{}".format(toLabel, i)
if (fromLabel, toLabelAlt) not in arcsFromTo:
toLabel = toLabelAlt
break
if (toHref not in locs or toLabelAlt) and not dimDef:
XmlUtil.addChild(parentElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", toHref),
("{http://www.w3.org/1999/xlink}label", toLabel)))
locs.add(toHref)
if not lbEntry.isRoot:
arcsFromTo.add( (fromLabel, toLabel) )
if lbType == "calculation" and lbEntry.weight is not None:
otherAttrs = ( ("weight", lbEntry.weight), )
elif lbType == "presentation" and lbEntry.role:
if not lbEntry.role.startswith("http://"):
# check if any defined labels for this role
_labelRoleMatchPart = "/" + lbEntry.role
for _roleURI in modelXbrl.roleTypes:
if _roleURI.endswith(_labelRoleMatchPart):
for _roleType in modelXbrl.roleTypes[_roleURI]:
if XbrlConst.qnLinkLabel in _roleType.usedOns:
lbEntry.role = _roleURI
break
if not lbEntry.role.startswith("http://"):
# default to built in label roles
lbEntry.role = "http://www.xbrl.org/2003/role/" + lbEntry.role
otherAttrs = ( ("preferredLabel", lbEntry.role), )
if lbEntry.role and lbEntry.role not in XbrlConst.standardLabelRoles:
if lbEntry.role in modelXbrl.roleTypes:
roleType = modelXbrl.roleTypes[lbEntry.role][0]
roleRefs.add(("roleRef", lbEntry.role, roleType.modelDocument.uri + "#" + roleType.id))
else:
extUnrecognizedRoles.add(lbEntry.role)
elif lbType == "generic" and lbEntry.arcrole:
if not lbEntry.arcrole.startswith("http://"):
                            # check if any defined arcroles match this name
for _arcroleURI in modelXbrl.arcroleTypes:
if _arcroleURI.endswith(lbEntry.arcrole):
lbEntry.arcrole = _arcroleURI
break
otherAttrs = tuple( (prefixedNameQName(_key), _value) # may need to process qname in key into clark name
for _key, _value in (lbEntry.relAttrs.items() if lbEntry.relAttrs is not None else ()))
else:
otherAttrs = ( )
if lbEntry.arcrole == "_dimensions_": # pick proper consecutive arcrole
fromConcept = hrefConcept(fromPrefix, fromName)
toConcept = hrefConcept(toPrefix, toName)
if dimDef: # special case for default dimension
if lbEntry.role != "_dimensionDefault_" and not lbTreeHasDimDefault(lbEntry.childStruct):
continue # forget subtree, no default
if toConcept is not None and (toConcept.isDimensionItem or lbEntry.role == "_dimensionDefault_"):
if (toHref not in locs or toLabelAlt):
XmlUtil.addChild(parentElt,
XbrlConst.link, "loc",
attributes=(("{http://www.w3.org/1999/xlink}type", "locator"),
("{http://www.w3.org/1999/xlink}href", toHref),
("{http://www.w3.org/1999/xlink}label", toLabel)))
locs.add(toHref)
if lbEntry.role != "_dimensionDefault_":
lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
else:
XmlUtil.addChild(parentElt, XbrlConst.link, "definitionArc",
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", XbrlConst.dimensionDefault),
("{http://www.w3.org/1999/xlink}from", fromLabel),
("{http://www.w3.org/1999/xlink}to", toLabel),
("order", order)) + otherAttrs )
order += 1.0
else:
lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, fromPrefix, fromName)
continue
elif toConcept is not None and toConcept.isHypercubeItem:
arcrole = XbrlConst.all
otherAttrs += ( (XbrlConst.qnXbrldtContextElement, "segment"),
(qnXbrldtClosed, "true") )
elif toConcept is not None and toConcept.isDimensionItem:
arcrole = XbrlConst.hypercubeDimension
elif fromConcept is not None and fromConcept.isDimensionItem:
arcrole = XbrlConst.dimensionDomain
else:
arcrole = XbrlConst.domainMember
else:
arcrole = lbEntry.arcrole
if arcrole in relationshipArcqnames:
arcqname = relationshipArcqnames[arcrole]
arcNS = arcqname.namespaceURI
arcLocalname = arcqname.localName
elif lbType == "generic":
arcNS = XbrlConst.gen
arcLocalname = "arc"
else:
arcNS = XbrlConst.link
arcLocalname = lbType + "Arc"
XmlUtil.addChild(parentElt,
arcNS, arcLocalname,
attributes=(("{http://www.w3.org/1999/xlink}type", "arc"),
("{http://www.w3.org/1999/xlink}arcrole", arcrole),
("{http://www.w3.org/1999/xlink}from", fromLabel),
("{http://www.w3.org/1999/xlink}to", toLabel),
("order", order)) + otherAttrs )
order += 1.0
if lbType != "calculation" or lbEntry.isRoot:
lbTreeWalk(lbType, parentElt, lbEntry.childStruct, roleRefs, dimDef, locs, arcsFromTo, toPrefix, toName)
def lbTreeHasDimDefault(lbStruct):
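            # Returns True if any entry in the tree represents a dimension-default
            # relationship (arcrole "_dimensions_" with role "_dimensionDefault_").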
for lbEntry in lbStruct:
if lbEntry.isELR:
if not lbEntry.childStruct:
continue
if lbTreeHasDimDefault(lbEntry.childStruct):
return True
else:
if not lbEntry.isRoot and (lbEntry.arcrole == "_dimensions_" and lbEntry.role == "_dimensionDefault_"):
return True
if lbTreeHasDimDefault(lbEntry.childStruct):
return True
return False
for hasLB, lbType, lbLB in ((hasPreLB and thisDoc.hasPreLB, "presentation", preLB),
(hasDefLB and thisDoc.hasDefLB, "definition", defLB),
(hasCalLB and thisDoc.hasCalLB, "calculation", calLB),
(hasGenLB and thisDoc.hasGenLB, "generic", genLB)):
if hasLB:
for lbRefType, filename, generate in thisDoc.linkbaseRefs:
thisDoc.thisLBdir = posixpath.dirname(filename)
if generate and lbType == lbRefType:
# output presentation linkbase
lbDoc = ModelDocument.create(modelXbrl, ModelDocument.Type.LINKBASE, filename, base='', initialXml="""
<linkbase
xmlns="http://www.xbrl.org/2003/linkbase"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xbrli="http://www.xbrl.org/2003/instance"{}
xsi:schemaLocation="http://www.xbrl.org/2003/linkbase
http://www.xbrl.org/2003/xbrl-linkbase-2003-12-31.xsd{}"
/>
""".format("""
xmlns:generic="http://xbrl.org/2008/generic"
""" if lbType == "generic" else "",
"""
http://xbrl.org/2008/generic http://www.xbrl.org/2008/generic-link.xsd
""" if lbType == "generic" else ""
),
initialComment=thisDoc.initialComment)
lbDoc.inDTS = True
lbDoc.loadedFromExcel = True
addLinkbaseRef(lbRefType, filename, lbDoc)
lbElt = lbDoc.xmlRootElement
roleRefs = set()
if lbType == "definition":
roleRefs.update((("arcroleRef", XbrlConst.all, "http://www.xbrl.org/2005/xbrldt-2005.xsd#all"),
("arcroleRef", XbrlConst.dimensionDefault, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-default"),
("arcroleRef", XbrlConst.dimensionDomain, "http://www.xbrl.org/2005/xbrldt-2005.xsd#dimension-domain"),
("arcroleRef", XbrlConst.domainMember, "http://www.xbrl.org/2005/xbrldt-2005.xsd#domain-member"),
("arcroleRef", XbrlConst.hypercubeDimension, "http://www.xbrl.org/2005/xbrldt-2005.xsd#hypercube-dimension")))
elif lbType == "generic":
for _arcroleURI in relationshipArcroles.values():
for _arcroleType in modelXbrl.arcroleTypes[_arcroleURI]:
roleRefs.add(("arcroleRef", _arcroleURI, _arcroleType.modelDocument.uri + "#" + _arcroleType.id))
break
lbTreeWalk(lbType, lbElt, lbLB, roleRefs)
if lbType == "definition" and lbTreeHasDimDefault(lbLB):
lbTreeWalk(lbType, lbElt, lbLB, roleRefs, dimDef=True) # second tree walk for any dimension-defaults
firstLinkElt = None
for firstLinkElt in lbElt.iterchildren():
break
# add arcrole references
for roleref, roleURI, href in roleRefs:
XmlUtil.addChild(lbElt,
link, roleref,
attributes=(("arcroleURI" if roleref == "arcroleRef" else "roleURI", roleURI),
("{http://www.w3.org/1999/xlink}type", "simple"),
("{http://www.w3.org/1999/xlink}href",
docRelpath(href, thisDoc.thisLBdir))),
beforeSibling=firstLinkElt)
lbDoc.linkbaseDiscover(lbElt)
break
if extUnrecognizedRoles:
modelXbrl.error("loadFromExcel:undefinedRole",
"%(lbType)s linkbase roles not defined: %(undefinedRoles)s",
modelXbrl=modelXbrl, lbType=lbType, undefinedRoles=",".join(sorted(extUnrecognizedRoles)))
extUnrecognizedRoles.clear()
visitedDocNames.pop()
def LBHref(thisDoc, prefix, name):
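        # Builds the xlink:href for prefix:name by locating the schema file that declares
        # it: this extension schema, an imported schema, another generated document
        # (generated on demand), or an already loaded concept.  Returns
        # "relative/path#prefix_name", or None if the reference cannot be resolved.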
if not prefix and name in modelXbrl.nameConcepts:
_concept = modelXbrl.nameConcepts[name][0]
filename = _concept.modelDocument.uri
prefix = _concept.qname.prefix
elif prefix == thisDoc.extensionSchemaPrefix:
filename = thisDoc.extensionSchemaFilename
elif prefix in thisDoc.importFilenames:
filename = thisDoc.importFilenames[prefix]
elif prefix in genDocs:
doc = genDocs[prefix]
if not doc.generated:
# try to load recursively
                generateDoc(doc, thisDoc, [])
if doc.generated:
filename = doc.extensionSchemaFilename
else:
return None
elif name in modelXbrl.nameConcepts:
filename = None
for _concept in modelXbrl.nameConcepts[name]:
if prefix == _concept.qname.prefix:
filename = _concept.modelDocument.uri
break
if not filename:
return None
else:
return None
return "{0}#{1}_{2}".format(docRelpath(filename, thisDoc.thisLBdir), prefix, name)
for thisDoc in genOrder:
if not thisDoc.generated:
generateDoc(thisDoc, None, [])
#cntlr.addToLog("Completed in {0:.2} secs".format(time.time() - startedAt),
# messageCode="loadFromExcel:info")
if priorCWD:
os.chdir(priorCWD) # restore prior current working directory
return modelXbrl.modelDocument
def isExcelPath(filepath):
return os.path.splitext(filepath)[1] in (".xlsx", ".xls", ".xlsm")
def isExcelLoadable(modelXbrl, mappedUri, normalizedUri, filepath, **kwargs):
return isExcelPath(filepath)
def excelLoaderFilingStart(cntlr, options, filesource, entrypointFiles, *args, **kwargs):
global excludeDesignatedEnumerations, annotateEnumerationsDocumentation, annotateElementDocumentation, saveXmlLang
excludeDesignatedEnumerations = options.ensure_value("excludeDesignatedEnumerations", False)
annotateEnumerationsDocumentation = options.ensure_value("annotateEnumerationsDocumentation", False)
annotateElementDocumentation = options.ensure_value("annotateElementDocumentation", False)
saveXmlLang = options.ensure_value("saveLang", None)
def excelLoader(modelXbrl, mappedUri, filepath, *args, **kwargs):
if not isExcelLoadable(modelXbrl, mappedUri, None, filepath):
        return None # not an Excel-loadable file
cntlr = modelXbrl.modelManager.cntlr
cntlr.showStatus(_("Loading Excel file: {0}").format(os.path.basename(filepath)))
doc = loadFromExcel(cntlr, modelXbrl, filepath, mappedUri)
if doc is None:
        return None # not an Excel-loadable file
modelXbrl.loadedFromExcel = True
return doc
def saveDts(cntlr, modelXbrl, outputDtsDir):
from arelle import ModelDocument
import shutil
excelFileDir = os.path.dirname(modelXbrl.fileSource.url)
def saveToFile(url):
if os.path.isabs(url):
return url
filepath = os.path.join(outputDtsDir, url)
os.makedirs(os.path.dirname(filepath), exist_ok=True)
return filepath
# save generated schema and their linkbases
for doc in modelXbrl.urlDocs.values():
if getattr(doc, "loadedFromExcel", False):
doc.save(saveToFile(doc.uri), updateFileHistory=False)
cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(doc.uri)))
for refDoc in doc.referencesDocument.keys():
if refDoc.inDTS:
if refDoc.type == ModelDocument.Type.LINKBASE:
cntlr.showStatus(_("Saving XBRL DTS: {0}").format(os.path.basename(refDoc.uri)))
refDoc.save(saveToFile(refDoc.uri), updateFileHistory=False)
elif not (UrlUtil.isAbsolute(doc.uri) or os.path.isabs(doc.uri) or outputDtsDir == excelFileDir):
srcfile = os.path.join(excelFileDir, doc.uri)
destfile = saveToFile(doc.uri)
if os.path.exists(srcfile):
if not os.path.exists(destfile):
shutil.copyfile(srcfile, destfile)
else:
modelXbrl.error("loadFromExcel:missingReference",
"Missing source file to copy to output DTS directory: %(missingFile)s",
modelXbrl=modelXbrl, missingFile=doc.uri)
def guiXbrlLoaded(cntlr, modelXbrl, attach, *args, **kwargs):
if cntlr.hasGui and getattr(modelXbrl, "loadedFromExcel", False):
from tkinter.filedialog import askdirectory
outputDtsDir = askdirectory(parent=cntlr.parent,
initialdir=cntlr.config.setdefault("outputDtsDir","."),
title='Please select a directory for output DTS Contents')
cntlr.config["outputDtsDir"] = outputDtsDir
cntlr.saveConfig()
if outputDtsDir:
saveDts(cntlr, modelXbrl, outputDtsDir)
cntlr.showStatus(_("Excel loading completed"), 3500)
def cmdLineXbrlLoaded(cntlr, options, modelXbrl, *args, **kwargs):
if options.saveExcelDTSdirectory and getattr(modelXbrl, "loadedFromExcel", False):
saveDts(cntlr, modelXbrl, options.saveExcelDTSdirectory)
def excelLoaderOptionExtender(parser, *args, **kwargs):
parser.add_option("--save-Excel-DTS-directory",
action="store",
dest="saveExcelDTSdirectory",
help=_("Save a DTS loaded from Excel into this directory."))
parser.add_option("--exclude-designated-enumerations",
action="store_true",
dest="excludeDesignatedEnumerations",
help=_("Save a DTS loaded from Excel into this directory."))
parser.add_option("--annotate-enumerations-documentation",
action="store_true",
dest="annotateEnumerationsDocumentation",
help=_("Save a DTS loaded from Excel into this directory."))
parser.add_option("--annotate-element-documentation",
action="store_true",
dest="annotateElementDocumentation",
help=_("Save a DTS loaded from Excel into this directory."))
parser.add_option("--save-lang",
action="store",
dest="saveLang",
help=_("Save an xml:lang on top level elements (schema, linkbase)."))
class LBentry:
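    # A lightweight node of the in-memory linkbase tree built from the worksheet rows:
    # either an ELR heading, a root concept, or a child relationship carrying its
    # arcrole/role (and, for calculation arcs, the weight stored in "role").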
__slots__ = ("prefix", "name", "arcrole", "role", "childStruct", "preferredLabel", "relAttrs")
def __init__(self, prefix=None, name=None, arcrole=None, role=None, weight=None,
isELR=False, isRoot=False, childStruct=None, preferredLabel=None, relAttrs=None):
if childStruct is not None:
self.childStruct = childStruct
else:
self.childStruct = []
self.prefix = prefix
self.name = name
if isELR:
self.arcrole = "_ELR_"
elif isRoot:
self.arcrole = "_root_"
else:
self.arcrole = arcrole
if weight is not None: # summationItem
self.role = weight
else:
            self.role = role # resource role, or "default" if concept is a default dimension
self.preferredLabel = preferredLabel
self.relAttrs = relAttrs
@property
def isELR(self):
return self.arcrole == "_ELR_"
@property
def isRoot(self):
return self.arcrole == "_root_"
@property
def weight(self):
if self.arcrole == summationItem:
return self.role
return None
def __repr__(self):
return "LBentry(prefix={},name={})".format(self.prefix,self.name)
__pluginInfo__ = {
'name': 'Load From Excel',
'version': '1.02',
'description': "This plug-in loads XBRL from Excel and saves the resulting XBRL DTS.",
'license': 'Apache-2',
'author': 'Mark V Systems Limited',
'copyright': '(c) Copyright 2013-2017 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'ModelDocument.IsPullLoadable': isExcelLoadable,
'ModelDocument.PullLoader': excelLoader,
'CntlrWinMain.Xbrl.Loaded': guiXbrlLoaded,
'CntlrCmdLine.Filing.Start': excelLoaderFilingStart,
'CntlrCmdLine.Options': excelLoaderOptionExtender,
'CntlrCmdLine.Xbrl.Loaded': cmdLineXbrlLoaded
}
| 61.984917 | 187 | 0.503301 | [
"Apache-2.0"
] | CapoeiraShaolin1/Arelle | arelle/plugin/loadFromExcel.py | 123,514 | Python |
import argparse
import colorama
import json
import os
import time
from string import Template
import modules
from modules import site_config
from modules import util
# argument defaults and options for the CLI
module_choices = ['clean', 'stix_data', 'groups', 'search', 'matrices', 'mitigations', 'software', 'tactics', 'techniques', 'tour', 'website_build', 'random_page', 'subdirectory', 'tests']
extras = ['resources', 'versions', 'contribute', 'blog', 'attack_redirections']
test_choices = ['size', 'links', 'external_links', 'citations']
def validate_subdirectory_string(subdirectory_str):
""" Validate subdirectory string """
if not subdirectory_str.isascii():
raise argparse.ArgumentTypeError("%s contains non ascii characters" % subdirectory_str)
# Remove leading and trailing /
if subdirectory_str.startswith("/"):
subdirectory_str = subdirectory_str[1:]
if subdirectory_str.endswith("/"):
subdirectory_str = subdirectory_str[:-1]
site_config.set_subdirectory(subdirectory_str)
return subdirectory_str
def get_parsed_args():
"""Create argument parser and parse arguments"""
parser = argparse.ArgumentParser(description=("Build the ATT&CK website.\n"
"All flags are optional. If you run the build without flags, "
"the modules that pertain to the ATT&CK dataset will be ran. "
"If you would like to run extra modules, opt-in these modules with the"
"--extras flag."))
parser.add_argument('--refresh', '-r', action='store_true',
                        help='Pull down the current STIX data from the MITRE/CTI GitHub repository')
parser.add_argument('--no-stix-link-replacement', action='store_true',
help="If this flag is absent, links to attack.mitre.org/[page] in the STIX data will be replaced with /[page]. Add this flag to preserve links to attack.mitre.org.")
parser.add_argument('--modules', '-m', nargs='+',
type=str,
choices=module_choices,
help=("Run specific modules by selecting from the "
"list and leaving one space in "
"between them. For example: '-m clean techniques tactics'."
"Will run all the modules if flag is not called, or selected "
"without arguments."))
parser.add_argument('--extras', '-e', nargs='*',
type=str,
choices=extras,
help=("Run extra modules that do not pertain to the ATT&CK dataset. "
"Select from the list and leaving one space in "
"between them. For example: '-m resources blog'.\n"
"These modules will only run if the user adds this flag. "
"Calling this flag without arguments will select all the extra modules."))
parser.add_argument('--test', '-t', nargs='+',
choices=test_choices,
dest="tests",
help="Run specific tests by selecting from the list and leaving "
"one space in between them. For example: '-t output links'. "
"Tests: "
"size (size of output directory against github pages limit); "
"links (dead internal hyperlinks and relative hyperlinks); "
"external_links (dead external hyperlinks); "
"citations (unparsed citation text).")
parser.add_argument('--attack-brand', action='store_true',
help="Applies ATT&CK brand colors. See also the --extras flag.")
parser.add_argument('--proxy', help="set proxy")
parser.add_argument('--subdirectory',
help="If you intend to host the site from a sub-directory, specify the directory using this flag.",
type=validate_subdirectory_string)
parser.add_argument("--print-tests",
dest="print_tests",
action="store_true",
help="Force test output to print to stdout even if the results are very long.")
parser.add_argument("--no-test-exitstatus",
dest="override_exit_status",
action='store_true',
help="Forces application to exit with success status codes even if tests fail.")
args = parser.parse_args()
    # If modules is empty, all modules will be run
if not args.modules:
args.modules = module_choices
# If the extras flag was called without params, set to all
if not args.extras and isinstance(args.extras, list):
args.extras = extras
# Set global argument list for modules
site_config.args = args
return args
def remove_from_build(arg_modules, arg_extras):
""" Given a list of modules from command line, remove modules that appear in module
directory that are not in list.
"""
def remove_from_running_pool():
""" Remove modules from running pool if they are not in modules list from argument """
copy_of_modules = []
for module in modules.run_ptr:
if module["name"].lower() in arg_modules:
copy_of_modules.append(module)
modules.run_ptr = copy_of_modules
def remove_from_menu():
""" Remove modules from menu if they are not in modules list from argument """
copy_of_menu = []
for module in modules.menu_ptr:
if module["name"].lower() in arg_modules:
copy_of_menu.append(module)
modules.menu_ptr = copy_of_menu
# Only add extra modules if argument flag was used
if arg_extras:
arg_modules = arg_modules + arg_extras
remove_from_running_pool()
remove_from_menu()
if __name__ == "__main__":
"""Beginning of ATT&CK update module"""
# Get args
args = get_parsed_args()
# Remove modules from build
remove_from_build(args.modules, args.extras)
# Arguments used for pelican
site_config.send_to_pelican("no_stix_link_replacement", args.no_stix_link_replacement)
# Start time of update
update_start = time.time()
# Init colorama for output
colorama.init()
# Get running modules and priorities
for ptr in modules.run_ptr:
util.buildhelpers.print_start(ptr['name'])
start_time = time.time()
ptr['run_module']()
end_time = time.time()
util.buildhelpers.print_end(ptr['name'], start_time, end_time)
# Print end of module
update_end = time.time()
util.buildhelpers.print_end("TOTAL Update Time", update_start, update_end)
| 43.319018 | 189 | 0.596233 | [
"Apache-2.0"
] | Alexander-RB/attack-website | update-attack.py | 7,061 | Python |
#!/usr/bin/env python3
#
# main.py
#
# Specific command-line utility for Mellanox platform
#
try:
import sys
import subprocess
import click
import xml.etree.ElementTree as ET
from sonic_py_common import device_info
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
ENV_VARIABLE_SX_SNIFFER = 'SX_SNIFFER_ENABLE'
CONTAINER_NAME = 'syncd'
SNIFFER_CONF_FILE = '/etc/supervisor/conf.d/mlnx_sniffer.conf'
SNIFFER_CONF_FILE_IN_CONTAINER = CONTAINER_NAME + ':' + SNIFFER_CONF_FILE
TMP_SNIFFER_CONF_FILE = '/tmp/tmp.conf'
HWSKU_PATH = '/usr/share/sonic/hwsku/'
SAI_PROFILE_DELIMITER = '='
# run command
def run_command(command, display_cmd=False, ignore_error=False, print_to_console=True):
"""Run bash command and print output to stdout
"""
if display_cmd == True:
click.echo(click.style("Running command: ", fg='cyan') + click.style(command, fg='green'))
proc = subprocess.Popen(command, shell=True, text=True, stdout=subprocess.PIPE)
(out, err) = proc.communicate()
if len(out) > 0 and print_to_console:
click.echo(out)
if proc.returncode != 0 and not ignore_error:
sys.exit(proc.returncode)
return out, err
# 'mlnx' group
@click.group()
def mlnx():
""" Show Mellanox platform information """
pass
# get current status of sniffer from conf file
def sniffer_status_get(env_variable_name):
enabled = False
command = "docker exec {} bash -c 'touch {}'".format(CONTAINER_NAME, SNIFFER_CONF_FILE)
run_command(command)
command = 'docker cp {} {}'.format(SNIFFER_CONF_FILE_IN_CONTAINER, TMP_SNIFFER_CONF_FILE)
run_command(command)
conf_file = open(TMP_SNIFFER_CONF_FILE, 'r')
for env_variable_string in conf_file:
if env_variable_string.find(env_variable_name) >= 0:
enabled = True
break
conf_file.close()
command = 'rm -rf {}'.format(TMP_SNIFFER_CONF_FILE)
run_command(command)
return enabled
def is_issu_status_enabled():
""" This function parses the SAI XML profile used for mlnx to
get whether ISSU is enabled or disabled
@return: True/False
"""
# ISSU disabled if node in XML config wasn't found
issu_enabled = False
# Get the SAI XML path from sai.profile
sai_profile_path = '/{}/sai.profile'.format(HWSKU_PATH)
DOCKER_CAT_COMMAND = 'docker exec {container_name} cat {path}'
command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_profile_path)
sai_profile_content, _ = run_command(command, print_to_console=False)
sai_profile_kvs = {}
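    # sai.profile is a set of KEY=VALUE lines; collect them into a dict so that the
    # SAI_INIT_CONFIG_FILE entry (the path of the SAI XML) can be looked up below.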
for line in sai_profile_content.split('\n'):
if not SAI_PROFILE_DELIMITER in line:
continue
key, value = line.split(SAI_PROFILE_DELIMITER)
sai_profile_kvs[key] = value.strip()
try:
sai_xml_path = sai_profile_kvs['SAI_INIT_CONFIG_FILE']
except KeyError:
click.echo("Failed to get SAI XML from sai profile", err=True)
sys.exit(1)
# Get ISSU from SAI XML
command = DOCKER_CAT_COMMAND.format(container_name=CONTAINER_NAME, path=sai_xml_path)
sai_xml_content, _ = run_command(command, print_to_console=False)
try:
root = ET.fromstring(sai_xml_content)
except ET.ParseError:
click.echo("Failed to parse SAI xml", err=True)
sys.exit(1)
el = root.find('platform_info').find('issu-enabled')
if el is not None:
issu_enabled = int(el.text) == 1
return issu_enabled
@mlnx.command('sniffer')
def sniffer_status():
""" Show sniffer status """
components = ['sdk']
env_variable_strings = [ENV_VARIABLE_SX_SNIFFER]
for index in range(len(components)):
enabled = sniffer_status_get(env_variable_strings[index])
if enabled is True:
click.echo(components[index] + " sniffer is enabled")
else:
click.echo(components[index] + " sniffer is disabled")
@mlnx.command('issu')
def issu_status():
""" Show ISSU status """
res = is_issu_status_enabled()
click.echo('ISSU is enabled' if res else 'ISSU is disabled')
def register(cli):
version_info = device_info.get_sonic_version_info()
if (version_info and version_info.get('asic_type') == 'mellanox'):
cli.commands['platform'].add_command(mlnx)
| 29.643836 | 98 | 0.69085 | [
"Apache-2.0"
] | AshokDaparthi/sonic-utilities | show/plugins/mlnx.py | 4,328 | Python |
# Right-side addition and in-place addition: __radd__ and __iadd__
"""
__add__ alone does not cover the case where the instance sits on the right-hand side of +.
To support that as well, implement __radd__. Python calls __radd__ only when the object on
the right of + is an instance of this class and the left operand is not; in all other
cases the left operand's __add__ is called.
"""
class Commuter:
def __init__(self, val):
self.val = val
def __add__(self, other):
        # Without the isinstance test, adding two instances (where __add__ ends up triggering
        # __radd__) would leave us with a Commuter whose val is another Commuter.
if isinstance(other, Commuter): other = other.val
print("add")
return self.val + other
def __radd__(self, other):
print("radd")
        # Note: the operand order here is the reverse of __add__.
return other + self.val
# In-place addition: implement __iadd__, or __add__ as a fallback when __iadd__ is missing.
class Number:
def __init__(self, val):
self.val = val
def __add__(self, other):
return Number(self.val + other)
x = Commuter(89)
y = Commuter(99)
print(x + 1)
print(x + y)
X = Number(5)
X += 1
X += 1
print(X.val) | 19.454545 | 57 | 0.653037 | [
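# Expected output of the final print: 7 -- each "X += 1" falls back to __add__ (no __iadd__ here),
# so X is rebound to a brand-new Number instance each time.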
"Apache-2.0"
] | xuguoliang1995/leetCodePython | python_know/normal/demo9_7.py | 1,142 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =========================================================================
# Author Eduard Kabrinskyi <[email protected]> Skype: [email protected]
# =========================================================================
# =========================
# Main APP definitions
# =========================
import logging
import os
import requests
from lxml import html
import time
from random import choice
# =========================
# Database APP definitions
# =========================
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey
from sqlalchemy.orm import Session
from sqlalchemy import func
# =========================
# Set Logging
# =========================
logging.basicConfig(format='%(asctime)s %(levelname)-7s %(module)s.%(funcName)s - %(message)s')
logging.getLogger().setLevel(logging.INFO)
logging.disable(logging.NOTSET)
logging.info('Loading %s', __name__)
# =========================
# Database Class
# =========================
Base = declarative_base()
class OrgTable(Base):
__tablename__ = 'organization'
id = Column(Integer, primary_key=True)
name = Column(String(2000))
inn = Column(Integer)
address = Column(String(2000))
def __init__(self, name, inn, address):
self.name = name
self.inn = inn
self.address = address
def __repr__(self):
return "<Data %s, %s>" % (self.name, self.innm, self.address)
# =========================
# Spider Class
# =========================
class Busgov(object):
def __init__(self):
basename = 'database.db'
self.engine = create_engine("sqlite:///%s" % basename, echo=False)
if not os.path.exists(basename):
Base.metadata.create_all(self.engine)
f = open('page.txt', 'r')
self.start = int(f.read())
f.close()
self.last_page = set()
def get_count_items(self):
self.session = Session(bind=self.engine)
items = self.session.query(func.count(OrgTable.id)).scalar()
self.session.close()
return logging.info('Now Database items count: %s' %items)
def get_pages(self, stop):
try:
for page in range(self.start, stop):
logging.info('Crawl page: %s' % (page))
page_text = get_page('http://bus.gov.ru/public/agency/choose.html?d-442831-p=' + str(page))
tree = html.fromstring(page_text)
org_list = tree.xpath('//table[@id="resultTable"]/tbody/tr[*]')
x=1
for org in org_list:
name = tree.xpath('//table[@id="resultTable"]/tbody/tr[' + str(x) + ']/td[2]/text()')[0].strip('\n ')
inn = tree.xpath('//table[@id="resultTable"]/tbody/tr['+str(x)+']/td[3]/text()')[0]
address = tree.xpath('//table[@id="resultTable"]/tbody/tr['+str(x)+']/td[4]/text()')[0].strip('\n ')
item = {'name': name, 'inn': inn, 'address': address}
x+=1
self.processed(item=item, page=page)
f = open('page.txt', 'w')
f.write(str(page))
f.close()
else:
                raise Exception('Stop Crawl: reached last page %s' % page)
except Exception as e:
            logging.error(str(e))
def processed(self, item, page):
self.session = Session(bind=self.engine)
#print item['name']
ot = OrgTable(item['name'], item['inn'], item['address'])
self.session.add(ot)
self.session.commit()
self.session.close()
# =========================
# Helper functions
# =========================
from requests.auth import HTTPDigestAuth, HTTPBasicAuth
proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
def get_request(page,proxies):
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
r = requests.get(page, headers=headers, proxies=proxies, timeout=10.0)
return r
except:
class r(object):
status_code = None
return r
pass
def get_page(page):
    # 'proxies' is rebound below when a request through the current proxy fails.
    global proxies
    proxy_status = False
sleep_time = (1)
while proxy_status == False:
time.sleep(sleep_time)
logging.info("Set proxy: %s" %proxies["http"])
r = get_request(page=page,proxies=proxies)
if r.status_code == 200:
proxy_status = True
logging.info('Proxy UP: %s ' % proxies['http'])
else:
logging.info('Proxy DOWN: %s ' % proxies['http'])
            proxies = {"http": (choice(list(open('proxy.txt')))).strip('\n')}
return r.text
# =========================
# bg.get_pages(xxxx) -- total number of pages to crawl
# page.txt stores the current page number to resume crawling from
# =========================
if __name__ == "__main__":
bg = Busgov()
bg.get_count_items()
bg.get_pages(22278)
| 34.412162 | 122 | 0.531317 | [
"Apache-2.0"
] | edroot/busgov_spider | spider.py | 5,154 | Python |
from .base import *
import os
# how many data points are enough to calculate confidence?
MINIMUM_SAMPLE_SIZE = 3
# original phrase is good enough for export
TRANSCRIPT_PHRASE_POSITIVE_CONFIDENCE_LIMIT = .51
# original phrase needs correction
TRANSCRIPT_PHRASE_NEGATIVE_CONFIDENCE_LIMIT = -.51
# correction is good enough to award points and export data
TRANSCRIPT_PHRASE_CORRECTION_LOWER_LIMIT = .51
# correction no longer needs votes and can replace original phrase
TRANSCRIPT_PHRASE_CORRECTION_UPPER_LIMIT = .66
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = True
LOG_DIRECTORY = '/home/wgbh/logs'
STATIC_ROOT = '/home/wgbh/webroot/static'
ALLOWED_HOSTS = [
'mlagame-dev.wgbhdigital.org', 'mlagame.wgbhdigital.org',
'fixit.wgbhdigital.org',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': 'localhost',
'NAME': 'mla',
'USER': 'mla',
'PASSWORD': os.environ['PG_PASS'],
'TEST': {
'NAME': 'mla-test',
},
},
}
GA_CODE = 'null'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '{}/django.log'.format(LOG_DIRECTORY),
},
},
'loggers': {
'django': {
'handlers': ['file'],
'level': 'DEBUG',
'propagate': True,
},
},
}
| 23.190476 | 66 | 0.615332 | [
"MIT"
] | amazingwebdev/django-FixIt | mla_game/settings/stage.py | 1,461 | Python |
import time
import threading
import subprocess
import helpers
from settings import Settings
def listener():
global data_source
print("**** SIDE_THREAD ID == ", threading.get_ident())
while True:
return_data = subprocess.run([data_source.settings.loaded['localization_bin']], stdout=subprocess.PIPE)
parsed_return = helpers.parse_yaml_string(return_data.stdout.decode('utf8'))
data_source.settings.add_runtime('localization', parsed_return)
        time.sleep(data_source.settings.loaded['localization_plugin_wait_time'])  # Wait the configured interval before the next localization check
def start_plugin(data_source_received):
global data_source
data_source = data_source_received
try:
thread = threading.Thread(target=listener)
thread.start()
except:
print("Failed to start localization plugin")
| 34.4 | 130 | 0.737209 | [
"MIT"
] | joaovitor123jv/rontext | virtual_filesystem/localization.py | 860 | Python |
import unittest
from robot.parsing import TestCaseFile
from robot.parsing.model import TestCaseTable
from robot.utils import ET, ETSource, StringIO
from robot.utils.asserts import assert_equal
def create_test_case_file():
data = TestCaseFile(source='foo.txt')
table = TestCaseTable(data)
data.testcase_table = table
table.set_header(['test case', 'some', 'and other'])
test = table.add('A test')
test.add_step(['A kw', 'an arg'])
return data
class _WriterTestCase(unittest.TestCase):
def _test_rows_are_not_split_if_there_are_headers(self, format='txt'):
output = self._add_long_step_and_save(format)
assert_equal(len(output.splitlines()), 3)
def _add_long_step_and_save(self, format):
data = create_test_case_file()
data.testcase_table.tests[0].add_step(['A kw', '1', '2', '3', '4', '6', '7', '8'])
output = StringIO()
data.save(format=format, output=output)
return output.getvalue().strip()
class TestSpaceSeparatedWriter(_WriterTestCase):
def test_end_of_line_whitespace_is_removed(self):
output = StringIO()
create_test_case_file().save(output=output)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(repr(expected), repr(output.getvalue()))
def test_rows_are_not_split_if_there_are_headers(self):
self._test_rows_are_not_split_if_there_are_headers()
def test_configuring_number_of_separating_spaces(self):
output = StringIO()
create_test_case_file().save(output=output, txt_separating_spaces=8)
expected = '''\
*** test case *** some and other
A test A kw an arg
'''
assert_equal(repr(expected), repr(output.getvalue()))
class TestTsvWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
try:
import csv
except ImportError:
pass # csv not available on IronPython 2.7
else:
self._test_rows_are_not_split_if_there_are_headers('tsv')
class TestHtmlWriter(_WriterTestCase):
def test_rows_are_not_split_if_there_are_headers(self):
output = self._add_long_step_and_save('html')
with ETSource('\n'.join(output.splitlines()[1:])) as source:
tree = ET.parse(source)
lines = tree.findall('body/table/tr')
assert_equal(len(lines), 3)
for l in lines:
cols = l.findall('td') or l.findall('th')
assert_equal(len(cols), 9)
if __name__ == '__main__':
unittest.main()
| 31.698795 | 90 | 0.659065 | [
"ECL-2.0",
"Apache-2.0"
] | nopparat-mkw/robotframework | utest/writer/test_filewriters.py | 2,631 | Python |
import cv2.cv2 as cv2
import skimage.io as io
from skimage.transform import downscale_local_mean
import numpy as np
from model import *
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from images_to_arr import *
import pickle
import csv
def removeBackground(img_in):
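    # Affine rescale x -> 1.11*x - 0.11 pushes low background intensities (roughly below 0.1)
    # negative; the clip below then zeroes them out.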
Img_backless = np.copy(img_in)
Img_backless = np.subtract(np.multiply(Img_backless,1.11),0.11)
Img_backless[Img_backless < 0] = 0
return Img_backless
def newBBcoords(img_pred_Log,test_image):
# returns coordinates of the bounding box for the region with the largest area
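    # If the detected region is small, the box is symmetrically grown to roughly 30% of the image
    # width and 50% of its height, then clamped to the image borders.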
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
labelsImg = np.multiply(np.array(labelsLog, np.uint8),255)
#myShowImage(labelsImg)
sizeBoxX = regionsLog[maxIndex]['bbox'][3]-regionsLog[maxIndex]['bbox'][1]
sizeBoxY = regionsLog[maxIndex]['bbox'][2]-regionsLog[maxIndex]['bbox'][0]
coordsBbox = list(regionsLog[maxIndex]['bbox'])
if sizeBoxX <= 0.5 * img_pred_Log.shape[1]:
newSizeBoxX = 0.3 / (sizeBoxX / img_pred_Log.shape[1])
coordsBbox[1] = coordsBbox[1] - sizeBoxX*(0.5*(newSizeBoxX-1))
coordsBbox[3] = coordsBbox[3] + sizeBoxX*(0.5*(newSizeBoxX-1))
if sizeBoxY <= 0.5 * img_pred_Log.shape[0]:
newSizeBoxY = 0.5 / (sizeBoxY / img_pred_Log.shape[0])
coordsBbox[0] = coordsBbox[0] - sizeBoxY*(0.5*(newSizeBoxY-1))
coordsBbox[2] = coordsBbox[2] + sizeBoxY*(0.5*(newSizeBoxY-1))
if coordsBbox[0] < 0:
coordsBbox[0] = 0
if coordsBbox[1] < 0:
coordsBbox[1] = 0
if coordsBbox[2] > test_image.shape[0]:
coordsBbox[2] = test_image.shape[0] - 1
if coordsBbox[3] > test_image.shape[1]:
coordsBbox[3] = test_image.shape[1] - 1
coordsBboxInt = [round(x) for x in coordsBbox]
return coordsBboxInt
def getLargestAreaEcentroid(img_pred_Log):
# returns mask with the regions with the largest area, coords of centroid and radius
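    # The returned radius is approximated as the mean of the region's semi-major and semi-minor axes.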
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
centreCoords = np.round(regionsLog[maxIndex]['centroid'])
centreCoords = centreCoords.astype(np.uint)
radius = (regionsLog[maxIndex]['major_axis_length'] + regionsLog[maxIndex]['minor_axis_length']) / 4
colsCoord = [regionsLog[maxIndex]['bbox'][1],regionsLog[maxIndex]['bbox'][3]]
labelsArr = np.array(labelsLog)
return labelsArr, centreCoords, radius, colsCoord
image_arr = np.load('image_arr.npy')
mask_arr = np.load('mask_arr.npy')
image_arr_red_channels = np.load('image_arr_red_channels.npy')
image_arr_green_channels = np.load('image_arr_green_channels.npy')
image_arr_blue_channels = np.load('image_arr_blue_channels.npy')
entropy = np.load('entropy_arr.npy')
elips = np.load('elips_arr.npy')
vessels = np.load('vessels_arr.npy')
test_image = np.zeros(image_arr[0].shape)
test_image_mask = np.zeros(mask_arr[0].shape)
test_img_RC = np.zeros(image_arr[0].shape)
test_img_GC = np.zeros(image_arr[0].shape)
test_img_BC = np.zeros(image_arr[0].shape)
entropy_arr = np.zeros(image_arr[0].shape)
elips_arr = np.zeros(image_arr[0].shape)
ODROILog = []
ODROIBay = []
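# The remainder of the script is a staged pipeline controlled by the boolean flags below
# (OD classifier training, OD segment preparation, final OD segmentation, fovea localisation);
# each stage saves its intermediate results as .npy files so later stages can be re-run in isolation.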
getClassifiers = False
if getClassifiers:
X_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,4])
Y_train = np.zeros([image_arr[0].shape[0]*image_arr[0].shape[1]*40,1])
for j in range(0,40):
for i in range(0,40): # Get train data
if i == j:
continue
test_image = image_arr[i]
test_image_mask = mask_arr[i]
labels, num = label(test_image_mask, neighbors=8, background = 0, return_num = True)
regions = regionprops(labels)
centreCoords = np.round(regions[0]['centroid'])
centreCoords = centreCoords.astype(np.uint)
centreMask = np.zeros(test_image_mask.shape)
centreMask[centreCoords[0],centreCoords[1]] = 1
#Change here!
#test_image_mask = centreMask
test_image_RC = image_arr_red_channels[i]
test_image_GC = image_arr_green_channels[i]
test_image_BC = image_arr_blue_channels[i]
entropy_arr = entropy[i]
elips_arr = elips[i]
#test_image_RC = removeBackground(test_image_RC)
#test_image = removeBackground(test_image)
imageIndxs = np.where(test_image != 0)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
redChannel_Arr = np.squeeze(test_image_RC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
redChannel_Arr = (redChannel_Arr-np.average(redChannel_Arr)) / np.std(redChannel_Arr)
entropy_arr = np.squeeze(entropy_arr.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#entropy_arr = (entropy_arr-np.average(entropy_arr)) / np.std(entropy_arr)
# Distance Array
indices_Arr = np.indices((test_image.shape[0],test_image.shape[1])).transpose((1,2,0))
centreCoords = np.array([test_image.shape[0]/2,test_image.shape[1]/2])
distance_Arr = np.sqrt(np.add(np.power(indices_Arr[...,0]-centreCoords[0],2),np.power(indices_Arr[...,1]-centreCoords[1],2)))
normDistance_Arr = distance_Arr / np.max(distance_Arr)
normDistanceColumn_Arr = np.squeeze(normDistance_Arr.reshape([1,normDistance_Arr.shape[0]*normDistance_Arr.shape[1]])).T
X_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],...] = np.column_stack((redChannel_Arr,entropy_arr,normDistanceColumn_Arr, intensityColumn_Arr))#,
Y_train[i*image_arr[0].shape[0]*image_arr[0].shape[1]:(i+1)*image_arr[0].shape[0]*image_arr[0].shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
'''
f = open('my_classifier.pickle', 'rb')
classifier = pickle.load(f)
f.close()
'''
test_image2 = np.zeros(image_arr[0].shape)
test_image_mask2 = np.zeros(mask_arr[0].shape)
test_img_RC2 = np.zeros(image_arr[0].shape)
# test_img_GC2 = np.zeros(image_arr[0].shape)
test_image2 = image_arr[j]
test_image_mask2 = mask_arr[j]
test_image_RC2 = image_arr_red_channels[j]
test_image_GC2 = image_arr_green_channels[j]
test_image_BC2 = image_arr_blue_channels[j]
entropy_arr2 = entropy[j]
intensityColumn_Arr2 = np.squeeze(test_image2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
intensityColumn_Arr2 = (intensityColumn_Arr2-np.average(intensityColumn_Arr2)) / np.std(intensityColumn_Arr2)
redChannel_Arr2 = np.squeeze(test_image_RC2.reshape([1,test_image2.shape[0]*test_image2.shape[1]])).T
redChannel_Arr2 = ( redChannel_Arr2 - np.average(redChannel_Arr2) ) / np.std(redChannel_Arr2)
entropy_arr = np.squeeze(entropy_arr2.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
X_val = np.column_stack((redChannel_Arr2,entropy_arr,normDistanceColumn_Arr,intensityColumn_Arr2))#,,greenChannel_Arr2))
Y_val = np.squeeze(test_image_mask2.reshape([1,test_image_mask2.shape[0]*test_image_mask2.shape[1]])).T
# predicts
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
# Y_train_reshaped = Y_train.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"img_pred_Log_" + str(j))
#myShowImage(img_pred_Bayes,"img_pred_Bayes_" + str(j))
try:
coordsBBLog = newBBcoords(img_pred_Log,test_image)
except:
coordsBBLog = []
try:
coordsBBBay = newBBcoords(img_pred_Bayes,test_image)
except:
coordsBBBay = []
ODROILog.append(coordsBBLog)
ODROIBay.append(coordsBBBay)
ODROILog_Arr = np.array(ODROILog)
ODROIBay_Arr = np.array(ODROIBay)
np.save('ODROILog_Arr.npy',ODROILog_Arr)
np.save('ODROIBay_Arr.npy',ODROIBay_Arr)
prepareSegments = False
if prepareSegments:
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
OD_section = []
OD_mask = []
OD_section_RC = []
lenX_Arr = 0
for i in range(0,40):
try:
coords = ODROILog_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"LOG" +str(i))
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#smoothDisk = mean(smoothVessels, disk(5))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#coords = ODROIBay_Arr[i]
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"BAY" + str(i))
except:
coords = ODROIBay_Arr[i]
segMask = np.array(mask_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
segRC = np.array(image_arr_red_channels[i][coords[0]:coords[2],coords[1]:coords[3]])
imgSegment = np.array(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]])
vesslesSeg = np.array(vessels[i][coords[0]:coords[2],coords[1]:coords[3]])
kernel_ones = np.ones([3,3],np.uint8)
vesslesSeg = cv2.morphologyEx(vesslesSeg, cv2.MORPH_DILATE, kernel_ones)
indxsVesl = np.where(vesslesSeg != 0)
#medianFiltered = median(imgSegment,disk(25))
maxFiltered = maximum_filter(imgSegment, size=15)
smoothVessels = np.copy(imgSegment)
smoothVessels[indxsVesl[0],indxsVesl[1]] = np.multiply(maxFiltered[indxsVesl[0],indxsVesl[1]],0.97)
#myShowImage(image_arr[i][coords[0]:coords[2],coords[1]:coords[3]],"EXCEPT" + str(i))
OD_section.append(smoothVessels)
OD_mask.append(segMask)
OD_section_RC.append(segRC)
#print('except')
lenX_Arr = lenX_Arr + (imgSegment.shape[0]*imgSegment.shape[1])
#myShowImage(smoothVessels)
OD_section_Arr = np.array(OD_section)
OD_mask_Arr = np.array(OD_mask)
OD_section_RC = np.array(OD_section_RC)
np.save('OD_section_Arr.npy',OD_section_Arr)
np.save('OD_mask_Arr.npy',OD_mask_Arr)
np.save('OD_section_RC.npy',OD_section_RC)
print(lenX_Arr) # len = 4577126
finalSegmentation = False
finalMaskPredicts = []
if finalSegmentation:
OD_section_Arr = np.load('OD_section_Arr.npy')
OD_mask_Arr = np.load('OD_mask_Arr.npy')
OD_section_RC = np.load('OD_section_RC.npy')
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for j in range(0,40):
removeLen = OD_section_Arr[j].shape[0] * OD_section_Arr[j].shape[1]
X_train = np.zeros([4577126-removeLen,2])
Y_train = np.zeros([4577126-removeLen,1])
for i in range(0,40):
if i == j:
continue
test_image = OD_section_Arr[i]
test_image_mask = OD_mask_Arr[i]
segRC = OD_section_RC[i]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
if (i-1)*test_image.shape[0]*test_image.shape[1] < 0 and (i)*test_image.shape[0]*test_image.shape[1] == 0:
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]::,0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
continue
X_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],...] = np.column_stack((intensityColumn_Arr,segRC))#,
Y_train[(i-1)*test_image.shape[0]*test_image.shape[1]:(i)*test_image.shape[0]*test_image.shape[1],0] = np.squeeze(test_image_mask.reshape([1,test_image_mask.shape[0]*test_image_mask.shape[1]])).T
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()
test_image = OD_section_Arr[j]
test_image_mask = OD_mask_Arr[j]
segRC = OD_section_RC[j]
clahePrep = np.multiply(np.copy(test_image),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
intensityColumn_Arr = np.squeeze(test_image.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
intensityColumn_Arr = (intensityColumn_Arr-np.average(intensityColumn_Arr)) / np.std(intensityColumn_Arr)
segRC = np.squeeze(segRC.reshape([1,test_image.shape[0]*test_image.shape[1]])).T
#segRC = (segRC-np.average(segRC)) / np.std(segRC)
X_val = np.column_stack((intensityColumn_Arr,segRC))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape([test_image.shape[0],test_image.shape[1]])
img_pred_Bayes = predictsBayes.reshape([test_image.shape[0],test_image.shape[1]])
#myShowImage(img_pred_Log,"Log")
#myShowImage(img_pred_Bayes,"Bayes")
#myShowImage(test_image,"Actual")
finalMaskPredicts.append(predictsBayes)
#print('ok')
finalMaskPredicts_Arr = np.array(finalMaskPredicts)
np.save("finalMaskPredicts_Bayes.npy",finalMaskPredicts_Arr)
loadFinalSegs = False
if loadFinalSegs:
foveaBBoxCoords = []
centroidCoord = []
ODmaskPredicts = []
elips = np.load('elips_arr.npy')
originalDimsBase = np.zeros(image_arr[0].shape)
OD_section_Arr = np.load('OD_section_Arr.npy')
finalMaskPredicts_Arr = np.load("finalMaskPredicts_Bayes.npy")
ODROILog_Arr = np.load('ODROILog_Arr.npy')
ODROIBay_Arr = np.load('ODROIBay_Arr.npy')
for i in range(0,40):
originalDims = np.copy(originalDimsBase)
test_image = OD_section_Arr[i]
maskPred = finalMaskPredicts_Arr[i].reshape([test_image.shape[0],test_image.shape[1]])
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(maskPred)
finalMaskImg = np.multiply(finalMask,255)
finalMaskImg[centroidCoords[0],centroidCoords[1]] = 255
try:
coords = ODROILog_Arr[i]
failTest = (coords[2])
except:
coords = ODROIBay_Arr[i]
failTest = (coords[2])
coordsReal =[centroidCoords[0] + coords[0],centroidCoords[1] + coords[1]]
colsCoordReal = [colsCoord[0] + coords[1],colsCoord[1] + coords[1]]
originalDims[coords[0]:coords[2],coords[1]:coords[3]] = finalMaskImg
#originalDims = originalDims or elips[i]
elipsResized = cv2.resize(elips[i], dsize=(originalDims.shape[1],originalDims.shape[0]), interpolation=cv2.INTER_CUBIC)
elipsResized = np.average(elipsResized,axis = 2) # 3 channels -> 1 channel
elipsResized[elipsResized>0.5] = 1
elipsResized[elipsResized<1] = 0
elipsResized = thin(elipsResized)
elipsIndexs = np.where(elipsResized != 0)
originalDims = originalDims.astype(np.uint8)
#originalDims[elipsIndexs] = 255
indexsOD_ELi = np.where(originalDims != 0)
#myShowImage(originalDims,str(i))
checkResults = np.copy(image_arr[i])
checkResults[indexsOD_ELi] = originalDims[indexsOD_ELi]
#checkResults[0::,np.min(elipsIndexs[1])] = 255 # left
#checkResults[0::,np.max(elipsIndexs[1])] = 255 # right
if abs(coordsReal[1]-np.min(elipsIndexs[1])) < abs(coordsReal[1]-np.max(elipsIndexs[1])):
#isleft -> walk right
#relevantColumn = coordsReal[1] + 30 # based on centroid
relevantColumn = colsCoordReal[1] - 10 # based on
columnROI_f = [coordsReal[1] + round(3*radius),coordsReal[1] + round(6*radius)]
else:
#isright -> walk left
#relevantColumn = coordsReal[1] - 30
relevantColumn = colsCoordReal[0] + 10
columnROI_f = [coordsReal[1] - round(6*radius),coordsReal[1] - round(3*radius)]
relevantRows = np.where(elipsResized[...,relevantColumn]!=0)
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[0]] = 0 # 1 - columnROI_f[0]
checkResults[relevantRows[0][0]:relevantRows[0][-1],columnROI_f[1]] = 0 # 3 - columnROI_f[1]
checkResults[relevantRows[0][0],columnROI_f[0]:columnROI_f[1]] = 0 # 0 - relevantRows[0][0]
checkResults[relevantRows[0][-1],columnROI_f[0]:columnROI_f[1]] = 0 # 2 - relevantRows[0][-1]
foveaBBoxCoords.append((relevantRows[0][0],columnROI_f[0],relevantRows[0][-1],columnROI_f[1]))
centroidCoord.append(coordsReal)
originalDims = np.divide(originalDims,255)
ODmaskPredicts.append(originalDims)
#myShowImage(originalDims,str(i))
#myShowImage(checkResults,str(i))
foveaBBoxCoords_Arr = np.array(foveaBBoxCoords)
centroidCoord_Arr = np.array(centroidCoord)
ODmaskPredicts_Arr = np.array(ODmaskPredicts)
np.save("bbox_fovea.npy",foveaBBoxCoords_Arr)
np.save("centroidCoord_Arr.npy",centroidCoord_Arr)
np.save("ODmaskPredicts_Arr.npy",ODmaskPredicts_Arr)
getFoveaGTCoords = True
if getFoveaGTCoords:
foveCoordsGT = []
tempCoords =[]
imgNo = 0
with open('Datasets/fovea_location.csv') as f:
reader = csv.reader(f)
next(reader)
for row in reader:
#print(row)
tempCoords.append(float(row[1]))
tempCoords.append(float(row[2]))
foveCoordsGT.append(tempCoords)
tempCoords =[]
imgNo += 1
if imgNo == 40:
break
getFoveaCoordsPred = False
'''for i in range(0,40):
myShowImage(image_arr[i])
myShowImage(image_arr_red_channels[i])
myShowImage(image_arr_green_channels[i])
myShowImage(vessels[i])
myShowImage(entropy_arr[i])'''
if getFoveaCoordsPred:
foveaBBoxCoords_Arr = np.load("bbox_fovea.npy")
foveaBBoxCoords_Arr = np.absolute(foveaBBoxCoords_Arr)
removeLen = 0
realCentroidCoords_Arr = []
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
for i in range(0,40): # not the best way...
if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
removeLen += bboxShape[0]*bboxShape[1]
#print(removeLen)
for j in range(0,40):
removeLen = (foveaBBoxCoords_Arr[j][2]-foveaBBoxCoords_Arr[j][0]) * (foveaBBoxCoords_Arr[j][3]-foveaBBoxCoords_Arr[j][1])
X_train = np.zeros([3187816-removeLen,3]) # 3187816 = number of points in all fovea bboxs
Y_train = np.zeros([3187816-removeLen,1])
first = 0
for i in range(0,40):
if i == j:
continue
'''if foveaBBoxCoords_Arr[i][3] < foveaBBoxCoords_Arr[i][1]:
temp = foveaBBoxCoords_Arr[i][1]
foveaBBoxCoords_Arr[i][1] = foveaBBoxCoords_Arr[i][3]
foveaBBoxCoords_Arr[i][3] = temp
if foveaBBoxCoords_Arr[i][2] < foveaBBoxCoords_Arr[i][0]:
temp = foveaBBoxCoords_Arr[i][0]
foveaBBoxCoords_Arr[i][0] = foveaBBoxCoords_Arr[i][2]
foveaBBoxCoords_Arr[i][2] = temp'''
test_image = image_arr[i]
fovea_region = test_image[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
bboxShape = fovea_region.shape
last = bboxShape[0]*bboxShape[1] + first
foveaRegionGC = image_arr_green_channels[i][foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
#mask
maskBig = np.zeros(test_image.shape)
coordsFoveaCenter = [round(foveCoordsGT[i][1]/4),round(foveCoordsGT[i][0]/4)]
maskBig[coordsFoveaCenter[0]-10:coordsFoveaCenter[0]+10,coordsFoveaCenter[1]-10:coordsFoveaCenter[1]+10] = 1
mask = maskBig[foveaBBoxCoords_Arr[i][0]:foveaBBoxCoords_Arr[i][2],foveaBBoxCoords_Arr[i][1]:foveaBBoxCoords_Arr[i][3]]
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
'''if (i-1)*bboxShape[0]*bboxShape[1] < 0 and (i)*bboxShape[0]*bboxShape[1] == 0:
X_train[(i-1)*bboxShape[0]*bboxShape[1]::,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[(i-1)*bboxShape[0]*bboxShape[1]::,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
continue'''
X_train[first:last,...] = np.column_stack((fovea_region,foveaRegionGC,highContrast))#,
Y_train[first:last,0] = np.squeeze(mask.reshape([1,bboxShape[0]*bboxShape[1]])).T
first = last
X_train_2 = X_train
y_train_2 = Y_train
clf_bayes = GaussianNB()
clf_bayes.fit(X_train_2,y_train_2)
paramsBayes = clf_bayes.get_params
# Logistic regression
clf_log = LogisticRegression()
clf_log.fit(X_train_2,y_train_2)
'''log = open('Classifiers/Segments/Log/LogClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_log, log)
log.close()
bay = open('Classifiers/Segments/Bay/BayClf_excluding_' + str(j) + '.pickle', 'wb')
pickle.dump(clf_bayes, bay)
bay.close()'''
test_image = image_arr[j]
fovea_region = test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
bboxShape = fovea_region.shape
foveaRegionGC = image_arr_green_channels[j][foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]]
clahePrep = np.multiply(np.copy(foveaRegionGC),255)
clahePrep = clahePrep.astype(np.uint8)
highContrast = clahe.apply(clahePrep)
fovea_region = np.squeeze(fovea_region.reshape([1,bboxShape[0]*bboxShape[1]])).T
fovea_region = (fovea_region-np.average(fovea_region)) / np.std(fovea_region)
foveaRegionGC = np.squeeze(foveaRegionGC.reshape([1,bboxShape[0]*bboxShape[1]])).T
foveaRegionGC = (foveaRegionGC-np.average(foveaRegionGC)) / np.std(foveaRegionGC)
highContrast = np.squeeze(highContrast.reshape([1,bboxShape[0]*bboxShape[1]])).T
highContrast = (highContrast-np.average(highContrast)) / np.std(highContrast)
X_val = np.column_stack((fovea_region,foveaRegionGC,highContrast))
predictsBayes = clf_bayes.predict(X_val)
predictsLog = clf_log.predict(X_val)
img_pred_Log = predictsLog.reshape(bboxShape)
img_pred_Bayes = predictsBayes.reshape(bboxShape)
try:
finalMask, centroidCoords, radius, colsCoord = getLargestAreaEcentroid(img_pred_Bayes)
if centroidCoords.size == 0:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
except:
finalMask = np.zeros(img_pred_Bayes.shape)
finalMask[round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)] = 1
centroidCoords = np.array([round(finalMask.shape[0]/2),round(finalMask.shape[1]/2)])
maskEyes = np.copy(finalMask)
maskEyes = np.multiply(maskEyes,255)
maskEyes = maskEyes.astype(np.uint8)
#myShowImage(test_image[foveaBBoxCoords_Arr[j][0]:foveaBBoxCoords_Arr[j][2],foveaBBoxCoords_Arr[j][1]:foveaBBoxCoords_Arr[j][3]],"fovea")
#myShowImage(maskEyes,"Mask")
#myShowImage(img_pred_Bayes,"Bay")
realCentroidCoords = [centroidCoords[0] + foveaBBoxCoords_Arr[j][0],centroidCoords[1] + foveaBBoxCoords_Arr[j][1]]
realCentroidCoords_Arr.append(realCentroidCoords)
realCentroidCoords_Arr = np.array(realCentroidCoords_Arr)
np.save('fovea_centre_coords.npy',realCentroidCoords_Arr)
#centroidCoord_Arr = np.load("centroidCoord_Arr.npy")
#ODmaskPredicts_Arr = np.load("ODmaskPredicts_Arr.npy")
#for i in range(0,40):
showGraphsClass= False
if showGraphsClass:
import matplotlib.pyplot as plt
from sklearn import svm, datasets
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, proba=False, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
    proba: if True, plot the predicted probability of the positive class instead of hard labels
    params: dictionary of params to pass to contourf, optional
    """
if proba:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,-1]
else:
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z,20, **params)
return out
## import some data to play with
#iris = datasets.load_iris()
## Take the first two features. We could avoid this by using a two-dim dataset
#X = iris.data[:, :2]
#y = iris.target
X = X_train_2
y = y_train_2
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
models = (clf_bayes, clf_log) #, clf_svm, clf_svm_rbf)
# title for the plots
titles = ('Bayes',
'Logistic regression')
''' ,
'SVC with linear kernel',
'SVM with RBF kernel')'''
# Set-up 2x2 grid for plotting.
#fig, sub =
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[0::500, 0], X[0::500, 1]
xx, yy = make_meshgrid(X0, X1,h=0.005)
'''_,ax_all = plt.subplots(1,2)
ax = ax_all[1]
plot_contours(ax, clf_bayes, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title("Bayes")
plt.show()'''
showPlots = False
if showPlots:
for clf, title in zip(models, titles):
_,ax_all = plt.subplots(1,2)
ax = ax_all[0]
            plot_contours(ax, clf, xx, yy, proba=True,  # probability contours for the positive class
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
ax = ax_all[1]
plot_contours(ax, clf, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y[0::500], cmap=plt.cm.coolwarm, s=20)
ax.set_xlim(X0.min(), X0.max())
ax.set_ylim(X1.min(), X1.max())
ax.set_xlabel('Distance')
ax.set_ylabel('Intensity')
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(title)
plt.show()
print("Done") | 40.098341 | 215 | 0.625476 | [
"MIT"
] | MartimChaves/ret_detect | main.py | 33,843 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import pickle
import pandas as pd
from cgp import *
from cgp_config import *
from cnn_train import CNN_train
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Evolving CAE structures')
parser.add_argument('--gpu_num', '-g', type=int, default=1, help='Num. of GPUs')
parser.add_argument('--lam', '-l', type=int, default=2, help='Num. of offsprings')
parser.add_argument('--net_info_file', default='network_info.pickle', help='Network information file name')
parser.add_argument('--log_file', default='./log_cgp.txt', help='Log file name')
parser.add_argument('--mode', '-m', default='evolution', help='Mode (evolution / retrain / reevolution)')
parser.add_argument('--init', '-i', action='store_true')
args = parser.parse_args()
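    # Example invocations (illustrative):
    #   python exp_main.py -m evolution -g 2 -l 2
    #   python exp_main.py -m retrain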
# --- Optimization of the CNN architecture ---
if args.mode == 'evolution':
# Create CGP configuration and save network information
network_info = CgpInfoConvSet(rows=5, cols=30, level_back=10, min_active_num=1, max_active_num=30)
with open(args.net_info_file, mode='wb') as f:
pickle.dump(network_info, f)
# Evaluation function for CGP (training CNN and return validation accuracy)
imgSize = 32
eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
imgSize=imgSize)
# Execute evolution
cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize, init=args.init)
cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file=args.log_file)
# --- Retraining evolved architecture ---
elif args.mode == 'retrain':
print('Retrain')
# In the case of existing log_cgp.txt
# Load CGP configuration
with open(args.net_info_file, mode='rb') as f:
network_info = pickle.load(f)
# Load network architecture
cgp = CGP(network_info, None)
data = pd.read_csv(args.log_file, header=None) # Load log file
cgp.load_log(list(data.tail(1).values.flatten().astype(int))) # Read the log at final generation
print(cgp._log_data(net_info_type='active_only', start_time=0))
# Retraining the network
temp = CNN_train('cifar10', validation=False, verbose=True, batchsize=128)
acc = temp(cgp.pop[0].active_net_list(), 0, epoch_num=500, out_model='retrained_net.model')
print(acc)
# # otherwise (in the case where we do not have a log file.)
# temp = CNN_train('haze1', validation=False, verbose=True, imgSize=128, batchsize=16)
# cgp = [['input', 0], ['S_SumConvBlock_64_3', 0], ['S_ConvBlock_64_5', 1], ['S_SumConvBlock_128_1', 2], ['S_SumConvBlock_64_1', 3], ['S_SumConvBlock_64_5', 4], ['S_DeConvBlock_3_3', 5]]
# acc = temp(cgp, 0, epoch_num=500, out_model='retrained_net.model')
elif args.mode == 'reevolution':
# restart evolution
print('Restart Evolution')
imgSize = 64
with open('network_info.pickle', mode='rb') as f:
network_info = pickle.load(f)
eval_f = CNNEvaluation(gpu_num=args.gpu_num, dataset='cifar10', verbose=True, epoch_num=50, batchsize=128,
imgSize=imgSize)
cgp = CGP(network_info, eval_f, lam=args.lam, imgSize=imgSize)
data = pd.read_csv('./log_cgp.txt', header=None)
cgp.load_log(list(data.tail(1).values.flatten().astype(int)))
cgp.modified_evolution(max_eval=250, mutation_rate=0.1, log_file='./log_restat.txt')
else:
print('Undefined mode. Please check the "-m evolution or retrain or reevolution" ')
| 49.078947 | 194 | 0.657105 | [
"MIT"
] | dongzhiming/cgp-cnn-PyTorch | exp_main.py | 3,730 | Python |
from sparknlp.annotator import *
class BertSentence:
@staticmethod
def get_default_model():
return BertSentenceEmbeddings.pretrained() \
.setInputCols("sentence") \
.setOutputCol("sentence_embeddings")
@staticmethod
def get_pretrained_model(name, language, bucket=None):
return BertSentenceEmbeddings.pretrained(name,language,bucket) \
.setInputCols('sentence') \
.setOutputCol("sentence_embeddings")
| 25.210526 | 72 | 0.686848 | [
"Apache-2.0"
] | JohnSnowLabs/nlu | nlu/components/embeddings/sentence_bert/BertSentenceEmbedding.py | 479 | Python |
#!/usr/bin/env python3
import os
import shutil
import threading
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT, get_available_bytes, get_available_percent
from selfdrive.loggerd.uploader import listdir_by_creation
from selfdrive.dragonpilot.dashcam import DASHCAM_FREESPACE_LIMIT
MIN_BYTES = 5 * 1024 * 1024 * 1024
MIN_PERCENT = 10 + (DASHCAM_FREESPACE_LIMIT * 100)
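# DASHCAM_FREESPACE_LIMIT appears to be a fraction of total disk space, so it is scaled to percent
# here and added on top of the base 10% free-space floor before old routes are deleted.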
def deleter_thread(exit_event):
while not exit_event.is_set():
out_of_bytes = get_available_bytes(default=MIN_BYTES + 1) < MIN_BYTES
out_of_percent = get_available_percent(default=MIN_PERCENT + 1) < MIN_PERCENT
if out_of_percent or out_of_bytes:
# remove the earliest directory we can
dirs = listdir_by_creation(ROOT)
for delete_dir in dirs:
delete_path = os.path.join(ROOT, delete_dir)
if any(name.endswith(".lock") for name in os.listdir(delete_path)):
continue
try:
cloudlog.info("deleting %s" % delete_path)
shutil.rmtree(delete_path)
break
except OSError:
cloudlog.exception("issue deleting %s" % delete_path)
exit_event.wait(.1)
else:
exit_event.wait(30)
def main():
deleter_thread(threading.Event())
if __name__ == "__main__":
main()
| 28.555556 | 85 | 0.714397 | [
"MIT"
] | Anthony919nc/Tessa | selfdrive/loggerd/deleter.py | 1,285 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from future.utils import iteritems
from collections import defaultdict
from copy import deepcopy
from itertools import product
import re
from sqlalchemy.sql import select
from .models import Candidate, TemporarySpan, Sentence
from .udf import UDF, UDFRunner
QUEUE_COLLECT_TIMEOUT = 5
class CandidateExtractor(UDFRunner):
"""
An operator to extract Candidate objects from a Context.
:param candidate_class: The type of relation to extract, defined using
:func:`snorkel.models.candidate_subclass <snorkel.models.candidate.candidate_subclass>`
:param cspaces: one or list of :class:`CandidateSpace` objects, one for each relation argument. Defines space of
Contexts to consider
:param matchers: one or list of :class:`snorkel.matchers.Matcher` objects, one for each relation argument. Only tuples of
Contexts for which each element is accepted by the corresponding Matcher will be returned as Candidates
:param self_relations: Boolean indicating whether to extract Candidates that relate the same context.
Only applies to binary relations. Default is False.
:param nested_relations: Boolean indicating whether to extract Candidates that relate one Context with another
that contains it. Only applies to binary relations. Default is False.
:param symmetric_relations: Boolean indicating whether to extract symmetric Candidates, i.e., rel(A,B) and rel(B,A),
where A and B are Contexts. Only applies to binary relations. Default is False.
"""
def __init__(self, candidate_class, cspaces, matchers, self_relations=False, nested_relations=False, symmetric_relations=False):
super(CandidateExtractor, self).__init__(CandidateExtractorUDF,
candidate_class=candidate_class,
cspaces=cspaces,
matchers=matchers,
self_relations=self_relations,
nested_relations=nested_relations,
symmetric_relations=symmetric_relations)
def apply(self, xs, split=0, **kwargs):
super(CandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class CandidateExtractorUDF(UDF):
def __init__(self, candidate_class, cspaces, matchers, self_relations, nested_relations, symmetric_relations, **kwargs):
self.candidate_class = candidate_class
# Note: isinstance is the way to check types -- not type(x) in [...]!
self.candidate_spaces = cspaces if isinstance(cspaces, (list, tuple)) else [cspaces]
self.matchers = matchers if isinstance(matchers, (list, tuple)) else [matchers]
self.nested_relations = nested_relations
self.self_relations = self_relations
self.symmetric_relations = symmetric_relations
# Check that arity is same
if len(self.candidate_spaces) != len(self.matchers):
raise ValueError("Mismatched arity of candidate space and matcher.")
else:
self.arity = len(self.candidate_spaces)
# Make sure the candidate spaces are different so generators aren't expended!
self.candidate_spaces = list(map(deepcopy, self.candidate_spaces))
# Preallocates internal data structures
self.child_context_sets = [None] * self.arity
for i in range(self.arity):
self.child_context_sets[i] = set()
super(CandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, **kwargs):
# Generate TemporaryContexts that are children of the context using the candidate_space and filtered
# by the Matcher
for i in range(self.arity):
self.child_context_sets[i].clear()
for tc in self.matchers[i].apply(self.candidate_spaces[i].apply(context)):
tc.load_id_or_insert(self.session)
self.child_context_sets[i].add(tc)
# Generates and persists candidates
extracted = set()
candidate_args = {'split': split}
for args in product(*[enumerate(child_contexts) for child_contexts in self.child_context_sets]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations. For symmetric relations, if mentions are of the same type, maintain
# their order in the sentence.
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ((b, a) in extracted or
(self.matchers[0] == self.matchers[1] and a.char_start > b.char_start)):
continue
# Keep track of extracted
extracted.add((a,b))
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
# Checking for existence
if not clear:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
class CandidateSpace(object):
"""
Defines the **space** of candidate objects
Calling _apply(x)_ given an object _x_ returns a generator over candidates in _x_.
"""
def __init__(self):
pass
def apply(self, x):
raise NotImplementedError()
class Ngrams(CandidateSpace):
"""
Defines the space of candidates as all n-grams (n <= n_max) in a Sentence _x_,
indexing by **character offset**.
"""
def __init__(self, n_max=5, split_tokens=('-', '/')):
CandidateSpace.__init__(self)
self.n_max = n_max
self.split_rgx = r'('+r'|'.join(split_tokens)+r')' if split_tokens and len(split_tokens) > 0 else None
def apply(self, context):
# These are the character offset--**relative to the sentence start**--for each _token_
offsets = context.char_offsets
# Loop over all n-grams in **reverse** order (to facilitate longest-match semantics)
L = len(offsets)
seen = set()
for l in range(1, self.n_max+1)[::-1]:
for i in range(L-l+1):
w = context.words[i+l-1]
start = offsets[i]
end = offsets[i+l-1] + len(w) - 1
ts = TemporarySpan(char_start=start, char_end=end, sentence=context)
if ts not in seen:
seen.add(ts)
yield ts
# Check for split
# NOTE: For simplicity, we only split single tokens right now!
if l == 1 and self.split_rgx is not None and end - start > 0:
m = re.search(self.split_rgx, context.text[start-offsets[0]:end-offsets[0]+1])
if m is not None and l < self.n_max + 1:
ts1 = TemporarySpan(char_start=start, char_end=start + m.start(1) - 1, sentence=context)
if ts1 not in seen:
seen.add(ts1)
                            yield ts1
ts2 = TemporarySpan(char_start=start + m.end(1), char_end=end, sentence=context)
if ts2 not in seen:
seen.add(ts2)
yield ts2
class PretaggedCandidateExtractor(UDFRunner):
"""UDFRunner for PretaggedCandidateExtractorUDF"""
def __init__(self, candidate_class, entity_types, self_relations=False,
nested_relations=False, symmetric_relations=True, entity_sep='~@~'):
super(PretaggedCandidateExtractor, self).__init__(
PretaggedCandidateExtractorUDF, candidate_class=candidate_class,
entity_types=entity_types, self_relations=self_relations,
nested_relations=nested_relations, entity_sep=entity_sep,
symmetric_relations=symmetric_relations,
)
def apply(self, xs, split=0, **kwargs):
super(PretaggedCandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class PretaggedCandidateExtractorUDF(UDF):
"""
An extractor for Sentences with entities pre-tagged, and stored in the entity_types and entity_cids
fields.
"""
def __init__(self, candidate_class, entity_types, self_relations=False, nested_relations=False, symmetric_relations=False, entity_sep='~@~', **kwargs):
self.candidate_class = candidate_class
self.entity_types = entity_types
self.arity = len(entity_types)
self.self_relations = self_relations
self.nested_relations = nested_relations
self.symmetric_relations = symmetric_relations
self.entity_sep = entity_sep
super(PretaggedCandidateExtractorUDF, self).__init__(**kwargs)
def apply(self, context, clear, split, check_for_existing=True, **kwargs):
"""Extract Candidates from a Context"""
# For now, just handle Sentences
if not isinstance(context, Sentence):
            raise NotImplementedError("%s is currently only implemented for Sentence contexts." % type(self).__name__)
# Do a first pass to collect all mentions by entity type / cid
entity_idxs = dict((et, defaultdict(list)) for et in set(self.entity_types))
L = len(context.words)
for i in range(L):
if context.entity_types[i] is not None:
ets = context.entity_types[i].split(self.entity_sep)
cids = context.entity_cids[i].split(self.entity_sep)
for et, cid in zip(ets, cids):
if et in entity_idxs:
entity_idxs[et][cid].append(i)
# Form entity Spans
entity_spans = defaultdict(list)
entity_cids = {}
for et, cid_idxs in iteritems(entity_idxs):
for cid, idxs in iteritems(entity_idxs[et]):
while len(idxs) > 0:
i = idxs.pop(0)
char_start = context.char_offsets[i]
char_end = char_start + len(context.words[i]) - 1
while len(idxs) > 0 and idxs[0] == i + 1:
i = idxs.pop(0)
char_end = context.char_offsets[i] + len(context.words[i]) - 1
# Insert / load temporary span, also store map to entity CID
tc = TemporarySpan(char_start=char_start, char_end=char_end, sentence=context)
tc.load_id_or_insert(self.session)
entity_cids[tc.id] = cid
entity_spans[et].append(tc)
# Generates and persists candidates
candidate_args = {'split' : split}
for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]):
# TODO: Make this work for higher-order relations
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan), and flipped duplicate
# "symmetric" relations
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ai > bi:
continue
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
candidate_args[arg_name + '_cid'] = entity_cids[args[i][1].id]
# Checking for existence
if check_for_existing:
q = select([self.candidate_class.id])
for key, value in iteritems(candidate_args):
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
| 46.638889 | 155 | 0.602814 | [
"Apache-2.0"
] | ailabx/snorkel | snorkel/candidates.py | 13,432 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 15 10:38:14 2021
@author: kunal001
"""
import logging
logger = logging.getLogger(__name__)
class CreateDatabase:
def __init__(self,hier_graph,const_parse):
self.hier_graph_dict = {}
self.const_parse = const_parse
self.G = hier_graph
def read_inputs(self,name:str):
"""
read circuit graphs
"""
top_ports = []
ports_weight = {}
for node, attr in self.G.nodes(data=True):
if 'source' in attr['inst_type']:
for source_nets in self.G.neighbors(node):
top_ports.append(source_nets)
elif 'net_type' in attr:
if attr['net_type'] == "external":
top_ports.append(node)
ports_weight[node]=[]
for nbr in list(self.G.neighbors(node)):
ports_weight[node].append(self.G.get_edge_data(node, nbr)['weight'])
logger.debug("Merging nested graph hierarchies to dictionary: ")
const = self.const_parse.read_user_const(name)
self.hier_graph_dict[name] = {
"graph": self.G,
"ports": top_ports,
"ports_weight": ports_weight,
"const": const
}
self._traverse_hier_in_graph(self.G)
logger.debug(f"read graph {self.hier_graph_dict}")
return self.hier_graph_dict
def _traverse_hier_in_graph(self,G):
"""
        Recursively reads all hierarchies in the graph and converts them to dictionary entries
"""
for node, attr in G.nodes(data=True):
if "sub_graph" in attr and attr["sub_graph"]:
logger.debug(f'Traversing sub graph: {node} {attr["inst_type"]} {attr["ports"]}')
sub_ports = []
ports_weight = {}
for sub_node, sub_attr in attr["sub_graph"].nodes(data=True):
if 'net_type' in sub_attr:
if sub_attr['net_type'] == "external":
sub_ports.append(sub_node)
ports_weight[sub_node] = []
for nbr in list(attr["sub_graph"].neighbors(sub_node)):
ports_weight[sub_node].append(attr["sub_graph"].get_edge_data(sub_node, nbr)['weight'])
logger.debug(f'external ports: {sub_ports}, {attr["connection"]}, {ports_weight}')
const = self.const_parse.read_user_const(attr["inst_type"])
self.hier_graph_dict[attr["inst_type"]] = {
"graph": attr["sub_graph"],
"ports": sub_ports,
"const": const,
"ports_weight": ports_weight
}
self._traverse_hier_in_graph(attr["sub_graph"])
| 38.906667 | 119 | 0.532557 | [
"BSD-3-Clause"
] | mabrains/ALIGN-public | align/compiler/create_database.py | 2,918 | Python |
def main():
# Open a file for writing and create it if it doesn't exist
# myfile = open("textfile.txt", "w+")
# # Open the file for appending text to the end
# myfile = open("textfile.txt", "a+")
# # write some lines of data to the file
# for i in range(10):
# myfile.write("This is some new text\n")
# # close the file when done
# myfile.close()
# Open the file back up and read the contents
myfile = open("textfile.txt", "r")
if myfile.mode == 'r':
# contents = myfile.read()
# print(contents)
filelines = myfile.readlines()
for fileline in filelines:
print(fileline)
if __name__ == "__main__":
main()
| 25.733333 | 64 | 0.537565 | [
"MIT"
] | JeffreyAsuncion/LearningPython | Chapter03/file_start.py | 772 | Python |
from robotMap import XboxMap
from components.Actuators.LowLevel.shooterMotors import ShooterMotors
from components.Actuators.LowLevel.intakeMotor import IntakeMotor
from components.Actuators.HighLevel.hopperMotor import HopperMotor
from utils.DirectionEnums import Direction
from enum import Enum, auto
from magicbot import tunable
import logging as log
class Type(Enum):
"""Enumeration for the two types within the feeder."""
kIntake = auto()
kHopper = auto()
class FeederMap:
"""Simple map that holds the logic for running elements of the feeder."""
compatString = ["doof", "teapot"]
shooterMotors: ShooterMotors
intakeMotor: IntakeMotor
hopperMotor: HopperMotor
xboxMap: XboxMap
loaderMotorSpeed = tunable(.2)
intakeMotorSpeed = tunable(.5)
def on_enable(self):
pass
# log.setLevel(logging.DEBUG)
def run(self, loaderFunc):
"""Called when execution of a feeder element is desired."""
if loaderFunc == Type.kIntake:
if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kForwards)
log.debug("right trig intake", self.xboxMap.getMechRightTrig())
elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
self.intakeMotor.runIntake(self.intakeMotorSpeed, Direction.kBackwards)
log.debug("left trig intake", self.xboxMap.getMechLeftTrig())
else:
self.intakeMotor.runIntake(0, Direction.kForwards)
if loaderFunc == Type.kHopper:
if self.xboxMap.getDriveLeftTrig() > 0 and self.xboxMap.getDriveRightTrig() == 0:
self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kForwards)
self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kForwards)
log.debug("right trig manual", self.xboxMap.getMechRightTrig())
elif self.xboxMap.getDriveRightTrig() > 0 and self.xboxMap.getDriveLeftTrig() == 0:
self.hopperMotor.runHopperMotorForeside(self.loaderMotorSpeed, Direction.kBackwards)
self.hopperMotor.runHopperMotorBackside(self.loaderMotorSpeed, Direction.kBackwards)
log.debug("left trig manual", self.xboxMap.getMechLeftTrig())
else:
self.hopperMotor.stopHopperMotorBackside()
self.hopperMotor.stopHopperMotorForeside()
def execute(self):
pass
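# Illustrative call pattern (assumed, not from the original robot code): a
# higher-level component holding a FeederMap would call run() once per control
# loop for each element it wants driven, e.g.
#
#     self.feeder_map.run(Type.kIntake)
#     self.feeder_map.run(Type.kHopper)
#
# and rely on the MagicBot framework to call execute() automatically.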
# Source: Raptacon/Robot-2022 | components/Actuators/HighLevel/feederMap.py | MIT | Python
#!/usr/bin/python3
import pytest
def test_weight(WBTC, WETH, accounts, SwapRouter, NonfungiblePositionManager, CellarPoolShareContract):
ACCURACY = 10 ** 6
SwapRouter.exactOutputSingle([WETH, WBTC, 3000, accounts[0], 2 ** 256 - 1, 10 ** 7, 2 * 10 ** 18, 0], {"from": accounts[0], "value": 2 * 10 ** 18})
WBTC.approve(CellarPoolShareContract, 10 ** 7, {"from": accounts[0]})
ETH_amount = 10 ** 18
WBTC_amount = 5 * 10 ** 6
cellarAddParams = [WBTC_amount, ETH_amount, 0, 0, 2 ** 256 - 1]
CellarPoolShareContract.addLiquidityForUniV3(cellarAddParams, {"from": accounts[0], "value": ETH_amount})
cellarAddParams = [WBTC_amount, ETH_amount, 0, 0, 2 ** 256 - 1]
CellarPoolShareContract.addLiquidityForUniV3(cellarAddParams, {"from": accounts[0], "value": ETH_amount})
token_id_0 = NonfungiblePositionManager.tokenOfOwnerByIndex(CellarPoolShareContract, 0)
liq_0 = NonfungiblePositionManager.positions(token_id_0)[7]
weight_0 = CellarPoolShareContract.cellarTickInfo(0)[3]
NFT_count = NonfungiblePositionManager.balanceOf(CellarPoolShareContract)
for i in range(NFT_count - 1):
token_id = NonfungiblePositionManager.tokenOfOwnerByIndex(CellarPoolShareContract, i + 1)
liq = NonfungiblePositionManager.positions(token_id)[7]
weight = CellarPoolShareContract.cellarTickInfo(i + 1)[3]
assert approximateCompare(liq_0 * weight, liq * weight_0, ACCURACY)
def approximateCompare(a, b, accuracy):
delta = 0
if a > b:
return (a - b) * accuracy < a
else:
return (b - a) * accuracy < b
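# Illustration (not part of the original test): approximateCompare is a
# relative-tolerance check, e.g. approximateCompare(1_000_000, 1_000_001, 10 ** 6)
# is True while approximateCompare(100, 101, 10 ** 6) is False.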
# Source: VolumeFi/somm-wbtc-eth-test-cellar | tests/test_05_weight.py | Apache-2.0 | Python
# NOTICE
#
# This software was produced for the U.S. Government under
# contract SB-1341-14-CQ-0010, and is subject to the Rights
# in Data-General Clause 52.227-14, Alt. IV (DEC 2007)
#
# (c) 2018 The MITRE Corporation. All Rights Reserved.
#====================================================
# CASE API
#!/usr/bin/env python
import datetime
import uuid
import rdflib
from rdflib import RDF
CASE = rdflib.Namespace('http://case.example.org/core#')
#====================================================
#-- CREATE A CASE DOCUMENT FOR A SINGLE REPORT
class Document(object):
def __init__(self, graph=None):
"""
Initializes the CASE document.
Args:
graph: The graph to populate (instance of rdflib.Graph)
If not provided, a graph in memory will be used.
"""
if not graph:
graph = rdflib.Graph()
graph.namespace_manager.bind('case', CASE)
self.graph = graph
def _sanitize_triple(self, triple):
"""Santizes the triple to contains pure rdflib terms."""
s, p, o = triple
if isinstance(s, Node):
s = s._node
if isinstance(o, Node):
o = o._node
elif o is not None and not isinstance(o, rdflib.term.Node):
o = rdflib.Literal(o)
if p is not None and not isinstance(p, rdflib.term.Node):
p = CASE[p]
return s, p, o
def __iter__(self):
"""Wrapper for iterating over all triples in the graph"""
return iter(self.graph)
def __contains__(self, triple):
"""Wrapper for checking if triple is contained in the graph."""
return self._sanitize_triple(triple) in self.graph
def triples(self, triple):
"""Generator over the triple store in graph."""
return self.graph.triples(self._sanitize_triple(triple))
def _json_ld_context(self):
context = dict(
(pfx, str(ns))
for (pfx, ns) in self.graph.namespaces() if pfx and
str(ns) != u"http://www.w3.org/XML/1998/namespace")
context['@vocab'] = str(CASE)
return context
    # Manually specify properties to help enforce that both properties are supplied.
def create_hash(self, hashMethod, hashValue):
return self.create_Node(
CASE.Hash, bnode=True, hashMethod=hashMethod, hashValue=hashValue)
# We are going to default to json-ld instead of rdflib's default of xml.
def serialize(self, format='json-ld', **kwargs):
"""Serializes the document's graph to a destination.
(Follows same arguments as rdflib.Graph().serialize())"""
if format == 'json-ld':
if 'context' not in kwargs:
kwargs['context'] = self._json_ld_context()
if 'auto_compact' not in kwargs:
kwargs['auto_compact'] = True
return self.graph.serialize(format=format, **kwargs)
# def serialize_append(self, format='json-ld', destination="new-api_output.json", **kwargs):
# """
# Serializes the document's graph to append to a destination file.
# """
# if format == 'json-ld':
# if 'context' not in kwargs:
# kwargs['context'] = self._json_ld_context()
# if 'auto_compact' not in kwargs:
# kwargs['auto_compact'] = True
# graph = self.graph.serialize(format=format, **kwargs)
# with open(destination, "a") as fin:
# fin.write(graph)
# fin.close()
#====================================================
#-- CREATE A CASE OBJECT
def create_Node(self, rdf_type=None, uri=None, bnode=False, **kwargs):
return Node(self.graph, rdf_type=rdf_type, uri=uri, bnode=bnode, **kwargs)
def create_CoreObject(self, _type=None, **kwargs):
"""
Creates and returns a CoreObject.
"""
return CoreObject(self.graph, rdf_type=_type, **kwargs)
def create_ContextObject(self, _type=None, **kwargs):
"""
Creates and returns a Context.
This class may not have PropertyBundles.
"""
return ContextObject(self.graph, rdf_type=_type, **kwargs)
def create_SubObject(self, _type=None, **kwargs):
"""
Creates and returns a Sub.
This class is for children of one of the above CASE classes.
This class may not have PropertyBundles.
"""
return SubObject(self.graph, rdf_type=_type, **kwargs)
def create_DuckObject(self, _type=None, **kwargs):
"""
Creates and returns a Duck.
These lonely Ducks have no parents and are fully duck-typed.
This class may not have PropertyBundles.
"""
return DuckObject(self.graph, rdf_type=_type, **kwargs)
#====================================================
#-- CASE OBJECT CLASSES
class Node(object):
"""Implements a generic node in the graph."""
RDF_TYPE = None
# Namespace to use when adding properties that are not of type rdflib.URIRef.
NAMESPACE = CASE
def __init__(self, graph, uri=None, bnode=False, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
            uri: Optional string to set the URI to. (If not provided a UUID will be generated.)
bnode: Whether to create a blank node or a uri reference.
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
super(Node, self).__init__()
if uri:
self.uri = uri
else:
self.uri = str(uuid.uuid4())
if bnode:
self._node = rdflib.BNode(self.uri)
else:
self._node = rdflib.URIRef(self.uri)
self._graph = graph
if not rdf_type:
rdf_type = self.RDF_TYPE
# Add namespace prefix to non URIRef to allow abstraction from rdflib.
if not isinstance(rdf_type, rdflib.term.Node):
rdf_type = self.NAMESPACE[rdf_type]
self.add(RDF.type, rdf_type)
for key, value in iter(kwargs.items()):
self.add(key, value)
def add(self, property, value):
"""Adds a property and its value to the node."""
# type: (object, object) -> object
# Ignore setting properties with a None value.
if value is None:
return
        # Lists and other iterables as values are the equivalent of having multiple properties.
# NOTE: Lists obviously lose their order.
# TODO: Add support for ordered lists.
if isinstance(value, (list, tuple, set)):
for item in value:
self.add(property, item)
return
if isinstance(value, Node):
value = value._node
# Convert basic python datatypes to literals.
elif not isinstance(value, rdflib.term.Node):
value = rdflib.Literal(value)
# Automatically convert non-node properties to URIRef using default prefix.
if not isinstance(property, rdflib.term.Node):
property = self.NAMESPACE[property]
self._graph.add((self._node, property, value))
class CoreObject(Node):
RDF_TYPE = CASE.CoreObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(CoreObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('CoreObjectCreationTime', datetime.datetime.utcnow())
self.pb = ""
def create_PropertyBundle(self, prop_type=None, **kwargs):
"""Convenience function for adding property bundles to this Trace.
Args:
type: The @type of property bundle (can be of type rdflib.URIRef or string).
properties: Properties to add to the created property bundle.
Returns:
The property bundle created (instance of PropertyBundle).
"""
self.pb = PropertyBundle(self._graph, rdf_type=prop_type, **kwargs)
self.add(CASE.propertyBundle, self.pb)
return self.pb
class PropertyBundle(Node):
RDF_TYPE = CASE.PropertyBundle
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type or a property must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
# Property bundles should be blank nodes because we should be referencing them
# through CoreObjects.
self.propObj = kwargs
super(PropertyBundle, self).__init__(
graph, bnode=True, rdf_type=rdf_type, **kwargs)
class ContextObject(Node):
RDF_TYPE = CASE.ContextObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(ContextObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('ContextObjectCreationTime', datetime.datetime.utcnow())
class SubObject(Node):
RDF_TYPE = CASE.SubObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(SubObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('SubObjectCreationTime', datetime.datetime.utcnow())
class DuckObject(Node):
RDF_TYPE = CASE.DuckObject
def __init__(self, graph, rdf_type=None, **kwargs):
"""Initializes and adds a node to the graph.
NOTE: At least the type must be supplied for the Node
to exist in the graph.
Args:
graph: The graph to add this node to. (instance of rdflib.Graph)
rdf_type: The RDF type to set this node to.
properties: Extra properties to add to this node.
(More properties can be set after initialization by using the add() function.)
"""
self.type = rdf_type
super(DuckObject, self).__init__(graph, rdf_type=rdf_type, **kwargs)
self.add('DuckObjectCreationTime', datetime.datetime.utcnow())
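# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the classes above compose: a Document owns the graph, CoreObjects
# hang off it, and PropertyBundles attach to CoreObjects. Serializing to
# JSON-LD assumes the rdflib JSON-LD serializer plugin is available.
if __name__ == '__main__':
    doc = Document()
    trace = doc.create_CoreObject('Trace', name='acquired-image')
    trace.create_PropertyBundle('File', fileName='image.dd', sizeInBytes=1024)
    print(doc.serialize(format='json-ld'))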
# Source: casework/CASE-API-Python | example/case_example.py | Apache-2.0 | Python
import sys
import math
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../../../")
from sdc_etl_libs.sdc_dataframe.Dataframe import *
import pandas as pd
import numpy as np
import json
import pytest
def test_generate_insert_query_ddl(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}},
{"name":"_SF_INSERTEDDATETIME","type":{"type":"string","logical_type":"datetime", "add_column": true }}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
query = df.generate_insert_query_ddl(df.df)
assert query == '("CULTURE", "DESCRIPTION", "KEY", "NAME", "_METADATA", "_SF_INSERTEDDATETIME") select Column1 as "CULTURE", Column2 as "DESCRIPTION", Column3 as "KEY", Column4 as "NAME", PARSE_JSON(Column5) as "_METADATA", Column6 as "_SF_INSERTEDDATETIME" from values '
def test_generate_insert_query_values(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA", "table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url": "https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
query = df.generate_insert_query_values(df.df)
assert query == "('cs', 'Czech', '9', 'Ceština', '{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}'), ('ze', 'Is', '9', 'This', '{'links': [{'id': '10', 'rel': 'self', 'href': '/api/v1/languages/10', 'code': 'This'}]}'), "
def test_convert_columns_to_json(mocker):
test_schema = """
{
"namespace": "TimeControl",
"type": "object",
"name": "languages",
"country_code": "USA",
"data_sink": {"type":"snowflake", "database": "HRIS_DATA",
"table_name": "LANGUAGES", "schema": "TIMECONTROL"},
"data_source": {"type": "api", "base_url":
"https://smiledirectclub.timecontrol.net/api/v1"},
"fields": [
{"name":"_METADATA","type":{"type":"string","logical_type":"json"}},
{"name":"KEY","type":{"type":"int"},"sf_merge_key": true},
{"name":"NAME","type":{"type":"string"}},
{"name":"DESCRIPTION","type":{"type":"string"}},
{"name":"CULTURE","type":{"type":"string"}}
]
}"""
test_data = """
[{"_metadata": {"links": [{"id": "9",
"rel": "self",
"href": "/api/v1/languages/9",
"code": "Ceština"}]},
"Key": 9,
"Name": "Ceština",
"Description": "Czech",
"Culture": "cs"},
{"_metadata": {"links": [{"id": "10",
"rel": "self",
"href": "/api/v1/languages/10",
"code": "This"}]},
"Key": 9,
"Name": "This",
"Description": "Is",
"Culture": "ze"}]
"""
df = Dataframe(SDCDFTypes.PANDAS, test_schema)
df.load_data(json.loads(test_data))
data_before = df.df["_METADATA"][0]
df.convert_columns_to_json()
data_after = df.df["_METADATA"][0]
pytest.assume(data_before == "{'links': [{'id': '9', 'rel': 'self', 'href': '/api/v1/languages/9', 'code': 'Ceština'}]}")
    pytest.assume(data_after == '{"links": [{"id": "9", "rel": "self", "href": "/api/v1/languages/9", "code": "Ce\\u0161tina"}]}')
# Source: darknegma/docker-airflow | libs/sdc_etl_libs/test/dataframe_tests/sdc_dataframe_sql.py | Apache-2.0 | Python
#
# Copyright 2018 PyWren Team
# (C) Copyright IBM Corp. 2020
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import pickle
import logging
from lithops import utils
from lithops.job.partitioner import create_partitions
from lithops.utils import is_object_processing_function, sizeof_fmt
from lithops.storage.utils import create_func_key, create_agg_data_key
from lithops.job.serialize import SerializeIndependent, create_module_data
from lithops.constants import MAX_AGG_DATA_SIZE, JOBS_PREFIX, LOCALHOST,\
SERVERLESS, STANDALONE, LITHOPS_TEMP_DIR
from types import SimpleNamespace
import os
import hashlib
import inspect
from lithops.utils import b64str_to_bytes
logger = logging.getLogger(__name__)
def create_map_job(config, internal_storage, executor_id, job_id, map_function,
iterdata, runtime_meta, runtime_memory, extra_env,
include_modules, exclude_modules, execution_timeout,
extra_args=None, obj_chunk_size=None, obj_chunk_number=None,
invoke_pool_threads=128):
"""
Wrapper to create a map job. It integrates COS logic to process objects.
"""
host_job_meta = {'host_job_create_tstamp': time.time()}
map_iterdata = utils.verify_args(map_function, iterdata, extra_args)
if config['lithops'].get('rabbitmq_monitor', False):
rabbit_amqp_url = config['rabbitmq'].get('amqp_url')
utils.create_rabbitmq_resources(rabbit_amqp_url, executor_id, job_id)
# Object processing functionality
parts_per_object = None
if is_object_processing_function(map_function):
create_partitions_start = time.time()
        # Create partitions according to chunk_size or chunk_number
logger.debug('ExecutorID {} | JobID {} - Calling map on partitions '
'from object storage flow'.format(executor_id, job_id))
map_iterdata, parts_per_object = create_partitions(config, internal_storage,
map_iterdata, obj_chunk_size,
obj_chunk_number)
host_job_meta['host_job_create_partitions_time'] = round(time.time()-create_partitions_start, 6)
# ########
job = _create_job(config=config,
internal_storage=internal_storage,
executor_id=executor_id,
job_id=job_id,
func=map_function,
iterdata=map_iterdata,
runtime_meta=runtime_meta,
runtime_memory=runtime_memory,
extra_env=extra_env,
include_modules=include_modules,
exclude_modules=exclude_modules,
execution_timeout=execution_timeout,
host_job_meta=host_job_meta,
invoke_pool_threads=invoke_pool_threads)
if parts_per_object:
job.parts_per_object = parts_per_object
return job
def create_reduce_job(config, internal_storage, executor_id, reduce_job_id,
reduce_function, map_job, map_futures, runtime_meta,
runtime_memory, reducer_one_per_object, extra_env,
include_modules, exclude_modules, execution_timeout=None):
"""
Wrapper to create a reduce job. Apply a function across all map futures.
"""
host_job_meta = {'host_job_create_tstamp': time.time()}
iterdata = [(map_futures, )]
if hasattr(map_job, 'parts_per_object') and reducer_one_per_object:
prev_total_partitons = 0
iterdata = []
for total_partitions in map_job.parts_per_object:
iterdata.append((map_futures[prev_total_partitons:prev_total_partitons+total_partitions],))
prev_total_partitons += total_partitions
reduce_job_env = {'__LITHOPS_REDUCE_JOB': True}
if extra_env is None:
ext_env = reduce_job_env
else:
ext_env = extra_env.copy()
ext_env.update(reduce_job_env)
iterdata = utils.verify_args(reduce_function, iterdata, None)
return _create_job(config=config,
internal_storage=internal_storage,
executor_id=executor_id,
job_id=reduce_job_id,
func=reduce_function,
iterdata=iterdata,
runtime_meta=runtime_meta,
runtime_memory=runtime_memory,
extra_env=ext_env,
include_modules=include_modules,
exclude_modules=exclude_modules,
execution_timeout=execution_timeout,
host_job_meta=host_job_meta)
'''
stores function and modules in temporary directory to be used later in optimized runtime
'''
def _store_func_and_modules(func_key, func_str, module_data):
# save function
func_path = '/'.join([LITHOPS_TEMP_DIR, func_key])
os.makedirs(os.path.dirname(func_path), exist_ok=True)
with open(func_path, "wb") as f:
f.write(func_str)
if module_data:
logger.debug("Writing Function dependencies to local disk")
modules_path = '/'.join([os.path.dirname(func_path), 'modules'])
for m_filename, m_data in module_data.items():
m_path = os.path.dirname(m_filename)
if len(m_path) > 0 and m_path[0] == "/":
m_path = m_path[1:]
to_make = os.path.join(modules_path, m_path)
try:
os.makedirs(to_make)
except OSError as e:
if e.errno == 17:
pass
else:
raise e
full_filename = os.path.join(to_make, os.path.basename(m_filename))
with open(full_filename, 'wb') as fid:
fid.write(b64str_to_bytes(m_data))
logger.debug("Finished storing function and modules")
def _create_job(config, internal_storage, executor_id, job_id, func,
iterdata, runtime_meta, runtime_memory, extra_env,
include_modules, exclude_modules, execution_timeout,
host_job_meta, invoke_pool_threads=128):
"""
:param func: the function to map over the data
:param iterdata: An iterable of input data
:param extra_env: Additional environment variables for CF environment. Default None.
:param extra_meta: Additional metadata to pass to CF. Default None.
:param remote_invocation: Enable remote invocation. Default False.
:param invoke_pool_threads: Number of threads to use to invoke.
:param data_all_as_one: upload the data as a single object. Default True
:param overwrite_invoke_args: Overwrite other args. Mainly used for testing.
:param exclude_modules: Explicitly keep these modules from pickled dependencies.
    :return: A job description object for the given inputs.
    :rtype: types.SimpleNamespace
"""
ext_env = {} if extra_env is None else extra_env.copy()
if ext_env:
ext_env = utils.convert_bools_to_string(ext_env)
logger.debug("Extra environment vars {}".format(ext_env))
job = SimpleNamespace()
job.executor_id = executor_id
job.job_id = job_id
job.extra_env = ext_env
job.execution_timeout = execution_timeout or config['lithops']['execution_timeout']
job.function_name = func.__name__
job.total_calls = len(iterdata)
mode = config['lithops']['mode']
if mode == SERVERLESS:
job.invoke_pool_threads = invoke_pool_threads
job.runtime_memory = runtime_memory or config['serverless']['runtime_memory']
job.runtime_timeout = config['serverless']['runtime_timeout']
if job.execution_timeout >= job.runtime_timeout:
job.execution_timeout = job.runtime_timeout - 5
elif mode == STANDALONE:
job.runtime_memory = None
runtime_timeout = config['standalone']['hard_dismantle_timeout']
if job.execution_timeout >= runtime_timeout:
job.execution_timeout = runtime_timeout - 10
elif mode == LOCALHOST:
job.runtime_memory = None
job.runtime_timeout = execution_timeout
exclude_modules_cfg = config['lithops'].get('exclude_modules', [])
include_modules_cfg = config['lithops'].get('include_modules', [])
exc_modules = set()
inc_modules = set()
if exclude_modules_cfg:
exc_modules.update(exclude_modules_cfg)
if exclude_modules:
exc_modules.update(exclude_modules)
if include_modules_cfg is not None:
inc_modules.update(include_modules_cfg)
if include_modules_cfg is None and not include_modules:
inc_modules = None
if include_modules is not None and include_modules:
inc_modules.update(include_modules)
if include_modules is None:
inc_modules = None
logger.debug('ExecutorID {} | JobID {} - Serializing function and data'.format(executor_id, job_id))
job_serialize_start = time.time()
serializer = SerializeIndependent(runtime_meta['preinstalls'])
func_and_data_ser, mod_paths = serializer([func] + iterdata, inc_modules, exc_modules)
data_strs = func_and_data_ser[1:]
data_size_bytes = sum(len(x) for x in data_strs)
module_data = create_module_data(mod_paths)
func_str = func_and_data_ser[0]
func_module_str = pickle.dumps({'func': func_str, 'module_data': module_data}, -1)
func_module_size_bytes = len(func_module_str)
total_size = utils.sizeof_fmt(data_size_bytes+func_module_size_bytes)
host_job_meta['host_job_serialize_time'] = round(time.time()-job_serialize_start, 6)
host_job_meta['data_size_bytes'] = data_size_bytes
host_job_meta['func_module_size_bytes'] = func_module_size_bytes
if 'data_limit' in config['lithops']:
data_limit = config['lithops']['data_limit']
else:
data_limit = MAX_AGG_DATA_SIZE
if data_limit and data_size_bytes > data_limit*1024**2:
log_msg = ('ExecutorID {} | JobID {} - Total data exceeded maximum size '
'of {}'.format(executor_id, job_id, sizeof_fmt(data_limit*1024**2)))
raise Exception(log_msg)
logger.info('ExecutorID {} | JobID {} - Uploading function and data '
'- Total: {}'.format(executor_id, job_id, total_size))
# Upload data
data_key = create_agg_data_key(JOBS_PREFIX, executor_id, job_id)
job.data_key = data_key
data_bytes, data_ranges = utils.agg_data(data_strs)
job.data_ranges = data_ranges
data_upload_start = time.time()
internal_storage.put_data(data_key, data_bytes)
data_upload_end = time.time()
host_job_meta['host_data_upload_time'] = round(data_upload_end-data_upload_start, 6)
func_upload_start = time.time()
# Upload function and modules
if config[mode].get('customized_runtime'):
# Prepare function and modules locally to store in the runtime image later
function_file = func.__code__.co_filename
function_hash = hashlib.md5(open(function_file,'rb').read()).hexdigest()[:16]
mod_hash = hashlib.md5(repr(sorted(mod_paths)).encode('utf-8')).hexdigest()[:16]
uuid = f'{function_hash}{mod_hash}'
func_key = create_func_key(JOBS_PREFIX, uuid, "")
_store_func_and_modules(func_key, func_str, module_data)
job.ext_runtime_uuid = uuid
else:
func_key = create_func_key(JOBS_PREFIX, executor_id, job_id)
internal_storage.put_func(func_key, func_module_str)
job.func_key = func_key
func_upload_end = time.time()
host_job_meta['host_func_upload_time'] = round(func_upload_end - func_upload_start, 6)
host_job_meta['host_job_created_time'] = round(time.time() - host_job_meta['host_job_create_tstamp'], 6)
job.metadata = host_job_meta
return job
# Source: pablogs98/lithops | lithops/job/job.py | Apache-2.0 | Python
import sys
import time
import os
import os.path as osp
import requests
import shutil
import tqdm
import pickle
import numpy as np
import torch
from cogdl.data import Data, Dataset, download_url
from . import register_dataset
def untar(path, fname, deleteTar=True):
"""
Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
class GTNDataset(Dataset):
r"""The network datasets "ACM", "DBLP" and "IMDB" from the
`"Graph Transformer Networks"
<https://arxiv.org/abs/1911.06455>`_ paper.
Args:
root (string): Root directory where the dataset should be saved.
name (string): The name of the dataset (:obj:`"gtn-acm"`,
:obj:`"gtn-dblp"`, :obj:`"gtn-imdb"`).
"""
def __init__(self, root, name):
self.name = name
self.url = f'https://github.com/cenyk1230/gtn-data/blob/master/{name}.zip?raw=true'
super(GTNDataset, self).__init__(root)
self.data = torch.load(self.processed_paths[0])
self.num_classes = torch.max(self.data.train_target).item() + 1
self.num_edge = len(self.data.adj)
self.num_nodes = self.data.x.shape[0]
@property
def raw_file_names(self):
names = ["edges.pkl", "labels.pkl", "node_features.pkl"]
return names
@property
def processed_file_names(self):
return ["data.pt"]
def read_gtn_data(self, folder):
edges = pickle.load(open(osp.join(folder, 'edges.pkl'), 'rb'))
labels = pickle.load(open(osp.join(folder, 'labels.pkl'), 'rb'))
node_features = pickle.load(open(osp.join(folder, 'node_features.pkl'), 'rb'))
data = Data()
data.x = torch.from_numpy(node_features).type(torch.FloatTensor)
num_nodes = edges[0].shape[0]
node_type = np.zeros((num_nodes), dtype=int)
assert len(edges)==4
assert len(edges[0].nonzero())==2
node_type[edges[0].nonzero()[0]] = 0
node_type[edges[0].nonzero()[1]] = 1
node_type[edges[1].nonzero()[0]] = 1
node_type[edges[1].nonzero()[1]] = 0
node_type[edges[2].nonzero()[0]] = 0
node_type[edges[2].nonzero()[1]] = 2
node_type[edges[3].nonzero()[0]] = 2
node_type[edges[3].nonzero()[1]] = 0
print(node_type)
data.pos = torch.from_numpy(node_type)
edge_list = []
for i, edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
edge_list.append(edge_tmp)
data.edge_index = torch.cat(edge_list, 1)
A = []
for i,edge in enumerate(edges):
edge_tmp = torch.from_numpy(np.vstack((edge.nonzero()[0], edge.nonzero()[1]))).type(torch.LongTensor)
value_tmp = torch.ones(edge_tmp.shape[1]).type(torch.FloatTensor)
A.append((edge_tmp,value_tmp))
edge_tmp = torch.stack((torch.arange(0,num_nodes),torch.arange(0,num_nodes))).type(torch.LongTensor)
value_tmp = torch.ones(num_nodes).type(torch.FloatTensor)
A.append((edge_tmp,value_tmp))
data.adj = A
data.train_node = torch.from_numpy(np.array(labels[0])[:,0]).type(torch.LongTensor)
data.train_target = torch.from_numpy(np.array(labels[0])[:,1]).type(torch.LongTensor)
data.valid_node = torch.from_numpy(np.array(labels[1])[:,0]).type(torch.LongTensor)
data.valid_target = torch.from_numpy(np.array(labels[1])[:,1]).type(torch.LongTensor)
data.test_node = torch.from_numpy(np.array(labels[2])[:,0]).type(torch.LongTensor)
data.test_target = torch.from_numpy(np.array(labels[2])[:,1]).type(torch.LongTensor)
y = np.zeros((num_nodes), dtype=int)
x_index = torch.cat((data.train_node, data.valid_node, data.test_node))
y_index = torch.cat((data.train_target, data.valid_target, data.test_target))
y[x_index.numpy()] = y_index.numpy()
data.y = torch.from_numpy(y)
self.data = data
def get(self, idx):
assert idx == 0
return self.data
def apply_to_device(self, device):
self.data.x = self.data.x.to(device)
self.data.train_node = self.data.train_node.to(device)
self.data.valid_node = self.data.valid_node.to(device)
self.data.test_node = self.data.test_node.to(device)
self.data.train_target = self.data.train_target.to(device)
self.data.valid_target = self.data.valid_target.to(device)
self.data.test_target = self.data.test_target.to(device)
new_adj = []
for (t1, t2) in self.data.adj:
new_adj.append((t1.to(device), t2.to(device)))
self.data.adj = new_adj
def download(self):
download_url(self.url, self.raw_dir, name=self.name + '.zip')
untar(self.raw_dir, self.name + '.zip')
def process(self):
self.read_gtn_data(self.raw_dir)
torch.save(self.data, self.processed_paths[0])
def __repr__(self):
return "{}()".format(self.name)
@register_dataset("gtn-acm")
class ACM_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-acm"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(ACM_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-dblp")
class DBLP_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-dblp"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(DBLP_GTNDataset, self).__init__(path, dataset)
@register_dataset("gtn-imdb")
class IMDB_GTNDataset(GTNDataset):
def __init__(self):
dataset = "gtn-imdb"
path = osp.join(osp.dirname(osp.realpath(__file__)), "../..", "data", dataset)
super(IMDB_GTNDataset, self).__init__(path, dataset)
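# Illustrative usage sketch (assumed, not from the original test suite): the
# registered datasets download, unpack and cache themselves on first use.
if __name__ == "__main__":
    dataset = ACM_GTNDataset()          # fetches gtn-acm.zip on the first run
    data = dataset.data
    print("nodes:", dataset.num_nodes, "edge types:", dataset.num_edge)
    print("features:", tuple(data.x.shape), "classes:", dataset.num_classes)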
# Source: AlvinWen428/cogdl | cogdl/datasets/gtn_data.py | MIT | Python
#!/usr/bin/env python3
#
# Convert full firmware binary to rwd patch.
# Supported models:
# CR-V 5g (part num: 39990-TLA), tested
# Civic 2016 sedan (part num: 39990-TBA), tested
# Civic 2016 hatchback Australia (part num: 39990-TEA), tested
# Civic 2016 hatchback (part num: 39990-TGG), tested
#
import os
import sys
import argparse
import subprocess
import struct
# Decryption lookup table built from Civic 2016 sedan bin/rwd, also apply to CR-V 5g.
default_decrypt_lookup_table = {144: 72, 218: 55, 255: 255, 164: 1, 195: 26, 99: 2, 28: 178, 205: 158, 125: 138, 45: 118, 222: 98, 142: 78, 62: 58, 243: 38, 163: 18, 83: 254, 3: 234, 172: 214, 92: 194, 12: 174, 189: 154, 109: 134, 29: 114, 206: 94, 126: 74, 46: 54, 227: 34, 147: 14, 113: 0, 67: 250, 236: 230, 156: 210, 76: 190, 252: 170, 173: 150, 93: 130, 13: 110, 148: 253, 120: 159, 199: 148, 198: 137, 77: 126, 23: 104, 73: 83, 203: 73, 78: 62, 123: 53, 254: 42, 43: 33, 90: 23, 161: 12, 10: 3, 132: 249, 191: 239, 226: 220, 197: 201, 248: 191, 117: 181, 34: 172, 37: 161, 88: 151, 141: 142, 8: 131, 134: 121, 185: 111, 54: 101, 190: 90, 57: 79, 128: 68, 139: 57, 14: 46, 138: 35, 131: 10, 100: 241, 1: 228, 146: 200, 133: 185, 168: 171, 104: 155, 40: 139, 251: 85, 94: 66, 91: 45, 103: 124, 55: 112, 231: 156, 80: 56, 224: 92, 102: 113, 96: 60, 98: 188, 97: 252, 140: 206, 122: 31, 232: 187, 16: 40, 202: 51, 26: 7, 239: 251, 5: 153, 219: 77, 119: 128, 21: 157, 238: 102, 180: 5, 217: 119, 30: 50, 7: 100, 32: 44, 183: 144, 50: 176, 110: 70, 157: 146, 2: 164, 44: 182, 145: 8, 58: 15, 27: 29, 64: 52, 9: 67, 31: 199, 179: 22, 42: 11, 193: 20, 211: 30, 129: 4, 241: 32, 74: 19, 178: 208, 247: 160, 112: 64, 242: 224, 114: 192, 165: 193, 0: 36, 59: 37, 196: 9, 154: 39, 75: 41, 72: 147, 249: 127, 162: 204, 130: 196, 229: 209, 182: 133, 48: 48, 86: 109, 240: 96, 137: 99, 151: 136, 209: 24, 108: 198, 181: 197, 212: 13, 244: 21, 11: 25, 118: 117, 228: 17, 214: 141, 52: 229, 160: 76, 115: 6, 106: 27, 56: 143, 25: 71, 36: 225, 194: 212, 208: 88, 187: 69, 171: 65, 153: 103, 38: 97, 207: 243, 82: 184, 184: 175, 188: 218, 213: 205, 121: 95, 15: 195, 81: 248, 24: 135, 70: 105, 150: 125, 174: 86, 158: 82, 220: 226, 201: 115, 71: 116, 51: 246, 177: 16, 176: 80, 22: 93, 39: 108, 159: 231, 223: 247, 186: 47, 169: 107, 245: 213, 235: 81, 192: 84, 124: 202, 175: 235, 84: 237, 79: 211, 234: 59, 143: 227, 237: 166, 33: 236, 253: 106, 65: 244, 111: 219, 200: 179, 101: 177, 17: 232, 20: 221, 166: 129, 60: 186, 61: 122, 167: 140, 204: 222, 87: 120, 41: 75, 135: 132, 136: 163, 49: 240, 250: 63, 107: 49, 170: 43, 18: 168, 221: 162, 35: 242, 225: 28, 149: 189, 85: 173, 152: 167, 95: 215, 53: 165, 89: 87, 66: 180, 6: 89, 47: 203, 210: 216, 215: 152, 233: 123, 116: 245, 127: 223, 19: 238, 69: 169, 105: 91, 4: 217, 216: 183, 68: 233, 63: 207, 155: 61, 246: 149, 230: 145}
# sum of x, x is unsigned shorts
def checksum_by_sum(fw, start, end):
s = 0
for i in range(start, end - start, 2):
s += struct.unpack('!H', fw[i:i + 2])[0]
return s
# sum of -x, x is unsigned shorts
def checksum_by_negative_sum(fw, start, end):
s = 0
for i in range(start, end - start, 2):
s += -struct.unpack('!H', fw[i:i + 2])[0]
return s
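# Worked example (illustrative): for the two 16-bit words 0x0102 and 0x0304,
# checksum_by_sum(b'\x01\x02\x03\x04', 0, 4) returns 0x0406 and
# checksum_by_negative_sum(b'\x01\x02\x03\x04', 0, 4) returns -0x0406, which
# main() masks with 0xFFFF to 0xFBFA before writing it into the patched image.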
checksum_funcs = [checksum_by_sum, checksum_by_negative_sum]
car_models = {
'39990-TLA-A030': { #CR-V thanks to joe1
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TLA-A030', '39990-TLA-A040', '39990-TLA,A030', '39990-TLA,A040'],
'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x6c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x6bf80), (1, 0x6bffe)] #original bin checksums are 0x419b at offset 0x6FF80 and 0x24ef at 0x6FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
},
'39990-TBA-A030': { #civic sedan thanks to mystery leaker
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TBA-A000', '39990-TBA-A010', '39990-TBA-A020', '39990-TBA-A030'],
'security-key': ['0x011100121020', '0x011100121020', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)] #original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
},
'39990-TEA-T330': { #civic hatch au thanks to ming
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TEA-T330'],
'security-key': ['0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TEA-H010': { # bccw test
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TEA-H010', '39990-TEA-H020', '39990-TEA,H020'],
'security-key': ['0x0111011211', '0x0111011211', '0x0111011211'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TGG-A120': { #civic hatch thanks to R3DLOBST3R
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TGG-A120'],
'security-key': ['0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TRW-A020': { #clarity thanks to wirelessnet2
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TRW-A010', '39990-TRW-A020', '39990-TRW,A010', '39990-TRW,A020'],
'security-key': ['0x011101121120', '0x011101121120', '0x011101121120', '0x011101121120'],
'encryption-key': '0x010203',
'start-address': 0x4000,
'data-size': 0x4c000,
#(checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)]
},
'39990-TBX-3050': { #civic sedan thanks to mystery leaker
'can-address': '0x18DA30F1',
'supported-versions': ['39990-TBX-H110', '39990-TBX-H120', '39990-TBX-3050'],
'security-key': ['0x0211021212', '0x0211021212', '0x0211021212'],
'encryption-key': '0xbf109e',
'start-address': 0x13000,
'data-size': 0xed000,
# (checksum func idx, offset)
'checksum-offsets': [(0, 0x4bf80), (1, 0x4bffe)] #original bin checksums are 0xDD23 at offset 0x4FF80 and 0xEDDF at 0x4FFFE, but since we start the bin from 0x4000 after bootloader, we offset the checksum accordingly
},
}
def main():
# example: python3 bin_to_rwd.py --input_bin crv_5g_user_patched.bin --model 39990-TLA-A030
parser = argparse.ArgumentParser()
parser.add_argument("--input_bin", required=True, help="Full firmware binary file")
parser.add_argument("--model", default='39990-TLA-A030', help="EPS part number")
args = parser.parse_args()
if not args.model in car_models:
print('Car model %s not found' % args.model)
sys.exit(-1)
print('Creating rwd for model %s' % args.model)
m = car_models[args.model]
if not os.path.exists(args.input_bin):
print('%s not found' % args.input_bin)
sys.exit(-1)
encrypt_lookup_table = {}
for k, v in default_decrypt_lookup_table.items():
encrypt_lookup_table[v] = k
with open(args.input_bin, 'rb') as f:
full_fw = f.read()
patch_fw = full_fw[m['start-address']:(m['start-address'] + m['data-size'])]
for func_idx, off in m['checksum-offsets']:
old_checksum = struct.unpack('!H', patch_fw[off:off+2])[0] & 0xFFFF
new_checksum = checksum_funcs[func_idx](patch_fw, 0, off) & 0xFFFF
print('Update checksum at offset %s from %s to %s' % (hex(off), hex(old_checksum), hex(new_checksum)))
patch_fw = patch_fw[:off] + struct.pack('!H', new_checksum & 0xFFFF) + patch_fw[off+2:]
encrypted = bytearray()
for b in patch_fw:
encrypted.append(encrypt_lookup_table[b])
out_enc_path = args.input_bin + '.enc'
with open(out_enc_path, 'wb') as out_f:
out_f.write(encrypted)
print('Encryption done, saved to %s.' % out_enc_path)
cur_dir = os.path.dirname(os.path.abspath(__file__))
cmds = [
'python2',
'rwd-builder.py',
'--can-address', m['can-address'],
'--supported-versions', *m['supported-versions'],
'--security-key', *m['security-key'],
'--encryption-key', m['encryption-key'],
'--encrypted-file', out_enc_path,
'--start-address', hex(m['start-address']),
'--data-size', hex(m['data-size'])
]
subprocess.check_call(cmds, cwd=cur_dir)
print('RWD file %s created.' % (out_enc_path[:-4] + '.rwd'))
if __name__ == "__main__":
main()
# Source: bccw-ai/rwd-xray | tools/bin_to_rwd.py | MIT | Python
from django.apps import AppConfig
class AsciiArtConfig(AppConfig):
name = 'ascii_art'
# Source: ChetanDehane/ascii-art | server/ascii_art_server/api/ascii_art/apps.py | MIT | Python
from django.contrib.staticfiles.storage import staticfiles_storage
from django.urls import reverse
from ManagementStudents.jinja2 import Environment
# This enables us to use Django template tags like {% url ‘index’ %} or {% static ‘path/to/static/file.js’ %} in our Jinja2 templates.
def environment(**options):
env = Environment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'url': reverse,
})
return env
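# A minimal sketch of the matching TEMPLATES entry in settings.py (this block
# is an assumption about the project's settings, not copied from it):
#
#     TEMPLATES = [
#         {
#             "BACKEND": "django.template.backends.jinja2.Jinja2",
#             "DIRS": [os.path.join(BASE_DIR, "templates")],
#             "APP_DIRS": True,
#             "OPTIONS": {
#                 "environment": "ManagementStudents.customsettings.environment",
#             },
#         },
#     ]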
# Source: lpython2006e/exercies | 14_Tran_An_Thien/ManagementStudents/ManagementStudents/customsettings.py | MIT | Python
"""
This module used for serializing data
CategorySchema - data from Category model
VacancySchema - data from Vacancy model
"""
# pylint: disable=too-many-ancestors
# pylint: disable=missing-class-docstring
# pylint: disable=too-few-public-methods
from app import ma
from app.models.model import Category, Vacancy
class CategorySchema(ma.SQLAlchemyAutoSchema):
"""
Used for serialize Category data
"""
class Meta:
model = Category
fields = ("name", )
class VacancySchema(ma.SQLAlchemyAutoSchema):
"""
Used for serialize Vacancy data
"""
class Meta:
model = Vacancy
fields = ("name", "salary", "info", "contacts")
ordered = True
categories_schema = CategorySchema(many=True)
vacancies_schema = VacancySchema(many=True)
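def serialize_all():
    """Illustrative helper (not part of the original module): dumps query
    results to plain dicts for a JSON response. Assumes a Flask app context
    and the SQLAlchemy session configured in `app`."""
    categories = categories_schema.dump(Category.query.all())
    vacancies = vacancies_schema.dump(Vacancy.query.all())
    return categories, vacancies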
# Source: WishesFire/Epam-Python-Project | app/rest/serializers.py | Apache-2.0 | Python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class RouteType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
"""The routing methodology to where the ICE server will be located from the client. "any" will
have higher reliability while "nearest" will have lower latency. It is recommended to default
to use the "any" routing method unless there are specific scenarios which minimizing latency is
critical.
"""
ANY = "any"
NEAREST = "nearest"
# Source: AikoBB/azure-sdk-for-python | sdk/communication/azure-communication-networktraversal/azure/communication/networktraversal/_generated/models/_communication_network_traversal_client_enums.py | MIT | Python
# -*- coding: utf-8 -*-
"""Class that defines the abstract interface for an object repository.
The scope of this class is intentionally very narrow. Any backend implementation should merely provide the methods to
store binary blobs, or "objects", and return a string-based key that uniquely identifies the object that was just created.
This key should then be able to be used to retrieve the bytes of the corresponding object or to delete it.
"""
import abc
import contextlib
import hashlib
import io
import pathlib
from typing import BinaryIO, Iterable, Iterator, List, Optional, Tuple, Union
from aiida.common.hashing import chunked_file_hash
__all__ = ('AbstractRepositoryBackend',)
class AbstractRepositoryBackend(metaclass=abc.ABCMeta):
"""Class that defines the abstract interface for an object repository.
The repository backend only deals with raw bytes, both when creating new objects as well as when returning a stream
or the content of an existing object. The encoding and decoding of the byte content should be done by the client
upstream. The file repository backend is also not expected to keep any kind of file hierarchy but must be assumed
to be a simple flat data store. When files are created in the file object repository, the implementation will return
a string-based key with which the content of the stored object can be addressed. This key is guaranteed to be unique
and persistent. Persisting the key or mapping it onto a virtual file hierarchy is again up to the client upstream.
"""
@property
@abc.abstractmethod
def uuid(self) -> Optional[str]:
"""Return the unique identifier of the repository."""
@property
@abc.abstractmethod
def key_format(self) -> Optional[str]:
"""Return the format for the keys of the repository.
Important for when migrating between backends (e.g. archive -> main), as if they are not equal then it is
necessary to re-compute all the `Node.repository_metadata` before importing (otherwise they will not match
with the repository).
"""
@abc.abstractmethod
def initialise(self, **kwargs) -> None:
"""Initialise the repository if it hasn't already been initialised.
:param kwargs: parameters for the initialisation.
"""
@property
@abc.abstractmethod
def is_initialised(self) -> bool:
"""Return whether the repository has been initialised."""
@abc.abstractmethod
def erase(self) -> None:
"""Delete the repository itself and all its contents.
.. note:: This should not merely delete the contents of the repository but any resources it created. For
example, if the repository is essentially a folder on disk, the folder itself should also be deleted, not
just its contents.
"""
@staticmethod
def is_readable_byte_stream(handle) -> bool:
return hasattr(handle, 'read') and hasattr(handle, 'mode') and 'b' in handle.mode
def put_object_from_filelike(self, handle: BinaryIO) -> str:
"""Store the byte contents of a file in the repository.
:param handle: filelike object with the byte content to be stored.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
if not isinstance(handle, io.BufferedIOBase) and not self.is_readable_byte_stream(handle):
raise TypeError(f'handle does not seem to be a byte stream: {type(handle)}.')
return self._put_object_from_filelike(handle)
@abc.abstractmethod
def _put_object_from_filelike(self, handle: BinaryIO) -> str:
pass
def put_object_from_file(self, filepath: Union[str, pathlib.Path]) -> str:
"""Store a new object with contents of the file located at `filepath` on this file system.
:param filepath: absolute path of file whose contents to copy to the repository.
:return: the generated fully qualified identifier for the object within the repository.
:raises TypeError: if the handle is not a byte stream.
"""
with open(filepath, mode='rb') as handle:
return self.put_object_from_filelike(handle)
@abc.abstractmethod
def has_objects(self, keys: List[str]) -> List[bool]:
"""Return whether the repository has an object with the given key.
:param keys:
list of fully qualified identifiers for objects within the repository.
:return:
            list of booleans, in the same order as the keys provided, with value True if the respective
object exists and False otherwise.
"""
def has_object(self, key: str) -> bool:
"""Return whether the repository has an object with the given key.
:param key: fully qualified identifier for the object within the repository.
:return: True if the object exists, False otherwise.
"""
return self.has_objects([key])[0]
@abc.abstractmethod
def list_objects(self) -> Iterable[str]:
"""Return iterable that yields all available objects by key.
:return: An iterable for all the available object keys.
"""
@contextlib.contextmanager
def open(self, key: str) -> Iterator[BinaryIO]:
"""Open a file handle to an object stored under the given key.
.. note:: this should only be used to open a handle to read an existing file. To write a new file use the method
``put_object_from_filelike`` instead.
:param key: fully qualified identifier for the object within the repository.
:return: yield a byte stream object.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
if not self.has_object(key):
raise FileNotFoundError(f'object with key `{key}` does not exist.')
def get_object_content(self, key: str) -> bytes:
"""Return the content of a object identified by key.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return handle.read()
@abc.abstractmethod
def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
"""Return an iterator over the (read-only) byte streams of objects identified by key.
.. note:: handles should only be read within the context of this iterator.
:param keys: fully qualified identifiers for the objects within the repository.
:return: an iterator over the object byte streams.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if a file could not be opened.
"""
def get_object_hash(self, key: str) -> str:
"""Return the SHA-256 hash of an object stored under the given key.
.. important::
A SHA-256 hash should always be returned,
to ensure consistency across different repository implementations.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be opened.
"""
with self.open(key) as handle: # pylint: disable=not-context-manager
return chunked_file_hash(handle, hashlib.sha256)
@abc.abstractmethod
def delete_objects(self, keys: List[str]) -> None:
"""Delete the objects from the repository.
:param keys: list of fully qualified identifiers for the objects within the repository.
:raise FileNotFoundError: if any of the files does not exist.
:raise OSError: if any of the files could not be deleted.
"""
keys_exist = self.has_objects(keys)
if not all(keys_exist):
error_message = 'some of the keys provided do not correspond to any object in the repository:\n'
for indx, key_exists in enumerate(keys_exist):
if not key_exists:
error_message += f' > object with key `{keys[indx]}` does not exist.\n'
raise FileNotFoundError(error_message)
def delete_object(self, key: str) -> None:
"""Delete the object from the repository.
:param key: fully qualified identifier for the object within the repository.
:raise FileNotFoundError: if the file does not exist.
:raise OSError: if the file could not be deleted.
"""
return self.delete_objects([key])
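# A minimal in-memory sketch of a concrete backend (an illustration, not part
# of aiida-core): it shows which members a real implementation must provide.
class DictRepositoryBackend(AbstractRepositoryBackend):
    """Toy backend that keeps object contents in a dict keyed by SHA-256 hash."""

    def __init__(self) -> None:
        self._objects: dict = {}

    @property
    def uuid(self) -> Optional[str]:
        return None  # transient store, no persistent identifier

    @property
    def key_format(self) -> Optional[str]:
        return 'sha256'

    def initialise(self, **kwargs) -> None:
        pass  # nothing to set up for an in-memory dict

    @property
    def is_initialised(self) -> bool:
        return True

    def erase(self) -> None:
        self._objects.clear()

    def _put_object_from_filelike(self, handle: BinaryIO) -> str:
        content = handle.read()
        key = hashlib.sha256(content).hexdigest()
        self._objects[key] = content
        return key

    def has_objects(self, keys: List[str]) -> List[bool]:
        return [key in self._objects for key in keys]

    def list_objects(self) -> Iterable[str]:
        return list(self._objects)

    @contextlib.contextmanager
    def open(self, key: str) -> Iterator[BinaryIO]:
        if not self.has_object(key):
            raise FileNotFoundError(f'object with key `{key}` does not exist.')
        yield io.BytesIO(self._objects[key])

    def iter_object_streams(self, keys: List[str]) -> Iterator[Tuple[str, BinaryIO]]:
        for key in keys:
            with self.open(key) as handle:  # pylint: disable=not-context-manager
                yield key, handle

    def delete_objects(self, keys: List[str]) -> None:
        super().delete_objects(keys)  # existence check from the abstract base
        for key in keys:
            self._objects.pop(key)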
# Source: azadoks/aiida-core | aiida/repository/backend/abstract.py | MIT, BSD-3-Clause | Python
"""personal_gallery URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'',include('gallery.urls'))
]
# Source: mikengugy/The-Gallery | personal_gallery/urls.py | MIT | Python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .feature_client import FeatureClient
from .version import VERSION
__all__ = ['FeatureClient']
__version__ = VERSION
# Source: HydAu/AzureSDKForPython | azure-mgmt-resource/azure/mgmt/resource/features/__init__.py | Apache-2.0 | Python
import sqlite3
import os
MSG_HELP = """List of commands:
!help
List commands
!listAll
List all animals
!show <animal>
Give description
!getFlag
Give flag (Admin only)
!serverInfo
Give server info (Dragonite only)
!addAdmin <id>
Make user an admin (Dragonite only)
!hint
Give you a hint.
Source_code:
https://github.com/Bankde/Hack-me-bot"""
MSG_NO_DRAGONITE = "You're not Dragonite. Go away !!"
MSG_SEARCH_ERROR = "We cannot find this animal in our database"
MSG_NO_ADMIN = "You are not Admin. Go away !!"
MSG_ANIMAL_CMD = "Please specify animal: e.g. !show dog"
APP_DB = "app.db"
HINT_URL = "https://i.imgur.com/QPKpeJL.jpg"
def init():
serverInfo = os.getenv('SERVER_INFO', None)
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (serverInfo,)
cursor.execute("UPDATE ServerInfo SET info=?", values)
conn.commit()
values = ("TestLogUser", "TestLogMsg", )
cursor.execute("INSERT INTO MsgLog VALUES (?,?)", values)
conn.commit()
conn.close()
# Log userId and their msg here
def _msgLog(user, msg):
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (user, msg,)
# CREATE TABLE MsgLog (user TEXT, msg TEXT);
cursor.execute("INSERT INTO MsgLog VALUES (?,?)", values)
conn.commit()
conn.close()
# Show animal description
def _showAnimal(animal):
try:
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
cursor.execute("SELECT description FROM Animals WHERE animal='%s'" % (animal))
all_data = cursor.fetchone()
conn.close()
if all_data == None or len(all_data) == 0:
return MSG_SEARCH_ERROR
else:
return all_data[0]
except:
print("SQL error for arg: %s" % (animal))
return None
# List every animals
def _listAnimal():
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Animals (animal TEXT UNIQUE, description TEXT);
cursor.execute("SELECT animal FROM Animals")
all_data = cursor.fetchall()
conn.close()
return ", ".join([data[0] for data in all_data])
# My own reminder
def _getServerInfo(user):
if user.lower() == "dragonite":
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE ServerInfo (info TEXT);
cursor.execute("SELECT info FROM ServerInfo")
all_data = cursor.fetchone()
conn.close()
return all_data[0]
else:
return MSG_NO_DRAGONITE
# You should ask Dragonite to add you to admin list
def _addAdmin(user, arg):
if user.lower() == "dragonite":
try:
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
values = (arg,)
# CREATE TABLE Admins (user TEXT PRIMARY KEY);
cursor.execute("INSERT INTO Admins VALUES (?)", values)
conn.commit()
conn.close()
return "Successfully add %s into admin" % (arg)
except:
return "You're already an admin"
else:
return MSG_NO_DRAGONITE
# Flag is secret. No one besides admin should see it.
def _getFlag(user):
conn = sqlite3.connect(APP_DB)
cursor = conn.cursor()
# CREATE TABLE Admins (user TEXT PRIMARY KEY);
cursor.execute("SELECT user FROM Admins WHERE user='%s'" % (user))
all_data = cursor.fetchone()
conn.close()
if all_data != None and len(all_data) == 1:
flag = os.getenv('FLAG', None)
return flag
else:
print("Alert: %s is not admin." % (user))
return MSG_NO_ADMIN
def runCmd(message, user):
_msgLog(user, message)
if message.lower() == "help" or message.lower() == "!help":
return MSG_HELP
elif message == "!listAll":
return _listAnimal()
elif message == ("!show"):
return MSG_ANIMAL_CMD
elif message.startswith("!show "):
return _showAnimal(message[6:])
elif message == "!serverInfo":
return _getServerInfo(user)
elif message == "!getFlag":
return _getFlag(user)
elif message[:10] == "!addAdmin ":
arg = message[10:]
return _addAdmin(user, arg)
elif message == "!hint":
return HINT_URL
else:
return ""
| 29.401361 | 86 | 0.621472 | ["Apache-2.0"] | Bankde/Hack-me-bot | botCmd.py | 4,322 | Python |
import unittest
import xmlrunner
# from selenium import webdriver
import pagemodels.headerpage
import tests.pickledlogin
import browserconfig
# VIDEO OF EXECUTION
# https://gyazo.com/b20fd223076bf34c1f2c9b94a4f1fe0a
# 2020-04-20 All tests passing, refactor complete
# All tests passed 5 executions in a row. v1 ready to ship.
# BUG- First execution will murder the cookies and break the following tests.
# interestingly, every subsequent test will pass once cookies are hard reset.
class HeaderPageTests(unittest.TestCase):
"""Test cases for the use of the header features atop most netflix pages."""
@classmethod
def setUpClass(cls):
"""Launch the webdriver of choice with selected options(see browserconfig.py).
Then login using pickled cookies(see tests/pickledlogin.py)."""
if browserconfig.current_browser in ['chrome', 'firefox']:
cls.driver = browserconfig.driver_runner(
executable_path=browserconfig.driver_path,
desired_capabilities=browserconfig.capabilities
)
elif browserconfig.current_browser == 'edge':
cls.driver = browserconfig.driver_runner(
executable_path=browserconfig.driver_path,
capabilities=browserconfig.capabilities
)
tests.pickledlogin.pickled_login(cls.driver)
@classmethod
def tearDownClass(cls):
"""Closes the browser and shuts down the driver executable."""
cls.driver.quit()
def setUp(self):
"""Return to the home page, netflix.com/browse, the staging place for header tests."""
self.driver.get("https://netflix.com/browse")
def test_logout_from_header(self):
"""Logout from the header."""
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.logout()
# user is redirected to https://www.netflix.com/logout after loging out
self.assertIn('logout', self.driver.current_url)
# CLEANUP
# log back in using the pickled cookies
tests.pickledlogin.pickled_login(self.driver)
def test_navigate_home_from_my_list(self):
"""Using the giant Netflix logo in the top left, navigate to the home page /browse/
from the my-list page."""
self.driver.get("https://www.netflix.com/browse/my-list")
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.navigate_to_home()
self.assertEqual("https://www.netflix.com/browse", self.driver.current_url)
def test_navigate_to_manage_profile(self):
"""Using the header account dropdown, navigate to the manage profile page."""
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.navigate_to_manage_profile()
# user is redirected to https://www.netflix.com/profiles/manage
self.assertIn('profiles/manage', self.driver.current_url)
def test_search_for_shawshank(self):
"""Using the search field, search for 'shawshank' and assert that shawshank was found."""
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.search("shawshank")
self.assertIn("The Shawshank Redemption", self.driver.page_source)
        # I kind of like this assert now that I think about it. It's testing both the search
        # function and Netflix's search algorithm.
        # NOTE- the test will not fail if "The Shawshank Redemption" is removed. Netflix displays
# "similar to {title_name}" for titles its search algorithm recognizes
def test_click_top_notification(self):
"""Click the top notification and assert that the page has changed."""
header_page = pagemodels.headerpage.HeaderPage(self.driver)
header_page.click_top_notification()
# Assert that we navigated to a notification page or a title page(only 2 options)
self.assertTrue(
'title' in self.driver.current_url or 'notification' in self.driver.current_url
)
    # DIDN'T MAKE THE FIRST CUT OF TESTS
    # I could have 5 more tests here, one for each of the header buttons.
    # Those are about as elementary as tests get. Skipping them, but TODO- OKAY TO HAVE
# def test_clear_all_notifications(self):
# """ this is easy to do, but impossible to perfect. Netflix doesnt allow any sort of
# 'mark notification as unread' so I have no way of generating notifications. Since I have
    # no way of managing the state, THIS TEST CAN NEVER BE RUN MORE THAN ONCE A DAY. Thus I am
# forced to leave it out in order to avoid inconsistent test results"""
# header_page = pagemodels.headerpage.HeaderPage(self.driver)
# header_page.clear_notifications()
if __name__ == '__main__':
with open(r'xmltestresults\pretestresults.xml', 'wb') as output:
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output=output),
failfast=False, buffer=False, catchbreak=False)
| 41.504132 | 98 | 0.696734 | ["MIT"] | BradleyPelton/NetflixSelenium | tests/test_headerpage.py | 5,022 | Python |
class Sort_dic:
    """Helper for returning a dict sorted by its values or by its keys."""

    def __init__(self):
        pass

    @staticmethod
    def sort_values(dic, rev=False, sort_by='values'):
        """Return a new dict ordered by values (default) or by keys.

        Set rev=True for descending order. Returns None for an unknown sort_by.
        """
        if sort_by == 'values':
            # Walk the sorted values and copy matching items in that order.
            sv = sorted(dic.values(), reverse=rev)
            new_dic = {}
            for num in sv:
                for k, v in dic.items():
                    if num == v:
                        new_dic[k] = v
            return new_dic
        elif sort_by == 'keys':
            # Walk the sorted keys and copy matching items in that order.
            sk = sorted(dic.keys(), reverse=rev)
            new_dic = {}
            for num in sk:
                for k, v in dic.items():
                    if k == num:
                        new_dic[k] = v
            return new_dic
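# Illustrative usage (editor-added sketch, not part of the original module).
# Output order shown in the comments assumes CPython's insertion-ordered dicts.
if __name__ == "__main__":
    scores = {"ada": 3, "bob": 1, "cy": 2}
    print(Sort_dic.sort_values(scores))                  # {'bob': 1, 'cy': 2, 'ada': 3}
    print(Sort_dic.sort_values(scores, rev=True))        # {'ada': 3, 'cy': 2, 'bob': 1}
    print(Sort_dic.sort_values(scores, sort_by='keys'))  # {'ada': 3, 'bob': 1, 'cy': 2}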
| 26.321429 | 54 | 0.385346 | ["MIT"] | jacktamin/king-tools | build/lib/king_libs/sort_val.py | 737 | Python |
import os
import sys
import copy as copy
from tensor_view_1d import TensorView1D
from tensor_view_2d import TensorView2D
from tensor_view_act import TensorViewAct
from tensor_view_filter import TensorViewFilter
from tensor_data import TensorData
import inspect
from PyQt4 import QtGui, QtCore
from pyqt_env import PyQTEnv
import xml.etree.ElementTree as ET
TEST_WATERFALL_VIEW = False
gui_root_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
class MainWindow(QtGui.QMainWindow):
def __init__(self, args):
super(MainWindow, self).__init__()
self.setGeometry(1400,70,600,370)
self.setWindowTitle("VISUALIZATION")
self.action_cb = args
#self.tensor_input_list = args['tensor_input_list']
quitAction = QtGui.QAction('Quit', self)
quitAction.triggered.connect(self.close_application)
saveAction = QtGui.QAction('Save', self)
saveAction.setShortcut('Ctrl+S')
saveAction.triggered.connect(self.save_WatchList)
loadAction = QtGui.QAction('Open File...', self)
loadAction.setShortcut('Ctrl+O')
loadAction.triggered.connect(self.action_cb['load_WatchList'])
input_file = QtGui.QAction('Open input file', self)
input_file.setShortcut('Ctrl+I')
input_file.triggered.connect(self.open_input_file)
menu = self.menuBar()
filemenu = menu.addMenu('&File')
filemenu.addAction(saveAction)
filemenu.addAction(loadAction)
filemenu.addAction(input_file)
self.toolBar = self.addToolBar("ToolBar")
self.toolBar.addAction(quitAction)
self.create_sub_windows()
def create_sub_windows(self):
pausecheck = QtGui.QCheckBox('Pause', self)
pausecheck.move(520,120)
pausecheck.toggle()
pausecheck.stateChanged.connect(self.action_cb['on_pause'])
self.step_btn = QtGui.QPushButton("Step",self)
self.step_btn.setStyleSheet("color: blue; font: bold 14px")
self.step_btn.resize(50,25)
self.step_btn.move(520,80)
self.step_btn.clicked.connect(self.action_cb['on_step'])
self.watch_com = QtGui.QLabel(self)
self.watch_com.setText('Watch :')
self.watch_com.move(520,244)
self.watch_com.setFont(QtGui.QFont("Times",13,weight=QtGui.QFont.Bold))
self.watch_choice = QtGui.QComboBox(self)
self.watch_choice.setStyleSheet("font: bold 14px")
self.watch_choice.move(520,280)
self.watch_choice.addItem('1-DIM')
self.watch_choice.addItem('2-DIM')
self.watch_choice.addItem('Activation')
self.watch_choice.addItem('Filter')
self.watch_choice.resize(70,30)
self.watch_choice.show()
self.watch_choice.activated[str].connect(self.action_cb['on_add_watch'])
self.showbtn = QtGui.QCheckBox('Show',self)
self.showbtn.move(520,195)
self.showbtn.toggle()
self.showbtn.hide()
self.showbtn.stateChanged.connect(self.action_cb['on_set_show'])
self.show_remove_btn = QtGui.QPushButton("Remove",self)
self.show_remove_btn.setStyleSheet("color: red; font: bold 14px")
self.show_remove_btn.resize(70,30)
self.show_remove_btn.move(520,240)
self.show_remove_btn.hide()
self.show_remove_btn.clicked.connect(self.action_cb['on_remove_watch'])
self.hd_all_btn = QtGui.QPushButton("Hide All",self)
self.hd_all_btn.setStyleSheet("color: red; font: bold 14px")
self.hd_all_btn.resize(84,30)
self.hd_all_btn.move(510,280)
self.hd_all_btn.hide()
self.hd_all_btn.clicked.connect(self.action_cb['on_hide_all'])
self.tensor_label = QtGui.QLabel(self)
self.tensor_label.setAlignment(QtCore.Qt.AlignCenter)
self.tensor_label.setGeometry(QtCore.QRect(80,180,200,20))
self.tensor_label.setFont(QtGui.QFont("Times",12,weight=QtGui.QFont.Bold))
self.tensor_reshape_label = QtGui.QLabel(self)
self.tensor_reshape_label.setAlignment(QtCore.Qt.AlignCenter)
self.tensor_reshape_label.setGeometry(QtCore.QRect(80,220,200,20))
self.tensor_reshape_label.setFont(QtGui.QFont("Times",12,weight=QtGui.QFont.Bold))
self.reshape_inlb = QtGui.QLabel(self)
self.reshape_inlb.move(80,220)
self.reshape_inlb.setText('Reshape: ')
self.reshape_inlb.setFont(QtGui.QFont('Times',12,weight=QtGui.QFont.Bold))
self.tensor_shape_input = QtGui.QLineEdit(self)
self.tensor_shape_input.textChanged.connect(self.action_cb['on_tensor_shape_input'])
self.tensor_shape_input.move(160,220)
self.sourceInput_list = QtGui.QComboBox(self)
self.sourceInput_list.move(160,270)
self.sourceInput_list.activated[str].connect(self.action_cb['on_input_select'])
listcombo = QtGui.QComboBox(self)
listcombo.addItem("Select List")
listcombo.addItem("Watch List")
listcombo.move(50,100)
subcombo = QtGui.QComboBox(self)
subcombo.addItem('USER_LIST')
subcombo.addItem('TRAINABLE_VARIABLES')
subcombo.addItem('ACTIVATIONS')
subcombo.addItem('GLOBAL_VARIABLES')
subcombo.addItem('ALL_OPS')
subcombo.move(180,100)
listcombo.activated[str].connect(self.action_cb['on_list_type_select'])
subcombo.activated[str].connect(self.action_cb['on_filter_type_select'])
self.create_list_view()
fontset = QtGui.QFont()
fontset.setPointSize(12)
self.filter_comment = QtGui.QLabel(self)
self.filter_comment.setText('Search Only in ALL_OPS:')
self.filter_comment.setGeometry(QtCore.QRect(100,34,180,25))
self.filter_comment.setFont(fontset)
self.filter_in = QtGui.QLineEdit(self)
self.filter_in.textChanged.connect(self.action_cb['on_filter_str_input'])
self.filter_in.move(290,30)
self.filter_in.resize(190,40)
self.show()
def create_list_view(self):
self.list_view=QtGui.QListView(self)
self.list_view.main = self
self.list_view.setEditTriggers(QtGui.QListView.NoEditTriggers)
self.list_view.setMouseTracking(True)
self.list_model = QtGui.QStandardItemModel()
self.list_view.setModel(self.list_model)
entries = [str(i) for i in range(50)]
for i in entries:
item = QtGui.QStandardItem(i)
self.list_model.appendRow(item)
self.list_view.setMinimumSize(170,200)
self.list_view.move(310,130)
self.list_view.clicked.connect(self.action_cb['on_tensor_select'])
def close_application(self):
choice = QtGui.QMessageBox.question(self, 'Warning',
"Do you want to quit?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
self.action_cb['on_close']()
else:
pass
def save_WatchList(self):
choice = QtGui.QMessageBox.question(self, '',
"Do you want to save the watch_list?",
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No)
if choice == QtGui.QMessageBox.Yes:
self.action_cb['on_save']()
else:
pass
def update_tensor_list(self, list_type, list, pos, reset_pos):
items_str = [t.disp_name for t in list]
self.list_model.clear()
for text in items_str:
item = QtGui.QStandardItem(text)
self.list_model.appendRow(item)
def open_input_file(self):
name = QtGui.QFileDialog.getOpenFileName(self, 'Open input file')
input_file = open(name, 'r')
DIYname = QtGui.QInputDialog.getText(self, 'Name your input choice', None)
save_name = DIYname[0]
self.action_cb['add_input'](save_name, input_file.name)
def update_input_list(self, input_list):
self.sourceInput_list.clear()
for item in input_list:
self.sourceInput_list.addItem(item.name)
def enable_filter_input(self, enable):
if enable is False:
self.filter_in.setDisabled(True)
else:
self.filter_in.setDisabled(False)
class TensorItem(object):
def __init__(self, name, shape, op, input_name):
self.name = name
self.op = op
self.input_name = input_name
#self.data_source = TensorData(start_step=ControlPanel.step_count)
self.disp_name = name
try:
shape_str = '(' + ', '.join(map(str, shape)) + ')'
self.shape_str = shape_str
self.reshape = []
except: #TypeError: #fix for python3
self.shape_str = ""
self.reshape = []
####
#self.pyqt_window_id = None
#self.view = None
def copy(self, obj):
self.name = copy.copy(obj.name)
self.input_name = copy.copy(obj.input_name)
self.op = obj.op
self.disp_name = copy.copy(obj.disp_name)
self.shape_str = copy.copy(obj.shape_str)
self.reshape = copy.copy(obj.reshape)
def get_reshape_str(self):
return ', '.join(map(str, self.reshape))
class ControlPanel(object):
quit = False
pause = True
single_step_flag = False
step_count = 0
cur_list_type = 0
cur_filter_type_index = 0
tensor_select_list = []
select_list_cur_pos = 0
tensor_watch_list = []
watch_list_cur_pos = 0
tensor_input_list = []
console_cmd_list = []
pyqt_env = None
class TensorSelectItem(TensorItem):
def __init__(self, name, shape, op, input_name):
TensorItem.__init__(self, name, shape, op, input_name)
class TensorWatchItem(TensorItem):
def __init__(self, tensor_select_item):
self.showstate = True
self.copy(tensor_select_item)
self.data_source = TensorData(start_step=ControlPanel.step_count)
self.pyqt_window_id = None
self.picDIM = '1-DIM'
class TensorInputItem(object):
def __init__(self, name, input_obj):
self.name = name
self.input_obj = input_obj
"""
tensor panel
"""
def __open_tensor_view(self, index, text):
tensor_item = self.tensor_watch_list[index]
tensor_item.pyqt_window_id = self.pyqt_env.get_free_identity()
if text == '2-DIM':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorView2D,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = '2-DIM'
elif text == '1-DIM':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorView1D,
{'data_source':tensor_item.data_source, 'name':tensor_item.name})
self.tensor_watch_list[index].picDIM = '1-DIM'
elif text == 'Activation':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorViewAct,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = 'Activation'
elif text == 'Filter':
self.pyqt_env.create_window(tensor_item.pyqt_window_id, TensorViewFilter,
{'data_source':tensor_item.data_source, 'name':tensor_item.name, 'shape':tensor_item.shape_str, 'reshape':tensor_item.reshape})
self.tensor_watch_list[index].picDIM = 'Filter'
def __close_tensor_view(self, index):
tensor_item = self.tensor_watch_list[index]
if tensor_item.pyqt_window_id is not None:
self.pyqt_env.close(tensor_item.pyqt_window_id)
tensor_item.pyqt_window_id = None
def __close_all_tensor_views(self):
for i in range(len(self.tensor_watch_list)):
self.__close_tensor_view(i)
def __on_tensor_shape_input(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
dims = text.split(',')
titem.reshape = []
for dim in dims:
try:
titem.reshape.append(int(dim))
except ValueError:
pass
def __on_add_watch(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
new_titem = self.TensorWatchItem(titem)
"""
new_titem = copy.copy(titem) #shallow copy
new_titem.reshape = copy.copy(titem.reshape)
"""
self.tensor_watch_list.append(new_titem)
index = len(self.tensor_watch_list)-1
self.__open_tensor_view(index,text)
def __on_remove_watch(self):
self.__close_tensor_view(self.watch_list_cur_pos)
del self.tensor_watch_list[self.watch_list_cur_pos]
item_num = len(self.tensor_watch_list)
if self.watch_list_cur_pos >= item_num and item_num > 0:
self.watch_list_cur_pos = item_num-1
if self.cur_list_type==0:
list = self.tensor_select_list
pos = self.select_list_cur_pos
else:
list = self.tensor_watch_list
pos = self.watch_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=list, pos=pos, reset_pos=False)
def __on_set_show(self, state):
if state == QtCore.Qt.Checked and self.tensor_watch_list[self.watch_list_cur_pos].showstate == False:
self.__open_tensor_view(self.watch_list_cur_pos, self.tensor_watch_list[self.watch_list_cur_pos].picDIM)
self.tensor_watch_list[self.watch_list_cur_pos].showstate = True
if state != QtCore.Qt.Checked and self.tensor_watch_list[self.watch_list_cur_pos].showstate == True:
self.__close_tensor_view(self.watch_list_cur_pos)
self.tensor_watch_list[self.watch_list_cur_pos].showstate = False
def __on_input_select(self, text):
titem = self.tensor_select_list[self.select_list_cur_pos]
titem.input_name = text
input_obj = self.__get_input_obj(text)
if input_obj is not None:
input_obj.show()
def __on_tensor_select(self, index):
index = index.row()
if self.cur_list_type == 0:
self.select_list_cur_pos = index
list = self.tensor_select_list
print(list[index].shape_str)
else:
self.watch_list_cur_pos = index
list = self.tensor_watch_list
if self.tensor_watch_list[index].showstate == False:
self.main_window.showbtn.setChecked(False)
else:
self.main_window.showbtn.setChecked(True)
self.main_window.tensor_reshape_label.setText('Reshape: ('+str(list[index].get_reshape_str())+')')
self.main_window.tensor_label.setText('Shape: '+list[index].shape_str)
"""
global control
"""
def __on_list_type_select(self, text):
if text == 'Select List':
index = 0
else:
index = 1
if index != self.cur_list_type:
if index == 0:
self.main_window.enable_filter_input(True)
else:
self.main_window.enable_filter_input(False)
self.cur_list_type = index
self.on_switch_btn(self.cur_list_type)
if self.cur_list_type == 0:
pos = self.select_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_select_list, pos=pos, reset_pos=False)
else:
pos = self.watch_list_cur_pos
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_watch_list, pos=pos, reset_pos=False)
def on_switch_btn(self,index):
if index == 0:
self.main_window.watch_choice.show()
self.main_window.show_remove_btn.hide()
self.main_window.hd_all_btn.hide()
self.main_window.showbtn.hide()
self.main_window.watch_com.show()
self.main_window.tensor_label.show()
self.main_window.tensor_label.setText('Shape: '+self.tensor_select_list[0].shape_str)
self.main_window.tensor_shape_input.show()
self.main_window.reshape_inlb.show()
self.main_window.tensor_shape_input.clear()
self.main_window.tensor_reshape_label.hide()
else:
self.main_window.watch_choice.hide()
self.main_window.show_remove_btn.show()
self.main_window.hd_all_btn.show()
self.main_window.watch_com.hide()
self.main_window.tensor_shape_input.hide()
if self.tensor_watch_list != []:
self.main_window.showbtn.show()
self.main_window.tensor_label.show()
self.main_window.tensor_reshape_label.show()
self.main_window.tensor_label.setText('Shape: '+self.tensor_watch_list[0].shape_str)
self.main_window.tensor_reshape_label.setText('Reshape: ('+str(self.tensor_watch_list[0].get_reshape_str())+')')
if self.tensor_watch_list[0].showstate == True:
self.main_window.showbtn.setChecked(True)
else:
self.main_window.showbtn.setChecked(False)
else:
self.main_window.showbtn.hide()
self.main_window.tensor_label.hide()
self.main_window.tensor_reshape_label.hide()
self.main_window.reshape_inlb.hide()
def __on_filter_type_select(self, text):
pwd = {'USER_LIST':0, 'TRAINABLE_VARIABLES':1, 'ACTIVATIONS':2, 'GLOBAL_VARIABLES':3, 'ALL_OPS':4 }
self.cur_filter_type_index = pwd[text]
if pwd[text] == 2:
pass
else:
pass
def __on_filter_str_input(self, text):
text = str(text)
self.filter_str = text.strip()
def __on_pause(self, state):
if state == QtCore.Qt.Checked:
self.pause = True
else:
self.pause = False
print(self.pause)
def __on_step(self):
self.pause = True
self.single_step_flag = True
def __on_hide_all(self):
self.__close_all_tensor_views()
self.main_window.showbtn.hide()
    def __on_console_str_input(self):
        # Console input is currently disabled: the early return makes the lines
        # below unreachable (and `text` is not defined in this scope).
        return
        cmd = copy.copy(text.strip())
        self.console_cmd_list.append(cmd)
def __on_close(self):
self.quit = True
def __on_save(self):
NoWatchItem = len(self.tensor_watch_list)
watchlist = [None]*NoWatchItem
root = ET.Element('root')
for i in range(NoWatchItem):
watchlist[i] = ET.SubElement(root, 'Item'+str(i+1))
name = ET.SubElement(watchlist[i], 'name')
shape = ET.SubElement(watchlist[i], 'shape')
reshape = ET.SubElement(watchlist[i], 'reshape')
visType = ET.SubElement(watchlist[i], 'visType')
win_x = ET.SubElement(watchlist[i], 'win_x')
win_y = ET.SubElement(watchlist[i], 'win_y')
win_w = ET.SubElement(watchlist[i], 'win_w')
win_h = ET.SubElement(watchlist[i], 'win_h')
name.text = self.tensor_watch_list[i].name
shape.text = self.tensor_watch_list[i].shape_str
            reshape.text = self.tensor_watch_list[i].get_reshape_str()  # serialize as a string; ElementTree cannot serialize a raw list
visType.text = self.tensor_watch_list[i].picDIM
(x,y,w,h) = self.pyqt_env.get_win_pos_size(self.tensor_watch_list[i].pyqt_window_id)
win_x.text = str(x)
win_y.text = str(y)
win_w.text = str(w)
win_h.text = str(h)
my = ET.tostring(root)
myfile = open('Saved_WatchList.xml', 'wb')
myfile.write(my)
def __load_WatchList(self):
tree = ET.parse('Saved_WatchList.xml')
root = tree.getroot()
count = len(self.tensor_watch_list)
print(count)
for elem in root:
n = elem[0].text
for t in self.all_ops:
if t.name == n:
tem_select = self.TensorSelectItem(t.name, t.shape, t.op, self.tensor_input_list[0].name)
new = self.TensorWatchItem(tem_select)
self.tensor_watch_list.append(new)
print('now',len(self.tensor_watch_list), 'but count: ', count)
self.__open_tensor_view(count, elem[3].text)
self.pyqt_env.set_win_pos_size(self.tensor_watch_list[count].pyqt_window_id, \
int(elem[4].text),int(elem[5].text),int(elem[6].text),int(elem[7].text))
break
count += 1
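    # For reference (editor-added note): __on_save above writes Saved_WatchList.xml
    # roughly of the form
    #
    #   <root>
    #     <Item1>
    #       <name>...</name><shape>...</shape><reshape>...</reshape>
    #       <visType>1-DIM</visType>
    #       <win_x>..</win_x><win_y>..</win_y><win_w>..</win_w><win_h>..</win_h>
    #     </Item1>
    #     ...
    #   </root>
    #
    # and __load_WatchList above relies on that child order (elem[0] = name,
    # elem[3] = visType, elem[4..7] = window geometry).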
def __create_main_window(self, args):
self.main_window = MainWindow(
{
'filter_type_list':self.filter_type_list,
'tensor_input_list': self.tensor_input_list,
'on_close':self.__on_close,
'on_save':self.__on_save,
# global control
'on_pause':self.__on_pause,
'on_step':self.__on_step,
'on_hide_all':self.__on_hide_all,
'on_console_str_input':self.__on_console_str_input,
'on_filter_type_select':self.__on_filter_type_select,
'on_filter_str_input':self.__on_filter_str_input,
'on_list_type_select':self.__on_list_type_select,
##
'on_tensor_select':self.__on_tensor_select,
# tensor select panel
'on_tensor_shape_input':self.__on_tensor_shape_input,
'on_input_select':self.__on_input_select,
# tensor watch panel
'on_remove_watch':self.__on_remove_watch,
'on_add_watch':self.__on_add_watch,
'on_set_show':self.__on_set_show,
'load_WatchList':self.__load_WatchList,
'add_input':self.__add_input
}
)
return None
def __init__(self, filter_type_list, input_list, loaded_list):
for input_name in input_list:
self.tensor_input_list.append(self.TensorInputItem(input_name, None))
self.filter_str = ""
self.filter_type_list = filter_type_list
self.pyqt_env = PyQTEnv()
self.pyqt_env.run(self.__create_main_window, None)
self.main_window.update_input_list(self.tensor_input_list)
print('control_panel _init')
self.all_ops = loaded_list
### add_input test
#for test/alexnet
#self.__add_input('img_input')
#for test/basic_test
#self.__add_input('test_input')
#self.pyqt_env.run(self.__load_input, None)
'''
def __load_input(self, args):
### add_input test
#for test/alexnet
self.__add_input('my_img_input', '../alexnet/img_input.py')
#for test/basic_test
self.__add_input('test_input', '../basic_test/test_input.py')
'''
def __get_input_obj(self, name):
for input_item in self.tensor_input_list:
if input_item.name == name:
return input_item.input_obj
return None
def __add_input(self, input_name, filename, config_dict={}):
import importlib
try:
placeholder_dict={}
for t in self.all_ops:
if t.op.op.type == 'Placeholder':
placeholder_dict[t.name] = t.op
names = os.path.split(os.path.abspath(filename))
path = names[0]
module_name = names[1].split('.')[-2]
print('* input_name is: %s, filename is: %s'%(input_name, filename))
print('* config_dict is:', config_dict)
print('* module path is: %s, name is: %s'%(path, module_name))
#add module search path
sys.path.append(path)
temp_module = importlib.import_module(module_name)
input_obj = temp_module.TensorInput(placeholder_dict, config_dict)
input_obj.show()
input_item = self.TensorInputItem(input_name, input_obj)
self.tensor_input_list.append(input_item)
self.main_window.update_input_list(self.tensor_input_list)
except Exception as e:
print('Add_input error:', e)
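    # Editor-added sketch of what a dynamically loaded input module is expected
    # to provide, inferred from the call above; the class body is an assumption:
    #
    #   # my_img_input.py (hypothetical)
    #   class TensorInput(object):
    #       def __init__(self, placeholder_dict, config_dict):
    #           self.placeholders = placeholder_dict   # name -> placeholder op
    #       def show(self):
    #           pass   # e.g. open a window that feeds data into the placeholders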
"""
public methods
"""
def update_tensor_list(self, tensor_list):
self.tensor_select_list = []
for t in tensor_list:
if len(self.tensor_input_list)>0:
input_name = self.tensor_input_list[0].name
else:
input_name = ''
self.tensor_select_list.append(self.TensorSelectItem(t[0], t[1], t[2], input_name))
if self.cur_list_type == 0:
self.select_list_cur_pos = 0
self.main_window.update_tensor_list(list_type=self.cur_list_type, list=self.tensor_select_list, pos=0, reset_pos=True)
def get_tensor_watch_list(self):
dict = {}
for input_item in self.tensor_input_list:
list = []
for t in self.tensor_watch_list:
if t.pyqt_window_id is not None and input_item.name == t.input_name:
list.append((t.name, t.reshape, t.op, t.data_source, t.input_name))
if len(list)>0:
dict[input_item] = list
return dict
def beat(self, update_step_flag):
if update_step_flag:
self.single_step_flag = False
ControlPanel.step_count += 1
if self.quit:
self.pyqt_env.quit()
return not self.quit
def is_pause(self):
return self.pause
def is_step(self):
return self.single_step_flag
def get_filter_type(self):
return [self.filter_type_list[self.cur_filter_type_index], self.filter_str]
def get_console_command(self):
if len(self.console_cmd_list)>0:
cmd = self.console_cmd_list.pop()
return cmd
| 40.035874 | 144 | 0.614658 | ["MIT"] | octaviaguo/Tensorflow-Visualizing | TensorMonitor/control_panel.py | 26,784 | Python |
"""TensorFlow ops for deep neural networks."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import nn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope as vs
from tensorflow.contrib.learn.python.learn.ops import dropout_ops
def dnn(tensor_in, hidden_units, activation=nn.relu, dropout=None):
"""Creates fully connected deep neural network subgraph.
Args:
tensor_in: tensor or placeholder for input features.
hidden_units: list of counts of hidden units in each layer.
activation: activation function between layers. Can be None.
dropout: if not None, will add a dropout layer with given probability.
Returns:
A tensor which would be a deep neural network.
"""
with vs.variable_scope('dnn'):
for i, n_units in enumerate(hidden_units):
with vs.variable_scope('layer%d' % i):
tensor_in = rnn_cell.linear(tensor_in, n_units, True)
if activation is not None:
tensor_in = activation(tensor_in)
if dropout is not None:
tensor_in = dropout_ops.dropout(tensor_in, prob=(1.0 - dropout))
return tensor_in
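# Illustrative usage (editor-added sketch; `tf` and the shapes below are
# assumptions, not part of this module):
#
#   x = tf.placeholder(tf.float32, [None, 784])
#   hidden = dnn(x, hidden_units=[128, 64], activation=nn.relu, dropout=0.1)
#   # `hidden` is the output tensor of the last fully connected layer.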
| 40.086957 | 75 | 0.748373 | ["Apache-2.0"] | InfoPrice/tensorflow | tensorflow/contrib/learn/python/learn/ops/dnn_ops.py | 1,844 | Python |
"""
This code was created by Tyler Adam Martinez for the BMEN3310 Final
#These are the variables and what they stand for.
Hemodynamic Parameter Analysis
CS // Cross-Sectional Area of the heart valve
vR // Radius of Valve
DR // Disk Radius
TiA // Area of the Titanium wire
TiV // Volume of the Titanium wire
IRV // Inner Ring Volume
ORV // Outer Ring Volume
DV // Disk Volume
NS // Cost of Nitinol Stent per single unit
PPC // Pure Pyrolytic Carbon per unit volume
Tf // Teflon Fabric per unit volume
Ti // Titanium Wire per unit volume
Hemodynamic Calculations
SVR // Systemic Vascular Resistance or Afterload on the heart
MAP // Mean Arterial Pressure
CVP // Central Venous Pressure
CO // Cardiac Output
SV // Stroke Volume
HR // Heart Rate
SBP // Systolic Blood Pressure
DBP // Diastolic Blood Pressure
"""
import math
pi = 3.14159265359;
## Hemodynamic Parameter Analysis
CS = input("The cross-sectional area of the valve: ");
CS = int(CS);
vR = math.sqrt(CS/pi); #Convert CS to v radius
height = 5.0; #mm
thickness = 1.5; #mm
DR = vR - (2*thickness); #calculating for the two outer disks
Diskheight = 1.5; #mm
#calculating the volumes of each material
TiA = 0.1024 * pi; #.32mm is radius of Titanium wire, and .1024 is r^2
TiV = 2*vR *TiA; #mm^3
IRV = (pi * pow((DR + thickness), 2) - pi * pow(DR, 2)) * height; #mm^3, ring cross-section times height
ORV = (pi * pow((DR + (2*thickness)), 2) - pi * pow((DR + thickness), 2)) * height; #mm^3, ring cross-section times height
DV = pi * pow(DR, 2) * Diskheight; #mm^3
#Constant Cost per volume values
NS = 100; # $ per unit
PPC = 0.00052; # $ per 1 mm^3
TF = 0.00014; # $ per 1 mm^3
Ti = 0.00064; # $ per 1 mm^3
#Material Cost = Volume of Material * Cost per Unit Volume
ORcost = ORV * TF + NS;
IRcost = IRV * PPC;
Dcost = (DV*(.9)*PPC) + (DV*(.1)*TF) + TiV*Ti;
TotalCost = ORcost + IRcost + Dcost;
#Outputting result to user
print("The total cost of your heart valve is $",format(TotalCost,'.2f'));
## Hemodynamic Calculations
SV = input("Enter in the Stroke Volume of the patient: ");
SV = int(SV);
HR = input("Enter in the Heart Rate of the patient: ");
HR = int(HR);
CO = SV * HR;
print("The Cardiac Output of the patient is ",CO);
SBP = input("Enter in the Systomic Blood Pressure of the patient: ");
SBP = int(SBP);
DBP = input("Enter in the Diastolic Blood Pressure of the patient: ");
DBP = int(DBP);
MAP = (((SBP) + (2 *(DBP)))/ 3);
print("The Mean Arterial Pressure of the patient is ",format(MAP, '.3f'));
CVP = input("Enter in the Central Venous Pressure of the patient: ");
CVP = int(CVP);
SVR = ((MAP - CVP)/(CO)) * 80;
print("The Systemic Vascular Resistance of the patient is ",format(SVR,'.3f'));
| 32.614458 | 87 | 0.655707 | ["MIT"] | TylerAdamMartinez/Minimally-Invasive-Monocusp-Valve | Hemodynamic_Parameter_Analysis.py | 2,707 | Python |
# -*- coding: utf-8 -*-
from ..Qt import QtGui, QtCore
from .GraphicsView import GraphicsView
from ..graphicsItems.GradientEditorItem import GradientEditorItem
import weakref
import numpy as np
__all__ = ['GradientWidget']
class GradientWidget(GraphicsView):
"""
Widget displaying an editable color gradient. The user may add, move, recolor,
or remove colors from the gradient. Additionally, a context menu allows the
user to select from pre-defined gradients.
"""
sigGradientChanged = QtCore.Signal(object)
sigGradientChangeFinished = QtCore.Signal(object)
def __init__(self, parent=None, orientation='bottom', *args, **kargs):
"""
The *orientation* argument may be 'bottom', 'top', 'left', or 'right'
indicating whether the gradient is displayed horizontally (top, bottom)
or vertically (left, right) and on what side of the gradient the editable
ticks will appear.
All other arguments are passed to
:func:`GradientEditorItem.__init__ <pyqtgraph.GradientEditorItem.__init__>`.
Note: For convenience, this class wraps methods from
:class:`GradientEditorItem <pyqtgraph.GradientEditorItem>`.
"""
GraphicsView.__init__(self, parent, useOpenGL=False, background=None)
self.maxDim = 31
kargs['tickPen'] = 'k'
self.item = GradientEditorItem(*args, **kargs)
self.item.sigGradientChanged.connect(self.sigGradientChanged)
self.item.sigGradientChangeFinished.connect(self.sigGradientChangeFinished)
self.setCentralItem(self.item)
self.setOrientation(orientation)
self.setCacheMode(self.CacheNone)
self.setRenderHints(QtGui.QPainter.Antialiasing | QtGui.QPainter.TextAntialiasing)
self.setFrameStyle(QtGui.QFrame.NoFrame | QtGui.QFrame.Plain)
#self.setBackgroundRole(QtGui.QPalette.NoRole)
#self.setBackgroundBrush(QtGui.QBrush(QtCore.Qt.NoBrush))
#self.setAutoFillBackground(False)
#self.setAttribute(QtCore.Qt.WA_PaintOnScreen, False)
#self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent, True)
def setOrientation(self, ort):
"""Set the orientation of the widget. May be one of 'bottom', 'top',
'left', or 'right'."""
self.item.setOrientation(ort)
self.orientation = ort
self.setMaxDim()
def setMaxDim(self, mx=None):
if mx is None:
mx = self.maxDim
else:
self.maxDim = mx
if self.orientation in ['bottom', 'top']:
self.setFixedHeight(mx)
self.setMaximumWidth(16777215)
else:
self.setFixedWidth(mx)
self.setMaximumHeight(16777215)
def __getattr__(self, attr):
### wrap methods from GradientEditorItem
return getattr(self.item, attr)
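# Illustrative usage (editor-added sketch, assuming a running QApplication and
# an existing layout; the preset name is an assumption):
#
#   w = GradientWidget(orientation='bottom')
#   w.loadPreset('thermal')                 # method wrapped from GradientEditorItem
#   lut = w.getLookupTable(256)             # 256-entry colour lookup table
#   w.sigGradientChanged.connect(lambda g: print('gradient changed'))
#   some_layout.addWidget(w)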
| 39.666667 | 91 | 0.647059 | ["Apache-2.0"] | kuldeepaman/tf-pose | scripts/pyqtgraph-develop/pyqtgraph/widgets/GradientWidget.py | 2,975 | Python |
from typing_extensions import Final # noqa: F401
CONTAINER_CLIENT_PACKAGES = 'compressedpackages' # type: Final
CONTAINER_EMAILS = 'emails' # type: Final
CONTAINER_MAILBOX = 'mailbox' # type: Final
CONTAINER_SENDGRID_MIME = 'sendgridinboundemails' # type: Final
TABLE_DOMAIN_X_DELIVERED = 'emaildomainxdelivered' # type: Final
TABLE_AUTH = 'clientsauth' # type: Final
QUEUE_CLIENT_PACKAGE = 'lokoleinboundemails' # type: Final
QUEUE_EMAIL_SEND = 'sengridoutboundemails' # type: Final
QUEUE_SENDGRID_MIME = 'sengridinboundemails' # type: Final
| 46.166667 | 65 | 0.785199 | ["Apache-2.0"] | tezzytezzy/opwen-cloudserver | opwen_email_server/constants/azure.py | 554 | Python |
#
#
# Copyright 2020-21 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..interface.graph import Graph as AbstractGraph
from typing import TypeVar, Callable, Awaitable, Set
import neo4j
R = TypeVar('R')
class Graph (AbstractGraph[neo4j.graph.Graph]):
"""A conceptual wrapper for a neo4j query which will return a neo4j.graph.Graph object.
To execute the query and return the underlying object await this object. But the
returned neo4j.graph.Graph is unlikely to be very useful outside of the context managers
in which it was created.
A better way to use this object is to use the 'nodes' coroutine property.
"""
def __init__(
self,
execute: Callable[[Callable[[neo4j.Transaction], neo4j.graph.Graph]], Awaitable[neo4j.graph.Graph]],
func: Callable[[neo4j.Transaction], neo4j.graph.Graph]
):
self._func = func
self._execute = execute
def __await__(self):
return self._execute(self._func).__await__()
@property
async def nodes(self) -> Set[neo4j.graph.Node]:
"""This property is a Coroutine, which is weird, but better matches the neo4j interface.
When awaited this property will execute the query and return you a Set[neo4j.graph.Node]
containing all of the nodes which the query matched.
"""
return await self._execute(lambda tx: set(self._func(tx).nodes))
@property
async def relationships(self) -> Set[neo4j.graph.Relationship]:
"""This property is a Coroutine, which is weird, but better matches the neo4j interface.
When awaited this property will execute the query and return you a Set[neo4j.graph.Relationship]
containing all of the relationships which the query matched.
"""
return await self._execute(lambda tx: set(self._func(tx).relationships))
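# Illustrative usage (editor-added sketch; how a Graph instance is obtained
# depends on the rest of this package, so `result` below is an assumption):
#
#   nodes = await result.nodes            # Set[neo4j.graph.Node]
#   rels = await result.relationships     # Set[neo4j.graph.Relationship]
#   raw = await result                    # the underlying neo4j.graph.Graph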
| 36.723077 | 108 | 0.713448 | ["ECL-2.0", "Apache-2.0"] | bbc/rd-cloudfit-python-aiocypher | aiocypher/aioneo4j/graph.py | 2,387 | Python |
# -*- coding: utf-8 -*-
__version__ = "0.1.0"
| 9.6 | 23 | 0.5 | ["MIT"] | ruzhnikov/exness-crowler | resources_crawler/__init__.py | 48 | Python |
from typing import List
import unittest
from model import tasks
class TestRepository(unittest.TestCase):
def test_list(self):
rep = tasks.Repository()
l = rep.list()
self.assertEqual(len(l), 2)
self.assertEqual(l[0].id, 1)
self.assertEqual(l[0].text, "task1")
self.assertEqual(l[0].done, False)
self.assertEqual(l[1].id, 2)
rep._tasks[0].done = True
l = rep.list()
self.assertEqual(len(l), 1)
self.assertEqual(l[0].id, 2)
self.assertEqual(l[0].done, False)
def test_add(self):
rep = tasks.Repository()
task = tasks.Task(100, "new task")
rep.add(task)
l = rep.list()
self.assertEqual(len(l), 3)
self.assertEqual(l[2].id, 3)
self.assertEqual(l[2].text, "new task")
self.assertEqual(l[2].done, False)
def test_done(self):
rep = tasks.Repository()
rep.done(1)
l = rep.list()
self.assertEqual(len(l), 1)
self.assertEqual(l[0].id, 2)
self.assertEqual(l[0].done, False)
| 24.311111 | 47 | 0.565814 | ["MIT"] | 74th/vscode-book-python | server/tests/test_repository.py | 1,094 | Python |
from app import models
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo
class LoginForm(FlaskForm):
username = StringField('Username', validators=[DataRequired()])
password = PasswordField('Password', validators=[DataRequired()])
submit = SubmitField('Sign In')
| 37.454545 | 76 | 0.783981 | ["MIT"] | justinvasel/justinvasel.com | app/forms.py | 412 | Python |
"""
Version information
"""
__version__ = "1.0.0"
| 8.5 | 21 | 0.627451 | ["MIT"] | CiaranCurran/auto-sync-lingq | sync_lingq/_version.py | 51 | Python |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import io
import json
import os
import unittest
import mock
import six
from six.moves import http_client
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class Test_Blob(unittest.TestCase):
@staticmethod
def _make_one(*args, **kw):
from google.cloud.storage.blob import Blob
properties = kw.pop('properties', None)
blob = Blob(*args, **kw)
blob._properties = properties or {}
return blob
def test_ctor_wo_encryption_key(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
properties = {'key': 'value'}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertIs(blob.bucket, bucket)
self.assertEqual(blob.name, BLOB_NAME)
self.assertEqual(blob._properties, properties)
self.assertFalse(blob._acl.loaded)
self.assertIs(blob._acl.blob, blob)
self.assertEqual(blob._encryption_key, None)
def test_ctor_with_encoded_unicode(self):
blob_name = b'wet \xe2\x9b\xb5'
blob = self._make_one(blob_name, bucket=None)
unicode_name = u'wet \N{sailboat}'
self.assertNotIsInstance(blob.name, bytes)
self.assertIsInstance(blob.name, six.text_type)
self.assertEqual(blob.name, unicode_name)
def test_ctor_w_encryption_key(self):
KEY = b'01234567890123456789012345678901' # 32 bytes
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket, encryption_key=KEY)
self.assertEqual(blob._encryption_key, KEY)
def test_chunk_size_ctor(self):
from google.cloud.storage.blob import Blob
BLOB_NAME = 'blob-name'
BUCKET = object()
chunk_size = 10 * Blob._CHUNK_SIZE_MULTIPLE
blob = self._make_one(BLOB_NAME, bucket=BUCKET, chunk_size=chunk_size)
self.assertEqual(blob._chunk_size, chunk_size)
def test_chunk_size_getter(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob.chunk_size)
VALUE = object()
blob._chunk_size = VALUE
self.assertIs(blob.chunk_size, VALUE)
def test_chunk_size_setter(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
blob.chunk_size = 20
self.assertEqual(blob._chunk_size, 20)
def test_chunk_size_setter_bad_value(self):
BLOB_NAME = 'blob-name'
BUCKET = object()
blob = self._make_one(BLOB_NAME, bucket=BUCKET)
self.assertIsNone(blob._chunk_size)
blob._CHUNK_SIZE_MULTIPLE = 10
with self.assertRaises(ValueError):
blob.chunk_size = 11
def test_acl_property(self):
from google.cloud.storage.acl import ObjectACL
fake_bucket = _Bucket()
blob = self._make_one(u'name', bucket=fake_bucket)
acl = blob.acl
self.assertIsInstance(acl, ObjectACL)
self.assertIs(acl, blob._acl)
def test_path_bad_bucket(self):
fake_bucket = object()
name = u'blob-name'
blob = self._make_one(name, bucket=fake_bucket)
self.assertRaises(AttributeError, getattr, blob, 'path')
def test_path_no_name(self):
bucket = _Bucket()
blob = self._make_one(u'', bucket=bucket)
self.assertRaises(ValueError, getattr, blob, 'path')
def test_path_normal(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/%s' % BLOB_NAME)
def test_path_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/parent%2Fchild')
def test_path_with_non_ascii(self):
blob_name = u'Caf\xe9'
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
self.assertEqual(blob.path, '/b/name/o/Caf%C3%A9')
def test_public_url(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(blob.public_url,
'https://storage.googleapis.com/name/%s' %
BLOB_NAME)
def test_public_url_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertEqual(
blob.public_url,
'https://storage.googleapis.com/name/parent%2Fchild')
def test_public_url_with_non_ascii(self):
blob_name = u'winter \N{snowman}'
bucket = _Bucket()
blob = self._make_one(blob_name, bucket=bucket)
expected_url = 'https://storage.googleapis.com/name/winter%20%E2%98%83'
self.assertEqual(blob.public_url, expected_url)
def _basic_generate_signed_url_helper(self, credentials=None):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_uri = blob.generate_signed_url(EXPIRATION,
credentials=credentials)
self.assertEqual(signed_uri, URI)
PATH = '/name/%s' % (BLOB_NAME,)
if credentials is None:
EXPECTED_ARGS = (_Connection.credentials,)
else:
EXPECTED_ARGS = (credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': PATH,
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_default_method(self):
self._basic_generate_signed_url_helper()
def test_generate_signed_url_w_content_type(self):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
CONTENT_TYPE = "text/html"
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_url = blob.generate_signed_url(EXPIRATION,
content_type=CONTENT_TYPE)
self.assertEqual(signed_url, URI)
PATH = '/name/%s' % (BLOB_NAME,)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': PATH,
'content_type': CONTENT_TYPE,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_credentials(self):
credentials = object()
self._basic_generate_signed_url_helper(credentials=credentials)
def test_generate_signed_url_w_slash_in_name(self):
BLOB_NAME = 'parent/child'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_url = blob.generate_signed_url(EXPIRATION)
self.assertEqual(signed_url, URI)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'GET',
'resource': '/name/parent%2Fchild',
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_generate_signed_url_w_method_arg(self):
BLOB_NAME = 'blob-name'
EXPIRATION = '2014-10-16T20:34:37.000Z'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
URI = ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=2014-10-16T20:34:37.000Z')
SIGNER = _Signer()
with mock.patch('google.cloud.storage.blob.generate_signed_url',
new=SIGNER):
signed_uri = blob.generate_signed_url(EXPIRATION, method='POST')
self.assertEqual(signed_uri, URI)
PATH = '/name/%s' % (BLOB_NAME,)
EXPECTED_ARGS = (_Connection.credentials,)
EXPECTED_KWARGS = {
'api_access_endpoint': 'https://storage.googleapis.com',
'expiration': EXPIRATION,
'method': 'POST',
'resource': PATH,
'content_type': None,
'response_type': None,
'response_disposition': None,
'generation': None,
}
self.assertEqual(SIGNER._signed, [(EXPECTED_ARGS, EXPECTED_KWARGS)])
def test_exists_miss(self):
NONESUCH = 'nonesuch'
not_found_response = ({'status': http_client.NOT_FOUND}, b'')
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(NONESUCH, bucket=bucket)
self.assertFalse(blob.exists())
def test_exists_hit(self):
BLOB_NAME = 'blob-name'
found_response = ({'status': http_client.OK}, b'')
connection = _Connection(found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
self.assertTrue(blob.exists())
def test_delete(self):
BLOB_NAME = 'blob-name'
not_found_response = ({'status': http_client.NOT_FOUND}, b'')
connection = _Connection(not_found_response)
client = _Client(connection)
bucket = _Bucket(client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
bucket._blobs[BLOB_NAME] = 1
blob.delete()
self.assertFalse(blob.exists())
self.assertEqual(bucket._deleted, [(BLOB_NAME, None)])
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test__make_transport(self, fake_session_factory):
client = mock.Mock(spec=[u'_credentials'])
blob = self._make_one(u'blob-name', bucket=None)
transport = blob._make_transport(client)
self.assertIs(transport, fake_session_factory.return_value)
fake_session_factory.assert_called_once_with(client._credentials)
def test__get_download_url_with_media_link(self):
blob_name = 'something.txt'
bucket = mock.Mock(spec=[])
blob = self._make_one(blob_name, bucket=bucket)
media_link = 'http://test.invalid'
# Set the media link on the blob
blob._properties['mediaLink'] = media_link
download_url = blob._get_download_url()
self.assertEqual(download_url, media_link)
def test__get_download_url_on_the_fly(self):
blob_name = 'bzzz-fly.txt'
bucket = mock.Mock(path='/b/buhkit', spec=['path'])
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.media_link)
download_url = blob._get_download_url()
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'buhkit/o/bzzz-fly.txt?alt=media')
self.assertEqual(download_url, expected_url)
def test__get_download_url_on_the_fly_with_generation(self):
blob_name = 'pretend.txt'
bucket = mock.Mock(path='/b/fictional', spec=['path'])
blob = self._make_one(blob_name, bucket=bucket)
generation = 1493058489532987
# Set the media link on the blob
blob._properties['generation'] = str(generation)
self.assertIsNone(blob.media_link)
download_url = blob._get_download_url()
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'fictional/o/pretend.txt?alt=media&generation=1493058489532987')
self.assertEqual(download_url, expected_url)
@staticmethod
def _mock_requests_response(status_code, headers, content=b''):
import requests
response = requests.Response()
response.status_code = status_code
response.headers.update(headers)
response._content = content
response.request = requests.Request(
'POST', 'http://example.com').prepare()
return response
def _mock_download_transport(self):
fake_transport = mock.Mock(spec=['request'])
# Give the transport two fake responses.
chunk1_response = self._mock_requests_response(
http_client.PARTIAL_CONTENT,
{'content-length': '3', 'content-range': 'bytes 0-2/6'},
content=b'abc')
chunk2_response = self._mock_requests_response(
http_client.PARTIAL_CONTENT,
{'content-length': '3', 'content-range': 'bytes 3-5/6'},
content=b'def')
fake_transport.request.side_effect = [chunk1_response, chunk2_response]
return fake_transport
def _check_session_mocks(self, client, fake_session_factory,
expected_url, headers=None):
# Check that exactly one transport was created.
fake_session_factory.assert_called_once_with(client._credentials)
fake_transport = fake_session_factory.return_value
# Check that the transport was called exactly twice.
self.assertEqual(fake_transport.request.call_count, 2)
if headers is None:
headers = {}
# NOTE: bytes=0-2 never shows up because the mock was called with
# **MUTABLE** headers and it was mutated before the
# second request.
headers['range'] = 'bytes=3-5'
call = mock.call(
'GET', expected_url, data=None, headers=headers)
self.assertEqual(fake_transport.request.mock_calls, [call, call])
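    # Editor-added illustration of the "MUTABLE headers" note above: the mock
    # records a reference to the same dict for every call, so an in-place
    # mutation between requests shows up in every recorded call.
    #
    #   headers = {'range': 'bytes=0-2'}
    #   recorded = [headers]
    #   headers['range'] = 'bytes=3-5'
    #   assert recorded[0]['range'] == 'bytes=3-5'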
def test__do_download_simple(self):
blob_name = 'blob-name'
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
# Make sure this will not be chunked.
self.assertIsNone(blob.chunk_size)
transport = mock.Mock(spec=['request'])
transport.request.return_value = self._mock_requests_response(
http_client.OK,
{'content-length': '6', 'content-range': 'bytes 0-5/6'},
content=b'abcdef')
file_obj = io.BytesIO()
download_url = 'http://test.invalid'
headers = {}
blob._do_download(transport, file_obj, download_url, headers)
# Make sure the download was as expected.
self.assertEqual(file_obj.getvalue(), b'abcdef')
transport.request.assert_called_once_with(
'GET', download_url, data=None, headers=headers)
def test__do_download_chunked(self):
blob_name = 'blob-name'
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
        # Modify the blob so there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
transport = self._mock_download_transport()
file_obj = io.BytesIO()
download_url = 'http://test.invalid'
headers = {}
blob._do_download(transport, file_obj, download_url, headers)
# Make sure the download was as expected.
self.assertEqual(file_obj.getvalue(), b'abcdef')
# Check that the transport was called exactly twice.
self.assertEqual(transport.request.call_count, 2)
# ``headers`` was modified (in place) once for each API call.
self.assertEqual(headers, {'range': 'bytes=3-5'})
call = mock.call(
'GET', download_url, data=None, headers=headers)
self.assertEqual(transport.request.mock_calls, [call, call])
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_file_with_failure(self, fake_session_factory):
from google.cloud import exceptions
blob_name = 'blob-name'
transport = mock.Mock(spec=['request'])
bad_response_headers = {
'Content-Length': '9',
'Content-Type': 'text/html; charset=UTF-8',
}
transport.request.return_value = self._mock_requests_response(
http_client.NOT_FOUND, bad_response_headers, content=b'Not found')
fake_session_factory.return_value = transport
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
# Set the media link on the blob
blob._properties['mediaLink'] = 'http://test.invalid'
file_obj = io.BytesIO()
with self.assertRaises(exceptions.NotFound):
blob.download_to_file(file_obj)
self.assertEqual(file_obj.tell(), 0)
# Check that exactly one transport was created.
fake_session_factory.assert_called_once_with(client._credentials)
# Check that the transport was called once.
transport.request.assert_called_once_with(
'GET', blob.media_link, data=None, headers={})
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_file_wo_media_link(self, fake_session_factory):
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
blob = self._make_one(blob_name, bucket=bucket)
        # Modify the blob so there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
file_obj = io.BytesIO()
blob.download_to_file(file_obj)
self.assertEqual(file_obj.getvalue(), b'abcdef')
# Make sure the media link is still unknown.
self.assertIsNone(blob.media_link)
expected_url = (
'https://www.googleapis.com/download/storage/v1/b/'
'name/o/blob-name?alt=media')
self._check_session_mocks(client, fake_session_factory, expected_url)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def _download_to_file_helper(self, fake_session_factory, use_chunks=False):
blob_name = 'blob-name'
fake_transport = self._mock_download_transport()
fake_session_factory.return_value = fake_transport
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
if use_chunks:
            # Modify the blob so there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
else:
# Modify the response.
single_chunk_response = self._mock_requests_response(
http_client.OK,
{'content-length': '6', 'content-range': 'bytes 0-5/6'},
content=b'abcdef')
fake_transport.request.side_effect = [single_chunk_response]
file_obj = io.BytesIO()
blob.download_to_file(file_obj)
self.assertEqual(file_obj.getvalue(), b'abcdef')
if use_chunks:
self._check_session_mocks(client, fake_session_factory, media_link)
else:
# Check that exactly one transport was created.
fake_session_factory.assert_called_once_with(client._credentials)
fake_transport.request.assert_called_once_with(
'GET', media_link, data=None, headers={})
def test_download_to_file_default(self):
self._download_to_file_helper()
def test_download_to_file_with_chunk_size(self):
self._download_to_file_helper(use_chunks=True)
def _download_to_filename_helper(self, fake_session_factory, updated=None):
import os
import time
from google.cloud._testing import _NamedTemporaryFile
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
if updated is not None:
properties['updated'] = updated
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        # Modify the blob so there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name)
with open(temp.name, 'rb') as file_obj:
wrote = file_obj.read()
if updated is None:
self.assertIsNone(blob.updated)
else:
mtime = os.path.getmtime(temp.name)
updated_time = time.mktime(blob.updated.timetuple())
self.assertEqual(mtime, updated_time)
self.assertEqual(wrote, b'abcdef')
self._check_session_mocks(client, fake_session_factory, media_link)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename(self, fake_session_factory):
updated = '2014-12-06T13:13:50.690Z'
self._download_to_filename_helper(
fake_session_factory, updated=updated)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename_wo_updated(self, fake_session_factory):
self._download_to_filename_helper(fake_session_factory)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_to_filename_w_key(self, fake_session_factory):
import os
import time
from google.cloud._testing import _NamedTemporaryFile
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link,
'updated': '2014-12-06T13:13:50.690Z'}
key = b'aa426195405adee2c8081bb9e7e74b19'
blob = self._make_one(
blob_name, bucket=bucket, properties=properties, encryption_key=key)
        # Modify the blob so there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
with _NamedTemporaryFile() as temp:
blob.download_to_filename(temp.name)
with open(temp.name, 'rb') as file_obj:
wrote = file_obj.read()
mtime = os.path.getmtime(temp.name)
updated_time = time.mktime(blob.updated.timetuple())
self.assertEqual(wrote, b'abcdef')
self.assertEqual(mtime, updated_time)
header_key_value = 'YWE0MjYxOTU0MDVhZGVlMmM4MDgxYmI5ZTdlNzRiMTk='
header_key_hash_value = 'V3Kwe46nKc3xLv96+iJ707YfZfFvlObta8TQcx2gpm0='
key_headers = {
'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
'X-Goog-Encryption-Algorithm': 'AES256',
'X-Goog-Encryption-Key': header_key_value,
}
self._check_session_mocks(
client, fake_session_factory, media_link, headers=key_headers)
@mock.patch('google.auth.transport.requests.AuthorizedSession')
def test_download_as_string(self, fake_session_factory):
blob_name = 'blob-name'
fake_session_factory.return_value = self._mock_download_transport()
# Create a fake client/bucket and use them in the Blob() constructor.
client = mock.Mock(
_credentials=_make_credentials(), spec=['_credentials'])
bucket = _Bucket(client)
media_link = 'http://example.com/media/'
properties = {'mediaLink': media_link}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
        # Modify the blob so there will be 2 chunks of size 3.
blob._CHUNK_SIZE_MULTIPLE = 1
blob.chunk_size = 3
fetched = blob.download_as_string()
self.assertEqual(fetched, b'abcdef')
self._check_session_mocks(client, fake_session_factory, media_link)
def test__get_content_type_explicit(self):
blob = self._make_one(u'blob-name', bucket=None)
content_type = u'text/plain'
return_value = blob._get_content_type(content_type)
self.assertEqual(return_value, content_type)
def test__get_content_type_from_blob(self):
blob = self._make_one(u'blob-name', bucket=None)
blob.content_type = u'video/mp4'
return_value = blob._get_content_type(None)
self.assertEqual(return_value, blob.content_type)
def test__get_content_type_from_filename(self):
blob = self._make_one(u'blob-name', bucket=None)
return_value = blob._get_content_type(None, filename='archive.tar')
self.assertEqual(return_value, 'application/x-tar')
def test__get_content_type_default(self):
blob = self._make_one(u'blob-name', bucket=None)
return_value = blob._get_content_type(None)
self.assertEqual(return_value, u'application/octet-stream')
def test__get_writable_metadata_no_changes(self):
name = u'blob-name'
blob = self._make_one(name, bucket=None)
object_metadata = blob._get_writable_metadata()
expected = {'name': name}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_with_changes(self):
name = u'blob-name'
blob = self._make_one(name, bucket=None)
blob.storage_class = 'NEARLINE'
blob.cache_control = 'max-age=3600'
blob.metadata = {'color': 'red'}
object_metadata = blob._get_writable_metadata()
expected = {
'cacheControl': blob.cache_control,
'metadata': blob.metadata,
'name': name,
'storageClass': blob.storage_class,
}
self.assertEqual(object_metadata, expected)
def test__get_writable_metadata_unwritable_field(self):
name = u'blob-name'
properties = {'updated': '2016-10-16T18:18:18.181Z'}
blob = self._make_one(name, bucket=None, properties=properties)
# Fake that `updated` is in changes.
blob._changes.add('updated')
object_metadata = blob._get_writable_metadata()
expected = {'name': name}
self.assertEqual(object_metadata, expected)
def test__get_upload_arguments(self):
name = u'blob-name'
key = b'[pXw@,p@@AfBfrR3x-2b2SCHR,.?YwRO'
blob = self._make_one(name, bucket=None, encryption_key=key)
blob.content_disposition = 'inline'
content_type = u'image/jpeg'
info = blob._get_upload_arguments(content_type)
headers, object_metadata, new_content_type = info
header_key_value = 'W3BYd0AscEBAQWZCZnJSM3gtMmIyU0NIUiwuP1l3Uk8='
header_key_hash_value = 'G0++dxF4q5rG4o9kE8gvEKn15RH6wLm0wXV1MgAlXOg='
expected_headers = {
'X-Goog-Encryption-Algorithm': 'AES256',
'X-Goog-Encryption-Key': header_key_value,
'X-Goog-Encryption-Key-Sha256': header_key_hash_value,
}
self.assertEqual(headers, expected_headers)
expected_metadata = {
'contentDisposition': blob.content_disposition,
'name': name,
}
self.assertEqual(object_metadata, expected_metadata)
self.assertEqual(new_content_type, content_type)
def _mock_transport(self, status_code, headers, content=b''):
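        # Helper: builds a fake transport whose request() always returns a
        # canned response with the given status code, headers and body.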
fake_transport = mock.Mock(spec=['request'])
fake_response = self._mock_requests_response(
status_code, headers, content=content)
fake_transport.request.return_value = fake_response
return fake_transport
def _do_multipart_success(self, mock_get_boundary, size=None,
num_retries=None):
bucket = mock.Mock(path='/b/w00t', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
self.assertIsNone(blob.chunk_size)
        # Create transport mocks whose calls are checked below.
fake_transport = self._mock_transport(http_client.OK, {})
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
# Create some mock arguments.
client = mock.sentinel.client
data = b'data here hear hier'
stream = io.BytesIO(data)
content_type = u'application/xml'
response = blob._do_multipart_upload(
client, stream, content_type, size, num_retries)
# Check the mocks and the returned value.
self.assertIs(response, fake_transport.request.return_value)
if size is None:
data_read = data
self.assertEqual(stream.tell(), len(data))
else:
data_read = data[:size]
self.assertEqual(stream.tell(), size)
blob._make_transport.assert_called_once_with(client)
mock_get_boundary.assert_called_once_with()
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=multipart')
payload = (
b'--==0==\r\n' +
b'content-type: application/json; charset=UTF-8\r\n\r\n' +
b'{"name": "blob-name"}\r\n' +
b'--==0==\r\n' +
b'content-type: application/xml\r\n\r\n' +
data_read +
b'\r\n--==0==--')
headers = {'content-type': b'multipart/related; boundary="==0=="'}
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=headers)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_no_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_with_size(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, size=10)
@mock.patch(u'google.resumable_media._upload.get_boundary',
return_value=b'==0==')
def test__do_multipart_upload_with_retry(self, mock_get_boundary):
self._do_multipart_success(mock_get_boundary, num_retries=8)
def test__do_multipart_upload_bad_size(self):
blob = self._make_one(u'blob-name', bucket=None)
data = b'data here hear hier'
stream = io.BytesIO(data)
size = 50
self.assertGreater(size, len(data))
with self.assertRaises(ValueError) as exc_info:
blob._do_multipart_upload(None, stream, None, size, None)
exc_contents = str(exc_info.exception)
self.assertIn(
'was specified but the file-like object only had', exc_contents)
self.assertEqual(stream.tell(), len(data))
def _initiate_resumable_helper(self, size=None, extra_headers=None,
chunk_size=None, num_retries=None):
from google.resumable_media.requests import ResumableUpload
bucket = mock.Mock(path='/b/whammy', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
blob.metadata = {'rook': 'takes knight'}
blob.chunk_size = 3 * blob._CHUNK_SIZE_MULTIPLE
self.assertIsNotNone(blob.chunk_size)
# Need to make sure **same** dict is used because ``json.dumps()``
# will depend on the hash order.
object_metadata = blob._get_writable_metadata()
blob._get_writable_metadata = mock.Mock(
return_value=object_metadata, spec=[])
        # Create transport mocks whose calls are checked below.
resumable_url = 'http://test.invalid?upload_id=hey-you'
response_headers = {'location': resumable_url}
fake_transport = self._mock_transport(
http_client.OK, response_headers)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
# Create some mock arguments and call the method under test.
client = mock.sentinel.client
data = b'hello hallo halo hi-low'
stream = io.BytesIO(data)
content_type = u'text/plain'
upload, transport = blob._initiate_resumable_upload(
client, stream, content_type, size, num_retries,
extra_headers=extra_headers, chunk_size=chunk_size)
# Check the returned values.
self.assertIsInstance(upload, ResumableUpload)
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=resumable')
self.assertEqual(upload.upload_url, upload_url)
if extra_headers is None:
self.assertEqual(upload._headers, {})
else:
self.assertEqual(upload._headers, extra_headers)
self.assertIsNot(upload._headers, extra_headers)
self.assertFalse(upload.finished)
if chunk_size is None:
self.assertEqual(upload._chunk_size, blob.chunk_size)
else:
self.assertNotEqual(blob.chunk_size, chunk_size)
self.assertEqual(upload._chunk_size, chunk_size)
self.assertIs(upload._stream, stream)
if size is None:
self.assertIsNone(upload._total_bytes)
else:
self.assertEqual(upload._total_bytes, size)
self.assertEqual(upload._content_type, content_type)
self.assertEqual(upload.resumable_url, resumable_url)
retry_strategy = upload._retry_strategy
self.assertEqual(retry_strategy.max_sleep, 64.0)
if num_retries is None:
self.assertEqual(retry_strategy.max_cumulative_retry, 600.0)
self.assertIsNone(retry_strategy.max_retries)
else:
self.assertIsNone(retry_strategy.max_cumulative_retry)
self.assertEqual(retry_strategy.max_retries, num_retries)
self.assertIs(transport, fake_transport)
# Make sure we never read from the stream.
self.assertEqual(stream.tell(), 0)
# Check the mocks.
blob._get_writable_metadata.assert_called_once_with()
blob._make_transport.assert_called_once_with(client)
payload = json.dumps(object_metadata).encode('utf-8')
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-type': content_type,
}
if size is not None:
expected_headers['x-upload-content-length'] = str(size)
if extra_headers is not None:
expected_headers.update(extra_headers)
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=expected_headers)
def test__initiate_resumable_upload_no_size(self):
self._initiate_resumable_helper()
def test__initiate_resumable_upload_with_size(self):
self._initiate_resumable_helper(size=10000)
def test__initiate_resumable_upload_with_chunk_size(self):
one_mb = 1048576
self._initiate_resumable_helper(chunk_size=one_mb)
def test__initiate_resumable_upload_with_extra_headers(self):
extra_headers = {'origin': 'http://not-in-kansas-anymore.invalid'}
self._initiate_resumable_helper(extra_headers=extra_headers)
def test__initiate_resumable_upload_with_retry(self):
self._initiate_resumable_helper(num_retries=11)
def _make_resumable_transport(self, headers1, headers2,
headers3, total_bytes):
from google import resumable_media
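        # Simulate the three HTTP exchanges of a resumable upload: initiation
        # (200 OK), an intermediate chunk (308 PERMANENT_REDIRECT) and the
        # final chunk (200 OK with the object size in the JSON body).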
fake_transport = mock.Mock(spec=['request'])
fake_response1 = self._mock_requests_response(
http_client.OK, headers1)
fake_response2 = self._mock_requests_response(
resumable_media.PERMANENT_REDIRECT, headers2)
json_body = '{{"size": "{:d}"}}'.format(total_bytes)
fake_response3 = self._mock_requests_response(
http_client.OK, headers3,
content=json_body.encode('utf-8'))
responses = [fake_response1, fake_response2, fake_response3]
fake_transport.request.side_effect = responses
return fake_transport, responses
@staticmethod
def _do_resumable_upload_call0(blob, content_type, size=None):
        # The first mock transport.request() call initiates the upload.
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
blob.bucket.path +
'/o?uploadType=resumable')
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-type': content_type,
}
if size is not None:
expected_headers['x-upload-content-length'] = str(size)
payload = json.dumps({'name': blob.name}).encode('utf-8')
return mock.call(
'POST', upload_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call1(blob, content_type, data,
resumable_url, size=None):
        # The second mock transport.request() call sends the first chunk.
if size is None:
content_range = 'bytes 0-{:d}/*'.format(blob.chunk_size - 1)
else:
content_range = 'bytes 0-{:d}/{:d}'.format(
blob.chunk_size - 1, size)
expected_headers = {
'content-type': content_type,
'content-range': content_range,
}
payload = data[:blob.chunk_size]
return mock.call(
'PUT', resumable_url, data=payload, headers=expected_headers)
@staticmethod
def _do_resumable_upload_call2(blob, content_type, data,
resumable_url, total_bytes):
        # The third mock transport.request() call sends the last chunk.
content_range = 'bytes {:d}-{:d}/{:d}'.format(
blob.chunk_size, total_bytes - 1, total_bytes)
expected_headers = {
'content-type': content_type,
'content-range': content_range,
}
payload = data[blob.chunk_size:]
return mock.call(
'PUT', resumable_url, data=payload, headers=expected_headers)
def _do_resumable_helper(self, use_size=False, num_retries=None):
bucket = mock.Mock(path='/b/yesterday', spec=[u'path'])
blob = self._make_one(u'blob-name', bucket=bucket)
blob.chunk_size = blob._CHUNK_SIZE_MULTIPLE
self.assertIsNotNone(blob.chunk_size)
# Data to be uploaded.
data = b'<html>' + (b'A' * blob.chunk_size) + b'</html>'
total_bytes = len(data)
if use_size:
size = total_bytes
else:
size = None
        # Create transport mocks whose calls are checked below.
resumable_url = 'http://test.invalid?upload_id=and-then-there-was-1'
headers1 = {'location': resumable_url}
headers2 = {'range': 'bytes=0-{:d}'.format(blob.chunk_size - 1)}
fake_transport, responses = self._make_resumable_transport(
headers1, headers2, {}, total_bytes)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
# Create some mock arguments and call the method under test.
client = mock.sentinel.client
stream = io.BytesIO(data)
content_type = u'text/html'
response = blob._do_resumable_upload(
client, stream, content_type, size, num_retries)
# Check the returned values.
self.assertIs(response, responses[2])
self.assertEqual(stream.tell(), total_bytes)
# Check the mocks.
blob._make_transport.assert_called_once_with(client)
call0 = self._do_resumable_upload_call0(blob, content_type, size=size)
call1 = self._do_resumable_upload_call1(
blob, content_type, data, resumable_url, size=size)
call2 = self._do_resumable_upload_call2(
blob, content_type, data, resumable_url, total_bytes)
self.assertEqual(
fake_transport.request.mock_calls, [call0, call1, call2])
def test__do_resumable_upload_no_size(self):
self._do_resumable_helper()
def test__do_resumable_upload_with_size(self):
self._do_resumable_helper(use_size=True)
def test__do_resumable_upload_with_retry(self):
self._do_resumable_helper(num_retries=6)
def _do_upload_helper(self, chunk_size=None, num_retries=None):
blob = self._make_one(u'blob-name', bucket=None)
# Create a fake response.
response = mock.Mock(spec=[u'json'])
response.json.return_value = mock.sentinel.json
# Mock **both** helpers.
blob._do_multipart_upload = mock.Mock(return_value=response, spec=[])
blob._do_resumable_upload = mock.Mock(return_value=response, spec=[])
if chunk_size is None:
self.assertIsNone(blob.chunk_size)
else:
blob.chunk_size = chunk_size
self.assertIsNotNone(blob.chunk_size)
client = mock.sentinel.client
stream = mock.sentinel.stream
content_type = u'video/mp4'
size = 12345654321
# Make the request and check the mocks.
created_json = blob._do_upload(
client, stream, content_type, size, num_retries)
self.assertIs(created_json, mock.sentinel.json)
response.json.assert_called_once_with()
if chunk_size is None:
blob._do_multipart_upload.assert_called_once_with(
client, stream, content_type, size, num_retries)
blob._do_resumable_upload.assert_not_called()
else:
blob._do_multipart_upload.assert_not_called()
blob._do_resumable_upload.assert_called_once_with(
client, stream, content_type, size, num_retries)
def test__do_upload_without_chunk_size(self):
self._do_upload_helper()
def test__do_upload_with_chunk_size(self):
chunk_size = 1024 * 1024 * 1024 # 1GB
self._do_upload_helper(chunk_size=chunk_size)
def test__do_upload_with_retry(self):
self._do_upload_helper(num_retries=20)
def _upload_from_file_helper(self, side_effect=None, **kwargs):
from google.cloud._helpers import UTC
blob = self._make_one('blob-name', bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {'updated': '2017-01-01T09:09:09.081Z'}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
if side_effect is not None:
blob._do_upload.side_effect = side_effect
# Make sure `updated` is empty before the request.
self.assertIsNone(blob.updated)
data = b'data is here'
stream = io.BytesIO(data)
stream.seek(2) # Not at zero.
content_type = u'font/woff'
client = mock.sentinel.client
ret_val = blob.upload_from_file(
stream, size=len(data), content_type=content_type,
client=client, **kwargs)
# Check the response and side-effects.
self.assertIsNone(ret_val)
new_updated = datetime.datetime(
2017, 1, 1, 9, 9, 9, 81000, tzinfo=UTC)
self.assertEqual(blob.updated, new_updated)
# Check the mock.
num_retries = kwargs.get('num_retries')
blob._do_upload.assert_called_once_with(
client, stream, content_type, len(data), num_retries)
return stream
def test_upload_from_file_success(self):
stream = self._upload_from_file_helper()
assert stream.tell() == 2
@mock.patch('warnings.warn')
def test_upload_from_file_with_retries(self, mock_warn):
from google.cloud.storage import blob as blob_module
self._upload_from_file_helper(num_retries=20)
mock_warn.assert_called_once_with(
blob_module._NUM_RETRIES_MESSAGE, DeprecationWarning)
def test_upload_from_file_with_rewind(self):
stream = self._upload_from_file_helper(rewind=True)
assert stream.tell() == 0
def test_upload_from_file_failure(self):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = b'Someone is already in this spot.'
response = requests.Response()
response._content = message
response.status_code = http_client.CONFLICT
response.request = requests.Request(
'POST', 'http://example.com').prepare()
side_effect = InvalidResponse(response)
with self.assertRaises(exceptions.Conflict) as exc_info:
self._upload_from_file_helper(side_effect=side_effect)
self.assertIn(message.decode('utf-8'), exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
def _do_upload_mock_call_helper(self, blob, client, content_type, size):
self.assertEqual(blob._do_upload.call_count, 1)
mock_call = blob._do_upload.mock_calls[0]
call_name, pos_args, kwargs = mock_call
self.assertEqual(call_name, '')
self.assertEqual(len(pos_args), 5)
self.assertEqual(pos_args[0], client)
self.assertEqual(pos_args[2], content_type)
self.assertEqual(pos_args[3], size)
self.assertIsNone(pos_args[4]) # num_retries
self.assertEqual(kwargs, {})
return pos_args[1]
def test_upload_from_filename(self):
from google.cloud._testing import _NamedTemporaryFile
blob = self._make_one('blob-name', bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {'metadata': {'mint': 'ice-cream'}}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
# Make sure `metadata` is empty before the request.
self.assertIsNone(blob.metadata)
data = b'soooo much data'
content_type = u'image/svg+xml'
client = mock.sentinel.client
with _NamedTemporaryFile() as temp:
with open(temp.name, 'wb') as file_obj:
file_obj.write(data)
ret_val = blob.upload_from_filename(
temp.name, content_type=content_type, client=client)
# Check the response and side-effects.
self.assertIsNone(ret_val)
self.assertEqual(blob.metadata, created_json['metadata'])
# Check the mock.
stream = self._do_upload_mock_call_helper(
blob, client, content_type, len(data))
self.assertTrue(stream.closed)
self.assertEqual(stream.mode, 'rb')
self.assertEqual(stream.name, temp.name)
def _upload_from_string_helper(self, data, **kwargs):
from google.cloud._helpers import _to_bytes
blob = self._make_one('blob-name', bucket=None)
# Mock low-level upload helper on blob (it is tested elsewhere).
created_json = {'componentCount': '5'}
blob._do_upload = mock.Mock(return_value=created_json, spec=[])
        # Make sure `component_count` is empty before the request.
self.assertIsNone(blob.component_count)
client = mock.sentinel.client
ret_val = blob.upload_from_string(data, client=client, **kwargs)
# Check the response and side-effects.
self.assertIsNone(ret_val)
self.assertEqual(blob.component_count, 5)
# Check the mock.
payload = _to_bytes(data, encoding='utf-8')
stream = self._do_upload_mock_call_helper(
blob, client, 'text/plain', len(payload))
self.assertIsInstance(stream, io.BytesIO)
self.assertEqual(stream.getvalue(), payload)
def test_upload_from_string_w_bytes(self):
data = b'XB]jb\xb8tad\xe0'
self._upload_from_string_helper(data)
def test_upload_from_string_w_text(self):
data = u'\N{snowman} \N{sailboat}'
self._upload_from_string_helper(data)
def _create_resumable_upload_session_helper(self, origin=None,
side_effect=None):
bucket = mock.Mock(path='/b/alex-trebek', spec=[u'path'])
blob = self._make_one('blob-name', bucket=bucket)
chunk_size = 99 * blob._CHUNK_SIZE_MULTIPLE
blob.chunk_size = chunk_size
        # Create transport mocks whose calls are checked below.
resumable_url = 'http://test.invalid?upload_id=clean-up-everybody'
response_headers = {'location': resumable_url}
fake_transport = self._mock_transport(
http_client.OK, response_headers)
blob._make_transport = mock.Mock(return_value=fake_transport, spec=[])
if side_effect is not None:
fake_transport.request.side_effect = side_effect
# Create some mock arguments and call the method under test.
content_type = u'text/plain'
size = 10000
client = mock.sentinel.client
new_url = blob.create_resumable_upload_session(
content_type=content_type, size=size,
origin=origin, client=client)
# Check the returned value and (lack of) side-effect.
self.assertEqual(new_url, resumable_url)
self.assertEqual(blob.chunk_size, chunk_size)
# Check the mocks.
blob._make_transport.assert_called_once_with(client)
upload_url = (
'https://www.googleapis.com/upload/storage/v1' +
bucket.path +
'/o?uploadType=resumable')
payload = b'{"name": "blob-name"}'
expected_headers = {
'content-type': 'application/json; charset=UTF-8',
'x-upload-content-length': str(size),
'x-upload-content-type': content_type,
}
if origin is not None:
expected_headers['Origin'] = origin
fake_transport.request.assert_called_once_with(
'POST', upload_url, data=payload, headers=expected_headers)
def test_create_resumable_upload_session(self):
self._create_resumable_upload_session_helper()
def test_create_resumable_upload_session_with_origin(self):
self._create_resumable_upload_session_helper(
origin='http://google.com')
def test_create_resumable_upload_session_with_failure(self):
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
message = b'5-oh-3 woe is me.'
response = self._mock_requests_response(
content=message, status_code=http_client.SERVICE_UNAVAILABLE,
headers={})
side_effect = InvalidResponse(response)
with self.assertRaises(exceptions.ServiceUnavailable) as exc_info:
self._create_resumable_upload_session_helper(
side_effect=side_effect)
self.assertIn(message.decode('utf-8'), exc_info.exception.message)
self.assertEqual(exc_info.exception.errors, [])
def test_get_iam_policy(self):
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.cloud.iam import Policy
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
ETAG = 'DEADBEEF'
VERSION = 17
OWNER1 = 'user:[email protected]'
OWNER2 = 'group:[email protected]'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:[email protected]'
VIEWER1 = 'serviceAccount:[email protected]'
VIEWER2 = 'user:[email protected]'
RETURNED = {
'resourceId': PATH,
'etag': ETAG,
'version': VERSION,
'bindings': [
{'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
],
}
after = ({'status': http_client.OK}, RETURNED)
EXPECTED = {
binding['role']: set(binding['members'])
for binding in RETURNED['bindings']}
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
policy = blob.get_iam_policy()
self.assertIsInstance(policy, Policy)
self.assertEqual(policy.etag, RETURNED['etag'])
self.assertEqual(policy.version, RETURNED['version'])
self.assertEqual(dict(policy), EXPECTED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
def test_set_iam_policy(self):
import operator
from google.cloud.storage.iam import STORAGE_OWNER_ROLE
from google.cloud.storage.iam import STORAGE_EDITOR_ROLE
from google.cloud.storage.iam import STORAGE_VIEWER_ROLE
from google.cloud.iam import Policy
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
ETAG = 'DEADBEEF'
VERSION = 17
OWNER1 = 'user:[email protected]'
OWNER2 = 'group:[email protected]'
EDITOR1 = 'domain:google.com'
EDITOR2 = 'user:[email protected]'
VIEWER1 = 'serviceAccount:[email protected]'
VIEWER2 = 'user:[email protected]'
BINDINGS = [
{'role': STORAGE_OWNER_ROLE, 'members': [OWNER1, OWNER2]},
{'role': STORAGE_EDITOR_ROLE, 'members': [EDITOR1, EDITOR2]},
{'role': STORAGE_VIEWER_ROLE, 'members': [VIEWER1, VIEWER2]},
]
RETURNED = {
'etag': ETAG,
'version': VERSION,
'bindings': BINDINGS,
}
after = ({'status': http_client.OK}, RETURNED)
policy = Policy()
for binding in BINDINGS:
policy[binding['role']] = binding['members']
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
returned = blob.set_iam_policy(policy)
self.assertEqual(returned.etag, ETAG)
self.assertEqual(returned.version, VERSION)
self.assertEqual(dict(returned), dict(policy))
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PUT')
self.assertEqual(kw[0]['path'], '%s/iam' % (PATH,))
sent = kw[0]['data']
self.assertEqual(sent['resourceId'], PATH)
self.assertEqual(len(sent['bindings']), len(BINDINGS))
key = operator.itemgetter('role')
for found, expected in zip(
sorted(sent['bindings'], key=key),
sorted(BINDINGS, key=key)):
self.assertEqual(found['role'], expected['role'])
self.assertEqual(
sorted(found['members']), sorted(expected['members']))
def test_test_iam_permissions(self):
from google.cloud.storage.iam import STORAGE_OBJECTS_LIST
from google.cloud.storage.iam import STORAGE_BUCKETS_GET
from google.cloud.storage.iam import STORAGE_BUCKETS_UPDATE
BLOB_NAME = 'blob-name'
PATH = '/b/name/o/%s' % (BLOB_NAME,)
PERMISSIONS = [
STORAGE_OBJECTS_LIST,
STORAGE_BUCKETS_GET,
STORAGE_BUCKETS_UPDATE,
]
ALLOWED = PERMISSIONS[1:]
RETURNED = {'permissions': ALLOWED}
after = ({'status': http_client.OK}, RETURNED)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
allowed = blob.test_iam_permissions(PERMISSIONS)
self.assertEqual(allowed, ALLOWED)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'GET')
self.assertEqual(kw[0]['path'], '%s/iam/testPermissions' % (PATH,))
self.assertEqual(kw[0]['query_params'], {'permissions': PERMISSIONS})
def test_make_public(self):
from google.cloud.storage.acl import _ACLEntity
BLOB_NAME = 'blob-name'
permissive = [{'entity': 'allUsers', 'role': _ACLEntity.READER_ROLE}]
after = ({'status': http_client.OK}, {'acl': permissive})
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.acl.loaded = True
blob.make_public()
self.assertEqual(list(blob.acl), permissive)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'PATCH')
self.assertEqual(kw[0]['path'], '/b/name/o/%s' % BLOB_NAME)
self.assertEqual(kw[0]['data'], {'acl': permissive})
self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
def test_compose_wo_content_type_set(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
        DESTINATION = 'destination'
connection = _Connection()
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
with self.assertRaises(ValueError):
destination.compose(sources=[source_1, source_2])
def test_compose_minimal(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
        DESTINATION = 'destination'
RESOURCE = {
'etag': 'DEADBEEF'
}
after = ({'status': http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = 'text/plain'
destination.compose(sources=[source_1, source_2])
self.assertEqual(destination.etag, 'DEADBEEF')
SENT = {
'sourceObjects': [
{'name': source_1.name},
{'name': source_2.name},
],
'destination': {
'contentType': 'text/plain',
},
}
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
self.assertEqual(kw[0]['data'], SENT)
def test_compose_w_additional_property_changes(self):
SOURCE_1 = 'source-1'
SOURCE_2 = 'source-2'
        DESTINATION = 'destination'
RESOURCE = {
'etag': 'DEADBEEF'
}
after = ({'status': http_client.OK}, RESOURCE)
connection = _Connection(after)
client = _Client(connection)
bucket = _Bucket(client=client)
source_1 = self._make_one(SOURCE_1, bucket=bucket)
source_2 = self._make_one(SOURCE_2, bucket=bucket)
destination = self._make_one(DESTINATION, bucket=bucket)
destination.content_type = 'text/plain'
destination.content_language = 'en-US'
destination.metadata = {'my-key': 'my-value'}
destination.compose(sources=[source_1, source_2])
self.assertEqual(destination.etag, 'DEADBEEF')
SENT = {
'sourceObjects': [
{'name': source_1.name},
{'name': source_2.name},
],
'destination': {
'contentType': 'text/plain',
'contentLanguage': 'en-US',
'metadata': {
'my-key': 'my-value',
}
},
}
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
self.assertEqual(kw[0]['path'], '/b/name/o/%s/compose' % DESTINATION)
self.assertEqual(kw[0]['data'], SENT)
def test_rewrite_response_without_resource(self):
SOURCE_BLOB = 'source'
DEST_BLOB = 'dest'
DEST_BUCKET = 'other-bucket'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 33,
'objectSize': 42,
'done': False,
'rewriteToken': TOKEN,
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
def test_rewrite_other_bucket_other_name_no_encryption_partial(self):
SOURCE_BLOB = 'source'
DEST_BLOB = 'dest'
DEST_BUCKET = 'other-bucket'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 33,
'objectSize': 42,
'done': False,
'rewriteToken': TOKEN,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
source_bucket = _Bucket(client=client)
source_blob = self._make_one(SOURCE_BLOB, bucket=source_bucket)
dest_bucket = _Bucket(client=client, name=DEST_BUCKET)
dest_blob = self._make_one(DEST_BLOB, bucket=dest_bucket)
token, rewritten, size = dest_blob.rewrite(source_blob)
self.assertEqual(token, TOKEN)
self.assertEqual(rewritten, 33)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/%s/o/%s' % (
SOURCE_BLOB, DEST_BUCKET, DEST_BLOB)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Encryption-Key', headers)
self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
def test_rewrite_same_name_no_old_key_new_key_done(self):
import base64
import hashlib
KEY = b'01234567890123456789012345678901' # 32 bytes
KEY_B64 = base64.b64encode(KEY).rstrip().decode('ascii')
KEY_HASH = hashlib.sha256(KEY).digest()
KEY_HASH_B64 = base64.b64encode(KEY_HASH).rstrip().decode('ascii')
BLOB_NAME = 'blob'
RESPONSE = {
'totalBytesRewritten': 42,
'objectSize': 42,
'done': True,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
plain = self._make_one(BLOB_NAME, bucket=bucket)
encrypted = self._make_one(BLOB_NAME, bucket=bucket,
encryption_key=KEY)
token, rewritten, size = encrypted.rewrite(plain)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertEqual(headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(headers['X-Goog-Encryption-Key'], KEY_B64)
self.assertEqual(headers['X-Goog-Encryption-Key-Sha256'], KEY_HASH_B64)
def test_rewrite_same_name_no_key_new_key_w_token(self):
import base64
import hashlib
SOURCE_KEY = b'01234567890123456789012345678901' # 32 bytes
SOURCE_KEY_B64 = base64.b64encode(SOURCE_KEY).rstrip().decode('ascii')
SOURCE_KEY_HASH = hashlib.sha256(SOURCE_KEY).digest()
SOURCE_KEY_HASH_B64 = base64.b64encode(
SOURCE_KEY_HASH).rstrip().decode('ascii')
DEST_KEY = b'90123456789012345678901234567890' # 32 bytes
DEST_KEY_B64 = base64.b64encode(DEST_KEY).rstrip().decode('ascii')
DEST_KEY_HASH = hashlib.sha256(DEST_KEY).digest()
DEST_KEY_HASH_B64 = base64.b64encode(
DEST_KEY_HASH).rstrip().decode('ascii')
BLOB_NAME = 'blob'
TOKEN = 'TOKEN'
RESPONSE = {
'totalBytesRewritten': 42,
'objectSize': 42,
'done': True,
'resource': {'etag': 'DEADBEEF'},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
source = self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=SOURCE_KEY)
dest = self._make_one(BLOB_NAME, bucket=bucket,
encryption_key=DEST_KEY)
token, rewritten, size = dest.rewrite(source, token=TOKEN)
self.assertIsNone(token)
self.assertEqual(rewritten, 42)
self.assertEqual(size, 42)
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertEqual(kw[0]['query_params'], {'rewriteToken': TOKEN})
SENT = {}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key'], SOURCE_KEY_B64)
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
SOURCE_KEY_HASH_B64)
self.assertEqual(
headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Encryption-Key'], DEST_KEY_B64)
self.assertEqual(
headers['X-Goog-Encryption-Key-Sha256'], DEST_KEY_HASH_B64)
def test_update_storage_class_invalid(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
with self.assertRaises(ValueError):
blob.update_storage_class(u'BOGUS')
def test_update_storage_class_wo_encryption_key(self):
BLOB_NAME = 'blob-name'
STORAGE_CLASS = u'NEARLINE'
RESPONSE = {
'resource': {'storageClass': STORAGE_CLASS},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(BLOB_NAME, bucket=bucket)
blob.update_storage_class('NEARLINE')
self.assertEqual(blob.storage_class, 'NEARLINE')
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertNotIn('query_params', kw[0])
SENT = {'storageClass': STORAGE_CLASS}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
# Blob has no key, and therefore the relevant headers are not sent.
self.assertNotIn('X-Goog-Copy-Source-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key', headers)
self.assertNotIn('X-Goog-Copy-Source-Encryption-Key-Sha256', headers)
self.assertNotIn('X-Goog-Encryption-Algorithm', headers)
self.assertNotIn('X-Goog-Encryption-Key', headers)
self.assertNotIn('X-Goog-Encryption-Key-Sha256', headers)
def test_update_storage_class_w_encryption_key(self):
import base64
import hashlib
BLOB_NAME = 'blob-name'
BLOB_KEY = b'01234567890123456789012345678901' # 32 bytes
BLOB_KEY_B64 = base64.b64encode(BLOB_KEY).rstrip().decode('ascii')
BLOB_KEY_HASH = hashlib.sha256(BLOB_KEY).digest()
BLOB_KEY_HASH_B64 = base64.b64encode(
BLOB_KEY_HASH).rstrip().decode('ascii')
STORAGE_CLASS = u'NEARLINE'
RESPONSE = {
'resource': {'storageClass': STORAGE_CLASS},
}
response = ({'status': http_client.OK}, RESPONSE)
connection = _Connection(response)
client = _Client(connection)
bucket = _Bucket(client=client)
blob = self._make_one(
BLOB_NAME, bucket=bucket, encryption_key=BLOB_KEY)
blob.update_storage_class('NEARLINE')
self.assertEqual(blob.storage_class, 'NEARLINE')
kw = connection._requested
self.assertEqual(len(kw), 1)
self.assertEqual(kw[0]['method'], 'POST')
PATH = '/b/name/o/%s/rewriteTo/b/name/o/%s' % (BLOB_NAME, BLOB_NAME)
self.assertEqual(kw[0]['path'], PATH)
self.assertNotIn('query_params', kw[0])
SENT = {'storageClass': STORAGE_CLASS}
self.assertEqual(kw[0]['data'], SENT)
headers = {
key.title(): str(value) for key, value in kw[0]['headers'].items()}
# Blob has key, and therefore the relevant headers are sent.
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Copy-Source-Encryption-Key-Sha256'],
BLOB_KEY_HASH_B64)
self.assertEqual(
headers['X-Goog-Encryption-Algorithm'], 'AES256')
self.assertEqual(
headers['X-Goog-Encryption-Key'], BLOB_KEY_B64)
self.assertEqual(
headers['X-Goog-Encryption-Key-Sha256'], BLOB_KEY_HASH_B64)
def test_cache_control_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CACHE_CONTROL = 'no-cache'
properties = {'cacheControl': CACHE_CONTROL}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_cache_control_setter(self):
BLOB_NAME = 'blob-name'
CACHE_CONTROL = 'no-cache'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.cache_control)
blob.cache_control = CACHE_CONTROL
self.assertEqual(blob.cache_control, CACHE_CONTROL)
def test_component_count(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'componentCount': COMPONENT_COUNT})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_component_count_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.component_count)
def test_component_count_string_val(self):
BUCKET = object()
COMPONENT_COUNT = 42
blob = self._make_one(
'blob-name', bucket=BUCKET,
properties={'componentCount': str(COMPONENT_COUNT)})
self.assertEqual(blob.component_count, COMPONENT_COUNT)
def test_content_disposition_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
properties = {'contentDisposition': CONTENT_DISPOSITION}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_disposition_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_DISPOSITION = 'Attachment; filename=example.jpg'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_disposition)
blob.content_disposition = CONTENT_DISPOSITION
self.assertEqual(blob.content_disposition, CONTENT_DISPOSITION)
def test_content_encoding_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_ENCODING = 'gzip'
properties = {'contentEncoding': CONTENT_ENCODING}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_encoding_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_ENCODING = 'gzip'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_encoding)
blob.content_encoding = CONTENT_ENCODING
self.assertEqual(blob.content_encoding, CONTENT_ENCODING)
def test_content_language_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_LANGUAGE = 'pt-BR'
properties = {'contentLanguage': CONTENT_LANGUAGE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_language_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_LANGUAGE = 'pt-BR'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_language)
blob.content_language = CONTENT_LANGUAGE
self.assertEqual(blob.content_language, CONTENT_LANGUAGE)
def test_content_type_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CONTENT_TYPE = 'image/jpeg'
properties = {'contentType': CONTENT_TYPE}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_content_type_setter(self):
BLOB_NAME = 'blob-name'
CONTENT_TYPE = 'image/jpeg'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.content_type)
blob.content_type = CONTENT_TYPE
self.assertEqual(blob.content_type, CONTENT_TYPE)
def test_crc32c_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
CRC32C = 'DEADBEEF'
properties = {'crc32c': CRC32C}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.crc32c, CRC32C)
def test_crc32c_setter(self):
BLOB_NAME = 'blob-name'
CRC32C = 'DEADBEEF'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.crc32c)
blob.crc32c = CRC32C
self.assertEqual(blob.crc32c, CRC32C)
def test_etag(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ETAG = 'ETAG'
properties = {'etag': ETAG}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.etag, ETAG)
def test_generation(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'generation': GENERATION})
self.assertEqual(blob.generation, GENERATION)
def test_generation_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.generation)
def test_generation_string_val(self):
BUCKET = object()
GENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'generation': str(GENERATION)})
self.assertEqual(blob.generation, GENERATION)
def test_id(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
ID = 'ID'
properties = {'id': ID}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.id, ID)
def test_md5_hash_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MD5_HASH = 'DEADBEEF'
properties = {'md5Hash': MD5_HASH}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_md5_hash_setter(self):
BLOB_NAME = 'blob-name'
MD5_HASH = 'DEADBEEF'
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.md5_hash)
blob.md5_hash = MD5_HASH
self.assertEqual(blob.md5_hash, MD5_HASH)
def test_media_link(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
MEDIA_LINK = 'http://example.com/media/'
properties = {'mediaLink': MEDIA_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.media_link, MEDIA_LINK)
def test_metadata_getter(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
METADATA = {'foo': 'Foo'}
properties = {'metadata': METADATA}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.metadata, METADATA)
def test_metadata_setter(self):
BLOB_NAME = 'blob-name'
METADATA = {'foo': 'Foo'}
bucket = _Bucket()
blob = self._make_one(BLOB_NAME, bucket=bucket)
self.assertIsNone(blob.metadata)
blob.metadata = METADATA
self.assertEqual(blob.metadata, METADATA)
def test_metageneration(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'metageneration': METAGENERATION})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_metageneration_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.metageneration)
def test_metageneration_string_val(self):
BUCKET = object()
METAGENERATION = 42
blob = self._make_one(
'blob-name', bucket=BUCKET,
properties={'metageneration': str(METAGENERATION)})
self.assertEqual(blob.metageneration, METAGENERATION)
def test_owner(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
OWNER = {'entity': 'project-owner-12345', 'entityId': '23456'}
properties = {'owner': OWNER}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
owner = blob.owner
self.assertEqual(owner['entity'], 'project-owner-12345')
self.assertEqual(owner['entityId'], '23456')
def test_self_link(self):
BLOB_NAME = 'blob-name'
bucket = _Bucket()
SELF_LINK = 'http://example.com/self/'
properties = {'selfLink': SELF_LINK}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.self_link, SELF_LINK)
def test_size(self):
BUCKET = object()
SIZE = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'size': SIZE})
self.assertEqual(blob.size, SIZE)
def test_size_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.size)
def test_size_string_val(self):
BUCKET = object()
SIZE = 42
blob = self._make_one('blob-name', bucket=BUCKET,
properties={'size': str(SIZE)})
self.assertEqual(blob.size, SIZE)
def test_storage_class_getter(self):
blob_name = 'blob-name'
bucket = _Bucket()
storage_class = 'MULTI_REGIONAL'
properties = {'storageClass': storage_class}
blob = self._make_one(blob_name, bucket=bucket, properties=properties)
self.assertEqual(blob.storage_class, storage_class)
def test_storage_class_setter(self):
blob_name = 'blob-name'
bucket = _Bucket()
storage_class = 'COLDLINE'
blob = self._make_one(blob_name, bucket=bucket)
self.assertIsNone(blob.storage_class)
blob.storage_class = storage_class
self.assertEqual(blob.storage_class, storage_class)
self.assertEqual(blob._properties, {'storageClass': storage_class})
def test_time_deleted(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_DELETED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'timeDeleted': TIME_DELETED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_deleted, TIMESTAMP)
def test_time_deleted_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.time_deleted)
def test_time_created(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
TIME_CREATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'timeCreated': TIME_CREATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.time_created, TIMESTAMP)
def test_time_created_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.time_created)
def test_updated(self):
from google.cloud._helpers import _RFC3339_MICROS
from google.cloud._helpers import UTC
BLOB_NAME = 'blob-name'
bucket = _Bucket()
TIMESTAMP = datetime.datetime(2014, 11, 5, 20, 34, 37, tzinfo=UTC)
UPDATED = TIMESTAMP.strftime(_RFC3339_MICROS)
properties = {'updated': UPDATED}
blob = self._make_one(BLOB_NAME, bucket=bucket, properties=properties)
self.assertEqual(blob.updated, TIMESTAMP)
def test_updated_unset(self):
BUCKET = object()
blob = self._make_one('blob-name', bucket=BUCKET)
self.assertIsNone(blob.updated)
class Test__quote(unittest.TestCase):
@staticmethod
def _call_fut(value):
from google.cloud.storage.blob import _quote
return _quote(value)
def test_bytes(self):
quoted = self._call_fut(b'\xDE\xAD\xBE\xEF')
self.assertEqual(quoted, '%DE%AD%BE%EF')
def test_unicode(self):
helicopter = u'\U0001f681'
quoted = self._call_fut(helicopter)
self.assertEqual(quoted, '%F0%9F%9A%81')
def test_bad_type(self):
with self.assertRaises(TypeError):
self._call_fut(None)
class Test__maybe_rewind(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _maybe_rewind
return _maybe_rewind(*args, **kwargs)
def test_default(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_not_rewind(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream, rewind=False)
self.assertIsNone(ret_val)
stream.seek.assert_not_called()
def test_do_rewind(self):
stream = mock.Mock(spec=[u'seek'])
ret_val = self._call_fut(stream, rewind=True)
self.assertIsNone(ret_val)
stream.seek.assert_called_once_with(0, os.SEEK_SET)
class Test__raise_from_invalid_response(unittest.TestCase):
@staticmethod
def _call_fut(*args, **kwargs):
from google.cloud.storage.blob import _raise_from_invalid_response
return _raise_from_invalid_response(*args, **kwargs)
def _helper(self, message, **kwargs):
import requests
from google.resumable_media import InvalidResponse
from google.cloud import exceptions
response = requests.Response()
response.request = requests.Request(
'GET', 'http://example.com').prepare()
response.status_code = http_client.BAD_REQUEST
response._content = message
error = InvalidResponse(response)
with self.assertRaises(exceptions.BadRequest) as exc_info:
self._call_fut(error, **kwargs)
return exc_info
def test_default(self):
message = b'Failure'
exc_info = self._helper(message)
message_str = message.decode('utf-8')
expected = 'GET http://example.com/: {}'.format(message_str)
self.assertEqual(exc_info.exception.message, expected)
self.assertEqual(exc_info.exception.errors, [])
class _Connection(object):
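    # Minimal fake of the storage connection: replays the canned responses
    # passed to the constructor and records every api_request() call in
    # ``_requested`` so tests can assert on method, path and payload.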
API_BASE_URL = 'http://example.com'
USER_AGENT = 'testing 1.2.3'
credentials = object()
def __init__(self, *responses):
self._responses = responses[:]
self._requested = []
self._signed = []
def _respond(self, **kw):
self._requested.append(kw)
response, self._responses = self._responses[0], self._responses[1:]
return response
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
info, content = self._respond(**kw)
if info.get('status') == http_client.NOT_FOUND:
raise NotFound(info)
return content
class _Bucket(object):
def __init__(self, client=None, name='name'):
if client is None:
connection = _Connection()
client = _Client(connection)
self.client = client
self._blobs = {}
self._copied = []
self._deleted = []
self.name = name
self.path = '/b/' + name
def delete_blob(self, blob_name, client=None):
del self._blobs[blob_name]
self._deleted.append((blob_name, client))
class _Signer(object):
def __init__(self):
self._signed = []
def __call__(self, *args, **kwargs):
self._signed.append((args, kwargs))
return ('http://example.com/abucket/a-blob-name?Signature=DEADBEEF'
'&Expiration=%s' % kwargs.get('expiration'))
class _Client(object):
def __init__(self, connection):
self._base_connection = connection
@property
def _connection(self):
return self._base_connection
@property
def _credentials(self):
return self._base_connection.credentials
| 39.369957 | 80 | 0.637804 | ["Apache-2.0"] | rodrigodias27/google-cloud-python | storage/tests/unit/test_blob.py | 91,732 | Python |
class Hero:
def __init__(self,name,health,attackPower):
self.__name = name
self.__health = health
self.__attPower = attackPower
# getter
def getName(self):
return self.__name
def getHealth(self):
return self.__health
# setter
def diserang(self,serangPower):
self.__health -= serangPower
def setAttPower(self,nilaibaru):
self.__attPower = nilaibaru
# start of the game
earthshaker = Hero("earthshaker", 50, 5)
# the game runs
print(earthshaker.getName())
print(earthshaker.getHealth())
earthshaker.diserang(5)
print(earthshaker.getHealth())
| 18.225806 | 44 | 0.748673 | ["MIT"] | zharmedia386/Data-Science-Stuff | Python OOP/test.py | 565 | Python |
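A quick aside on the encapsulation demonstrated above: the double-underscore attributes are name-mangled, so they cannot be read directly from outside the class. A small check (assuming the script above has just run, leaving the hero's health at 45):

# Illustrative name-mangling check; not part of the original file.
print(earthshaker.getHealth())       # 45 - supported access via the getter
print(earthshaker._Hero__health)     # 45 - the mangled name Python actually stores
try:
    print(earthshaker.__health)      # AttributeError: __health is hidden outside the class
except AttributeError as err:
    print("direct access blocked:", err)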
import argparse
import datasets
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, required=True, help='Path to the directory with input dataset')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
dataset = datasets.load_from_disk(args.input).shuffle()
for part in dataset:
print()
print('part', part)
xs = []
ys = []
for i, x in enumerate(dataset[part]):
print(x['tse'], len(x['input_ids']))
xs.append(len(x['input_ids']))
ys.append(x['tse'])
if i >= 10000:
break
plt.clf()
plt.cla()
plt.title(f'{part} CDF')
# plt.xlabel('len')
# plt.ylabel('tse / len')
# plt.scatter(xs, ys)
# plt.hist(ys, bins=5000)
ys.sort()
ys = np.array(ys)
plt.plot(ys, np.arange(len(ys)))
plt.savefig(f'{part}.png')
| 27.410256 | 109 | 0.529467 | ["MIT"] | maximumSHOT-HSE/CurriculumLearning | src/cluster/sort_dataset_by_column/test.py | 1,069 | Python |
# coding: utf-8
"""
    Open Banking Brasil OpenData APIs
    The APIs described in this document refer to the APIs of the OpenData phase of Open Banking Brasil.  # noqa: E501
OpenAPI spec version: 1.0.0-rc5.2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from products_and_services_client.api_client import ApiClient
class InvoiceFinancingsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
    def get_business_invoice_financings(self, **kwargs):  # noqa: E501
        """Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for businesses (Pessoa Jurídica).  # noqa: E501
        Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for businesses (Pessoa Jurídica).  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_business_invoice_financings(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponseBusinessInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_business_invoice_financings_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_business_invoice_financings_with_http_info(**kwargs) # noqa: E501
return data
    def get_business_invoice_financings_with_http_info(self, **kwargs):  # noqa: E501
        """Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for businesses (Pessoa Jurídica).  # noqa: E501
        Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for businesses (Pessoa Jurídica).  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_business_invoice_financings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponseBusinessInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_business_invoice_financings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page-size', params['page_size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/business-invoice-financings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseBusinessInvoiceFinancings', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
    def get_personal_invoice_financings(self, **kwargs):  # noqa: E501
        """Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for natural persons (Pessoa Natural).  # noqa: E501
        Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for natural persons (Pessoa Natural).  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_personal_invoice_financings(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponsePersonalInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_personal_invoice_financings_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_personal_invoice_financings_with_http_info(**kwargs) # noqa: E501
return data
    def get_personal_invoice_financings_with_http_info(self, **kwargs):  # noqa: E501
        """Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for natural persons (Pessoa Natural).  # noqa: E501
        Retrieves the list of Invoice Financings (Adiantamento de Recebíveis) for natural persons (Pessoa Natural).  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_personal_invoice_financings_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
        :param int page: Number of the page being requested (the first page is 1).
        :param int page_size: Total number of records per page.
:return: ResponsePersonalInvoiceFinancings
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'page_size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_personal_invoice_financings" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'page_size' in params:
query_params.append(('page-size', params['page_size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/personal-invoice-financings', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponsePersonalInvoiceFinancings', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 39.709821 | 117 | 0.636875 | ["MIT"] | pitzer42/opbk-br-quickstart | products_and_services_client/api/invoice_financings_api.py | 8,940 | Python |
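For reference, a minimal usage sketch of the generated client above; the module import path is an assumption based on the repository layout, and no real endpoint configuration is shown in the source.

# Hypothetical usage of InvoiceFinancingsApi (import path assumed from the file layout).
from products_and_services_client.api_client import ApiClient
from products_and_services_client.api.invoice_financings_api import InvoiceFinancingsApi
api = InvoiceFinancingsApi(ApiClient())
# Synchronous call with pagination parameters.
business = api.get_business_invoice_financings(page=1, page_size=25)
# Asynchronous call: returns a thread whose .get() yields the response.
thread = api.get_personal_invoice_financings(async_req=True, page=1, page_size=25)
personal = thread.get()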
# -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime
import itertools
import pytest
from numpy.random import randn
from numpy import nan
import numpy as np
from pandas.compat import u
from pandas import (DataFrame, Index, Series, MultiIndex, date_range,
Timedelta, Period)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameReshape(TestData):
def test_pivot(self):
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data)
pivoted = frame.pivot(
index='index', columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
tm.assert_frame_equal(pivoted, expected)
# name tracking
assert pivoted.index.name == 'index'
assert pivoted.columns.name == 'columns'
# don't specify values
pivoted = frame.pivot(index='index', columns='columns')
assert pivoted.index.name == 'index'
assert pivoted.columns.names == (None, 'columns')
with catch_warnings(record=True):
# pivot multiple columns
wp = tm.makePanel()
lp = wp.to_frame()
df = lp.reset_index()
tm.assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())
def test_pivot_duplicates(self):
data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],
'b': ['one', 'two', 'one', 'one', 'two'],
'c': [1., 2., 3., 3., 4.]})
with tm.assert_raises_regex(ValueError, 'duplicate entries'):
data.pivot('a', 'b', 'c')
def test_pivot_empty(self):
df = DataFrame({}, columns=['a', 'b', 'c'])
result = df.pivot('a', 'b', 'c')
expected = DataFrame({})
tm.assert_frame_equal(result, expected, check_names=False)
def test_pivot_integer_bug(self):
df = DataFrame(data=[("A", "1", "A1"), ("B", "2", "B2")])
result = df.pivot(index=1, columns=0, values=2)
repr(result)
tm.assert_index_equal(result.columns, Index(['A', 'B'], name=0))
def test_pivot_index_none(self):
# gh-3962
data = {
'index': ['A', 'B', 'C', 'C', 'B', 'A'],
'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],
'values': [1., 2., 3., 3., 2., 1.]
}
frame = DataFrame(data).set_index('index')
result = frame.pivot(columns='columns', values='values')
expected = DataFrame({
'One': {'A': 1., 'B': 2., 'C': 3.},
'Two': {'A': 1., 'B': 2., 'C': 3.}
})
expected.index.name, expected.columns.name = 'index', 'columns'
assert_frame_equal(result, expected)
# omit values
result = frame.pivot(columns='columns')
expected.columns = pd.MultiIndex.from_tuples([('values', 'One'),
('values', 'Two')],
names=[None, 'columns'])
expected.index.name = 'index'
tm.assert_frame_equal(result, expected, check_names=False)
assert result.index.name == 'index'
assert result.columns.names == (None, 'columns')
expected.columns = expected.columns.droplevel(0)
result = frame.pivot(columns='columns', values='values')
expected.columns.name = 'columns'
tm.assert_frame_equal(result, expected)
def test_stack_unstack(self):
df = self.frame.copy()
df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)
stacked = df.stack()
stacked_df = DataFrame({'foo': stacked, 'bar': stacked})
unstacked = stacked.unstack()
unstacked_df = stacked_df.unstack()
assert_frame_equal(unstacked, df)
assert_frame_equal(unstacked_df['bar'], df)
unstacked_cols = stacked.unstack(0)
unstacked_cols_df = stacked_df.unstack(0)
assert_frame_equal(unstacked_cols.T, df)
assert_frame_equal(unstacked_cols_df['bar'].T, df)
def test_stack_mixed_level(self):
# GH 18310
levels = [range(3), [3, 'a', 'b'], [1, 2]]
# flat columns:
df = DataFrame(1, index=levels[0], columns=levels[1])
result = df.stack()
expected = Series(1, index=MultiIndex.from_product(levels[:2]))
assert_series_equal(result, expected)
# MultiIndex columns:
df = DataFrame(1, index=levels[0],
columns=MultiIndex.from_product(levels[1:]))
result = df.stack(1)
expected = DataFrame(1, index=MultiIndex.from_product([levels[0],
levels[2]]),
columns=levels[1])
assert_frame_equal(result, expected)
# as above, but used labels in level are actually of homogeneous type
result = df[['a', 'b']].stack(1)
expected = expected[['a', 'b']]
assert_frame_equal(result, expected)
def test_unstack_fill(self):
# GH #9746: fill_value keyword argument for Series
# and DataFrame unstack
# From a series
data = Series([1, 2, 4, 5], dtype=np.int16)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack(fill_value=-1)
expected = DataFrame({'a': [1, -1, 5], 'b': [2, 4, -1]},
index=['x', 'y', 'z'], dtype=np.int16)
assert_frame_equal(result, expected)
# From a series with incorrect data type for fill_value
result = data.unstack(fill_value=0.5)
expected = DataFrame({'a': [1, 0.5, 5], 'b': [2, 4, 0.5]},
index=['x', 'y', 'z'], dtype=np.float)
assert_frame_equal(result, expected)
# GH #13971: fill_value when unstacking multiple levels:
df = DataFrame({'x': ['a', 'a', 'b'],
'y': ['j', 'k', 'j'],
'z': [0, 1, 2],
'w': [0, 1, 2]}).set_index(['x', 'y', 'z'])
unstacked = df.unstack(['x', 'y'], fill_value=0)
key = ('w', 'b', 'j')
expected = unstacked[key]
result = pd.Series([0, 0, 2], index=unstacked.index, name=key)
assert_series_equal(result, expected)
stacked = unstacked.stack(['x', 'y'])
stacked.index = stacked.index.reorder_levels(df.index.names)
# Workaround for GH #17886 (unnecessarily casts to float):
stacked = stacked.astype(np.int64)
result = stacked.loc[df.index]
assert_frame_equal(result, df)
# From a series
s = df['w']
result = s.unstack(['x', 'y'], fill_value=0)
expected = unstacked['w']
assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
# From a dataframe
rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
df = DataFrame(rows, columns=list('AB'), dtype=np.int32)
df.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = df.unstack(fill_value=-1)
rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.int32)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
# From a mixed type dataframe
df['A'] = df['A'].astype(np.int16)
df['B'] = df['B'].astype(np.float64)
result = df.unstack(fill_value=-1)
expected['A'] = expected['A'].astype(np.int16)
expected['B'] = expected['B'].astype(np.float64)
assert_frame_equal(result, expected)
# From a dataframe with incorrect data type for fill_value
result = df.unstack(fill_value=0.5)
rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
expected = DataFrame(rows, index=list('xyz'), dtype=np.float)
expected.columns = MultiIndex.from_tuples(
[('A', 'a'), ('A', 'b'), ('B', 'a'), ('B', 'b')])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = pd.date_range('2012-01-01', periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [dv[0], pd.NaT, dv[3]],
'b': [dv[1], dv[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame({'a': [dv[0], dv[0], dv[3]],
'b': [dv[1], dv[2], dv[0]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [td[0], pd.NaT, td[3]],
'b': [td[1], td[2], pd.NaT]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame({'a': [td[0], td[1], td[3]],
'b': [td[1], td[2], td[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [Period('2012-01'), Period('2012-02'), Period('2012-03'),
Period('2012-04')]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
result = data.unstack()
expected = DataFrame({'a': [periods[0], None, periods[3]],
'b': [periods[1], periods[2], None]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame({'a': [periods[0], periods[1], periods[3]],
'b': [periods[1], periods[2], periods[1]]},
index=['x', 'y', 'z'])
assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
# Test unstacking with categorical
data = pd.Series(['a', 'b', 'c', 'a'], dtype='category')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = DataFrame({'a': pd.Categorical(list('axa'),
categories=list('abc')),
'b': pd.Categorical(list('bcx'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
# Fill with non-category results in NaN entries similar to above
result = data.unstack(fill_value='d')
assert_frame_equal(result, expected)
# Fill with category value replaces missing values as expected
result = data.unstack(fill_value='c')
expected = DataFrame({'a': pd.Categorical(list('aca'),
categories=list('abc')),
'b': pd.Categorical(list('bcc'),
categories=list('abc'))},
index=list('xyz'))
assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = pd.DataFrame(dict(state=['IL', 'MI', 'NC'],
index=['a', 'b', 'c'],
some_categories=pd.Series(['a', 'b', 'c']
).astype('category'),
A=np.random.rand(3),
B=1,
C='foo',
D=pd.Timestamp('20010102'),
E=pd.Series([1.0, 50.0, 100.0]
).astype('float32'),
F=pd.Series([3.0, 4.0, 5.0]).astype('float64'),
G=False,
H=pd.Series([1, 200, 923442], dtype='int8')))
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(['state', 'index'])
unstack_and_compare(df1, 'index')
df1 = df.set_index(['state', 'some_categories'])
unstack_and_compare(df1, 'some_categories')
df1 = df.set_index(['F', 'C'])
unstack_and_compare(df1, 'F')
df1 = df.set_index(['G', 'B', 'state'])
unstack_and_compare(df1, 'B')
df1 = df.set_index(['E', 'A'])
unstack_and_compare(df1, 'E')
df1 = df.set_index(['state', 'index'])
s = df1['A']
unstack_and_compare(s, 'index')
def test_stack_ints(self):
columns = MultiIndex.from_tuples(list(itertools.product(range(3),
repeat=3)))
df = DataFrame(np.random.randn(30, 27), columns=columns)
assert_frame_equal(df.stack(level=[1, 2]),
df.stack(level=1).stack(level=1))
assert_frame_equal(df.stack(level=[-2, -1]),
df.stack(level=1).stack(level=1))
df_named = df.copy()
df_named.columns.set_names(range(3), inplace=True)
assert_frame_equal(df_named.stack(level=[1, 2]),
df_named.stack(level=1).stack(level=1))
def test_stack_mixed_levels(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
# GH #8584: Need to check that stacking works when a number
# is passed that is both a level name and in the range of
# the level numbers
df2 = df.copy()
df2.columns.names = ['exp', 'animal', 1]
assert_frame_equal(df2.stack(level=['animal', 1]),
animal_hair_stacked, check_names=False)
assert_frame_equal(df2.stack(level=['exp', 1]),
exp_hair_stacked, check_names=False)
# When mixed types are passed and the ints are not level
# names, raise
pytest.raises(ValueError, df2.stack, level=['animal', 0])
# GH #8584: Having 0 in the level names could raise a
# strange error about lexsort depth
df3 = df.copy()
df3.columns.names = ['exp', 'animal', 0]
assert_frame_equal(df3.stack(level=['animal', 0]),
animal_hair_stacked, check_names=False)
def test_stack_int_level_names(self):
columns = MultiIndex.from_tuples(
[('A', 'cat', 'long'), ('B', 'cat', 'long'),
('A', 'dog', 'short'), ('B', 'dog', 'short')],
names=['exp', 'animal', 'hair_length']
)
df = DataFrame(randn(4, 4), columns=columns)
exp_animal_stacked = df.stack(level=['exp', 'animal'])
animal_hair_stacked = df.stack(level=['animal', 'hair_length'])
exp_hair_stacked = df.stack(level=['exp', 'hair_length'])
df2 = df.copy()
df2.columns.names = [0, 1, 2]
assert_frame_equal(df2.stack(level=[1, 2]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 1]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df2.stack(level=[0, 2]), exp_hair_stacked,
check_names=False)
# Out-of-order int column names
df3 = df.copy()
df3.columns.names = [2, 0, 1]
assert_frame_equal(df3.stack(level=[0, 1]), animal_hair_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 0]), exp_animal_stacked,
check_names=False)
assert_frame_equal(df3.stack(level=[2, 1]), exp_hair_stacked,
check_names=False)
def test_unstack_bool(self):
df = DataFrame([False, False],
index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),
columns=['col'])
rs = df.unstack()
xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],
dtype=object),
index=['a', 'b'],
columns=MultiIndex.from_arrays([['col', 'col'],
['c', 'l']]))
assert_frame_equal(rs, xp)
def test_unstack_level_binding(self):
# GH9856
mi = pd.MultiIndex(
levels=[[u('foo'), u('bar')], [u('one'), u('two')],
[u('a'), u('b')]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
names=[u('first'), u('second'), u('third')])
s = pd.Series(0, index=mi)
result = s.unstack([1, 2]).stack(0)
expected_mi = pd.MultiIndex(
levels=[['foo', 'bar'], ['one', 'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=['first', 'second'])
expected = pd.DataFrame(np.array([[np.nan, 0],
[0, np.nan],
[np.nan, 0],
[0, np.nan]],
dtype=np.float64),
index=expected_mi,
columns=pd.Index(['a', 'b'], name='third'))
assert_frame_equal(result, expected)
def test_unstack_to_series(self):
# check reversibility
data = self.frame.unstack()
assert isinstance(data, Series)
undo = data.unstack().T
assert_frame_equal(undo, self.frame)
# check NA handling
data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})
data.index = Index(['a', 'b', 'c'])
result = data.unstack()
midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],
labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)
assert_series_equal(result, expected)
# check composability of unstack
old_data = data.copy()
for _ in range(4):
data = data.unstack()
assert_frame_equal(old_data, data)
def test_unstack_dtypes(self):
# GH 2929
rows = [[1, 1, 3, 4],
[1, 2, 3, 4],
[2, 1, 3, 4],
[2, 2, 3, 4]]
df = DataFrame(rows, columns=list('ABCD'))
result = df.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# single dtype
df2 = df.set_index(['A', 'B'])
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 4})
assert_series_equal(result, expected)
# mixed
df2 = df.set_index(['A', 'B'])
df2['C'] = 3.
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'int64': 2, 'float64': 2})
assert_series_equal(result, expected)
df2['D'] = 'foo'
df3 = df2.unstack('B')
result = df3.get_dtype_counts()
expected = Series({'float64': 2, 'object': 2})
assert_series_equal(result, expected)
# GH7405
for c, d in (np.zeros(5), np.zeros(5)), \
(np.arange(5, dtype='f8'), np.arange(5, 10, dtype='f8')):
df = DataFrame({'A': ['a'] * 5, 'C': c, 'D': d,
'B': pd.date_range('2012-01-01', periods=5)})
right = df.iloc[:3].copy(deep=True)
df = df.set_index(['A', 'B'])
df['D'] = df['D'].astype('int64')
left = df.iloc[:3].unstack(0)
right = right.set_index(['A', 'B']).unstack(0)
right[('D', 'a')] = right[('D', 'a')].astype('int64')
assert left.shape == (3, 2)
tm.assert_frame_equal(left, right)
def test_unstack_unused_levels(self):
# GH 17845: unused labels in index make unstack() cast int to float
idx = pd.MultiIndex.from_product([['a'], ['A', 'B', 'C', 'D']])[:-1]
df = pd.DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = pd.MultiIndex.from_product([[0, 1], ['A', 'B', 'C']])
expected = pd.DataFrame([[1, 1, 1, 0, 0, 0]], index=['a'],
columns=exp_col)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# Unused items on both levels
levels = [[0, 1, 7], [0, 1, 2, 3]]
labels = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = pd.MultiIndex(levels, labels)
block = np.arange(4).reshape(2, 2)
df = pd.DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = pd.DataFrame(np.concatenate([block * 2, block * 2 + 1],
axis=1),
columns=idx)
tm.assert_frame_equal(result, expected)
assert((result.columns.levels[1] == idx.levels[1]).all())
# With mixed dtype and NaN
levels = [['a', 2, 'c'], [1, 3, 5, 7]]
labels = [[0, -1, 1, 1], [0, 2, -1, 2]]
idx = pd.MultiIndex(levels, labels)
data = np.arange(8)
df = pd.DataFrame(data.reshape(4, 2), index=idx)
cases = ((0, [13, 16, 6, 9, 2, 5, 8, 11],
[np.nan, 'a', 2], [np.nan, 5, 1]),
(1, [8, 11, 1, 4, 12, 15, 13, 16],
[np.nan, 5, 1], [np.nan, 'a', 2]))
for level, idces, col_level, idx_level in cases:
result = df.unstack(level=level)
exp_data = np.zeros(18) * np.nan
exp_data[idces] = data
cols = pd.MultiIndex.from_product([[0, 1], col_level])
expected = pd.DataFrame(exp_data.reshape(3, 6),
index=idx_level, columns=cols)
# Broken (GH 18455):
# tm.assert_frame_equal(result, expected)
diff = result - expected
assert(diff.sum().sum() == 0)
assert((diff + 1).sum().sum() == 8)
assert((result.columns.levels[1] == idx.levels[level]).all())
@pytest.mark.parametrize("cols", [['A', 'C'], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused labels on the unstacked level
df = pd.DataFrame([[2010, 'a', 'I'],
[2011, 'b', 'II']],
columns=['A', 'B', 'C'])
ind = df.set_index(['A', 'B', 'C'], drop=False)
selection = ind.loc[(slice(None), slice(None), 'I'), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product([expected.columns, ['I']],
names=[None, 'C'])
expected.index = expected.index.droplevel('C')
tm.assert_frame_equal(result, expected)
def test_unstack_nan_index(self): # GH7466
cast = lambda val: '{0:1}'.format('' if val != val else val)
nan = np.nan
def verify(df):
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = df.notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(df.iloc[i, j].split('.'))
right = mk_list(df.index[i]) + mk_list(df.columns[j])
right = sorted(list(map(cast, right)))
assert left == right
df = DataFrame({'jim': ['a', 'b', nan, 'd'],
'joe': ['w', 'x', 'y', 'z'],
'jolie': ['a.w', 'b.x', ' .y', 'd.z']})
left = df.set_index(['jim', 'joe']).unstack()['jolie']
right = df.set_index(['joe', 'jim']).unstack()['jolie'].T
assert_frame_equal(left, right)
for idx in itertools.permutations(df.columns[:2]):
mi = df.set_index(list(idx))
for lev in range(2):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
verify(udf['jolie'])
df = DataFrame({'1st': ['d'] * 3 + [nan] * 5 + ['a'] * 2 +
['c'] * 3 + ['e'] * 2 + ['b'] * 5,
'2nd': ['y'] * 2 + ['w'] * 3 + [nan] * 3 +
['z'] * 4 + [nan] * 3 + ['x'] * 3 + [nan] * 2,
'3rd': [67, 39, 53, 72, 57, 80, 31, 18, 11, 30, 59,
50, 62, 59, 76, 52, 14, 53, 60, 51]})
df['4th'], df['5th'] = \
df.apply(lambda r: '.'.join(map(cast, r)), axis=1), \
df.apply(lambda r: '.'.join(map(cast, r.iloc[::-1])), axis=1)
for idx in itertools.permutations(['1st', '2nd', '3rd']):
mi = df.set_index(list(idx))
for lev in range(3):
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
for col in ['4th', '5th']:
verify(udf[col])
# GH7403
df = pd.DataFrame(
{'A': list('aaaabbbb'), 'B': range(8), 'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, 0, 1, 2, nan, nan, nan, nan],
[nan, nan, nan, nan, 4, 5, 6, 7]]
vals = list(map(list, zip(*vals)))
idx = Index([nan, 0, 1, 2, 4, 5, 6, 7], name='B')
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[2, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[2, nan], [0, 4], [1, 5], [nan, 6], [3, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
df = pd.DataFrame({'A': list('aaaabbbb'), 'B': list(range(4)) * 2,
'C': range(8)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack(0)
vals = [[3, nan], [0, 4], [1, 5], [2, 6], [nan, 7]]
cols = MultiIndex(levels=[['C'], ['a', 'b']],
labels=[[0, 0], [0, 1]],
names=[None, 'A'])
idx = Index([nan, 0, 1, 2, 3], name='B')
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH7401
df = pd.DataFrame({'A': list('aaaaabbbbb'), 'C': np.arange(10),
'B': (date_range('2012-01-01', periods=5)
.tolist() * 2)})
df.iloc[3, 1] = np.NaN
left = df.set_index(['A', 'B']).unstack()
vals = np.array([[3, 0, 1, 2, nan, 4], [nan, 5, 6, 7, 8, 9]])
idx = Index(['a', 'b'], name='A')
cols = MultiIndex(levels=[['C'], date_range('2012-01-01', periods=5)],
labels=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
names=[None, 'B'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
# GH4862
vals = [['Hg', nan, nan, 680585148],
['U', 0.0, nan, 680585148],
['Pb', 7.07e-06, nan, 680585148],
['Sn', 2.3614e-05, 0.0133, 680607017],
['Ag', 0.0, 0.0133, 680607017],
['Hg', -0.00015, 0.0133, 680607017]]
df = DataFrame(vals, columns=['agent', 'change', 'dosage', 's_id'],
index=[17263, 17264, 17265, 17266, 17267, 17268])
left = df.copy().set_index(['s_id', 'dosage', 'agent']).unstack()
vals = [[nan, nan, 7.07e-06, nan, 0.0],
[0.0, -0.00015, nan, 2.3614e-05, nan]]
idx = MultiIndex(levels=[[680585148, 680607017], [0.0133]],
labels=[[0, 1], [-1, 0]],
names=['s_id', 'dosage'])
cols = MultiIndex(levels=[['change'], ['Ag', 'Hg', 'Pb', 'Sn', 'U']],
labels=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
names=[None, 'agent'])
right = DataFrame(vals, columns=cols, index=idx)
assert_frame_equal(left, right)
left = df.loc[17264:].copy().set_index(['s_id', 'dosage', 'agent'])
assert_frame_equal(left.unstack(), right)
# GH9497 - multiple unstack with nulls
df = DataFrame({'1st': [1, 2, 1, 2, 1, 2],
'2nd': pd.date_range('2014-02-01', periods=6,
freq='D'),
'jim': 100 + np.arange(6),
'joe': (np.random.randn(6) * 10).round(2)})
df['3rd'] = df['2nd'] - pd.Timestamp('2014-02-02')
df.loc[1, '2nd'] = df.loc[3, '2nd'] = nan
df.loc[1, '3rd'] = df.loc[4, '3rd'] = nan
left = df.set_index(['1st', '2nd', '3rd']).unstack(['2nd', '3rd'])
assert left.notna().values.sum() == 2 * len(df)
for col in ['jim', 'joe']:
for _, r in df.iterrows():
key = r['1st'], (col, r['2nd'], r['3rd'])
assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self):
# GH 8039
t = datetime(2014, 1, 1)
df = DataFrame(
[1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, 'A', 'B')]))
result = df.stack()
eidx = MultiIndex.from_product([(0, 1, 2, 3), ('B',)])
ecols = MultiIndex.from_tuples([(t, 'A')])
expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
assert_frame_equal(result, expected)
def test_stack_partial_multiIndex(self):
# GH 8844
def _test_stack_with_multiindex(multiindex):
df = DataFrame(np.arange(3 * len(multiindex))
.reshape(3, len(multiindex)),
columns=multiindex)
for level in (-1, 0, 1, [0, 1], [1, 0]):
result = df.stack(level=level, dropna=False)
if isinstance(level, int):
# Stacking a single level should not make any all-NaN rows,
# so df.stack(level=level, dropna=False) should be the same
# as df.stack(level=level, dropna=True).
expected = df.stack(level=level, dropna=True)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
df.columns = MultiIndex.from_tuples(df.columns.get_values(),
names=df.columns.names)
expected = df.stack(level=level, dropna=False)
if isinstance(expected, Series):
assert_series_equal(result, expected)
else:
assert_frame_equal(result, expected)
full_multiindex = MultiIndex.from_tuples([('B', 'x'), ('B', 'z'),
('A', 'y'),
('C', 'x'), ('C', 'u')],
names=['Upper', 'Lower'])
for multiindex_columns in ([0, 1, 2, 3, 4],
[0, 1, 2, 3], [0, 1, 2, 4],
[0, 1, 2], [1, 2, 3], [2, 3, 4],
[0, 1], [0, 2], [0, 3],
[0], [2], [4]):
_test_stack_with_multiindex(full_multiindex[multiindex_columns])
if len(multiindex_columns) > 1:
multiindex_columns.reverse()
_test_stack_with_multiindex(
full_multiindex[multiindex_columns])
df = DataFrame(np.arange(6).reshape(2, 3),
columns=full_multiindex[[0, 1, 3]])
result = df.stack(dropna=False)
expected = DataFrame([[0, 2], [1, nan], [3, 5], [4, nan]],
index=MultiIndex(
levels=[[0, 1], ['u', 'x', 'y', 'z']],
labels=[[0, 0, 1, 1],
[1, 3, 1, 3]],
names=[None, 'Lower']),
columns=Index(['B', 'C'], name='Upper'),
dtype=df.dtypes[0])
assert_frame_equal(result, expected)
def test_stack_preserve_categorical_dtype(self):
# GH13854
for ordered in [False, True]:
for labels in [list("yxz"), list("yxy")]:
cidx = pd.CategoricalIndex(labels, categories=list("xyz"),
ordered=ordered)
df = DataFrame([[10, 11, 12]], columns=cidx)
result = df.stack()
                # `MultiIndex.from_product` preserves categorical dtype -
# it's tested elsewhere.
midx = pd.MultiIndex.from_product([df.index, cidx])
expected = Series([10, 11, 12], index=midx)
tm.assert_series_equal(result, expected)
def test_unstack_fill_frame_object():
# GH12815 Test unstacking with object.
data = pd.Series(['a', 'b', 'c', 'a'], dtype='object')
data.index = pd.MultiIndex.from_tuples(
[('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')])
# By default missing values will be NaN
result = data.unstack()
expected = pd.DataFrame(
{'a': ['a', np.nan, 'a'], 'b': ['b', 'c', np.nan]},
index=list('xyz')
)
assert_frame_equal(result, expected)
# Fill with any value replaces missing values as expected
result = data.unstack(fill_value='d')
expected = pd.DataFrame(
{'a': ['a', 'd', 'a'], 'b': ['b', 'c', 'd']},
index=list('xyz')
)
assert_frame_equal(result, expected)
| 40.52862 | 79 | 0.480131 | ["BSD-3-Clause"] | wla80/pandas | pandas/tests/frame/test_reshape.py | 36,111 | Python |
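As a compact standalone illustration of the `unstack(fill_value=...)` behaviour the tests above exercise (public pandas API only; not part of the test suite):

import pandas as pd
s = pd.Series([1, 2, 4, 5],
              index=pd.MultiIndex.from_tuples(
                  [('x', 'a'), ('x', 'b'), ('y', 'b'), ('z', 'a')]))
print(s.unstack())               # missing cells become NaN
print(s.unstack(fill_value=-1))  # missing cells are filled with -1 instead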
from os import path
import autolens as al
"""
This script simulates `Imaging` of a strong lens where:
- The lens `Galaxy`'s total mass distribution is a *SphericalIsothermal*.
- The source `Galaxy`'s `LightProfile` is a *SphericalExponential* (two such source galaxies are used).
This dataset is used in chapter 2, tutorials 1-3.
"""
"""
The `dataset_type` describes the type of data being simulated (in this case, `Imaging` data) and `dataset_name`
gives it a descriptive name. They define the folder the dataset is output to on your hard-disk:
- The image will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/image.fits`.
- The noise-map will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/lens_name/noise_map.fits`.
- The psf will be output to `/autolens_workspace/dataset/dataset_type/dataset_name/psf.fits`.
"""
dataset_type = "chapter_2"
dataset_name = "mass_sis__source_exp_x2"
"""
The path where the dataset will be output, which in this case is:
`/autolens_workspace/howtolens/dataset/chapter_2/mass_sis__source_exp/`
"""
dataset_path = path.join("dataset", "howtolens", dataset_type, dataset_name)
"""
For simulating an image of a strong lens, we recommend using a GridIterate object. This represents a grid of $(y,x)$
coordinates like an ordinary Grid, but when the light-profile's image is evaluated below (using the Tracer) the
sub-size of the grid is iteratively increased (in steps of 2, 4, 8, 16, 24) until the input fractional accuracy of
99.99% is met.
This ensures that the divergent and bright central regions of the source galaxy are fully resolved when determining the
total flux emitted within a pixel.
"""
grid = al.GridIterate.uniform(
shape_2d=(100, 100),
pixel_scales=0.1,
fractional_accuracy=0.9999,
sub_steps=[2, 4, 8, 16, 24],
)
"""Simulate a simple Gaussian PSF for the image."""
psf = al.Kernel.from_gaussian(
shape_2d=(11, 11), sigma=0.1, pixel_scales=grid.pixel_scales
)
"""
To simulate the `Imaging` dataset we first create a simulator, which defines the exposure time, background sky,
noise levels and psf of the dataset that is simulated.
"""
simulator = al.SimulatorImaging(
exposure_time=300.0, psf=psf, background_sky_level=0.1, add_poisson_noise=True
)
"""
Set up the lens `Galaxy`'s mass (a spherical isothermal profile) and the source galaxies' light (spherical exponential profiles) for this simulated lens.
For lens modeling, defining ellipticity in terms of the `elliptical_comps` improves the model-fitting procedure.
However, for simulating a strong lens you may find it more intuitive to define the elliptical geometry using the
axis-ratio of the profile (axis_ratio = semi-major axis / semi-minor axis = b/a) and position angle phi, where phi is
in degrees and defined counter clockwise from the positive x-axis.
We can use the **PyAutoLens** `convert` module to determine the elliptical components from the axis-ratio and phi.
"""
lens_galaxy = al.Galaxy(
redshift=0.5, mass=al.mp.SphericalIsothermal(centre=(0.0, 0.0), einstein_radius=1.6)
)
source_galaxy_0 = al.Galaxy(
redshift=1.0,
bulge=al.lp.SphericalExponential(
centre=(0.2, 0.0), intensity=0.2, effective_radius=0.2
),
)
source_galaxy_1 = al.Galaxy(
redshift=1.0,
bulge=al.lp.SphericalExponential(
centre=(-0.2, 0.0), intensity=0.2, effective_radius=0.2
),
)
"""Use these galaxies to set up a tracer, which will generate the image for the simulated `Imaging` dataset."""
tracer = al.Tracer.from_galaxies(
galaxies=[lens_galaxy, source_galaxy_0, source_galaxy_1]
)
"""
We can now pass this simulator a tracer, which creates the ray-traced image plotted above and simulates it as an
imaging dataset.
"""
imaging = simulator.from_tracer_and_grid(tracer=tracer, grid=grid)
"""Output our simulated dataset to the dataset path as .fits files"""
imaging.output_to_fits(
image_path=path.join(dataset_path, "image.fits"),
psf_path=path.join(dataset_path, "psf.fits"),
noise_map_path=path.join(dataset_path, "noise_map.fits"),
overwrite=True,
)
"""
Pickle the `Tracer` in the dataset folder, ensuring the true `Tracer` is safely stored and available if we need to
check how the dataset was simulated in the future.
This will also be accessible via the `Aggregator` if a model-fit is performed using the dataset.
"""
tracer.save(file_path=dataset_path, filename="true_tracer")
| 37.107438 | 120 | 0.73029 | ["MIT"] | a-mere-peasant/PyAutoLens | howtolens/simulators/chapter_2/mass_sis__source_exp_x2.py | 4,490 | Python |
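A read-back sketch for the files written above; the `Imaging.from_fits` signature is an assumption about the same PyAutoLens version, and the pixel scale mirrors the simulation grid.

# Hypothetical check that the simulated dataset loads back correctly (loader API assumed).
imaging = al.Imaging.from_fits(
    image_path=path.join(dataset_path, "image.fits"),
    psf_path=path.join(dataset_path, "psf.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    pixel_scales=0.1,
)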
#!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
all_raw = open(sys.argv[1], 'r')
# init empty lists
cell0v = []
cell1v = []
cell2v = []
cell3v = []
totalv = []
# Process data into lists
for line in all_raw:
if 'voltage cell 0: ' in line:
try:
cell0v.append(float(line.replace('voltage cell 0: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 1: ' in line:
try:
cell1v.append(float(line.replace('voltage cell 1: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 2: ' in line:
try:
cell2v.append(float(line.replace('voltage cell 2: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage cell 3: ' in line:
try:
cell3v.append(float(line.replace('voltage cell 3: ', '')[:-4]))
except:
print('Malformed data: ' + line)
if 'voltage total: ' in line:
try:
totalv.append(float(line.replace('voltage total: ', '')[:-4]))
except:
print('Malformed data: ' + line)
# Write images
# Total voltage of pack
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(totalv)
plt.savefig(sys.argv[1]+'_total_voltage.png')
plt.clf()
# Cells
plt.figure(figsize=(15, 15))
plt.tight_layout()
plt.plot(cell0v, color='blue')
plt.plot(cell1v, color='red')
plt.plot(cell2v, color='green')
plt.plot(cell3v, color='cyan')
plt.xlabel('C0 = blue C1 = red C2 = green C3 = cyan')
plt.savefig(sys.argv[1]+'_cell_voltage.png')
| 29.035714 | 75 | 0.587946 | ["Unlicense"] | rjmendez/lifepo4_bms | plot_battery.py | 1,626 | Python |
import time
from messaging_pyx import Context, Poller, SubSocket, PubSocket # pylint: disable=no-name-in-module, import-error
MSGS = 1e5
if __name__ == "__main__":
c = Context()
sub_sock = SubSocket()
pub_sock = PubSocket()
sub_sock.connect(c, "controlsState")
pub_sock.connect(c, "controlsState")
poller = Poller()
poller.registerSocket(sub_sock)
t = time.time()
for i in range(int(MSGS)):
bts = i.to_bytes(4, 'little')
pub_sock.send(bts)
for s in poller.poll(100):
dat = s.receive()
ii = int.from_bytes(dat, 'little')
assert(i == ii)
dt = time.time() - t
print("%.1f msg/s" % (MSGS / dt))
| 21.032258 | 113 | 0.642638 | ["MIT"] | 1Thamer/openpilot0.6.6 | selfdrive/messaging/demo.py | 652 | Python |
###########################
#
# #21 Amicable numbers - Project Euler
# https://projecteuler.net/problem=21
#
# Code by Kevin Marciniak
#
###########################
def sumproperdivisors(num):
    # Sum of the proper divisors of num (all divisors up to num // 2).
    total = 0
    for x in range(1, int(num / 2) + 1):
        if num % x == 0:
            total += x
    return total
amicableList = []
for x in range(0, 10000):
temp = sumproperdivisors(x)
    # temp already equals sumproperdivisors(x), so the pair is amicable when
    # sumproperdivisors(temp) == x and the two numbers differ.
    if sumproperdivisors(temp) == x and temp != x:
if x not in amicableList and temp not in amicableList:
amicableList.append(x)
amicableList.append(temp)
totalSum = 0
for y in range(0, len(amicableList)):
totalSum += amicableList[y]
print(totalSum)
| 20.941176 | 83 | 0.573034 | ["BSD-3-Clause"] | kmarcini/Project-Euler-Python | problem0021.py | 712 | Python |
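A small sanity check that could accompany the script above, using the classic amicable pair (220, 284), whose proper-divisor sums are 284 and 220 respectively:

# Sanity check for sumproperdivisors on the best-known amicable pair.
assert sumproperdivisors(220) == 284
assert sumproperdivisors(284) == 220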
import threading
from typing import Callable, List, MutableMapping, NamedTuple
from dagster import check
from dagster.core.events.log import EventLogEntry
from .sql_event_log import SqlEventLogStorage
POLLING_CADENCE = 0.1 # 100 ms
class CallbackAfterCursor(NamedTuple):
"""Callback passed from Observer class in event polling
start_cursor (int): Only process EventLogEntrys with an id >= start_cursor
(earlier ones have presumably already been processed)
callback (Callable[[EventLogEntry], None]): callback passed from Observer
to call on new EventLogEntrys
"""
start_cursor: int
callback: Callable[[EventLogEntry], None]
class SqlPollingEventWatcher:
"""Event Log Watcher that uses a multithreaded polling approach to retrieving new events for run_ids
This class' job is to manage a collection of threads that each poll the event log for a given run_id
Uses one thread (SqlPollingRunIdEventWatcherThread) per watched run_id
LOCKING INFO:
ORDER: _dict_lock -> run_id_thread.callback_fn_list_lock
INVARIANTS: _dict_lock protects _run_id_to_watcher_dict
"""
def __init__(self, event_log_storage: SqlEventLogStorage):
self._event_log_storage = check.inst_param(
event_log_storage, "event_log_storage", SqlEventLogStorage
)
# INVARIANT: dict_lock protects _run_id_to_watcher_dict
self._dict_lock: threading.Lock = threading.Lock()
self._run_id_to_watcher_dict: MutableMapping[str, SqlPollingRunIdEventWatcherThread] = {}
self._disposed = False
def has_run_id(self, run_id: str) -> bool:
run_id = check.str_param(run_id, "run_id")
with self._dict_lock:
_has_run_id = run_id in self._run_id_to_watcher_dict
return _has_run_id
def watch_run(self, run_id: str, start_cursor: int, callback: Callable[[EventLogEntry], None]):
run_id = check.str_param(run_id, "run_id")
start_cursor = check.int_param(start_cursor, "start_cursor")
callback = check.callable_param(callback, "callback")
with self._dict_lock:
if run_id not in self._run_id_to_watcher_dict:
self._run_id_to_watcher_dict[run_id] = SqlPollingRunIdEventWatcherThread(
self._event_log_storage, run_id
)
self._run_id_to_watcher_dict[run_id].daemon = True
self._run_id_to_watcher_dict[run_id].start()
self._run_id_to_watcher_dict[run_id].add_callback(start_cursor, callback)
def unwatch_run(self, run_id: str, handler: Callable[[EventLogEntry], None]):
run_id = check.str_param(run_id, "run_id")
handler = check.callable_param(handler, "handler")
with self._dict_lock:
if run_id in self._run_id_to_watcher_dict:
self._run_id_to_watcher_dict[run_id].remove_callback(handler)
if self._run_id_to_watcher_dict[run_id].should_thread_exit.is_set():
del self._run_id_to_watcher_dict[run_id]
def __del__(self):
self.close()
def close(self):
if not self._disposed:
self._disposed = True
with self._dict_lock:
for watcher_thread in self._run_id_to_watcher_dict.values():
if not watcher_thread.should_thread_exit.is_set():
watcher_thread.should_thread_exit.set()
for run_id in self._run_id_to_watcher_dict:
self._run_id_to_watcher_dict[run_id].join()
del self._run_id_to_watcher_dict
class SqlPollingRunIdEventWatcherThread(threading.Thread):
"""subclass of Thread that watches a given run_id for new Events by polling every POLLING_CADENCE
Holds a list of callbacks (_callback_fn_list) each passed in by an `Observer`. Note that
the callbacks have a cursor associated; this means that the callbacks should be
only executed on EventLogEntrys with an associated id >= callback.start_cursor
Exits when `self.should_thread_exit` is set.
LOCKING INFO:
INVARIANTS: _callback_fn_list_lock protects _callback_fn_list
"""
def __init__(self, event_log_storage: SqlEventLogStorage, run_id: str):
super(SqlPollingRunIdEventWatcherThread, self).__init__()
self._event_log_storage = check.inst_param(
event_log_storage, "event_log_storage", SqlEventLogStorage
)
self._run_id = check.str_param(run_id, "run_id")
self._callback_fn_list_lock: threading.Lock = threading.Lock()
self._callback_fn_list: List[CallbackAfterCursor] = []
self._should_thread_exit = threading.Event()
self.name = f"mysql-event-watch-run-id-{self._run_id}"
@property
def should_thread_exit(self) -> threading.Event:
return self._should_thread_exit
def add_callback(self, start_cursor: int, callback: Callable[[EventLogEntry], None]):
"""Observer has started watching this run.
Add a callback to execute on new EventLogEntrys st. id >= start_cursor
Args:
start_cursor (int): minimum event_id for the callback to execute
callback (Callable[[EventLogEntry], None]): callback to update the Dagster UI
"""
start_cursor = check.int_param(start_cursor, "start_cursor")
callback = check.callable_param(callback, "callback")
with self._callback_fn_list_lock:
self._callback_fn_list.append(CallbackAfterCursor(start_cursor, callback))
def remove_callback(self, callback: Callable[[EventLogEntry], None]):
"""Observer has stopped watching this run;
Remove a callback from the list of callbacks to execute on new EventLogEntrys
Also kill thread if no callbacks remaining (i.e. no Observers are watching this run_id)
Args:
callback (Callable[[EventLogEntry], None]): callback to remove from list of callbacks
"""
callback = check.callable_param(callback, "callback")
with self._callback_fn_list_lock:
self._callback_fn_list = [
callback_with_cursor
for callback_with_cursor in self._callback_fn_list
if callback_with_cursor.callback != callback
]
if not self._callback_fn_list:
self._should_thread_exit.set()
def run(self):
"""Polling function to update Observers with EventLogEntrys from Event Log DB.
Wakes every POLLING_CADENCE &
1. executes a SELECT query to get new EventLogEntrys
2. fires each callback (taking into account the callback.cursor) on the new EventLogEntrys
Uses max_index_so_far as a cursor in the DB to make sure that only new records are retrieved
"""
cursor = -1
while not self._should_thread_exit.wait(POLLING_CADENCE):
events = self._event_log_storage.get_logs_for_run(self._run_id, cursor=cursor)
for event_record in events:
cursor += 1
with self._callback_fn_list_lock:
for callback_with_cursor in self._callback_fn_list:
if callback_with_cursor.start_cursor < cursor:
callback_with_cursor.callback(event_record)
| 44.841463 | 104 | 0.68371 | ["Apache-2.0"] | AndreaGiardini/dagster | python_modules/dagster/dagster/core/storage/event_log/polling_event_watcher.py | 7,354 | Python |
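A minimal usage sketch for the watcher above; the `event_log_storage` instance and `run_id` are assumptions, while the call signatures follow `watch_run`/`unwatch_run` as defined in the file.

# Hypothetical wiring of SqlPollingEventWatcher (storage instance and run_id assumed to exist).
watcher = SqlPollingEventWatcher(event_log_storage)
def on_event(record):                     # invoked for each new EventLogEntry past the start cursor
    print("new event:", record)
watcher.watch_run(run_id, 0, on_event)    # spawns a per-run thread polling every 100 ms
# ... later, once the observer is done:
watcher.unwatch_run(run_id, on_event)
watcher.close()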
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u'bdg-sequila'
copyright = u'2019, biodatageeks.org'
author = u'biodatageeks.org'
# The short X.Y version
version = u'|version|'
# The full version, including alpha/beta/rc tags
release = u'|version|'
project_name = u'bdg-sequila'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinxcontrib.github_ribbon',
# 'sphinx.ext.ifconfig',
'sphinxcontrib.bibtex',
'sphinx.ext.autosectionlabel',
'rst2pdf.pdfbuilder'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
#html_logo='sequila.png'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'bdg-sequiladoc'
#--- Options for PDF ------------------------
pdf_documents = [('index', u'rst2pdf', u'SeQuiLa documentation', u'biodatageeks.org'),]
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'bdg-sequila.tex', u'SeQuiLa Documentation',
u'biodatageeks.org', 'howto'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'bdg-sequila', u'bdg-spark-granges Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'bdg-spark-granges', u'bdg-spark-granges Documentation',
author, 'bdg-spark-granges', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
##github
#pip install sphinxcontrib-github_ribbon
github_ribbon_repo = 'ZSI-Bio/bdg-sequila'
github_ribbon_position = "right"
github_ribbon_color ="red"
#latexpdf
text_add_secnumbers = False
#latex_logo = "sequila.png"
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| 29.146226 | 87 | 0.653666 | ["Apache-2.0"] | agaszmurlo/bdg-sequila | docs/source/conf.py | 6,179 | Python |
from typing import Optional, Type
from pydantic import UUID4
from tortoise import fields, models
from tortoise.exceptions import DoesNotExist
from fastapi_users.db.base import BaseUserDatabase
from fastapi_users.models import UD
class TortoiseBaseUserModel(models.Model):
id = fields.UUIDField(pk=True, generated=False)
email = fields.CharField(index=True, unique=True, null=False, max_length=255)
hashed_password = fields.CharField(null=False, max_length=255)
is_active = fields.BooleanField(default=True, null=False)
is_superuser = fields.BooleanField(default=False, null=False)
async def to_dict(self):
d = {}
for field in self._meta.db_fields:
d[field] = getattr(self, field)
for field in self._meta.backward_fk_fields:
d[field] = await getattr(self, field).all().values()
return d
class Meta:
abstract = True
class TortoiseBaseOAuthAccountModel(models.Model):
id = fields.UUIDField(pk=True, generated=False, max_length=255)
oauth_name = fields.CharField(null=False, max_length=255)
access_token = fields.CharField(null=False, max_length=255)
expires_at = fields.IntField(null=False)
refresh_token = fields.CharField(null=True, max_length=255)
account_id = fields.CharField(index=True, null=False, max_length=255)
account_email = fields.CharField(null=False, max_length=255)
class Meta:
abstract = True
class TortoiseUserDatabase(BaseUserDatabase[UD]):
"""
Database adapter for Tortoise ORM.
:param user_db_model: Pydantic model of a DB representation of a user.
:param model: Tortoise ORM model.
    :param oauth_account_model: Optional Tortoise ORM model of an OAuth account.
"""
model: Type[TortoiseBaseUserModel]
oauth_account_model: Optional[Type[TortoiseBaseOAuthAccountModel]]
def __init__(
self,
user_db_model: Type[UD],
model: Type[TortoiseBaseUserModel],
oauth_account_model: Optional[Type[TortoiseBaseOAuthAccountModel]] = None,
):
super().__init__(user_db_model)
self.model = model
self.oauth_account_model = oauth_account_model
async def get(self, id: UUID4) -> Optional[UD]:
try:
query = self.model.get(id=id)
if self.oauth_account_model is not None:
query = query.prefetch_related("oauth_accounts")
user = await query
user_dict = await user.to_dict()
return self.user_db_model(**user_dict)
except DoesNotExist:
return None
async def get_by_email(self, email: str) -> Optional[UD]:
query = self.model.filter(email__iexact=email).first()
if self.oauth_account_model is not None:
query = query.prefetch_related("oauth_accounts")
user = await query
if user is None:
return None
user_dict = await user.to_dict()
return self.user_db_model(**user_dict)
async def get_by_oauth_account(self, oauth: str, account_id: str) -> Optional[UD]:
try:
query = self.model.get(
oauth_accounts__oauth_name=oauth, oauth_accounts__account_id=account_id
).prefetch_related("oauth_accounts")
user = await query
user_dict = await user.to_dict()
return self.user_db_model(**user_dict)
except DoesNotExist:
return None
async def create(self, user: UD) -> UD:
user_dict = user.dict()
oauth_accounts = user_dict.pop("oauth_accounts", None)
model = self.model(**user_dict)
await model.save()
if oauth_accounts and self.oauth_account_model:
oauth_account_objects = []
for oauth_account in oauth_accounts:
oauth_account_objects.append(
self.oauth_account_model(user=model, **oauth_account)
)
await self.oauth_account_model.bulk_create(oauth_account_objects)
return user
async def update(self, user: UD) -> UD:
user_dict = user.dict()
user_dict.pop("id") # Tortoise complains if we pass the PK again
oauth_accounts = user_dict.pop("oauth_accounts", None)
model = await self.model.get(id=user.id)
for field in user_dict:
setattr(model, field, user_dict[field])
await model.save()
if oauth_accounts and self.oauth_account_model:
await model.oauth_accounts.all().delete()
oauth_account_objects = []
for oauth_account in oauth_accounts:
oauth_account_objects.append(
self.oauth_account_model(user=model, **oauth_account)
)
await self.oauth_account_model.bulk_create(oauth_account_objects)
return user
async def delete(self, user: UD) -> None:
await self.model.filter(id=user.id).delete()
| 33.917808 | 87 | 0.657108 | ["MIT"] | JeffreyThijs/fastapi-users | fastapi_users/db/tortoise.py | 4,952 | Python |
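To show how the adapter above is typically wired up, a hedged sketch; the concrete user models are illustrative assumptions (they are not defined in the source), and the lookup must run inside an async context.

# Hypothetical wiring of TortoiseUserDatabase (model class names are assumptions).
from fastapi_users import models
class UserDB(models.BaseUserDB):
    pass
class UserModel(TortoiseBaseUserModel):
    pass
user_db = TortoiseUserDatabase(UserDB, UserModel)
async def lookup(email: str):
    return await user_db.get_by_email(email)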